hip_filename stringlengths 5 84 | hip_content stringlengths 79 9.69M | cuda_filename stringlengths 4 83 | cuda_content stringlengths 19 9.69M |
|---|---|---|---|
38b191f8869e3f1c4139cc8b04f5f600654b7885.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma (iuriish@yahoo.com), created on 19.04.2018
// @author raver119@gmail.com
//
#include <helpers/ConstantTadHelper.h>
#include <helpers/PointersManager.h>
#include <helpers/ShapeUtils.h>
#include <ops/declarable/helpers/activations.h>
#include <system/op_boilerplate.h>
#include <numeric>
namespace sd {
namespace ops {
namespace helpers {
///////////////////////////////////////////////////////////////////
// PReLU forward kernel: z[i] = x[i] when x[i] >= 0, otherwise x[i] * y[...], where y (alpha)
// is broadcast against x. Uses a grid-stride loop, so any launch configuration is valid.
// NOTE(review): alpha offsets are computed from coords + 1, i.e. alpha coordinates appear to
// start at the input's second dimension (alpha rank == input rank - 1) — confirm at call sites.
template <typename X, typename Y>
void SD_KERNEL preluCuda(const void *vx, const sd::LongType *xShapeInfo, const void *vy, const sd::LongType *yShapeInfo,
void *vz) {
const auto x = reinterpret_cast<const X *>(vx);
const auto y = reinterpret_cast<const Y *>(vy);
auto z = reinterpret_cast<X *>(vz);
// per-block cached scalars, filled by thread 0 below
__shared__ sd::LongType xzLen;
__shared__ int xzRank, yRank;
if (threadIdx.x == 0) {
xzLen = shape::length(xShapeInfo);
xzRank = shape::rank(xShapeInfo);  // NOTE(review): written but never read in this kernel
yRank = shape::rank(yShapeInfo);
}
__syncthreads();
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
int coords[SD_MAX_RANK];
// grid-stride loop over all elements of x/z
for (int i = tid; i < xzLen; i += blockDim.x * gridDim.x) {
shape::index2coords(i, xShapeInfo, coords);
const auto xzOffset = shape::getOffset(xShapeInfo, coords);
const auto xVal = x[xzOffset];
if (xVal < 0) {
// zero the coordinates along alpha's size-1 (broadcast) dimensions
for (sd::Unsigned j = 0; j < yRank; ++j)
if (yShapeInfo[j + 1] == 1) coords[j + 1] = 0;
z[xzOffset] = xVal * y[shape::getOffset(yShapeInfo, coords + 1)];
} else
z[xzOffset] = xVal;
}
}
///////////////////////////////////////////////////////////////////
// Host-side wrapper: launches preluCuda<X, Y> on the given stream with the supplied configuration.
// NOTE(review): the kernel uses no dynamic shared memory, so sharedMem is pass-through only.
template <typename X, typename Y>
void preluCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem,
const hipStream_t *stream, const void *vx, const sd::LongType *xShapeInfo, const void *vy,
const sd::LongType *yShapeInfo, void *vz) {
hipLaunchKernelGGL(( preluCuda<X, Y>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, vx, xShapeInfo, vy, yShapeInfo, vz);
}
///////////////////////////////////////////////////////////////////
// Applies PReLU element-wise on device: output = input when input >= 0, otherwise input * alpha,
// with alpha broadcast against input (see preluCuda). Synchronizes the stream before returning.
void prelu(sd::LaunchContext *context, const NDArray &input, const NDArray &alpha, NDArray &output) {
  PointersManager manager(context, "prelu");

  // Fixed launch configuration; the kernel uses a grid-stride loop, so any size is handled.
  const int threadsPerBlock = 256;
  const int blocksPerGrid = 512;
  const int sharedMem = 512;  // NOTE(review): preluCuda declares no dynamic shared memory; kept for launch parity

  const auto xType = input.dataType();
  // NOTE: BUILD_SINGLE_SELECTOR_TWICE instantiates both template types (X and Y) from xType,
  // so alpha is assumed to share input's floating-point data type; the previously
  // computed-but-unused alpha dtype local was removed.

  NDArray::prepareSpecialUse({&output}, {&input, &alpha});
  BUILD_SINGLE_SELECTOR_TWICE(
      xType, preluCudaLauncher,
      (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), input.specialBuffer(),
       input.specialShapeInfo(), alpha.specialBuffer(), alpha.specialShapeInfo(), output.specialBuffer()),
      SD_FLOAT_TYPES);
  NDArray::registerSpecialUse({&output}, {&input, &alpha});
  manager.synchronize();
}
///////////////////////////////////////////////////////////////////
// PReLU backprop kernel. For each element:
//   x >= 0: dLdI = dLdO
//   x <  0: dLdI = dLdO * alpha, and dLdA += dLdO * x (accumulated atomically across the
//           elements that broadcast onto the same alpha entry — dLdA must be zeroed beforehand).
// Grid-stride loop; alpha/dLdA offsets use coords + 1 (same broadcast convention as preluCuda).
template <typename X, typename Y>
void SD_KERNEL preluBPCuda(const void *vIn, const sd::LongType *inShapeInfo, const void *vAlpha,
const sd::LongType *alphaShapeInfo, const void *vdLdO, const sd::LongType *dLdOShapeInfo,
void *vdLdI, const sd::LongType *dLdIShapeInfo, void *vdLdA,
const sd::LongType *dLdAShapeInfo) {
const auto in = reinterpret_cast<const X *>(vIn);
const auto alpha = reinterpret_cast<const Y *>(vAlpha);
const auto dLdO = reinterpret_cast<const Y *>(vdLdO);
auto dLdI = reinterpret_cast<Y *>(vdLdI);
auto dLdA = reinterpret_cast<Y *>(vdLdA);
// per-block cached scalars, filled by thread 0 below
__shared__ sd::LongType inLen, totalThreads;
__shared__ int inRank, alphaRank;
if (threadIdx.x == 0) {
inLen = shape::length(inShapeInfo);
totalThreads = gridDim.x * blockDim.x;
inRank = shape::rank(inShapeInfo);  // NOTE(review): written but never read in this kernel
alphaRank = shape::rank(alphaShapeInfo);
}
__syncthreads();
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
int coords[SD_MAX_RANK];
for (int i = tid; i < inLen; i += totalThreads) {
shape::index2coords(i, inShapeInfo, coords);
// in/dLdO/dLdI offsets are computed BEFORE coords is mutated for the alpha broadcast below
const auto inOffset = shape::getOffset(inShapeInfo, coords);
const auto dLdOOffset = shape::getOffset(dLdOShapeInfo, coords);
const auto dLdIOffset = shape::getOffset(dLdIShapeInfo, coords);
const auto xVal = in[inOffset];
const auto grO = dLdO[dLdOOffset];
if (xVal < 0) {
// zero the coordinates along alpha's size-1 (broadcast) dimensions
for (sd::Unsigned j = 0; j < alphaRank; ++j)
if (alphaShapeInfo[j + 1] == 1) coords[j + 1] = 0;
const auto alphaOffset = shape::getOffset(alphaShapeInfo, coords + 1);
const auto dLdAOffset = shape::getOffset(dLdAShapeInfo, coords + 1);
dLdI[dLdIOffset] = grO * alpha[alphaOffset];
// atomic: many input elements may map to one alpha entry
sd::math::atomics::sd_atomicAdd<Y>(&dLdA[dLdAOffset], static_cast<Y>(grO * xVal));
} else
dLdI[dLdIOffset] = grO;
}
}
//////////////////////////////////////////////////////////////////////////
// Host-side wrapper: launches preluBPCuda<X, Y> on the given stream with the supplied configuration.
template <typename X, typename Y>
void SD_HOST preluBPCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem,
const hipStream_t *stream, const void *vIn, const sd::LongType *inShapeInfo,
const void *vAlpha, const sd::LongType *alphaShapeInfo, const void *vdLdO,
const sd::LongType *dLdOShapeInfo, void *vdLdI, const sd::LongType *dLdIShapeInfo,
void *vdLdA, const sd::LongType *dLdAShapeInfo) {
hipLaunchKernelGGL(( preluBPCuda<X, Y>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream,
vIn, inShapeInfo, vAlpha, alphaShapeInfo, vdLdO, dLdOShapeInfo, vdLdI, dLdIShapeInfo, vdLdA, dLdAShapeInfo);
}
//////////////////////////////////////////////////////////////////////////
// PReLU backprop driver: dLdI = dLdO * (input >= 0 ? 1 : alpha) and
// dLdA = sum over broadcast dimensions of dLdO * input (for input < 0).
// Synchronizes the stream before returning.
void preluBP(sd::LaunchContext *context, const NDArray &input, const NDArray &alpha, const NDArray &dLdO, NDArray &dLdI,
             NDArray &dLdA) {
  dLdA.nullify();  // dLdA is accumulated with atomicAdd in the kernel, so it must start at zero
  PointersManager manager(context, "preluBP");

  // Fixed launch configuration; the kernel uses a grid-stride loop, so any size is handled.
  const int threadsPerBlock = 256;
  const int blocksPerGrid = 512;
  const int sharedMem = 512;  // NOTE(review): kernel declares no dynamic shared memory; kept for launch parity

  const auto xType = input.dataType();
  // NOTE: BUILD_SINGLE_SELECTOR_TWICE instantiates both template types (X and Y) from xType;
  // the previously computed-but-unused alpha dtype local was removed.

  NDArray::prepareSpecialUse({&dLdI, &dLdA}, {&input, &alpha, &dLdO});
  BUILD_SINGLE_SELECTOR_TWICE(
      xType, preluBPCudaLauncher,
      (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), input.specialBuffer(),
       input.specialShapeInfo(), alpha.specialBuffer(), alpha.specialShapeInfo(), dLdO.specialBuffer(),
       dLdO.specialShapeInfo(), dLdI.specialBuffer(), dLdI.specialShapeInfo(), dLdA.specialBuffer(),
       dLdA.specialShapeInfo()),
      SD_FLOAT_TYPES);
  NDArray::registerSpecialUse({&dLdI, &dLdA}, {&input, &alpha, &dLdO});
  manager.synchronize();
}
///////////////////////////////////////////////////////////////////
// Softmax of a vector x into z, computed by a single block in three passes:
//   1) max(x)   2) z = exp(x - max) with block-wide sum left in shmem[0]   3) z /= sum.
// Preconditions: effectively gridDim == 1 for the data it sees (caller gives each block its own
// TAD); blockDim.x is a power of two (tree reduction) and <= SD_CUDA_BLOCK_SIZE (shmem size).
template <typename T>
SD_DEVICE void softMaxForVectorCuda(const void *vx, const sd::LongType *xShapeInfo, void *vz,
                                    const sd::LongType *zShapeInfo) {
  const auto x = reinterpret_cast<const T *>(vx);
  auto z = reinterpret_cast<T *>(vz);

  __shared__ sd::LongType len;
  __shared__ int numOfIters;
  __shared__ T shmem[SD_CUDA_BLOCK_SIZE];

  if (threadIdx.x == 0) {
    len = shape::length(xShapeInfo);
    numOfIters = (len + blockDim.x - 1) / blockDim.x;  // ceil(len / blockDim.x)
  }
  __syncthreads();

  // running maximum; FIXME (inherited): lowest representable value is wrong for unsigned T
  T temp = -DataTypeUtils::max<T>();

  // ************ pass 1: evaluate max element of x ************ //
  for (int i = 0; i < numOfIters; ++i) {
    const sd::LongType elemIdx = i * blockDim.x + threadIdx.x;
    if (elemIdx < len) {
      const sd::LongType xOffset = shape::getIndexOffset(elemIdx, xShapeInfo);
      // thread 0 folds in the running max of previous chunks (held in temp)
      shmem[threadIdx.x] = (threadIdx.x != 0) ? x[xOffset] : sd::math::sd_max<T>(x[xOffset], temp);
    } else
      shmem[threadIdx.x] = -DataTypeUtils::max<T>();  // neutral element for max
    __syncthreads();

    for (int s = blockDim.x / 2; s > 0; s /= 2) {
      if (threadIdx.x < s) shmem[threadIdx.x] = sd::math::sd_max<T>(shmem[threadIdx.x], shmem[threadIdx.x + s]);
      __syncthreads();
    }
    temp = shmem[0];  // max over chunks processed so far
    __syncthreads();  // FIX: barrier so next-chunk shmem writes cannot race with the shmem[0] read above
  }
  const T max = temp;
  temp = 0;

  // ************ pass 2: z = exp(x - max); total sum of exponents ends up in shmem[0] ************ //
  for (int i = 0; i < numOfIters; ++i) {
    const sd::LongType elemIdx = i * blockDim.x + threadIdx.x;
    if (elemIdx < len) {
      const sd::LongType xOffset = shape::getIndexOffset(elemIdx, xShapeInfo);
      const sd::LongType zOffset = shape::getIndexOffset(elemIdx, zShapeInfo);
      z[zOffset] = sd::math::sd_exp<T, T>(x[xOffset] - max);
      // thread 0 folds in the running sum of previous chunks (held in temp)
      shmem[threadIdx.x] = (threadIdx.x != 0) ? z[zOffset] : (z[zOffset] + temp);
    } else
      shmem[threadIdx.x] = 0;  // neutral element for sum
    __syncthreads();

    for (int s = blockDim.x / 2; s > 0; s /= 2) {
      if (threadIdx.x < s) shmem[threadIdx.x] += shmem[threadIdx.x + s];
      __syncthreads();
    }
    temp = shmem[0];  // sum over chunks processed so far
    __syncthreads();  // FIX: barrier before shmem is overwritten by the next chunk
  }

  // ************ pass 3: z /= sum; only reads shmem, so no further barrier is needed ************ //
  for (int i = 0; i < numOfIters; ++i) {
    const sd::LongType elemIdx = i * blockDim.x + threadIdx.x;
    if (elemIdx >= len) continue;
    const sd::LongType zOffset = shape::getIndexOffset(elemIdx, zShapeInfo);
    z[zOffset] /= shmem[0];
  }
}
// Thin kernel entry point for the single-vector case: forwards to the device helper.
// Intended to be launched with a single block (see softMaxForVectorCudaLauncher).
template <typename T>
void SD_KERNEL softMaxForVectorCudaGlobal(const void *vx, const sd::LongType *xShapeInfo, void *vz,
const sd::LongType *zShapeInfo) {
softMaxForVectorCuda<T>(vx, xShapeInfo, vz, zShapeInfo);
}
///////////////////////////////////////////////////////////////////
// Launches the single-block vector softmax kernel (grid = 1, block = SD_CUDA_BLOCK_SIZE).
// NOTE(review): the 1024 bytes of dynamic shared memory appear unused by the kernel (it uses a
// static shmem array) — kept as-is.
template <typename T>
void softMaxForVectorCudaLauncher(const hipStream_t *stream, const void *vx, const sd::LongType *xShapeInfo, void *vz,
const sd::LongType *zShapeInfo) {
hipLaunchKernelGGL(( softMaxForVectorCudaGlobal<T>), dim3(1), dim3(SD_CUDA_BLOCK_SIZE), 1024, *stream, vx, xShapeInfo, vz, zShapeInfo);
}
///////////////////////////////////////////////////////////////////
// TAD-wise softmax kernel: each block handles exactly one TAD (sub-vector), offset by
// xOffsets/zOffsets[blockIdx.x], and runs the single-vector device routine on it.
// Launch with blocksPerGrid == number of TADs.
template <typename T>
SD_KERNEL static void softMaxCuda(const void *vx, const sd::LongType *xTadShapeInfo, const sd::LongType *xOffsets,
void *vz, const sd::LongType *zTadShapeInfo, const sd::LongType *zOffsets) {
const auto x = reinterpret_cast<const T *>(vx);
auto z = reinterpret_cast<T *>(vz);
const auto *xTad = x + xOffsets[blockIdx.x];
auto *zTad = z + zOffsets[blockIdx.x];
softMaxForVectorCuda<T>(xTad, xTadShapeInfo, zTad, zTadShapeInfo);
}
///////////////////////////////////////////////////////////////////
// Host-side wrapper: launches the TAD-wise softmax kernel on the given stream.
template <typename T>
static void softMaxCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem,
const hipStream_t *stream, const void *vx, const sd::LongType *xTadShapeInfo,
const sd::LongType *xOffsets, void *vz, const sd::LongType *zTadShapeInfo,
const sd::LongType *zOffsets) {
hipLaunchKernelGGL(( softMaxCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, vx, xTadShapeInfo, xOffsets, vz, zTadShapeInfo,
zOffsets);
}
//////////////////////////////////////////////////////////////////////////
// Softmax along `dimension`. Vector inputs take the single-block kernel path; higher-rank
// inputs are split into TADs along `dimension`, one block per TAD. Synchronizes before return.
void softmax(sd::LaunchContext *context, const NDArray &input, NDArray &output, const int dimension) {
if (!input.isActualOnDeviceSide()) input.syncToDevice();
const int rank = input.rankOf();
PointersManager manager(context, "helpers::softmax");
if (input.isVector()) {
// a vector whose softmax dimension has length 1 trivially yields all ones
if (rank == 1 || input.sizeAt(dimension) != 1) {
NDArray::prepareSpecialUse({&output}, {&input});
BUILD_SINGLE_SELECTOR(input.dataType(), softMaxForVectorCudaLauncher,
(context->getCudaStream(), input.specialBuffer(), input.specialShapeInfo(),
output.specialBuffer(), output.specialShapeInfo()),
SD_FLOAT_TYPES);
NDArray::registerSpecialUse({&output}, {&input});
} else
output = 1.;
} else {
// one TAD (sub-vector along `dimension`) per block
auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(input.shapeInfo(), {dimension});
auto packZ = sd::ConstantTadHelper::getInstance().tadForDimensions(output.shapeInfo(), {dimension});
const int threadsPerBlock = SD_CUDA_BLOCK_SIZE;
const int blocksPerGrid = packZ.numberOfTads();
const int sharedMem = 1024;
NDArray::prepareSpecialUse({&output}, {&input});
BUILD_SINGLE_SELECTOR(input.dataType(), softMaxCudaLauncher,
(blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), input.specialBuffer(),
packX.specialShapeInfo(), packX.specialOffsets(), output.specialBuffer(),
packZ.specialShapeInfo(), packZ.specialOffsets()),
SD_FLOAT_TYPES);
NDArray::registerSpecialUse({&output}, {&input});
// previous NDArray-op based implementation, kept for reference:
// auto maxAlongDim = const_cast<NDArray&>(input).reduceAlongDimension(reduce::Max, {dimension}, true);
// (input - maxAlongDim).applyTransform(transform::Exp, &output); // output contains exponents temporarily
// auto sumAlongDim = output.reduceAlongDimension(reduce::Sum, {dimension}, true);
// output /= sumAlongDim;
// input.tickReadDevice();
}
manager.synchronize();
output.tickWriteDevice();
}
///////////////////////////////////////////////////////////////////
// Log-softmax of a vector (x and z share xzShapeInfo), single-block kernel:
//   1) max(x)   2) z = exp(x - max) with block-wide sum left in shmem[0]   3) z = log(z / sum).
// Preconditions: gridDim == 1; blockDim.x is a power of two (tree reduction) and
// <= SD_CUDA_BLOCK_SIZE (static shared buffer size).
template <typename T>
void SD_KERNEL logSoftMaxForVectorCuda(const void *vx, const sd::LongType *xzShapeInfo, void *vz) {
  const auto x = reinterpret_cast<const T *>(vx);
  auto z = reinterpret_cast<T *>(vz);

  __shared__ sd::LongType len;
  __shared__ int numOfIters;
  __shared__ T shmem[SD_CUDA_BLOCK_SIZE];

  if (threadIdx.x == 0) {
    len = shape::length(xzShapeInfo);
    numOfIters = (len + blockDim.x - 1) / blockDim.x;  // ceil(len / blockDim.x)
  }
  __syncthreads();

  // running maximum; FIXME (inherited): lowest representable value is wrong for unsigned T
  T temp = -DataTypeUtils::max<T>();

  // ************ pass 1: evaluate max element of x ************ //
  for (int i = 0; i < numOfIters; ++i) {
    const sd::LongType elemIdx = i * blockDim.x + threadIdx.x;
    if (elemIdx < len) {
      const sd::LongType offset = shape::getIndexOffset(elemIdx, xzShapeInfo);
      // thread 0 folds in the running max of previous chunks (held in temp)
      shmem[threadIdx.x] = (threadIdx.x != 0) ? x[offset] : sd::math::sd_max<T>(x[offset], temp);
    } else
      shmem[threadIdx.x] = -DataTypeUtils::max<T>();  // neutral element for max
    __syncthreads();

    for (int s = blockDim.x / 2; s > 0; s /= 2) {
      if (threadIdx.x < s) shmem[threadIdx.x] = sd::math::sd_max<T>(shmem[threadIdx.x], shmem[threadIdx.x + s]);
      __syncthreads();
    }
    temp = shmem[0];  // max over chunks processed so far
    __syncthreads();  // FIX: barrier so next-chunk shmem writes cannot race with the shmem[0] read above
  }
  const T max = temp;
  temp = 0;

  // ************ pass 2: z = exp(x - max); total sum of exponents ends up in shmem[0] ************ //
  for (int i = 0; i < numOfIters; ++i) {
    const sd::LongType elemIdx = i * blockDim.x + threadIdx.x;
    if (elemIdx < len) {
      const sd::LongType offset = shape::getIndexOffset(elemIdx, xzShapeInfo);
      z[offset] = sd::math::sd_exp<T, T>(x[offset] - max);
      // thread 0 folds in the running sum of previous chunks (held in temp)
      shmem[threadIdx.x] = (threadIdx.x != 0) ? z[offset] : (z[offset] + temp);
    } else
      shmem[threadIdx.x] = 0;  // neutral element for sum
    __syncthreads();

    for (int s = blockDim.x / 2; s > 0; s /= 2) {
      if (threadIdx.x < s) shmem[threadIdx.x] += shmem[threadIdx.x + s];
      __syncthreads();
    }
    temp = shmem[0];  // sum over chunks processed so far
    __syncthreads();  // FIX: barrier before shmem is overwritten by the next chunk
  }

  // ************ pass 3: z = log(z / sum); only reads shmem, so no further barrier is needed ************ //
  for (int i = 0; i < numOfIters; ++i) {
    const sd::LongType elemIdx = i * blockDim.x + threadIdx.x;
    if (elemIdx >= len) continue;
    const sd::LongType offset = shape::getIndexOffset(elemIdx, xzShapeInfo);
    z[offset] = sd::math::sd_log<T, T>(z[offset] / shmem[0]);
  }
}
///////////////////////////////////////////////////////////////////
// Launches the single-block vector log-softmax kernel (grid = 1, block = SD_CUDA_BLOCK_SIZE).
template <typename T>
void logSoftMaxForVectorCudaLauncher(const hipStream_t *stream, const void *vx, const sd::LongType *xzShapeInfo,
void *vz) {
hipLaunchKernelGGL(( logSoftMaxForVectorCuda<T>), dim3(1), dim3(SD_CUDA_BLOCK_SIZE), 1024, *stream, vx, xzShapeInfo, vz);
}
//////////////////////////////////////////////////////////////////////////
// Log-softmax along `dimension`. Vector inputs use the dedicated single-block kernel;
// higher-rank inputs fall back to composed NDArray reduce/transform operations.
// NOTE(review): unlike softmax() above, the kernel branch does not call
// prepareSpecialUse/registerSpecialUse — confirm whether that is intentional.
void logSoftmax(sd::LaunchContext *context, const NDArray &input, NDArray &output, const int dimension) {
if (!input.isActualOnDeviceSide()) input.syncToDevice();
const int rank = input.rankOf();
if (input.isVector()) {
// a vector whose softmax dimension has length 1 trivially yields log(1) == 0
if (rank == 1 || input.sizeAt(dimension) != 1) {
BUILD_SINGLE_SELECTOR(
input.dataType(), logSoftMaxForVectorCudaLauncher,
(context->getCudaStream(), input.specialBuffer(), input.specialShapeInfo(), output.specialBuffer()),
SD_FLOAT_TYPES);
input.tickReadDevice();
} else
output = 0.;
} else {
// composed implementation: log(exp(x - max) / sum(exp(x - max)))
auto maxAlongDim = const_cast<NDArray &>(input).reduceAlongDimension(reduce::Max, {dimension}, true);
(input - maxAlongDim).applyTransform(transform::Exp, output); // output contains exponents temporarily
auto sumAlongDim = output.reduceAlongDimension(reduce::Sum, {dimension}, true);
output /= sumAlongDim;
output.applyTransform(transform::Log, output);
input.tickReadDevice();
}
PointersManager manager(context, "helpers::logSoftmax");
manager.synchronize();
output.tickWriteDevice();
}
///////////////////////////////////////////////////////////////////
// Element-wise softmax derivative of a vector (x and z share xzShapeInfo), single-block kernel:
//   1) max(x)   2) z = exp(x - max) with block-wide sum left in shmem[0]
//   3) s = z / sum; z = s * (1 - s)   (diagonal term of the softmax Jacobian only).
// Preconditions: gridDim == 1; blockDim.x is a power of two (tree reduction) and
// <= SD_CUDA_BLOCK_SIZE (static shared buffer size).
template <typename T>
void SD_KERNEL softMaxDerivForVectorCuda(const void *vx, const sd::LongType *xzShapeInfo, void *vz) {
  const auto x = reinterpret_cast<const T *>(vx);
  auto z = reinterpret_cast<T *>(vz);

  __shared__ sd::LongType len;
  __shared__ int numOfIters;
  __shared__ T shmem[SD_CUDA_BLOCK_SIZE];

  if (threadIdx.x == 0) {
    len = shape::length(xzShapeInfo);
    numOfIters = (len + blockDim.x - 1) / blockDim.x;  // ceil(len / blockDim.x)
  }
  __syncthreads();

  // running maximum; FIXME (inherited): lowest representable value is wrong for unsigned T
  T temp = -DataTypeUtils::max<T>();

  // ************ pass 1: evaluate max element of x ************ //
  for (int i = 0; i < numOfIters; ++i) {
    const sd::LongType elemIdx = i * blockDim.x + threadIdx.x;
    if (elemIdx < len) {
      const sd::LongType offset = shape::getIndexOffset(elemIdx, xzShapeInfo);
      // thread 0 folds in the running max of previous chunks (held in temp)
      shmem[threadIdx.x] = (threadIdx.x != 0) ? x[offset] : sd::math::sd_max<T>(x[offset], temp);
    } else
      shmem[threadIdx.x] = -DataTypeUtils::max<T>();  // neutral element for max
    __syncthreads();

    for (int s = blockDim.x / 2; s > 0; s /= 2) {
      if (threadIdx.x < s) shmem[threadIdx.x] = sd::math::sd_max<T>(shmem[threadIdx.x], shmem[threadIdx.x + s]);
      __syncthreads();
    }
    temp = shmem[0];  // max over chunks processed so far
    __syncthreads();  // FIX: barrier so next-chunk shmem writes cannot race with the shmem[0] read above
  }
  const T max = temp;
  temp = 0;

  // ************ pass 2: z = exp(x - max); total sum of exponents ends up in shmem[0] ************ //
  for (int i = 0; i < numOfIters; ++i) {
    const sd::LongType elemIdx = i * blockDim.x + threadIdx.x;
    if (elemIdx < len) {
      const sd::LongType offset = shape::getIndexOffset(elemIdx, xzShapeInfo);
      z[offset] = sd::math::sd_exp<T, T>(x[offset] - max);
      // thread 0 folds in the running sum of previous chunks (held in temp)
      shmem[threadIdx.x] = (threadIdx.x != 0) ? z[offset] : (z[offset] + temp);
    } else
      shmem[threadIdx.x] = 0;  // neutral element for sum
    __syncthreads();

    for (int s = blockDim.x / 2; s > 0; s /= 2) {
      if (threadIdx.x < s) shmem[threadIdx.x] += shmem[threadIdx.x + s];
      __syncthreads();
    }
    temp = shmem[0];  // sum over chunks processed so far
    __syncthreads();  // FIX: barrier before shmem is overwritten by the next chunk
  }

  // ************ pass 3: normalize and apply derivative; only reads shmem, no further barrier ************ //
  for (int i = 0; i < numOfIters; ++i) {
    const sd::LongType elemIdx = i * blockDim.x + threadIdx.x;
    if (elemIdx >= len) continue;
    const sd::LongType offset = shape::getIndexOffset(elemIdx, xzShapeInfo);
    z[offset] /= shmem[0];
    z[offset] *= (1.f - z[offset]);  // derivative: s * (1 - s)
  }
}
///////////////////////////////////////////////////////////////////
// Launches the single-block vector softmax-derivative kernel (grid = 1, block = SD_CUDA_BLOCK_SIZE).
template <typename T>
void softMaxDerivForVectorCudaLauncher(const hipStream_t *stream, const void *vx, const sd::LongType *xzShapeInfo,
void *vz) {
hipLaunchKernelGGL(( softMaxDerivForVectorCuda<T>), dim3(1), dim3(SD_CUDA_BLOCK_SIZE), 1024, *stream, vx, xzShapeInfo, vz);
}
///////////////////////////////////////////////////////////////////
// Element-wise softmax derivative s * (1 - s) along `dimension` (diagonal Jacobian term only).
// Common vectors use the dedicated single-block kernel; otherwise falls back to composed
// NDArray reduce/transform operations.
void softmaxDerivative(sd::LaunchContext *context, const NDArray &input, NDArray &output, const int dimension) {
if (!input.isActualOnDeviceSide()) input.syncToDevice();
const int rank = input.rankOf();
int temp;
// temp receives the index of the non-unit dimension; only the vector/non-vector verdict is used here
if (shape::isCommonVector(input.shapeInfo(), temp)) {
BUILD_SINGLE_SELECTOR(
input.dataType(), softMaxDerivForVectorCudaLauncher,
(context->getCudaStream(), input.specialBuffer(), input.specialShapeInfo(), output.specialBuffer()),
SD_FLOAT_TYPES);
input.tickReadDevice();
} else {
// composed implementation: s = exp(x - max) / sum, then s * (1 - s)
auto maxAlongDim = const_cast<NDArray &>(input).reduceAlongDimension(reduce::Max, {dimension}, true);
(input - maxAlongDim).applyTransform(transform::Exp, output); // output contains exponents temporarily
auto sumAlongDim = output.reduceAlongDimension(reduce::Sum, {dimension}, true);
output /= sumAlongDim;
output *= (1.f - output); // derivative
input.tickReadDevice();
}
PointersManager manager(context, "helpers::softmaxDerivative");
manager.synchronize();
output.tickWriteDevice();
}
// Thresholded ReLU (typed impl): output = input when input > threshold, else 0.
// const_cast is needed because applyLambda is non-const; input data is not modified.
template <typename T>
void thresholdRelu_(NDArray const &input, double threshold, NDArray &output) {
auto routine = LAMBDA_T(_x, threshold) { return _x > (T)threshold ? _x : (T)0.f; };
const_cast<NDArray &>(input).applyLambda(routine, output);
}
// Type-dispatching entry point for thresholded ReLU (float types only).
void thresholdRelu(sd::LaunchContext *context, NDArray const &input, double threshold, NDArray &output) {
BUILD_SINGLE_SELECTOR(input.dataType(), thresholdRelu_, (input, threshold, output), SD_FLOAT_TYPES);
}
// Thresholded ReLU backprop (typed impl): output = dLdO where input > theta, else 0.
template <typename T>
void thresholdReluDerivative_(NDArray *input, double theta, NDArray *dLdO, NDArray *output) {
auto derivative = LAMBDA_TT(_x, grO, theta) {
if (_x > theta)
return grO;
else
return static_cast<T>(0);
};
input->applyPairwiseLambda(*dLdO, derivative, *output);
}
// Type-dispatching entry point for thresholded ReLU backprop (float types only).
void thresholdReluDerivative(sd::LaunchContext *context, NDArray *input, double threshold, NDArray *dLdO,
NDArray *output) {
BUILD_SINGLE_SELECTOR(input->dataType(), thresholdReluDerivative_, (input, threshold, dLdO, output), SD_FLOAT_TYPES);
}
} // namespace helpers
} // namespace ops
} // namespace sd
| 38b191f8869e3f1c4139cc8b04f5f600654b7885.cu | /* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma (iuriish@yahoo.com), created on 19.04.2018
// @author raver119@gmail.com
//
#include <helpers/ConstantTadHelper.h>
#include <helpers/PointersManager.h>
#include <helpers/ShapeUtils.h>
#include <ops/declarable/helpers/activations.h>
#include <system/op_boilerplate.h>
#include <numeric>
namespace sd {
namespace ops {
namespace helpers {
///////////////////////////////////////////////////////////////////
// PReLU forward kernel: z[i] = x[i] when x[i] >= 0, otherwise x[i] * y[...], where y (alpha)
// is broadcast against x. Uses a grid-stride loop, so any launch configuration is valid.
// NOTE(review): alpha offsets are computed from coords + 1, i.e. alpha coordinates appear to
// start at the input's second dimension (alpha rank == input rank - 1) — confirm at call sites.
template <typename X, typename Y>
void SD_KERNEL preluCuda(const void *vx, const sd::LongType *xShapeInfo, const void *vy, const sd::LongType *yShapeInfo,
void *vz) {
const auto x = reinterpret_cast<const X *>(vx);
const auto y = reinterpret_cast<const Y *>(vy);
auto z = reinterpret_cast<X *>(vz);
// per-block cached scalars, filled by thread 0 below
__shared__ sd::LongType xzLen;
__shared__ int xzRank, yRank;
if (threadIdx.x == 0) {
xzLen = shape::length(xShapeInfo);
xzRank = shape::rank(xShapeInfo);  // NOTE(review): written but never read in this kernel
yRank = shape::rank(yShapeInfo);
}
__syncthreads();
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
int coords[SD_MAX_RANK];
// grid-stride loop over all elements of x/z
for (int i = tid; i < xzLen; i += blockDim.x * gridDim.x) {
shape::index2coords(i, xShapeInfo, coords);
const auto xzOffset = shape::getOffset(xShapeInfo, coords);
const auto xVal = x[xzOffset];
if (xVal < 0) {
// zero the coordinates along alpha's size-1 (broadcast) dimensions
for (sd::Unsigned j = 0; j < yRank; ++j)
if (yShapeInfo[j + 1] == 1) coords[j + 1] = 0;
z[xzOffset] = xVal * y[shape::getOffset(yShapeInfo, coords + 1)];
} else
z[xzOffset] = xVal;
}
}
///////////////////////////////////////////////////////////////////
// Host-side wrapper: launches preluCuda<X, Y> on the given stream with the supplied configuration.
// NOTE(review): the kernel uses no dynamic shared memory, so sharedMem is pass-through only.
template <typename X, typename Y>
void preluCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem,
const cudaStream_t *stream, const void *vx, const sd::LongType *xShapeInfo, const void *vy,
const sd::LongType *yShapeInfo, void *vz) {
preluCuda<X, Y><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(vx, xShapeInfo, vy, yShapeInfo, vz);
}
///////////////////////////////////////////////////////////////////
// Applies PReLU element-wise on device: output = input when input >= 0, otherwise input * alpha,
// with alpha broadcast against input (see preluCuda). Synchronizes the stream before returning.
void prelu(sd::LaunchContext *context, const NDArray &input, const NDArray &alpha, NDArray &output) {
  PointersManager manager(context, "prelu");

  // Fixed launch configuration; the kernel uses a grid-stride loop, so any size is handled.
  const int threadsPerBlock = 256;
  const int blocksPerGrid = 512;
  const int sharedMem = 512;  // NOTE(review): preluCuda declares no dynamic shared memory; kept for launch parity

  const auto xType = input.dataType();
  // NOTE: BUILD_SINGLE_SELECTOR_TWICE instantiates both template types (X and Y) from xType,
  // so alpha is assumed to share input's floating-point data type; the previously
  // computed-but-unused alpha dtype local was removed.

  NDArray::prepareSpecialUse({&output}, {&input, &alpha});
  BUILD_SINGLE_SELECTOR_TWICE(
      xType, preluCudaLauncher,
      (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), input.specialBuffer(),
       input.specialShapeInfo(), alpha.specialBuffer(), alpha.specialShapeInfo(), output.specialBuffer()),
      SD_FLOAT_TYPES);
  NDArray::registerSpecialUse({&output}, {&input, &alpha});
  manager.synchronize();
}
///////////////////////////////////////////////////////////////////
// PReLU backprop kernel. For each element:
//   x >= 0: dLdI = dLdO
//   x <  0: dLdI = dLdO * alpha, and dLdA += dLdO * x (accumulated atomically across the
//           elements that broadcast onto the same alpha entry — dLdA must be zeroed beforehand).
// Grid-stride loop; alpha/dLdA offsets use coords + 1 (same broadcast convention as preluCuda).
template <typename X, typename Y>
void SD_KERNEL preluBPCuda(const void *vIn, const sd::LongType *inShapeInfo, const void *vAlpha,
const sd::LongType *alphaShapeInfo, const void *vdLdO, const sd::LongType *dLdOShapeInfo,
void *vdLdI, const sd::LongType *dLdIShapeInfo, void *vdLdA,
const sd::LongType *dLdAShapeInfo) {
const auto in = reinterpret_cast<const X *>(vIn);
const auto alpha = reinterpret_cast<const Y *>(vAlpha);
const auto dLdO = reinterpret_cast<const Y *>(vdLdO);
auto dLdI = reinterpret_cast<Y *>(vdLdI);
auto dLdA = reinterpret_cast<Y *>(vdLdA);
// per-block cached scalars, filled by thread 0 below
__shared__ sd::LongType inLen, totalThreads;
__shared__ int inRank, alphaRank;
if (threadIdx.x == 0) {
inLen = shape::length(inShapeInfo);
totalThreads = gridDim.x * blockDim.x;
inRank = shape::rank(inShapeInfo);  // NOTE(review): written but never read in this kernel
alphaRank = shape::rank(alphaShapeInfo);
}
__syncthreads();
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
int coords[SD_MAX_RANK];
for (int i = tid; i < inLen; i += totalThreads) {
shape::index2coords(i, inShapeInfo, coords);
// in/dLdO/dLdI offsets are computed BEFORE coords is mutated for the alpha broadcast below
const auto inOffset = shape::getOffset(inShapeInfo, coords);
const auto dLdOOffset = shape::getOffset(dLdOShapeInfo, coords);
const auto dLdIOffset = shape::getOffset(dLdIShapeInfo, coords);
const auto xVal = in[inOffset];
const auto grO = dLdO[dLdOOffset];
if (xVal < 0) {
// zero the coordinates along alpha's size-1 (broadcast) dimensions
for (sd::Unsigned j = 0; j < alphaRank; ++j)
if (alphaShapeInfo[j + 1] == 1) coords[j + 1] = 0;
const auto alphaOffset = shape::getOffset(alphaShapeInfo, coords + 1);
const auto dLdAOffset = shape::getOffset(dLdAShapeInfo, coords + 1);
dLdI[dLdIOffset] = grO * alpha[alphaOffset];
// atomic: many input elements may map to one alpha entry
sd::math::atomics::sd_atomicAdd<Y>(&dLdA[dLdAOffset], static_cast<Y>(grO * xVal));
} else
dLdI[dLdIOffset] = grO;
}
}
//////////////////////////////////////////////////////////////////////////
// Host-side wrapper: launches preluBPCuda<X, Y> on the given stream with the supplied configuration.
template <typename X, typename Y>
void SD_HOST preluBPCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem,
const cudaStream_t *stream, const void *vIn, const sd::LongType *inShapeInfo,
const void *vAlpha, const sd::LongType *alphaShapeInfo, const void *vdLdO,
const sd::LongType *dLdOShapeInfo, void *vdLdI, const sd::LongType *dLdIShapeInfo,
void *vdLdA, const sd::LongType *dLdAShapeInfo) {
preluBPCuda<X, Y><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(
vIn, inShapeInfo, vAlpha, alphaShapeInfo, vdLdO, dLdOShapeInfo, vdLdI, dLdIShapeInfo, vdLdA, dLdAShapeInfo);
}
//////////////////////////////////////////////////////////////////////////
// PReLU backprop driver: dLdI = dLdO * (input >= 0 ? 1 : alpha) and
// dLdA = sum over broadcast dimensions of dLdO * input (for input < 0).
// Synchronizes the stream before returning.
void preluBP(sd::LaunchContext *context, const NDArray &input, const NDArray &alpha, const NDArray &dLdO, NDArray &dLdI,
             NDArray &dLdA) {
  dLdA.nullify();  // dLdA is accumulated with atomicAdd in the kernel, so it must start at zero
  PointersManager manager(context, "preluBP");

  // Fixed launch configuration; the kernel uses a grid-stride loop, so any size is handled.
  const int threadsPerBlock = 256;
  const int blocksPerGrid = 512;
  const int sharedMem = 512;  // NOTE(review): kernel declares no dynamic shared memory; kept for launch parity

  const auto xType = input.dataType();
  // NOTE: BUILD_SINGLE_SELECTOR_TWICE instantiates both template types (X and Y) from xType;
  // the previously computed-but-unused alpha dtype local was removed.

  NDArray::prepareSpecialUse({&dLdI, &dLdA}, {&input, &alpha, &dLdO});
  BUILD_SINGLE_SELECTOR_TWICE(
      xType, preluBPCudaLauncher,
      (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), input.specialBuffer(),
       input.specialShapeInfo(), alpha.specialBuffer(), alpha.specialShapeInfo(), dLdO.specialBuffer(),
       dLdO.specialShapeInfo(), dLdI.specialBuffer(), dLdI.specialShapeInfo(), dLdA.specialBuffer(),
       dLdA.specialShapeInfo()),
      SD_FLOAT_TYPES);
  NDArray::registerSpecialUse({&dLdI, &dLdA}, {&input, &alpha, &dLdO});
  manager.synchronize();
}
///////////////////////////////////////////////////////////////////
// Softmax of a vector x into z, computed by a single block in three passes:
//   1) max(x)   2) z = exp(x - max) with block-wide sum left in shmem[0]   3) z /= sum.
// Preconditions: effectively gridDim == 1 for the data it sees (caller gives each block its own
// TAD); blockDim.x is a power of two (tree reduction) and <= SD_CUDA_BLOCK_SIZE (shmem size).
template <typename T>
SD_DEVICE void softMaxForVectorCuda(const void *vx, const sd::LongType *xShapeInfo, void *vz,
                                    const sd::LongType *zShapeInfo) {
  const auto x = reinterpret_cast<const T *>(vx);
  auto z = reinterpret_cast<T *>(vz);

  __shared__ sd::LongType len;
  __shared__ int numOfIters;
  __shared__ T shmem[SD_CUDA_BLOCK_SIZE];

  if (threadIdx.x == 0) {
    len = shape::length(xShapeInfo);
    numOfIters = (len + blockDim.x - 1) / blockDim.x;  // ceil(len / blockDim.x)
  }
  __syncthreads();

  // running maximum; FIXME (inherited): lowest representable value is wrong for unsigned T
  T temp = -DataTypeUtils::max<T>();

  // ************ pass 1: evaluate max element of x ************ //
  for (int i = 0; i < numOfIters; ++i) {
    const sd::LongType elemIdx = i * blockDim.x + threadIdx.x;
    if (elemIdx < len) {
      const sd::LongType xOffset = shape::getIndexOffset(elemIdx, xShapeInfo);
      // thread 0 folds in the running max of previous chunks (held in temp)
      shmem[threadIdx.x] = (threadIdx.x != 0) ? x[xOffset] : sd::math::sd_max<T>(x[xOffset], temp);
    } else
      shmem[threadIdx.x] = -DataTypeUtils::max<T>();  // neutral element for max
    __syncthreads();

    for (int s = blockDim.x / 2; s > 0; s /= 2) {
      if (threadIdx.x < s) shmem[threadIdx.x] = sd::math::sd_max<T>(shmem[threadIdx.x], shmem[threadIdx.x + s]);
      __syncthreads();
    }
    temp = shmem[0];  // max over chunks processed so far
    __syncthreads();  // FIX: barrier so next-chunk shmem writes cannot race with the shmem[0] read above
  }
  const T max = temp;
  temp = 0;

  // ************ pass 2: z = exp(x - max); total sum of exponents ends up in shmem[0] ************ //
  for (int i = 0; i < numOfIters; ++i) {
    const sd::LongType elemIdx = i * blockDim.x + threadIdx.x;
    if (elemIdx < len) {
      const sd::LongType xOffset = shape::getIndexOffset(elemIdx, xShapeInfo);
      const sd::LongType zOffset = shape::getIndexOffset(elemIdx, zShapeInfo);
      z[zOffset] = sd::math::sd_exp<T, T>(x[xOffset] - max);
      // thread 0 folds in the running sum of previous chunks (held in temp)
      shmem[threadIdx.x] = (threadIdx.x != 0) ? z[zOffset] : (z[zOffset] + temp);
    } else
      shmem[threadIdx.x] = 0;  // neutral element for sum
    __syncthreads();

    for (int s = blockDim.x / 2; s > 0; s /= 2) {
      if (threadIdx.x < s) shmem[threadIdx.x] += shmem[threadIdx.x + s];
      __syncthreads();
    }
    temp = shmem[0];  // sum over chunks processed so far
    __syncthreads();  // FIX: barrier before shmem is overwritten by the next chunk
  }

  // ************ pass 3: z /= sum; only reads shmem, so no further barrier is needed ************ //
  for (int i = 0; i < numOfIters; ++i) {
    const sd::LongType elemIdx = i * blockDim.x + threadIdx.x;
    if (elemIdx >= len) continue;
    const sd::LongType zOffset = shape::getIndexOffset(elemIdx, zShapeInfo);
    z[zOffset] /= shmem[0];
  }
}
// Thin __global__ entry point that forwards to softMaxForVectorCuda; inherits
// that routine's requirement of being launched with exactly one block.
template <typename T>
void SD_KERNEL softMaxForVectorCudaGlobal(const void *vx, const sd::LongType *xShapeInfo, void *vz,
const sd::LongType *zShapeInfo) {
softMaxForVectorCuda<T>(vx, xShapeInfo, vz, zShapeInfo);
}
///////////////////////////////////////////////////////////////////
// Host-side launcher for the single-vector softmax kernel.
// Launch config: 1 block of SD_CUDA_BLOCK_SIZE threads. The 1024 bytes of
// dynamic shared memory appear unused (the kernel declares static __shared__
// storage) — presumably kept as a safety margin, TODO confirm.
template <typename T>
void softMaxForVectorCudaLauncher(const cudaStream_t *stream, const void *vx, const sd::LongType *xShapeInfo, void *vz,
const sd::LongType *zShapeInfo) {
softMaxForVectorCudaGlobal<T><<<1, SD_CUDA_BLOCK_SIZE, 1024, *stream>>>(vx, xShapeInfo, vz, zShapeInfo);
}
///////////////////////////////////////////////////////////////////
// One block per TAD (tensor-along-dimension): block blockIdx.x computes the
// softmax of its own sub-vector by delegating to the single-vector routine.
template <typename T>
SD_KERNEL static void softMaxCuda(const void *vx, const sd::LongType *xTadShapeInfo, const sd::LongType *xOffsets,
void *vz, const sd::LongType *zTadShapeInfo, const sd::LongType *zOffsets) {
const auto x = reinterpret_cast<const T *>(vx);
auto z = reinterpret_cast<T *>(vz);
// the offset tables map block index -> start of that TAD inside the buffers
const auto *xTad = x + xOffsets[blockIdx.x];
auto *zTad = z + zOffsets[blockIdx.x];
softMaxForVectorCuda<T>(xTad, xTadShapeInfo, zTad, zTadShapeInfo);
}
///////////////////////////////////////////////////////////////////
// Host launcher: callers must pass blocksPerGrid == number of TADs so every
// sub-vector gets exactly one block.
template <typename T>
static void softMaxCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem,
const cudaStream_t *stream, const void *vx, const sd::LongType *xTadShapeInfo,
const sd::LongType *xOffsets, void *vz, const sd::LongType *zTadShapeInfo,
const sd::LongType *zOffsets) {
softMaxCuda<T><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(vx, xTadShapeInfo, xOffsets, vz, zTadShapeInfo,
zOffsets);
}
//////////////////////////////////////////////////////////////////////////
// softmax(input) along `dimension`, written to `output`.
// Two paths: a single-block kernel when the input is a vector, and a
// one-block-per-TAD kernel otherwise.
void softmax(sd::LaunchContext *context, const NDArray &input, NDArray &output, const int dimension) {
if (!input.isActualOnDeviceSide()) input.syncToDevice();
const int rank = input.rankOf();
PointersManager manager(context, "helpers::softmax");
if (input.isVector()) {
if (rank == 1 || input.sizeAt(dimension) != 1) {
NDArray::prepareSpecialUse({&output}, {&input});
BUILD_SINGLE_SELECTOR(input.dataType(), softMaxForVectorCudaLauncher,
(context->getCudaStream(), input.specialBuffer(), input.specialShapeInfo(),
output.specialBuffer(), output.specialShapeInfo()),
SD_FLOAT_TYPES);
NDArray::registerSpecialUse({&output}, {&input});
} else
output = 1.;  // softmax over a length-1 axis is identically 1
} else {
// TAD packs describe each sub-vector along `dimension`; one block per TAD
auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(input.shapeInfo(), {dimension});
auto packZ = sd::ConstantTadHelper::getInstance().tadForDimensions(output.shapeInfo(), {dimension});
const int threadsPerBlock = SD_CUDA_BLOCK_SIZE;
const int blocksPerGrid = packZ.numberOfTads();
const int sharedMem = 1024;  // dynamic shared bytes; kernel uses static __shared__ — presumably a safety margin, TODO confirm
NDArray::prepareSpecialUse({&output}, {&input});
BUILD_SINGLE_SELECTOR(input.dataType(), softMaxCudaLauncher,
(blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), input.specialBuffer(),
packX.specialShapeInfo(), packX.specialOffsets(), output.specialBuffer(),
packZ.specialShapeInfo(), packZ.specialOffsets()),
SD_FLOAT_TYPES);
NDArray::registerSpecialUse({&output}, {&input});
// Historical NDArray-based fallback kept for reference:
// auto maxAlongDim = const_cast<NDArray&>(input).reduceAlongDimension(reduce::Max, {dimension}, true);
// (input - maxAlongDim).applyTransform(transform::Exp, &output); // output contains exponents temporarily
// auto sumAlongDim = output.reduceAlongDimension(reduce::Sum, {dimension}, true);
// output /= sumAlongDim;
// input.tickReadDevice();
}
manager.synchronize();
output.tickWriteDevice();
}
///////////////////////////////////////////////////////////////////
// log(softmax(x)) over a single vector; x and z share one shape info
// (xzShapeInfo), so the routine may run in place.
// Contract (see launcher): must be launched with gridDim == 1, because all
// reductions below synchronize only within one block; the tree reduction
// additionally presumes blockDim.x is a power of two — NOTE(review): confirm
// SD_CUDA_BLOCK_SIZE satisfies this.
// Three passes: (1) block-reduce the maximum for numerical stability,
// (2) write exp(x - max) into z while block-reducing the sum of exponents,
// (3) rewrite z[i] = log(z[i] / sum).
template <typename T>
void SD_KERNEL logSoftMaxForVectorCuda(const void *vx, const sd::LongType *xzShapeInfo, void *vz) {
// logic of this kernel is based on assumption gridDim = 1
const auto x = reinterpret_cast<const T *>(vx);
auto z = reinterpret_cast<T *>(vz);
__shared__ sd::LongType len;  // number of elements in the vector
__shared__ int numOfIters;  // blockDim.x-sized chunks needed to cover len
__shared__ T shmem[SD_CUDA_BLOCK_SIZE];  // scratch for the tree reductions
if (threadIdx.x == 0) {
len = shape::length(xzShapeInfo);
numOfIters = (len + blockDim.x - 1) / blockDim.x; // ceil (len / blockDim.x)
}
__syncthreads();
T temp =
-DataTypeUtils::max<T>(); // set start value to compare with at first iteration, FIXME: what if T is unsigned ??
// ************ evaluate max element in input array x ************ //
for (int i = 0; i < numOfIters; ++i) {
const sd::LongType elemIdx = i * blockDim.x + threadIdx.x;
if (elemIdx < len) {
const sd::LongType offset = shape::getIndexOffset(elemIdx, xzShapeInfo);
shmem[threadIdx.x] =
(threadIdx.x != 0)
? x[offset]
: sd::math::sd_max<T>(
x[offset],
temp); // take into account max element evaluated on previous iteration and stored in temp
} else
shmem[threadIdx.x] = -DataTypeUtils::max<T>(); // FIXME: what if T is unsigned ??
__syncthreads();
// power-of-two tree reduction; the chunk maximum lands in shmem[0]
for (int s = blockDim.x / 2; s > 0; s /= 2) {
if (threadIdx.x < s) shmem[threadIdx.x] = sd::math::sd_max<T>(shmem[threadIdx.x], shmem[threadIdx.x + s]);
__syncthreads();
}
temp = shmem[0]; // save max value calculated at current iteration
}
const T max = temp;
temp = 0;
// ************ evaluate value of exp(x[offset] - max) per each element, store it to shared memory shmem ************
// // at the same time evaluate sum of exponents, sum will be stored in shmem[0]
for (int i = 0; i < numOfIters; ++i) {
const sd::LongType elemIdx = i * blockDim.x + threadIdx.x;
if (elemIdx < len) {
const sd::LongType offset = shape::getIndexOffset(elemIdx, xzShapeInfo);
z[offset] = sd::math::sd_exp<T, T>(x[offset] - max);
shmem[threadIdx.x] =
(threadIdx.x != 0)
? z[offset]
: (z[offset] + temp); // take into account sum element evaluated on previous iteration and stored in temp
} else
shmem[threadIdx.x] = 0;
__syncthreads();
for (int s = blockDim.x / 2; s > 0; s /= 2) {
if (threadIdx.x < s) shmem[threadIdx.x] += shmem[threadIdx.x + s];
__syncthreads();
}
temp = shmem[0]; // save sum calculated at current iteration
}
// ************ evaluate log(z[offset] / sum) ************ //
for (int i = 0; i < numOfIters; ++i) {
const sd::LongType elemIdx = i * blockDim.x + threadIdx.x;
if (elemIdx >= len) continue;
const sd::LongType offset = shape::getIndexOffset(elemIdx, xzShapeInfo);
z[offset] = sd::math::sd_log<T, T>(z[offset] / shmem[0]);
}
}
///////////////////////////////////////////////////////////////////
// Single-block launch of the in-place log-softmax vector kernel. The 1024
// bytes of dynamic shared memory appear unused (the kernel declares static
// __shared__ storage) — presumably kept as a safety margin, TODO confirm.
template <typename T>
void logSoftMaxForVectorCudaLauncher(const cudaStream_t *stream, const void *vx, const sd::LongType *xzShapeInfo,
void *vz) {
logSoftMaxForVectorCuda<T><<<1, SD_CUDA_BLOCK_SIZE, 1024, *stream>>>(vx, xzShapeInfo, vz);
}
//////////////////////////////////////////////////////////////////////////
// Computes log(softmax(input)) along `dimension` and writes it to `output`.
// Fix vs original: the vector fast path now brackets the kernel launch with
// NDArray::prepareSpecialUse/registerSpecialUse, mirroring softmax() above,
// so the output's device-buffer state is tracked consistently.
void logSoftmax(sd::LaunchContext *context, const NDArray &input, NDArray &output, const int dimension) {
  if (!input.isActualOnDeviceSide()) input.syncToDevice();
  const int rank = input.rankOf();
  if (input.isVector()) {
    if (rank == 1 || input.sizeAt(dimension) != 1) {
      // single-vector fast path: one block computes the whole result
      NDArray::prepareSpecialUse({&output}, {&input});
      BUILD_SINGLE_SELECTOR(
          input.dataType(), logSoftMaxForVectorCudaLauncher,
          (context->getCudaStream(), input.specialBuffer(), input.specialShapeInfo(), output.specialBuffer()),
          SD_FLOAT_TYPES);
      NDArray::registerSpecialUse({&output}, {&input});
    } else
      output = 0.;  // softmax over a length-1 axis is 1, and log(1) == 0
  } else {
    // generic path built from NDArray reductions/transforms
    auto maxAlongDim = const_cast<NDArray &>(input).reduceAlongDimension(reduce::Max, {dimension}, true);
    (input - maxAlongDim).applyTransform(transform::Exp, output);  // output holds exponents temporarily
    auto sumAlongDim = output.reduceAlongDimension(reduce::Sum, {dimension}, true);
    output /= sumAlongDim;
    output.applyTransform(transform::Log, output);
    input.tickReadDevice();
  }
  PointersManager manager(context, "helpers::logSoftmax");
  manager.synchronize();
  output.tickWriteDevice();
}
///////////////////////////////////////////////////////////////////
// Writes s*(1 - s) where s = softmax(x); x and z share xzShapeInfo, so this
// is the in-place-capable elementwise softmax derivative for one vector.
// Same launch contract as the other single-vector kernels: gridDim == 1, and
// the tree reduction presumes blockDim.x is a power of two — NOTE(review):
// confirm SD_CUDA_BLOCK_SIZE guarantees this.
template <typename T>
void SD_KERNEL softMaxDerivForVectorCuda(const void *vx, const sd::LongType *xzShapeInfo, void *vz) {
// logic of this kernel is based on assumption gridDim = 1
const auto x = reinterpret_cast<const T *>(vx);
auto z = reinterpret_cast<T *>(vz);
__shared__ sd::LongType len;  // number of elements in the vector
__shared__ int numOfIters;  // blockDim.x-sized chunks needed to cover len
__shared__ T shmem[SD_CUDA_BLOCK_SIZE];  // scratch for the tree reductions
if (threadIdx.x == 0) {
len = shape::length(xzShapeInfo);
numOfIters = (len + blockDim.x - 1) / blockDim.x; // ceil (len / blockDim.x)
}
__syncthreads();
T temp =
-DataTypeUtils::max<T>(); // set start value to compare with at first iteration, FIXME: what if T is unsigned ??
// ************ evaluate max element in input array x ************ //
for (int i = 0; i < numOfIters; ++i) {
const sd::LongType elemIdx = i * blockDim.x + threadIdx.x;
if (elemIdx < len) {
const sd::LongType offset = shape::getIndexOffset(elemIdx, xzShapeInfo);
shmem[threadIdx.x] =
(threadIdx.x != 0)
? x[offset]
: sd::math::sd_max<T>(
x[offset],
temp); // take into account max element evaluated on previous iteration and stored in temp
} else
shmem[threadIdx.x] = -DataTypeUtils::max<T>(); // FIXME: what if T is unsigned ??
__syncthreads();
// power-of-two tree reduction; the chunk maximum lands in shmem[0]
for (int s = blockDim.x / 2; s > 0; s /= 2) {
if (threadIdx.x < s) shmem[threadIdx.x] = sd::math::sd_max<T>(shmem[threadIdx.x], shmem[threadIdx.x + s]);
__syncthreads();
}
temp = shmem[0]; // save max value calculated at current iteration
}
const T max = temp;
temp = 0;
// ************ evaluate value of exp(x[offset] - max) per each element, store it to shared memory shmem ************
// // at the same evaluate sum of exponents, sum will be stored in shmem[0]
for (int i = 0; i < numOfIters; ++i) {
const sd::LongType elemIdx = i * blockDim.x + threadIdx.x;
if (elemIdx < len) {
const sd::LongType offset = shape::getIndexOffset(elemIdx, xzShapeInfo);
z[offset] = sd::math::sd_exp<T, T>(x[offset] - max);
shmem[threadIdx.x] =
(threadIdx.x != 0)
? z[offset]
: (z[offset] + temp); // take into account sum element evaluated on previous iteration and stored in temp
} else
shmem[threadIdx.x] = 0;
__syncthreads();
for (int s = blockDim.x / 2; s > 0; s /= 2) {
if (threadIdx.x < s) shmem[threadIdx.x] += shmem[threadIdx.x + s];
__syncthreads();
}
temp = shmem[0]; // save sum calculated at current iteration
}
// ************ evaluate (z[offset] / sum) and derivative z[offset] = z[offset] * (1 - z[offset]) ************ //
for (int i = 0; i < numOfIters; ++i) {
const sd::LongType elemIdx = i * blockDim.x + threadIdx.x;
if (elemIdx >= len) continue;
const sd::LongType offset = shape::getIndexOffset(elemIdx, xzShapeInfo);
z[offset] /= shmem[0];
z[offset] *= (1.f - z[offset]); // derivative
}
}
///////////////////////////////////////////////////////////////////
// Single-block launch of the in-place softmax-derivative vector kernel. The
// 1024 bytes of dynamic shared memory appear unused (the kernel declares
// static __shared__ storage) — presumably kept as a safety margin, TODO confirm.
template <typename T>
void softMaxDerivForVectorCudaLauncher(const cudaStream_t *stream, const void *vx, const sd::LongType *xzShapeInfo,
void *vz) {
softMaxDerivForVectorCuda<T><<<1, SD_CUDA_BLOCK_SIZE, 1024, *stream>>>(vx, xzShapeInfo, vz);
}
///////////////////////////////////////////////////////////////////
// Writes s*(1 - s), with s = softmax(input) along `dimension`, into `output`.
// Fixes vs original: the vector fast path now brackets the kernel launch with
// prepareSpecialUse/registerSpecialUse like softmax() above, and the unused
// local `rank` was removed.
void softmaxDerivative(sd::LaunchContext *context, const NDArray &input, NDArray &output, const int dimension) {
  if (!input.isActualOnDeviceSide()) input.syncToDevice();
  int temp;
  if (shape::isCommonVector(input.shapeInfo(), temp)) {
    // single-vector fast path: one block computes the whole derivative
    NDArray::prepareSpecialUse({&output}, {&input});
    BUILD_SINGLE_SELECTOR(
        input.dataType(), softMaxDerivForVectorCudaLauncher,
        (context->getCudaStream(), input.specialBuffer(), input.specialShapeInfo(), output.specialBuffer()),
        SD_FLOAT_TYPES);
    NDArray::registerSpecialUse({&output}, {&input});
  } else {
    // generic path built from NDArray reductions/transforms
    auto maxAlongDim = const_cast<NDArray &>(input).reduceAlongDimension(reduce::Max, {dimension}, true);
    (input - maxAlongDim).applyTransform(transform::Exp, output);  // output holds exponents temporarily
    auto sumAlongDim = output.reduceAlongDimension(reduce::Sum, {dimension}, true);
    output /= sumAlongDim;
    output *= (1.f - output);  // derivative: s * (1 - s)
    input.tickReadDevice();
  }
  PointersManager manager(context, "helpers::softmaxDerivative");
  manager.synchronize();
  output.tickWriteDevice();
}
// Threshold ReLU: values strictly above `threshold` pass through, all others
// become zero.
template <typename T>
void thresholdRelu_(NDArray const &input, double threshold, NDArray &output) {
  auto op = LAMBDA_T(x, threshold) { return x > static_cast<T>(threshold) ? x : static_cast<T>(0.f); };
  const_cast<NDArray &>(input).applyLambda(op, output);
}

// Type-dispatched entry point over the supported float types.
void thresholdRelu(sd::LaunchContext *context, NDArray const &input, double threshold, NDArray &output) {
  BUILD_SINGLE_SELECTOR(input.dataType(), thresholdRelu_, (input, threshold, output), SD_FLOAT_TYPES);
}
// Backward pass of threshold ReLU: the upstream gradient flows through only
// where the forward input exceeded theta; elsewhere the gradient is zero.
template <typename T>
void thresholdReluDerivative_(NDArray *input, double theta, NDArray *dLdO, NDArray *output) {
  auto grad = LAMBDA_TT(x, upstream, theta) { return x > theta ? upstream : static_cast<T>(0); };
  input->applyPairwiseLambda(*dLdO, grad, *output);
}

// Type-dispatched entry point over the supported float types.
void thresholdReluDerivative(sd::LaunchContext *context, NDArray *input, double threshold, NDArray *dLdO,
                             NDArray *output) {
  BUILD_SINGLE_SELECTOR(input->dataType(), thresholdReluDerivative_, (input, threshold, dLdO, output), SD_FLOAT_TYPES);
}
} // namespace helpers
} // namespace ops
} // namespace sd
|
ad35fac4869c9a4b2e5e01cad110e16bbcb28738.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (C) 2015 Davis E. King (davis@dlib.net)
// License: Boost Software License See LICENSE.txt for the full license.
#include "cuda_utils.h"
#include "cuda_dlib.h"
namespace dlib
{
namespace cuda
{
// -----------------------------------------------------------------------------------
// Binds subsequent runtime calls on this host thread to device `dev`.
void set_device (
    int dev
)
{
    CHECK_CUDA(hipSetDevice(dev));
}

// Returns the index of the device currently bound to this host thread.
int get_device (
)
{
    int current = 0;
    CHECK_CUDA(hipGetDevice(&current));
    return current;
}

// Returns the driver-reported name of the given device.
std::string get_device_name (
    int device
)
{
    hipDeviceProp_t properties;
    CHECK_CUDA(hipGetDeviceProperties(&properties, device));
    return std::string(properties.name);
}
// Switches the current device to blocking-sync scheduling, so host-side
// synchronization calls yield the CPU instead of spin-waiting.
void set_current_device_blocking_sync(
)
{
    CHECK_CUDA(hipSetDeviceFlags(hipDeviceScheduleBlockingSync));
}

// Number of GPU devices visible to the runtime.
int get_num_devices (
)
{
    int count = 0;
    CHECK_CUDA(hipGetDeviceCount(&count));
    return count;
}

// True if device_id can directly access memory residing on peer_device_id.
bool can_access_peer (int device_id, int peer_device_id)
{
    int flag = 0;
    CHECK_CUDA(hipDeviceCanAccessPeer(&flag, device_id, peer_device_id));
    return flag != 0;
}

// Convenience overload keyed off the devices that own the two tensors.
bool can_access_peer (const tensor& device, const tensor& peer_device)
{
    return can_access_peer(device.device_id(), peer_device.device_id());
}
// Blocks the host until all previously issued work on device `dev` finishes.
void device_synchronize (int dev)
{
    raii_set_device guard(dev);
    CHECK_CUDA(hipDeviceSynchronize());
}

// Convenience overload: synchronize the device that owns the tensor's memory.
void device_synchronize (const tensor& dev)
{
    device_synchronize(dev.device_id());
}
// RAII guard for peer-to-peer access: enabling it in the constructor, while
// tolerating the "already enabled" case; the destructor only disables access
// that this object itself enabled.
enable_peer_access::
enable_peer_access(
int device_id,
int peer_device_id
) : call_disable(false), device_id(device_id), peer_device_id(peer_device_id)
{
raii_set_device set_dev(device_id);
auto err = hipDeviceEnablePeerAccess(peer_device_id, 0);
if (err == hipSuccess)
{
call_disable = true;  // we turned it on, so we own turning it off
}
else if (err == hipErrorPeerAccessAlreadyEnabled)
{
// call hipGetLastError() to dispose of this error since we don't
// care.
auto err2 = hipGetLastError();
if (err2 != hipErrorPeerAccessAlreadyEnabled)
CHECK_CUDA(err2);
}
else
{
CHECK_CUDA(err);
}
}
// noexcept(false): CHECK_CUDA may report a driver failure from the destructor.
enable_peer_access::
~enable_peer_access() noexcept(false)
{
if (call_disable)
{
raii_set_device set_dev(device_id);
CHECK_CUDA(hipDeviceDisablePeerAccess(peer_device_id));
}
}
// -----------------------------------------------------------------------------------
// -----------------------------------------------------------------------------------
// -----------------------------------------------------------------------------------
// Computes invnorms[i] = 1/sqrt(eps + sum_j data[i*nc+j]^2) for each of the
// nr rows. Launched with gridDim.x == 1 (see inverse_norms below) so that
// __syncthreads() is a sufficient barrier between the three phases; the y
// grid dimension strides over rows, and blockDim.x must be 32 because
// warp_reduce_atomic_add() operates on one warp.
__global__ void _cuda_inverse_norms(float* invnorms, const float* data, size_t nr, size_t nc, const float eps)
{
// initialize invnorms before we begin.
for (auto i : grid_stride_range_y(0, nr))
for (auto j : grid_stride_range(0, 1))
invnorms[i] = eps;
__syncthreads();
for (auto i : grid_stride_range_y(0, nr))
{
auto p = data + i*nc;
float temp = 0;
for (auto j : grid_stride_range(0, nc))
temp += p[j]*p[j];
// and store the sum into invnorms[i]
warp_reduce_atomic_add(invnorms[i], temp);
}
__syncthreads();
for (auto i : grid_stride_range_y(0, nr))
for (auto j : grid_stride_range(0, 1))
invnorms[i] = 1.0/std::sqrt(invnorms[i]);
}
// Host wrapper: produces one inverse norm per sample of `data`.
void inverse_norms (
resizable_tensor& invnorms,
const tensor& data,
const double eps
)
{
invnorms.set_size(data.num_samples());
dim3 blocks(1,10); // x size 1 so we don't need to worry about inter-block synchronization (since only y spans blocks)
dim3 threads(32,32); // x size must be 32 because we are using warp_reduce_atomic_add() in the kernel.
hipLaunchKernelGGL(( _cuda_inverse_norms), dim3(blocks),dim3(threads), 0, 0, invnorms.device(), data.device(), data.num_samples(), data.size()/data.num_samples(), eps);
}
// ----------------------------------------------------------------------------------------
// out[i] = dot(lhs row i, rhs row i) for each of the nr rows. Same launch
// contract as _cuda_inverse_norms: gridDim.x == 1, blockDim.x == 32 for
// warp_reduce_atomic_add(), and the y grid dimension strides over rows.
__global__ void _cuda_dot_prods(float* out, const float* lhs, const float* rhs, size_t nr, size_t nc)
{
// initialize out before we begin.
for (auto i : grid_stride_range_y(0, nr))
for (auto j : grid_stride_range(0, 1))
out[i] = 0;
__syncthreads();
for (auto i : grid_stride_range_y(0, nr))
{
auto l = lhs + i*nc;
auto r = rhs + i*nc;
float temp = 0;
for (auto j : grid_stride_range(0, nc))
temp += l[j]*r[j];
// and store the sum into out[i]
warp_reduce_atomic_add(out[i], temp);
}
}
// Host wrapper: one dot product per sample of lhs/rhs.
void dot_prods (
resizable_tensor& out,
const tensor& lhs,
const tensor& rhs
)
{
out.set_size(lhs.num_samples());
dim3 blocks(1,10); // x size 1 so we don't need to worry about inter-block synchronization (since only y spans blocks)
dim3 threads(32,32); // x size must be 32 because we are using warp_reduce_atomic_add() in the kernel.
hipLaunchKernelGGL(( _cuda_dot_prods), dim3(blocks),dim3(threads), 0, 0, out.device(), lhs.device(), rhs.device(), lhs.num_samples(), lhs.size()/lhs.num_samples());
}
// ----------------------------------------------------------------------------------------
// out[r][c] = m[r][c] * v[c]: every column of m is scaled by the matching
// entry of v (v holds nc values).
__global__ void _cuda_scale_columns(float* out, const float* m, const float* v, size_t nr, size_t nc)
{
    for (auto idx : grid_stride_range(0, nr*nc))
    {
        const size_t col = idx % nc;
        out[idx] = m[idx] * v[col];
    }
}

// Host wrapper; v must hold one value per column of m.
void scale_columns (
    tensor& out,
    const tensor& m,
    const tensor& v
)
{
    launch_kernel(_cuda_scale_columns, max_jobs(m.size()), out.device(), m.device(), v.device(), m.num_samples(), m.size()/m.num_samples());
}
// ----------------------------------------------------------------------------------------
// out[r][c] = m[r][c] * v[r]: every row of m is scaled by the matching entry
// of v (v holds nr values).
__global__ void _cuda_scale_rows(float* out, const float* m, const float* v, size_t nr, size_t nc)
{
    for (auto idx : grid_stride_range(0, nr*nc))
    {
        const size_t row = idx / nc;
        out[idx] = m[idx] * v[row];
    }
}

// Host wrapper; v must hold one value per sample (row) of m.
void scale_rows (
    tensor& out,
    const tensor& m,
    const tensor& v
)
{
    launch_kernel(_cuda_scale_rows, max_jobs(m.size()), out.device(), m.device(), v.device(), m.num_samples(), m.size()/m.num_samples());
}
// ----------------------------------------------------------------------------------------
// Fused row-wise op: out[r][c] = (m1[r][c] - m2[r][c]*v1[r]) * v2[r].
__global__ void _cuda_scale_rows2(float* out, const float* m1, const float* m2, const float* v1, const float* v2, size_t nr, size_t nc)
{
for (auto j : grid_stride_range(0, nr*nc))
{
out[j] = (m1[j] - m2[j]*v1[j/nc]) * v2[j/nc];
}
}
// Same as above but blends with the existing output: out = beta*out + (...).
__global__ void _cuda_scale_rows2_beta(const float beta, float* out, const float* m1, const float* m2, const float* v1, const float* v2, size_t nr, size_t nc)
{
for (auto j : grid_stride_range(0, nr*nc))
{
out[j] = beta*out[j] + (m1[j] - m2[j]*v1[j/nc]) * v2[j/nc];
}
}
// Dispatches to the overwrite kernel when beta == 0 so the previous contents
// of `out` (possibly uninitialized) are never read.
void scale_rows2 (
float beta,
tensor& out,
const tensor& m1,
const tensor& m2,
const tensor& v1,
const tensor& v2
)
{
if (beta == 0)
{
launch_kernel(_cuda_scale_rows2, max_jobs(m1.size()), out.device(),
m1.device(), m2.device(), v1.device(), v2.device(), m1.num_samples(),
m1.size()/m1.num_samples());
}
else
{
launch_kernel(_cuda_scale_rows2_beta, max_jobs(m1.size()), beta,
out.device(), m1.device(), m2.device(), v1.device(), v2.device(),
m1.num_samples(), m1.size()/m1.num_samples());
}
}
// ----------------------------------------------------------------------------------------
// Elementwise exponential: dest[i] = e^src[i].
__global__ void _cuda_exp(float* dest, const float* src, size_t n)
{
    for (auto idx : grid_stride_range(0, n))
        dest[idx] = ::exp(src[idx]);
}

// Host wrapper; dest and src must already have the same size.
void exp (
    tensor& dest,
    const tensor& src
)
{
    DLIB_ASSERT(dest.size() == src.size());
    launch_kernel(_cuda_exp, max_jobs(src.size()), dest.device(), src.device(), src.size());
}
// ----------------------------------------------------------------------------------------
// Elementwise natural logarithm: dest[i] = ln(src[i]).
__global__ void _cuda_log(float* dest, const float* src, size_t n)
{
    for (auto idx : grid_stride_range(0, n))
        dest[idx] = ::log(src[idx]);
}

// Host wrapper; dest and src must already have the same size.
void log (
    tensor& dest,
    const tensor& src
)
{
    DLIB_ASSERT(dest.size() == src.size());
    launch_kernel(_cuda_log, max_jobs(src.size()), dest.device(), src.device(), src.size());
}
// ----------------------------------------------------------------------------------------
// Elementwise base-10 logarithm: dest[i] = log10(src[i]).
__global__ void _cuda_log10(float* dest, const float* src, size_t n)
{
    for (auto idx : grid_stride_range(0, n))
        dest[idx] = ::log10(src[idx]);
}

// Host wrapper; dest and src must already have the same size.
void log10 (
    tensor& dest,
    const tensor& src
)
{
    DLIB_ASSERT(dest.size() == src.size());
    launch_kernel(_cuda_log10, max_jobs(src.size()), dest.device(), src.device(), src.size());
}
// -----------------------------------------------------------------------------------
// Elementwise product, all three operands the same length.
__global__ void _cuda_multiply1(float* d, const float* s1, const float* s2, size_t n)
{
    for (auto idx : grid_stride_range(0, n))
        d[idx] = s1[idx] * s2[idx];
}

// Broadcast-accumulate form: d is the shortest operand; every position j of
// the virtual max_size-long product stream folds into d[j % n], with the
// sources repeating cyclically.
__global__ void _cuda_multiply2(float* d, const float* s1, const float* s2,
                                size_t n, size_t s1_n, size_t s2_n, size_t max_size)
{
    for (auto idx : grid_stride_range(0, n))
    {
        d[idx] = 0;
        for (size_t j = idx; j < max_size; j += n)
            d[idx] += s1[j % s1_n] * s2[j % s2_n];
    }
}

// Broadcast form: the inputs repeat cyclically to cover d.
__global__ void _cuda_multiply3(float* d, const float* s1, const float* s2,
                                size_t n, size_t s1_n, size_t s2_n)
{
    for (auto idx : grid_stride_range(0, n))
        d[idx] = s1[idx % s1_n] * s2[idx % s2_n];
}

// += variant of _cuda_multiply1.
__global__ void _cuda_multiply1_add_to(float* d, const float* s1, const float* s2, size_t n)
{
    for (auto idx : grid_stride_range(0, n))
        d[idx] += s1[idx] * s2[idx];
}

// += variant of _cuda_multiply2 (no zero-initialization of d).
__global__ void _cuda_multiply2_add_to(float* d, const float* s1, const float* s2,
                                       size_t n, size_t s1_n, size_t s2_n, size_t max_size)
{
    for (auto idx : grid_stride_range(0, n))
    {
        for (size_t j = idx; j < max_size; j += n)
            d[idx] += s1[j % s1_n] * s2[j % s2_n];
    }
}

// += variant of _cuda_multiply3.
__global__ void _cuda_multiply3_add_to(float* d, const float* s1, const float* s2,
                                       size_t n, size_t s1_n, size_t s2_n)
{
    for (auto idx : grid_stride_range(0, n))
        d[idx] += s1[idx % s1_n] * s2[idx % s2_n];
}
// dest = src1 .* src2 with sample-dimension broadcasting: k/nr/nc must agree
// everywhere, and each num_samples is either 1 or the common maximum MD.
// add_to selects accumulate (+=) versus overwrite semantics.
// Fix vs original: removed the unused `const auto d/s1/s2 = ...host()` locals,
// which forced pointless device->host synchronizations before every launch.
void multiply (
    bool add_to,
    tensor& dest,
    const tensor& src1,
    const tensor& src2
)
{
    DLIB_CASSERT(dest.k() == src1.k() && src1.k() == src2.k() &&
        dest.nr() == src1.nr() && src1.nr() == src2.nr() &&
        dest.nc() == src1.nc() && src1.nc() == src2.nc() );
    const long MD = ::max(::max(dest.num_samples(),src1.num_samples()),src2.num_samples());
    DLIB_CASSERT((dest.num_samples()==1 || dest.num_samples()==MD) &&
        (src1.num_samples()==1 || src1.num_samples()==MD) &&
        (src2.num_samples()==1 || src2.num_samples()==MD) );

    if (dest.size() == 0)
        return;

    const size_t max_size = ::max(::max(dest.size(),src1.size()),src2.size());
    if (dest.size() == src1.size() && src1.size() == src2.size())
    {
        // identical sizes everywhere: plain elementwise kernel
        if (add_to)
            launch_kernel(_cuda_multiply1_add_to,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(), src1.size());
        else
            launch_kernel(_cuda_multiply1,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(), src1.size());
    }
    else if (dest.num_samples() == 1)
    {
        // dest is the broadcast target: fold the full product stream into it
        if (add_to)
            launch_kernel(_cuda_multiply2_add_to,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(),
                dest.size(), src1.size(), src2.size(), max_size);
        else
            launch_kernel(_cuda_multiply2,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(),
                dest.size(), src1.size(), src2.size(), max_size);
    }
    else
    {
        // a source is broadcast: cyclic-indexing kernel
        if (add_to)
            launch_kernel(_cuda_multiply3_add_to,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(),
                dest.size(), src1.size(), src2.size());
        else
            launch_kernel(_cuda_multiply3,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(),
                dest.size(), src1.size(), src2.size());
    }
}
// ------------------------------------------------------------------------------------
// Per-channel scale: d[i] = s1[i] * s2[channel], where channel = (i/bs)%ks,
// bs is the plane size (nr*nc) and ks the number of channels.
__global__ void _cuda_multiply_conv(float* d, const float* s1, size_t n, const float* s2, size_t bs, size_t ks)
{
for (auto i : grid_stride_range(0, n))
{
auto k = (i/bs)%ks;
d[i] = s1[i]*s2[k];
}
}
// Channel-reduction form: d[k] = sum over all planes of channel k of s1.*s2.
// Here n counts the num_samples*k planes, each y-iteration reducing one
// bs-sized plane. Launched with gridDim.x == 1 (see multiply_conv below) so
// __syncthreads() separates the zero-init phase from accumulation;
// warp_reduce_atomic_add() requires blockDim.x == 32.
__global__ void _cuda_multiply_conv2(float* d, const float* s1, size_t n, const float* s2, size_t bs, size_t ks)
{
// zero initialize d before we begin.
for (auto i : grid_stride_range_y(0, ks))
for (auto j : grid_stride_range(0, 1))
d[i] = 0;
__syncthreads();
// loop over all the image planes
for (auto i : grid_stride_range_y(0, n))
{
// sum all the elements in the i-th image plane
float temp = 0;
for (auto j : grid_stride_range(i*bs, (i+1)*bs))
temp += s1[j]*s2[j];
auto k = i%ks;
// and store the sum into d[k]
warp_reduce_atomic_add(d[k], temp);
}
}
// += variant of _cuda_multiply_conv.
__global__ void _cuda_multiply_conv_add_to(float* d, const float* s1, size_t n, const float* s2, size_t bs, size_t ks)
{
for (auto i : grid_stride_range(0, n))
{
auto k = (i/bs)%ks;
d[i] += s1[i]*s2[k];
}
}
// += variant of _cuda_multiply_conv2 (skips the zero-init pass).
__global__ void _cuda_multiply_conv2_add_to(float* d, const float* s1, size_t n, const float* s2, size_t bs, size_t ks)
{
// loop over all the image planes
for (auto i : grid_stride_range_y(0, n))
{
// sum all the elements in the i-th image plane
float temp = 0;
for (auto j : grid_stride_range(i*bs, (i+1)*bs))
temp += s1[j]*s2[j];
auto k = i%ks;
// and store the sum into d[k]
warp_reduce_atomic_add(d[k], temp);
}
}
// Two modes, distinguished by dest's shape:
//  * dest shaped like src1: scale every element of src1 by the per-channel
//    value in src2 (src2 is 1 x k x 1 x 1).
//  * dest is 1 x k x 1 x 1: reduce src1.*src2 (same shapes) to one value per
//    channel, using the fixed dim3(1,10)/dim3(32,32) launch the reduction
//    kernels require.
void multiply_conv (
bool add_to,
tensor& dest,
const tensor& src1,
const tensor& src2
)
{
if (have_same_dimensions(dest,src1))
{
DLIB_CASSERT(src2.num_samples() == 1 && src2.nr() == 1 && src2.nc() == 1 && src2.k() == src1.k());
if (dest.size() == 0)
return;
if (add_to)
launch_kernel(_cuda_multiply_conv_add_to,max_jobs(dest.size()),
dest.device(), src1.device(), src1.size(), src2.device(), src1.nr()*src1.nc(), src1.k());
else
launch_kernel(_cuda_multiply_conv,max_jobs(dest.size()),
dest.device(), src1.device(), src1.size(), src2.device(), src1.nr()*src1.nc(), src1.k());
}
else
{
DLIB_CASSERT(have_same_dimensions(src1,src2));
DLIB_CASSERT(dest.num_samples() == 1 && dest.nr() == 1 && dest.nc() == 1 && dest.k() == src1.k());
if (dest.size() == 0)
return;
dim3 blocks(1,10); // x size 1 so we don't need to worry about inter-block synchronization (since only y spans blocks)
dim3 threads(32,32); // x size must be 32 because we are using warp_reduce_atomic_add() in the kernel.
if (add_to)
hipLaunchKernelGGL(( _cuda_multiply_conv2_add_to), dim3(blocks),dim3(threads), 0, 0,
dest.device(), src1.device(), src1.num_samples()*src1.k(), src2.device(), src1.nr()*src1.nc(), src1.k());
else
hipLaunchKernelGGL(( _cuda_multiply_conv2), dim3(blocks),dim3(threads), 0, 0,
dest.device(), src1.device(), src1.num_samples()*src1.k(), src2.device(), src1.nr()*src1.nc(), src1.k());
}
}
// ------------------------------------------------------------------------------------
// Elementwise product, overwrite form.
__global__ void _cuda_mult1(float* d, const float* s1, const float* s2, size_t n)
{
    for (auto idx : grid_stride_range(0, n))
        d[idx] = s1[idx] * s2[idx];
}

// Elementwise product, accumulate form.
__global__ void _cuda_mult1_add_to(float* d, const float* s1, const float* s2, size_t n)
{
    for (auto idx : grid_stride_range(0, n))
        d[idx] += s1[idx] * s2[idx];
}
// Zero-padded elementwise product: each output coordinate (n,k,r,c) of d
// multiplies the corresponding elements of s1 and s2, substituting 0 for any
// coordinate that falls outside a source's dimensions.
__global__ void _cuda_mult2(float* d, const float* s1, const float* s2,
size_t dn, size_t dk, size_t dr, size_t dc,
size_t s1n, size_t s1k, size_t s1r, size_t s1c,
size_t s2n, size_t s2k, size_t s2r, size_t s2c)
{
for (auto i : grid_stride_range(0, dn*dk*dr*dc))
{
size_t n,k,r,c;
unpack_idx(i, dk,dr,dc, n,k,r,c);
float v1 = 0;
float v2 = 0;
if (n < s1n &&
k < s1k &&
r < s1r &&
c < s1c )
{
v1 = s1[pack_idx(s1k,s1r,s1c, n,k,r,c)];
}
if (n < s2n &&
k < s2k &&
r < s2r &&
c < s2c )
{
v2 = s2[pack_idx(s2k,s2r,s2c, n,k,r,c)];
}
d[i] = v1*v2;
}
}
// += variant of _cuda_mult2.
__global__ void _cuda_mult2_add_to(float* d, const float* s1, const float* s2,
size_t dn, size_t dk, size_t dr, size_t dc,
size_t s1n, size_t s1k, size_t s1r, size_t s1c,
size_t s2n, size_t s2k, size_t s2r, size_t s2c)
{
for (auto i : grid_stride_range(0, dn*dk*dr*dc))
{
size_t n,k,r,c;
unpack_idx(i, dk,dr,dc, n,k,r,c);
float v1 = 0;
float v2 = 0;
if (n < s1n &&
k < s1k &&
r < s1r &&
c < s1c )
{
v1 = s1[pack_idx(s1k,s1r,s1c, n,k,r,c)];
}
if (n < s2n &&
k < s2k &&
r < s2r &&
c < s2c )
{
v2 = s2[pack_idx(s2k,s2r,s2c, n,k,r,c)];
}
d[i] += v1*v2;
}
}
// dest = src1 .* src2, where any element outside a source's n/k/nr/nc bounds
// is treated as zero. add_to selects accumulate versus overwrite semantics.
void multiply_zero_padded (
    bool add_to,
    tensor& dest,
    const tensor& src1,
    const tensor& src2
)
{
    if (dest.size() == 0)
        return;

    const bool same_shape = have_same_dimensions(dest, src1) &&
                            have_same_dimensions(dest, src2);
    if (same_shape)
    {
        // identical geometry everywhere: simple and fast elementwise kernels
        if (add_to)
            launch_kernel(_cuda_mult1_add_to,max_jobs(dest.size()), dest.device(), src1.device(), src2.device(), dest.size());
        else
            launch_kernel(_cuda_mult1,max_jobs(dest.size()), dest.device(), src1.device(), src2.device(), dest.size());
    }
    else if (add_to)
    {
        // mismatched geometry: bounds-checked kernel that zero-pads sources
        launch_kernel(_cuda_mult2_add_to,max_jobs(dest.size()),
            dest.device(), src1.device(), src2.device(),
            dest.num_samples(), dest.k(), dest.nr(), dest.nc(),
            src1.num_samples(), src1.k(), src1.nr(), src1.nc(),
            src2.num_samples(), src2.k(), src2.nr(), src2.nc()
            );
    }
    else
    {
        launch_kernel(_cuda_mult2,max_jobs(dest.size()),
            dest.device(), src1.device(), src2.device(),
            dest.num_samples(), dest.k(), dest.nr(), dest.nc(),
            src1.num_samples(), src1.k(), src1.nr(), src1.nc(),
            src2.num_samples(), src2.k(), src2.nr(), src2.nc()
            );
    }
}
// ------------------------------------------------------------------------------------
// Elementwise sum: d = s1 + s2, all operands of length n.
__global__ void _cuda_add1(float* d, const float* s1, const float* s2, size_t n)
{
    for (auto idx : grid_stride_range(0, n))
        d[idx] = s1[idx] + s2[idx];
}
// Zero-padded elementwise sum: each output coordinate (n,k,r,c) of d adds the
// corresponding elements of s1 and s2, substituting 0 for coordinates that
// fall outside a source's dimensions.
__global__ void _cuda_add2(float* d, const float* s1, const float* s2,
size_t dn, size_t dk, size_t dr, size_t dc,
size_t s1n, size_t s1k, size_t s1r, size_t s1c,
size_t s2n, size_t s2k, size_t s2r, size_t s2c)
{
for (auto i : grid_stride_range(0, dn*dk*dr*dc))
{
size_t n,k,r,c;
unpack_idx(i, dk,dr,dc, n,k,r,c);
float v1 = 0;
float v2 = 0;
if (n < s1n &&
k < s1k &&
r < s1r &&
c < s1c )
{
v1 = s1[pack_idx(s1k,s1r,s1c, n,k,r,c)];
}
if (n < s2n &&
k < s2k &&
r < s2r &&
c < s2c )
{
v2 = s2[pack_idx(s2k,s2r,s2c, n,k,r,c)];
}
d[i] = v1+v2;
}
}
// dest = src1 + src2, where any element outside a source's n/k/nr/nc bounds
// contributes zero (zero-padded broadcasting).
void add (
    tensor& dest,
    const tensor& src1,
    const tensor& src2
)
{
    if (dest.size() == 0)
        return;

    const bool same_shape = have_same_dimensions(dest, src1) &&
                            have_same_dimensions(dest, src2);
    if (same_shape)
    {
        // identical geometry everywhere: simple and fast elementwise kernel
        launch_kernel(_cuda_add1,max_jobs(dest.size()), dest.device(), src1.device(), src2.device(), dest.size());
    }
    else
    {
        // mismatched geometry: bounds-checked kernel that zero-pads sources
        launch_kernel(_cuda_add2,max_jobs(dest.size()),
            dest.device(), src1.device(), src2.device(),
            dest.num_samples(), dest.k(), dest.nr(), dest.nc(),
            src1.num_samples(), src1.k(), src1.nr(), src1.nc(),
            src2.num_samples(), src2.k(), src2.nr(), src2.nc()
            );
    }
}
// ------------------------------------------------------------------------------------
// d = A*s + B elementwise.
__global__ void _cuda_affine_transform1(float* d, const float* s, size_t n, float A, float B)
{
    for (auto idx : grid_stride_range(0, n))
        d[idx] = A*s[idx] + B;
}

// d = A*s elementwise (saves the add when B is known to be zero).
__global__ void _cuda_affine_transform1_0(float* d, const float* s, size_t n, float A)
{
    for (auto idx : grid_stride_range(0, n))
        d[idx] = A*s[idx];
}

// dest = A*src + B, selecting the cheaper kernel when B == 0.
void affine_transform(
    tensor& dest,
    const tensor& src,
    const float A,
    const float B
)
{
    DLIB_CASSERT(dest.size()==src.size());
    if (B == 0)
        launch_kernel(_cuda_affine_transform1_0,max_jobs(dest.size()),dest.device(), src.device(), src.size(), A);
    else
        launch_kernel(_cuda_affine_transform1,max_jobs(dest.size()),dest.device(), src.device(), src.size(), A, B);
}

// dest = A*src.
void affine_transform(
    tensor& dest,
    const tensor& src,
    const float A
)
{
    DLIB_CASSERT(dest.size()==src.size());
    launch_kernel(_cuda_affine_transform1_0,max_jobs(dest.size()),dest.device(), src.device(), src.size(), A);
}
// ----------------------------------------------------------------------------------------
// Applies d = A*s1 + B*s2 + C*s3 only inside a rectangular window of a 2D
// view of the tensors: the window has rect_nc columns, lives in a row-major
// matrix with total_nc columns, starts at flat offset start_idx, and covers
// n = rect area elements.
__global__ void _cuda_affine_transform_rect(
float* d,
const float* s1,
const float* s2,
const float* s3,
float A,
float B,
float C,
size_t start_idx,
size_t n,
size_t rect_nc,
size_t total_nc
)
{
for (auto i : grid_stride_range(0, n))
{
size_t r = i/rect_nc;
size_t c = i%rect_nc;
size_t idx = r*total_nc + c + start_idx;
d[idx] = A*s1[idx] + B*s2[idx] + C*s3[idx];
}
}
// Host wrapper: each tensor is viewed as a num_samples x (size/num_samples)
// matrix, and `rect` must lie within that view (asserted below).
void affine_transform(
const rectangle& rect,
tensor& dest,
const tensor& src1,
const tensor& src2,
const tensor& src3,
float A,
float B,
float C
)
{
DLIB_CASSERT(dest.size() == src1.size());
DLIB_CASSERT(dest.size() == src2.size());
DLIB_CASSERT(dest.size() == src3.size());
DLIB_CASSERT(dest.num_samples() == src1.num_samples());
DLIB_CASSERT(dest.num_samples() == src2.num_samples());
DLIB_CASSERT(dest.num_samples() == src3.num_samples());
DLIB_CASSERT(rectangle(0,0, dest.size()/dest.num_samples()-1, dest.num_samples()-1).contains(rect));
launch_kernel(_cuda_affine_transform_rect,max_jobs(rect.area()),
dest.device(), src1.device(), src2.device(), src3.device(), A, B, C,
rect.left() + rect.top()*(dest.size()/dest.num_samples()),
rect.area(),
rect.width(),
dest.size()/dest.num_samples());
}
// ----------------------------------------------------------------------------------------
// d = A*s1 + B*s2 + C, element-wise.
__global__ void _cuda_affine_transform4(float* d, const float* s1, const float* s2, size_t n, float A, float B, float C)
{
    for (auto idx : grid_stride_range(0, n))
        d[idx] = A*s1[idx] + B*s2[idx] + C;
}
// Same as _cuda_affine_transform4 but with the constant term omitted.
__global__ void _cuda_affine_transform4_0(float* d, const float* s1, const float* s2, size_t n, float A, float B)
{
    for (auto idx : grid_stride_range(0, n))
        d[idx] = A*s1[idx] + B*s2[idx];
}
void affine_transform(
    tensor& dest,
    const tensor& src1,
    const tensor& src2,
    const float A,
    const float B,
    const float C
)
{
    // dest = A*src1 + B*src2 + C.  All tensors must hold the same number of
    // elements.  The C==0 case dispatches to the cheaper kernel.
    DLIB_CASSERT(dest.size()==src1.size());
    DLIB_CASSERT(dest.size()==src2.size());
    if (C == 0)
        launch_kernel(_cuda_affine_transform4_0,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(), dest.size(), A, B);
    else
        launch_kernel(_cuda_affine_transform4,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(), dest.size(), A, B, C);
}
void affine_transform(
    tensor& dest,
    const tensor& src1,
    const tensor& src2,
    const float A,
    const float B
)
{
    // dest = A*src1 + B*src2.
    DLIB_CASSERT(dest.size()==src1.size());
    DLIB_CASSERT(dest.size()==src2.size());
    launch_kernel(_cuda_affine_transform4_0,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(), dest.size(), A, B);
}
// ----------------------------------------------------------------------------------------
// In-place accumulate: d[i] += scale*s[i].
__global__ void _cuda_add_scaled(float* d, const float* s, size_t n, float scale)
{
    for (auto idx : grid_stride_range(0, n))
        d[idx] += scale*s[idx];
}
void add_scaled(
    tensor& dest,
    const float scale,
    const tensor& src
)
{
    // dest += scale*src.  Both tensors must contain the same number of
    // elements.
    DLIB_CASSERT(dest.size()==src.size());
    launch_kernel(_cuda_add_scaled,max_jobs(dest.size()),dest.device(), src.device(), dest.size(), scale);
}
// ----------------------------------------------------------------------------------------
// Broadcasts the column vector src across every column of dest:
// dest(r,c) = beta*dest(r,c) + alpha*src(r), where stride is the row length.
__global__ void _cuda_add_cv_to_all_columns(float beta, float* dest, float alpha, const float* src, size_t size, size_t stride)
{
for (auto i : grid_stride_range(0, size))
{
dest[i] = beta*dest[i] + alpha*src[i/stride];
}
}
// Same as above for the beta==0 case, which avoids reading dest.
__global__ void _cuda_add_cv_to_all_columns_no_beta(float* dest, float alpha, const float* src, size_t size, size_t stride)
{
for (auto i : grid_stride_range(0, size))
{
dest[i] = alpha*src[i/stride];
}
}
void add_cv_to_all_columns(
float beta,
tensor& dest,
float alpha,
const tensor& src
)
{
// dest = beta*dest + alpha*src, where src is a column vector (one value
// per sample, enforced by src.num_samples()==src.size()) that is broadcast
// across each sample's row of dest.
DLIB_CASSERT(dest.num_samples() == src.num_samples() && src.num_samples() == src.size());
if (beta == 0)
launch_kernel(_cuda_add_cv_to_all_columns_no_beta, max_jobs(dest.size()), dest.device(), alpha, src.device(), dest.size(), dest.size()/dest.num_samples());
else
launch_kernel(_cuda_add_cv_to_all_columns, max_jobs(dest.size()), beta, dest.device(), alpha, src.device(), dest.size(), dest.size()/dest.num_samples());
}
// ----------------------------------------------------------------------------------------
// d = A*s1 + B*s2 + C*s3 + D, element-wise.
__global__ void _cuda_affine_transform5(
    float* d, const float* s1, const float* s2, const float* s3, size_t n, float A, float B, float C, float D
)
{
    for (auto idx : grid_stride_range(0, n))
        d[idx] = A*s1[idx] + B*s2[idx] + C*s3[idx] + D;
}
void affine_transform(
    tensor& dest,
    const tensor& src1,
    const tensor& src2,
    const tensor& src3,
    const float A,
    const float B,
    const float C,
    const float D
)
{
    // dest = A*src1 + B*src2 + C*src3 + D.  All four tensors must hold the
    // same number of elements.
    DLIB_CASSERT(dest.size()==src1.size());
    DLIB_CASSERT(dest.size()==src2.size());
    DLIB_CASSERT(dest.size()==src3.size());
    launch_kernel(_cuda_affine_transform5,max_jobs(dest.size()),dest.device(), src1.device(),
                  src2.device(), src3.device(), dest.size(), A, B, C, D);
}
// ----------------------------------------------------------------------------------------
// d = A*s1 + B*s2 + C*s3, but only for flat indices in [begin, end).
__global__ void _cuda_affine_transform_range(
    float* d, const float* s1, const float* s2, const float* s3, size_t begin, size_t end, float A, float B, float C
)
{
    for (auto idx : grid_stride_range(begin, end))
        d[idx] = A*s1[idx] + B*s2[idx] + C*s3[idx];
}
void affine_transform_range(
    size_t begin,
    size_t end,
    tensor& dest,
    const tensor& src1,
    const tensor& src2,
    const tensor& src3,
    const float A,
    const float B,
    const float C
)
{
    // Applies the 3-input affine transform to the element range [begin, end)
    // only; elements outside the range are left untouched.
    DLIB_CASSERT(dest.size()==src1.size());
    DLIB_CASSERT(dest.size()==src2.size());
    DLIB_CASSERT(dest.size()==src3.size());
    DLIB_CASSERT(begin <= end && end <= dest.size());
    launch_kernel(_cuda_affine_transform_range,max_jobs(end-begin),
                  dest.device(), src1.device(),
                  src2.device(), src3.device(), begin, end, A, B, C);
}
// -----------------------------------------------------------------------------------
// Per-element affine: d[i] = A[i]*s[i] + B[i] (A and B same size as s).
__global__ void _cuda_affine_transform2(float* d, const float* s, size_t n, const float* A, const float* B)
{
for (auto i : grid_stride_range(0, n))
{
d[i] = A[i]*s[i] + B[i];
}
}
// Same but A and B hold one sample (bs elements) broadcast over all samples.
__global__ void _cuda_affine_transform3(float* d, const float* s, size_t n, const float* A, const float* B, size_t bs)
{
for (auto i : grid_stride_range(0, n))
{
d[i] = A[i%bs]*s[i] + B[i%bs];
}
}
void affine_transform(
tensor& dest,
const tensor& src,
const tensor& A,
const tensor& B
)
{
// dest = A*src + B where A and B are tensors.  A/B must either match
// src's dimensions exactly or have num_samples()==1, in which case the
// single sample is broadcast across all samples of src.
DLIB_CASSERT(have_same_dimensions(dest, src));
DLIB_CASSERT(
((A.num_samples()==1 && B.num_samples()==1) ||
(A.num_samples()==src.num_samples() && B.num_samples()==src.num_samples())));
DLIB_CASSERT(
A.nr()==B.nr() && B.nr()==src.nr() &&
A.nc()==B.nc() && B.nc()==src.nc() &&
A.k() ==B.k() && B.k()==src.k(),
"\nA.nr(): " << A.nr() << "\nB.nr(): " << B.nr() << "\nsrc.nr(): " << src.nr()
<<"\nA.nc(): " << A.nc() << "\nB.nc(): " << B.nc() << "\nsrc.nc(): " << src.nc()
<<"\nA.k(): " << A.k() << "\nB.k(): " << B.k() << "\nsrc.k(): " << src.k()
);
if (A.num_samples() == 1)
{
// Broadcast the single A/B sample over every sample of src.
launch_kernel(_cuda_affine_transform3,max_jobs(dest.size()),dest.device(), src.device(), src.size(), A.device(), B.device(), A.size());
}
else
{
launch_kernel(_cuda_affine_transform2,max_jobs(dest.size()),dest.device(), src.device(), src.size(), A.device(), B.device());
}
}
// ----------------------------------------------------------------------------------------
// Updates the ADAM first/second moment buffers m and v in place and writes the
// resulting parameter step into s, for elements in [begin, end).
__global__ void _cuda_compute_adam_update(
    size_t begin,
    size_t end,
    float* s,
    float* m,
    float* v,
    const float alpha,
    const float weight_decay,
    const float momentum1,
    const float momentum2,
    const float* params,
    const float* params_grad
)
{
    // Guard against division by zero.  Use a float literal so no double
    // precision arithmetic sneaks into the kernel.
    const float eps = 1e-8f;
    // The loop is equivalent to doing this:
    //   m = momentum1*m + (1-momentum1) * (weight_decay*params + params_grad);
    //   v = momentum2*v + (1-momentum2)*squared(weight_decay*params + params_grad);
    //   s = -alpha*m/(sqrt(v) + eps);
    for (auto i : grid_stride_range(begin, end))
    {
        // Effective gradient includes the L2 weight-decay term.
        float g = (weight_decay*params[i] + params_grad[i]);
        m[i] = momentum1*m[i] + (1-momentum1)*g;
        v[i] = momentum2*v[i] + (1-momentum2)*g*g;
        s[i] = -alpha*m[i]/(std::sqrt(v[i]) + eps);
    }
}
void compute_adam_update (
    size_t begin,
    size_t end,
    tensor& s,
    tensor& m,
    tensor& v,
    const float t,
    const float learning_rate,
    const float weight_decay,
    const float momentum1,
    const float momentum2,
    const tensor& params,
    const tensor& params_grad
)
{
    // Performs one ADAM optimizer step for the parameter elements in
    // [begin, end):  m and v are updated in place and the step to apply is
    // written into s.  t is the (1-based) iteration count used for the
    // standard ADAM bias correction.
    DLIB_CASSERT(s.size() == m.size() &&
    s.size() == v.size() &&
    s.size() == params.size() &&
    s.size() == params_grad.size());
    DLIB_CASSERT(begin <= end && end <= params.size());
    // Bias-corrected learning rate (use std::pow for consistency with the
    // std::sqrt on the same line).
    const float alpha = learning_rate*std::sqrt(1-std::pow(momentum2,t))/(1-std::pow(momentum1, t));
    launch_kernel(_cuda_compute_adam_update,max_jobs(end-begin),
    begin, end, s.device(), m.device(), v.device(), alpha, weight_decay,
    momentum1, momentum2, params.device(), params_grad.device());
}
// -----------------------------------------------------------------------------------
// Channel-wise affine (batch-norm style): with bs = nr*nc and ks = k, the
// index k = (i/bs)%ks selects the channel, so every element of channel k gets
// d[i] = A[k]*s[i] + B[k].
__global__ void _cuda_affine_transform_conv(float* d, const float* s, size_t n, const float* A, const float* B, size_t bs, size_t ks)
{
for (auto i : grid_stride_range(0, n))
{
auto k = (i/bs)%ks;
d[i] = A[k]*s[i] + B[k];
}
}
void affine_transform_conv(
tensor& dest,
const tensor& src,
const tensor& A,
const tensor& B
)
{
// dest = A*src + B with one scale/shift value per channel.  A and B must
// be 1x k x 1 x 1 tensors matching src's channel count.
DLIB_CASSERT(have_same_dimensions(dest, src));
DLIB_CASSERT(have_same_dimensions(A, B));
DLIB_CASSERT(A.num_samples() == 1 && A.nr() == 1 && A.nc() == 1 && A.k() == src.k());
launch_kernel(_cuda_affine_transform_conv,max_jobs(dest.size()),
dest.device(), src.device(), src.size(), A.device(), B.device(), src.nr()*src.nc(), src.k());
}
// -----------------------------------------------------------------------------------
// out[i] = sum over samples of in: positions i, i+n, i+2n, ... up to total_n.
// Each thread owns one output element, so no atomics are needed.
__global__ void _add_bias_gradient(float* out, const float* in, size_t n, size_t total_n)
{
for (auto i : grid_stride_range(0, n))
{
out[i] = in[i];
for (size_t j = i+n; j < total_n; j+=n)
out[i] += in[j];
}
}
void assign_bias_gradient (
tensor& grad,
const tensor& gradient_input
)
{
// Overwrites grad (a single-sample tensor) with gradient_input summed over
// its sample dimension.  k/nr/nc of the two tensors must match.
DLIB_CASSERT(
grad.num_samples() == 1 &&
gradient_input.k() == grad.k() &&
gradient_input.nr() == grad.nr() &&
gradient_input.nc() == grad.nc() &&
gradient_input.size() > 0);
launch_kernel(_add_bias_gradient,max_jobs(grad.size()),grad.device(), gradient_input.device(), grad.size(), gradient_input.size());
}
// ----------------------------------------------------------------------------------------
// Fills all n elements of out with val.
__global__ void _set_tensor(float* out, size_t n, const float val)
{
    for (auto idx : grid_stride_range(0, n))
    {
        out[idx] = val;
    }
}
void set_tensor (
    tensor& t,
    float value
)
{
    // Assigns the scalar `value` to every element of t.
    launch_kernel(_set_tensor, max_jobs(t.size()), t.device(), t.size(), value);
}
// ----------------------------------------------------------------------------------------
// In-place multiply of every element by val.
__global__ void _scale_tensor(float* out, size_t n, const float val)
{
    for (auto idx : grid_stride_range(0, n))
    {
        out[idx] *= val;
    }
}
void scale_tensor (
    tensor& t,
    float value
)
{
    // t *= value, element-wise, in place.
    launch_kernel(_scale_tensor, max_jobs(t.size()), t.device(), t.size(), value);
}
// -----------------------------------------------------------------------------------
// -----------------------------------------------------------------------------------
// Binarizes in place: 1 where d[i] > thresh, 0 otherwise.
__global__ void _cuda_threshold(float* d, size_t n, float thresh)
{
    for (auto idx : grid_stride_range(0, n))
    {
        d[idx] = (d[idx] > thresh) ? 1 : 0;
    }
}
void threshold (
    tensor& data,
    float thresh
)
{
    // Replaces each element of data with 1 if it exceeds thresh, else 0.
    launch_kernel(_cuda_threshold,max_jobs(data.size()),data.device(), data.size(), thresh);
}
// ------------------------------------------------------------------------------------
// Dot product of a and b, accumulated (added) into *result via a warp-level
// reduction plus atomics.  Note: *result is NOT zeroed here, so this adds the
// dot product onto whatever value is already stored there.
__global__ void _cuda_dot(const float* a, const float* b, size_t n, float* result)
{
// Parallel sum everything into local temp variables.
float temp = 0;
for(auto i : grid_stride_range(0, n))
temp += a[i]*b[i];
// Then do the warp reduce add thing to merge into one output value.
warp_reduce_atomic_add(*result, temp);
}
void dot (
const tensor& a,
const tensor& b,
tensor& result,
size_t idx
)
{
// Adds dot(a,b) into result's idx-th element (see kernel note above about
// accumulation).
DLIB_CASSERT(a.size() == b.size());
DLIB_CASSERT(idx < result.size());
launch_kernel(_cuda_dot, max_jobs(a.size()), a.device(), b.device(), a.size(), result.device()+idx);
}
// ----------------------------------------------------------------------------------------
// PReLU forward: d[i] = s[i] if s[i] > 0, else p*s[i], where p is the single
// learned parameter read from device memory (*pp).
__global__ void _cuda_prelu(const float* s, float* d, size_t n, const float* pp)
{
const float p = *pp;
for (auto i : grid_stride_range(0, n))
{
if (s[i] > 0)
d[i] = s[i];
else
d[i] = p*s[i];
}
}
void prelu (
tensor& dest,
const tensor& src,
const tensor& param
)
{
// Applies the parametric ReLU to src, writing into dest.  param holds the
// single slope used for negative inputs.
launch_kernel(_cuda_prelu, max_jobs(dest.size()),
src.device(), dest.device(), src.size(), param.device());
}
// ----------------------------------------------------------------------------------------
// PReLU backward.  Accumulates (+=) the input gradient into `out`:
// gi[i] where the input was positive, p*gi[i] otherwise.  The gradient of the
// slope parameter (sum of gi*s over negative inputs) is reduced warp-wise and
// atomically added into *ppgrad.
__global__ void _cuda_prelu_gradient(float* out, const float* s, const float* gi, size_t n, const float* pp, float* ppgrad)
{
const float p = *pp;
float pgrad = 0;
for(auto i : grid_stride_range(0, n))
{
if (s[i] > 0)
{
out[i] += gi[i];
}
else
{
out[i] += p*gi[i];
pgrad += gi[i]*s[i];
}
}
// Then do the warp reduce add thing to merge into one output value.
warp_reduce_atomic_add(*ppgrad, pgrad);
}
void prelu_gradient (
tensor& grad,
const tensor& src,
const tensor& gradient_input,
const tensor& param,
tensor& params_grad
)
{
// params_grad is zeroed first because the kernel accumulates into it with
// atomic adds.  grad, by contrast, is added to (not overwritten).
params_grad = 0;
launch_kernel(_cuda_prelu_gradient, max_jobs(grad.size()),
grad.device(), src.device(), gradient_input.device(), grad.size(),
param.device(), params_grad.device());
}
// ----------------------------------------------------------------------------------------
// Bilinear upsample/downsample for densely packed tensors.  Each output
// element i maps back to a fractional source coordinate (x,y); the four
// surrounding source pixels are blended.  Neighbor indices are clamped to the
// source extent so border pixels are handled.
__global__ void _cuda_resize_bilinear(size_t dsize, size_t dchan_size, size_t dnc, float* d,
size_t schan_size, int snr, int snc, const float* s,
const float x_scale, const float y_scale)
{
for(auto i : grid_stride_range(0, dsize))
{
// Decompose the flat index into (channel, row, col) of the output.
const int idx = i%dchan_size;
const int channel = i/dchan_size;
const int sidx = channel*schan_size;
const int r = idx/dnc;
const int c = idx%dnc;
// Fractional source position and its 4 clamped neighbors.
const float y = r*y_scale;
const int top = static_cast<int>(::floor(y));
const int bottom = ::min(top+1, snr-1);
const float tb_frac = y - top;
const float x = c*x_scale;
const int left = static_cast<int>(::floor(x));
const int right = ::min(left+1, snc-1);
const float lr_frac = x - left;
float tl = s[sidx+top*snc+left];
float tr = s[sidx+top*snc+right];
float bl = s[sidx+bottom*snc+left];
float br = s[sidx+bottom*snc+right];
// Standard bilinear blend of the 4 neighbors.
float temp = (1-tb_frac)*((1-lr_frac)*tl + lr_frac*tr) +
tb_frac*((1-lr_frac)*bl + lr_frac*br);
d[i] = temp;
}
}
// Variant for non-contiguous tensors: row and channel strides of both source
// and destination may differ from their dense nc / nr*nc values (e.g. when
// operating on a sub-tensor view).
__global__ void _cuda_resize_bilinear_strided(size_t dsize, size_t dchan_size, size_t dnc, float* d,
size_t schan_size, int snr, int snc, const float* s,
const float x_scale, const float y_scale,
size_t dest_row_stride, size_t src_row_stride, size_t dest_chan_size_strided
)
{
for(auto i : grid_stride_range(0, dsize))
{
const int idx = i%dchan_size;
const int channel = i/dchan_size;
const int sidx = channel*schan_size;
const int r = idx/dnc;
const int c = idx%dnc;
// Output address computed through the strided layout.
const int didx = channel*dest_chan_size_strided + r*dest_row_stride+c;
const float y = r*y_scale;
const int top = static_cast<int>(::floor(y));
const int bottom = ::min(top+1, snr-1);
const float tb_frac = y - top;
const float x = c*x_scale;
const int left = static_cast<int>(::floor(x));
const int right = ::min(left+1, snc-1);
const float lr_frac = x - left;
float tl = s[sidx+top*src_row_stride+left];
float tr = s[sidx+top*src_row_stride+right];
float bl = s[sidx+bottom*src_row_stride+left];
float br = s[sidx+bottom*src_row_stride+right];
float temp = (1-tb_frac)*((1-lr_frac)*tl + lr_frac*tr) +
tb_frac*((1-lr_frac)*bl + lr_frac*br);
d[didx] = temp;
}
}
void resize_bilinear (
tensor& dest,
long dest_row_stride,
long dest_channel_stride,
const tensor& src,
long src_row_stride,
long src_channel_stride
)
{
// Bilinearly resizes each channel of src into dest.  num_samples and k
// must agree; nr/nc may differ.  When both tensors are densely packed the
// simpler kernel is used, otherwise the strided variant.
DLIB_CASSERT(is_same_object(dest, src)==false);
DLIB_CASSERT(dest.num_samples() == src.num_samples());
DLIB_CASSERT(dest.k() == src.k());
if (dest.size() == 0 || src.size() == 0)
return;
// Map output pixel centers onto source coordinates; max() guards the
// degenerate 1-pixel-wide/tall cases.
const float x_scale = (src.nc()-1)/(float)std::max<long>((dest.nc()-1),1);
const float y_scale = (src.nr()-1)/(float)std::max<long>((dest.nr()-1),1);
if (dest.nc() == dest_row_stride && dest.nr()*dest.nc()==dest_channel_stride &&
src.nc() == src_row_stride && src.nr()*src.nc()==src_channel_stride)
{
launch_kernel(_cuda_resize_bilinear,
dest.size(), dest.nr()*dest.nc(), dest.nc(), dest.device(),
src.nr()*src.nc(), src.nr(), src.nc(), src.device(),
x_scale, y_scale);
}
else
{
launch_kernel(_cuda_resize_bilinear_strided,
dest.size(), dest.nr()*dest.nc(), dest.nc(), dest.device(),
src_channel_stride, src.nr(), src.nc(), src.device(),
x_scale, y_scale, dest_row_stride, src_row_stride, dest_channel_stride);
}
}
// ----------------------------------------------------------------------------------------
// Backward pass of bilinear resize: scatters each output-gradient value onto
// the 4 source pixels it was interpolated from, weighted by the same bilinear
// coefficients.  atomicAdd is required because many destination pixels can
// map onto the same source pixel.
__global__ void _cuda_resize_bilinear_gradient(size_t dsize, size_t dchan_size, size_t dnc, const float* d,
size_t schan_size, int snr, int snc, float* s,
const float x_scale, const float y_scale)
{
for(auto i : grid_stride_range(0, dsize))
{
const float tmp = d[i];
const int idx = i%dchan_size;
const int channel = i/dchan_size;
const int sidx = channel*schan_size;
const int r = idx/dnc;
const int c = idx%dnc;
// Same neighbor/fraction computation as the forward kernel.
const float y = r*y_scale;
const int top = static_cast<int>(::floor(y));
const int bottom = ::min(top+1, snr-1);
const float tb_frac = y - top;
const float x = c*x_scale;
const int left = static_cast<int>(::floor(x));
const int right = ::min(left+1, snc-1);
const float lr_frac = x - left;
atomicAdd(s+sidx+top*snc+left, tmp*(1-tb_frac)*(1-lr_frac));
atomicAdd(s+sidx+top*snc+right, tmp*(1-tb_frac)*(lr_frac));
atomicAdd(s+sidx+bottom*snc+left, tmp*(tb_frac)*(1-lr_frac));
atomicAdd(s+sidx+bottom*snc+right, tmp*(tb_frac)*(lr_frac));
}
}
// Strided variant: the gradient-input and gradient tensors may be
// non-contiguous views, so explicit row/channel strides are used.
__global__ void _cuda_resize_bilinear_gradient_strided(size_t dsize, size_t dchan_size, size_t dnc, const float* d,
size_t schan_size, int snr, int snc, float* s,
const float x_scale, const float y_scale,
size_t dest_row_stride, size_t src_row_stride, size_t dest_chan_size_strided
)
{
for(auto i : grid_stride_range(0, dsize))
{
const int idx = i%dchan_size;
const int channel = i/dchan_size;
const int didx = channel*dest_chan_size_strided;
const int sidx = channel*schan_size;
const int r = idx/dnc;
const int c = idx%dnc;
const float tmp = d[didx + r*dest_row_stride+c];
const float y = r*y_scale;
const int top = static_cast<int>(::floor(y));
const int bottom = ::min(top+1, snr-1);
const float tb_frac = y - top;
const float x = c*x_scale;
const int left = static_cast<int>(::floor(x));
const int right = ::min(left+1, snc-1);
const float lr_frac = x - left;
atomicAdd(s+sidx+top*src_row_stride+left, tmp*(1-tb_frac)*(1-lr_frac));
atomicAdd(s+sidx+top*src_row_stride+right, tmp*(1-tb_frac)*(lr_frac));
atomicAdd(s+sidx+bottom*src_row_stride+left, tmp*(tb_frac)*(1-lr_frac));
atomicAdd(s+sidx+bottom*src_row_stride+right, tmp*(tb_frac)*(lr_frac));
}
}
void resize_bilinear_gradient (
tensor& grad,
long grad_row_stride,
long grad_channel_stride,
const tensor& gradient_input,
long gradient_input_row_stride,
long gradient_input_channel_stride
)
{
// Accumulates the gradient of resize_bilinear into grad.  Note the kernel
// atomically adds into grad; it does not zero it first.
DLIB_CASSERT(is_same_object(grad, gradient_input)==false);
DLIB_CASSERT(gradient_input.num_samples() == grad.num_samples());
DLIB_CASSERT(gradient_input.k() == grad.k());
if (grad.size() == 0 || gradient_input.size() == 0)
return;
// Scales mirror the forward pass (grad plays the role of src there).
const float x_scale = (grad.nc()-1)/(float)std::max<long>((gradient_input.nc()-1),1);
const float y_scale = (grad.nr()-1)/(float)std::max<long>((gradient_input.nr()-1),1);
if (grad.nc() == grad_row_stride && grad.nr()*grad.nc()==grad_channel_stride &&
gradient_input.nc() == gradient_input_row_stride && gradient_input.nr()*gradient_input.nc()==gradient_input_channel_stride)
{
launch_kernel(_cuda_resize_bilinear_gradient,
gradient_input.size(), gradient_input.nr()*gradient_input.nc(), gradient_input.nc(), gradient_input.device(),
grad.nr()*grad.nc(), grad.nr(), grad.nc(), grad.device(),
x_scale, y_scale);
}
else
{
launch_kernel(_cuda_resize_bilinear_gradient_strided,
gradient_input.size(), gradient_input.nr()*gradient_input.nc(), gradient_input.nc(), gradient_input.device(),
grad_channel_stride, grad.nr(), grad.nc(), grad.device(),
x_scale, y_scale, gradient_input_row_stride, grad_row_stride, gradient_input_channel_stride);
}
}
// ----------------------------------------------------------------------------------------
// Adds a block_size-long slice from each sample of src into the matching
// slice of dest; consecutive samples are dest_stride / src_stride floats
// apart in the respective buffers.
__global__ void _cuda_copy_tensor_add_to (float* dest, size_t size, const float* src, size_t dest_stride, size_t src_stride, size_t block_size)
{
    for(auto i : grid_stride_range(0, size))
    {
        size_t blk = i/block_size;   // sample index
        size_t j = i%block_size;     // offset within the sample's slice
        dest[blk*dest_stride + j] += src[blk*src_stride + j];
    }
}
// Same addressing as _cuda_copy_tensor_add_to but overwrites instead of
// accumulating.
__global__ void _cuda_copy_tensor (float* dest, size_t size, const float* src, size_t dest_stride, size_t src_stride, size_t block_size)
{
    for(auto i : grid_stride_range(0, size))
    {
        size_t blk = i/block_size;
        size_t j = i%block_size;
        dest[blk*dest_stride + j] = src[blk*src_stride + j];
    }
}
void copy_tensor(
    bool add_to,
    tensor& dest,
    size_t dest_k_offset,
    const tensor& src,
    size_t src_k_offset,
    size_t count_k
)
{
    // Copies (or accumulates when add_to is true) count_k channels from src,
    // starting at channel src_k_offset, into dest starting at channel
    // dest_k_offset.  num_samples, nr and nc of the two tensors must match.
    const size_t dest_sample_size = static_cast<size_t>(dest.nc() * dest.nr() * dest.k());
    const size_t src_sample_size = static_cast<size_t>(src.nc() * src.nr() * src.k());
    const size_t block_size = count_k * dest.nc() * dest.nr();
    DLIB_CASSERT(dest.num_samples() == src.num_samples() &&
        dest.nc() == src.nc() && dest.nr() == src.nr(), "All sources should fit into dest tensor size");
    DLIB_CASSERT(dest.k() - dest_k_offset >= count_k, "Not enough space in dest tensor");
    DLIB_CASSERT(src.k() - src_k_offset >= count_k, "Not enough space in src tensor");
    // Advance both base pointers to the first channel of interest.
    float* dest_p = dest.device() + dest_k_offset * dest.nc() * dest.nr();
    const float* src_p = src.device() + src_k_offset * src.nc() * src.nr();  // fixed stray ';;'
    if (add_to)
    {
        launch_kernel(_cuda_copy_tensor_add_to, max_jobs(dest.size()),
            dest_p, block_size*dest.num_samples(),
            src_p, dest_sample_size, src_sample_size, block_size);
    }
    else
    {
        launch_kernel(_cuda_copy_tensor, max_jobs(dest.size()),
            dest_p, block_size*dest.num_samples(),
            src_p, dest_sample_size, src_sample_size, block_size);
    }
}
// ----------------------------------------------------------------------------------------
}
}
// Copyright (C) 2015 Davis E. King (davis@dlib.net)
// License: Boost Software License See LICENSE.txt for the full license.
#include "cuda_utils.h"
#include "cuda_dlib.h"
namespace dlib
{
namespace cuda
{
// -----------------------------------------------------------------------------------
// Makes `dev` the current CUDA device for this host thread.
void set_device (
int dev
)
{
CHECK_CUDA(cudaSetDevice(dev));
}
// Returns the index of the current CUDA device.
int get_device (
)
{
int dev = 0;
CHECK_CUDA(cudaGetDevice(&dev));
return dev;
}
// Returns the human-readable name of the given device.
std::string get_device_name (
int device
)
{
cudaDeviceProp props;
CHECK_CUDA(cudaGetDeviceProperties(&props, device));
return props.name;
}
// Makes host threads block (rather than spin) while waiting on the current
// device, trading latency for lower CPU usage.
void set_current_device_blocking_sync(
)
{
CHECK_CUDA(cudaSetDeviceFlags(cudaDeviceScheduleBlockingSync));
}
// Returns how many CUDA devices are visible to this process.
int get_num_devices (
)
{
int num_devices;
CHECK_CUDA(cudaGetDeviceCount(&num_devices));
return num_devices;
}
// True if device_id can directly access memory on peer_device_id.
bool can_access_peer (int device_id, int peer_device_id)
{
int can_access;
CHECK_CUDA(cudaDeviceCanAccessPeer(&can_access, device_id, peer_device_id));
return can_access != 0;
}
// Convenience overload using the devices that own the given tensors.
bool can_access_peer (const tensor& device, const tensor& peer_device)
{
return can_access_peer(device.device_id(), peer_device.device_id());
}
// Blocks until all pending work on device `dev` has completed.
void device_synchronize (int dev)
{
raii_set_device set_dev(dev);
CHECK_CUDA(cudaDeviceSynchronize());
}
void device_synchronize (const tensor& dev) { device_synchronize(dev.device_id()); }
// RAII helper: enables peer access from device_id to peer_device_id for the
// lifetime of the object.  If access was already enabled by someone else we
// don't take ownership (and therefore won't disable it in the destructor).
enable_peer_access::
enable_peer_access(
int device_id,
int peer_device_id
) : call_disable(false), device_id(device_id), peer_device_id(peer_device_id)
{
raii_set_device set_dev(device_id);
auto err = cudaDeviceEnablePeerAccess(peer_device_id, 0);
if (err == cudaSuccess)
{
// We enabled it, so we are responsible for disabling it later.
call_disable = true;
}
else if (err == cudaErrorPeerAccessAlreadyEnabled)
{
// call cudaGetLastError() to dispose of this error since we don't
// care.
auto err2 = cudaGetLastError();
if (err2 != cudaErrorPeerAccessAlreadyEnabled)
CHECK_CUDA(err2);
}
else
{
CHECK_CUDA(err);
}
}
// noexcept(false): CHECK_CUDA may throw if disabling peer access fails.
enable_peer_access::
~enable_peer_access() noexcept(false)
{
if (call_disable)
{
raii_set_device set_dev(device_id);
CHECK_CUDA(cudaDeviceDisablePeerAccess(peer_device_id));
}
}
// -----------------------------------------------------------------------------------
// -----------------------------------------------------------------------------------
// -----------------------------------------------------------------------------------
// For each of the nr rows of `data`, computes 1/sqrt(eps + sum of squares).
// Three phases separated by __syncthreads(): seed with eps, accumulate the
// squared sums via warp reduction + atomics, then invert.  The launch below
// uses gridDim.x == 1 so each row is only touched by one block, which is what
// makes the block-level barrier sufficient.
__global__ void _cuda_inverse_norms(float* invnorms, const float* data, size_t nr, size_t nc, const float eps)
{
// initialize invnorms before we begin.
for (auto i : grid_stride_range_y(0, nr))
for (auto j : grid_stride_range(0, 1))
invnorms[i] = eps;
__syncthreads();
for (auto i : grid_stride_range_y(0, nr))
{
auto p = data + i*nc;
float temp = 0;
for (auto j : grid_stride_range(0, nc))
temp += p[j]*p[j];
// and store the sum into invnorms[i]
warp_reduce_atomic_add(invnorms[i], temp);
}
__syncthreads();
for (auto i : grid_stride_range_y(0, nr))
for (auto j : grid_stride_range(0, 1))
invnorms[i] = 1.0/std::sqrt(invnorms[i]);
}
void inverse_norms (
resizable_tensor& invnorms,
const tensor& data,
const double eps
)
{
// invnorms is resized to one value per sample of data; data is treated as
// a num_samples x (size/num_samples) matrix.
invnorms.set_size(data.num_samples());
dim3 blocks(1,10); // x size 1 so we don't need to worry about inter-block synchronization (since only y spans blocks)
dim3 threads(32,32); // x size must be 32 because we are using warp_reduce_atomic_add() in the kernel.
_cuda_inverse_norms<<<blocks,threads>>>(invnorms.device(), data.device(), data.num_samples(), data.size()/data.num_samples(), eps);
}
// ----------------------------------------------------------------------------------------
// For each of the nr rows, computes the dot product of the corresponding rows
// of lhs and rhs into out[i].  Same two-phase zero-then-accumulate structure
// (and same single-x-block launch requirement) as _cuda_inverse_norms above.
__global__ void _cuda_dot_prods(float* out, const float* lhs, const float* rhs, size_t nr, size_t nc)
{
// initialize out before we begin.
for (auto i : grid_stride_range_y(0, nr))
for (auto j : grid_stride_range(0, 1))
out[i] = 0;
__syncthreads();
for (auto i : grid_stride_range_y(0, nr))
{
auto l = lhs + i*nc;
auto r = rhs + i*nc;
float temp = 0;
for (auto j : grid_stride_range(0, nc))
temp += l[j]*r[j];
// and store the sum into out[i]
warp_reduce_atomic_add(out[i], temp);
}
}
void dot_prods (
resizable_tensor& out,
const tensor& lhs,
const tensor& rhs
)
{
// out is resized to one dot product per sample; lhs and rhs are treated as
// num_samples x (size/num_samples) matrices.
out.set_size(lhs.num_samples());
dim3 blocks(1,10); // x size 1 so we don't need to worry about inter-block synchronization (since only y spans blocks)
dim3 threads(32,32); // x size must be 32 because we are using warp_reduce_atomic_add() in the kernel.
_cuda_dot_prods<<<blocks,threads>>>(out.device(), lhs.device(), rhs.device(), lhs.num_samples(), lhs.size()/lhs.num_samples());
}
// ----------------------------------------------------------------------------------------
// out(r,c) = m(r,c) * v(c): every row of m is scaled element-wise by v.
__global__ void _cuda_scale_columns(float* out, const float* m, const float* v, size_t nr, size_t nc)
{
    for (auto idx : grid_stride_range(0, nr*nc))
        out[idx] = m[idx]*v[idx%nc];
}
void scale_columns (
    tensor& out,
    const tensor& m,
    const tensor& v
)
{
    // Treats m as a num_samples x (size/num_samples) matrix and multiplies
    // each column by the corresponding element of v.
    launch_kernel(_cuda_scale_columns, max_jobs(m.size()), out.device(), m.device(), v.device(), m.num_samples(), m.size()/m.num_samples());
}
// ----------------------------------------------------------------------------------------
// out(r,c) = m(r,c) * v(r): each row r of m is scaled by the scalar v[r].
__global__ void _cuda_scale_rows(float* out, const float* m, const float* v, size_t nr, size_t nc)
{
    for (auto idx : grid_stride_range(0, nr*nc))
        out[idx] = m[idx]*v[idx/nc];
}
void scale_rows (
    tensor& out,
    const tensor& m,
    const tensor& v
)
{
    // Treats m as a num_samples x (size/num_samples) matrix and multiplies
    // every row by the corresponding element of v.
    launch_kernel(_cuda_scale_rows, max_jobs(m.size()), out.device(), m.device(), v.device(), m.num_samples(), m.size()/m.num_samples());
}
// ----------------------------------------------------------------------------------------
// Row-wise fused op: out(r,c) = (m1(r,c) - m2(r,c)*v1(r)) * v2(r).
__global__ void _cuda_scale_rows2(float* out, const float* m1, const float* m2, const float* v1, const float* v2, size_t nr, size_t nc)
{
for (auto j : grid_stride_range(0, nr*nc))
{
out[j] = (m1[j] - m2[j]*v1[j/nc]) * v2[j/nc];
}
}
// Same, but blends with the existing contents of out using beta.
__global__ void _cuda_scale_rows2_beta(const float beta, float* out, const float* m1, const float* m2, const float* v1, const float* v2, size_t nr, size_t nc)
{
for (auto j : grid_stride_range(0, nr*nc))
{
out[j] = beta*out[j] + (m1[j] - m2[j]*v1[j/nc]) * v2[j/nc];
}
}
void scale_rows2 (
float beta,
tensor& out,
const tensor& m1,
const tensor& m2,
const tensor& v1,
const tensor& v2
)
{
// out = beta*out + (m1 - m2*diag(v1)) scaled row-wise by v2; the beta==0
// case uses the kernel that never reads out.
if (beta == 0)
{
launch_kernel(_cuda_scale_rows2, max_jobs(m1.size()), out.device(),
m1.device(), m2.device(), v1.device(), v2.device(), m1.num_samples(),
m1.size()/m1.num_samples());
}
else
{
launch_kernel(_cuda_scale_rows2_beta, max_jobs(m1.size()), beta,
out.device(), m1.device(), m2.device(), v1.device(), v2.device(),
m1.num_samples(), m1.size()/m1.num_samples());
}
}
// ----------------------------------------------------------------------------------------
// dest[i] = e^src[i]
__global__ void _cuda_exp(float* dest, const float* src, size_t n)
{
    for (auto idx : grid_stride_range(0, n))
    {
        dest[idx] = ::exp(src[idx]);
    }
}
void exp (
    tensor& dest,
    const tensor& src
)
{
    // Element-wise natural exponential.  dest and src must be the same size.
    DLIB_ASSERT(dest.size() == src.size());
    launch_kernel(_cuda_exp, max_jobs(src.size()), dest.device(), src.device(), src.size());
}
// ----------------------------------------------------------------------------------------
// dest[i] = ln(src[i])
__global__ void _cuda_log(float* dest, const float* src, size_t n)
{
    for (auto idx : grid_stride_range(0, n))
    {
        dest[idx] = ::log(src[idx]);
    }
}
void log (
    tensor& dest,
    const tensor& src
)
{
    // Element-wise natural logarithm.  dest and src must be the same size.
    DLIB_ASSERT(dest.size() == src.size());
    launch_kernel(_cuda_log, max_jobs(src.size()), dest.device(), src.device(), src.size());
}
// ----------------------------------------------------------------------------------------
// dest[i] = log base 10 of src[i]
__global__ void _cuda_log10(float* dest, const float* src, size_t n)
{
    for (auto idx : grid_stride_range(0, n))
    {
        dest[idx] = ::log10(src[idx]);
    }
}
void log10 (
    tensor& dest,
    const tensor& src
)
{
    // Element-wise base-10 logarithm.  dest and src must be the same size.
    DLIB_ASSERT(dest.size() == src.size());
    launch_kernel(_cuda_log10, max_jobs(src.size()), dest.device(), src.device(), src.size());
}
// -----------------------------------------------------------------------------------
// d[i] = s1[i]*s2[i] when all three buffers have the same length.
__global__ void _cuda_multiply1(float* d, const float* s1, const float* s2, size_t n)
{
for (auto i : grid_stride_range(0, n))
{
d[i] = s1[i]*s2[i];
}
}
// Broadcasting multiply for the case where d is smaller than the inputs
// (dest.num_samples()==1 in the host wrapper): each output element sums the
// products over all the positions that fold onto it.  The modulo indexing
// wraps size-1 sample dimensions.
__global__ void _cuda_multiply2(float* d, const float* s1, const float* s2,
size_t n, size_t s1_n, size_t s2_n, size_t max_size)
{
for (auto i : grid_stride_range(0, n))
{
d[i] = 0;
for (size_t j = i; j < max_size; j += n)
d[i] += s1[j%s1_n]*s2[j%s2_n];
}
}
// Broadcasting multiply where d is the largest tensor: the smaller inputs are
// tiled over it via modulo indexing.
__global__ void _cuda_multiply3(float* d, const float* s1, const float* s2,
size_t n, size_t s1_n, size_t s2_n)
{
for (auto i : grid_stride_range(0, n))
{
d[i] = s1[i%s1_n]*s2[i%s2_n];
}
}
// The three *_add_to variants mirror the kernels above but accumulate into d
// instead of overwriting it.
__global__ void _cuda_multiply1_add_to(float* d, const float* s1, const float* s2, size_t n)
{
for (auto i : grid_stride_range(0, n))
{
d[i] += s1[i]*s2[i];
}
}
__global__ void _cuda_multiply2_add_to(float* d, const float* s1, const float* s2,
size_t n, size_t s1_n, size_t s2_n, size_t max_size)
{
for (auto i : grid_stride_range(0, n))
{
for (size_t j = i; j < max_size; j += n)
d[i] += s1[j%s1_n]*s2[j%s2_n];
}
}
__global__ void _cuda_multiply3_add_to(float* d, const float* s1, const float* s2,
size_t n, size_t s1_n, size_t s2_n)
{
for (auto i : grid_stride_range(0, n))
{
d[i] += s1[i%s1_n]*s2[i%s2_n];
}
}
void multiply (
    bool add_to,
    tensor& dest,
    const tensor& src1,
    const tensor& src2
)
{
    // Element-wise multiply with broadcasting over the sample dimension:
    //   dest  = src1 * src2   (add_to == false)
    //   dest += src1 * src2   (add_to == true)
    // k/nr/nc must match exactly; each tensor's num_samples must be either 1
    // or the common maximum MD (a size-1 sample dimension is broadcast, and a
    // size-1 dest accumulates the products over the sample dimension).
    DLIB_CASSERT(dest.k() == src1.k() && src1.k() == src2.k() &&
        dest.nr() == src1.nr() && src1.nr() == src2.nr() &&
        dest.nc() == src1.nc() && src1.nc() == src2.nc() );
    const long MD = std::max(std::max(dest.num_samples(),src1.num_samples()),src2.num_samples());
    DLIB_CASSERT((dest.num_samples()==1 || dest.num_samples()==MD) &&
        (src1.num_samples()==1 || src1.num_samples()==MD) &&
        (src2.num_samples()==1 || src2.num_samples()==MD) );
    if (dest.size() == 0)
        return;
    const size_t max_size = std::max(std::max(dest.size(),src1.size()),src2.size());
    // NOTE: removed the unused `dest.host()`/`src1.host()`/`src2.host()`
    // locals that used to sit here.  They were never read, but calling
    // host() forces a device->host transfer (and, for dest, marks the host
    // buffer as current) even though the kernels below only use the device
    // pointers.
    if (dest.size() == src1.size() && src1.size() == src2.size())
    {
        if (add_to)
            launch_kernel(_cuda_multiply1_add_to,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(), src1.size());
        else
            launch_kernel(_cuda_multiply1,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(), src1.size());
    }
    else if (dest.num_samples() == 1)
    {
        if (add_to)
            launch_kernel(_cuda_multiply2_add_to,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(),
                dest.size(), src1.size(), src2.size(), max_size);
        else
            launch_kernel(_cuda_multiply2,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(),
                dest.size(), src1.size(), src2.size(), max_size);
    }
    else
    {
        if (add_to)
            launch_kernel(_cuda_multiply3_add_to,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(),
                dest.size(), src1.size(), src2.size());
        else
            launch_kernel(_cuda_multiply3,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(),
                dest.size(), src1.size(), src2.size());
    }
}
// ------------------------------------------------------------------------------------
// Per-channel scaling: d[i] = s1[i]*s2[channel(i)], where the channel index is
// (i/bs)%ks (bs = elements per channel plane, ks = number of channels).
__global__ void _cuda_multiply_conv(float* d, const float* s1, size_t n, const float* s2, size_t bs, size_t ks)
{
    for (auto idx : grid_stride_range(0, n))
    {
        const auto chan = (idx/bs)%ks;
        d[idx] = s1[idx]*s2[chan];
    }
}
// Channel-wise reduction: for each channel k, d[k] = sum over planes i with
// i%ks==k of dot(s1 plane i, s2 plane i).  n = number of image planes, bs =
// elements per plane, ks = number of channels.  Must be launched with
// blockDim.x == 32 because warp_reduce_atomic_add() reduces across x lanes.
__global__ void _cuda_multiply_conv2(float* d, const float* s1, size_t n, const float* s2, size_t bs, size_t ks)
{
// zero initialize d before we begin.
for (auto i : grid_stride_range_y(0, ks))
for (auto j : grid_stride_range(0, 1))
d[i] = 0;
__syncthreads();
// NOTE(review): __syncthreads() only orders threads within a single block,
// while the launch in multiply_conv() uses several blocks along y.  The
// zeroing above and the atomic adds below therefore rely on cross-block
// ordering assumptions -- confirm the grid geometry makes this benign.
// loop over all the image planes
for (auto i : grid_stride_range_y(0, n))
{
// sum all the elements in the i-th image plane
float temp = 0;
for (auto j : grid_stride_range(i*bs, (i+1)*bs))
temp += s1[j]*s2[j];
auto k = i%ks;
// and store the sum into d[k]
warp_reduce_atomic_add(d[k], temp);
}
}
// Accumulating variant of _cuda_multiply_conv: d[i] += s1[i]*s2[(i/bs)%ks].
__global__ void _cuda_multiply_conv_add_to(float* d, const float* s1, size_t n, const float* s2, size_t bs, size_t ks)
{
    for (auto idx : grid_stride_range(0, n))
    {
        const auto chan = (idx/bs)%ks;
        d[idx] += s1[idx]*s2[chan];
    }
}
// Accumulating variant of _cuda_multiply_conv2: d[k] += per-channel dot sums.
// Does not zero d first, so callers must provide a valid initial d.  Must be
// launched with blockDim.x == 32 for warp_reduce_atomic_add().
__global__ void _cuda_multiply_conv2_add_to(float* d, const float* s1, size_t n, const float* s2, size_t bs, size_t ks)
{
// loop over all the image planes
for (auto i : grid_stride_range_y(0, n))
{
// sum all the elements in the i-th image plane
float temp = 0;
for (auto j : grid_stride_range(i*bs, (i+1)*bs))
temp += s1[j]*s2[j];
auto k = i%ks;
// and store the sum into d[k]
warp_reduce_atomic_add(d[k], temp);
}
}
// Two modes, selected by shape:
//  - dest same shape as src1: dest = src1 scaled per-channel by src2 (src2 is
//    a 1x k x1x1 vector of per-channel factors); += when add_to.
//  - otherwise: dest is a 1 x k x 1 x 1 channel vector receiving the
//    per-channel dot products of src1 and src2 (which must match in shape).
void multiply_conv (
bool add_to,
tensor& dest,
const tensor& src1,
const tensor& src2
)
{
if (have_same_dimensions(dest,src1))
{
DLIB_CASSERT(src2.num_samples() == 1 && src2.nr() == 1 && src2.nc() == 1 && src2.k() == src1.k());
if (dest.size() == 0)
return;
if (add_to)
launch_kernel(_cuda_multiply_conv_add_to,max_jobs(dest.size()),
dest.device(), src1.device(), src1.size(), src2.device(), src1.nr()*src1.nc(), src1.k());
else
launch_kernel(_cuda_multiply_conv,max_jobs(dest.size()),
dest.device(), src1.device(), src1.size(), src2.device(), src1.nr()*src1.nc(), src1.k());
}
else
{
DLIB_CASSERT(have_same_dimensions(src1,src2));
DLIB_CASSERT(dest.num_samples() == 1 && dest.nr() == 1 && dest.nc() == 1 && dest.k() == src1.k());
if (dest.size() == 0)
return;
// Fixed grid geometry: the conv2 kernels require blockDim.x == 32 and use
// grid_stride_range_y to split the planes across the y dimension.  These
// raw <<<>>> launches run on the default stream, unlike launch_kernel above.
dim3 blocks(1,10); // x size 1 so we don't need to worry about inter-block synchronization (since only y spans blocks)
dim3 threads(32,32); // x size must be 32 because we are using warp_reduce_atomic_add() in the kernel.
if (add_to)
_cuda_multiply_conv2_add_to<<<blocks,threads>>>(
dest.device(), src1.device(), src1.num_samples()*src1.k(), src2.device(), src1.nr()*src1.nc(), src1.k());
else
_cuda_multiply_conv2<<<blocks,threads>>>(
dest.device(), src1.device(), src1.num_samples()*src1.k(), src2.device(), src1.nr()*src1.nc(), src1.k());
}
}
// ------------------------------------------------------------------------------------
// Plain elementwise product: d[i] = s1[i]*s2[i] for i in [0, n).
__global__ void _cuda_mult1(float* d, const float* s1, const float* s2, size_t n)
{
    for (auto idx : grid_stride_range(0, n))
        d[idx] = s1[idx]*s2[idx];
}
// Accumulating elementwise product: d[i] += s1[i]*s2[i] for i in [0, n).
__global__ void _cuda_mult1_add_to(float* d, const float* s1, const float* s2, size_t n)
{
    for (auto idx : grid_stride_range(0, n))
        d[idx] += s1[idx]*s2[idx];
}
// Zero-padded elementwise product.  d has geometry dn x dk x dr x dc; each
// source is read at the same (n,k,r,c) coordinate but treated as 0 wherever
// that coordinate falls outside the source's own geometry.
__global__ void _cuda_mult2(float* d, const float* s1, const float* s2,
size_t dn, size_t dk, size_t dr, size_t dc,
size_t s1n, size_t s1k, size_t s1r, size_t s1c,
size_t s2n, size_t s2k, size_t s2r, size_t s2c)
{
    for (auto i : grid_stride_range(0, dn*dk*dr*dc))
    {
        size_t n,k,r,c;
        unpack_idx(i, dk,dr,dc, n,k,r,c);
        // In-bounds tests for each source at this destination coordinate.
        const bool in1 = n < s1n && k < s1k && r < s1r && c < s1c;
        const bool in2 = n < s2n && k < s2k && r < s2r && c < s2c;
        const float v1 = in1 ? s1[pack_idx(s1k,s1r,s1c, n,k,r,c)] : 0;
        const float v2 = in2 ? s2[pack_idx(s2k,s2r,s2c, n,k,r,c)] : 0;
        d[i] = v1*v2;
    }
}
// Accumulating variant of _cuda_mult2: d[i] += zero-padded s1*s2 product.
__global__ void _cuda_mult2_add_to(float* d, const float* s1, const float* s2,
size_t dn, size_t dk, size_t dr, size_t dc,
size_t s1n, size_t s1k, size_t s1r, size_t s1c,
size_t s2n, size_t s2k, size_t s2r, size_t s2c)
{
    for (auto i : grid_stride_range(0, dn*dk*dr*dc))
    {
        size_t n,k,r,c;
        unpack_idx(i, dk,dr,dc, n,k,r,c);
        // Sources read as 0 outside their own geometry.
        const bool in1 = n < s1n && k < s1k && r < s1r && c < s1c;
        const bool in2 = n < s2n && k < s2k && r < s2r && c < s2c;
        const float v1 = in1 ? s1[pack_idx(s1k,s1r,s1c, n,k,r,c)] : 0;
        const float v2 = in2 ? s2[pack_idx(s2k,s2r,s2c, n,k,r,c)] : 0;
        d[i] += v1*v2;
    }
}
// dest = src1*src2 (or dest += ... when add_to), where sources smaller than
// dest are implicitly zero padded to dest's geometry.
void multiply_zero_padded (
bool add_to,
tensor& dest,
const tensor& src1,
const tensor& src2
)
{
    if (dest.size() == 0)
        return;
    const bool same_shape = have_same_dimensions(dest, src1) &&
                            have_same_dimensions(dest, src2);
    if (same_shape)
    {
        // Fast path: no bounds checking needed.
        if (add_to)
            launch_kernel(_cuda_mult1_add_to,max_jobs(dest.size()), dest.device(), src1.device(), src2.device(), dest.size());
        else
            launch_kernel(_cuda_mult1,max_jobs(dest.size()), dest.device(), src1.device(), src2.device(), dest.size());
    }
    else if (add_to)
    {
        // General path with per-element bounds checks.
        launch_kernel(_cuda_mult2_add_to,max_jobs(dest.size()),
            dest.device(), src1.device(), src2.device(),
            dest.num_samples(), dest.k(), dest.nr(), dest.nc(),
            src1.num_samples(), src1.k(), src1.nr(), src1.nc(),
            src2.num_samples(), src2.k(), src2.nr(), src2.nc()
        );
    }
    else
    {
        // General path with per-element bounds checks.
        launch_kernel(_cuda_mult2,max_jobs(dest.size()),
            dest.device(), src1.device(), src2.device(),
            dest.num_samples(), dest.k(), dest.nr(), dest.nc(),
            src1.num_samples(), src1.k(), src1.nr(), src1.nc(),
            src2.num_samples(), src2.k(), src2.nr(), src2.nc()
        );
    }
}
// ------------------------------------------------------------------------------------
// Plain elementwise sum: d[i] = s1[i]+s2[i] for i in [0, n).
__global__ void _cuda_add1(float* d, const float* s1, const float* s2, size_t n)
{
    for (auto idx : grid_stride_range(0, n))
        d[idx] = s1[idx]+s2[idx];
}
// Zero-padded elementwise sum: like _cuda_mult2 but adds instead of
// multiplying.  Out-of-range source elements contribute 0.
__global__ void _cuda_add2(float* d, const float* s1, const float* s2,
size_t dn, size_t dk, size_t dr, size_t dc,
size_t s1n, size_t s1k, size_t s1r, size_t s1c,
size_t s2n, size_t s2k, size_t s2r, size_t s2c)
{
    for (auto i : grid_stride_range(0, dn*dk*dr*dc))
    {
        size_t n,k,r,c;
        unpack_idx(i, dk,dr,dc, n,k,r,c);
        const bool in1 = n < s1n && k < s1k && r < s1r && c < s1c;
        const bool in2 = n < s2n && k < s2k && r < s2r && c < s2c;
        const float v1 = in1 ? s1[pack_idx(s1k,s1r,s1c, n,k,r,c)] : 0;
        const float v2 = in2 ? s2[pack_idx(s2k,s2r,s2c, n,k,r,c)] : 0;
        d[i] = v1+v2;
    }
}
// dest = src1 + src2, treating elements outside a source's geometry as zero
// so the sources may be smaller than dest.
void add (
tensor& dest,
const tensor& src1,
const tensor& src2
)
{
    if (dest.size() == 0)
        return;
    const bool same_shape = have_same_dimensions(dest, src1) &&
                            have_same_dimensions(dest, src2);
    if (same_shape)
    {
        // Fast path: identical geometry, no bounds checks.
        launch_kernel(_cuda_add1,max_jobs(dest.size()), dest.device(), src1.device(), src2.device(), dest.size());
    }
    else
    {
        // General path with per-element bounds checking.
        launch_kernel(_cuda_add2,max_jobs(dest.size()),
            dest.device(), src1.device(), src2.device(),
            dest.num_samples(), dest.k(), dest.nr(), dest.nc(),
            src1.num_samples(), src1.k(), src1.nr(), src1.nc(),
            src2.num_samples(), src2.k(), src2.nr(), src2.nc()
        );
    }
}
// ------------------------------------------------------------------------------------
// d[i] = A*s[i] + B for i in [0, n).
__global__ void _cuda_affine_transform1(float* d, const float* s, size_t n, float A, float B)
{
    for (auto idx : grid_stride_range(0, n))
        d[idx] = A*s[idx] + B;
}
// Special case of _cuda_affine_transform1 with B == 0: d[i] = A*s[i].
__global__ void _cuda_affine_transform1_0(float* d, const float* s, size_t n, float A)
{
    for (auto idx : grid_stride_range(0, n))
        d[idx] = A*s[idx];
}
// dest = A*src + B.  Dispatches to the bias-free kernel when B is exactly 0.
void affine_transform(
tensor& dest,
const tensor& src,
const float A,
const float B
)
{
    DLIB_CASSERT(dest.size()==src.size());
    if (B != 0)
        launch_kernel(_cuda_affine_transform1,max_jobs(dest.size()),dest.device(), src.device(), src.size(), A, B);
    else
        launch_kernel(_cuda_affine_transform1_0,max_jobs(dest.size()),dest.device(), src.device(), src.size(), A);
}
// dest = A*src (no bias term).
void affine_transform(
tensor& dest,
const tensor& src,
const float A
)
{
    DLIB_CASSERT(dest.size()==src.size());
    launch_kernel(_cuda_affine_transform1_0,max_jobs(dest.size()),dest.device(), src.device(), src.size(), A);
}
// ----------------------------------------------------------------------------------------
// Applies d[idx] = A*s1[idx] + B*s2[idx] + C*s3[idx] over a rectangular
// sub-region of a row-major 2-D layout.  n is the number of elements in the
// rectangle, rect_nc its width, total_nc the full row width, and start_idx
// the flat offset of the rectangle's top-left corner.
__global__ void _cuda_affine_transform_rect(
float* d,
const float* s1,
const float* s2,
const float* s3,
float A,
float B,
float C,
size_t start_idx,
size_t n,
size_t rect_nc,
size_t total_nc
)
{
    for (auto i : grid_stride_range(0, n))
    {
        // Convert the rectangle-local index into a full-array index.
        const size_t row = i/rect_nc;
        const size_t col = i%rect_nc;
        const size_t idx = row*total_nc + col + start_idx;
        d[idx] = A*s1[idx] + B*s2[idx] + C*s3[idx];
    }
}
// Applies dest = A*src1 + B*src2 + C*src3 only inside the given rectangle,
// where each tensor is viewed as a 2-D array with one row per sample and
// size()/num_samples() columns.  Elements outside rect are left unchanged.
void affine_transform(
const rectangle& rect,
tensor& dest,
const tensor& src1,
const tensor& src2,
const tensor& src3,
float A,
float B,
float C
)
{
DLIB_CASSERT(dest.size() == src1.size());
DLIB_CASSERT(dest.size() == src2.size());
DLIB_CASSERT(dest.size() == src3.size());
DLIB_CASSERT(dest.num_samples() == src1.num_samples());
DLIB_CASSERT(dest.num_samples() == src2.num_samples());
DLIB_CASSERT(dest.num_samples() == src3.num_samples());
// rect must lie within the num_samples x (size/num_samples) view of dest.
DLIB_CASSERT(rectangle(0,0, dest.size()/dest.num_samples()-1, dest.num_samples()-1).contains(rect));
// start_idx = flat offset of rect's top-left corner; each kernel thread maps
// its rectangle-local index back into the full row-major layout.
launch_kernel(_cuda_affine_transform_rect,max_jobs(rect.area()),
dest.device(), src1.device(), src2.device(), src3.device(), A, B, C,
rect.left() + rect.top()*(dest.size()/dest.num_samples()),
rect.area(),
rect.width(),
dest.size()/dest.num_samples());
}
// ----------------------------------------------------------------------------------------
// d[i] = A*s1[i] + B*s2[i] + C for i in [0, n).
__global__ void _cuda_affine_transform4(float* d, const float* s1, const float* s2, size_t n, float A, float B, float C)
{
    for (auto idx : grid_stride_range(0, n))
        d[idx] = A*s1[idx] + B*s2[idx] + C;
}
// Special case of _cuda_affine_transform4 with C == 0: d[i] = A*s1[i] + B*s2[i].
__global__ void _cuda_affine_transform4_0(float* d, const float* s1, const float* s2, size_t n, float A, float B)
{
    for (auto idx : grid_stride_range(0, n))
        d[idx] = A*s1[idx] + B*s2[idx];
}
// dest = A*src1 + B*src2 + C.  Uses the constant-free kernel when C is 0.
void affine_transform(
tensor& dest,
const tensor& src1,
const tensor& src2,
const float A,
const float B,
const float C
)
{
    DLIB_CASSERT(dest.size()==src1.size());
    DLIB_CASSERT(dest.size()==src2.size());
    if (C != 0)
        launch_kernel(_cuda_affine_transform4,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(), dest.size(), A, B, C);
    else
        launch_kernel(_cuda_affine_transform4_0,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(), dest.size(), A, B);
}
// dest = A*src1 + B*src2 (no constant term).
void affine_transform(
tensor& dest,
const tensor& src1,
const tensor& src2,
const float A,
const float B
)
{
    DLIB_CASSERT(dest.size()==src1.size());
    DLIB_CASSERT(dest.size()==src2.size());
    launch_kernel(_cuda_affine_transform4_0,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(), dest.size(), A, B);
}
// ----------------------------------------------------------------------------------------
// Scaled accumulate: d[i] += scale*s[i] for i in [0, n).
__global__ void _cuda_add_scaled(float* d, const float* s, size_t n, float scale)
{
    for (auto idx : grid_stride_range(0, n))
        d[idx] += scale*s[idx];
}
// dest += scale*src, elementwise.
void add_scaled(
tensor& dest,
const float scale,
const tensor& src
)
{
    DLIB_CASSERT(dest.size()==src.size());
    launch_kernel(_cuda_add_scaled,max_jobs(dest.size()),dest.device(), src.device(), dest.size(), scale);
}
// ----------------------------------------------------------------------------------------
// dest[i] = beta*dest[i] + alpha*src[i/stride]: broadcasts the column vector
// src (one entry per row of `stride` elements) across every column of dest.
__global__ void _cuda_add_cv_to_all_columns(float beta, float* dest, float alpha, const float* src, size_t size, size_t stride)
{
    for (auto idx : grid_stride_range(0, size))
    {
        const size_t row = idx/stride;
        dest[idx] = beta*dest[idx] + alpha*src[row];
    }
}
// beta==0 variant: dest[i] = alpha*src[i/stride], overwriting dest entirely.
__global__ void _cuda_add_cv_to_all_columns_no_beta(float* dest, float alpha, const float* src, size_t size, size_t stride)
{
    for (auto idx : grid_stride_range(0, size))
    {
        const size_t row = idx/stride;
        dest[idx] = alpha*src[row];
    }
}
// dest = beta*dest + alpha*src broadcast column-wise: src holds one value per
// sample which is added to every element of the corresponding row of dest.
// The beta==0 case uses a kernel that never reads dest (so dest may be
// uninitialized).
void add_cv_to_all_columns(
float beta,
tensor& dest,
float alpha,
const tensor& src
)
{
    DLIB_CASSERT(dest.num_samples() == src.num_samples() && src.num_samples() == src.size());
    if (beta == 0)
        launch_kernel(_cuda_add_cv_to_all_columns_no_beta, max_jobs(dest.size()), dest.device(), alpha, src.device(), dest.size(), dest.size()/dest.num_samples());
    else
        launch_kernel(_cuda_add_cv_to_all_columns, max_jobs(dest.size()), beta, dest.device(), alpha, src.device(), dest.size(), dest.size()/dest.num_samples());
}
// ----------------------------------------------------------------------------------------
// d[i] = A*s1[i] + B*s2[i] + C*s3[i] + D for i in [0, n).
__global__ void _cuda_affine_transform5(
float* d, const float* s1, const float* s2, const float* s3, size_t n, float A, float B, float C, float D
)
{
    for (auto idx : grid_stride_range(0, n))
        d[idx] = A*s1[idx] + B*s2[idx] + C*s3[idx] + D;
}
// dest = A*src1 + B*src2 + C*src3 + D, elementwise over equally sized tensors.
void affine_transform(
tensor& dest,
const tensor& src1,
const tensor& src2,
const tensor& src3,
const float A,
const float B,
const float C,
const float D
)
{
    DLIB_CASSERT(dest.size()==src1.size());
    DLIB_CASSERT(dest.size()==src2.size());
    DLIB_CASSERT(dest.size()==src3.size());
    launch_kernel(_cuda_affine_transform5,max_jobs(dest.size()),dest.device(), src1.device(),
        src2.device(), src3.device(), dest.size(), A, B, C, D);
}
// ----------------------------------------------------------------------------------------
// d[i] = A*s1[i] + B*s2[i] + C*s3[i] for i in [begin, end) only.
__global__ void _cuda_affine_transform_range(
float* d, const float* s1, const float* s2, const float* s3, size_t begin, size_t end, float A, float B, float C
)
{
    for (auto idx : grid_stride_range(begin, end))
        d[idx] = A*s1[idx] + B*s2[idx] + C*s3[idx];
}
// Applies dest[i] = A*src1[i] + B*src2[i] + C*src3[i] for i in [begin, end);
// elements outside the range are untouched.
void affine_transform_range(
size_t begin,
size_t end,
tensor& dest,
const tensor& src1,
const tensor& src2,
const tensor& src3,
const float A,
const float B,
const float C
)
{
    DLIB_CASSERT(dest.size()==src1.size());
    DLIB_CASSERT(dest.size()==src2.size());
    DLIB_CASSERT(dest.size()==src3.size());
    DLIB_CASSERT(begin <= end && end <= dest.size());
    launch_kernel(_cuda_affine_transform_range,max_jobs(end-begin),
        dest.device(), src1.device(),
        src2.device(), src3.device(), begin, end, A, B, C);
}
// -----------------------------------------------------------------------------------
// Per-element affine map: d[i] = A[i]*s[i] + B[i], all arrays length n.
__global__ void _cuda_affine_transform2(float* d, const float* s, size_t n, const float* A, const float* B)
{
    for (auto idx : grid_stride_range(0, n))
        d[idx] = A[idx]*s[idx] + B[idx];
}
// Broadcast affine map: d[i] = A[i%bs]*s[i] + B[i%bs], where A and B hold one
// sample (bs elements) reused cyclically across all samples of s.
__global__ void _cuda_affine_transform3(float* d, const float* s, size_t n, const float* A, const float* B, size_t bs)
{
    for (auto idx : grid_stride_range(0, n))
    {
        const auto j = idx%bs;
        d[idx] = A[j]*s[idx] + B[j];
    }
}
// dest = A*src + B elementwise, where A and B are tensors.  A and B must have
// the same k/nr/nc as src and either one sample (broadcast across all samples
// of src) or the same number of samples as src.
void affine_transform(
tensor& dest,
const tensor& src,
const tensor& A,
const tensor& B
)
{
DLIB_CASSERT(have_same_dimensions(dest, src));
DLIB_CASSERT(
((A.num_samples()==1 && B.num_samples()==1) ||
(A.num_samples()==src.num_samples() && B.num_samples()==src.num_samples())));
DLIB_CASSERT(
A.nr()==B.nr() && B.nr()==src.nr() &&
A.nc()==B.nc() && B.nc()==src.nc() &&
A.k() ==B.k() && B.k()==src.k(),
"\nA.nr(): " << A.nr() << "\nB.nr(): " << B.nr() << "\nsrc.nr(): " << src.nr()
<<"\nA.nc(): " << A.nc() << "\nB.nc(): " << B.nc() << "\nsrc.nc(): " << src.nc()
<<"\nA.k(): " << A.k() << "\nB.k(): " << B.k() << "\nsrc.k(): " << src.k()
);
if (A.num_samples() == 1)
{
// Broadcast case: one A/B sample reused for every sample of src.
launch_kernel(_cuda_affine_transform3,max_jobs(dest.size()),dest.device(), src.device(), src.size(), A.device(), B.device(), A.size());
}
else
{
// Full case: A/B aligned one-to-one with src.
launch_kernel(_cuda_affine_transform2,max_jobs(dest.size()),dest.device(), src.device(), src.size(), A.device(), B.device());
}
}
// ----------------------------------------------------------------------------------------
// One Adam optimizer step over elements [begin, end): updates the first and
// second moment estimates m and v in place and writes the parameter step into
// s.  alpha is the (already bias-corrected) learning rate computed by the
// host wrapper; weight_decay folds an L2 term into the gradient.
__global__ void _cuda_compute_adam_update(
size_t begin,
size_t end,
float* s,
float* m,
float* v,
const float alpha,
const float weight_decay,
const float momentum1,
const float momentum2,
const float* params,
const float* params_grad
)
{
// Small constant guarding against division by a zero denominator below.
const float eps = 1e-8;
// The loop is equivalent to doing this:
// m = momentum1*m + (1-momentum1) * (weight_decay*params + params_grad);
// v = momentum2*v + (1-momentum2)*squared(weight_decay*params + params_grad);
// s = -alpha*m/(sqrt(v) + eps);
for (auto i : grid_stride_range(begin, end))
{
float g = (weight_decay*params[i] + params_grad[i]);
m[i] = momentum1*m[i] + (1-momentum1)*g;
v[i] = momentum2*v[i] + (1-momentum2)*g*g;
s[i] = -alpha*m[i]/(std::sqrt(v[i]) + eps);
}
}
// Host wrapper for the Adam update over elements [begin, end).  t is the
// 1-based iteration count used for bias correction: the effective step size
// alpha folds the standard sqrt(1-momentum2^t)/(1-momentum1^t) correction
// into the learning rate before launching the kernel.
void compute_adam_update (
size_t begin,
size_t end,
tensor& s,
tensor& m,
tensor& v,
const float t,
const float learning_rate,
const float weight_decay,
const float momentum1,
const float momentum2,
const tensor& params,
const tensor& params_grad
)
{
DLIB_CASSERT(s.size() == m.size() &&
s.size() == v.size() &&
s.size() == params.size() &&
s.size() == params_grad.size());
DLIB_CASSERT(begin <= end && end <= params.size());
// Bias-corrected learning rate (see Kingma & Ba's Adam update rule).
const float alpha = learning_rate*std::sqrt(1-std::pow(momentum2,t))/(1-std::pow(momentum1, t));
launch_kernel(_cuda_compute_adam_update,max_jobs(end-begin),
begin, end, s.device(), m.device(), v.device(), alpha, weight_decay,
momentum1, momentum2, params.device(), params_grad.device());
}
// -----------------------------------------------------------------------------------
// Per-channel affine map: d[i] = A[chan]*s[i] + B[chan] with chan = (i/bs)%ks
// (bs = elements per channel plane, ks = number of channels).
__global__ void _cuda_affine_transform_conv(float* d, const float* s, size_t n, const float* A, const float* B, size_t bs, size_t ks)
{
    for (auto idx : grid_stride_range(0, n))
    {
        const auto chan = (idx/bs)%ks;
        d[idx] = A[chan]*s[idx] + B[chan];
    }
}
// dest = A*src + B where A and B are 1 x k x 1 x 1 per-channel parameter
// tensors broadcast over every spatial position and sample of src.
void affine_transform_conv(
tensor& dest,
const tensor& src,
const tensor& A,
const tensor& B
)
{
    DLIB_CASSERT(have_same_dimensions(dest, src));
    DLIB_CASSERT(have_same_dimensions(A, B));
    DLIB_CASSERT(A.num_samples() == 1 && A.nr() == 1 && A.nc() == 1 && A.k() == src.k());
    launch_kernel(_cuda_affine_transform_conv,max_jobs(dest.size()),
        dest.device(), src.device(), src.size(), A.device(), B.device(), src.nr()*src.nc(), src.k());
}
// -----------------------------------------------------------------------------------
// Sums the per-sample gradients into a single bias gradient: out has n
// elements, in has total_n (a multiple of n), and out[i] becomes the sum of
// in[i], in[i+n], in[i+2n], ...  The first term overwrites out, so no prior
// initialization of out is required.
__global__ void _add_bias_gradient(float* out, const float* in, size_t n, size_t total_n)
{
for (auto i : grid_stride_range(0, n))
{
out[i] = in[i];
for (size_t j = i+n; j < total_n; j+=n)
out[i] += in[j];
}
}
// grad = sum of gradient_input over its sample dimension.  grad must be a
// single-sample tensor whose k/nr/nc match gradient_input's.
void assign_bias_gradient (
tensor& grad,
const tensor& gradient_input
)
{
DLIB_CASSERT(
grad.num_samples() == 1 &&
gradient_input.k() == grad.k() &&
gradient_input.nr() == grad.nr() &&
gradient_input.nc() == grad.nc() &&
gradient_input.size() > 0);
launch_kernel(_add_bias_gradient,max_jobs(grad.size()),grad.device(), gradient_input.device(), grad.size(), gradient_input.size());
}
// ----------------------------------------------------------------------------------------
// Fills out[0..n) with the constant val.
__global__ void _set_tensor(float* out, size_t n, const float val)
{
    for (auto idx : grid_stride_range(0, n))
        out[idx] = val;
}
// Sets every element of t to value.
void set_tensor (
tensor& t,
float value
)
{
    launch_kernel(_set_tensor, max_jobs(t.size()), t.device(), t.size(), value);
}
// ----------------------------------------------------------------------------------------
// Multiplies out[0..n) in place by the constant val.
__global__ void _scale_tensor(float* out, size_t n, const float val)
{
    for (auto idx : grid_stride_range(0, n))
        out[idx] *= val;
}
// Scales every element of t by value, in place.
void scale_tensor (
tensor& t,
float value
)
{
    launch_kernel(_scale_tensor, max_jobs(t.size()), t.device(), t.size(), value);
}
// -----------------------------------------------------------------------------------
// -----------------------------------------------------------------------------------
// Binarize in place: d[i] becomes 1 when d[i] > thresh, otherwise 0.
__global__ void _cuda_threshold(float* d, size_t n, float thresh)
{
    for (auto idx : grid_stride_range(0, n))
        d[idx] = (d[idx] > thresh) ? 1 : 0;
}
// Replaces each element of data with 1 if it exceeds thresh, else 0.
void threshold (
tensor& data,
float thresh
)
{
    launch_kernel(_cuda_threshold,max_jobs(data.size()),data.device(), data.size(), thresh);
}
// ------------------------------------------------------------------------------------
// Accumulates dot(a, b) over [0, n) INTO *result via warp_reduce_atomic_add;
// *result is not zeroed here, so the caller controls its initial value.
// Requires the x block dimension used by launch_kernel to be warp sized for
// the warp reduce.
__global__ void _cuda_dot(const float* a, const float* b, size_t n, float* result)
{
// Parallel sum everything into local temp variables.
float temp = 0;
for(auto i : grid_stride_range(0, n))
temp += a[i]*b[i];
// Then do the warp reduce add thing to merge into one output value.
warp_reduce_atomic_add(*result, temp);
}
// Adds dot(a, b) into result[idx].  Note the accumulate-into semantics: the
// kernel atomically adds partial sums onto the existing result[idx] value.
void dot (
const tensor& a,
const tensor& b,
tensor& result,
size_t idx
)
{
    DLIB_CASSERT(a.size() == b.size());
    DLIB_CASSERT(idx < result.size());
    launch_kernel(_cuda_dot, max_jobs(a.size()), a.device(), b.device(), a.size(), result.device()+idx);
}
// ----------------------------------------------------------------------------------------
// PReLU forward: d[i] = s[i] when positive, otherwise (*pp)*s[i], where pp
// points at the single learned slope parameter.
__global__ void _cuda_prelu(const float* s, float* d, size_t n, const float* pp)
{
    const float slope = *pp;
    for (auto idx : grid_stride_range(0, n))
        d[idx] = (s[idx] > 0) ? s[idx] : slope*s[idx];
}
// dest = PReLU(src) using the single slope parameter stored in param.
void prelu (
tensor& dest,
const tensor& src,
const tensor& param
)
{
    launch_kernel(_cuda_prelu, max_jobs(dest.size()),
        src.device(), dest.device(), src.size(), param.device());
}
// ----------------------------------------------------------------------------------------
// PReLU backward pass.  Accumulates the input gradient into out (out[i] +=
// gi[i] where s[i] > 0, else += p*gi[i]) and atomically accumulates the slope
// parameter's gradient (sum of gi[i]*s[i] over negative s[i]) into *ppgrad.
// *ppgrad must be zeroed by the caller before launch.  Requires a warp-sized
// x block dimension for warp_reduce_atomic_add.
__global__ void _cuda_prelu_gradient(float* out, const float* s, const float* gi, size_t n, const float* pp, float* ppgrad)
{
const float p = *pp;
float pgrad = 0;
for(auto i : grid_stride_range(0, n))
{
if (s[i] > 0)
{
out[i] += gi[i];
}
else
{
out[i] += p*gi[i];
pgrad += gi[i]*s[i];
}
}
// Then do the warp reduce add thing to merge into one output value.
warp_reduce_atomic_add(*ppgrad, pgrad);
}
// Backpropagates through PReLU: adds the data gradient into grad and writes
// the slope parameter's gradient into params_grad (zeroed here first because
// the kernel accumulates atomically into it).
void prelu_gradient (
tensor& grad,
const tensor& src,
const tensor& gradient_input,
const tensor& param,
tensor& params_grad
)
{
params_grad = 0;
launch_kernel(_cuda_prelu_gradient, max_jobs(grad.size()),
grad.device(), src.device(), gradient_input.device(), grad.size(),
param.device(), params_grad.device());
}
// ----------------------------------------------------------------------------------------
// Bilinear upsampling/downsampling for densely packed channel planes.  Each
// destination element i is decomposed into (channel, row, col); the matching
// source coordinate is found via x_scale/y_scale and the four surrounding
// source texels are blended.  snr/snc are the source plane's rows/cols;
// dchan_size/schan_size are elements per destination/source channel plane.
__global__ void _cuda_resize_bilinear(size_t dsize, size_t dchan_size, size_t dnc, float* d,
size_t schan_size, int snr, int snc, const float* s,
const float x_scale, const float y_scale)
{
for(auto i : grid_stride_range(0, dsize))
{
const int idx = i%dchan_size;
const int channel = i/dchan_size;
const int sidx = channel*schan_size;
const int r = idx/dnc;
const int c = idx%dnc;
// Fractional source position and its integer neighborhood, clamped to the
// last row/column at the borders.
const float y = r*y_scale;
const int top = static_cast<int>(::floor(y));
const int bottom = ::min(top+1, snr-1);
const float tb_frac = y - top;
const float x = c*x_scale;
const int left = static_cast<int>(::floor(x));
const int right = ::min(left+1, snc-1);
const float lr_frac = x - left;
float tl = s[sidx+top*snc+left];
float tr = s[sidx+top*snc+right];
float bl = s[sidx+bottom*snc+left];
float br = s[sidx+bottom*snc+right];
// Standard bilinear blend of the four neighbors.
float temp = (1-tb_frac)*((1-lr_frac)*tl + lr_frac*tr) +
tb_frac*((1-lr_frac)*bl + lr_frac*br);
d[i] = temp;
}
}
// Strided variant of _cuda_resize_bilinear: destination and source rows and
// channel planes may be padded, so the output index is rebuilt from explicit
// row/channel strides rather than assuming dense packing.  The loop variable
// i still enumerates logical (unpadded) destination elements.
__global__ void _cuda_resize_bilinear_strided(size_t dsize, size_t dchan_size, size_t dnc, float* d,
size_t schan_size, int snr, int snc, const float* s,
const float x_scale, const float y_scale,
size_t dest_row_stride, size_t src_row_stride, size_t dest_chan_size_strided
)
{
for(auto i : grid_stride_range(0, dsize))
{
const int idx = i%dchan_size;
const int channel = i/dchan_size;
const int sidx = channel*schan_size;
const int r = idx/dnc;
const int c = idx%dnc;
// Physical (strided) destination offset for this logical element.
const int didx = channel*dest_chan_size_strided + r*dest_row_stride+c;
const float y = r*y_scale;
const int top = static_cast<int>(::floor(y));
const int bottom = ::min(top+1, snr-1);
const float tb_frac = y - top;
const float x = c*x_scale;
const int left = static_cast<int>(::floor(x));
const int right = ::min(left+1, snc-1);
const float lr_frac = x - left;
float tl = s[sidx+top*src_row_stride+left];
float tr = s[sidx+top*src_row_stride+right];
float bl = s[sidx+bottom*src_row_stride+left];
float br = s[sidx+bottom*src_row_stride+right];
float temp = (1-tb_frac)*((1-lr_frac)*tl + lr_frac*tr) +
tb_frac*((1-lr_frac)*bl + lr_frac*br);
d[didx] = temp;
}
}
// Bilinearly resizes each channel plane of src into dest.  The stride
// parameters let either tensor be a view with padded rows/channels; when the
// strides match dense packing the faster unstrided kernel is used.
void resize_bilinear (
tensor& dest,
long dest_row_stride,
long dest_channel_stride,
const tensor& src,
long src_row_stride,
long src_channel_stride
)
{
DLIB_CASSERT(is_same_object(dest, src)==false);
DLIB_CASSERT(dest.num_samples() == src.num_samples());
DLIB_CASSERT(dest.k() == src.k());
if (dest.size() == 0 || src.size() == 0)
return;
// Map destination pixel coordinates to source coordinates; the max(...,1)
// guards against division by zero for single-row/column outputs.
const float x_scale = (src.nc()-1)/(float)std::max<long>((dest.nc()-1),1);
const float y_scale = (src.nr()-1)/(float)std::max<long>((dest.nr()-1),1);
if (dest.nc() == dest_row_stride && dest.nr()*dest.nc()==dest_channel_stride &&
src.nc() == src_row_stride && src.nr()*src.nc()==src_channel_stride)
{
// Densely packed: use the simpler kernel.
launch_kernel(_cuda_resize_bilinear,
dest.size(), dest.nr()*dest.nc(), dest.nc(), dest.device(),
src.nr()*src.nc(), src.nr(), src.nc(), src.device(),
x_scale, y_scale);
}
else
{
launch_kernel(_cuda_resize_bilinear_strided,
dest.size(), dest.nr()*dest.nc(), dest.nc(), dest.device(),
src_channel_stride, src.nr(), src.nc(), src.device(),
x_scale, y_scale, dest_row_stride, src_row_stride, dest_channel_stride);
}
}
// ----------------------------------------------------------------------------------------
// Gradient of bilinear resize for densely packed planes: each incoming
// gradient element d[i] is scattered to the four source texels it was blended
// from, weighted by the same bilinear coefficients.  Uses atomicAdd because
// neighboring destination pixels share source texels.  s must be
// pre-initialized (typically zeroed) by the caller since this accumulates.
__global__ void _cuda_resize_bilinear_gradient(size_t dsize, size_t dchan_size, size_t dnc, const float* d,
size_t schan_size, int snr, int snc, float* s,
const float x_scale, const float y_scale)
{
for(auto i : grid_stride_range(0, dsize))
{
const float tmp = d[i];
const int idx = i%dchan_size;
const int channel = i/dchan_size;
const int sidx = channel*schan_size;
const int r = idx/dnc;
const int c = idx%dnc;
const float y = r*y_scale;
const int top = static_cast<int>(::floor(y));
const int bottom = ::min(top+1, snr-1);
const float tb_frac = y - top;
const float x = c*x_scale;
const int left = static_cast<int>(::floor(x));
const int right = ::min(left+1, snc-1);
const float lr_frac = x - left;
// Scatter-add with the forward pass's interpolation weights.
atomicAdd(s+sidx+top*snc+left, tmp*(1-tb_frac)*(1-lr_frac));
atomicAdd(s+sidx+top*snc+right, tmp*(1-tb_frac)*(lr_frac));
atomicAdd(s+sidx+bottom*snc+left, tmp*(tb_frac)*(1-lr_frac));
atomicAdd(s+sidx+bottom*snc+right, tmp*(tb_frac)*(lr_frac));
}
}
// Strided variant of _cuda_resize_bilinear_gradient: the incoming gradient
// and the output use explicit row/channel strides, so both the read offset
// (didx) and the scatter offsets are built from the stride parameters.
__global__ void _cuda_resize_bilinear_gradient_strided(size_t dsize, size_t dchan_size, size_t dnc, const float* d,
size_t schan_size, int snr, int snc, float* s,
const float x_scale, const float y_scale,
size_t dest_row_stride, size_t src_row_stride, size_t dest_chan_size_strided
)
{
for(auto i : grid_stride_range(0, dsize))
{
const int idx = i%dchan_size;
const int channel = i/dchan_size;
const int didx = channel*dest_chan_size_strided;
const int sidx = channel*schan_size;
const int r = idx/dnc;
const int c = idx%dnc;
// Gradient value read through the strided layout.
const float tmp = d[didx + r*dest_row_stride+c];
const float y = r*y_scale;
const int top = static_cast<int>(::floor(y));
const int bottom = ::min(top+1, snr-1);
const float tb_frac = y - top;
const float x = c*x_scale;
const int left = static_cast<int>(::floor(x));
const int right = ::min(left+1, snc-1);
const float lr_frac = x - left;
atomicAdd(s+sidx+top*src_row_stride+left, tmp*(1-tb_frac)*(1-lr_frac));
atomicAdd(s+sidx+top*src_row_stride+right, tmp*(1-tb_frac)*(lr_frac));
atomicAdd(s+sidx+bottom*src_row_stride+left, tmp*(tb_frac)*(1-lr_frac));
atomicAdd(s+sidx+bottom*src_row_stride+right, tmp*(tb_frac)*(lr_frac));
}
}
// Backward pass of resize_bilinear: accumulates gradient_input (the gradient
// w.r.t. the resized output) into grad (the gradient w.r.t. the original
// input), using the transpose of the bilinear interpolation.  Note the
// kernels accumulate, so grad's existing contents are added to.
void resize_bilinear_gradient (
tensor& grad,
long grad_row_stride,
long grad_channel_stride,
const tensor& gradient_input,
long gradient_input_row_stride,
long gradient_input_channel_stride
)
{
DLIB_CASSERT(is_same_object(grad, gradient_input)==false);
DLIB_CASSERT(gradient_input.num_samples() == grad.num_samples());
DLIB_CASSERT(gradient_input.k() == grad.k());
if (grad.size() == 0 || gradient_input.size() == 0)
return;
// Same coordinate mapping as the forward pass (grad plays the source role).
const float x_scale = (grad.nc()-1)/(float)std::max<long>((gradient_input.nc()-1),1);
const float y_scale = (grad.nr()-1)/(float)std::max<long>((gradient_input.nr()-1),1);
if (grad.nc() == grad_row_stride && grad.nr()*grad.nc()==grad_channel_stride &&
gradient_input.nc() == gradient_input_row_stride && gradient_input.nr()*gradient_input.nc()==gradient_input_channel_stride)
{
// Densely packed: simpler kernel.
launch_kernel(_cuda_resize_bilinear_gradient,
gradient_input.size(), gradient_input.nr()*gradient_input.nc(), gradient_input.nc(), gradient_input.device(),
grad.nr()*grad.nc(), grad.nr(), grad.nc(), grad.device(),
x_scale, y_scale);
}
else
{
launch_kernel(_cuda_resize_bilinear_gradient_strided,
gradient_input.size(), gradient_input.nr()*gradient_input.nc(), gradient_input.nc(), gradient_input.device(),
grad_channel_stride, grad.nr(), grad.nc(), grad.device(),
x_scale, y_scale, gradient_input_row_stride, grad_row_stride, gradient_input_channel_stride);
}
}
// ----------------------------------------------------------------------------------------
// Block-strided accumulate: treats the data as consecutive blocks of
// block_size elements and does dest[blk*dest_stride+j] += src[blk*src_stride+j].
__global__ void _cuda_copy_tensor_add_to (float* dest, size_t size, const float* src, size_t dest_stride, size_t src_stride, size_t block_size)
{
    for(auto i : grid_stride_range(0, size))
    {
        const size_t blk = i/block_size;
        const size_t off = i%block_size;
        dest[blk*dest_stride + off] += src[blk*src_stride + off];
    }
}
// Block-strided copy: overwrite variant of _cuda_copy_tensor_add_to.
__global__ void _cuda_copy_tensor (float* dest, size_t size, const float* src, size_t dest_stride, size_t src_stride, size_t block_size)
{
    for(auto i : grid_stride_range(0, size))
    {
        const size_t blk = i/block_size;
        const size_t off = i%block_size;
        dest[blk*dest_stride + off] = src[blk*src_stride + off];
    }
}
// Copies (or accumulates, when add_to) count_k channels from src into dest,
// starting at channel src_k_offset in src and dest_k_offset in dest.  Both
// tensors must have the same num_samples/nr/nc; per-sample strides let the
// kernels step over the channels that are not being copied.
void copy_tensor(
bool add_to,
tensor& dest,
size_t dest_k_offset,
const tensor& src,
size_t src_k_offset,
size_t count_k
)
{
    const size_t dest_sample_size = static_cast<size_t>(dest.nc() * dest.nr() * dest.k());
    const size_t src_sample_size = static_cast<size_t>(src.nc() * src.nr() * src.k());
    // Elements copied per sample (the contiguous run of count_k channels).
    const size_t block_size = count_k * dest.nc() * dest.nr();
    DLIB_CASSERT(dest.num_samples() == src.num_samples() &&
        dest.nc() == src.nc() && dest.nr() == src.nr(), "All sources should fit into dest tensor size");
    DLIB_CASSERT(dest.k() - dest_k_offset >= count_k, "Not enough space in dest tensor");
    DLIB_CASSERT(src.k() - src_k_offset >= count_k, "Not enough space in src tensor");
    // Start pointers advanced to the first channel of interest in sample 0.
    float* dest_p = dest.device() + dest_k_offset * dest.nc() * dest.nr();
    const float* src_p = src.device() + src_k_offset * src.nc() * src.nr();  // fixed stray ';;'
    if (add_to)
    {
        launch_kernel(_cuda_copy_tensor_add_to, max_jobs(dest.size()),
            dest_p, block_size*dest.num_samples(),
            src_p, dest_sample_size, src_sample_size, block_size);
    }
    else
    {
        launch_kernel(_cuda_copy_tensor, max_jobs(dest.size()),
            dest_p, block_size*dest.num_samples(),
            src_p, dest_sample_size, src_sample_size, block_size);
    }
}
// ----------------------------------------------------------------------------------------
}
}
|
dbf523d16f33d7a5ebbfd10d8d861840e428dc71.hip | // !!! This is a file automatically generated by hipify!!!
#include "cudnn_func.hpp"
// Constructor: creates the cuDNN tensor/filter/convolution descriptors and
// either adopts the caller's cuDNN handle or creates an owned one (tracked by
// set_cudnn_handle so the destructor knows what to release).  Also seeds the
// geometry members with default values of 1.
// NOTE(review): has_bias and use_stream are read here but never assigned in
// this constructor -- if they are plain uninitialized members this is
// undefined behavior; confirm they get in-class initializers.
// NOTE(review): the hip event created for `start` is never destroyed (see the
// commented-out hipEventDestroy in the destructor) -- potential leak.
template <typename Dtype>
syshen_deconvolution<Dtype>::syshen_deconvolution(cudnnHandle_t handle_) {
CHECK_CUDNN_ERROR(cudnnCreateTensorDescriptor(&input_desc));
CHECK_CUDNN_ERROR(cudnnCreateTensorDescriptor(&output_desc));
CHECK_CUDNN_ERROR(cudnnCreateFilterDescriptor(&filter_desc));
CHECK_CUDNN_ERROR(cudnnCreateConvolutionDescriptor(&conv_desc));
if (has_bias) {
CHECK_CUDNN_ERROR(cudnnCreateTensorDescriptor(&bias));
}
if (!handle_) {
CHECK_CUDNN_ERROR(cudnnCreate(&handle_t));
set_cudnn_handle = true;
}
else {
handle_t = handle_;
set_cudnn_handle = false;
}
if (use_stream) {
CHECK_CUDA_ERROR(hipStreamCreate(&stream));
CHECK_CUDA_ERROR(hipEventCreate(&start));
}
// Default geometry: 1x1 input, unit strides/dilations, padding of 1.
batch = 1;
in_channels = 1;
stride_h = 1;
stride_w = 1;
pad_h = 1;
pad_w = 1;
dilation_h = 1;
dilation_w = 1;
kernel_h = 1;
kernel_w = 1;
}
// Destructor: releases the cuDNN descriptors, the handle (only when this
// object created it), and the stream.
// NOTE(review): the event created in the constructor is never destroyed (the
// hipEventDestroy call below is commented out), and workSpace allocated in
// SetUp() is not freed here either -- confirm whether these leaks are
// intentional.
template <typename Dtype>
syshen_deconvolution<Dtype>::~syshen_deconvolution() {
CHECK_CUDNN_ERROR(cudnnDestroyTensorDescriptor(input_desc));
CHECK_CUDNN_ERROR(cudnnDestroyTensorDescriptor(output_desc));
CHECK_CUDNN_ERROR(cudnnDestroyFilterDescriptor(filter_desc));
CHECK_CUDNN_ERROR(cudnnDestroyConvolutionDescriptor(conv_desc));
if (has_bias) {
CHECK_CUDNN_ERROR(cudnnDestroyTensorDescriptor(bias));
}
if (set_cudnn_handle) {
CHECK_CUDNN_ERROR(cudnnDestroy(handle_t));
}
if (use_stream) {
CHECK_CUDA_ERROR(hipStreamDestroy(stream));
//CHECK_CUDA_ERROR(hipEventDestroy(strat));
}
}
// Configures all cuDNN descriptors for the deconvolution, selects the
// backward-data algorithm, and allocates its workspace.  Deconvolution is
// implemented as cudnnConvolutionBackwardData, so the "input" tensor plays
// the role of the convolution's output gradient.  Must be called after the
// geometry members (batch, in_channels, in_h/in_w, out_*, kernel/stride/pad/
// dilation) have been set.
// NOTE(review): the bias descriptor is created/used only when has_bias is
// true elsewhere, but cudnnSetTensor4dDescriptor below is not error-checked
// like the other calls -- confirm that is intentional.
template<typename Dtype>
void syshen_deconvolution<Dtype>::SetUp() {
// Explicit NCHW strides for the input descriptor.
int nStride = in_channels * in_h * in_w;
int cStride = in_h * in_w;
CHECK_CUDNN_ERROR(cudnnSetTensor4dDescriptorEx(
input_desc,
cudnnDataType_t::CUDNN_DATA_FLOAT,
batch,
in_channels,
in_h, in_w, nStride, cStride, in_w, 1));
CHECK_CUDNN_ERROR(cudnnSetFilter4dDescriptor(
filter_desc,
cudnnDataType_t::CUDNN_DATA_FLOAT,
CUDNN_TENSOR_NCHW,
out_channels, in_channels, kernel_h, kernel_w));
CHECK_CUDNN_ERROR(cudnnSetConvolution2dDescriptor(
conv_desc, pad_h, pad_w, stride_h,
stride_w, dilation_h, dilation_w,
CUDNN_CROSS_CORRELATION, cudnnDataType_t::CUDNN_DATA_FLOAT));
/*CHECK_CUDNN_ERROR(cudnnGetConvolution2dForwardOutputDim(
conv_desc, input_desc, filter_dsec,
&out_batch, &out_channels, &out_h, &out_w));*/
CHECK_CUDNN_ERROR(cudnnSetTensor4dDescriptor(
output_desc, CUDNN_TENSOR_NCHW,
cudnnDataType_t::CUDNN_DATA_FLOAT,
out_batch, out_channels, out_h, out_w));
/*CHECK_CUDNN_ERROR(cudnnGetConvolutionBackwardFilterAlgorithm(
handle_t, input_desc, filter_desc,
conv_desc, output_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST,
0, &algo));
CHECK_CUDNN_ERROR(cudnnGetConvolutionBackwardFilterWorkspaceSize(
handle_t, input_desc, output_desc,
conv_desc, filter_desc, algo, &workSpaceSize));*/
// Pick the fastest backward-data algorithm and size its workspace.
CHECK_CUDNN_ERROR(cudnnGetConvolutionBackwardDataAlgorithm(
handle_t, filter_desc, input_desc, conv_desc,
output_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo));
CHECK_CUDNN_ERROR(cudnnGetConvolutionBackwardDataWorkspaceSize(
handle_t, filter_desc, input_desc, conv_desc,
output_desc, algo, &workSpaceSize));
if (0 != workSpaceSize)
CHECK_CUDA_ERROR(hipMalloc((void**)&workSpace, workSpaceSize));
if (has_bias) {
cudnnSetTensor4dDescriptor(bias, CUDNN_TENSOR_NCHW, cudnnDataType_t::CUDNN_DATA_FLOAT, out_batch, out_channels, 1, 1);
}
}
template<typename Dtype>
// Runs the deconvolution: output = BackwardData(weights, input), then
// optionally output += bias (alpha=1 on both terms keeps the conv result).
// All pointers must be device memory; SetUp() must have been called first.
void syshen_deconvolution<Dtype>::Forward(Dtype *input, Dtype *output, Dtype *weights, Dtype *bias_weights) {
	Dtype conv_alpha = 1.0f;
	Dtype conv_beta = 0;
	CHECK_CUDNN_ERROR(cudnnConvolutionBackwardData(
		handle_t, &conv_alpha, filter_desc, weights,
		input_desc, input, conv_desc, algo, workSpace,
		workSpaceSize, &conv_beta, output_desc, output));
	if (has_bias) {
		// NOTE(review): return status of cudnnAddTensor is not checked,
		// unlike every other cuDNN call in this class — confirm intentional.
		cudnnAddTensor(handle_t, &conv_alpha, bias, bias_weights, &conv_alpha, output_desc, output);
	}
} | dbf523d16f33d7a5ebbfd10d8d861840e428dc71.cu | #include "cudnn_func.hpp"
template <typename Dtype>
// Constructor: creates the cuDNN descriptors reused by every Forward() call
// and either adopts the caller's cuDNN handle or creates a private one.
// NOTE(review): `has_bias` and `use_stream` are read here but are not set in
// this constructor — presumably they are initialized in-class or in the
// header; confirm, otherwise this reads indeterminate members.
syshen_deconvolution<Dtype>::syshen_deconvolution(cudnnHandle_t handle_) {
	CHECK_CUDNN_ERROR(cudnnCreateTensorDescriptor(&input_desc));
	CHECK_CUDNN_ERROR(cudnnCreateTensorDescriptor(&output_desc));
	CHECK_CUDNN_ERROR(cudnnCreateFilterDescriptor(&filter_desc));
	CHECK_CUDNN_ERROR(cudnnCreateConvolutionDescriptor(&conv_desc));
	if (has_bias) {
		CHECK_CUDNN_ERROR(cudnnCreateTensorDescriptor(&bias));
	}
	// A null handle means the caller did not supply one: create our own and
	// remember that we own it (so the destructor knows to destroy it).
	if (!handle_) {
		CHECK_CUDNN_ERROR(cudnnCreate(&handle_t));
		set_cudnn_handle = true;
	}
	else {
		handle_t = handle_;
		set_cudnn_handle = false;
	}
	if (use_stream) {
		CHECK_CUDA_ERROR(cudaStreamCreate(&stream));
		// NOTE(review): `start` event is created here but never destroyed
		// (the destroy call in the destructor is commented out) — leak.
		CHECK_CUDA_ERROR(cudaEventCreate(&start));
	}
	// Default geometry; callers are expected to overwrite these before SetUp().
	batch = 1;
	in_channels = 1;
	stride_h = 1;
	stride_w = 1;
	pad_h = 1;
	pad_w = 1;
	dilation_h = 1;
	dilation_w = 1;
	kernel_h = 1;
	kernel_w = 1;
}
template <typename Dtype>
// Destructor: releases the cuDNN descriptors and, when owned, the handle and
// the CUDA stream created in the constructor.
// NOTE(review): the `start` event is never destroyed (the call below is
// commented out and misspells it as `strat`), and the `workSpace` buffer
// allocated in SetUp() is never freed — both leak; confirm and fix upstream.
syshen_deconvolution<Dtype>::~syshen_deconvolution() {
	CHECK_CUDNN_ERROR(cudnnDestroyTensorDescriptor(input_desc));
	CHECK_CUDNN_ERROR(cudnnDestroyTensorDescriptor(output_desc));
	CHECK_CUDNN_ERROR(cudnnDestroyFilterDescriptor(filter_desc));
	CHECK_CUDNN_ERROR(cudnnDestroyConvolutionDescriptor(conv_desc));
	if (has_bias) {
		CHECK_CUDNN_ERROR(cudnnDestroyTensorDescriptor(bias));
	}
	// Only destroy the handle if this object created it.
	if (set_cudnn_handle) {
		CHECK_CUDNN_ERROR(cudnnDestroy(handle_t));
	}
	if (use_stream) {
		CHECK_CUDA_ERROR(cudaStreamDestroy(stream));
		//CHECK_CUDA_ERROR(cudaEventDestroy(strat));
	}
}
template<typename Dtype>
// Configures all cuDNN descriptors for a deconvolution implemented as
// cudnnConvolutionBackwardData, picks the fastest backward-data algorithm and
// allocates its workspace.  Must be called after the geometry members
// (batch, in_channels, out_*, kernel_*, stride_*, pad_*, dilation_*) are set.
void syshen_deconvolution<Dtype>::SetUp() {
	// Fully-packed NCHW strides for the input tensor.
	int nStride = in_channels * in_h * in_w;
	int cStride = in_h * in_w;
	CHECK_CUDNN_ERROR(cudnnSetTensor4dDescriptorEx(
		input_desc,
		cudnnDataType_t::CUDNN_DATA_FLOAT,
		batch,
		in_channels,
		in_h, in_w, nStride, cStride, in_w, 1));
	// NOTE(review): for cudnnConvolutionBackwardData the filter's K dimension
	// must match the channel count of `input_desc` (here in_channels); this
	// sets (out_channels, in_channels, ...) — verify the dimension order
	// against the actual weight layout.
	CHECK_CUDNN_ERROR(cudnnSetFilter4dDescriptor(
		filter_desc,
		cudnnDataType_t::CUDNN_DATA_FLOAT,
		CUDNN_TENSOR_NCHW,
		out_channels, in_channels, kernel_h, kernel_w));
	CHECK_CUDNN_ERROR(cudnnSetConvolution2dDescriptor(
		conv_desc, pad_h, pad_w, stride_h,
		stride_w, dilation_h, dilation_w,
		CUDNN_CROSS_CORRELATION, cudnnDataType_t::CUDNN_DATA_FLOAT));
	/*CHECK_CUDNN_ERROR(cudnnGetConvolution2dForwardOutputDim(
		conv_desc, input_desc, filter_dsec,
		&out_batch, &out_channels, &out_h, &out_w));*/
	// Output shape is supplied by the caller, not derived by cuDNN (the
	// derivation above is commented out).
	CHECK_CUDNN_ERROR(cudnnSetTensor4dDescriptor(
		output_desc, CUDNN_TENSOR_NCHW,
		cudnnDataType_t::CUDNN_DATA_FLOAT,
		out_batch, out_channels, out_h, out_w));
	/*CHECK_CUDNN_ERROR(cudnnGetConvolutionBackwardFilterAlgorithm(
		handle_t, input_desc, filter_desc,
		conv_desc, output_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST,
		0, &algo));
	CHECK_CUDNN_ERROR(cudnnGetConvolutionBackwardFilterWorkspaceSize(
		handle_t, input_desc, output_desc,
		conv_desc, filter_desc, algo, &workSpaceSize));*/
	// Pick the fastest backward-data algorithm (no workspace limit) and query
	// how much scratch memory it needs.
	CHECK_CUDNN_ERROR(cudnnGetConvolutionBackwardDataAlgorithm(
		handle_t, filter_desc, input_desc, conv_desc,
		output_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo));
	CHECK_CUDNN_ERROR(cudnnGetConvolutionBackwardDataWorkspaceSize(
		handle_t, filter_desc, input_desc, conv_desc,
		output_desc, algo, &workSpaceSize));
	// NOTE(review): calling SetUp() twice re-mallocs without freeing the
	// previous workSpace — potential leak; confirm single-call usage.
	if (0 != workSpaceSize)
		CHECK_CUDA_ERROR(cudaMalloc((void**)&workSpace, workSpaceSize));
	if (has_bias) {
		// NOTE(review): return status is unchecked here, and N is set to
		// out_batch — cuDNN bias tensors used with cudnnAddTensor broadcasting
		// are conventionally (1, C, 1, 1); confirm intended shape.
		cudnnSetTensor4dDescriptor(bias, CUDNN_TENSOR_NCHW, cudnnDataType_t::CUDNN_DATA_FLOAT, out_batch, out_channels, 1, 1);
	}
}
template<typename Dtype>
// Runs the deconvolution: output = BackwardData(weights, input), then
// optionally output += bias (alpha=1 on both terms keeps the conv result).
// All pointers must be device memory; SetUp() must have been called first.
void syshen_deconvolution<Dtype>::Forward(Dtype *input, Dtype *output, Dtype *weights, Dtype *bias_weights) {
	Dtype conv_alpha = 1.0f;
	Dtype conv_beta = 0;
	CHECK_CUDNN_ERROR(cudnnConvolutionBackwardData(
		handle_t, &conv_alpha, filter_desc, weights,
		input_desc, input, conv_desc, algo, workSpace,
		workSpaceSize, &conv_beta, output_desc, output));
	if (has_bias) {
		// NOTE(review): return status of cudnnAddTensor is not checked,
		// unlike every other cuDNN call in this class — confirm intentional.
		cudnnAddTensor(handle_t, &conv_alpha, bias, bias_weights, &conv_alpha, output_desc, output);
	}
}
0d416779ddbea4c9215b6c2321cde0f694a24011.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <set>
#include <vector>
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/operators/math/selected_rows_functor.h"
#include "paddle/fluid/platform/cuda_primitives.h"
#include "paddle/fluid/platform/float16.h"
namespace paddle {
namespace operators {
namespace math {
template <typename T>
// out = concat(input1, input2): the row-id lists are concatenated (duplicates
// are kept, no merging) and the two value tensors are copied back-to-back
// into the pre-allocated output value tensor, all on the GPU.
struct SelectedRowsAdd<platform::CUDADeviceContext, T> {
  void operator()(const platform::CUDADeviceContext& context,
                  const framework::SelectedRows& input1,
                  const framework::SelectedRows& input2,
                  framework::SelectedRows* output) {
    auto in1_height = input1.height();
    PADDLE_ENFORCE_EQ(in1_height, input2.height());
    output->set_height(in1_height);
    framework::Vector<int64_t> in1_rows(input1.rows());
    auto& in2_rows = input2.rows();
    std::vector<int64_t> out_rows;
    out_rows.reserve(in1_rows.size() + in2_rows.size());
    // concat rows
    out_rows.insert(out_rows.end(), in1_rows.begin(), in1_rows.end());
    out_rows.insert(out_rows.end(), in2_rows.begin(), in2_rows.end());
    output->set_rows(out_rows);
    auto* out_value = output->mutable_value();
    auto& in1_value = input1.value();
    auto& in2_value = input2.value();
    // Both inputs and the output must share the same per-row element count.
    auto in1_row_numel = in1_value.numel() / in1_rows.size();
    PADDLE_ENFORCE_EQ(in1_row_numel, in2_value.numel() / in2_rows.size());
    PADDLE_ENFORCE_EQ(in1_row_numel, out_value->numel() / out_rows.size());
    auto* out_data = out_value->data<T>();
    auto* in1_data = in1_value.data<T>();
    // All three tensors must live on the GPU for the device-to-device copies.
    auto in1_place = input1.place();
    PADDLE_ENFORCE_EQ(platform::is_gpu_place(in1_place), true);
    auto in2_place = input2.place();
    PADDLE_ENFORCE_EQ(platform::is_gpu_place(in2_place), true);
    auto out_place = context.GetPlace();
    PADDLE_ENFORCE_EQ(platform::is_gpu_place(out_place), true);
    // Async copies on the context stream: input1's values first, then
    // input2's values appended right after them.
    memory::Copy(boost::get<platform::CUDAPlace>(out_place), out_data,
                 boost::get<platform::CUDAPlace>(in1_place), in1_data,
                 in1_value.numel() * sizeof(T), context.stream());
    auto* in2_data = in2_value.data<T>();
    memory::Copy(boost::get<platform::CUDAPlace>(out_place),
                 out_data + in1_value.numel(),
                 boost::get<platform::CUDAPlace>(in2_place), in2_data,
                 in2_value.numel() * sizeof(T), context.stream());
  }
};
template struct SelectedRowsAdd<platform::CUDADeviceContext, float>;
template struct SelectedRowsAdd<platform::CUDADeviceContext, double>;
namespace {
template <typename T, int block_size>
__global__ void SelectedRowsAddTensorKernel(const T* selected_rows,
                                            const int64_t* rows, T* tensor_out,
                                            int64_t row_numel) {
  // One block per sparse row: block b scatters row b of `selected_rows`
  // into dense row `rows[b]` of `tensor_out`, threads striding over columns.
  const int row = blockIdx.x;
  const T* src = selected_rows + row * row_numel;
  T* dst = tensor_out + rows[row] * row_numel;
  // The same row id may appear in `rows` more than once, so several blocks
  // can target the same destination row — accumulate atomically.
  for (int col = threadIdx.x; col < row_numel; col += block_size) {
    paddle::platform::CudaAtomicAdd(dst + col, src[col]);
  }
}
} // namespace
template <typename T>
// output = scatter(input1) + input2: zero the dense output, scatter-add the
// sparse rows of input1 into it with a one-block-per-row kernel, then add the
// dense tensor input2 elementwise via Eigen.
struct SelectedRowsAddTensor<platform::CUDADeviceContext, T> {
  void operator()(const platform::CUDADeviceContext& context,
                  const framework::SelectedRows& input1,
                  const framework::Tensor& input2, framework::Tensor* output) {
    auto in1_height = input1.height();
    auto in2_dims = input2.dims();
    auto out_dims = output->dims();
    PADDLE_ENFORCE_EQ(in1_height, in2_dims[0]);
    PADDLE_ENFORCE_EQ(in1_height, out_dims[0]);
    auto& in1_value = input1.value();
    auto& in1_rows = input1.rows();
    // Per-row element count must agree between the sparse and dense operands.
    int64_t in1_row_numel = in1_value.numel() / in1_rows.size();
    PADDLE_ENFORCE_EQ(in1_row_numel, input2.numel() / in1_height);
    PADDLE_ENFORCE_EQ(in1_row_numel, output->numel() / in1_height);
    auto* in1_data = in1_value.data<T>();
    auto* in2_data = input2.data<T>();
    auto* out_data = output->data<T>();
    // Zero the output first; the kernel below only adds into it.
    SetConstant<platform::CUDADeviceContext, T> functor;
    functor(context, output, static_cast<T>(0));
    const int block_size = 256;
    dim3 threads(block_size, 1);
    dim3 grid(in1_rows.size(), 1);
    hipLaunchKernelGGL(( SelectedRowsAddTensorKernel<
        T, block_size>), dim3(grid), dim3(threads), 0, context.stream(),
        in1_data, in1_rows.CUDAData(context.GetPlace()), out_data,
        in1_row_numel);
    // Dense part: out += in2 on the context's Eigen device.
    auto out_eigen = framework::EigenVector<T>::Flatten(*output);
    auto in2_eigen = framework::EigenVector<T>::Flatten(input2);
    out_eigen.device(*context.eigen_device()) = out_eigen + in2_eigen;
  }
};
template struct SelectedRowsAddTensor<platform::CUDADeviceContext, float>;
template struct SelectedRowsAddTensor<platform::CUDADeviceContext, double>;
template struct SelectedRowsAdd<platform::CUDADeviceContext, platform::float16>;
template struct SelectedRowsAddTensor<platform::CUDADeviceContext,
platform::float16>;
template <typename T>
// Appends input1 into input2 in place: input1's row ids are appended to
// input2's row list and input1's values are copied into input2's value
// tensor starting at element offset `input2_offset` (caller pre-sizes it).
struct SelectedRowsAddTo<platform::CUDADeviceContext, T> {
  void operator()(const platform::CUDADeviceContext& context,
                  const framework::SelectedRows& input1,
                  const int64_t input2_offset,
                  framework::SelectedRows* input2) {
    auto in1_height = input1.height();
    PADDLE_ENFORCE_EQ(in1_height, input2->height());
    auto& in1_rows = input1.rows();
    auto& in2_rows = *(input2->mutable_rows());
    auto& in1_value = input1.value();
    auto* in2_value = input2->mutable_value();
    // concat rows
    if (in1_rows.size()) {
      in2_rows.Extend(in1_rows.begin(), in1_rows.end());
    }
    // Both operands must be on the GPU for the device-to-device copy below.
    auto in1_place = input1.place();
    PADDLE_ENFORCE_EQ(platform::is_gpu_place(in1_place), true);
    auto in2_place = input2->place();
    PADDLE_ENFORCE_EQ(platform::is_gpu_place(in2_place), true);
    auto* in1_data = in1_value.data<T>();
    auto* in2_data = in2_value->data<T>();
    // Async append of input1's values at the caller-supplied element offset.
    memory::Copy(boost::get<platform::CUDAPlace>(in2_place),
                 in2_data + input2_offset,
                 boost::get<platform::CUDAPlace>(in1_place), in1_data,
                 in1_value.numel() * sizeof(T), context.stream());
  }
};
template struct SelectedRowsAddTo<platform::CUDADeviceContext, float>;
template struct SelectedRowsAddTo<platform::CUDADeviceContext, double>;
template struct SelectedRowsAddTo<platform::CUDADeviceContext, int>;
template struct SelectedRowsAddTo<platform::CUDADeviceContext, int64_t>;
template struct SelectedRowsAddTo<platform::CUDADeviceContext,
platform::float16>;
namespace {
template <typename T, int block_size>
__global__ void SelectedRowsAddToTensorKernel(const T* selected_rows,
                                              const int64_t* rows,
                                              T* tensor_out,
                                              int64_t row_numel) {
  // One block per sparse row: block b adds row b of `selected_rows` into
  // dense row `rows[b]` of `tensor_out`, threads striding over columns.
  const int row = blockIdx.x;
  const T* src = selected_rows + row * row_numel;
  T* dst = tensor_out + rows[row] * row_numel;
  // Row ids may repeat, so multiple blocks can hit the same destination row;
  // accumulate atomically to avoid lost updates.
  for (int col = threadIdx.x; col < row_numel; col += block_size) {
    paddle::platform::CudaAtomicAdd(dst + col, src[col]);
  }
}
} // namespace
template <typename T>
// input2 += scatter(input1): adds the sparse rows of input1 into the dense
// tensor input2 in place, one thread block per sparse row.
struct SelectedRowsAddToTensor<platform::CUDADeviceContext, T> {
  void operator()(const platform::CUDADeviceContext& context,
                  const framework::SelectedRows& input1,
                  framework::Tensor* input2) {
    auto in1_height = input1.height();
    auto in2_dims = input2->dims();
    PADDLE_ENFORCE_EQ(in1_height, in2_dims[0]);
    auto& in1_value = input1.value();
    auto& in1_rows = input1.rows();
    // Per-row element count must agree between the sparse and dense operands.
    int64_t in1_row_numel = in1_value.numel() / in1_rows.size();
    PADDLE_ENFORCE_EQ(in1_row_numel, input2->numel() / in1_height);
    auto* in1_data = in1_value.data<T>();
    auto* in2_data = input2->data<T>();
    const int block_size = 256;
    dim3 threads(block_size, 1);
    dim3 grid(in1_rows.size(), 1);
    hipLaunchKernelGGL(( SelectedRowsAddToTensorKernel<
        T, block_size>), dim3(grid), dim3(threads), 0, context.stream(),
        in1_data, in1_rows.CUDAData(context.GetPlace()), in2_data,
        in1_row_numel);
  }
};
template struct SelectedRowsAddToTensor<platform::CUDADeviceContext, float>;
template struct SelectedRowsAddToTensor<platform::CUDADeviceContext, double>;
template struct SelectedRowsAddToTensor<platform::CUDADeviceContext, int>;
template struct SelectedRowsAddToTensor<platform::CUDADeviceContext, int64_t>;
template struct SelectedRowsAddToTensor<platform::CUDADeviceContext,
platform::float16>;
namespace scatter {
template <typename T, int block_size>
// Adds each input row into its slot of the merged output.  Block `blockIdx.x`
// handles input row `input_rows[blockIdx.x]`: thread 0 looks up the row's
// position in `out_rows` (unique, caller-built from a std::set), the whole
// block then accumulates the row's `row_numel` values atomically (several
// input rows may map to the same output row).
__global__ void MergeAddKernel(const T* input, const int64_t* input_rows,
                               T* out, const int64_t* out_rows,
                               size_t out_rows_size, int64_t row_numel) {
  const int ty = blockIdx.x;
  int tid = threadIdx.x;
  __shared__ size_t out_idx;
  if (tid == 0) {
    // Linear search for this row's output slot.  out_rows entries are
    // unique, so stop at the first hit instead of scanning to the end
    // (the original always scanned all out_rows_size entries).
    for (size_t i = 0; i < out_rows_size; i++) {
      if (input_rows[ty] == out_rows[i]) {
        out_idx = i;
        break;
      }
    }
  }
  __syncthreads();  // publish out_idx to every thread in the block
  input += ty * row_numel;
  out += out_idx * row_numel;
  for (int index = tid; index < row_numel; index += block_size) {
    paddle::platform::CudaAtomicAdd(out + index, input[index]);
  }
}
template <typename T>
// Merges duplicate rows of one or several SelectedRows: the output row list
// is the sorted set of unique input row ids and values sharing an id are
// summed on the GPU by MergeAddKernel.
struct MergeAdd<platform::CUDADeviceContext, T> {
  // Convenience overload returning the merged result by value.
  framework::SelectedRows operator()(const platform::CUDADeviceContext& context,
                                     const framework::SelectedRows& input,
                                     const bool sorted_result = false) {
    framework::SelectedRows out;
    (*this)(context, input, &out);
    return out;
  }
  // Single-input merge into a caller-provided output.
  void operator()(const platform::CUDADeviceContext& context,
                  const framework::SelectedRows& input,
                  framework::SelectedRows* output,
                  const bool sorted_result = false) {
    framework::Vector<int64_t> input_rows(input.rows());
    // Empty input: leave the output untouched.
    if (input_rows.size() == 0) {
      return;
    }
    framework::SelectedRows& out = *output;
    // Deduplicate (and implicitly sort) the row ids via std::set.
    std::set<int64_t> row_set(input_rows.begin(), input_rows.end());
    std::vector<int64_t> merge_rows_cpu(row_set.begin(), row_set.end());
    framework::Vector<int64_t> merge_rows(merge_rows_cpu);
    auto input_width = input.value().dims()[1];
    out.set_rows(merge_rows);
    out.set_height(input.height());
    // Allocate and zero the merged value tensor; the kernel only adds.
    out.mutable_value()->mutable_data<T>(
        framework::make_ddim(
            {static_cast<int64_t>(merge_rows.size()), input_width}),
        context.GetPlace());
    math::SetConstant<platform::CUDADeviceContext, T> constant_functor;
    constant_functor(context, out.mutable_value(), static_cast<T>(0));
    auto* out_data = out.mutable_value()->data<T>();
    auto* input_data = input.value().data<T>();
    const int block_size = 256;
    dim3 threads(block_size, 1);
    dim3 grid1(input_rows.size(), 1);
    // One block per (possibly duplicated) input row.
    hipLaunchKernelGGL(( MergeAddKernel<T, 256>), dim3(grid1), dim3(threads), 0, context.stream(),
        input_data, input_rows.CUDAData(context.GetPlace()), out_data,
        out.mutable_rows()->CUDAMutableData(context.GetPlace()),
        out.rows().size(), input_width);
  }
  // Multi-input merge: all inputs must share width and height; their rows
  // are merged into one unique set and every input is scatter-added in turn.
  void operator()(const platform::CUDADeviceContext& context,
                  const std::vector<const framework::SelectedRows*>& inputs,
                  framework::SelectedRows* output,
                  const bool sorted_result = false) {
    if (inputs.size() == 0) {
      VLOG(3) << "no input! return";
      return;
    }
    // Find the first non-empty input to take the reference shape from.
    const framework::SelectedRows* has_value_input = nullptr;
    for (auto* in : inputs) {
      if (in->rows().size() > 0) {
        has_value_input = in;
        break;
      }
    }
    if (has_value_input == nullptr) {
      VLOG(3) << "no input has value! just return" << std::endl;
      return;
    }
    auto input_width = has_value_input->value().dims()[1];
    auto input_height = has_value_input->height();
    framework::SelectedRows& out = *output;
    std::set<int64_t> merged_row_set;
    for (auto* input : inputs) {
      if (input->rows().size() == 0) {
        continue;
      }
      PADDLE_ENFORCE_EQ(input_width, input->value().dims()[1],
                        "all input should have same "
                        "dimension except for the first one");
      PADDLE_ENFORCE_EQ(input_height, input->height(),
                        "all input should have same height");
      merged_row_set.insert(input->rows().begin(), input->rows().end());
    }
    std::vector<int64_t> merge_rows_cpu(merged_row_set.begin(),
                                        merged_row_set.end());
    framework::Vector<int64_t> merge_rows(merge_rows_cpu);
    out.set_rows(merge_rows);
    out.set_height(input_height);
    // Allocate and zero the merged value tensor; kernels below only add.
    out.mutable_value()->mutable_data<T>(
        framework::make_ddim(
            {static_cast<int64_t>(merge_rows.size()), input_width}),
        context.GetPlace());
    math::SetConstant<platform::CUDADeviceContext, T> constant_functor;
    constant_functor(context, out.mutable_value(), static_cast<T>(0));
    auto* out_data = out.mutable_value()->data<T>();
    const int block_size = 256;
    dim3 threads(block_size, 1);
    // Scatter-add every non-empty input into the shared output.
    for (auto* input : inputs) {
      if (input->rows().size() == 0) {
        continue;
      }
      auto* input_data = input->value().data<T>();
      auto& input_rows = input->rows();
      dim3 grid1(input_rows.size(), 1);
      hipLaunchKernelGGL(( MergeAddKernel<T, 256>), dim3(grid1), dim3(threads), 0, context.stream(),
          input_data, input_rows.CUDAData(context.GetPlace()), out_data,
          out.mutable_rows()->CUDAMutableData(context.GetPlace()),
          out.rows().size(), input_width);
    }
  }
};
template struct MergeAdd<platform::CUDADeviceContext, float>;
template struct MergeAdd<platform::CUDADeviceContext, double>;
template struct MergeAdd<platform::CUDADeviceContext, int>;
template struct MergeAdd<platform::CUDADeviceContext, int64_t>;
template struct MergeAdd<platform::CUDADeviceContext, platform::float16>;
template <typename T, int block_size>
// Applies `op` between each selected row and the matching dense-tensor row:
// block `blockIdx.x` processes input row `rows[blockIdx.x]`, threads striding
// over the row's `row_numel` elements.  Rows must be unique (callers merge
// duplicates first) — the ops below are not atomic.
//
// Fix: `op` was previously taken as `const ScatterOps&`.  Kernel arguments
// are copied by value, so a reference parameter makes the device dereference
// a host-side address when reading `op` — an illegal memory access.  Passing
// the enum by value is correct and cheap; call sites are unchanged.
__global__ void UpdateToTensorKernel(const T* selected_rows,
                                     const int64_t* rows, const ScatterOps op,
                                     T* tensor_out, int64_t row_numel) {
  const int ty = blockIdx.x;
  int tid = threadIdx.x;
  selected_rows += ty * row_numel;
  tensor_out += rows[ty] * row_numel;
  // FIXME(typhoonzero): use macro fix the below messy code.
  switch (op) {
    case ScatterOps::ASSIGN:
      for (int index = tid; index < row_numel; index += block_size) {
        tensor_out[index] = selected_rows[index];
      }
      break;
    case ScatterOps::ADD:
      for (int index = tid; index < row_numel; index += block_size) {
        tensor_out[index] += selected_rows[index];
      }
      break;
    case ScatterOps::SUB:
      for (int index = tid; index < row_numel; index += block_size) {
        tensor_out[index] -= selected_rows[index];
      }
      break;
    case ScatterOps::SUBBY:
      // reversed subtraction: out = row - out
      for (int index = tid; index < row_numel; index += block_size) {
        tensor_out[index] = selected_rows[index] - tensor_out[index];
      }
      break;
    case ScatterOps::MUL:
      for (int index = tid; index < row_numel; index += block_size) {
        tensor_out[index] *= selected_rows[index];
      }
      break;
    case ScatterOps::DIV:
      for (int index = tid; index < row_numel; index += block_size) {
        tensor_out[index] /= selected_rows[index];
      }
      break;
    case ScatterOps::DIVBY:
      // reversed division: out = row / out
      for (int index = tid; index < row_numel; index += block_size) {
        tensor_out[index] = selected_rows[index] / tensor_out[index];
      }
      break;
  }
}
template <typename T>
// Applies `op` (assign/add/sub/mul/div and reversed variants) between a
// SelectedRows operand and a dense tensor, after first merging duplicate
// sparse rows so the kernel's non-atomic updates are safe.
struct UpdateToTensor<platform::CUDADeviceContext, T> {
  void operator()(const platform::CUDADeviceContext& context,
                  const ScatterOps& op, const framework::SelectedRows& input1,
                  framework::Tensor* input2) {
    // NOTE: Use SelectedRowsAddToTensor for better performance
    // no additional MergeAdd called.
    MergeAdd<platform::CUDADeviceContext, T> merge_func;
    auto merged_in1 = merge_func(context, input1);
    auto in1_height = merged_in1.height();
    auto in2_dims = input2->dims();
    PADDLE_ENFORCE_EQ(in1_height, in2_dims[0]);
    auto& in1_value = merged_in1.value();
    auto& in1_rows = merged_in1.rows();
    // Per-row element count must agree between the sparse and dense operands.
    int64_t in1_row_numel = in1_value.numel() / in1_rows.size();
    PADDLE_ENFORCE_EQ(in1_row_numel, input2->numel() / in1_height);
    auto* in1_data = in1_value.template data<T>();
    auto* in2_data = input2->data<T>();
    dim3 threads(platform::PADDLE_CUDA_NUM_THREADS, 1);
    dim3 grid(in1_rows.size(), 1);
    // NOTE(review): this uses in1_rows.cuda_data() while every other functor
    // in this file uses CUDAData(place) — possibly a stale API; confirm.
    hipLaunchKernelGGL(( UpdateToTensorKernel<T, platform::PADDLE_CUDA_NUM_THREADS>),
        dim3(grid), dim3(threads), 0, context.stream(), in1_data, in1_rows.cuda_data(),
        op, in2_data, in1_row_numel);
  }
};
} // namespace scatter
} // namespace math
} // namespace operators
} // namespace paddle
| 0d416779ddbea4c9215b6c2321cde0f694a24011.cu | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <set>
#include <vector>
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/operators/math/selected_rows_functor.h"
#include "paddle/fluid/platform/cuda_primitives.h"
#include "paddle/fluid/platform/float16.h"
namespace paddle {
namespace operators {
namespace math {
template <typename T>
// out = concat(input1, input2): the row-id lists are concatenated (duplicates
// are kept, no merging) and the two value tensors are copied back-to-back
// into the pre-allocated output value tensor, all on the GPU.
struct SelectedRowsAdd<platform::CUDADeviceContext, T> {
  void operator()(const platform::CUDADeviceContext& context,
                  const framework::SelectedRows& input1,
                  const framework::SelectedRows& input2,
                  framework::SelectedRows* output) {
    auto in1_height = input1.height();
    PADDLE_ENFORCE_EQ(in1_height, input2.height());
    output->set_height(in1_height);
    framework::Vector<int64_t> in1_rows(input1.rows());
    auto& in2_rows = input2.rows();
    std::vector<int64_t> out_rows;
    out_rows.reserve(in1_rows.size() + in2_rows.size());
    // concat rows
    out_rows.insert(out_rows.end(), in1_rows.begin(), in1_rows.end());
    out_rows.insert(out_rows.end(), in2_rows.begin(), in2_rows.end());
    output->set_rows(out_rows);
    auto* out_value = output->mutable_value();
    auto& in1_value = input1.value();
    auto& in2_value = input2.value();
    // Both inputs and the output must share the same per-row element count.
    auto in1_row_numel = in1_value.numel() / in1_rows.size();
    PADDLE_ENFORCE_EQ(in1_row_numel, in2_value.numel() / in2_rows.size());
    PADDLE_ENFORCE_EQ(in1_row_numel, out_value->numel() / out_rows.size());
    auto* out_data = out_value->data<T>();
    auto* in1_data = in1_value.data<T>();
    // All three tensors must live on the GPU for the device-to-device copies.
    auto in1_place = input1.place();
    PADDLE_ENFORCE_EQ(platform::is_gpu_place(in1_place), true);
    auto in2_place = input2.place();
    PADDLE_ENFORCE_EQ(platform::is_gpu_place(in2_place), true);
    auto out_place = context.GetPlace();
    PADDLE_ENFORCE_EQ(platform::is_gpu_place(out_place), true);
    // Async copies on the context stream: input1's values first, then
    // input2's values appended right after them.
    memory::Copy(boost::get<platform::CUDAPlace>(out_place), out_data,
                 boost::get<platform::CUDAPlace>(in1_place), in1_data,
                 in1_value.numel() * sizeof(T), context.stream());
    auto* in2_data = in2_value.data<T>();
    memory::Copy(boost::get<platform::CUDAPlace>(out_place),
                 out_data + in1_value.numel(),
                 boost::get<platform::CUDAPlace>(in2_place), in2_data,
                 in2_value.numel() * sizeof(T), context.stream());
  }
};
template struct SelectedRowsAdd<platform::CUDADeviceContext, float>;
template struct SelectedRowsAdd<platform::CUDADeviceContext, double>;
namespace {
template <typename T, int block_size>
__global__ void SelectedRowsAddTensorKernel(const T* selected_rows,
                                            const int64_t* rows, T* tensor_out,
                                            int64_t row_numel) {
  // One block per sparse row: block b scatters row b of `selected_rows`
  // into dense row `rows[b]` of `tensor_out`, threads striding over columns.
  const int row = blockIdx.x;
  const T* src = selected_rows + row * row_numel;
  T* dst = tensor_out + rows[row] * row_numel;
  // The same row id may appear in `rows` more than once, so several blocks
  // can target the same destination row — accumulate atomically.
  for (int col = threadIdx.x; col < row_numel; col += block_size) {
    paddle::platform::CudaAtomicAdd(dst + col, src[col]);
  }
}
} // namespace
template <typename T>
// output = scatter(input1) + input2: zero the dense output, scatter-add the
// sparse rows of input1 into it with a one-block-per-row kernel, then add the
// dense tensor input2 elementwise via Eigen.
struct SelectedRowsAddTensor<platform::CUDADeviceContext, T> {
  void operator()(const platform::CUDADeviceContext& context,
                  const framework::SelectedRows& input1,
                  const framework::Tensor& input2, framework::Tensor* output) {
    auto in1_height = input1.height();
    auto in2_dims = input2.dims();
    auto out_dims = output->dims();
    PADDLE_ENFORCE_EQ(in1_height, in2_dims[0]);
    PADDLE_ENFORCE_EQ(in1_height, out_dims[0]);
    auto& in1_value = input1.value();
    auto& in1_rows = input1.rows();
    // Per-row element count must agree between the sparse and dense operands.
    int64_t in1_row_numel = in1_value.numel() / in1_rows.size();
    PADDLE_ENFORCE_EQ(in1_row_numel, input2.numel() / in1_height);
    PADDLE_ENFORCE_EQ(in1_row_numel, output->numel() / in1_height);
    auto* in1_data = in1_value.data<T>();
    auto* in2_data = input2.data<T>();
    auto* out_data = output->data<T>();
    // Zero the output first; the kernel below only adds into it.
    SetConstant<platform::CUDADeviceContext, T> functor;
    functor(context, output, static_cast<T>(0));
    const int block_size = 256;
    dim3 threads(block_size, 1);
    dim3 grid(in1_rows.size(), 1);
    SelectedRowsAddTensorKernel<
        T, block_size><<<grid, threads, 0, context.stream()>>>(
        in1_data, in1_rows.CUDAData(context.GetPlace()), out_data,
        in1_row_numel);
    // Dense part: out += in2 on the context's Eigen device.
    auto out_eigen = framework::EigenVector<T>::Flatten(*output);
    auto in2_eigen = framework::EigenVector<T>::Flatten(input2);
    out_eigen.device(*context.eigen_device()) = out_eigen + in2_eigen;
  }
};
template struct SelectedRowsAddTensor<platform::CUDADeviceContext, float>;
template struct SelectedRowsAddTensor<platform::CUDADeviceContext, double>;
template struct SelectedRowsAdd<platform::CUDADeviceContext, platform::float16>;
template struct SelectedRowsAddTensor<platform::CUDADeviceContext,
platform::float16>;
template <typename T>
// Appends input1 into input2 in place: input1's row ids are appended to
// input2's row list and input1's values are copied into input2's value
// tensor starting at element offset `input2_offset` (caller pre-sizes it).
struct SelectedRowsAddTo<platform::CUDADeviceContext, T> {
  void operator()(const platform::CUDADeviceContext& context,
                  const framework::SelectedRows& input1,
                  const int64_t input2_offset,
                  framework::SelectedRows* input2) {
    auto in1_height = input1.height();
    PADDLE_ENFORCE_EQ(in1_height, input2->height());
    auto& in1_rows = input1.rows();
    auto& in2_rows = *(input2->mutable_rows());
    auto& in1_value = input1.value();
    auto* in2_value = input2->mutable_value();
    // concat rows
    if (in1_rows.size()) {
      in2_rows.Extend(in1_rows.begin(), in1_rows.end());
    }
    // Both operands must be on the GPU for the device-to-device copy below.
    auto in1_place = input1.place();
    PADDLE_ENFORCE_EQ(platform::is_gpu_place(in1_place), true);
    auto in2_place = input2->place();
    PADDLE_ENFORCE_EQ(platform::is_gpu_place(in2_place), true);
    auto* in1_data = in1_value.data<T>();
    auto* in2_data = in2_value->data<T>();
    // Async append of input1's values at the caller-supplied element offset.
    memory::Copy(boost::get<platform::CUDAPlace>(in2_place),
                 in2_data + input2_offset,
                 boost::get<platform::CUDAPlace>(in1_place), in1_data,
                 in1_value.numel() * sizeof(T), context.stream());
  }
};
template struct SelectedRowsAddTo<platform::CUDADeviceContext, float>;
template struct SelectedRowsAddTo<platform::CUDADeviceContext, double>;
template struct SelectedRowsAddTo<platform::CUDADeviceContext, int>;
template struct SelectedRowsAddTo<platform::CUDADeviceContext, int64_t>;
template struct SelectedRowsAddTo<platform::CUDADeviceContext,
platform::float16>;
namespace {
template <typename T, int block_size>
__global__ void SelectedRowsAddToTensorKernel(const T* selected_rows,
                                              const int64_t* rows,
                                              T* tensor_out,
                                              int64_t row_numel) {
  // One block per sparse row: block b adds row b of `selected_rows` into
  // dense row `rows[b]` of `tensor_out`, threads striding over columns.
  const int row = blockIdx.x;
  const T* src = selected_rows + row * row_numel;
  T* dst = tensor_out + rows[row] * row_numel;
  // Row ids may repeat, so multiple blocks can hit the same destination row;
  // accumulate atomically to avoid lost updates.
  for (int col = threadIdx.x; col < row_numel; col += block_size) {
    paddle::platform::CudaAtomicAdd(dst + col, src[col]);
  }
}
} // namespace
template <typename T>
// input2 += scatter(input1): adds the sparse rows of input1 into the dense
// tensor input2 in place, one thread block per sparse row.
struct SelectedRowsAddToTensor<platform::CUDADeviceContext, T> {
  void operator()(const platform::CUDADeviceContext& context,
                  const framework::SelectedRows& input1,
                  framework::Tensor* input2) {
    auto in1_height = input1.height();
    auto in2_dims = input2->dims();
    PADDLE_ENFORCE_EQ(in1_height, in2_dims[0]);
    auto& in1_value = input1.value();
    auto& in1_rows = input1.rows();
    // Per-row element count must agree between the sparse and dense operands.
    int64_t in1_row_numel = in1_value.numel() / in1_rows.size();
    PADDLE_ENFORCE_EQ(in1_row_numel, input2->numel() / in1_height);
    auto* in1_data = in1_value.data<T>();
    auto* in2_data = input2->data<T>();
    const int block_size = 256;
    dim3 threads(block_size, 1);
    dim3 grid(in1_rows.size(), 1);
    SelectedRowsAddToTensorKernel<
        T, block_size><<<grid, threads, 0, context.stream()>>>(
        in1_data, in1_rows.CUDAData(context.GetPlace()), in2_data,
        in1_row_numel);
  }
};
template struct SelectedRowsAddToTensor<platform::CUDADeviceContext, float>;
template struct SelectedRowsAddToTensor<platform::CUDADeviceContext, double>;
template struct SelectedRowsAddToTensor<platform::CUDADeviceContext, int>;
template struct SelectedRowsAddToTensor<platform::CUDADeviceContext, int64_t>;
template struct SelectedRowsAddToTensor<platform::CUDADeviceContext,
platform::float16>;
namespace scatter {
template <typename T, int block_size>
// Adds each input row into its slot of the merged output.  Block `blockIdx.x`
// handles input row `input_rows[blockIdx.x]`: thread 0 looks up the row's
// position in `out_rows` (unique, caller-built from a std::set), the whole
// block then accumulates the row's `row_numel` values atomically (several
// input rows may map to the same output row).
__global__ void MergeAddKernel(const T* input, const int64_t* input_rows,
                               T* out, const int64_t* out_rows,
                               size_t out_rows_size, int64_t row_numel) {
  const int ty = blockIdx.x;
  int tid = threadIdx.x;
  __shared__ size_t out_idx;
  if (tid == 0) {
    // Linear search for this row's output slot.  out_rows entries are
    // unique, so stop at the first hit instead of scanning to the end
    // (the original always scanned all out_rows_size entries).
    for (size_t i = 0; i < out_rows_size; i++) {
      if (input_rows[ty] == out_rows[i]) {
        out_idx = i;
        break;
      }
    }
  }
  __syncthreads();  // publish out_idx to every thread in the block
  input += ty * row_numel;
  out += out_idx * row_numel;
  for (int index = tid; index < row_numel; index += block_size) {
    paddle::platform::CudaAtomicAdd(out + index, input[index]);
  }
}
// GPU MergeAdd: collapses duplicate rows of a SelectedRows into one row
// each, summing their values. Row ids in the result are unique and sorted.
template <typename T>
struct MergeAdd<platform::CUDADeviceContext, T> {
// Convenience overload returning a freshly merged SelectedRows.
framework::SelectedRows operator()(const platform::CUDADeviceContext& context,
const framework::SelectedRows& input,
const bool sorted_result = false) {
framework::SelectedRows out;
(*this)(context, input, &out);
return out;
}
// Merges a single input into *output.
void operator()(const platform::CUDADeviceContext& context,
const framework::SelectedRows& input,
framework::SelectedRows* output,
const bool sorted_result = false) {
framework::Vector<int64_t> input_rows(input.rows());
// Nothing to merge for an empty input; *output is left untouched.
if (input_rows.size() == 0) {
return;
}
framework::SelectedRows& out = *output;
// std::set both de-duplicates and sorts the row ids.
std::set<int64_t> row_set(input_rows.begin(), input_rows.end());
std::vector<int64_t> merge_rows_cpu(row_set.begin(), row_set.end());
framework::Vector<int64_t> merge_rows(merge_rows_cpu);
auto input_width = input.value().dims()[1];
out.set_rows(merge_rows);
out.set_height(input.height());
// Allocate and zero-fill the merged value tensor before accumulation.
out.mutable_value()->mutable_data<T>(
framework::make_ddim(
{static_cast<int64_t>(merge_rows.size()), input_width}),
context.GetPlace());
math::SetConstant<platform::CUDADeviceContext, T> constant_functor;
constant_functor(context, out.mutable_value(), static_cast<T>(0));
auto* out_data = out.mutable_value()->data<T>();
auto* input_data = input.value().data<T>();
const int block_size = 256;
dim3 threads(block_size, 1);
// One block per (possibly duplicated) input row.
dim3 grid1(input_rows.size(), 1);
MergeAddKernel<T, 256><<<grid1, threads, 0, context.stream()>>>(
input_data, input_rows.CUDAData(context.GetPlace()), out_data,
out.mutable_rows()->CUDAMutableData(context.GetPlace()),
out.rows().size(), input_width);
}
// Merges several inputs (all must share height and value width) into
// *output. Empty inputs are skipped.
void operator()(const platform::CUDADeviceContext& context,
const std::vector<const framework::SelectedRows*>& inputs,
framework::SelectedRows* output,
const bool sorted_result = false) {
if (inputs.size() == 0) {
VLOG(3) << "no input! return";
return;
}
// Use the first non-empty input to establish the expected shape.
const framework::SelectedRows* has_value_input = nullptr;
for (auto* in : inputs) {
if (in->rows().size() > 0) {
has_value_input = in;
break;
}
}
if (has_value_input == nullptr) {
VLOG(3) << "no input has value! just return" << std::endl;
return;
}
auto input_width = has_value_input->value().dims()[1];
auto input_height = has_value_input->height();
framework::SelectedRows& out = *output;
std::set<int64_t> merged_row_set;
for (auto* input : inputs) {
if (input->rows().size() == 0) {
continue;
}
PADDLE_ENFORCE_EQ(input_width, input->value().dims()[1],
"all input should have same "
"dimension except for the first one");
PADDLE_ENFORCE_EQ(input_height, input->height(),
"all input should have same height");
merged_row_set.insert(input->rows().begin(), input->rows().end());
}
std::vector<int64_t> merge_rows_cpu(merged_row_set.begin(),
merged_row_set.end());
framework::Vector<int64_t> merge_rows(merge_rows_cpu);
out.set_rows(merge_rows);
out.set_height(input_height);
out.mutable_value()->mutable_data<T>(
framework::make_ddim(
{static_cast<int64_t>(merge_rows.size()), input_width}),
context.GetPlace());
math::SetConstant<platform::CUDADeviceContext, T> constant_functor;
constant_functor(context, out.mutable_value(), static_cast<T>(0));
auto* out_data = out.mutable_value()->data<T>();
const int block_size = 256;
dim3 threads(block_size, 1);
// One kernel launch per input; MergeAddKernel accumulates with atomics,
// so rows shared across inputs sum correctly into the zeroed output.
for (auto* input : inputs) {
if (input->rows().size() == 0) {
continue;
}
auto* input_data = input->value().data<T>();
auto& input_rows = input->rows();
dim3 grid1(input_rows.size(), 1);
MergeAddKernel<T, 256><<<grid1, threads, 0, context.stream()>>>(
input_data, input_rows.CUDAData(context.GetPlace()), out_data,
out.mutable_rows()->CUDAMutableData(context.GetPlace()),
out.rows().size(), input_width);
}
}
};
// Explicit instantiations of MergeAdd for all supported GPU element types.
template struct MergeAdd<platform::CUDADeviceContext, float>;
template struct MergeAdd<platform::CUDADeviceContext, double>;
template struct MergeAdd<platform::CUDADeviceContext, int>;
template struct MergeAdd<platform::CUDADeviceContext, int64_t>;
template struct MergeAdd<platform::CUDADeviceContext, platform::float16>;
// Applies `op` element-wise between the (merged) selected rows and the dense
// destination tensor. Launch layout: one block per selected row
// (blockIdx.x), block_size threads striding over the row; rows[ty] maps the
// block to its row in the dense tensor.
//
// Fix: `op` is now taken by value. The previous parameter was
// `const ScatterOps&`; CUDA kernel arguments are shallow-copied, so a
// reference parameter copies a *host* address which the device then
// dereferences — undefined behaviour. Call sites are unchanged (an lvalue
// binds to the by-value parameter just the same).
template <typename T, int block_size>
__global__ void UpdateToTensorKernel(const T* selected_rows,
                                     const int64_t* rows, const ScatterOps op,
                                     T* tensor_out, int64_t row_numel) {
  const int ty = blockIdx.x;
  int tid = threadIdx.x;
  // Rebase both pointers onto this block's row.
  selected_rows += ty * row_numel;
  tensor_out += rows[ty] * row_numel;
  // FIXME(typhoonzero): use macro fix the below messy code.
  switch (op) {
    case ScatterOps::ASSIGN:
      for (int index = tid; index < row_numel; index += block_size) {
        tensor_out[index] = selected_rows[index];
      }
      break;
    case ScatterOps::ADD:
      for (int index = tid; index < row_numel; index += block_size) {
        tensor_out[index] += selected_rows[index];
      }
      break;
    case ScatterOps::SUB:
      for (int index = tid; index < row_numel; index += block_size) {
        tensor_out[index] -= selected_rows[index];
      }
      break;
    case ScatterOps::SUBBY:
      // tensor = rows - tensor (reversed operand order vs. SUB)
      for (int index = tid; index < row_numel; index += block_size) {
        tensor_out[index] = selected_rows[index] - tensor_out[index];
      }
      break;
    case ScatterOps::MUL:
      for (int index = tid; index < row_numel; index += block_size) {
        tensor_out[index] *= selected_rows[index];
      }
      break;
    case ScatterOps::DIV:
      for (int index = tid; index < row_numel; index += block_size) {
        tensor_out[index] /= selected_rows[index];
      }
      break;
    case ScatterOps::DIVBY:
      // tensor = rows / tensor (reversed operand order vs. DIV)
      for (int index = tid; index < row_numel; index += block_size) {
        tensor_out[index] = selected_rows[index] / tensor_out[index];
      }
      break;
  }
}
// Host-side functor: merges duplicate rows of `input1`, then scatters the
// merged rows into dense tensor `input2` according to `op`.
template <typename T>
struct UpdateToTensor<platform::CUDADeviceContext, T> {
void operator()(const platform::CUDADeviceContext& context,
const ScatterOps& op, const framework::SelectedRows& input1,
framework::Tensor* input2) {
// NOTE: Use SelectedRowsAddToTensor for better performance
// no additional MergeAdd called.
MergeAdd<platform::CUDADeviceContext, T> merge_func;
auto merged_in1 = merge_func(context, input1);
auto in1_height = merged_in1.height();
auto in2_dims = input2->dims();
// Heights must match the dense tensor's first dimension.
PADDLE_ENFORCE_EQ(in1_height, in2_dims[0]);
auto& in1_value = merged_in1.value();
auto& in1_rows = merged_in1.rows();
int64_t in1_row_numel = in1_value.numel() / in1_rows.size();
PADDLE_ENFORCE_EQ(in1_row_numel, input2->numel() / in1_height);
auto* in1_data = in1_value.template data<T>();
auto* in2_data = input2->data<T>();
// One block per merged row.
dim3 threads(platform::PADDLE_CUDA_NUM_THREADS, 1);
dim3 grid(in1_rows.size(), 1);
// NOTE(review): `op` is an enum living in host memory; if the kernel's
// parameter is a reference the device would dereference a host pointer —
// verify UpdateToTensorKernel takes it by value.
// NOTE(review): `cuda_data()` differs in spelling from the `CUDAData(place)`
// accessor used by MergeAdd above — confirm both exist on this Vector type.
UpdateToTensorKernel<T, platform::PADDLE_CUDA_NUM_THREADS><<<
grid, threads, 0, context.stream()>>>(in1_data, in1_rows.cuda_data(),
op, in2_data, in1_row_numel);
}
};
} // namespace scatter
} // namespace math
} // namespace operators
} // namespace paddle
|
f33c74c4a9c476d6de1ac7463d820bbcf6146285.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "driverBFS.c"
#include "cuda_queue.h"
/* BFS frontier-expansion kernel (HIP build): one warp per frontier vertex.
 * Each warp takes U = queue[i]; its lanes cooperatively scan U's adjacency
 * list and append unvisited neighbours to queue2, claiming slots with an
 * atomic counter (*nq2). Distances of newly found vertices are level+1. */
__global__ void kernel_compute_bfs(gpudata data, csrdata csrg) {
unsigned int i, j, warp_id, increment, my_location;
UL U, V, s, e, *node;
/* Warp size (provided by the host) */
int warp_size = data.warp_size;
/* Number of warps in each block */
int warps_block = blockDim.x / warp_size;
/* Global id of this thread's warp */
warp_id = blockIdx.x * warps_block + threadIdx.x / warp_size;
/* Grid-wide stride, in warps, for the outer loop */
increment = (gridDim.x * blockDim.x)/warp_size;
/* Grid-stride loop over the current frontier (*nq entries) */
for(i = warp_id; i < *(data.nq); i+= increment) {
U = data.queue[i];
/* CSR slice for U: s = first edge offset, e = degree */
s = csrg.offsets[U];
e = csrg.offsets[U+1] - s;
node = &csrg.rows[s];
/* Each lane handles a warp-strided subset of U's neighbours */
for (j = threadIdx.x % warp_size; j < e; j += warp_size) {
/* Append the neighbour to the next frontier */
V = node[j];
if ((data.visited[V] != 1) && (data.dist[V] == ULONG_MAX)) {
/* NOTE(review): check-then-mark is not atomic, so two warps can
 * race and enqueue V twice; the resulting distance is the same
 * level either way, so BFS output stays correct. */
data.visited[V] = 1;
my_location = atomicAdd((unsigned int *) data.nq2, (unsigned int) 1);
data.queue2[my_location] = V;
data.dist[V] = data.level + 1;
}
}
}
}
/* Runs a level-synchronous BFS on the GPU from `source` (HIP build).
 * Returns a Malloc'd distance array (ULONG_MAX = unreachable) owned by the
 * caller; *cudatime receives the kernel-phase elapsed time. */
UL *do_bfs_cuda(UL source, csrdata *csrgraph, csrdata *csrgraph_gpu, double *cudatime, int thread_per_block)
{
int num_threads, num_blocks, i;
// Timer event pairs (alloc/copy phase and kernel phase)
hipEvent_t exec_start, exec_stop, alloc_copy_start, alloc_copy_stop;
double alloc_copy_time = 0.0, bfs_time = 0.0;
// Parameter bundles passed to the device (GPU)
gpudata host;
gpudata dev;
// Initialise the data on the host (CPU) first
host.queue = (int *) Malloc(csrgraph->nv * sizeof(int));
host.queue2 = (int *) Malloc(csrgraph->nv * sizeof(int));
host.dist = (UL *) Malloc(csrgraph->nv * sizeof(UL));
host.visited = (char *) Malloc(csrgraph->nv * sizeof(char));
host.nq = (int *) Malloc(sizeof(int));
host.nq2 = (int *) Malloc(sizeof(int));
// Frontier starts with the single source vertex
*(host.nq) = 1;
*(host.nq2) = 0;
memset(host.queue, 0, csrgraph->nv * sizeof(int));
memset(host.queue2, 0, csrgraph->nv * sizeof(int));
memset(host.visited, 0, csrgraph->nv * sizeof(char));
for (i = 0; i < csrgraph->nv; i++) host.dist[i] = ULONG_MAX;
host.dist[source] = 0;
host.visited[source] = 1;
host.queue[0] = source;
dev.level = host.level = 0;
dev.warp_size = host.warp_size = get_warp_size();
// Allocate device memory and copy the input data to the GPU
START_CUDA_TIMER(&alloc_copy_start, &alloc_copy_stop);
copy_data_on_gpu(&host, &dev, csrgraph->nv);
alloc_copy_time = STOP_CUDA_TIMER(&alloc_copy_start, &alloc_copy_stop);
// printf("\nTime spent for allocation and copy: %.5f\n", alloc_copy_time);
*(host.nq2) = 1;
// NOTE(review): the buffer originally pointed to by host.nq is leaked here
// and neither counter buffer is freed below — presumably tolerated in this
// benchmark; confirm against swap_queues_and_check's ownership rules.
host.nq = host.nq2;
// Launch the kernels, one iteration per BFS level
START_CUDA_TIMER(&exec_start, &exec_stop);
while(1) {
set_threads_and_blocks(&num_threads, &num_blocks, dev.warp_size, *(host.nq), thread_per_block);
// Expand the current frontier into the next queue
hipLaunchKernelGGL(( kernel_compute_bfs), dim3(num_blocks), dim3(num_threads), 0, 0, dev, *csrgraph_gpu);
// Check for termination and swap the queues
if(swap_queues_and_check(&host, &dev, csrgraph->nv) == false) {
break;
}
// Move to the next BFS level
dev.level += 1;
}
// Stop the timer for the traversal phase
bfs_time = STOP_CUDA_TIMER(&exec_start, &exec_stop);
// printf("Time spent for cuda bfs: %.5f\n", bfs_time);
// Copy the distance array back to the host for verification
copy_data_on_host(&host, &dev, csrgraph->nv);
// Free device memory
free_gpu_data(&dev);
// Free host memory (host.dist is returned, so it is not freed here)
free(host.queue);
free(host.queue2);
free(host.visited);
*cudatime = bfs_time;
return host.dist;
}
/* Builds the CSR representation of the edge list on host and device, then
 * runs the CUDA BFS 10 times from `root` and reports the average kernel
 * time. Returns the distance array of the last run (caller frees it).
 * If `randsource` is non-zero, `root` is replaced by a random vertex.
 * Fixes: csrgraph.deg was allocated but never freed (leak); the unused
 * local `timer` was removed. */
UL *traverse_parallel(UL *edges, UL nedges, UL nvertices, UL root, int randsource, int seed, int thread)
{
  csrdata csrgraph, csrgraph_gpu;  // CSR structures (host and device copies)
  UL *dist;                        // array of distances from the source
  // Vars for timing
  struct timeval begin, end;
  double cudatime = 0.0, csrtime, total = 0.0;
  csrgraph.offsets = NULL;
  csrgraph.rows = NULL;
  csrgraph.deg = NULL;
  // Build the CSR data structure and mirror it on the device
  START_TIMER(begin)
  csrgraph.offsets = (UL *)Malloc((nvertices+1)*sizeof(UL));
  csrgraph.rows = (UL *)Malloc(nedges *sizeof(UL));
  csrgraph.deg = (UL *)Malloc(nvertices *sizeof(UL));
  build_csr(edges, nedges, nvertices, &csrgraph);
  copy_csr_on_gpu(&csrgraph, &csrgraph_gpu);
  END_TIMER(end);
  ELAPSED_TIME(csrtime, begin, end)
  if (randsource) {
    root = random_source(&csrgraph, seed);
    fprintf(stdout, "Random source vertex %lu\n", root);
  }
  // Average the BFS time over 10 runs; keep only the last distance array.
  for(int i = 0; i < 10; i++) {
    dist = do_bfs_cuda(root, &csrgraph, &csrgraph_gpu, &cudatime, thread);
    if(i < 9) free(dist);
    total += cudatime;
  }
  cudatime = total / 10;
  // Timing output
  fprintf(stdout, "\n");
  fprintf(stdout, "Cuda build csr= \t%.5f\n", csrtime);
  fprintf(stdout, "Cuda bfs time = \t%.5f with:= %d thread per block\n", cudatime, thread);
  fprintf(stdout, "\n");
  // Free host and device memory (csrgraph.deg included — it leaked before)
  free_gpu_csr(&csrgraph_gpu);
  if(csrgraph.offsets) free(csrgraph.offsets);
  if(csrgraph.rows)    free(csrgraph.rows);
  if(csrgraph.deg)     free(csrgraph.deg);
  return dist;
}
| f33c74c4a9c476d6de1ac7463d820bbcf6146285.cu | #include "driverBFS.c"
#include "cuda_queue.h"
/* BFS frontier-expansion kernel: one warp per frontier vertex. A warp picks
 * vertex u = queue[q]; its lanes cooperatively walk u's adjacency list and
 * push unvisited neighbours into queue2, claiming slots via atomicAdd on
 * *nq2. New vertices get distance level + 1. */
__global__ void kernel_compute_bfs(gpudata data, csrdata csrg) {
    const int wsize = data.warp_size;
    const int lane = threadIdx.x % wsize;
    /* Global id of this thread's warp and the grid-wide stride in warps. */
    const unsigned int warpsPerBlock = blockDim.x / wsize;
    const unsigned int firstWarp = blockIdx.x * warpsPerBlock + threadIdx.x / wsize;
    const unsigned int warpStride = (gridDim.x * blockDim.x) / wsize;
    /* Grid-stride loop over the *nq vertices of the current frontier. */
    for (unsigned int q = firstWarp; q < *(data.nq); q += warpStride) {
        const UL u = data.queue[q];
        /* CSR slice for u: first edge offset and degree. */
        const UL first = csrg.offsets[u];
        const UL degree = csrg.offsets[u + 1] - first;
        UL *adj = &csrg.rows[first];
        /* Lanes split u's neighbour list with a warp-sized stride. */
        for (unsigned int k = lane; k < degree; k += wsize) {
            const UL v = adj[k];
            if ((data.visited[v] != 1) && (data.dist[v] == ULONG_MAX)) {
                data.visited[v] = 1;
                unsigned int slot = atomicAdd((unsigned int *) data.nq2, (unsigned int) 1);
                data.queue2[slot] = v;
                data.dist[v] = data.level + 1;
            }
        }
    }
}
/* Runs a level-synchronous BFS on the GPU from `source`.
 * Returns a Malloc'd distance array (ULONG_MAX = unreachable) owned by the
 * caller; *cudatime receives the kernel-phase elapsed time. */
UL *do_bfs_cuda(UL source, csrdata *csrgraph, csrdata *csrgraph_gpu, double *cudatime, int thread_per_block)
{
int num_threads, num_blocks, i;
// Timer event pairs (alloc/copy phase and kernel phase)
cudaEvent_t exec_start, exec_stop, alloc_copy_start, alloc_copy_stop;
double alloc_copy_time = 0.0, bfs_time = 0.0;
// Parameter bundles passed to the device (GPU)
gpudata host;
gpudata dev;
// Initialise the data on the host (CPU) first
host.queue = (int *) Malloc(csrgraph->nv * sizeof(int));
host.queue2 = (int *) Malloc(csrgraph->nv * sizeof(int));
host.dist = (UL *) Malloc(csrgraph->nv * sizeof(UL));
host.visited = (char *) Malloc(csrgraph->nv * sizeof(char));
host.nq = (int *) Malloc(sizeof(int));
host.nq2 = (int *) Malloc(sizeof(int));
// Frontier starts with the single source vertex
*(host.nq) = 1;
*(host.nq2) = 0;
memset(host.queue, 0, csrgraph->nv * sizeof(int));
memset(host.queue2, 0, csrgraph->nv * sizeof(int));
memset(host.visited, 0, csrgraph->nv * sizeof(char));
for (i = 0; i < csrgraph->nv; i++) host.dist[i] = ULONG_MAX;
host.dist[source] = 0;
host.visited[source] = 1;
host.queue[0] = source;
dev.level = host.level = 0;
dev.warp_size = host.warp_size = get_warp_size();
// Allocate device memory and copy the input data to the GPU
START_CUDA_TIMER(&alloc_copy_start, &alloc_copy_stop);
copy_data_on_gpu(&host, &dev, csrgraph->nv);
alloc_copy_time = STOP_CUDA_TIMER(&alloc_copy_start, &alloc_copy_stop);
// printf("\nTime spent for allocation and copy: %.5f\n", alloc_copy_time);
*(host.nq2) = 1;
// NOTE(review): the buffer originally pointed to by host.nq is leaked here
// and neither counter buffer is freed below — presumably tolerated in this
// benchmark; confirm against swap_queues_and_check's ownership rules.
host.nq = host.nq2;
// Launch the kernels, one iteration per BFS level
START_CUDA_TIMER(&exec_start, &exec_stop);
while(1) {
set_threads_and_blocks(&num_threads, &num_blocks, dev.warp_size, *(host.nq), thread_per_block);
// Expand the current frontier into the next queue
kernel_compute_bfs<<<num_blocks, num_threads>>>(dev, *csrgraph_gpu);
// Check for termination and swap the queues
if(swap_queues_and_check(&host, &dev, csrgraph->nv) == false) {
break;
}
// Move to the next BFS level
dev.level += 1;
}
// Stop the timer for the traversal phase
bfs_time = STOP_CUDA_TIMER(&exec_start, &exec_stop);
// printf("Time spent for cuda bfs: %.5f\n", bfs_time);
// Copy the distance array back to the host for verification
copy_data_on_host(&host, &dev, csrgraph->nv);
// Free device memory
free_gpu_data(&dev);
// Free host memory (host.dist is returned, so it is not freed here)
free(host.queue);
free(host.queue2);
free(host.visited);
*cudatime = bfs_time;
return host.dist;
}
/* Builds the CSR representation of the edge list on host and device, then
 * runs the CUDA BFS 10 times from `root` and reports the average kernel
 * time. Returns the distance array of the last run (caller frees it).
 * If `randsource` is non-zero, `root` is replaced by a random vertex.
 * Fixes: csrgraph.deg was allocated but never freed (leak); the unused
 * local `timer` was removed. */
UL *traverse_parallel(UL *edges, UL nedges, UL nvertices, UL root, int randsource, int seed, int thread)
{
  csrdata csrgraph, csrgraph_gpu;  // CSR structures (host and device copies)
  UL *dist;                        // array of distances from the source
  // Vars for timing
  struct timeval begin, end;
  double cudatime = 0.0, csrtime, total = 0.0;
  csrgraph.offsets = NULL;
  csrgraph.rows = NULL;
  csrgraph.deg = NULL;
  // Build the CSR data structure and mirror it on the device
  START_TIMER(begin)
  csrgraph.offsets = (UL *)Malloc((nvertices+1)*sizeof(UL));
  csrgraph.rows = (UL *)Malloc(nedges *sizeof(UL));
  csrgraph.deg = (UL *)Malloc(nvertices *sizeof(UL));
  build_csr(edges, nedges, nvertices, &csrgraph);
  copy_csr_on_gpu(&csrgraph, &csrgraph_gpu);
  END_TIMER(end);
  ELAPSED_TIME(csrtime, begin, end)
  if (randsource) {
    root = random_source(&csrgraph, seed);
    fprintf(stdout, "Random source vertex %lu\n", root);
  }
  // Average the BFS time over 10 runs; keep only the last distance array.
  for(int i = 0; i < 10; i++) {
    dist = do_bfs_cuda(root, &csrgraph, &csrgraph_gpu, &cudatime, thread);
    if(i < 9) free(dist);
    total += cudatime;
  }
  cudatime = total / 10;
  // Timing output
  fprintf(stdout, "\n");
  fprintf(stdout, "Cuda build csr= \t%.5f\n", csrtime);
  fprintf(stdout, "Cuda bfs time = \t%.5f with:= %d thread per block\n", cudatime, thread);
  fprintf(stdout, "\n");
  // Free host and device memory (csrgraph.deg included — it leaked before)
  free_gpu_csr(&csrgraph_gpu);
  if(csrgraph.offsets) free(csrgraph.offsets);
  if(csrgraph.rows)    free(csrgraph.rows);
  if(csrgraph.deg)     free(csrgraph.deg);
  return dist;
}
|
28a8ec4fd006f07307ede196e927954782efcbaa.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright 2019-2023 by XGBoost Contributors
*/
#include <gtest/gtest.h>
#include <thrust/device_vector.h>
#include <xgboost/base.h> // for bst_bin_t
#include <xgboost/c_api.h>
#include <xgboost/data.h>
#include <algorithm> // for transform
#include <cmath> // for floor
#include <cstddef> // for size_t
#include <limits> // for numeric_limits
#include <string> // for string, to_string
#include <tuple> // for tuple, make_tuple
#include <vector> // for vector
#include "../../../include/xgboost/logging.h"
#include "../../../src/common/device_helpers.cuh"
#include "../../../src/common/hist_util.cuh"
#include "../../../src/common/hist_util.h"
#include "../../../src/data/device_adapter.cuh"
#include "../../../src/data/simple_dmatrix.h"
#include "../data/test_array_interface.h"
#include "../filesystem.h" // dmlc::TemporaryDirectory
#include "../helpers.h"
#include "test_hist_util.h"
namespace xgboost::common {
// CPU reference path: materialise the adapter into a single-threaded
// SimpleDMatrix and sketch it on the host, for comparison against the
// device sketcher.
template <typename AdapterT>
HistogramCuts GetHostCuts(Context const* ctx, AdapterT* adapter, int num_bins, float missing) {
  data::SimpleDMatrix materialised(adapter, missing, 1);
  return SketchOnDMatrix(ctx, &materialised, num_bins);
}
// Device sketch must agree exactly with the CPU sketch on a tiny
// single-column input (includes a negative value).
TEST(HistUtil, DeviceSketch) {
auto ctx = MakeCUDACtx(0);
int num_columns = 1;
int num_bins = 4;
std::vector<float> x = {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 7.0f, -1.0f};
int num_rows = x.size();
auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
auto device_cuts = DeviceSketch(&ctx, dmat.get(), num_bins);
Context cpu_ctx;
HistogramCuts host_cuts = SketchOnDMatrix(&cpu_ctx, dmat.get(), num_bins);
// Cut values, column pointers and per-column minima must all match.
EXPECT_EQ(device_cuts.Values(), host_cuts.Values());
EXPECT_EQ(device_cuts.Ptrs(), host_cuts.Ptrs());
EXPECT_EQ(device_cuts.MinValues(), host_cuts.MinValues());
}
// The auto-chosen sketching batch size should fill ~80% of free device
// memory when the data is larger than what fits at once.
TEST(HistUtil, SketchBatchNumElements) {
#if defined(XGBOOST_USE_RMM) && XGBOOST_USE_RMM == 1
LOG(WARNING) << "Test not runnable with RMM enabled.";
return;
#endif // defined(XGBOOST_USE_RMM) && XGBOOST_USE_RMM == 1
size_t constexpr kCols = 10000;
int device;
dh::safe_cuda(hipGetDevice(&device));
// Budget: 80% of currently free memory, measured in sketch entries.
auto avail = static_cast<size_t>(dh::AvailableMemory(device) * 0.8);
auto per_elem = detail::BytesPerElement(false);
auto avail_elem = avail / per_elem;
// 10x more data than fits -> batching must kick in at exactly the budget.
size_t rows = avail_elem / kCols * 10;
auto batch = detail::SketchBatchNumElements(0, rows, kCols, rows * kCols, device, 256, false);
ASSERT_EQ(batch, avail_elem);
}
// Peak device memory of the unweighted sketcher must stay within 5% of the
// analytically required amount (tracked via the global memory logger at
// verbosity 3).
TEST(HistUtil, DeviceSketchMemory) {
auto ctx = MakeCUDACtx(0);
int num_columns = 100;
int num_rows = 1000;
int num_bins = 256;
auto x = GenerateRandom(num_rows, num_columns);
auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
dh::GlobalMemoryLogger().Clear();
ConsoleLogger::Configure({{"verbosity", "3"}});
auto device_cuts = DeviceSketch(&ctx, dmat.get(), num_bins);
size_t bytes_required = detail::RequiredMemory(
num_rows, num_columns, num_rows * num_columns, num_bins, false);
EXPECT_LE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required * 1.05);
EXPECT_GE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required * 0.95);
ConsoleLogger::Configure({{"verbosity", "0"}});
}
// Same memory bound, but for the weighted sketcher (weights change the
// required-memory formula, hence the `true` flag below).
TEST(HistUtil, DeviceSketchWeightsMemory) {
auto ctx = MakeCUDACtx(0);
int num_columns = 100;
int num_rows = 1000;
int num_bins = 256;
auto x = GenerateRandom(num_rows, num_columns);
auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
dmat->Info().weights_.HostVector() = GenerateRandomWeights(num_rows);
dh::GlobalMemoryLogger().Clear();
ConsoleLogger::Configure({{"verbosity", "3"}});
auto device_cuts = DeviceSketch(&ctx, dmat.get(), num_bins);
ConsoleLogger::Configure({{"verbosity", "0"}});
size_t bytes_required = detail::RequiredMemory(
num_rows, num_columns, num_rows * num_columns, num_bins, true);
EXPECT_LE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required * 1.05);
EXPECT_GE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required);
}
// Repeated sketches of the same data must be bit-identical (guards against
// nondeterminism from atomics / unstable sorts on device).
TEST(HistUtil, DeviceSketchDeterminism) {
auto ctx = MakeCUDACtx(0);
int num_rows = 500;
int num_columns = 5;
int num_bins = 256;
auto x = GenerateRandom(num_rows, num_columns);
auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
auto reference_sketch = DeviceSketch(&ctx, dmat.get(), num_bins);
size_t constexpr kRounds{ 100 };
for (size_t r = 0; r < kRounds; ++r) {
auto new_sketch = DeviceSketch(&ctx, dmat.get(), num_bins);
ASSERT_EQ(reference_sketch.Values(), new_sketch.Values());
ASSERT_EQ(reference_sketch.MinValues(), new_sketch.MinValues());
}
}
// Categorical-valued data sketched without categorical feature info must
// still produce valid cuts (treated as ordinary numeric values).
TEST(HistUtil, DeviceSketchCategoricalAsNumeric) {
auto ctx = MakeCUDACtx(0);
auto categorical_sizes = {2, 6, 8, 12};
int num_bins = 256;
auto sizes = {25, 100, 1000};
for (auto n : sizes) {
for (auto num_categories : categorical_sizes) {
auto x = GenerateRandomCategoricalSingleColumn(n, num_categories);
auto dmat = GetDMatrixFromData(x, n, 1);
auto cuts = DeviceSketch(&ctx, dmat.get(), num_bins);
ValidateCuts(cuts, dmat.get(), num_bins);
}
}
}
// Shared categorical-sketch scenario, run both unweighted and weighted.
TEST(HistUtil, DeviceSketchCategoricalFeatures) {
auto ctx = MakeCUDACtx(0);
TestCategoricalSketch(1000, 256, 32, false, [ctx](DMatrix* p_fmat, int32_t num_bins) {
return DeviceSketch(&ctx, p_fmat, num_bins);
});
TestCategoricalSketch(1000, 256, 32, true, [ctx](DMatrix* p_fmat, int32_t num_bins) {
return DeviceSketch(&ctx, p_fmat, num_bins);
});
}
// Mixed-type matrix: column 0 numeric, column 1 categorical. The total cut
// count must be n_bins (numeric col) + n_categories (one cut per category).
void TestMixedSketch() {
size_t n_samples = 1000, n_features = 2, n_categories = 3;
bst_bin_t n_bins = 64;
std::vector<float> data(n_samples * n_features);
SimpleLCG gen;
SimpleRealUniformDistribution<float> cat_d{0.0f, static_cast<float>(n_categories)};
SimpleRealUniformDistribution<float> num_d{0.0f, 3.0f};
for (size_t i = 0; i < n_samples * n_features; ++i) {
// two features, row major. The first column is numeric and the second is categorical.
if (i % 2 == 0) {
data[i] = ::floor(cat_d(&gen));
} else {
data[i] = num_d(&gen);
}
}
auto m = GetDMatrixFromData(data, n_samples, n_features);
m->Info().feature_types.HostVector().push_back(FeatureType::kCategorical);
m->Info().feature_types.HostVector().push_back(FeatureType::kNumerical);
auto ctx = MakeCUDACtx(0);
auto cuts = DeviceSketch(&ctx, m.get(), n_bins);
ASSERT_EQ(cuts.Values().size(), n_bins + n_categories);
}
TEST(HistUtil, DeviceSketchMixedFeatures) { TestMixedSketch(); }
// RemoveDuplicatedCategories must de-duplicate entries of categorical
// columns only, keep numeric columns intact, and permute the weight array
// in lockstep with the entries. Weights are iota values so that each
// surviving weight identifies the (row, column) it came from.
TEST(HistUtil, RemoveDuplicatedCategories) {
bst_row_t n_samples = 512;
bst_feature_t n_features = 3;
bst_cat_t n_categories = 5;
auto ctx = MakeCUDACtx(0);
SimpleLCG rng;
SimpleRealUniformDistribution<float> cat_d{0.0f, static_cast<float>(n_categories)};
// Row-major entries: columns 0 and 2 are numeric (value == row index),
// column 1 holds random categories in [0, n_categories).
dh::device_vector<Entry> sorted_entries(n_samples * n_features);
for (std::size_t i = 0; i < n_samples; ++i) {
for (bst_feature_t j = 0; j < n_features; ++j) {
float fvalue{0.0f};
// The second column is categorical
if (j == 1) {
fvalue = ::floor(cat_d(&rng));
} else {
fvalue = i;
}
sorted_entries[i * n_features + j] = Entry{j, fvalue};
}
}
MetaInfo info;
info.num_col_ = n_features;
info.num_row_ = n_samples;
info.feature_types.HostVector() = std::vector<FeatureType>{
FeatureType::kNumerical, FeatureType::kCategorical, FeatureType::kNumerical};
ASSERT_EQ(info.feature_types.Size(), n_features);
HostDeviceVector<bst_row_t> cuts_ptr{0, n_samples, n_samples * 2, n_samples * 3};
cuts_ptr.SetDevice(0);
// Weight i == original flat position i (iota), used to track permutation.
dh::device_vector<float> weight(n_samples * n_features, 0);
dh::Iota(dh::ToSpan(weight));
dh::caching_device_vector<bst_row_t> columns_ptr(4);
for (std::size_t i = 0; i < columns_ptr.size(); ++i) {
columns_ptr[i] = i * n_samples;
}
// sort into column major
thrust::sort_by_key(sorted_entries.begin(), sorted_entries.end(), weight.begin(),
detail::EntryCompareOp());
detail::RemoveDuplicatedCategories(ctx.gpu_id, info, cuts_ptr.DeviceSpan(), &sorted_entries,
&weight, &columns_ptr);
auto const& h_cptr = cuts_ptr.ConstHostVector();
// Column 1 collapsed to its n_categories unique values; columns 0 and 2
// keep all n_samples entries.
ASSERT_EQ(h_cptr.back(), n_samples * 2 + n_categories);
// check numerical
for (std::size_t i = 0; i < n_samples; ++i) {
ASSERT_EQ(weight[i], i * 3);
}
auto beg = n_samples + n_categories;
for (std::size_t i = 0; i < n_samples; ++i) {
ASSERT_EQ(weight[i + beg], i * 3 + 2);
}
// check categorical
beg = n_samples;
for (bst_cat_t i = 0; i < n_categories; ++i) {
// all from the second column
ASSERT_EQ(static_cast<bst_feature_t>(weight[i + beg]) % n_features, 1);
}
}
// Valid cuts across a grid of (rows, bins) for a 5-column matrix.
TEST(HistUtil, DeviceSketchMultipleColumns) {
auto ctx = MakeCUDACtx(0);
auto bin_sizes = {2, 16, 256, 512};
auto sizes = {100, 1000, 1500};
int num_columns = 5;
for (auto num_rows : sizes) {
auto x = GenerateRandom(num_rows, num_columns);
auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
for (auto num_bins : bin_sizes) {
auto cuts = DeviceSketch(&ctx, dmat.get(), num_bins);
ValidateCuts(cuts, dmat.get(), num_bins);
}
}
}
// Same grid, with random sample weights attached.
TEST(HistUtil, DeviceSketchMultipleColumnsWeights) {
auto ctx = MakeCUDACtx(0);
auto bin_sizes = {2, 16, 256, 512};
auto sizes = {100, 1000, 1500};
int num_columns = 5;
for (auto num_rows : sizes) {
auto x = GenerateRandom(num_rows, num_columns);
auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
dmat->Info().weights_.HostVector() = GenerateRandomWeights(num_rows);
for (auto num_bins : bin_sizes) {
auto cuts = DeviceSketch(&ctx, dmat.get(), num_bins);
ValidateCuts(cuts, dmat.get(), num_bins);
}
}
}
// Uniform unit weights must reproduce the unweighted sketch exactly.
// NOTE(review): the suite name "HistUitl" is a long-standing typo; renaming
// it would change the registered test identity, so it is left as-is.
TEST(HistUitl, DeviceSketchWeights) {
auto ctx = MakeCUDACtx(0);
auto bin_sizes = {2, 16, 256, 512};
auto sizes = {100, 1000, 1500};
int num_columns = 5;
for (auto num_rows : sizes) {
auto x = GenerateRandom(num_rows, num_columns);
auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
auto weighted_dmat = GetDMatrixFromData(x, num_rows, num_columns);
auto& h_weights = weighted_dmat->Info().weights_.HostVector();
h_weights.resize(num_rows);
std::fill(h_weights.begin(), h_weights.end(), 1.0f);
for (auto num_bins : bin_sizes) {
auto cuts = DeviceSketch(&ctx, dmat.get(), num_bins);
auto wcuts = DeviceSketch(&ctx, weighted_dmat.get(), num_bins);
ASSERT_EQ(cuts.MinValues(), wcuts.MinValues());
ASSERT_EQ(cuts.Ptrs(), wcuts.Ptrs());
ASSERT_EQ(cuts.Values(), wcuts.Values());
ValidateCuts(cuts, dmat.get(), num_bins);
ValidateCuts(wcuts, weighted_dmat.get(), num_bins);
}
}
}
// Sketching in batches (including batch_size 0 = unbatched and a batch
// larger than the data) must yield valid cuts, and batched vs unbatched
// cut values must agree.
TEST(HistUtil, DeviceSketchBatches) {
auto ctx = MakeCUDACtx(0);
int num_bins = 256;
int num_rows = 5000;
auto batch_sizes = {0, 100, 1500, 6000};
int num_columns = 5;
for (auto batch_size : batch_sizes) {
auto x = GenerateRandom(num_rows, num_columns);
auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
auto cuts = DeviceSketch(&ctx, dmat.get(), num_bins, batch_size);
ValidateCuts(cuts, dmat.get(), num_bins);
}
num_rows = 1000;
size_t batches = 16;
auto x = GenerateRandom(num_rows * batches, num_columns);
auto dmat = GetDMatrixFromData(x, num_rows * batches, num_columns);
auto cuts_with_batches = DeviceSketch(&ctx, dmat.get(), num_bins, num_rows);
auto cuts = DeviceSketch(&ctx, dmat.get(), num_bins, 0);
auto const& cut_values_batched = cuts_with_batches.Values();
auto const& cut_values = cuts.Values();
CHECK_EQ(cut_values.size(), cut_values_batched.size());
for (size_t i = 0; i < cut_values.size(); ++i) {
// NOTE(review): a tolerance of 1e5 makes this assertion effectively
// vacuous for data in [0, 1); 1e-5 was likely intended — confirm
// whether batched sketching is expected to match that tightly before
// tightening it.
ASSERT_NEAR(cut_values_batched[i], cut_values[i], 1e5);
}
}
// External-memory (paged) DMatrix must sketch to valid cuts as well.
TEST(HistUtil, DeviceSketchMultipleColumnsExternal) {
auto ctx = MakeCUDACtx(0);
auto bin_sizes = {2, 16, 256, 512};
auto sizes = {100, 1000, 1500};
int num_columns =5;
for (auto num_rows : sizes) {
auto x = GenerateRandom(num_rows, num_columns);
dmlc::TemporaryDirectory temp;
auto dmat = GetExternalMemoryDMatrixFromData(x, num_rows, num_columns, temp);
for (auto num_bins : bin_sizes) {
auto cuts = DeviceSketch(&ctx, dmat.get(), num_bins);
ValidateCuts(cuts, dmat.get(), num_bins);
}
}
}
// See https://github.com/dmlc/xgboost/issues/5866.
// Regression test: weighted sketching over external memory used to break.
TEST(HistUtil, DeviceSketchExternalMemoryWithWeights) {
auto ctx = MakeCUDACtx(0);
auto bin_sizes = {2, 16, 256, 512};
auto sizes = {100, 1000, 1500};
int num_columns = 5;
dmlc::TemporaryDirectory temp;
for (auto num_rows : sizes) {
auto x = GenerateRandom(num_rows, num_columns);
auto dmat = GetExternalMemoryDMatrixFromData(x, num_rows, num_columns, temp);
dmat->Info().weights_.HostVector() = GenerateRandomWeights(num_rows);
for (auto num_bins : bin_sizes) {
auto cuts = DeviceSketch(&ctx, dmat.get(), num_bins);
ValidateCuts(cuts, dmat.get(), num_bins);
}
}
}
// Sketches an adapter (no weights) through the SketchContainer path and
// returns the finished cuts.
template <typename Adapter>
auto MakeUnweightedCutsForTest(Adapter adapter, int32_t num_bins, float missing, size_t batch_size = 0) {
common::HistogramCuts batched_cuts;
HostDeviceVector<FeatureType> ft;
SketchContainer sketch_container(ft, num_bins, adapter.NumColumns(), adapter.NumRows(), 0);
MetaInfo info;
AdapterDeviceSketch(adapter.Value(), num_bins, info, missing, &sketch_container, batch_size);
sketch_container.MakeCuts(&batched_cuts, info.IsColumnSplit());
return batched_cuts;
}
// Convenience: sketch `adapter` (NaN as missing) and validate against dmat.
template <typename Adapter>
void ValidateBatchedCuts(Adapter adapter, int num_bins, DMatrix* dmat, size_t batch_size = 0) {
common::HistogramCuts batched_cuts = MakeUnweightedCutsForTest(
adapter, num_bins, std::numeric_limits<float>::quiet_NaN(), batch_size);
ValidateCuts(batched_cuts, dmat, num_bins);
}
// Sketching through the Cupy array-interface adapter must match the CPU
// reference path, including handling of the missing value (-1).
TEST(HistUtil, AdapterDeviceSketch) {
int rows = 5;
int cols = 1;
int num_bins = 4;
float missing = - 1.0;
thrust::device_vector< float> data(rows*cols);
auto json_array_interface = Generate2dArrayInterface(rows, cols, "<f4", &data);
// Fill the device buffer *after* building the interface: the interface
// stores the pointer, so the values are visible through it.
data = std::vector<float >{ 1.0,2.0,3.0,4.0,5.0 };
std::string str;
Json::Dump(json_array_interface, &str);
data::CupyAdapter adapter(str);
auto device_cuts = MakeUnweightedCutsForTest(adapter, num_bins, missing);
Context ctx;
auto host_cuts = GetHostCuts(&ctx, &adapter, num_bins, missing);
EXPECT_EQ(device_cuts.Values(), host_cuts.Values());
EXPECT_EQ(device_cuts.Ptrs(), host_cuts.Ptrs());
EXPECT_EQ(device_cuts.MinValues(), host_cuts.MinValues());
}
// Adapter-path sketcher: peak device memory within 5% of the analytic
// requirement (unweighted).
TEST(HistUtil, AdapterDeviceSketchMemory) {
int num_columns = 100;
int num_rows = 1000;
int num_bins = 256;
auto x = GenerateRandom(num_rows, num_columns);
auto x_device = thrust::device_vector<float>(x);
auto adapter = AdapterFromData(x_device, num_rows, num_columns);
dh::GlobalMemoryLogger().Clear();
ConsoleLogger::Configure({{"verbosity", "3"}});
auto cuts = MakeUnweightedCutsForTest(adapter, num_bins, std::numeric_limits<float>::quiet_NaN());
ConsoleLogger::Configure({{"verbosity", "0"}});
size_t bytes_required = detail::RequiredMemory(
num_rows, num_columns, num_rows * num_columns, num_bins, false);
EXPECT_LE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required * 1.05);
EXPECT_GE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required * 0.95);
}
// Same bound exercised through an explicit SketchContainer (sliding-window
// sketching path), unweighted.
TEST(HistUtil, AdapterSketchSlidingWindowMemory) {
int num_columns = 100;
int num_rows = 1000;
int num_bins = 256;
auto x = GenerateRandom(num_rows, num_columns);
auto x_device = thrust::device_vector<float>(x);
auto adapter = AdapterFromData(x_device, num_rows, num_columns);
MetaInfo info;
dh::GlobalMemoryLogger().Clear();
ConsoleLogger::Configure({{"verbosity", "3"}});
common::HistogramCuts batched_cuts;
HostDeviceVector<FeatureType> ft;
SketchContainer sketch_container(ft, num_bins, num_columns, num_rows, 0);
AdapterDeviceSketch(adapter.Value(), num_bins, info, std::numeric_limits<float>::quiet_NaN(),
&sketch_container);
HistogramCuts cuts;
sketch_container.MakeCuts(&cuts, info.IsColumnSplit());
size_t bytes_required = detail::RequiredMemory(
num_rows, num_columns, num_rows * num_columns, num_bins, false);
EXPECT_LE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required * 1.05);
EXPECT_GE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required * 0.95);
ConsoleLogger::Configure({{"verbosity", "0"}});
}
// Sliding-window path again, with unit sample weights (weighted formula).
TEST(HistUtil, AdapterSketchSlidingWindowWeightedMemory) {
int num_columns = 100;
int num_rows = 1000;
int num_bins = 256;
auto x = GenerateRandom(num_rows, num_columns);
auto x_device = thrust::device_vector<float>(x);
auto adapter = AdapterFromData(x_device, num_rows, num_columns);
MetaInfo info;
auto& h_weights = info.weights_.HostVector();
h_weights.resize(num_rows);
std::fill(h_weights.begin(), h_weights.end(), 1.0f);
dh::GlobalMemoryLogger().Clear();
ConsoleLogger::Configure({{"verbosity", "3"}});
common::HistogramCuts batched_cuts;
HostDeviceVector<FeatureType> ft;
SketchContainer sketch_container(ft, num_bins, num_columns, num_rows, 0);
AdapterDeviceSketch(adapter.Value(), num_bins, info,
std::numeric_limits<float>::quiet_NaN(),
&sketch_container);
HistogramCuts cuts;
sketch_container.MakeCuts(&cuts, info.IsColumnSplit());
ConsoleLogger::Configure({{"verbosity", "0"}});
size_t bytes_required = detail::RequiredMemory(
num_rows, num_columns, num_rows * num_columns, num_bins, true);
EXPECT_LE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required * 1.05);
EXPECT_GE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required);
}
// Sketching one categorical column through the adapter path must produce
// exactly one cut per distinct category, sorted and unique, regardless of
// weights.
void TestCategoricalSketchAdapter(size_t n, size_t num_categories,
int32_t num_bins, bool weighted) {
auto h_x = GenerateRandomCategoricalSingleColumn(n, num_categories);
thrust::device_vector<float> x(h_x);
auto adapter = AdapterFromData(x, n, 1);
MetaInfo info;
info.num_row_ = n;
info.num_col_ = 1;
info.feature_types.HostVector().push_back(FeatureType::kCategorical);
if (weighted) {
std::vector<float> weights(n, 0);
SimpleLCG lcg;
SimpleRealUniformDistribution<float> dist(0, 1);
for (auto& v : weights) {
v = dist(&lcg);
}
info.weights_.HostVector() = weights;
}
ASSERT_EQ(info.feature_types.Size(), 1);
SketchContainer container(info.feature_types, num_bins, 1, n, 0);
AdapterDeviceSketch(adapter.Value(), num_bins, info,
std::numeric_limits<float>::quiet_NaN(), &container);
HistogramCuts cuts;
container.MakeCuts(&cuts, info.IsColumnSplit());
// Derive the set of distinct categories on device for comparison.
thrust::sort(x.begin(), x.end());
auto n_uniques = thrust::unique(x.begin(), x.end()) - x.begin();
ASSERT_NE(n_uniques, x.size());
ASSERT_EQ(cuts.TotalBins(), n_uniques);
ASSERT_EQ(n_uniques, num_categories);
auto& values = cuts.cut_values_.HostVector();
ASSERT_TRUE(std::is_sorted(values.cbegin(), values.cend()));
auto is_unique = (std::unique(values.begin(), values.end()) - values.begin()) == n_uniques;
ASSERT_TRUE(is_unique);
// Cut values must be exactly the distinct categories, in order.
x.resize(n_uniques);
h_x.resize(n_uniques);
thrust::copy(x.begin(), x.end(), h_x.begin());
for (decltype(n_uniques) i = 0; i < n_uniques; ++i) {
ASSERT_EQ(h_x[i], values[i]);
}
}
// Device-adapter sketching of a single categorical column must agree with
// the DMatrix-based reference for a range of cardinalities and sample counts.
TEST(HistUtil, AdapterDeviceSketchCategorical) {
  int num_bins = 256;
  for (auto n : {25, 100, 1000}) {
    for (auto num_categories : {2, 6, 8, 12}) {
      auto x = GenerateRandomCategoricalSingleColumn(n, num_categories);
      auto dmat = GetDMatrixFromData(x, n, 1);
      auto x_device = thrust::device_vector<float>(x);
      auto adapter = AdapterFromData(x_device, n, 1);
      ValidateBatchedCuts(adapter, num_bins, dmat.get());
      TestCategoricalSketchAdapter(n, num_categories, num_bins, true);
      TestCategoricalSketchAdapter(n, num_categories, num_bins, false);
    }
  }
}
// Multi-column sketching from a device adapter produces cuts that validate
// against the equivalent DMatrix for several bin budgets.
TEST(HistUtil, AdapterDeviceSketchMultipleColumns) {
  int num_columns = 5;
  for (auto num_rows : {100, 1000, 1500}) {
    auto x = GenerateRandom(num_rows, num_columns);
    auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
    auto x_device = thrust::device_vector<float>(x);
    for (auto num_bins : {2, 16, 256, 512}) {
      auto adapter = AdapterFromData(x_device, num_rows, num_columns);
      ValidateBatchedCuts(adapter, num_bins, dmat.get());
    }
  }
}
// Sketching must tolerate any batch size: smaller than, equal to, or larger
// than the total element count (0 means "let the implementation decide").
TEST(HistUtil, AdapterDeviceSketchBatches) {
  int num_bins = 256;
  int num_rows = 5000;
  int num_columns = 5;
  for (auto batch_size : {0, 100, 1500, 6000}) {
    auto x = GenerateRandom(num_rows, num_columns);
    auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
    auto x_device = thrust::device_vector<float>(x);
    auto adapter = AdapterFromData(x_device, num_rows, num_columns);
    ValidateBatchedCuts(adapter, num_bins, dmat.get(), batch_size);
  }
}
namespace {
// Build an n_samples x n_features dense matrix on host and device where
// every 5th entry holds the `missing` sentinel (one past the largest value).
// Returns {host data, device data, expected invalid count, validity functor}.
auto MakeData(Context const* ctx, std::size_t n_samples, bst_feature_t n_features) {
  dh::safe_cuda(hipSetDevice(ctx->gpu_id));
  auto n = n_samples * n_features;
  std::vector<float> x;
  x.resize(n);
  std::iota(x.begin(), x.end(), 0);
  std::int32_t c{0};
  // One past the largest iota value, used as the missing marker.
  float missing = n_samples * n_features;
  for (std::size_t i = 0; i < x.size(); ++i) {
    if (i % 5 == 0) {
      x[i] = missing;
      c++;
    }
  }
  thrust::device_vector<float> d_x;
  d_x = x;
  // NOTE(review): `n_invalids` is a hard-coded estimate rather than the
  // actual tally `c` computed above -- confirm the formula is intended.
  auto n_invalids = n / 10 * 2 + 1;
  auto is_valid = data::IsValidFunctor{missing};
  return std::tuple{x, d_x, n_invalids, is_valid};
}
// All four template instantiations of LaunchGetColumnSizeKernel (the two
// boolean template parameters toggled) must produce identical per-column
// counts; the <true, true> instantiation serves as the reference.
void TestGetColumnSize(std::size_t n_samples) {
  auto ctx = MakeCUDACtx(0);
  bst_feature_t n_features = 12;
  [[maybe_unused]] auto [x, d_x, n_invalids, is_valid] = MakeData(&ctx, n_samples, n_features);
  auto adapter = AdapterFromData(d_x, n_samples, n_features);
  auto batch = adapter.Value();
  auto batch_iter = dh::MakeTransformIterator<data::COOTuple>(
      thrust::make_counting_iterator(0llu),
      [=] __device__(std::size_t idx) { return batch.GetElement(idx); });
  dh::caching_device_vector<std::size_t> column_sizes_scan;
  column_sizes_scan.resize(n_features + 1);
  std::vector<std::size_t> h_column_size(column_sizes_scan.size());
  std::vector<std::size_t> h_column_size_1(column_sizes_scan.size());
  // Reference result.
  detail::LaunchGetColumnSizeKernel<decltype(batch_iter), true, true>(
      ctx.gpu_id, IterSpan{batch_iter, batch.Size()}, is_valid, dh::ToSpan(column_sizes_scan));
  thrust::copy(column_sizes_scan.begin(), column_sizes_scan.end(), h_column_size.begin());
  detail::LaunchGetColumnSizeKernel<decltype(batch_iter), true, false>(
      ctx.gpu_id, IterSpan{batch_iter, batch.Size()}, is_valid, dh::ToSpan(column_sizes_scan));
  thrust::copy(column_sizes_scan.begin(), column_sizes_scan.end(), h_column_size_1.begin());
  ASSERT_EQ(h_column_size, h_column_size_1);
  detail::LaunchGetColumnSizeKernel<decltype(batch_iter), false, true>(
      ctx.gpu_id, IterSpan{batch_iter, batch.Size()}, is_valid, dh::ToSpan(column_sizes_scan));
  thrust::copy(column_sizes_scan.begin(), column_sizes_scan.end(), h_column_size_1.begin());
  ASSERT_EQ(h_column_size, h_column_size_1);
  detail::LaunchGetColumnSizeKernel<decltype(batch_iter), false, false>(
      ctx.gpu_id, IterSpan{batch_iter, batch.Size()}, is_valid, dh::ToSpan(column_sizes_scan));
  thrust::copy(column_sizes_scan.begin(), column_sizes_scan.end(), h_column_size_1.begin());
  ASSERT_EQ(h_column_size, h_column_size_1);
}
} // namespace
// Exercise device column-size counting with a moderately large sample count.
TEST(HistUtil, GetColumnSize) {
  bst_row_t constexpr kSamples = 4096;
  TestGetColumnSize(kSamples);
}
// Check sketching from adapter or DMatrix results in the same answer
// Consistency here is useful for testing and user experience
// Sketching from a device adapter must yield exactly the same cuts as
// sketching from an equivalent DMatrix.
TEST(HistUtil, SketchingEquivalent) {
  auto ctx = MakeCUDACtx(0);
  int num_columns = 5;
  for (auto num_rows : {100, 1000, 1500}) {
    auto x = GenerateRandom(num_rows, num_columns);
    auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
    for (auto num_bins : {2, 16, 256, 512}) {
      auto dmat_cuts = DeviceSketch(&ctx, dmat.get(), num_bins);
      auto x_device = thrust::device_vector<float>(x);
      auto adapter = AdapterFromData(x_device, num_rows, num_columns);
      common::HistogramCuts adapter_cuts = MakeUnweightedCutsForTest(
          adapter, num_bins, std::numeric_limits<float>::quiet_NaN());
      EXPECT_EQ(dmat_cuts.Values(), adapter_cuts.Values());
      EXPECT_EQ(dmat_cuts.Ptrs(), adapter_cuts.Ptrs());
      EXPECT_EQ(dmat_cuts.MinValues(), adapter_cuts.MinValues());
      ValidateBatchedCuts(adapter, num_bins, dmat.get());
    }
  }
}
// With uniform group weights, DeviceSketch must produce exactly the same
// cuts as unweighted sketching (uniform weights are a no-op).
TEST(HistUtil, DeviceSketchFromGroupWeights) {
  auto ctx = MakeCUDACtx(0);
  size_t constexpr kRows = 3000, kCols = 200, kBins = 256;
  size_t constexpr kGroups = 10;
  auto m = RandomDataGenerator{kRows, kCols, 0}.GenerateDMatrix();
  // sketch with group weight
  auto& h_weights = m->Info().weights_.HostVector();
  h_weights.resize(kGroups);
  std::fill(h_weights.begin(), h_weights.end(), 1.0f);
  std::vector<bst_group_t> groups(kGroups);
  for (size_t i = 0; i < kGroups; ++i) {
    groups[i] = kRows / kGroups;
  }
  m->SetInfo("group", groups.data(), DataType::kUInt32, kGroups);
  HistogramCuts weighted_cuts = DeviceSketch(&ctx, m.get(), kBins, 0);
  // sketch with no weight
  h_weights.clear();
  HistogramCuts cuts = DeviceSketch(&ctx, m.get(), kBins, 0);
  ASSERT_EQ(cuts.Values().size(), weighted_cuts.Values().size());
  ASSERT_EQ(cuts.MinValues().size(), weighted_cuts.MinValues().size());
  ASSERT_EQ(cuts.Ptrs().size(), weighted_cuts.Ptrs().size());
  for (size_t i = 0; i < cuts.Values().size(); ++i) {
    EXPECT_EQ(cuts.Values()[i], weighted_cuts.Values()[i]) << "i:"<< i;
  }
  for (size_t i = 0; i < cuts.MinValues().size(); ++i) {
    ASSERT_EQ(cuts.MinValues()[i], weighted_cuts.MinValues()[i]);
  }
  for (size_t i = 0; i < cuts.Ptrs().size(); ++i) {
    ASSERT_EQ(cuts.Ptrs().at(i), weighted_cuts.Ptrs().at(i));
  }
  ValidateCuts(weighted_cuts, m.get(), kBins);
}
// Sketch a CuPy-interface adapter with explicit sample or group weights and
// validate the cuts against an equivalent DMatrix.  With groups, also check
// that uniform group weights match unweighted sketching exactly, and that
// non-uniform group weights still produce valid cuts.
void TestAdapterSketchFromWeights(bool with_group) {
  size_t constexpr kRows = 300, kCols = 20, kBins = 256;
  size_t constexpr kGroups = 10;
  HostDeviceVector<float> storage;
  std::string m =
      RandomDataGenerator{kRows, kCols, 0}.Device(0).GenerateArrayInterface(
          &storage);
  MetaInfo info;
  Context ctx;
  auto& h_weights = info.weights_.HostVector();
  if (with_group) {
    h_weights.resize(kGroups);
  } else {
    h_weights.resize(kRows);
  }
  // Uniform unit weights so results stay comparable to the unweighted path.
  std::fill(h_weights.begin(), h_weights.end(), 1.0f);
  std::vector<bst_group_t> groups(kGroups);
  if (with_group) {
    for (size_t i = 0; i < kGroups; ++i) {
      groups[i] = kRows / kGroups;
    }
    info.SetInfo(ctx, "group", groups.data(), DataType::kUInt32, kGroups);
  }
  info.weights_.SetDevice(0);
  info.num_row_ = kRows;
  info.num_col_ = kCols;
  data::CupyAdapter adapter(m);
  auto const& batch = adapter.Value();
  HostDeviceVector<FeatureType> ft;
  SketchContainer sketch_container(ft, kBins, kCols, kRows, 0);
  AdapterDeviceSketch(adapter.Value(), kBins, info, std::numeric_limits<float>::quiet_NaN(),
                      &sketch_container);
  common::HistogramCuts cuts;
  sketch_container.MakeCuts(&cuts, info.IsColumnSplit());
  // Mirror the same data/weights/groups into a DMatrix for validation.
  auto dmat = GetDMatrixFromData(storage.HostVector(), kRows, kCols);
  if (with_group) {
    dmat->Info().SetInfo(ctx, "group", groups.data(), DataType::kUInt32, kGroups);
  }
  dmat->Info().SetInfo(ctx, "weight", h_weights.data(), DataType::kFloat32, h_weights.size());
  dmat->Info().num_col_ = kCols;
  dmat->Info().num_row_ = kRows;
  ASSERT_EQ(cuts.Ptrs().size(), kCols + 1);
  ValidateCuts(cuts, dmat.get(), kBins);
  auto cuda_ctx = MakeCUDACtx(0);
  if (with_group) {
    // Uniform group weights must match the unweighted sketch exactly.
    dmat->Info().weights_ = decltype(dmat->Info().weights_)();  // remove weight
    HistogramCuts non_weighted = DeviceSketch(&cuda_ctx, dmat.get(), kBins, 0);
    for (size_t i = 0; i < cuts.Values().size(); ++i) {
      ASSERT_EQ(cuts.Values()[i], non_weighted.Values()[i]);
    }
    for (size_t i = 0; i < cuts.MinValues().size(); ++i) {
      ASSERT_EQ(cuts.MinValues()[i], non_weighted.MinValues()[i]);
    }
    for (size_t i = 0; i < cuts.Ptrs().size(); ++i) {
      ASSERT_EQ(cuts.Ptrs().at(i), non_weighted.Ptrs().at(i));
    }
  }
  if (with_group) {
    common::HistogramCuts weighted;
    auto& h_weights = info.weights_.HostVector();
    h_weights.resize(kGroups);
    // Generate different weight.
    for (size_t i = 0; i < h_weights.size(); ++i) {
      // FIXME(jiamingy): Some entries generated GPU test cannot pass the validate cuts if
      // we use more diverse weights, partially caused by
      // https://github.com/dmlc/xgboost/issues/7946
      h_weights[i] = (i % 2 == 0 ? 1 : 2) / static_cast<float>(kGroups);
    }
    SketchContainer sketch_container(ft, kBins, kCols, kRows, 0);
    AdapterDeviceSketch(adapter.Value(), kBins, info, std::numeric_limits<float>::quiet_NaN(),
                        &sketch_container);
    sketch_container.MakeCuts(&weighted, info.IsColumnSplit());
    ValidateCuts(weighted, dmat.get(), kBins);
  }
}
// Run the weighted-sketch adapter test both without and with group info.
TEST(HistUtil, AdapterSketchFromWeights) {
  for (bool with_group : {false, true}) {
    TestAdapterSketchFromWeights(with_group);
  }
}
namespace {
// Fixture parameterised by (is_ltr, n_samples, n_bins).  Verifies that
// DeviceSketchWithHessian is equivalent to sketching with weight * hessian
// merged into the per-sample weights, for both LTR and regression setups.
class DeviceSketchWithHessianTest
    : public ::testing::TestWithParam<std::tuple<bool, bst_row_t, bst_bin_t>> {
  bst_feature_t n_features_ = 5;
  bst_group_t n_groups_{3};
  // Random hessian values shuffled with a fixed seed for reproducibility.
  auto GenerateHessian(Context const* ctx, bst_row_t n_samples) const {
    HostDeviceVector<float> hessian;
    auto& h_hess = hessian.HostVector();
    h_hess = GenerateRandomWeights(n_samples);
    std::mt19937 rng(0);
    std::shuffle(h_hess.begin(), h_hess.end(), rng);
    hessian.SetDevice(ctx->Device());
    return hessian;
  }
  // Regression case: hessian-aware sketching must be within kRtEps of plain
  // sketching with weights replaced by w[i] * hessian[i].
  void CheckReg(Context const* ctx, std::shared_ptr<DMatrix> p_fmat, bst_bin_t n_bins,
                HostDeviceVector<float> const& hessian, std::vector<float> const& w,
                std::size_t n_elements) const {
    auto const& h_hess = hessian.ConstHostVector();
    {
      auto& h_weight = p_fmat->Info().weights_.HostVector();
      h_weight = w;
    }
    HistogramCuts cuts_hess =
        DeviceSketchWithHessian(ctx, p_fmat.get(), n_bins, hessian.ConstDeviceSpan(), n_elements);
    ValidateCuts(cuts_hess, p_fmat.get(), n_bins);
    // merge hessian
    {
      auto& h_weight = p_fmat->Info().weights_.HostVector();
      ASSERT_EQ(h_weight.size(), h_hess.size());
      for (std::size_t i = 0; i < h_weight.size(); ++i) {
        h_weight[i] = w[i] * h_hess[i];
      }
    }
    HistogramCuts cuts_wh = DeviceSketch(ctx, p_fmat.get(), n_bins, n_elements);
    ValidateCuts(cuts_wh, p_fmat.get(), n_bins);
    ASSERT_EQ(cuts_hess.Values().size(), cuts_wh.Values().size());
    for (std::size_t i = 0; i < cuts_hess.Values().size(); ++i) {
      ASSERT_NEAR(cuts_wh.Values()[i], cuts_hess.Values()[i], kRtEps);
    }
    // Restore the original weights for the caller.
    p_fmat->Info().weights_.HostVector() = w;
  }
 protected:
  Context ctx_ = MakeCUDACtx(0);
  // LTR case: group weights combined with per-sample hessian.
  void TestLTR(Context const* ctx, bst_row_t n_samples, bst_bin_t n_bins,
               std::size_t n_elements) const {
    auto x = GenerateRandom(n_samples, n_features_);
    // Three groups; first two of equal size, the rest in the last group.
    std::vector<bst_group_t> gptr;
    gptr.resize(n_groups_ + 1, 0);
    gptr[1] = n_samples / n_groups_;
    gptr[2] = n_samples / n_groups_ + gptr[1];
    gptr.back() = n_samples;
    auto hessian = this->GenerateHessian(ctx, n_samples);
    auto const& h_hess = hessian.ConstHostVector();
    auto p_fmat = GetDMatrixFromData(x, n_samples, n_features_);
    p_fmat->Info().group_ptr_ = gptr;
    // test with constant group weight
    std::vector<float> w(n_groups_, 1.0f);
    p_fmat->Info().weights_.HostVector() = w;
    HistogramCuts cuts_hess =
        DeviceSketchWithHessian(ctx, p_fmat.get(), n_bins, hessian.ConstDeviceSpan(), n_elements);
    // make validation easier by converting it into sample weight.
    p_fmat->Info().weights_.HostVector() = h_hess;
    p_fmat->Info().group_ptr_.clear();
    ValidateCuts(cuts_hess, p_fmat.get(), n_bins);
    // restore ltr properties
    p_fmat->Info().weights_.HostVector() = w;
    p_fmat->Info().group_ptr_ = gptr;
    // test with random group weight
    w = GenerateRandomWeights(n_groups_);
    p_fmat->Info().weights_.HostVector() = w;
    cuts_hess =
        DeviceSketchWithHessian(ctx, p_fmat.get(), n_bins, hessian.ConstDeviceSpan(), n_elements);
    // make validation easier by converting it into sample weight.
    p_fmat->Info().weights_.HostVector() = h_hess;
    p_fmat->Info().group_ptr_.clear();
    ValidateCuts(cuts_hess, p_fmat.get(), n_bins);
    // merge hessian with sample weight
    p_fmat->Info().weights_.Resize(n_samples);
    p_fmat->Info().group_ptr_.clear();
    for (std::size_t i = 0; i < h_hess.size(); ++i) {
      auto gidx = dh::SegmentId(Span{gptr.data(), gptr.size()}, i);
      p_fmat->Info().weights_.HostVector()[i] = w[gidx] * h_hess[i];
    }
    auto cuts = DeviceSketch(ctx, p_fmat.get(), n_bins, n_elements);
    ValidateCuts(cuts, p_fmat.get(), n_bins);
    ASSERT_EQ(cuts.Values().size(), cuts_hess.Values().size());
    for (std::size_t i = 0; i < cuts.Values().size(); ++i) {
      EXPECT_NEAR(cuts.Values()[i], cuts_hess.Values()[i], 1e-4f);
    }
  }
  void TestRegression(Context const* ctx, bst_row_t n_samples, bst_bin_t n_bins,
                      std::size_t n_elements) const {
    auto x = GenerateRandom(n_samples, n_features_);
    auto p_fmat = GetDMatrixFromData(x, n_samples, n_features_);
    std::vector<float> w = GenerateRandomWeights(n_samples);
    auto hessian = this->GenerateHessian(ctx, n_samples);
    this->CheckReg(ctx, p_fmat, n_bins, hessian, w, n_elements);
  }
};
// Cartesian product of (task, n_samples, n_bins) configurations, covering
// both the LTR (true) and regression (false) code paths.
auto MakeParamsForTest() {
  std::vector<std::tuple<bool, bst_row_t, bst_bin_t>> configs;
  for (bst_row_t n_samples : {1, 2, 256, 512, 1000, 1500}) {
    for (bst_bin_t n_bins : {2, 16, 256, 512}) {
      for (bool is_ltr : {true, false}) {
        configs.emplace_back(is_ltr, n_samples, n_bins);
      }
    }
  }
  return configs;
}
} // namespace
TEST_P(DeviceSketchWithHessianTest, DeviceSketchWithHessian) {
  auto const [is_ltr, n_samples, n_bins] = GetParam();
  // Exercise the auto batch size (0) and a small fixed one (512).
  for (std::size_t n_elements : {std::size_t{0}, std::size_t{512}}) {
    if (is_ltr) {
      this->TestLTR(&ctx_, n_samples, n_bins, n_elements);
    } else {
      this->TestRegression(&ctx_, n_samples, n_bins, n_elements);
    }
  }
}
// Instantiate for all (task, n_samples, n_bins) combinations; the lambda
// builds readable test names such as "ltr_1000_256".
INSTANTIATE_TEST_SUITE_P(
    HistUtil, DeviceSketchWithHessianTest, ::testing::ValuesIn(MakeParamsForTest()),
    [](::testing::TestParamInfo<DeviceSketchWithHessianTest::ParamType> const& info) {
      auto task = std::get<0>(info.param) ? "ltr" : "reg";
      auto n_samples = std::to_string(std::get<1>(info.param));
      auto n_bins = std::to_string(std::get<2>(info.param));
      return std::string{task} + "_" + n_samples + "_" + n_bins;
    });
} // namespace xgboost::common
| 28a8ec4fd006f07307ede196e927954782efcbaa.cu | /**
* Copyright 2019-2023 by XGBoost Contributors
*/
#include <gtest/gtest.h>
#include <thrust/device_vector.h>
#include <xgboost/base.h> // for bst_bin_t
#include <xgboost/c_api.h>
#include <xgboost/data.h>
#include <algorithm> // for transform
#include <cmath> // for floor
#include <cstddef> // for size_t
#include <limits> // for numeric_limits
#include <string> // for string, to_string
#include <tuple> // for tuple, make_tuple
#include <vector> // for vector
#include "../../../include/xgboost/logging.h"
#include "../../../src/common/device_helpers.cuh"
#include "../../../src/common/hist_util.cuh"
#include "../../../src/common/hist_util.h"
#include "../../../src/data/device_adapter.cuh"
#include "../../../src/data/simple_dmatrix.h"
#include "../data/test_array_interface.h"
#include "../filesystem.h" // dmlc::TemporaryDirectory
#include "../helpers.h"
#include "test_hist_util.h"
namespace xgboost::common {
// CPU reference: materialise the adapter into a SimpleDMatrix and sketch on
// the host, so device results can be compared against it.
template <typename AdapterT>
HistogramCuts GetHostCuts(Context const* ctx, AdapterT* adapter, int num_bins, float missing) {
  data::SimpleDMatrix dmat{adapter, missing, 1};
  return SketchOnDMatrix(ctx, &dmat, num_bins);
}
// Device sketching must produce exactly the same cuts as the host (CPU)
// implementation on a tiny single-column input containing a negative value.
TEST(HistUtil, DeviceSketch) {
  auto ctx = MakeCUDACtx(0);
  int num_columns = 1;
  int num_bins = 4;
  std::vector<float> x = {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 7.0f, -1.0f};
  int num_rows = x.size();
  auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
  auto device_cuts = DeviceSketch(&ctx, dmat.get(), num_bins);
  Context cpu_ctx;
  HistogramCuts host_cuts = SketchOnDMatrix(&cpu_ctx, dmat.get(), num_bins);
  EXPECT_EQ(device_cuts.Values(), host_cuts.Values());
  EXPECT_EQ(device_cuts.Ptrs(), host_cuts.Ptrs());
  EXPECT_EQ(device_cuts.MinValues(), host_cuts.MinValues());
}
// When the data exceeds available device memory, the sketch batch size must
// be clamped to the number of elements that fit.
TEST(HistUtil, SketchBatchNumElements) {
#if defined(XGBOOST_USE_RMM) && XGBOOST_USE_RMM == 1
  // The RMM pool allocator invalidates the free-memory query below.
  LOG(WARNING) << "Test not runnable with RMM enabled.";
  return;
#endif  // defined(XGBOOST_USE_RMM) && XGBOOST_USE_RMM == 1
  size_t constexpr kCols = 10000;
  int device;
  dh::safe_cuda(cudaGetDevice(&device));
  // Assume 80% of free device memory is usable for sketching.
  auto avail = static_cast<size_t>(dh::AvailableMemory(device) * 0.8);
  auto per_elem = detail::BytesPerElement(false);
  auto avail_elem = avail / per_elem;
  // 10x more rows than fit, forcing the clamp.
  size_t rows = avail_elem / kCols * 10;
  auto batch = detail::SketchBatchNumElements(0, rows, kCols, rows * kCols, device, 256, false);
  ASSERT_EQ(batch, avail_elem);
}
// Peak device memory for unweighted DMatrix sketching must fall within
// [0.95, 1.05] x the RequiredMemory estimate.
TEST(HistUtil, DeviceSketchMemory) {
  auto ctx = MakeCUDACtx(0);
  int num_columns = 100;
  int num_rows = 1000;
  int num_bins = 256;
  auto x = GenerateRandom(num_rows, num_columns);
  auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
  // Verbosity 3 enables the device memory logger while sketching runs.
  dh::GlobalMemoryLogger().Clear();
  ConsoleLogger::Configure({{"verbosity", "3"}});
  auto device_cuts = DeviceSketch(&ctx, dmat.get(), num_bins);
  size_t bytes_required = detail::RequiredMemory(
      num_rows, num_columns, num_rows * num_columns, num_bins, false);
  EXPECT_LE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required * 1.05);
  EXPECT_GE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required * 0.95);
  ConsoleLogger::Configure({{"verbosity", "0"}});
}
// Same as DeviceSketchMemory, but with per-sample weights attached; the
// estimate uses `true` to account for the weight buffer.
TEST(HistUtil, DeviceSketchWeightsMemory) {
  auto ctx = MakeCUDACtx(0);
  int num_columns = 100;
  int num_rows = 1000;
  int num_bins = 256;
  auto x = GenerateRandom(num_rows, num_columns);
  auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
  dmat->Info().weights_.HostVector() = GenerateRandomWeights(num_rows);
  dh::GlobalMemoryLogger().Clear();
  ConsoleLogger::Configure({{"verbosity", "3"}});
  auto device_cuts = DeviceSketch(&ctx, dmat.get(), num_bins);
  ConsoleLogger::Configure({{"verbosity", "0"}});
  size_t bytes_required = detail::RequiredMemory(
      num_rows, num_columns, num_rows * num_columns, num_bins, true);
  EXPECT_LE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required * 1.05);
  EXPECT_GE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required);
}
// Repeated sketching of identical data must be bit-wise reproducible.
TEST(HistUtil, DeviceSketchDeterminism) {
  auto ctx = MakeCUDACtx(0);
  int num_rows = 500;
  int num_columns = 5;
  int num_bins = 256;
  auto x = GenerateRandom(num_rows, num_columns);
  auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
  auto reference_sketch = DeviceSketch(&ctx, dmat.get(), num_bins);
  for (size_t round = 0; round < 100; ++round) {
    auto sketch = DeviceSketch(&ctx, dmat.get(), num_bins);
    ASSERT_EQ(reference_sketch.Values(), sketch.Values());
    ASSERT_EQ(reference_sketch.MinValues(), sketch.MinValues());
  }
}
// Categorical data sketched without type information (i.e. treated as plain
// numeric) must still yield valid cuts.
TEST(HistUtil, DeviceSketchCategoricalAsNumeric) {
  auto ctx = MakeCUDACtx(0);
  int num_bins = 256;
  for (auto n : {25, 100, 1000}) {
    for (auto num_categories : {2, 6, 8, 12}) {
      auto x = GenerateRandomCategoricalSingleColumn(n, num_categories);
      auto dmat = GetDMatrixFromData(x, n, 1);
      auto cuts = DeviceSketch(&ctx, dmat.get(), num_bins);
      ValidateCuts(cuts, dmat.get(), num_bins);
    }
  }
}
// Run the shared categorical-sketch test with and without weights, using
// the device sketcher as the implementation under test.
TEST(HistUtil, DeviceSketchCategoricalFeatures) {
  auto ctx = MakeCUDACtx(0);
  auto sketch = [ctx](DMatrix* p_fmat, int32_t num_bins) {
    return DeviceSketch(&ctx, p_fmat, num_bins);
  };
  TestCategoricalSketch(1000, 256, 32, false, sketch);
  TestCategoricalSketch(1000, 256, 32, true, sketch);
}
// Sketch a matrix mixing one categorical and one numerical column; the total
// number of cut values must be n_bins (numerical) + n_categories.
void TestMixedSketch() {
  size_t n_samples = 1000, n_features = 2, n_categories = 3;
  bst_bin_t n_bins = 64;
  std::vector<float> data(n_samples * n_features);
  SimpleLCG gen;
  SimpleRealUniformDistribution<float> cat_d{0.0f, static_cast<float>(n_categories)};
  SimpleRealUniformDistribution<float> num_d{0.0f, 3.0f};
  for (size_t i = 0; i < n_samples * n_features; ++i) {
    // Two features, row major: even indices (column 0) get categorical
    // values, odd indices (column 1) get numerical values, matching the
    // feature_types pushed below.
    if (i % 2 == 0) {
      data[i] = std::floor(cat_d(&gen));
    } else {
      data[i] = num_d(&gen);
    }
  }
  auto m = GetDMatrixFromData(data, n_samples, n_features);
  m->Info().feature_types.HostVector().push_back(FeatureType::kCategorical);
  m->Info().feature_types.HostVector().push_back(FeatureType::kNumerical);
  auto ctx = MakeCUDACtx(0);
  auto cuts = DeviceSketch(&ctx, m.get(), n_bins);
  // Numerical column contributes n_bins cuts, categorical one per category.
  ASSERT_EQ(cuts.Values().size(), n_bins + n_categories);
}
// Smoke test for the mixed categorical/numerical sketch path.
TEST(HistUtil, DeviceSketchMixedFeatures) { TestMixedSketch(); }
// detail::RemoveDuplicatedCategories on a 3-column batch where only the
// middle column is categorical: duplicates in that column are collapsed
// while both numerical columns (and their paired weights) stay intact.
TEST(HistUtil, RemoveDuplicatedCategories) {
  bst_row_t n_samples = 512;
  bst_feature_t n_features = 3;
  bst_cat_t n_categories = 5;
  auto ctx = MakeCUDACtx(0);
  SimpleLCG rng;
  SimpleRealUniformDistribution<float> cat_d{0.0f, static_cast<float>(n_categories)};
  dh::device_vector<Entry> sorted_entries(n_samples * n_features);
  for (std::size_t i = 0; i < n_samples; ++i) {
    for (bst_feature_t j = 0; j < n_features; ++j) {
      float fvalue{0.0f};
      // The second column is categorical
      if (j == 1) {
        fvalue = std::floor(cat_d(&rng));
      } else {
        // Numerical columns hold the row index -> all values unique.
        fvalue = i;
      }
      sorted_entries[i * n_features + j] = Entry{j, fvalue};
    }
  }
  MetaInfo info;
  info.num_col_ = n_features;
  info.num_row_ = n_samples;
  info.feature_types.HostVector() = std::vector<FeatureType>{
      FeatureType::kNumerical, FeatureType::kCategorical, FeatureType::kNumerical};
  ASSERT_EQ(info.feature_types.Size(), n_features);
  HostDeviceVector<bst_row_t> cuts_ptr{0, n_samples, n_samples * 2, n_samples * 3};
  cuts_ptr.SetDevice(0);
  // Weight k == original entry index k, so survivors can be traced back.
  dh::device_vector<float> weight(n_samples * n_features, 0);
  dh::Iota(dh::ToSpan(weight));
  dh::caching_device_vector<bst_row_t> columns_ptr(4);
  for (std::size_t i = 0; i < columns_ptr.size(); ++i) {
    columns_ptr[i] = i * n_samples;
  }
  // sort into column major
  thrust::sort_by_key(sorted_entries.begin(), sorted_entries.end(), weight.begin(),
                      detail::EntryCompareOp());
  detail::RemoveDuplicatedCategories(ctx.gpu_id, info, cuts_ptr.DeviceSpan(), &sorted_entries,
                                     &weight, &columns_ptr);
  auto const& h_cptr = cuts_ptr.ConstHostVector();
  // Categorical column shrinks to one entry per category.
  ASSERT_EQ(h_cptr.back(), n_samples * 2 + n_categories);
  // check numerical
  for (std::size_t i = 0; i < n_samples; ++i) {
    ASSERT_EQ(weight[i], i * 3);
  }
  auto beg = n_samples + n_categories;
  for (std::size_t i = 0; i < n_samples; ++i) {
    ASSERT_EQ(weight[i + beg], i * 3 + 2);
  }
  // check categorical
  beg = n_samples;
  for (bst_cat_t i = 0; i < n_categories; ++i) {
    // all from the second column
    ASSERT_EQ(static_cast<bst_feature_t>(weight[i + beg]) % n_features, 1);
  }
}
// Multi-column device sketching yields valid cuts across a range of sample
// counts and bin budgets.
TEST(HistUtil, DeviceSketchMultipleColumns) {
  auto ctx = MakeCUDACtx(0);
  int num_columns = 5;
  for (auto num_rows : {100, 1000, 1500}) {
    auto x = GenerateRandom(num_rows, num_columns);
    auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
    for (auto num_bins : {2, 16, 256, 512}) {
      auto cuts = DeviceSketch(&ctx, dmat.get(), num_bins);
      ValidateCuts(cuts, dmat.get(), num_bins);
    }
  }
}
// Same as DeviceSketchMultipleColumns, but with random per-sample weights.
TEST(HistUtil, DeviceSketchMultipleColumnsWeights) {
  auto ctx = MakeCUDACtx(0);
  int num_columns = 5;
  for (auto num_rows : {100, 1000, 1500}) {
    auto x = GenerateRandom(num_rows, num_columns);
    auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
    dmat->Info().weights_.HostVector() = GenerateRandomWeights(num_rows);
    for (auto num_bins : {2, 16, 256, 512}) {
      auto cuts = DeviceSketch(&ctx, dmat.get(), num_bins);
      ValidateCuts(cuts, dmat.get(), num_bins);
    }
  }
}
// Suite name fixed: was misspelled "HistUitl", which split this test off
// from the HistUtil suite in reports and --gtest_filter=HistUtil.* runs.
//
// Uniform unit weights must not change the sketch: weighted and unweighted
// cuts are required to be identical, and both must validate.
TEST(HistUtil, DeviceSketchWeights) {
  auto ctx = MakeCUDACtx(0);
  auto bin_sizes = {2, 16, 256, 512};
  auto sizes = {100, 1000, 1500};
  int num_columns = 5;
  for (auto num_rows : sizes) {
    auto x = GenerateRandom(num_rows, num_columns);
    auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
    auto weighted_dmat = GetDMatrixFromData(x, num_rows, num_columns);
    auto& h_weights = weighted_dmat->Info().weights_.HostVector();
    h_weights.resize(num_rows);
    std::fill(h_weights.begin(), h_weights.end(), 1.0f);
    for (auto num_bins : bin_sizes) {
      auto cuts = DeviceSketch(&ctx, dmat.get(), num_bins);
      auto wcuts = DeviceSketch(&ctx, weighted_dmat.get(), num_bins);
      ASSERT_EQ(cuts.MinValues(), wcuts.MinValues());
      ASSERT_EQ(cuts.Ptrs(), wcuts.Ptrs());
      ASSERT_EQ(cuts.Values(), wcuts.Values());
      ValidateCuts(cuts, dmat.get(), num_bins);
      ValidateCuts(wcuts, weighted_dmat.get(), num_bins);
    }
  }
}
// Sketching with various batch sizes must yield valid cuts, and batched
// sketching must agree with unbatched sketching of the same data.
TEST(HistUtil, DeviceSketchBatches) {
  auto ctx = MakeCUDACtx(0);
  int num_bins = 256;
  int num_rows = 5000;
  auto batch_sizes = {0, 100, 1500, 6000};
  int num_columns = 5;
  for (auto batch_size : batch_sizes) {
    auto x = GenerateRandom(num_rows, num_columns);
    auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
    auto cuts = DeviceSketch(&ctx, dmat.get(), num_bins, batch_size);
    ValidateCuts(cuts, dmat.get(), num_bins);
  }
  // Compare batched (16 batches) against unbatched sketching.
  num_rows = 1000;
  size_t batches = 16;
  auto x = GenerateRandom(num_rows * batches, num_columns);
  auto dmat = GetDMatrixFromData(x, num_rows * batches, num_columns);
  auto cuts_with_batches = DeviceSketch(&ctx, dmat.get(), num_bins, num_rows);
  auto cuts = DeviceSketch(&ctx, dmat.get(), num_bins, 0);
  auto const& cut_values_batched = cuts_with_batches.Values();
  auto const& cut_values = cuts.Values();
  CHECK_EQ(cut_values.size(), cut_values_batched.size());
  for (size_t i = 0; i < cut_values.size(); ++i) {
    // NOTE(review): tolerance `1e5` looks like a typo for `1e-5`; as written
    // this ASSERT_NEAR is effectively vacuous -- confirm intent upstream.
    ASSERT_NEAR(cut_values_batched[i], cut_values[i], 1e5);
  }
}
// An external-memory DMatrix must also sketch into valid cuts.
TEST(HistUtil, DeviceSketchMultipleColumnsExternal) {
  auto ctx = MakeCUDACtx(0);
  int num_columns = 5;
  for (auto num_rows : {100, 1000, 1500}) {
    auto x = GenerateRandom(num_rows, num_columns);
    dmlc::TemporaryDirectory temp;
    auto dmat = GetExternalMemoryDMatrixFromData(x, num_rows, num_columns, temp);
    for (auto num_bins : {2, 16, 256, 512}) {
      auto cuts = DeviceSketch(&ctx, dmat.get(), num_bins);
      ValidateCuts(cuts, dmat.get(), num_bins);
    }
  }
}
// See https://github.com/dmlc/xgboost/issues/5866.
// Regression test for https://github.com/dmlc/xgboost/issues/5866:
// weighted sketching from external memory must still produce valid cuts.
TEST(HistUtil, DeviceSketchExternalMemoryWithWeights) {
  auto ctx = MakeCUDACtx(0);
  int num_columns = 5;
  dmlc::TemporaryDirectory temp;
  for (auto num_rows : {100, 1000, 1500}) {
    auto x = GenerateRandom(num_rows, num_columns);
    auto dmat = GetExternalMemoryDMatrixFromData(x, num_rows, num_columns, temp);
    dmat->Info().weights_.HostVector() = GenerateRandomWeights(num_rows);
    for (auto num_bins : {2, 16, 256, 512}) {
      auto cuts = DeviceSketch(&ctx, dmat.get(), num_bins);
      ValidateCuts(cuts, dmat.get(), num_bins);
    }
  }
}
// Build cuts from an adapter without weights; `batch_size` 0 lets the
// implementation pick the sketch batch size.
template <typename Adapter>
auto MakeUnweightedCutsForTest(Adapter adapter, int32_t num_bins, float missing, size_t batch_size = 0) {
  common::HistogramCuts batched_cuts;
  HostDeviceVector<FeatureType> ft;
  SketchContainer sketch_container(ft, num_bins, adapter.NumColumns(), adapter.NumRows(), 0);
  MetaInfo info;
  AdapterDeviceSketch(adapter.Value(), num_bins, info, missing, &sketch_container, batch_size);
  sketch_container.MakeCuts(&batched_cuts, info.IsColumnSplit());
  return batched_cuts;
}
// Sketch via the adapter path and validate the resulting cuts against the
// reference DMatrix.
template <typename Adapter>
void ValidateBatchedCuts(Adapter adapter, int num_bins, DMatrix* dmat, size_t batch_size = 0) {
  auto const missing = std::numeric_limits<float>::quiet_NaN();
  auto cuts = MakeUnweightedCutsForTest(adapter, num_bins, missing, batch_size);
  ValidateCuts(cuts, dmat, num_bins);
}
// Sketching through a CuPy array-interface adapter must match host cuts.
TEST(HistUtil, AdapterDeviceSketch) {
  int rows = 5;
  int cols = 1;
  int num_bins = 4;
  float missing = - 1.0;
  thrust::device_vector< float> data(rows*cols);
  auto json_array_interface = Generate2dArrayInterface(rows, cols, "<f4", &data);
  // Data is assigned after the interface is generated; assumes the vector's
  // allocation is reused (same size), keeping the captured device pointer
  // valid -- TODO(review) confirm.
  data = std::vector<float >{ 1.0,2.0,3.0,4.0,5.0 };
  std::string str;
  Json::Dump(json_array_interface, &str);
  data::CupyAdapter adapter(str);
  auto device_cuts = MakeUnweightedCutsForTest(adapter, num_bins, missing);
  Context ctx;
  auto host_cuts = GetHostCuts(&ctx, &adapter, num_bins, missing);
  EXPECT_EQ(device_cuts.Values(), host_cuts.Values());
  EXPECT_EQ(device_cuts.Ptrs(), host_cuts.Ptrs());
  EXPECT_EQ(device_cuts.MinValues(), host_cuts.MinValues());
}
// Peak device memory for unweighted adapter sketching must fall within
// [0.95, 1.05] x the RequiredMemory estimate.
TEST(HistUtil, AdapterDeviceSketchMemory) {
  int num_columns = 100;
  int num_rows = 1000;
  int num_bins = 256;
  auto x = GenerateRandom(num_rows, num_columns);
  auto x_device = thrust::device_vector<float>(x);
  auto adapter = AdapterFromData(x_device, num_rows, num_columns);
  // Verbosity 3 enables the device memory logger.
  dh::GlobalMemoryLogger().Clear();
  ConsoleLogger::Configure({{"verbosity", "3"}});
  auto cuts = MakeUnweightedCutsForTest(adapter, num_bins, std::numeric_limits<float>::quiet_NaN());
  ConsoleLogger::Configure({{"verbosity", "0"}});
  size_t bytes_required = detail::RequiredMemory(
      num_rows, num_columns, num_rows * num_columns, num_bins, false);
  EXPECT_LE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required * 1.05);
  EXPECT_GE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required * 0.95);
}
// Peak device memory of the sliding-window sketch (unweighted) must fall
// within [0.95, 1.05] x the RequiredMemory estimate.
TEST(HistUtil, AdapterSketchSlidingWindowMemory) {
  int num_columns = 100;
  int num_rows = 1000;
  int num_bins = 256;
  auto x = GenerateRandom(num_rows, num_columns);
  auto x_device = thrust::device_vector<float>(x);
  auto adapter = AdapterFromData(x_device, num_rows, num_columns);
  MetaInfo info;
  // Verbosity 3 enables the device memory logger while sketching runs.
  dh::GlobalMemoryLogger().Clear();
  ConsoleLogger::Configure({{"verbosity", "3"}});
  common::HistogramCuts batched_cuts;
  HostDeviceVector<FeatureType> ft;
  SketchContainer sketch_container(ft, num_bins, num_columns, num_rows, 0);
  AdapterDeviceSketch(adapter.Value(), num_bins, info, std::numeric_limits<float>::quiet_NaN(),
                      &sketch_container);
  HistogramCuts cuts;
  sketch_container.MakeCuts(&cuts, info.IsColumnSplit());
  size_t bytes_required = detail::RequiredMemory(
      num_rows, num_columns, num_rows * num_columns, num_bins, false);
  EXPECT_LE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required * 1.05);
  EXPECT_GE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required * 0.95);
  ConsoleLogger::Configure({{"verbosity", "0"}});
}
// Weighted variant of the sliding-window memory test; the estimate uses
// `true` to account for the weight buffer.
TEST(HistUtil, AdapterSketchSlidingWindowWeightedMemory) {
  int num_columns = 100;
  int num_rows = 1000;
  int num_bins = 256;
  auto x = GenerateRandom(num_rows, num_columns);
  auto x_device = thrust::device_vector<float>(x);
  auto adapter = AdapterFromData(x_device, num_rows, num_columns);
  MetaInfo info;
  auto& h_weights = info.weights_.HostVector();
  h_weights.resize(num_rows);
  std::fill(h_weights.begin(), h_weights.end(), 1.0f);
  // Verbosity 3 enables the device memory logger while sketching runs.
  dh::GlobalMemoryLogger().Clear();
  ConsoleLogger::Configure({{"verbosity", "3"}});
  common::HistogramCuts batched_cuts;
  HostDeviceVector<FeatureType> ft;
  SketchContainer sketch_container(ft, num_bins, num_columns, num_rows, 0);
  AdapterDeviceSketch(adapter.Value(), num_bins, info,
                      std::numeric_limits<float>::quiet_NaN(),
                      &sketch_container);
  HistogramCuts cuts;
  sketch_container.MakeCuts(&cuts, info.IsColumnSplit());
  ConsoleLogger::Configure({{"verbosity", "0"}});
  size_t bytes_required = detail::RequiredMemory(
      num_rows, num_columns, num_rows * num_columns, num_bins, true);
  EXPECT_LE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required * 1.05);
  EXPECT_GE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required);
}
void TestCategoricalSketchAdapter(size_t n, size_t num_categories,
int32_t num_bins, bool weighted) {
auto h_x = GenerateRandomCategoricalSingleColumn(n, num_categories);
thrust::device_vector<float> x(h_x);
auto adapter = AdapterFromData(x, n, 1);
MetaInfo info;
info.num_row_ = n;
info.num_col_ = 1;
info.feature_types.HostVector().push_back(FeatureType::kCategorical);
if (weighted) {
std::vector<float> weights(n, 0);
SimpleLCG lcg;
SimpleRealUniformDistribution<float> dist(0, 1);
for (auto& v : weights) {
v = dist(&lcg);
}
info.weights_.HostVector() = weights;
}
ASSERT_EQ(info.feature_types.Size(), 1);
SketchContainer container(info.feature_types, num_bins, 1, n, 0);
AdapterDeviceSketch(adapter.Value(), num_bins, info,
std::numeric_limits<float>::quiet_NaN(), &container);
HistogramCuts cuts;
container.MakeCuts(&cuts, info.IsColumnSplit());
thrust::sort(x.begin(), x.end());
auto n_uniques = thrust::unique(x.begin(), x.end()) - x.begin();
ASSERT_NE(n_uniques, x.size());
ASSERT_EQ(cuts.TotalBins(), n_uniques);
ASSERT_EQ(n_uniques, num_categories);
auto& values = cuts.cut_values_.HostVector();
ASSERT_TRUE(std::is_sorted(values.cbegin(), values.cend()));
auto is_unique = (std::unique(values.begin(), values.end()) - values.begin()) == n_uniques;
ASSERT_TRUE(is_unique);
x.resize(n_uniques);
h_x.resize(n_uniques);
thrust::copy(x.begin(), x.end(), h_x.begin());
for (decltype(n_uniques) i = 0; i < n_uniques; ++i) {
ASSERT_EQ(h_x[i], values[i]);
}
}
TEST(HistUtil, AdapterDeviceSketchCategorical) {
auto categorical_sizes = {2, 6, 8, 12};
int num_bins = 256;
auto sizes = {25, 100, 1000};
for (auto n : sizes) {
for (auto num_categories : categorical_sizes) {
auto x = GenerateRandomCategoricalSingleColumn(n, num_categories);
auto dmat = GetDMatrixFromData(x, n, 1);
auto x_device = thrust::device_vector<float>(x);
auto adapter = AdapterFromData(x_device, n, 1);
ValidateBatchedCuts(adapter, num_bins, dmat.get());
TestCategoricalSketchAdapter(n, num_categories, num_bins, true);
TestCategoricalSketchAdapter(n, num_categories, num_bins, false);
}
}
}
TEST(HistUtil, AdapterDeviceSketchMultipleColumns) {
auto bin_sizes = {2, 16, 256, 512};
auto sizes = {100, 1000, 1500};
int num_columns = 5;
for (auto num_rows : sizes) {
auto x = GenerateRandom(num_rows, num_columns);
auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
auto x_device = thrust::device_vector<float>(x);
for (auto num_bins : bin_sizes) {
auto adapter = AdapterFromData(x_device, num_rows, num_columns);
ValidateBatchedCuts(adapter, num_bins, dmat.get());
}
}
}
TEST(HistUtil, AdapterDeviceSketchBatches) {
int num_bins = 256;
int num_rows = 5000;
auto batch_sizes = {0, 100, 1500, 6000};
int num_columns = 5;
for (auto batch_size : batch_sizes) {
auto x = GenerateRandom(num_rows, num_columns);
auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
auto x_device = thrust::device_vector<float>(x);
auto adapter = AdapterFromData(x_device, num_rows, num_columns);
ValidateBatchedCuts(adapter, num_bins, dmat.get(), batch_size);
}
}
namespace {
auto MakeData(Context const* ctx, std::size_t n_samples, bst_feature_t n_features) {
dh::safe_cuda(cudaSetDevice(ctx->gpu_id));
auto n = n_samples * n_features;
std::vector<float> x;
x.resize(n);
std::iota(x.begin(), x.end(), 0);
std::int32_t c{0};
float missing = n_samples * n_features;
for (std::size_t i = 0; i < x.size(); ++i) {
if (i % 5 == 0) {
x[i] = missing;
c++;
}
}
thrust::device_vector<float> d_x;
d_x = x;
auto n_invalids = n / 10 * 2 + 1;
auto is_valid = data::IsValidFunctor{missing};
return std::tuple{x, d_x, n_invalids, is_valid};
}
void TestGetColumnSize(std::size_t n_samples) {
auto ctx = MakeCUDACtx(0);
bst_feature_t n_features = 12;
[[maybe_unused]] auto [x, d_x, n_invalids, is_valid] = MakeData(&ctx, n_samples, n_features);
auto adapter = AdapterFromData(d_x, n_samples, n_features);
auto batch = adapter.Value();
auto batch_iter = dh::MakeTransformIterator<data::COOTuple>(
thrust::make_counting_iterator(0llu),
[=] __device__(std::size_t idx) { return batch.GetElement(idx); });
dh::caching_device_vector<std::size_t> column_sizes_scan;
column_sizes_scan.resize(n_features + 1);
std::vector<std::size_t> h_column_size(column_sizes_scan.size());
std::vector<std::size_t> h_column_size_1(column_sizes_scan.size());
detail::LaunchGetColumnSizeKernel<decltype(batch_iter), true, true>(
ctx.gpu_id, IterSpan{batch_iter, batch.Size()}, is_valid, dh::ToSpan(column_sizes_scan));
thrust::copy(column_sizes_scan.begin(), column_sizes_scan.end(), h_column_size.begin());
detail::LaunchGetColumnSizeKernel<decltype(batch_iter), true, false>(
ctx.gpu_id, IterSpan{batch_iter, batch.Size()}, is_valid, dh::ToSpan(column_sizes_scan));
thrust::copy(column_sizes_scan.begin(), column_sizes_scan.end(), h_column_size_1.begin());
ASSERT_EQ(h_column_size, h_column_size_1);
detail::LaunchGetColumnSizeKernel<decltype(batch_iter), false, true>(
ctx.gpu_id, IterSpan{batch_iter, batch.Size()}, is_valid, dh::ToSpan(column_sizes_scan));
thrust::copy(column_sizes_scan.begin(), column_sizes_scan.end(), h_column_size_1.begin());
ASSERT_EQ(h_column_size, h_column_size_1);
detail::LaunchGetColumnSizeKernel<decltype(batch_iter), false, false>(
ctx.gpu_id, IterSpan{batch_iter, batch.Size()}, is_valid, dh::ToSpan(column_sizes_scan));
thrust::copy(column_sizes_scan.begin(), column_sizes_scan.end(), h_column_size_1.begin());
ASSERT_EQ(h_column_size, h_column_size_1);
}
} // namespace
TEST(HistUtil, GetColumnSize) {
bst_row_t n_samples = 4096;
TestGetColumnSize(n_samples);
}
// Check sketching from adapter or DMatrix results in the same answer
// Consistency here is useful for testing and user experience
TEST(HistUtil, SketchingEquivalent) {
auto ctx = MakeCUDACtx(0);
auto bin_sizes = {2, 16, 256, 512};
auto sizes = {100, 1000, 1500};
int num_columns = 5;
for (auto num_rows : sizes) {
auto x = GenerateRandom(num_rows, num_columns);
auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
for (auto num_bins : bin_sizes) {
auto dmat_cuts = DeviceSketch(&ctx, dmat.get(), num_bins);
auto x_device = thrust::device_vector<float>(x);
auto adapter = AdapterFromData(x_device, num_rows, num_columns);
common::HistogramCuts adapter_cuts = MakeUnweightedCutsForTest(
adapter, num_bins, std::numeric_limits<float>::quiet_NaN());
EXPECT_EQ(dmat_cuts.Values(), adapter_cuts.Values());
EXPECT_EQ(dmat_cuts.Ptrs(), adapter_cuts.Ptrs());
EXPECT_EQ(dmat_cuts.MinValues(), adapter_cuts.MinValues());
ValidateBatchedCuts(adapter, num_bins, dmat.get());
}
}
}
TEST(HistUtil, DeviceSketchFromGroupWeights) {
auto ctx = MakeCUDACtx(0);
size_t constexpr kRows = 3000, kCols = 200, kBins = 256;
size_t constexpr kGroups = 10;
auto m = RandomDataGenerator{kRows, kCols, 0}.GenerateDMatrix();
// sketch with group weight
auto& h_weights = m->Info().weights_.HostVector();
h_weights.resize(kGroups);
std::fill(h_weights.begin(), h_weights.end(), 1.0f);
std::vector<bst_group_t> groups(kGroups);
for (size_t i = 0; i < kGroups; ++i) {
groups[i] = kRows / kGroups;
}
m->SetInfo("group", groups.data(), DataType::kUInt32, kGroups);
HistogramCuts weighted_cuts = DeviceSketch(&ctx, m.get(), kBins, 0);
// sketch with no weight
h_weights.clear();
HistogramCuts cuts = DeviceSketch(&ctx, m.get(), kBins, 0);
ASSERT_EQ(cuts.Values().size(), weighted_cuts.Values().size());
ASSERT_EQ(cuts.MinValues().size(), weighted_cuts.MinValues().size());
ASSERT_EQ(cuts.Ptrs().size(), weighted_cuts.Ptrs().size());
for (size_t i = 0; i < cuts.Values().size(); ++i) {
EXPECT_EQ(cuts.Values()[i], weighted_cuts.Values()[i]) << "i:"<< i;
}
for (size_t i = 0; i < cuts.MinValues().size(); ++i) {
ASSERT_EQ(cuts.MinValues()[i], weighted_cuts.MinValues()[i]);
}
for (size_t i = 0; i < cuts.Ptrs().size(); ++i) {
ASSERT_EQ(cuts.Ptrs().at(i), weighted_cuts.Ptrs().at(i));
}
ValidateCuts(weighted_cuts, m.get(), kBins);
}
void TestAdapterSketchFromWeights(bool with_group) {
size_t constexpr kRows = 300, kCols = 20, kBins = 256;
size_t constexpr kGroups = 10;
HostDeviceVector<float> storage;
std::string m =
RandomDataGenerator{kRows, kCols, 0}.Device(0).GenerateArrayInterface(
&storage);
MetaInfo info;
Context ctx;
auto& h_weights = info.weights_.HostVector();
if (with_group) {
h_weights.resize(kGroups);
} else {
h_weights.resize(kRows);
}
std::fill(h_weights.begin(), h_weights.end(), 1.0f);
std::vector<bst_group_t> groups(kGroups);
if (with_group) {
for (size_t i = 0; i < kGroups; ++i) {
groups[i] = kRows / kGroups;
}
info.SetInfo(ctx, "group", groups.data(), DataType::kUInt32, kGroups);
}
info.weights_.SetDevice(0);
info.num_row_ = kRows;
info.num_col_ = kCols;
data::CupyAdapter adapter(m);
auto const& batch = adapter.Value();
HostDeviceVector<FeatureType> ft;
SketchContainer sketch_container(ft, kBins, kCols, kRows, 0);
AdapterDeviceSketch(adapter.Value(), kBins, info, std::numeric_limits<float>::quiet_NaN(),
&sketch_container);
common::HistogramCuts cuts;
sketch_container.MakeCuts(&cuts, info.IsColumnSplit());
auto dmat = GetDMatrixFromData(storage.HostVector(), kRows, kCols);
if (with_group) {
dmat->Info().SetInfo(ctx, "group", groups.data(), DataType::kUInt32, kGroups);
}
dmat->Info().SetInfo(ctx, "weight", h_weights.data(), DataType::kFloat32, h_weights.size());
dmat->Info().num_col_ = kCols;
dmat->Info().num_row_ = kRows;
ASSERT_EQ(cuts.Ptrs().size(), kCols + 1);
ValidateCuts(cuts, dmat.get(), kBins);
auto cuda_ctx = MakeCUDACtx(0);
if (with_group) {
dmat->Info().weights_ = decltype(dmat->Info().weights_)(); // remove weight
HistogramCuts non_weighted = DeviceSketch(&cuda_ctx, dmat.get(), kBins, 0);
for (size_t i = 0; i < cuts.Values().size(); ++i) {
ASSERT_EQ(cuts.Values()[i], non_weighted.Values()[i]);
}
for (size_t i = 0; i < cuts.MinValues().size(); ++i) {
ASSERT_EQ(cuts.MinValues()[i], non_weighted.MinValues()[i]);
}
for (size_t i = 0; i < cuts.Ptrs().size(); ++i) {
ASSERT_EQ(cuts.Ptrs().at(i), non_weighted.Ptrs().at(i));
}
}
if (with_group) {
common::HistogramCuts weighted;
auto& h_weights = info.weights_.HostVector();
h_weights.resize(kGroups);
// Generate different weight.
for (size_t i = 0; i < h_weights.size(); ++i) {
// FIXME(jiamingy): Some entries generated GPU test cannot pass the validate cuts if
// we use more diverse weights, partially caused by
// https://github.com/dmlc/xgboost/issues/7946
h_weights[i] = (i % 2 == 0 ? 1 : 2) / static_cast<float>(kGroups);
}
SketchContainer sketch_container(ft, kBins, kCols, kRows, 0);
AdapterDeviceSketch(adapter.Value(), kBins, info, std::numeric_limits<float>::quiet_NaN(),
&sketch_container);
sketch_container.MakeCuts(&weighted, info.IsColumnSplit());
ValidateCuts(weighted, dmat.get(), kBins);
}
}
TEST(HistUtil, AdapterSketchFromWeights) {
TestAdapterSketchFromWeights(false);
TestAdapterSketchFromWeights(true);
}
namespace {
class DeviceSketchWithHessianTest
: public ::testing::TestWithParam<std::tuple<bool, bst_row_t, bst_bin_t>> {
bst_feature_t n_features_ = 5;
bst_group_t n_groups_{3};
auto GenerateHessian(Context const* ctx, bst_row_t n_samples) const {
HostDeviceVector<float> hessian;
auto& h_hess = hessian.HostVector();
h_hess = GenerateRandomWeights(n_samples);
std::mt19937 rng(0);
std::shuffle(h_hess.begin(), h_hess.end(), rng);
hessian.SetDevice(ctx->Device());
return hessian;
}
void CheckReg(Context const* ctx, std::shared_ptr<DMatrix> p_fmat, bst_bin_t n_bins,
HostDeviceVector<float> const& hessian, std::vector<float> const& w,
std::size_t n_elements) const {
auto const& h_hess = hessian.ConstHostVector();
{
auto& h_weight = p_fmat->Info().weights_.HostVector();
h_weight = w;
}
HistogramCuts cuts_hess =
DeviceSketchWithHessian(ctx, p_fmat.get(), n_bins, hessian.ConstDeviceSpan(), n_elements);
ValidateCuts(cuts_hess, p_fmat.get(), n_bins);
// merge hessian
{
auto& h_weight = p_fmat->Info().weights_.HostVector();
ASSERT_EQ(h_weight.size(), h_hess.size());
for (std::size_t i = 0; i < h_weight.size(); ++i) {
h_weight[i] = w[i] * h_hess[i];
}
}
HistogramCuts cuts_wh = DeviceSketch(ctx, p_fmat.get(), n_bins, n_elements);
ValidateCuts(cuts_wh, p_fmat.get(), n_bins);
ASSERT_EQ(cuts_hess.Values().size(), cuts_wh.Values().size());
for (std::size_t i = 0; i < cuts_hess.Values().size(); ++i) {
ASSERT_NEAR(cuts_wh.Values()[i], cuts_hess.Values()[i], kRtEps);
}
p_fmat->Info().weights_.HostVector() = w;
}
protected:
Context ctx_ = MakeCUDACtx(0);
void TestLTR(Context const* ctx, bst_row_t n_samples, bst_bin_t n_bins,
std::size_t n_elements) const {
auto x = GenerateRandom(n_samples, n_features_);
std::vector<bst_group_t> gptr;
gptr.resize(n_groups_ + 1, 0);
gptr[1] = n_samples / n_groups_;
gptr[2] = n_samples / n_groups_ + gptr[1];
gptr.back() = n_samples;
auto hessian = this->GenerateHessian(ctx, n_samples);
auto const& h_hess = hessian.ConstHostVector();
auto p_fmat = GetDMatrixFromData(x, n_samples, n_features_);
p_fmat->Info().group_ptr_ = gptr;
// test with constant group weight
std::vector<float> w(n_groups_, 1.0f);
p_fmat->Info().weights_.HostVector() = w;
HistogramCuts cuts_hess =
DeviceSketchWithHessian(ctx, p_fmat.get(), n_bins, hessian.ConstDeviceSpan(), n_elements);
// make validation easier by converting it into sample weight.
p_fmat->Info().weights_.HostVector() = h_hess;
p_fmat->Info().group_ptr_.clear();
ValidateCuts(cuts_hess, p_fmat.get(), n_bins);
// restore ltr properties
p_fmat->Info().weights_.HostVector() = w;
p_fmat->Info().group_ptr_ = gptr;
// test with random group weight
w = GenerateRandomWeights(n_groups_);
p_fmat->Info().weights_.HostVector() = w;
cuts_hess =
DeviceSketchWithHessian(ctx, p_fmat.get(), n_bins, hessian.ConstDeviceSpan(), n_elements);
// make validation easier by converting it into sample weight.
p_fmat->Info().weights_.HostVector() = h_hess;
p_fmat->Info().group_ptr_.clear();
ValidateCuts(cuts_hess, p_fmat.get(), n_bins);
// merge hessian with sample weight
p_fmat->Info().weights_.Resize(n_samples);
p_fmat->Info().group_ptr_.clear();
for (std::size_t i = 0; i < h_hess.size(); ++i) {
auto gidx = dh::SegmentId(Span{gptr.data(), gptr.size()}, i);
p_fmat->Info().weights_.HostVector()[i] = w[gidx] * h_hess[i];
}
auto cuts = DeviceSketch(ctx, p_fmat.get(), n_bins, n_elements);
ValidateCuts(cuts, p_fmat.get(), n_bins);
ASSERT_EQ(cuts.Values().size(), cuts_hess.Values().size());
for (std::size_t i = 0; i < cuts.Values().size(); ++i) {
EXPECT_NEAR(cuts.Values()[i], cuts_hess.Values()[i], 1e-4f);
}
}
void TestRegression(Context const* ctx, bst_row_t n_samples, bst_bin_t n_bins,
std::size_t n_elements) const {
auto x = GenerateRandom(n_samples, n_features_);
auto p_fmat = GetDMatrixFromData(x, n_samples, n_features_);
std::vector<float> w = GenerateRandomWeights(n_samples);
auto hessian = this->GenerateHessian(ctx, n_samples);
this->CheckReg(ctx, p_fmat, n_bins, hessian, w, n_elements);
}
};
auto MakeParamsForTest() {
std::vector<bst_row_t> sizes = {1, 2, 256, 512, 1000, 1500};
std::vector<bst_bin_t> bin_sizes = {2, 16, 256, 512};
std::vector<std::tuple<bool, bst_row_t, bst_bin_t>> configs;
for (auto n_samples : sizes) {
for (auto n_bins : bin_sizes) {
configs.emplace_back(true, n_samples, n_bins);
configs.emplace_back(false, n_samples, n_bins);
}
}
return configs;
}
} // namespace
TEST_P(DeviceSketchWithHessianTest, DeviceSketchWithHessian) {
auto param = GetParam();
auto n_samples = std::get<1>(param);
auto n_bins = std::get<2>(param);
if (std::get<0>(param)) {
this->TestLTR(&ctx_, n_samples, n_bins, 0);
this->TestLTR(&ctx_, n_samples, n_bins, 512);
} else {
this->TestRegression(&ctx_, n_samples, n_bins, 0);
this->TestRegression(&ctx_, n_samples, n_bins, 512);
}
}
INSTANTIATE_TEST_SUITE_P(
HistUtil, DeviceSketchWithHessianTest, ::testing::ValuesIn(MakeParamsForTest()),
[](::testing::TestParamInfo<DeviceSketchWithHessianTest::ParamType> const& info) {
auto task = std::get<0>(info.param) ? "ltr" : "reg";
auto n_samples = std::to_string(std::get<1>(info.param));
auto n_bins = std::to_string(std::get<2>(info.param));
return std::string{task} + "_" + n_samples + "_" + n_bins;
});
} // namespace xgboost::common
|
8e8a68756527b99be1ba29057cb9efa9ae28271f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__ void
createInitialHiddenFeaturesKernel(const float *weights,
const int *movie_ratings, float* initial_hidden_feature_probs,
int num_movies, int num_hidden_features, int num_user_ratings) {
// weights[NUM_MOVIES][5][NUM_FEATURES]
// movie_ratings[NUM_TRAIN_POINTS][3]
// initial_hidden_feature_probs[NUM_FEATURES]
unsigned int hidden_id = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int point_id = 0;
float dot_prod; // Temporary, local dot product variable
while (hidden_id < num_hidden_features) {
dot_prod = 0.00; // Initialize the dot product to 0
for (point_id = 0; point_id < num_user_ratings; point_id++) {
// Indexing: weights[movie_id][rating][feature_id]
// user_id - [1, 500,000]
// movie_id - [1, 17771]
// rating - [0, 4]
// hidden_id - [0, 99]
user_id = *movie_ratings++;
movie_id = *movie_ratings++;
rating = *movie_ratings++;
// Do the dot product
dot_prod += weights[movie_id*5*num_hidden_features
+ rating*num_hidden_features
+ hidden_id]
* initial_hidden_feature_probs[hidden_id];
}
// Store the dot_product result
initial_hidden_feature_probs[hidden_id] = dot_prod;
// Re-use this thread on another data point:
hidden_id += blockDim.x * gridDim.x;
}
}
| 8e8a68756527b99be1ba29057cb9efa9ae28271f.cu | __global__ void
createInitialHiddenFeaturesKernel(const float *weights,
const int *movie_ratings, float* initial_hidden_feature_probs,
int num_movies, int num_hidden_features, int num_user_ratings) {
// weights[NUM_MOVIES][5][NUM_FEATURES]
// movie_ratings[NUM_TRAIN_POINTS][3]
// initial_hidden_feature_probs[NUM_FEATURES]
unsigned int hidden_id = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int point_id = 0;
float dot_prod; // Temporary, local dot product variable
while (hidden_id < num_hidden_features) {
dot_prod = 0.00; // Initialize the dot product to 0
for (point_id = 0; point_id < num_user_ratings; point_id++) {
// Indexing: weights[movie_id][rating][feature_id]
// user_id - [1, 500,000]
// movie_id - [1, 17771]
// rating - [0, 4]
// hidden_id - [0, 99]
user_id = *movie_ratings++;
movie_id = *movie_ratings++;
rating = *movie_ratings++;
// Do the dot product
dot_prod += weights[movie_id*5*num_hidden_features
+ rating*num_hidden_features
+ hidden_id]
* initial_hidden_feature_probs[hidden_id];
}
// Store the dot_product result
initial_hidden_feature_probs[hidden_id] = dot_prod;
// Re-use this thread on another data point:
hidden_id += blockDim.x * gridDim.x;
}
}
|
78251080f77eadeb94a0d5cb019a0d69d2738d1a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <raft/cudart_utils.h>
#include <common/device_utils.cuh>
#include <rmm/device_uvector.hpp>
#include "test_utils.h"
namespace MLCommon {
/*
* Testing Methodology:
* 0. Testing with a kernel of only one block is enough to verify this prim
* 1. Assume that the threads in the block contain the following values:
* 0 1 2 .... NThreads - 1
* NThreads ......................
* ................................
* ...................... blockDim.x - 1
* 2. This means, the resulting output of batchedBlockReduce<int, NThreads>
* will be NThreads values and each of them is just a column-wise sum of
* the above matrix
* 3. Repeat this for different block dimensions
* 4. Repeat this for different values of NThreads
*/
template <int NThreads>
__global__ void batchedBlockReduceTestKernel(int* out)
{
extern __shared__ char smem[];
int val = threadIdx.x;
val = batchedBlockReduce<int, NThreads>(val, reinterpret_cast<char*>(smem));
int gid = threadIdx.x / NThreads;
int lid = threadIdx.x % NThreads;
if (gid == 0) { out[lid] = val; }
}
struct BatchedBlockReduceInputs {
int blkDim;
};
template <int NThreads>
void batchedBlockReduceTest(int* out, const BatchedBlockReduceInputs& param, hipStream_t stream)
{
size_t smemSize = sizeof(int) * (param.blkDim / raft::WarpSize) * NThreads;
hipLaunchKernelGGL(( batchedBlockReduceTestKernel<NThreads>), dim3(1), dim3(param.blkDim), smemSize, stream, out);
CUDA_CHECK(hipGetLastError());
}
::std::ostream& operator<<(::std::ostream& os, const BatchedBlockReduceInputs& dims) { return os; }
template <int NThreads>
class BatchedBlockReduceTest : public ::testing::TestWithParam<BatchedBlockReduceInputs> {
protected:
BatchedBlockReduceTest() : out(0, stream), refOut(0, stream) {}
void SetUp() override
{
params = ::testing::TestWithParam<BatchedBlockReduceInputs>::GetParam();
CUDA_CHECK(hipStreamCreate(&stream));
out.resize(NThreads, stream);
refOut.resize(NThreads, stream);
CUDA_CHECK(hipMemset(out.data(), 0, out.size() * sizeof(int)));
CUDA_CHECK(hipMemset(refOut.data(), 0, refOut.size() * sizeof(int)));
computeRef();
batchedBlockReduceTest<NThreads>(out.data(), params, stream);
}
void TearDown() override { CUDA_CHECK(hipStreamDestroy(stream)); }
void computeRef()
{
int* ref = new int[NThreads];
int nGroups = params.blkDim / NThreads;
for (int i = 0; i < NThreads; ++i) {
ref[i] = 0;
for (int j = 0; j < nGroups; ++j) {
ref[i] += j * NThreads + i;
}
}
raft::update_device(refOut.data(), ref, NThreads, stream);
CUDA_CHECK(hipStreamSynchronize(stream));
delete[] ref;
}
protected:
BatchedBlockReduceInputs params;
rmm::device_uvector<int> out, refOut;
hipStream_t stream = 0;
};
typedef BatchedBlockReduceTest<8> BBTest8;
typedef BatchedBlockReduceTest<16> BBTest16;
typedef BatchedBlockReduceTest<32> BBTest32;
const std::vector<BatchedBlockReduceInputs> inputs = {
{32},
{64},
{128},
{256},
{512},
};
TEST_P(BBTest8, Result)
{
ASSERT_TRUE(devArrMatch(refOut.data(), out.data(), 8, raft::Compare<int>()));
}
INSTANTIATE_TEST_CASE_P(BatchedBlockReduceTests, BBTest8, ::testing::ValuesIn(inputs));
} // end namespace MLCommon
| 78251080f77eadeb94a0d5cb019a0d69d2738d1a.cu | /*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <raft/cudart_utils.h>
#include <common/device_utils.cuh>
#include <rmm/device_uvector.hpp>
#include "test_utils.h"
namespace MLCommon {
/*
* Testing Methodology:
* 0. Testing with a kernel of only one block is enough to verify this prim
* 1. Assume that the threads in the block contain the following values:
* 0 1 2 .... NThreads - 1
* NThreads ......................
* ................................
* ...................... blockDim.x - 1
* 2. This means, the resulting output of batchedBlockReduce<int, NThreads>
* will be NThreads values and each of them is just a column-wise sum of
* the above matrix
* 3. Repeat this for different block dimensions
* 4. Repeat this for different values of NThreads
*/
template <int NThreads>
__global__ void batchedBlockReduceTestKernel(int* out)
{
extern __shared__ char smem[];
int val = threadIdx.x;
val = batchedBlockReduce<int, NThreads>(val, reinterpret_cast<char*>(smem));
int gid = threadIdx.x / NThreads;
int lid = threadIdx.x % NThreads;
if (gid == 0) { out[lid] = val; }
}
struct BatchedBlockReduceInputs {
int blkDim;
};
template <int NThreads>
void batchedBlockReduceTest(int* out, const BatchedBlockReduceInputs& param, cudaStream_t stream)
{
size_t smemSize = sizeof(int) * (param.blkDim / raft::WarpSize) * NThreads;
batchedBlockReduceTestKernel<NThreads><<<1, param.blkDim, smemSize, stream>>>(out);
CUDA_CHECK(cudaGetLastError());
}
::std::ostream& operator<<(::std::ostream& os, const BatchedBlockReduceInputs& dims) { return os; }
template <int NThreads>
class BatchedBlockReduceTest : public ::testing::TestWithParam<BatchedBlockReduceInputs> {
protected:
BatchedBlockReduceTest() : out(0, stream), refOut(0, stream) {}
void SetUp() override
{
params = ::testing::TestWithParam<BatchedBlockReduceInputs>::GetParam();
CUDA_CHECK(cudaStreamCreate(&stream));
out.resize(NThreads, stream);
refOut.resize(NThreads, stream);
CUDA_CHECK(cudaMemset(out.data(), 0, out.size() * sizeof(int)));
CUDA_CHECK(cudaMemset(refOut.data(), 0, refOut.size() * sizeof(int)));
computeRef();
batchedBlockReduceTest<NThreads>(out.data(), params, stream);
}
void TearDown() override { CUDA_CHECK(cudaStreamDestroy(stream)); }
void computeRef()
{
int* ref = new int[NThreads];
int nGroups = params.blkDim / NThreads;
for (int i = 0; i < NThreads; ++i) {
ref[i] = 0;
for (int j = 0; j < nGroups; ++j) {
ref[i] += j * NThreads + i;
}
}
raft::update_device(refOut.data(), ref, NThreads, stream);
CUDA_CHECK(cudaStreamSynchronize(stream));
delete[] ref;
}
protected:
BatchedBlockReduceInputs params;
rmm::device_uvector<int> out, refOut;
cudaStream_t stream = 0;
};
typedef BatchedBlockReduceTest<8> BBTest8;
typedef BatchedBlockReduceTest<16> BBTest16;
typedef BatchedBlockReduceTest<32> BBTest32;
const std::vector<BatchedBlockReduceInputs> inputs = {
{32},
{64},
{128},
{256},
{512},
};
TEST_P(BBTest8, Result)
{
ASSERT_TRUE(devArrMatch(refOut.data(), out.data(), 8, raft::Compare<int>()));
}
INSTANTIATE_TEST_CASE_P(BatchedBlockReduceTests, BBTest8, ::testing::ValuesIn(inputs));
} // end namespace MLCommon
|
11629e5efcd5e1c34a298a8c437523d85e5215a7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/framework/framework.h"
#include "oneflow/core/common/balanced_splitter.h"
#include "oneflow/core/kernel/kernel_util.h"
#include "oneflow/user/kernels/math_unary_elementwise_func.h"
namespace oneflow {
namespace {
template<typename T, typename K, bool is_cosine_loss>
__global__ void GpuForward(const int64_t n, const int64_t num_classes, const int64_t lower_bound,
const T m1, const T m2, const T m3, const T* in, const K* labels, T* out,
T* theta) {
CUDA_1D_KERNEL_LOOP(i, n) {
const int32_t row_id = i / num_classes;
const int32_t col_id = i - row_id * num_classes;
const T in_data = in[i];
T out_data = in_data;
K label = labels[row_id] - lower_bound;
if (is_cosine_loss) {
if (label == col_id) { out_data = in_data - m3; }
} else {
if (label == col_id) {
const T theta_data = AcosFunctor<T>::Forward(in_data);
out_data = CosFunctor<T>::Forward(theta_data * m1 + m2) - m3;
theta[row_id] = theta_data;
} else if ((label < 0 || label >= num_classes) && col_id == 0) {
theta[row_id] = 0;
}
}
out[i] = out_data;
}
}
template<typename T, typename K, bool is_cosine_loss>
__global__ void GpuBackward(const int64_t n, const int64_t num_classes, const int64_t lower_bound,
const T m1, const T m2, const T m3, const T* dy, const K* labels,
const T* theta, T* dx) {
CUDA_1D_KERNEL_LOOP(i, n) {
const int32_t row_id = i / num_classes;
const int32_t col_id = i - row_id * num_classes;
K label = labels[row_id] - lower_bound;
const T dy_data = dy[i];
const T theta_data = theta[row_id];
T dx_data = dy_data;
if (label == col_id && !is_cosine_loss) {
dx_data = dy_data * SinFunctor<T>::Forward(theta_data * m1 + m2) * m1
/ SinFunctor<T>::Forward(theta_data);
}
dx[i] = dx_data;
}
}
class CombinedMarginLossOpKernelState final : public user_op::OpKernelState {
public:
CombinedMarginLossOpKernelState(int64_t lower, int64_t upper) : lower_(lower), upper_(upper) {}
~CombinedMarginLossOpKernelState() override = default;
int64_t lower() const { return lower_; }
int64_t upper() const { return upper_; }
private:
const int64_t lower_;
const int64_t upper_;
};
std::shared_ptr<user_op::OpKernelState> CreateCombinedMarginLossOpKernelState(
user_op::KernelInitContext* ctx, const std::string& in_arg_name) {
const cfg::SbpParallel& in_sbp = ctx->SbpParallel4ArgNameAndIndex(in_arg_name, 0);
if (in_sbp.has_split_parallel() && in_sbp.split_parallel().axis() == 1
&& ctx->parallel_ctx().parallel_num() > 1) {
CHECK(ctx->SbpParallel4ArgNameAndIndex("label", 0).has_broadcast_parallel());
const user_op::TensorDesc* in_logical_desc =
ctx->LogicalTensorDesc4ArgNameAndIndex(in_arg_name, 0);
const auto depth = ctx->Attr<int64_t>("depth");
CHECK_EQ(depth, in_logical_desc->shape().At(1));
BalancedSplitter bs(depth, ctx->parallel_ctx().parallel_num());
return std::make_shared<CombinedMarginLossOpKernelState>(
bs.At(ctx->parallel_ctx().parallel_id()).begin(),
bs.At(ctx->parallel_ctx().parallel_id()).end());
} else {
return std::shared_ptr<user_op::OpKernelState>(nullptr);
}
}
} // namespace
template<typename T, typename K>
class CombinedMarginLossGpuKernel final : public user_op::OpKernel {
public:
CombinedMarginLossGpuKernel() = default;
~CombinedMarginLossGpuKernel() override = default;
std::shared_ptr<user_op::OpKernelState> CreateOpKernelState(
user_op::KernelInitContext* ctx) const override {
return CreateCombinedMarginLossOpKernelState(ctx, "x");
}
private:
void Compute(user_op::KernelComputeContext* ctx, user_op::OpKernelState* state) const override {
const user_op::Tensor* x = ctx->Tensor4ArgNameAndIndex("x", 0);
const user_op::Tensor* label = ctx->Tensor4ArgNameAndIndex("label", 0);
user_op::Tensor* y = ctx->Tensor4ArgNameAndIndex("y", 0);
user_op::Tensor* theta = ctx->Tensor4ArgNameAndIndex("theta", 0);
const float m1 = ctx->Attr<float>("m1");
const float m2 = ctx->Attr<float>("m2");
const float m3 = ctx->Attr<float>("m3");
int64_t lower_bound = 0;
if (state != nullptr) {
auto* kernel_state = dynamic_cast<CombinedMarginLossOpKernelState*>(state);
CHECK_NOTNULL(kernel_state);
CHECK_EQ(x->shape().Count(1), kernel_state->upper() - kernel_state->lower());
lower_bound = kernel_state->lower();
}
if (m1 == 1.0 && m2 == 0.0) {
hipLaunchKernelGGL(( GpuForward<T, K, true>), dim3(BlocksNum4ThreadsNum(x->shape().elem_cnt())), dim3(kCudaThreadsNumPerBlock),
0, ctx->device_ctx()->cuda_stream(),
x->shape().elem_cnt(), x->shape().Count(1), lower_bound, static_cast<T>(m1),
static_cast<T>(m2), static_cast<T>(m3), x->dptr<T>(), label->dptr<K>(), y->mut_dptr<T>(),
theta->mut_dptr<T>());
} else {
hipLaunchKernelGGL(( GpuForward<T, K, false>), dim3(BlocksNum4ThreadsNum(x->shape().elem_cnt())),
dim3(kCudaThreadsNumPerBlock), 0, ctx->device_ctx()->cuda_stream(),
x->shape().elem_cnt(), x->shape().Count(1), lower_bound, static_cast<T>(m1),
static_cast<T>(m2), static_cast<T>(m3), x->dptr<T>(), label->dptr<K>(), y->mut_dptr<T>(),
theta->mut_dptr<T>());
}
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
#define REGISTER_COMBINED_MARGIN_LOSS_GPU_KERNEL(in_type, indices_type) \
REGISTER_USER_KERNEL("combined_margin_loss") \
.SetCreateFn<CombinedMarginLossGpuKernel<OF_PP_PAIR_FIRST(in_type), \
OF_PP_PAIR_FIRST(indices_type)>>() \
.SetIsMatchedHob((user_op::HobDeviceTag() == "gpu") \
& (user_op::HobDataType("x", 0) == OF_PP_PAIR_SECOND(in_type)) \
& (user_op::HobDataType("label", 0) == OF_PP_PAIR_SECOND(indices_type)));
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(REGISTER_COMBINED_MARGIN_LOSS_GPU_KERNEL, FLOATING_DATA_TYPE_SEQ,
INDEX_DATA_TYPE_SEQ)
template<typename T, typename K>
class CombinedMarginLossGradGpuKernel final : public user_op::OpKernel {
public:
CombinedMarginLossGradGpuKernel() = default;
~CombinedMarginLossGradGpuKernel() override = default;
std::shared_ptr<user_op::OpKernelState> CreateOpKernelState(
user_op::KernelInitContext* ctx) const override {
return CreateCombinedMarginLossOpKernelState(ctx, "dy");
}
private:
void Compute(user_op::KernelComputeContext* ctx, user_op::OpKernelState* state) const override {
const user_op::Tensor* dy = ctx->Tensor4ArgNameAndIndex("dy", 0);
const user_op::Tensor* label = ctx->Tensor4ArgNameAndIndex("label", 0);
const user_op::Tensor* theta = ctx->Tensor4ArgNameAndIndex("theta", 0);
user_op::Tensor* dx = ctx->Tensor4ArgNameAndIndex("dx", 0);
const float m1 = ctx->Attr<float>("m1");
const float m2 = ctx->Attr<float>("m2");
const float m3 = ctx->Attr<float>("m3");
int64_t lower_bound = 0;
if (state != nullptr) {
auto* kernel_state = dynamic_cast<CombinedMarginLossOpKernelState*>(state);
CHECK_NOTNULL(kernel_state);
CHECK_EQ(dy->shape().Count(1), kernel_state->upper() - kernel_state->lower());
lower_bound = kernel_state->lower();
}
if (m1 == 1.0 && m2 == 0.0) {
hipLaunchKernelGGL(( GpuBackward<T, K, true>), dim3(BlocksNum4ThreadsNum(dy->shape().elem_cnt())),
dim3(kCudaThreadsNumPerBlock), 0, ctx->device_ctx()->cuda_stream(),
dy->shape().elem_cnt(), dy->shape().Count(1), lower_bound, static_cast<T>(m1),
static_cast<T>(m2), static_cast<T>(m3), dy->dptr<T>(), label->dptr<K>(), theta->dptr<T>(),
dx->mut_dptr<T>());
} else {
hipLaunchKernelGGL(( GpuBackward<T, K, false>), dim3(BlocksNum4ThreadsNum(dy->shape().elem_cnt())),
dim3(kCudaThreadsNumPerBlock), 0, ctx->device_ctx()->cuda_stream(),
dy->shape().elem_cnt(), dy->shape().Count(1), lower_bound, static_cast<T>(m1),
static_cast<T>(m2), static_cast<T>(m3), dy->dptr<T>(), label->dptr<K>(), theta->dptr<T>(),
dx->mut_dptr<T>());
}
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
#define REGISTER_COMBINED_MARGIN_LOSS_GRAD_GPU_KERNEL(dy_type, indices_type) \
REGISTER_USER_KERNEL("combined_margin_loss_grad") \
.SetCreateFn<CombinedMarginLossGradGpuKernel<OF_PP_PAIR_FIRST(dy_type), \
OF_PP_PAIR_FIRST(indices_type)>>() \
.SetIsMatchedHob((user_op::HobDeviceTag() == "gpu") \
& (user_op::HobDataType("dy", 0) == OF_PP_PAIR_SECOND(dy_type)) \
& (user_op::HobDataType("label", 0) == OF_PP_PAIR_SECOND(indices_type)));
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(REGISTER_COMBINED_MARGIN_LOSS_GRAD_GPU_KERNEL,
FLOATING_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ)
} // namespace oneflow
| 11629e5efcd5e1c34a298a8c437523d85e5215a7.cu | /*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/framework/framework.h"
#include "oneflow/core/common/balanced_splitter.h"
#include "oneflow/core/kernel/kernel_util.h"
#include "oneflow/user/kernels/math_unary_elementwise_func.h"
namespace oneflow {
namespace {
template<typename T, typename K, bool is_cosine_loss>
__global__ void GpuForward(const int64_t n, const int64_t num_classes, const int64_t lower_bound,
const T m1, const T m2, const T m3, const T* in, const K* labels, T* out,
T* theta) {
CUDA_1D_KERNEL_LOOP(i, n) {
const int32_t row_id = i / num_classes;
const int32_t col_id = i - row_id * num_classes;
const T in_data = in[i];
T out_data = in_data;
K label = labels[row_id] - lower_bound;
if (is_cosine_loss) {
if (label == col_id) { out_data = in_data - m3; }
} else {
if (label == col_id) {
const T theta_data = AcosFunctor<T>::Forward(in_data);
out_data = CosFunctor<T>::Forward(theta_data * m1 + m2) - m3;
theta[row_id] = theta_data;
} else if ((label < 0 || label >= num_classes) && col_id == 0) {
theta[row_id] = 0;
}
}
out[i] = out_data;
}
}
template<typename T, typename K, bool is_cosine_loss>
__global__ void GpuBackward(const int64_t n, const int64_t num_classes, const int64_t lower_bound,
const T m1, const T m2, const T m3, const T* dy, const K* labels,
const T* theta, T* dx) {
CUDA_1D_KERNEL_LOOP(i, n) {
const int32_t row_id = i / num_classes;
const int32_t col_id = i - row_id * num_classes;
K label = labels[row_id] - lower_bound;
const T dy_data = dy[i];
const T theta_data = theta[row_id];
T dx_data = dy_data;
if (label == col_id && !is_cosine_loss) {
dx_data = dy_data * SinFunctor<T>::Forward(theta_data * m1 + m2) * m1
/ SinFunctor<T>::Forward(theta_data);
}
dx[i] = dx_data;
}
}
class CombinedMarginLossOpKernelState final : public user_op::OpKernelState {
public:
CombinedMarginLossOpKernelState(int64_t lower, int64_t upper) : lower_(lower), upper_(upper) {}
~CombinedMarginLossOpKernelState() override = default;
int64_t lower() const { return lower_; }
int64_t upper() const { return upper_; }
private:
const int64_t lower_;
const int64_t upper_;
};
std::shared_ptr<user_op::OpKernelState> CreateCombinedMarginLossOpKernelState(
user_op::KernelInitContext* ctx, const std::string& in_arg_name) {
const cfg::SbpParallel& in_sbp = ctx->SbpParallel4ArgNameAndIndex(in_arg_name, 0);
if (in_sbp.has_split_parallel() && in_sbp.split_parallel().axis() == 1
&& ctx->parallel_ctx().parallel_num() > 1) {
CHECK(ctx->SbpParallel4ArgNameAndIndex("label", 0).has_broadcast_parallel());
const user_op::TensorDesc* in_logical_desc =
ctx->LogicalTensorDesc4ArgNameAndIndex(in_arg_name, 0);
const auto depth = ctx->Attr<int64_t>("depth");
CHECK_EQ(depth, in_logical_desc->shape().At(1));
BalancedSplitter bs(depth, ctx->parallel_ctx().parallel_num());
return std::make_shared<CombinedMarginLossOpKernelState>(
bs.At(ctx->parallel_ctx().parallel_id()).begin(),
bs.At(ctx->parallel_ctx().parallel_id()).end());
} else {
return std::shared_ptr<user_op::OpKernelState>(nullptr);
}
}
} // namespace
template<typename T, typename K>
class CombinedMarginLossGpuKernel final : public user_op::OpKernel {
public:
CombinedMarginLossGpuKernel() = default;
~CombinedMarginLossGpuKernel() override = default;
std::shared_ptr<user_op::OpKernelState> CreateOpKernelState(
user_op::KernelInitContext* ctx) const override {
return CreateCombinedMarginLossOpKernelState(ctx, "x");
}
private:
void Compute(user_op::KernelComputeContext* ctx, user_op::OpKernelState* state) const override {
const user_op::Tensor* x = ctx->Tensor4ArgNameAndIndex("x", 0);
const user_op::Tensor* label = ctx->Tensor4ArgNameAndIndex("label", 0);
user_op::Tensor* y = ctx->Tensor4ArgNameAndIndex("y", 0);
user_op::Tensor* theta = ctx->Tensor4ArgNameAndIndex("theta", 0);
const float m1 = ctx->Attr<float>("m1");
const float m2 = ctx->Attr<float>("m2");
const float m3 = ctx->Attr<float>("m3");
int64_t lower_bound = 0;
if (state != nullptr) {
auto* kernel_state = dynamic_cast<CombinedMarginLossOpKernelState*>(state);
CHECK_NOTNULL(kernel_state);
CHECK_EQ(x->shape().Count(1), kernel_state->upper() - kernel_state->lower());
lower_bound = kernel_state->lower();
}
if (m1 == 1.0 && m2 == 0.0) {
GpuForward<T, K, true><<<BlocksNum4ThreadsNum(x->shape().elem_cnt()), kCudaThreadsNumPerBlock,
0, ctx->device_ctx()->cuda_stream()>>>(
x->shape().elem_cnt(), x->shape().Count(1), lower_bound, static_cast<T>(m1),
static_cast<T>(m2), static_cast<T>(m3), x->dptr<T>(), label->dptr<K>(), y->mut_dptr<T>(),
theta->mut_dptr<T>());
} else {
GpuForward<T, K, false><<<BlocksNum4ThreadsNum(x->shape().elem_cnt()),
kCudaThreadsNumPerBlock, 0, ctx->device_ctx()->cuda_stream()>>>(
x->shape().elem_cnt(), x->shape().Count(1), lower_bound, static_cast<T>(m1),
static_cast<T>(m2), static_cast<T>(m3), x->dptr<T>(), label->dptr<K>(), y->mut_dptr<T>(),
theta->mut_dptr<T>());
}
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
#define REGISTER_COMBINED_MARGIN_LOSS_GPU_KERNEL(in_type, indices_type) \
REGISTER_USER_KERNEL("combined_margin_loss") \
.SetCreateFn<CombinedMarginLossGpuKernel<OF_PP_PAIR_FIRST(in_type), \
OF_PP_PAIR_FIRST(indices_type)>>() \
.SetIsMatchedHob((user_op::HobDeviceTag() == "gpu") \
& (user_op::HobDataType("x", 0) == OF_PP_PAIR_SECOND(in_type)) \
& (user_op::HobDataType("label", 0) == OF_PP_PAIR_SECOND(indices_type)));
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(REGISTER_COMBINED_MARGIN_LOSS_GPU_KERNEL, FLOATING_DATA_TYPE_SEQ,
INDEX_DATA_TYPE_SEQ)
template<typename T, typename K>
class CombinedMarginLossGradGpuKernel final : public user_op::OpKernel {
public:
CombinedMarginLossGradGpuKernel() = default;
~CombinedMarginLossGradGpuKernel() override = default;
std::shared_ptr<user_op::OpKernelState> CreateOpKernelState(
user_op::KernelInitContext* ctx) const override {
return CreateCombinedMarginLossOpKernelState(ctx, "dy");
}
private:
void Compute(user_op::KernelComputeContext* ctx, user_op::OpKernelState* state) const override {
const user_op::Tensor* dy = ctx->Tensor4ArgNameAndIndex("dy", 0);
const user_op::Tensor* label = ctx->Tensor4ArgNameAndIndex("label", 0);
const user_op::Tensor* theta = ctx->Tensor4ArgNameAndIndex("theta", 0);
user_op::Tensor* dx = ctx->Tensor4ArgNameAndIndex("dx", 0);
const float m1 = ctx->Attr<float>("m1");
const float m2 = ctx->Attr<float>("m2");
const float m3 = ctx->Attr<float>("m3");
int64_t lower_bound = 0;
if (state != nullptr) {
auto* kernel_state = dynamic_cast<CombinedMarginLossOpKernelState*>(state);
CHECK_NOTNULL(kernel_state);
CHECK_EQ(dy->shape().Count(1), kernel_state->upper() - kernel_state->lower());
lower_bound = kernel_state->lower();
}
if (m1 == 1.0 && m2 == 0.0) {
GpuBackward<T, K, true><<<BlocksNum4ThreadsNum(dy->shape().elem_cnt()),
kCudaThreadsNumPerBlock, 0, ctx->device_ctx()->cuda_stream()>>>(
dy->shape().elem_cnt(), dy->shape().Count(1), lower_bound, static_cast<T>(m1),
static_cast<T>(m2), static_cast<T>(m3), dy->dptr<T>(), label->dptr<K>(), theta->dptr<T>(),
dx->mut_dptr<T>());
} else {
GpuBackward<T, K, false><<<BlocksNum4ThreadsNum(dy->shape().elem_cnt()),
kCudaThreadsNumPerBlock, 0, ctx->device_ctx()->cuda_stream()>>>(
dy->shape().elem_cnt(), dy->shape().Count(1), lower_bound, static_cast<T>(m1),
static_cast<T>(m2), static_cast<T>(m3), dy->dptr<T>(), label->dptr<K>(), theta->dptr<T>(),
dx->mut_dptr<T>());
}
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
#define REGISTER_COMBINED_MARGIN_LOSS_GRAD_GPU_KERNEL(dy_type, indices_type) \
REGISTER_USER_KERNEL("combined_margin_loss_grad") \
.SetCreateFn<CombinedMarginLossGradGpuKernel<OF_PP_PAIR_FIRST(dy_type), \
OF_PP_PAIR_FIRST(indices_type)>>() \
.SetIsMatchedHob((user_op::HobDeviceTag() == "gpu") \
& (user_op::HobDataType("dy", 0) == OF_PP_PAIR_SECOND(dy_type)) \
& (user_op::HobDataType("label", 0) == OF_PP_PAIR_SECOND(indices_type)));
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(REGISTER_COMBINED_MARGIN_LOSS_GRAD_GPU_KERNEL,
FLOATING_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ)
} // namespace oneflow
|
a350631c37d414df27e1d4a6f8a68db87867a2c9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Author : Hasindu Gamaarachchi
CPA for 128/128 bit SPECK software implementation
To derive right half key K2
*/
#include <stdio.h>
#include "helpers.cuh"
#include "data.cuh"
//file name for all key-correlation pairs sorted in key order
#define FILEALL "all.txt"
//file name for all key-correlation pairs sorted using correlation coefficient
#define FILEALLSORTED "allsorted.txt"
//wave fine format, 0 if ascii 1 if binary
#define WAVFORMAT 1
//set 1 if your samples are hexadecimal separated by spaces
//set 0 if your samples are hexadecimal with no spaces
#define FORMAT 0
//set 0 if need to take fabs(), 1 if negative, 2 if positive
#define CORRELATION_SIGN 0
//Change SAMPLES to the number of power traces
#define SAMPLES 5000
//Change ALLWAVELEGTH to the number of sampling points you have in a single power trace
#define ALLWAVELEN 100000
//Due to memory restrictions on GPU if SAMPLES is large cannot keep all the things at once in memory
//In such case of a memory allocation failure reduce WAVELENGTH
//But make sure that ALLWAVELENGTH is divisible by WAVELENGTH
#define WAVELENGTH 50000
//define for 128/128 Speck
#define KEYBYTES 16
#define KEYBYTESPART 8
#define KEYS 256
//struct used for sorting correlation key pairs
struct key_corr{
unsigned int key;
double corr;
};
//hamming weight of a number
__device__ byte hammingweight(byte H){
//byte H=M^R;
// Count the number of set bits
byte dist=0;
while(H){
dist++;
H &= H - 1;
}
return dist;
}
__device__ byte hamming(unsigned int *sample, unsigned int i,unsigned int n,unsigned int key) { //n is byteno i is the sample
byte pt0[8];
copy2(pt0,&sample[i*KEYBYTES]);
byte pt1[8];
copy2(pt1,&sample[i*KEYBYTES+8]);
byte ans[8];
ROR(ans,pt1,8);
copy(pt1,ans);
_add(ans,pt1,pt0);
byte inter ;
if(n<8){
inter= (byte)(ans[n] ^ key);
}
else{
inter = (byte)(ans[n-8] ^ key);
}
byte dist = hammingweight(inter);
return dist;
}
__global__ void maxCorelationkernel(double *corelation,double *wavestat,double *wavestat2,double *hammingstat,double *allcorelation){
int keyguess=blockDim.y*blockIdx.y+threadIdx.y;
int keybyte=blockDim.x*blockIdx.x+threadIdx.x;
if (keybyte<KEYBYTESPART && keyguess<KEYS ){
double sigmaH,sigmaH2,sigmaW=0,sigmaW2=0,sigmaWH=0;
sigmaH=hammingstat[KEYBYTESPART*keyguess+keybyte];
sigmaH2=hammingstat[KEYS*KEYBYTESPART+KEYBYTESPART*keyguess+keybyte];
double temp_corelation=0;;
double corelationmax=0;;
unsigned int j;
for(j=0;j<WAVELENGTH;j++){
sigmaWH=wavestat2[j*KEYS*KEYBYTESPART + keyguess*KEYBYTESPART + keybyte];
sigmaW=wavestat[j];
sigmaW2=wavestat[WAVELENGTH+j];
double numerator=SAMPLES*sigmaWH - sigmaW*sigmaH;
double denominator=sqrt(SAMPLES*sigmaW2 - sigmaW*sigmaW)*sqrt(SAMPLES*sigmaH2 - sigmaH*sigmaH);
if(CORRELATION_SIGN==0){
temp_corelation=fabs(numerator/denominator);
}
else if(CORRELATION_SIGN==1){
temp_corelation=-numerator/denominator;
}
else if(CORRELATION_SIGN==2){
temp_corelation=numerator/denominator;
}
else{
temp_corelation=fabs(numerator/denominator);
}
allcorelation[j*KEYS*KEYBYTESPART + keyguess*KEYBYTESPART + keybyte]=temp_corelation;
if(temp_corelation>corelationmax){
corelationmax=temp_corelation;
}
}
if(corelationmax>corelation[keyguess*KEYBYTESPART+keybyte]){
corelation[keyguess*KEYBYTESPART+keybyte]=corelationmax;
}
}
return;
}
__global__ void wavestatkernel(double *wavedata, double *wavestat,double *wavestat2,byte *hammingArray){
int keyguess=blockDim.y*blockIdx.y+threadIdx.y;
int keybyte=blockDim.x*blockIdx.x+threadIdx.x;
int wave=blockDim.z*blockIdx.z+threadIdx.z;
if (keyguess<KEYS && keybyte<KEYBYTESPART && wave<WAVELENGTH ){
unsigned int i;
double sigmaWH=0;
for(i=0;i<SAMPLES;i++){
sigmaWH+=wavedata[i*WAVELENGTH+wave]*(double)hammingArray[i*KEYS*KEYBYTESPART + keyguess*KEYBYTESPART + keybyte];
}
wavestat2[wave*KEYS*KEYBYTESPART + keyguess*KEYBYTESPART + keybyte ]=sigmaWH;
}
if (keyguess==0 && keybyte==0 && wave<WAVELENGTH ){
unsigned int i;
double sigmaW=0,sigmaW2=0,W=0;
for(i=0;i<SAMPLES;i++){
W=wavedata[i*WAVELENGTH+wave];
sigmaW+=W;
sigmaW2+=W*W;
}
wavestat[wave]=sigmaW;
wavestat[WAVELENGTH+wave]=sigmaW2;
}
return;
}
__global__ void hammingkernel(unsigned int *sample,byte *hammingArray,double *hammingstat){
int keyguess=blockDim.y*blockIdx.y+threadIdx.y;
int keybyte=blockDim.x*blockIdx.x+threadIdx.x;
if (keybyte<KEYBYTESPART && keyguess<KEYS ){
double sigmaH=0,sigmaH2=0;
byte H;
unsigned int i;
for(i=0;i<SAMPLES;i++){
H=hamming(sample,i,keybyte,keyguess);
hammingArray[i*KEYS*KEYBYTESPART + keyguess*KEYBYTESPART + keybyte]=H;
sigmaH+=(double)H;
sigmaH2+=(double)H*(double)H;
}
hammingstat[KEYBYTESPART*keyguess+keybyte]=sigmaH;
hammingstat[KEYS*KEYBYTESPART+KEYBYTESPART*keyguess+keybyte]=sigmaH2;
}
return;
}
int main(int argc, char *argv[]){
unsigned int i,j;
//check args
if(argc!=3){
fprintf(stderr,"%s\n", "Not enough args. eg ./cpa wavedata.txt sample.txt");
exit(EXIT_FAILURE);
}
if(ALLWAVELEN%WAVELENGTH !=0){
fprintf(stderr,"Make sure that ALLWAVELEN is divisible by WAVELEN\n");
exit(1);
}
//get wave data
double *wavedata=(double *)malloc(sizeof(double) * SAMPLES* WAVELENGTH);
isMemoryFull(wavedata);
//get sample texts
unsigned int *sample=(unsigned int *)malloc(sizeof(unsigned int)*SAMPLES*KEYBYTES);
isMemoryFull(sample);
FILE *file=fopen(argv[2],"r");
isFileOK(file);
if(FORMAT==1){
for(i=0; i<SAMPLES ;i++){
for(j=0; j<KEYBYTES; j++){
fscanf(file,"%x",&sample[i*KEYBYTES+j]);
}
}
}
else if(FORMAT==0){
char str[100];
for(i=0; i<SAMPLES ;i++){
fscanf(file,"%s",str);
for(j=0; j<KEYBYTES; j++){
sscanf(&str[2*j],"%02X",&sample[i*KEYBYTES+j]);
}
}
}
else{
fprintf(stderr,"Unknown FORMAT for sample text\n");
exit(1);
}
fclose(file);
//space for corelation
double *corelation=(double *)malloc(sizeof(double) * KEYS * KEYBYTESPART);
isMemoryFull(corelation);
//space for all correlations
double *allcorelation=(double *)malloc(sizeof(double) * KEYS * KEYBYTESPART * WAVELENGTH);
isMemoryFull(allcorelation);
//Time
hipEvent_t start,stop;
float elapsedtime;
hipEventCreate(&start);
hipEventRecord(start,0);
//cuda arrays and copying
double *dev_wavedata;
unsigned int *dev_sample;
double *dev_corelation,*dev_allcorrelations,*dev_wavestat,*dev_wavestat2,*dev_hammingstat;
byte *dev_hammingArray;
checkCudaError(hipMalloc((void**)&dev_wavedata, SAMPLES*WAVELENGTH*sizeof(double)));
checkCudaError(hipMalloc((void**)&dev_sample, SAMPLES*KEYBYTES*sizeof(unsigned int)));
checkCudaError(hipMalloc((void**)&dev_corelation, KEYS*KEYBYTESPART*sizeof(double)));
checkCudaError(hipMalloc((void**)&dev_hammingArray, KEYS*KEYBYTESPART*SAMPLES*sizeof(byte)));
checkCudaError(hipMalloc((void**)&dev_wavestat, 2*WAVELENGTH*sizeof(double)));
checkCudaError(hipMalloc((void**)&dev_wavestat2, KEYS*KEYBYTESPART*WAVELENGTH*sizeof(double)));
checkCudaError(hipMalloc((void**)&dev_hammingstat, 2*KEYS*KEYBYTESPART*sizeof(double)));
checkCudaError(hipMalloc((void**)&dev_allcorrelations, WAVELENGTH*KEYS*KEYBYTESPART*sizeof(double)));
checkCudaError(hipMemset(dev_corelation,0, KEYS*KEYBYTESPART*sizeof(double)));
checkCudaError(hipMemcpy(dev_sample,sample, SAMPLES*KEYBYTES*sizeof(unsigned int),hipMemcpyHostToDevice));
dim3 grid(KEYBYTES/8,KEYS/32);
dim3 block(8,32);
//findhamming
hipLaunchKernelGGL(( hammingkernel), dim3(grid),dim3(block), 0, 0, dev_sample,dev_hammingArray,dev_hammingstat);
checkCudaError(hipGetLastError());
//correlation value writing
char filename[100];
FILE* filec[KEYBYTESPART][KEYS];
int keyguess,keybyte;
for(keybyte=0;keybyte<KEYBYTESPART;keybyte++){
for(keyguess=0;keyguess<KEYS;keyguess++){
sprintf(filename,"subbyte%d_keyguess%d",keybyte,keyguess);
filec[keybyte][keyguess]=fopen(filename,"w");
}
}
int loops=0;
for(loops=0;loops<ALLWAVELEN/WAVELENGTH;loops++){
if(WAVFORMAT==0){
FILE *file=fopen(argv[1],"r");
isFileOK(file);
for(i=0; i<SAMPLES ;i++){
unsigned int k=0;
for(j=0; j<ALLWAVELEN; j++){
float dat;
fscanf(file,"%f",&dat);
if(j<WAVELENGTH*(loops+1) && j>=WAVELENGTH*loops){
wavedata[i*WAVELENGTH+k]=(double)dat;
k++;
}
}
}
fclose(file);
}
else if(WAVFORMAT==1){
//read wave data
FILE *file=fopen(argv[1],"rb");
isFileOK(file);
for(i=0; i<SAMPLES ;i++){
fseek(file,sizeof(float)*(i*ALLWAVELEN+WAVELENGTH*loops),SEEK_SET);
for(j=0; j<WAVELENGTH; j++){
float dat;
int ret=fread(&dat,sizeof(float),1,file);
if(ret<1){
perror("");
exit(1);
}
wavedata[i*WAVELENGTH+j]=(double)dat;
}
}
fclose(file);
}
else{
fprintf(stderr,"Unknown wave file format\n");
exit(1);
}
checkCudaError(hipMemcpy(dev_wavedata,wavedata,SAMPLES*WAVELENGTH*sizeof(double),hipMemcpyHostToDevice));
dim3 block3d(8,16,4);
dim3 grid3d(KEYBYTESPART/8,KEYS/16,WAVELENGTH/4);
//find wave stats
hipLaunchKernelGGL(( wavestatkernel), dim3(grid3d),dim3(block3d), 0, 0, dev_wavedata,dev_wavestat,dev_wavestat2,dev_hammingArray);
checkCudaError(hipGetLastError());
//deploy double
hipLaunchKernelGGL(( maxCorelationkernel), dim3(grid),dim3(block), 0, 0, dev_corelation,dev_wavestat,dev_wavestat2,dev_hammingstat,dev_allcorrelations);
checkCudaError(hipGetLastError());
checkCudaError(hipMemcpy(allcorelation,dev_allcorrelations,WAVELENGTH*KEYS*KEYBYTESPART*sizeof(double),hipMemcpyDeviceToHost));
//write correlation values
int point,keyguess,keybyte;
for(keybyte=0;keybyte<KEYBYTESPART;keybyte++){
for(keyguess=0;keyguess<KEYS;keyguess++){
for(point=0;point<WAVELENGTH;point++){
fprintf(filec[keybyte][keyguess],"%f ",allcorelation[point*KEYS*KEYBYTESPART + keyguess*KEYBYTESPART + keybyte]);
}
if(loops==ALLWAVELEN/WAVELENGTH-1){
fprintf(filec[keybyte][keyguess],"\n");
fclose(filec[keybyte][keyguess]);
}
}
}
}
//copy back
checkCudaError(hipMemcpy(corelation,dev_corelation,KEYS*KEYBYTESPART*sizeof(double),hipMemcpyDeviceToHost));
checkCudaError(hipFree(dev_wavedata));
checkCudaError(hipFree(dev_sample));
checkCudaError(hipFree(dev_corelation));
checkCudaError(hipFree(dev_wavestat));
checkCudaError(hipFree(dev_wavestat2));
checkCudaError(hipFree(dev_hammingstat));
checkCudaError(hipFree(dev_hammingArray));
//Time
hipEventCreate(&stop);
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedtime,start,stop);
fprintf(stderr,"Time spent for CUDA operation : %.10f\n",elapsedtime/(float)1000);
//form struct array
struct key_corr key_corrpairs[KEYS][KEYBYTESPART];
//print all information while putting to structs
file=fopen(FILEALL,"w");
for (i=0;i<KEYS;i++){
for(j=0;j<KEYBYTESPART;j++){
key_corrpairs[i][j].key=i;
key_corrpairs[i][j].corr=corelation[i*KEYBYTESPART+j];
fprintf(file,"%.2X : %f\t",i,corelation[i*KEYBYTESPART+j]);
}
fprintf(file,"\n");
}
int k;
//sort using insertion sort
for (j=0;j<KEYBYTESPART;j++){
for (i=1;i<KEYS;i++){
double corr=key_corrpairs[i][j].corr;
unsigned int key=key_corrpairs[i][j].key;
for (k=(int)(i-1);k>=0 && corr>key_corrpairs[k][j].corr;k--){
key_corrpairs[k+1][j].corr=key_corrpairs[k][j].corr;
key_corrpairs[k+1][j].key=key_corrpairs[k][j].key;
}
key_corrpairs[k+1][j].key=key;
key_corrpairs[k+1][j].corr=corr;
}
}
//print all in ascending order
file=fopen(FILEALLSORTED,"w");
for (i=0;i<KEYS;i++){
for(j=0;j<KEYBYTESPART;j++){
fprintf(file,"%.2X : %f\t",key_corrpairs[i][j].key,key_corrpairs[i][j].corr);
}
fprintf(file,"\n");
}
//print the best five to the stdout
for (i=0;i<5;i++){
for(j=0;j<KEYBYTESPART;j++){
printf("%.2X\t\t\t",key_corrpairs[i][j].key);
}
printf("\n");
for(j=0;j<KEYBYTESPART;j++){
printf("%f\t",key_corrpairs[i][j].corr);
}
printf("\n\n");
}
return 0;
}
| a350631c37d414df27e1d4a6f8a68db87867a2c9.cu | /* Author : Hasindu Gamaarachchi
CPA for 128/128 bit SPECK software implementation
To derive right half key K2
*/
#include <stdio.h>
#include "helpers.cuh"
#include "data.cuh"
//file name for all key-correlation pairs sorted in key order
#define FILEALL "all.txt"
//file name for all key-correlation pairs sorted using correlation coefficient
#define FILEALLSORTED "allsorted.txt"
//wave fine format, 0 if ascii 1 if binary
#define WAVFORMAT 1
//set 1 if your samples are hexadecimal separated by spaces
//set 0 if your samples are hexadecimal with no spaces
#define FORMAT 0
//set 0 if need to take fabs(), 1 if negative, 2 if positive
#define CORRELATION_SIGN 0
//Change SAMPLES to the number of power traces
#define SAMPLES 5000
//Change ALLWAVELEGTH to the number of sampling points you have in a single power trace
#define ALLWAVELEN 100000
//Due to memory restrictions on GPU if SAMPLES is large cannot keep all the things at once in memory
//In such case of a memory allocation failure reduce WAVELENGTH
//But make sure that ALLWAVELENGTH is divisible by WAVELENGTH
#define WAVELENGTH 50000
//define for 128/128 Speck
#define KEYBYTES 16
#define KEYBYTESPART 8
#define KEYS 256
//struct used for sorting correlation key pairs
struct key_corr{
unsigned int key;
double corr;
};
//hamming weight of a number
__device__ byte hammingweight(byte H){
//byte H=M^R;
// Count the number of set bits
byte dist=0;
while(H){
dist++;
H &= H - 1;
}
return dist;
}
__device__ byte hamming(unsigned int *sample, unsigned int i,unsigned int n,unsigned int key) { //n is byteno i is the sample
byte pt0[8];
copy2(pt0,&sample[i*KEYBYTES]);
byte pt1[8];
copy2(pt1,&sample[i*KEYBYTES+8]);
byte ans[8];
ROR(ans,pt1,8);
copy(pt1,ans);
_add(ans,pt1,pt0);
byte inter ;
if(n<8){
inter= (byte)(ans[n] ^ key);
}
else{
inter = (byte)(ans[n-8] ^ key);
}
byte dist = hammingweight(inter);
return dist;
}
__global__ void maxCorelationkernel(double *corelation,double *wavestat,double *wavestat2,double *hammingstat,double *allcorelation){
int keyguess=blockDim.y*blockIdx.y+threadIdx.y;
int keybyte=blockDim.x*blockIdx.x+threadIdx.x;
if (keybyte<KEYBYTESPART && keyguess<KEYS ){
double sigmaH,sigmaH2,sigmaW=0,sigmaW2=0,sigmaWH=0;
sigmaH=hammingstat[KEYBYTESPART*keyguess+keybyte];
sigmaH2=hammingstat[KEYS*KEYBYTESPART+KEYBYTESPART*keyguess+keybyte];
double temp_corelation=0;;
double corelationmax=0;;
unsigned int j;
for(j=0;j<WAVELENGTH;j++){
sigmaWH=wavestat2[j*KEYS*KEYBYTESPART + keyguess*KEYBYTESPART + keybyte];
sigmaW=wavestat[j];
sigmaW2=wavestat[WAVELENGTH+j];
double numerator=SAMPLES*sigmaWH - sigmaW*sigmaH;
double denominator=sqrt(SAMPLES*sigmaW2 - sigmaW*sigmaW)*sqrt(SAMPLES*sigmaH2 - sigmaH*sigmaH);
if(CORRELATION_SIGN==0){
temp_corelation=fabs(numerator/denominator);
}
else if(CORRELATION_SIGN==1){
temp_corelation=-numerator/denominator;
}
else if(CORRELATION_SIGN==2){
temp_corelation=numerator/denominator;
}
else{
temp_corelation=fabs(numerator/denominator);
}
allcorelation[j*KEYS*KEYBYTESPART + keyguess*KEYBYTESPART + keybyte]=temp_corelation;
if(temp_corelation>corelationmax){
corelationmax=temp_corelation;
}
}
if(corelationmax>corelation[keyguess*KEYBYTESPART+keybyte]){
corelation[keyguess*KEYBYTESPART+keybyte]=corelationmax;
}
}
return;
}
__global__ void wavestatkernel(double *wavedata, double *wavestat,double *wavestat2,byte *hammingArray){
int keyguess=blockDim.y*blockIdx.y+threadIdx.y;
int keybyte=blockDim.x*blockIdx.x+threadIdx.x;
int wave=blockDim.z*blockIdx.z+threadIdx.z;
if (keyguess<KEYS && keybyte<KEYBYTESPART && wave<WAVELENGTH ){
unsigned int i;
double sigmaWH=0;
for(i=0;i<SAMPLES;i++){
sigmaWH+=wavedata[i*WAVELENGTH+wave]*(double)hammingArray[i*KEYS*KEYBYTESPART + keyguess*KEYBYTESPART + keybyte];
}
wavestat2[wave*KEYS*KEYBYTESPART + keyguess*KEYBYTESPART + keybyte ]=sigmaWH;
}
if (keyguess==0 && keybyte==0 && wave<WAVELENGTH ){
unsigned int i;
double sigmaW=0,sigmaW2=0,W=0;
for(i=0;i<SAMPLES;i++){
W=wavedata[i*WAVELENGTH+wave];
sigmaW+=W;
sigmaW2+=W*W;
}
wavestat[wave]=sigmaW;
wavestat[WAVELENGTH+wave]=sigmaW2;
}
return;
}
__global__ void hammingkernel(unsigned int *sample,byte *hammingArray,double *hammingstat){
int keyguess=blockDim.y*blockIdx.y+threadIdx.y;
int keybyte=blockDim.x*blockIdx.x+threadIdx.x;
if (keybyte<KEYBYTESPART && keyguess<KEYS ){
double sigmaH=0,sigmaH2=0;
byte H;
unsigned int i;
for(i=0;i<SAMPLES;i++){
H=hamming(sample,i,keybyte,keyguess);
hammingArray[i*KEYS*KEYBYTESPART + keyguess*KEYBYTESPART + keybyte]=H;
sigmaH+=(double)H;
sigmaH2+=(double)H*(double)H;
}
hammingstat[KEYBYTESPART*keyguess+keybyte]=sigmaH;
hammingstat[KEYS*KEYBYTESPART+KEYBYTESPART*keyguess+keybyte]=sigmaH2;
}
return;
}
int main(int argc, char *argv[]){
unsigned int i,j;
//check args
if(argc!=3){
fprintf(stderr,"%s\n", "Not enough args. eg ./cpa wavedata.txt sample.txt");
exit(EXIT_FAILURE);
}
if(ALLWAVELEN%WAVELENGTH !=0){
fprintf(stderr,"Make sure that ALLWAVELEN is divisible by WAVELEN\n");
exit(1);
}
//get wave data
double *wavedata=(double *)malloc(sizeof(double) * SAMPLES* WAVELENGTH);
isMemoryFull(wavedata);
//get sample texts
unsigned int *sample=(unsigned int *)malloc(sizeof(unsigned int)*SAMPLES*KEYBYTES);
isMemoryFull(sample);
FILE *file=fopen(argv[2],"r");
isFileOK(file);
if(FORMAT==1){
for(i=0; i<SAMPLES ;i++){
for(j=0; j<KEYBYTES; j++){
fscanf(file,"%x",&sample[i*KEYBYTES+j]);
}
}
}
else if(FORMAT==0){
char str[100];
for(i=0; i<SAMPLES ;i++){
fscanf(file,"%s",str);
for(j=0; j<KEYBYTES; j++){
sscanf(&str[2*j],"%02X",&sample[i*KEYBYTES+j]);
}
}
}
else{
fprintf(stderr,"Unknown FORMAT for sample text\n");
exit(1);
}
fclose(file);
//space for corelation
double *corelation=(double *)malloc(sizeof(double) * KEYS * KEYBYTESPART);
isMemoryFull(corelation);
//space for all correlations
double *allcorelation=(double *)malloc(sizeof(double) * KEYS * KEYBYTESPART * WAVELENGTH);
isMemoryFull(allcorelation);
//Time
cudaEvent_t start,stop;
float elapsedtime;
cudaEventCreate(&start);
cudaEventRecord(start,0);
//cuda arrays and copying
double *dev_wavedata;
unsigned int *dev_sample;
double *dev_corelation,*dev_allcorrelations,*dev_wavestat,*dev_wavestat2,*dev_hammingstat;
byte *dev_hammingArray;
checkCudaError(cudaMalloc((void**)&dev_wavedata, SAMPLES*WAVELENGTH*sizeof(double)));
checkCudaError(cudaMalloc((void**)&dev_sample, SAMPLES*KEYBYTES*sizeof(unsigned int)));
checkCudaError(cudaMalloc((void**)&dev_corelation, KEYS*KEYBYTESPART*sizeof(double)));
checkCudaError(cudaMalloc((void**)&dev_hammingArray, KEYS*KEYBYTESPART*SAMPLES*sizeof(byte)));
checkCudaError(cudaMalloc((void**)&dev_wavestat, 2*WAVELENGTH*sizeof(double)));
checkCudaError(cudaMalloc((void**)&dev_wavestat2, KEYS*KEYBYTESPART*WAVELENGTH*sizeof(double)));
checkCudaError(cudaMalloc((void**)&dev_hammingstat, 2*KEYS*KEYBYTESPART*sizeof(double)));
checkCudaError(cudaMalloc((void**)&dev_allcorrelations, WAVELENGTH*KEYS*KEYBYTESPART*sizeof(double)));
checkCudaError(cudaMemset(dev_corelation,0, KEYS*KEYBYTESPART*sizeof(double)));
checkCudaError(cudaMemcpy(dev_sample,sample, SAMPLES*KEYBYTES*sizeof(unsigned int),cudaMemcpyHostToDevice));
dim3 grid(KEYBYTES/8,KEYS/32);
dim3 block(8,32);
//findhamming
hammingkernel<<<grid,block>>>(dev_sample,dev_hammingArray,dev_hammingstat);
checkCudaError(cudaGetLastError());
//correlation value writing
char filename[100];
FILE* filec[KEYBYTESPART][KEYS];
int keyguess,keybyte;
for(keybyte=0;keybyte<KEYBYTESPART;keybyte++){
for(keyguess=0;keyguess<KEYS;keyguess++){
sprintf(filename,"subbyte%d_keyguess%d",keybyte,keyguess);
filec[keybyte][keyguess]=fopen(filename,"w");
}
}
int loops=0;
for(loops=0;loops<ALLWAVELEN/WAVELENGTH;loops++){
if(WAVFORMAT==0){
FILE *file=fopen(argv[1],"r");
isFileOK(file);
for(i=0; i<SAMPLES ;i++){
unsigned int k=0;
for(j=0; j<ALLWAVELEN; j++){
float dat;
fscanf(file,"%f",&dat);
if(j<WAVELENGTH*(loops+1) && j>=WAVELENGTH*loops){
wavedata[i*WAVELENGTH+k]=(double)dat;
k++;
}
}
}
fclose(file);
}
else if(WAVFORMAT==1){
//read wave data
FILE *file=fopen(argv[1],"rb");
isFileOK(file);
for(i=0; i<SAMPLES ;i++){
fseek(file,sizeof(float)*(i*ALLWAVELEN+WAVELENGTH*loops),SEEK_SET);
for(j=0; j<WAVELENGTH; j++){
float dat;
int ret=fread(&dat,sizeof(float),1,file);
if(ret<1){
perror("");
exit(1);
}
wavedata[i*WAVELENGTH+j]=(double)dat;
}
}
fclose(file);
}
else{
fprintf(stderr,"Unknown wave file format\n");
exit(1);
}
checkCudaError(cudaMemcpy(dev_wavedata,wavedata,SAMPLES*WAVELENGTH*sizeof(double),cudaMemcpyHostToDevice));
dim3 block3d(8,16,4);
dim3 grid3d(KEYBYTESPART/8,KEYS/16,WAVELENGTH/4);
//find wave stats
wavestatkernel<<<grid3d,block3d>>>(dev_wavedata,dev_wavestat,dev_wavestat2,dev_hammingArray);
checkCudaError(cudaGetLastError());
//deploy double
maxCorelationkernel<<<grid,block>>>(dev_corelation,dev_wavestat,dev_wavestat2,dev_hammingstat,dev_allcorrelations);
checkCudaError(cudaGetLastError());
checkCudaError(cudaMemcpy(allcorelation,dev_allcorrelations,WAVELENGTH*KEYS*KEYBYTESPART*sizeof(double),cudaMemcpyDeviceToHost));
//write correlation values
int point,keyguess,keybyte;
for(keybyte=0;keybyte<KEYBYTESPART;keybyte++){
for(keyguess=0;keyguess<KEYS;keyguess++){
for(point=0;point<WAVELENGTH;point++){
fprintf(filec[keybyte][keyguess],"%f ",allcorelation[point*KEYS*KEYBYTESPART + keyguess*KEYBYTESPART + keybyte]);
}
if(loops==ALLWAVELEN/WAVELENGTH-1){
fprintf(filec[keybyte][keyguess],"\n");
fclose(filec[keybyte][keyguess]);
}
}
}
}
//copy back
checkCudaError(cudaMemcpy(corelation,dev_corelation,KEYS*KEYBYTESPART*sizeof(double),cudaMemcpyDeviceToHost));
checkCudaError(cudaFree(dev_wavedata));
checkCudaError(cudaFree(dev_sample));
checkCudaError(cudaFree(dev_corelation));
checkCudaError(cudaFree(dev_wavestat));
checkCudaError(cudaFree(dev_wavestat2));
checkCudaError(cudaFree(dev_hammingstat));
checkCudaError(cudaFree(dev_hammingArray));
//Time
cudaEventCreate(&stop);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedtime,start,stop);
fprintf(stderr,"Time spent for CUDA operation : %.10f\n",elapsedtime/(float)1000);
//form struct array
struct key_corr key_corrpairs[KEYS][KEYBYTESPART];
//print all information while putting to structs
file=fopen(FILEALL,"w");
for (i=0;i<KEYS;i++){
for(j=0;j<KEYBYTESPART;j++){
key_corrpairs[i][j].key=i;
key_corrpairs[i][j].corr=corelation[i*KEYBYTESPART+j];
fprintf(file,"%.2X : %f\t",i,corelation[i*KEYBYTESPART+j]);
}
fprintf(file,"\n");
}
int k;
//sort using insertion sort
for (j=0;j<KEYBYTESPART;j++){
for (i=1;i<KEYS;i++){
double corr=key_corrpairs[i][j].corr;
unsigned int key=key_corrpairs[i][j].key;
for (k=(int)(i-1);k>=0 && corr>key_corrpairs[k][j].corr;k--){
key_corrpairs[k+1][j].corr=key_corrpairs[k][j].corr;
key_corrpairs[k+1][j].key=key_corrpairs[k][j].key;
}
key_corrpairs[k+1][j].key=key;
key_corrpairs[k+1][j].corr=corr;
}
}
//print all in ascending order
file=fopen(FILEALLSORTED,"w");
for (i=0;i<KEYS;i++){
for(j=0;j<KEYBYTESPART;j++){
fprintf(file,"%.2X : %f\t",key_corrpairs[i][j].key,key_corrpairs[i][j].corr);
}
fprintf(file,"\n");
}
//print the best five to the stdout
for (i=0;i<5;i++){
for(j=0;j<KEYBYTESPART;j++){
printf("%.2X\t\t\t",key_corrpairs[i][j].key);
}
printf("\n");
for(j=0;j<KEYBYTESPART;j++){
printf("%f\t",key_corrpairs[i][j].corr);
}
printf("\n\n");
}
return 0;
}
|
ad52c7c524f98cb6b96a442ffb3d3ba209bc5b0a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// memtest86-style test-7 write pass: block blockIdx.x copies the reference
// pattern stored at _start_ptr into its own BLOCKSIZE-byte slice of the
// buffer starting at _ptr.  `err` is not written by this kernel.  The copy
// loop is not partitioned by threadIdx, so every thread of a block writes
// the same words (presumably launched with one thread per block -- TODO
// confirm at the call site).
__global__ void kernel_test7_write(char* _ptr, char* end_ptr, char* _start_ptr, unsigned int* err)
{
unsigned int i;
// Base of the BLOCKSIZE-byte slice owned by this block.
unsigned int* ptr = (unsigned int*) (_ptr + blockIdx.x*BLOCKSIZE);
unsigned int* start_ptr = (unsigned int*) _start_ptr;
// Blocks mapped past the end of the buffer have nothing to copy.
if (ptr >= (unsigned int*) end_ptr) {
return;
}
// Copy the pattern word by word.
for (i = 0;i < BLOCKSIZE/sizeof(unsigned int); i++){
ptr[i] = start_ptr[i];
}
return;
} | ad52c7c524f98cb6b96a442ffb3d3ba209bc5b0a.cu | #include "includes.h"
// memtest86-style test-7 write pass: block b copies the reference pattern
// held at _start_ptr over the BLOCKSIZE-byte slice starting at
// _ptr + b*BLOCKSIZE.  `err` is not written by this kernel.  Note the copy
// loop does not partition work by threadIdx, so every thread of a block
// writes the same words (presumably launched with one thread per block --
// TODO confirm at the call site).
__global__ void kernel_test7_write(char* _ptr, char* end_ptr, char* _start_ptr, unsigned int* err)
{
    unsigned int* dst = (unsigned int*) (_ptr + blockIdx.x * BLOCKSIZE);
    const unsigned int* pattern = (const unsigned int*) _start_ptr;

    // Blocks mapped past the end of the buffer have nothing to copy.
    if (dst >= (unsigned int*) end_ptr) {
        return;
    }

    for (unsigned int word = 0; word < BLOCKSIZE / sizeof(unsigned int); word++) {
        dst[word] = pattern[word];
    }
}
c4d4a523d1b2bc9256850756c968014948b43d6a.hip | // !!! This is a file automatically generated by hipify!!!
//---------------------------------------------------------------------------//
// Copyright (c) 2014 Benoit
//
// Distributed under the Boost Software License, Version 1.0
// See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt
//
// See http://boostorg.github.com/compute for more information.
//---------------------------------------------------------------------------//
#include <algorithm>
#include <cstdlib>
#include <iostream>
#include <thrust/copy.h>
#include <thrust/device_vector.h>
#include <thrust/generate.h>
#include <thrust/host_vector.h>
#include <thrust/scan.h>
#include "perf.hpp"
// Benchmark Thrust's in-place exclusive_scan on PERF_N random ints and
// report the fastest of PERF_TRIALS timed runs (PERF_N, PERF_TRIALS and
// perf_timer presumably come from perf.hpp -- not visible here).
int main(int argc, char *argv[])
{
perf_parse_args(argc, argv);
std::cout << "size: " << PERF_N << std::endl;
thrust::host_vector<int> h_vec = generate_random_vector<int>(PERF_N);
// transfer data to the device
thrust::device_vector<int> d_vec = h_vec;
perf_timer t;
for(size_t trial = 0; trial < PERF_TRIALS; trial++){
// restore the unscanned input before each timed iteration
d_vec = h_vec;
t.start();
thrust::exclusive_scan(d_vec.begin(), d_vec.end(), d_vec.begin());
// the scan is asynchronous; wait for the GPU before stopping the clock
hipDeviceSynchronize();
t.stop();
}
std::cout << "time: " << t.min_time() / 1e6 << " ms" << std::endl;
// transfer data back to host
thrust::copy(d_vec.begin(), d_vec.end(), h_vec.begin());
return 0;
}
| c4d4a523d1b2bc9256850756c968014948b43d6a.cu | //---------------------------------------------------------------------------//
// Copyright (c) 2014 Benoit
//
// Distributed under the Boost Software License, Version 1.0
// See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt
//
// See http://boostorg.github.com/compute for more information.
//---------------------------------------------------------------------------//
#include <algorithm>
#include <cstdlib>
#include <iostream>
#include <thrust/copy.h>
#include <thrust/device_vector.h>
#include <thrust/generate.h>
#include <thrust/host_vector.h>
#include <thrust/scan.h>
#include "perf.hpp"
// Benchmark Thrust's in-place exclusive_scan on PERF_N random ints,
// reporting the fastest of PERF_TRIALS timed runs.
int main(int argc, char *argv[])
{
    perf_parse_args(argc, argv);
    std::cout << "size: " << PERF_N << std::endl;

    thrust::host_vector<int> host_data = generate_random_vector<int>(PERF_N);

    // transfer data to the device
    thrust::device_vector<int> device_data = host_data;

    perf_timer timer;
    for(size_t trial = 0; trial < PERF_TRIALS; trial++){
        // restore the unscanned input before each timed run
        device_data = host_data;
        timer.start();
        thrust::exclusive_scan(device_data.begin(), device_data.end(), device_data.begin());
        cudaDeviceSynchronize();   // the scan is asynchronous; wait before stopping the clock
        timer.stop();
    }
    std::cout << "time: " << timer.min_time() / 1e6 << " ms" << std::endl;

    // transfer data back to host
    thrust::copy(device_data.begin(), device_data.end(), host_data.begin());

    return 0;
}
|
992396e7c7c6e4622a70e2063f53e2b615502356.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h>
#include <ATen/hip/HIPApplyUtils.cuh>
// TODO make it in a common file
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
i += blockDim.x * gridDim.x)
// Read tensor[idx][v][u][y][x] from an (N, V, U, H, W)-laid-out buffer,
// returning pad_val whenever any spatial (y, x) or sub-window (v, u)
// coordinate falls outside its valid range.
template <typename T>
__device__ inline T get_pixel_val(
    const T* tensor,
    const int idx,
    const int H,
    const int W,
    const int y,
    const int x,
    const int V,
    const int U,
    const int v,
    const int u,
    const T pad_val) {
  const bool in_bounds =
      (y >= 0) && (y < H) && (x >= 0) && (x < W) &&
      (v >= 0) && (v < V) && (u >= 0) && (u < U);
  if (!in_bounds) {
    return pad_val;
  }
  const int flat = (((idx * V + v) * U + u) * H + y) * W + x;
  return tensor[flat];
}
// Atomically accumulate `val` into tensor[idx][v][u][y][x] of an
// (N, V, U, H, W)-laid-out buffer.  Out-of-range coordinates are dropped,
// and exact-zero contributions are skipped to avoid needless atomic
// traffic.
template <typename T>
__device__ inline void add_pixel_val(
    T* tensor,
    const T val,
    const int idx,
    const int H,
    const int W,
    const int y,
    const int x,
    const int V,
    const int U,
    const int v,
    const int u) {
  const bool in_bounds =
      (y >= 0) && (y < H) && (x >= 0) && (x < W) &&
      (v >= 0) && (v < V) && (u >= 0) && (u < U);
  if (in_bounds && (val != 0.)) {
    atomicAdd(tensor + ((((idx * V + v) * U + u) * H + y) * W + x), val);
  }
}
// Forward kernel for SwapAlign2Nat.  Each thread produces one element of
// the (N, Vout*Uout, Hout, Wout) output: it maps the output coordinate
// back into the input feature map and blends 16 taps -- bilinear over the
// spatial corners (yf/yc, xf/xc) crossed with bilinear over the
// sub-window corners (vf/vc, uf/uc).  Out-of-range taps contribute
// pad_val via get_pixel_val.  CUDA_1D_KERNEL_LOOP is a grid-stride loop,
// so any launch configuration covers all `nthreads` elements.
template <typename T>
__global__ void SwapAlign2NatForwardFeat(
const int nthreads,
const T* bottom_data,
const int Vout,
const int Uout,
const float hVout,
const float hUout,
const int Vin,
const int Uin,
const float lambda,
const int Hin,
const int Win,
const int Hout,
const int Wout,
const T pad_val,
T* top_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// Decode the flat output index into (idx, v, u, y, x),
// fastest-varying dimension (x) first.
int idx = index;
const int x = idx % Wout;
idx /= Wout;
const int y = idx % Hout;
idx /= Hout;
const int u = idx % Uout;
idx /= Uout;
const int v = idx % Vout;
idx /= Vout;
// Source column in the input: output column scaled by lambda plus the
// sub-window offset, re-centered by half the output window size.
const float ox = x * lambda + u - hUout + 0.5;
const int xf = static_cast<int>(floor(ox));
const int xc = static_cast<int>(ceil(ox));
const float xwc = ox - xf;
const float xwf = 1. - xwc;
// Source row, same construction as ox.
const float oy = y * lambda + v - hVout + 0.5;
const int yf = static_cast<int>(floor(oy));
const int yc = static_cast<int>(ceil(oy));
const float ywc = oy - yf;
const float ywf = 1. - ywc;
// Fractional source coordinates within the (Vin, Uin) sub-window grid.
const float ou = (u + 0.5) / lambda - 0.5;
const int uf = static_cast<int>(floor(ou));
const int uc = static_cast<int>(ceil(ou));
const float uwc = ou - uf;
const float uwf = 1. - uwc;
const float ov = (v + 0.5) / lambda - 0.5;
const int vf = static_cast<int>(floor(ov));
const int vc = static_cast<int>(ceil(ov));
const float vwc = ov - vf;
const float vwf = 1. - vwc;
// Quadrilinear blend: all 2^4 combinations of floor/ceil corners in
// (y, x, v, u), each weighted by the product of its four weights.
T val = ywf * xwf * vwf * uwf *
get_pixel_val(
bottom_data, idx, Hin, Win, yf, xf, Vin, Uin, vf, uf, pad_val) +
ywf * xwf * vwf * uwc *
get_pixel_val(
bottom_data, idx, Hin, Win, yf, xf, Vin, Uin, vf, uc, pad_val) +
ywf * xwf * vwc * uwf *
get_pixel_val(
bottom_data, idx, Hin, Win, yf, xf, Vin, Uin, vc, uf, pad_val) +
ywf * xwf * vwc * uwc *
get_pixel_val(
bottom_data, idx, Hin, Win, yf, xf, Vin, Uin, vc, uc, pad_val) +
ywf * xwc * vwf * uwf *
get_pixel_val(
bottom_data, idx, Hin, Win, yf, xc, Vin, Uin, vf, uf, pad_val) +
ywf * xwc * vwf * uwc *
get_pixel_val(
bottom_data, idx, Hin, Win, yf, xc, Vin, Uin, vf, uc, pad_val) +
ywf * xwc * vwc * uwf *
get_pixel_val(
bottom_data, idx, Hin, Win, yf, xc, Vin, Uin, vc, uf, pad_val) +
ywf * xwc * vwc * uwc *
get_pixel_val(
bottom_data, idx, Hin, Win, yf, xc, Vin, Uin, vc, uc, pad_val) +
ywc * xwf * vwf * uwf *
get_pixel_val(
bottom_data, idx, Hin, Win, yc, xf, Vin, Uin, vf, uf, pad_val) +
ywc * xwf * vwf * uwc *
get_pixel_val(
bottom_data, idx, Hin, Win, yc, xf, Vin, Uin, vf, uc, pad_val) +
ywc * xwf * vwc * uwf *
get_pixel_val(
bottom_data, idx, Hin, Win, yc, xf, Vin, Uin, vc, uf, pad_val) +
ywc * xwf * vwc * uwc *
get_pixel_val(
bottom_data, idx, Hin, Win, yc, xf, Vin, Uin, vc, uc, pad_val) +
ywc * xwc * vwf * uwf *
get_pixel_val(
bottom_data, idx, Hin, Win, yc, xc, Vin, Uin, vf, uf, pad_val) +
ywc * xwc * vwf * uwc *
get_pixel_val(
bottom_data, idx, Hin, Win, yc, xc, Vin, Uin, vf, uc, pad_val) +
ywc * xwc * vwc * uwf *
get_pixel_val(
bottom_data, idx, Hin, Win, yc, xc, Vin, Uin, vc, uf, pad_val) +
ywc * xwc * vwc * uwc *
get_pixel_val(
bottom_data, idx, Hin, Win, yc, xc, Vin, Uin, vc, uc, pad_val);
top_data[index] = val;
}
}
template <typename T>
__global__ void SwapAlign2NatBackwardFeat(
const int nthreads,
const T* top_diff,
const int Vout,
const int Uout,
const float hVout,
const float hUout,
const int Vin,
const int Uin,
const float lambda,
const int Hin,
const int Win,
const int Hout,
const int Wout,
T* bottom_diff) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int idx = index;
const int x = idx % Wout;
idx /= Wout;
const int y = idx % Hout;
idx /= Hout;
const int u = idx % Uout;
idx /= Uout;
const int v = idx % Vout;
idx /= Vout;
const float ox = x * lambda + u - hUout + 0.5;
const int xf = static_cast<int>(floor(ox));
const int xc = static_cast<int>(ceil(ox));
const float xwc = ox - xf;
const float xwf = 1. - xwc;
const float oy = y * lambda + v - hVout + 0.5;
const int yf = static_cast<int>(floor(oy));
const int yc = static_cast<int>(ceil(oy));
const float ywc = oy - yf;
const float ywf = 1. - ywc;
const float ou = (u + 0.5) / lambda - 0.5;
const int uf = static_cast<int>(floor(ou));
const int uc = static_cast<int>(ceil(ou));
const float uwc = ou - uf;
const float uwf = 1. - uwc;
const float ov = (v + 0.5) / lambda - 0.5;
const int vf = static_cast<int>(floor(ov));
const int vc = static_cast<int>(ceil(ov));
const float vwc = ov - vf;
const float vwf = 1. - vwc;
const T grad = top_diff[index];
add_pixel_val(
bottom_diff,
ywf * xwf * vwf * uwf * grad,
idx,
Hin,
Win,
yf,
xf,
Vin,
Uin,
vf,
uf);
add_pixel_val(
bottom_diff,
ywf * xwf * vwf * uwc * grad,
idx,
Hin,
Win,
yf,
xf,
Vin,
Uin,
vf,
uc);
add_pixel_val(
bottom_diff,
ywf * xwf * vwc * uwf * grad,
idx,
Hin,
Win,
yf,
xf,
Vin,
Uin,
vc,
uf);
add_pixel_val(
bottom_diff,
ywf * xwf * vwc * uwc * grad,
idx,
Hin,
Win,
yf,
xf,
Vin,
Uin,
vc,
uc);
add_pixel_val(
bottom_diff,
ywf * xwc * vwf * uwf * grad,
idx,
Hin,
Win,
yf,
xc,
Vin,
Uin,
vf,
uf);
add_pixel_val(
bottom_diff,
ywf * xwc * vwf * uwc * grad,
idx,
Hin,
Win,
yf,
xc,
Vin,
Uin,
vf,
uc);
add_pixel_val(
bottom_diff,
ywf * xwc * vwc * uwf * grad,
idx,
Hin,
Win,
yf,
xc,
Vin,
Uin,
vc,
uf);
add_pixel_val(
bottom_diff,
ywf * xwc * vwc * uwc * grad,
idx,
Hin,
Win,
yf,
xc,
Vin,
Uin,
vc,
uc);
add_pixel_val(
bottom_diff,
ywc * xwf * vwf * uwf * grad,
idx,
Hin,
Win,
yc,
xf,
Vin,
Uin,
vf,
uf);
add_pixel_val(
bottom_diff,
ywc * xwf * vwf * uwc * grad,
idx,
Hin,
Win,
yc,
xf,
Vin,
Uin,
vf,
uc);
add_pixel_val(
bottom_diff,
ywc * xwf * vwc * uwf * grad,
idx,
Hin,
Win,
yc,
xf,
Vin,
Uin,
vc,
uf);
add_pixel_val(
bottom_diff,
ywc * xwf * vwc * uwc * grad,
idx,
Hin,
Win,
yc,
xf,
Vin,
Uin,
vc,
uc);
add_pixel_val(
bottom_diff,
ywc * xwc * vwf * uwf * grad,
idx,
Hin,
Win,
yc,
xc,
Vin,
Uin,
vf,
uf);
add_pixel_val(
bottom_diff,
ywc * xwc * vwf * uwc * grad,
idx,
Hin,
Win,
yc,
xc,
Vin,
Uin,
vf,
uc);
add_pixel_val(
bottom_diff,
ywc * xwc * vwc * uwf * grad,
idx,
Hin,
Win,
yc,
xc,
Vin,
Uin,
vc,
uf);
add_pixel_val(
bottom_diff,
ywc * xwc * vwc * uwc * grad,
idx,
Hin,
Win,
yc,
xc,
Vin,
Uin,
vc,
uc);
}
}
namespace tensormask {
// Forward pass of SwapAlign2Nat.
// X: (N, C, Hin, Win) CUDA tensor where C = Vin*Uin must be a perfect
// square.  lambda_val: integer scale factor.  pad_val: value used for
// out-of-range interpolation taps.  Returns Y of shape
// (N, Vout*Uout, Hout, Wout) with Vout = lambda*Vin and
// Hout = ceil(Hin/lambda).
at::Tensor SwapAlign2Nat_forward_cuda(
const at::Tensor& X,
const int lambda_val,
const float pad_val) {
AT_ASSERTM(X.device().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(X.ndimension() == 4, "input must be a 4D tensor");
AT_ASSERTM(lambda_val >= 1, "lambda should be greater or equal to 1");
const int N = X.size(0);
const int C = X.size(1);
// Split the channel dimension into a square (Vin, Uin) sub-window grid.
const int Vin = static_cast<int>(sqrt(static_cast<float>(C)));
const int Uin = C / Vin;
AT_ASSERTM(
C == Vin * Uin && Vin == Uin, "#channels should be a square number");
const int Vout = lambda_val * Vin;
const int Uout = lambda_val * Uin;
const int Hin = X.size(2);
const int Win = X.size(3);
const float lambda = static_cast<float>(lambda_val);
// Output spatial size shrinks by lambda (rounded up).
const int Hout = static_cast<int>(ceil(Hin / lambda));
const int Wout = static_cast<int>(ceil(Win / lambda));
const float hVout = Vout / 2.;
const float hUout = Uout / 2.;
at::hip::HIPGuardMasqueradingAsCUDA device_guard(X.device());
at::Tensor Y = at::empty({N, Vout * Uout, Hout, Wout}, X.options());
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
// One thread per output element, capped at 4096 blocks of 512 threads;
// the kernel's grid-stride loop covers any remainder.
dim3 grid(::min(at::cuda::ATenCeilDiv(Y.numel(), 512L), 4096L));
dim3 block(512);
if (Y.numel() == 0) {
AT_CUDA_CHECK(hipGetLastError());
return Y;
}
AT_DISPATCH_FLOATING_TYPES(X.scalar_type(), "SwapAlign2Nat_forward", [&] {
hipLaunchKernelGGL(( SwapAlign2NatForwardFeat<scalar_t>), dim3(grid), dim3(block), 0, stream,
Y.numel(),
X.contiguous().data_ptr<scalar_t>(),
Vout,
Uout,
hVout,
hUout,
Vin,
Uin,
lambda,
Hin,
Win,
Hout,
Wout,
pad_val,
Y.data_ptr<scalar_t>());
});
// NOTE(review): device-wide sync after the forward launch (present in the
// upstream CUDA source too) serializes the stream -- confirm it is needed.
hipDeviceSynchronize();
AT_CUDA_CHECK(hipGetLastError());
return Y;
}
// Backward pass: scatters the output gradient gY back into a
// zero-initialized gX of the original input shape; the kernel accumulates
// with atomic adds, which is why gX must start zeroed.
at::Tensor SwapAlign2Nat_backward_cuda(
const at::Tensor& gY,
const int lambda_val,
const int batch_size,
const int channel,
const int height,
const int width) {
AT_ASSERTM(gY.device().is_cuda(), "input gradient must be a CUDA tensor");
AT_ASSERTM(gY.ndimension() == 4, "input gradient must be a 4D tensor");
AT_ASSERTM(lambda_val >= 1, "lambda should be greater or equal to 1");
const int Vin = static_cast<int>(sqrt(static_cast<float>(channel)));
const int Uin = channel / Vin;
const int Vout = lambda_val * Vin;
const int Uout = lambda_val * Uin;
const float hVout = Vout / 2.;
const float hUout = Uout / 2.;
const int Hout = gY.size(2);
const int Wout = gY.size(3);
at::hip::HIPGuardMasqueradingAsCUDA device_guard(gY.device());
at::Tensor gX = at::zeros({batch_size, channel, height, width}, gY.options());
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
dim3 grid(::min(at::cuda::ATenCeilDiv(gY.numel(), 512L), 4096L));
dim3 block(512);
// handle possibly empty gradients
if (gY.numel() == 0) {
AT_CUDA_CHECK(hipGetLastError());
return gX;
}
AT_DISPATCH_FLOATING_TYPES(gY.scalar_type(), "SwapAlign2Nat_backward", [&] {
hipLaunchKernelGGL(( SwapAlign2NatBackwardFeat<scalar_t>), dim3(grid), dim3(block), 0, stream,
gY.numel(),
gY.contiguous().data_ptr<scalar_t>(),
Vout,
Uout,
hVout,
hUout,
Vin,
Uin,
static_cast<float>(lambda_val),
height,
width,
Hout,
Wout,
gX.data_ptr<scalar_t>());
});
AT_CUDA_CHECK(hipGetLastError());
return gX;
}
} // namespace tensormask
| 992396e7c7c6e4622a70e2063f53e2b615502356.cu | // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <c10/cuda/CUDAGuard.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
// TODO make it in a common file
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
i += blockDim.x * gridDim.x)
// Read tensor[idx][v][u][y][x] from an (N, V, U, H, W)-laid-out buffer,
// returning pad_val whenever any spatial (y, x) or sub-window (v, u)
// coordinate falls outside its valid range.
template <typename T>
__device__ inline T get_pixel_val(
const T* tensor,
const int idx,
const int H,
const int W,
const int y,
const int x,
const int V,
const int U,
const int v,
const int u,
const T pad_val) {
if ((y < 0) || (y >= H) || (x < 0) || (x >= W) || (v < 0) || (v >= V) ||
(u < 0) || (u >= U)) {
return pad_val;
} else {
return tensor[(((idx * V + v) * U + u) * H + y) * W + x];
}
}
// Atomically add `val` into tensor[idx][v][u][y][x] of an (N, V, U, H, W)
// buffer.  Out-of-range coordinates are dropped, and exact-zero
// contributions are skipped to avoid needless atomic traffic.
template <typename T>
__device__ inline void add_pixel_val(
T* tensor,
const T val,
const int idx,
const int H,
const int W,
const int y,
const int x,
const int V,
const int U,
const int v,
const int u) {
if ((val == 0.) || (y < 0) || (y >= H) || (x < 0) || (x >= W) || (v < 0) ||
(v >= V) || (u < 0) || (u >= U)) {
return;
} else {
atomicAdd(tensor + ((((idx * V + v) * U + u) * H + y) * W + x), val);
}
}
template <typename T>
__global__ void SwapAlign2NatForwardFeat(
const int nthreads,
const T* bottom_data,
const int Vout,
const int Uout,
const float hVout,
const float hUout,
const int Vin,
const int Uin,
const float lambda,
const int Hin,
const int Win,
const int Hout,
const int Wout,
const T pad_val,
T* top_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int idx = index;
const int x = idx % Wout;
idx /= Wout;
const int y = idx % Hout;
idx /= Hout;
const int u = idx % Uout;
idx /= Uout;
const int v = idx % Vout;
idx /= Vout;
const float ox = x * lambda + u - hUout + 0.5;
const int xf = static_cast<int>(floor(ox));
const int xc = static_cast<int>(ceil(ox));
const float xwc = ox - xf;
const float xwf = 1. - xwc;
const float oy = y * lambda + v - hVout + 0.5;
const int yf = static_cast<int>(floor(oy));
const int yc = static_cast<int>(ceil(oy));
const float ywc = oy - yf;
const float ywf = 1. - ywc;
const float ou = (u + 0.5) / lambda - 0.5;
const int uf = static_cast<int>(floor(ou));
const int uc = static_cast<int>(ceil(ou));
const float uwc = ou - uf;
const float uwf = 1. - uwc;
const float ov = (v + 0.5) / lambda - 0.5;
const int vf = static_cast<int>(floor(ov));
const int vc = static_cast<int>(ceil(ov));
const float vwc = ov - vf;
const float vwf = 1. - vwc;
T val = ywf * xwf * vwf * uwf *
get_pixel_val(
bottom_data, idx, Hin, Win, yf, xf, Vin, Uin, vf, uf, pad_val) +
ywf * xwf * vwf * uwc *
get_pixel_val(
bottom_data, idx, Hin, Win, yf, xf, Vin, Uin, vf, uc, pad_val) +
ywf * xwf * vwc * uwf *
get_pixel_val(
bottom_data, idx, Hin, Win, yf, xf, Vin, Uin, vc, uf, pad_val) +
ywf * xwf * vwc * uwc *
get_pixel_val(
bottom_data, idx, Hin, Win, yf, xf, Vin, Uin, vc, uc, pad_val) +
ywf * xwc * vwf * uwf *
get_pixel_val(
bottom_data, idx, Hin, Win, yf, xc, Vin, Uin, vf, uf, pad_val) +
ywf * xwc * vwf * uwc *
get_pixel_val(
bottom_data, idx, Hin, Win, yf, xc, Vin, Uin, vf, uc, pad_val) +
ywf * xwc * vwc * uwf *
get_pixel_val(
bottom_data, idx, Hin, Win, yf, xc, Vin, Uin, vc, uf, pad_val) +
ywf * xwc * vwc * uwc *
get_pixel_val(
bottom_data, idx, Hin, Win, yf, xc, Vin, Uin, vc, uc, pad_val) +
ywc * xwf * vwf * uwf *
get_pixel_val(
bottom_data, idx, Hin, Win, yc, xf, Vin, Uin, vf, uf, pad_val) +
ywc * xwf * vwf * uwc *
get_pixel_val(
bottom_data, idx, Hin, Win, yc, xf, Vin, Uin, vf, uc, pad_val) +
ywc * xwf * vwc * uwf *
get_pixel_val(
bottom_data, idx, Hin, Win, yc, xf, Vin, Uin, vc, uf, pad_val) +
ywc * xwf * vwc * uwc *
get_pixel_val(
bottom_data, idx, Hin, Win, yc, xf, Vin, Uin, vc, uc, pad_val) +
ywc * xwc * vwf * uwf *
get_pixel_val(
bottom_data, idx, Hin, Win, yc, xc, Vin, Uin, vf, uf, pad_val) +
ywc * xwc * vwf * uwc *
get_pixel_val(
bottom_data, idx, Hin, Win, yc, xc, Vin, Uin, vf, uc, pad_val) +
ywc * xwc * vwc * uwf *
get_pixel_val(
bottom_data, idx, Hin, Win, yc, xc, Vin, Uin, vc, uf, pad_val) +
ywc * xwc * vwc * uwc *
get_pixel_val(
bottom_data, idx, Hin, Win, yc, xc, Vin, Uin, vc, uc, pad_val);
top_data[index] = val;
}
}
template <typename T>
__global__ void SwapAlign2NatBackwardFeat(
const int nthreads,
const T* top_diff,
const int Vout,
const int Uout,
const float hVout,
const float hUout,
const int Vin,
const int Uin,
const float lambda,
const int Hin,
const int Win,
const int Hout,
const int Wout,
T* bottom_diff) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int idx = index;
const int x = idx % Wout;
idx /= Wout;
const int y = idx % Hout;
idx /= Hout;
const int u = idx % Uout;
idx /= Uout;
const int v = idx % Vout;
idx /= Vout;
const float ox = x * lambda + u - hUout + 0.5;
const int xf = static_cast<int>(floor(ox));
const int xc = static_cast<int>(ceil(ox));
const float xwc = ox - xf;
const float xwf = 1. - xwc;
const float oy = y * lambda + v - hVout + 0.5;
const int yf = static_cast<int>(floor(oy));
const int yc = static_cast<int>(ceil(oy));
const float ywc = oy - yf;
const float ywf = 1. - ywc;
const float ou = (u + 0.5) / lambda - 0.5;
const int uf = static_cast<int>(floor(ou));
const int uc = static_cast<int>(ceil(ou));
const float uwc = ou - uf;
const float uwf = 1. - uwc;
const float ov = (v + 0.5) / lambda - 0.5;
const int vf = static_cast<int>(floor(ov));
const int vc = static_cast<int>(ceil(ov));
const float vwc = ov - vf;
const float vwf = 1. - vwc;
const T grad = top_diff[index];
add_pixel_val(
bottom_diff,
ywf * xwf * vwf * uwf * grad,
idx,
Hin,
Win,
yf,
xf,
Vin,
Uin,
vf,
uf);
add_pixel_val(
bottom_diff,
ywf * xwf * vwf * uwc * grad,
idx,
Hin,
Win,
yf,
xf,
Vin,
Uin,
vf,
uc);
add_pixel_val(
bottom_diff,
ywf * xwf * vwc * uwf * grad,
idx,
Hin,
Win,
yf,
xf,
Vin,
Uin,
vc,
uf);
add_pixel_val(
bottom_diff,
ywf * xwf * vwc * uwc * grad,
idx,
Hin,
Win,
yf,
xf,
Vin,
Uin,
vc,
uc);
add_pixel_val(
bottom_diff,
ywf * xwc * vwf * uwf * grad,
idx,
Hin,
Win,
yf,
xc,
Vin,
Uin,
vf,
uf);
add_pixel_val(
bottom_diff,
ywf * xwc * vwf * uwc * grad,
idx,
Hin,
Win,
yf,
xc,
Vin,
Uin,
vf,
uc);
add_pixel_val(
bottom_diff,
ywf * xwc * vwc * uwf * grad,
idx,
Hin,
Win,
yf,
xc,
Vin,
Uin,
vc,
uf);
add_pixel_val(
bottom_diff,
ywf * xwc * vwc * uwc * grad,
idx,
Hin,
Win,
yf,
xc,
Vin,
Uin,
vc,
uc);
add_pixel_val(
bottom_diff,
ywc * xwf * vwf * uwf * grad,
idx,
Hin,
Win,
yc,
xf,
Vin,
Uin,
vf,
uf);
add_pixel_val(
bottom_diff,
ywc * xwf * vwf * uwc * grad,
idx,
Hin,
Win,
yc,
xf,
Vin,
Uin,
vf,
uc);
add_pixel_val(
bottom_diff,
ywc * xwf * vwc * uwf * grad,
idx,
Hin,
Win,
yc,
xf,
Vin,
Uin,
vc,
uf);
add_pixel_val(
bottom_diff,
ywc * xwf * vwc * uwc * grad,
idx,
Hin,
Win,
yc,
xf,
Vin,
Uin,
vc,
uc);
add_pixel_val(
bottom_diff,
ywc * xwc * vwf * uwf * grad,
idx,
Hin,
Win,
yc,
xc,
Vin,
Uin,
vf,
uf);
add_pixel_val(
bottom_diff,
ywc * xwc * vwf * uwc * grad,
idx,
Hin,
Win,
yc,
xc,
Vin,
Uin,
vf,
uc);
add_pixel_val(
bottom_diff,
ywc * xwc * vwc * uwf * grad,
idx,
Hin,
Win,
yc,
xc,
Vin,
Uin,
vc,
uf);
add_pixel_val(
bottom_diff,
ywc * xwc * vwc * uwc * grad,
idx,
Hin,
Win,
yc,
xc,
Vin,
Uin,
vc,
uc);
}
}
namespace tensormask {
at::Tensor SwapAlign2Nat_forward_cuda(
const at::Tensor& X,
const int lambda_val,
const float pad_val) {
AT_ASSERTM(X.device().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(X.ndimension() == 4, "input must be a 4D tensor");
AT_ASSERTM(lambda_val >= 1, "lambda should be greater or equal to 1");
const int N = X.size(0);
const int C = X.size(1);
const int Vin = static_cast<int>(sqrt(static_cast<float>(C)));
const int Uin = C / Vin;
AT_ASSERTM(
C == Vin * Uin && Vin == Uin, "#channels should be a square number");
const int Vout = lambda_val * Vin;
const int Uout = lambda_val * Uin;
const int Hin = X.size(2);
const int Win = X.size(3);
const float lambda = static_cast<float>(lambda_val);
const int Hout = static_cast<int>(ceil(Hin / lambda));
const int Wout = static_cast<int>(ceil(Win / lambda));
const float hVout = Vout / 2.;
const float hUout = Uout / 2.;
at::cuda::CUDAGuard device_guard(X.device());
at::Tensor Y = at::empty({N, Vout * Uout, Hout, Wout}, X.options());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
dim3 grid(std::min(at::cuda::ATenCeilDiv(Y.numel(), 512L), 4096L));
dim3 block(512);
if (Y.numel() == 0) {
AT_CUDA_CHECK(cudaGetLastError());
return Y;
}
AT_DISPATCH_FLOATING_TYPES(X.scalar_type(), "SwapAlign2Nat_forward", [&] {
SwapAlign2NatForwardFeat<scalar_t><<<grid, block, 0, stream>>>(
Y.numel(),
X.contiguous().data_ptr<scalar_t>(),
Vout,
Uout,
hVout,
hUout,
Vin,
Uin,
lambda,
Hin,
Win,
Hout,
Wout,
pad_val,
Y.data_ptr<scalar_t>());
});
cudaDeviceSynchronize();
AT_CUDA_CHECK(cudaGetLastError());
return Y;
}
at::Tensor SwapAlign2Nat_backward_cuda(
const at::Tensor& gY,
const int lambda_val,
const int batch_size,
const int channel,
const int height,
const int width) {
AT_ASSERTM(gY.device().is_cuda(), "input gradient must be a CUDA tensor");
AT_ASSERTM(gY.ndimension() == 4, "input gradient must be a 4D tensor");
AT_ASSERTM(lambda_val >= 1, "lambda should be greater or equal to 1");
const int Vin = static_cast<int>(sqrt(static_cast<float>(channel)));
const int Uin = channel / Vin;
const int Vout = lambda_val * Vin;
const int Uout = lambda_val * Uin;
const float hVout = Vout / 2.;
const float hUout = Uout / 2.;
const int Hout = gY.size(2);
const int Wout = gY.size(3);
at::cuda::CUDAGuard device_guard(gY.device());
at::Tensor gX = at::zeros({batch_size, channel, height, width}, gY.options());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
dim3 grid(std::min(at::cuda::ATenCeilDiv(gY.numel(), 512L), 4096L));
dim3 block(512);
// handle possibly empty gradients
if (gY.numel() == 0) {
AT_CUDA_CHECK(cudaGetLastError());
return gX;
}
AT_DISPATCH_FLOATING_TYPES(gY.scalar_type(), "SwapAlign2Nat_backward", [&] {
SwapAlign2NatBackwardFeat<scalar_t><<<grid, block, 0, stream>>>(
gY.numel(),
gY.contiguous().data_ptr<scalar_t>(),
Vout,
Uout,
hVout,
hUout,
Vin,
Uin,
static_cast<float>(lambda_val),
height,
width,
Hout,
Wout,
gX.data_ptr<scalar_t>());
});
AT_CUDA_CHECK(cudaGetLastError());
return gX;
}
} // namespace tensormask
|
80b573dd3c71c055ec0a5a37bb830314a59484e1.hip | // !!! This is a file automatically generated by hipify!!!
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "opencv2/opencv_modules.hpp"
#ifndef HAVE_OPENCV_CUDEV
#error "opencv_cudev is required"
#else
#include "opencv2/core/cuda.hpp"
#include "opencv2/cudev.hpp"
using namespace cv;
using namespace cv::cuda;
using namespace cv::cudev;
namespace
{
// Built-in GpuMat allocator: pitched device allocations for true 2D
// matrices, plain linear allocations for single-row/column ones.
class DefaultAllocator : public GpuMat::Allocator
{
public:
bool allocate(GpuMat* mat, int rows, int cols, size_t elemSize);
void free(GpuMat* mat);
};
bool DefaultAllocator::allocate(GpuMat* mat, int rows, int cols, size_t elemSize)
{
if (rows > 1 && cols > 1)
{
// Pitched allocation: the driver may pad each row for alignment and
// reports the resulting row stride in mat->step.
CV_CUDEV_SAFE_CALL( hipMallocPitch(&mat->data, &mat->step, elemSize * cols, rows) );
}
else
{
// Single row or single column must be continuous
CV_CUDEV_SAFE_CALL( hipMalloc(&mat->data, elemSize * cols * rows) );
mat->step = elemSize * cols;
}
// Reference counter is host-side (fastMalloc).
mat->refcount = (int*) fastMalloc(sizeof(int));
return true;
}
void DefaultAllocator::free(GpuMat* mat)
{
// NOTE(review): hipFree's return value is not checked here (matches the
// upstream CUDA source).
hipFree(mat->datastart);
fastFree(mat->refcount);
}
DefaultAllocator cudaDefaultAllocator;
// Process-wide allocator returned by GpuMat::defaultAllocator() and
// replaceable via GpuMat::setDefaultAllocator().
GpuMat::Allocator* g_defaultAllocator = &cudaDefaultAllocator;
}
// Returns the process-wide default GPU allocator (overridable via
// setDefaultAllocator).
GpuMat::Allocator* cv::cuda::GpuMat::defaultAllocator()
{
return g_defaultAllocator;
}
// Replaces the process-wide default allocator; a null pointer is rejected.
// No locking is done here, so callers are expected to set this up before
// concurrent GpuMat use.
void cv::cuda::GpuMat::setDefaultAllocator(Allocator* allocator)
{
CV_Assert( allocator != 0 );
g_defaultAllocator = allocator;
}
/////////////////////////////////////////////////////
/// create
// Allocates device storage for a _rows x _cols matrix of type _type,
// reusing the current buffer when shape and type already match.
void cv::cuda::GpuMat::create(int _rows, int _cols, int _type)
{
CV_DbgAssert( _rows >= 0 && _cols >= 0 );
_type &= Mat::TYPE_MASK;
// Fast path: existing allocation already fits.
if (rows == _rows && cols == _cols && type() == _type && data)
return;
if (data)
release();
if (_rows > 0 && _cols > 0)
{
flags = Mat::MAGIC_VAL + _type;
rows = _rows;
cols = _cols;
const size_t esz = elemSize();
bool allocSuccess = allocator->allocate(this, rows, cols, esz);
if (!allocSuccess)
{
// custom allocator fails, try default allocator
allocator = defaultAllocator();
allocSuccess = allocator->allocate(this, rows, cols, esz);
CV_Assert( allocSuccess );
}
// No row padding means the matrix is continuous in memory.
if (esz * cols == step)
flags |= Mat::CONTINUOUS_FLAG;
// End pointer derived from the (possibly pitched) step set by the
// allocator; computed in 64-bit to avoid overflow on large matrices.
int64 _nettosize = static_cast<int64>(step) * rows;
size_t nettosize = static_cast<size_t>(_nettosize);
datastart = data;
dataend = data + nettosize;
if (refcount)
*refcount = 1;
}
}
/////////////////////////////////////////////////////
/// release
// Drops this header's reference to its buffer, freeing the device memory
// when the last reference goes away, then resets the header to empty.
void cv::cuda::GpuMat::release()
{
CV_DbgAssert( allocator != 0 );
// CV_XADD returns the previous value, so 1 means we held the last ref.
if (refcount && CV_XADD(refcount, -1) == 1)
allocator->free(this);
data = datastart = dataend = 0;
step = rows = cols = 0;
refcount = 0;
}
/////////////////////////////////////////////////////
/// upload
// Synchronous host -> device copy: (re)allocates this GpuMat to match the
// source Mat, then transfers it with a pitched 2D memcpy.
void cv::cuda::GpuMat::upload(InputArray arr)
{
    Mat src = arr.getMat();

    CV_DbgAssert( !src.empty() );

    create(src.size(), src.type());

    CV_CUDEV_SAFE_CALL( hipMemcpy2D(data, step, src.data, src.step, cols * elemSize(), rows, hipMemcpyHostToDevice) );
}
// Asynchronous host -> device copy on the given stream.  The source Mat's
// buffer must stay valid until the stream completes (and should be pinned
// for a truly asynchronous transfer -- TODO confirm caller contract).
void cv::cuda::GpuMat::upload(InputArray arr, Stream& _stream)
{
Mat mat = arr.getMat();
CV_DbgAssert( !mat.empty() );
create(mat.size(), mat.type());
hipStream_t stream = StreamAccessor::getStream(_stream);
CV_CUDEV_SAFE_CALL( hipMemcpy2DAsync(data, step, mat.data, mat.step, cols * elemSize(), rows, hipMemcpyHostToDevice, stream) );
}
/////////////////////////////////////////////////////
/// download
// Synchronous device -> host copy; _dst is allocated/resized to match
// this matrix before the transfer.
void cv::cuda::GpuMat::download(OutputArray _dst) const
{
    CV_DbgAssert( !empty() );

    _dst.create(size(), type());
    Mat host = _dst.getMat();

    CV_CUDEV_SAFE_CALL( hipMemcpy2D(host.data, host.step, data, step, cols * elemSize(), rows, hipMemcpyDeviceToHost) );
}
// Asynchronous device -> host copy on the given stream; the caller must
// synchronize the stream before reading the destination.
void cv::cuda::GpuMat::download(OutputArray _dst, Stream& _stream) const
{
CV_DbgAssert( !empty() );
_dst.create(size(), type());
Mat dst = _dst.getMat();
hipStream_t stream = StreamAccessor::getStream(_stream);
CV_CUDEV_SAFE_CALL( hipMemcpy2DAsync(dst.data, dst.step, data, step, cols * elemSize(), rows, hipMemcpyDeviceToHost, stream) );
}
/////////////////////////////////////////////////////
/// copyTo
// Synchronous device -> device copy into _dst (allocated to match size/type).
void cv::cuda::GpuMat::copyTo(OutputArray _dst) const
{
    CV_DbgAssert( !empty() );

    _dst.create(size(), type());
    GpuMat dst = _dst.getGpuMat();

    CV_CUDEV_SAFE_CALL( hipMemcpy2D(dst.data, dst.step, data, step, cols * elemSize(), rows, hipMemcpyDeviceToDevice) );
}
// Asynchronous device -> device copy on the given stream.
void cv::cuda::GpuMat::copyTo(OutputArray _dst, Stream& _stream) const
{
    CV_DbgAssert( !empty() );

    _dst.create(size(), type());
    GpuMat dst = _dst.getGpuMat();

    hipStream_t stream = StreamAccessor::getStream(_stream);
    CV_CUDEV_SAFE_CALL( hipMemcpy2DAsync(dst.data, dst.step, data, step, cols * elemSize(), rows, hipMemcpyDeviceToDevice, stream) );
}
namespace
{
    // Transform policy for copyWithMask, selected by element size.
    // "shift" tunes the cudev grid transform (presumably log2 of the number of
    // elements processed per thread — TODO confirm against cudev's
    // DefaultTransformPolicy).
    template <size_t size> struct CopyToPolicy : DefaultTransformPolicy
    {
    };
    template <> struct CopyToPolicy<4> : DefaultTransformPolicy
    {
        enum {
            shift = 2
        };
    };
    template <> struct CopyToPolicy<8> : DefaultTransformPolicy
    {
        enum {
            shift = 1
        };
    };

    // Masked element-wise copy: dst[i] = src[i] wherever mask[i] != 0.
    template <typename T>
    void copyWithMask(const GpuMat& src, const GpuMat& dst, const GpuMat& mask, Stream& stream)
    {
        gridTransformUnary_< CopyToPolicy<sizeof(typename VecTraits<T>::elem_type)> >(globPtr<T>(src), globPtr<T>(dst), identity<T>(), globPtr<uchar>(mask), stream);
    }
}
// Masked device -> device copy: copies only elements where the mask is
// non-zero. The mask is either single-channel (applied to whole pixels) or has
// the same channel count as *this (applied per channel).
void cv::cuda::GpuMat::copyTo(OutputArray _dst, InputArray _mask, Stream& stream) const
{
    CV_DbgAssert( !empty() );
    CV_DbgAssert( depth() <= CV_64F && channels() <= 4 );

    GpuMat mask = _mask.getGpuMat();
    CV_DbgAssert( size() == mask.size() && mask.depth() == CV_8U && (mask.channels() == 1 || mask.channels() == channels()) );

    // Capture the destination buffer BEFORE create(): we only need to zero-fill
    // when create() actually reallocated (otherwise old contents are preserved
    // where the mask is zero).
    uchar* data0 = _dst.getGpuMat().data;

    _dst.create(size(), type());
    GpuMat dst = _dst.getGpuMat();

    // do not leave dst uninitialized
    if (dst.data != data0)
        dst.setTo(Scalar::all(0), stream);

    typedef void (*func_t)(const GpuMat& src, const GpuMat& dst, const GpuMat& mask, Stream& stream);
    // Rows indexed by per-channel element size in bytes (1, 2, 4, 8);
    // columns by channel count. Unused sizes stay null.
    static const func_t funcs[9][4] =
    {
        {0,0,0,0},
        {copyWithMask<uchar>, copyWithMask<uchar2>, copyWithMask<uchar3>, copyWithMask<uchar4>},
        {copyWithMask<ushort>, copyWithMask<ushort2>, copyWithMask<ushort3>, copyWithMask<ushort4>},
        {0,0,0,0},
        {copyWithMask<int>, copyWithMask<int2>, copyWithMask<int3>, copyWithMask<int4>},
        {0,0,0,0},
        {0,0,0,0},
        {0,0,0,0},
        {copyWithMask<double>, copyWithMask<double2>, copyWithMask<double3>, copyWithMask<double4>}
    };

    if (mask.channels() == channels())
    {
        // Per-channel mask: flatten everything to single-channel views.
        const func_t func = funcs[elemSize1()][0];
        CV_DbgAssert( func != 0 );
        func(reshape(1), dst.reshape(1), mask.reshape(1), stream);
    }
    else
    {
        // Single-channel mask gates whole multi-channel pixels.
        const func_t func = funcs[elemSize1()][channels() - 1];
        CV_DbgAssert( func != 0 );
        func(*this, dst, mask, stream);
    }
}
/////////////////////////////////////////////////////
/// setTo
namespace
{
    // Unmasked fill: writes the scalar (saturated to T's element type) to every
    // element via a constant-source grid transform.
    template <typename T>
    void setToWithOutMask(const GpuMat& mat, Scalar _scalar, Stream& stream)
    {
        Scalar_<typename VecTraits<T>::elem_type> scalar = _scalar;
        gridTransformUnary(constantPtr(VecTraits<T>::make(scalar.val), mat.rows, mat.cols), globPtr<T>(mat), identity<T>(), stream);
    }

    // Masked fill: writes the scalar only where mask != 0.
    template <typename T>
    void setToWithMask(const GpuMat& mat, const GpuMat& mask, Scalar _scalar, Stream& stream)
    {
        Scalar_<typename VecTraits<T>::elem_type> scalar = _scalar;
        gridTransformUnary(constantPtr(VecTraits<T>::make(scalar.val), mat.rows, mat.cols), globPtr<T>(mat), identity<T>(), globPtr<uchar>(mask), stream);
    }
}
// Fills the matrix with a scalar. Two memset fast paths, then a generic
// per-element fill kernel dispatched on depth x channels.
GpuMat& cv::cuda::GpuMat::setTo(Scalar value, Stream& stream)
{
    CV_DbgAssert( !empty() );
    CV_DbgAssert( depth() <= CV_64F && channels() <= 4 );

    // Fast path 1: an all-zero scalar is a plain byte memset for every depth.
    if (value[0] == 0.0 && value[1] == 0.0 && value[2] == 0.0 && value[3] == 0.0)
    {
        // Zero fill
        if (stream)
            CV_CUDEV_SAFE_CALL( hipMemset2DAsync(data, step, 0, cols * elemSize(), rows, StreamAccessor::getStream(stream)) );
        else
            CV_CUDEV_SAFE_CALL( hipMemset2D(data, step, 0, cols * elemSize(), rows) );

        return *this;
    }

    // Fast path 2: 8-bit data whose channels all share one value is also a memset.
    if (depth() == CV_8U)
    {
        const int cn = channels();

        if (cn == 1
            || (cn == 2 && value[0] == value[1])
            || (cn == 3 && value[0] == value[1] && value[0] == value[2])
            || (cn == 4 && value[0] == value[1] && value[0] == value[2] && value[0] == value[3]))
        {
            const int val = cv::saturate_cast<uchar>(value[0]);

            if (stream)
                CV_CUDEV_SAFE_CALL( hipMemset2DAsync(data, step, val, cols * elemSize(), rows, StreamAccessor::getStream(stream)) );
            else
                CV_CUDEV_SAFE_CALL( hipMemset2D(data, step, val, cols * elemSize(), rows) );

            return *this;
        }
    }

    // General path: per-element fill kernel, rows indexed by depth, columns by
    // channel count.
    typedef void (*func_t)(const GpuMat& mat, Scalar scalar, Stream& stream);
    static const func_t funcs[7][4] =
    {
        {setToWithOutMask<uchar>,setToWithOutMask<uchar2>,setToWithOutMask<uchar3>,setToWithOutMask<uchar4>},
        {setToWithOutMask<schar>,setToWithOutMask<char2>,setToWithOutMask<char3>,setToWithOutMask<char4>},
        {setToWithOutMask<ushort>,setToWithOutMask<ushort2>,setToWithOutMask<ushort3>,setToWithOutMask<ushort4>},
        {setToWithOutMask<short>,setToWithOutMask<short2>,setToWithOutMask<short3>,setToWithOutMask<short4>},
        {setToWithOutMask<int>,setToWithOutMask<int2>,setToWithOutMask<int3>,setToWithOutMask<int4>},
        {setToWithOutMask<float>,setToWithOutMask<float2>,setToWithOutMask<float3>,setToWithOutMask<float4>},
        {setToWithOutMask<double>,setToWithOutMask<double2>,setToWithOutMask<double3>,setToWithOutMask<double4>}
    };

    funcs[depth()][channels() - 1](*this, value, stream);

    return *this;
}
// Fills only the elements where the (CV_8UC1) mask is non-zero.
GpuMat& cv::cuda::GpuMat::setTo(Scalar value, InputArray _mask, Stream& stream)
{
    CV_DbgAssert( !empty() );
    CV_DbgAssert( depth() <= CV_64F && channels() <= 4 );

    GpuMat mask = _mask.getGpuMat();
    CV_DbgAssert( size() == mask.size() && mask.type() == CV_8UC1 );

    // Dispatch table: rows indexed by depth, columns by channel count.
    typedef void (*func_t)(const GpuMat& mat, const GpuMat& mask, Scalar scalar, Stream& stream);
    static const func_t funcs[7][4] =
    {
        {setToWithMask<uchar>,setToWithMask<uchar2>,setToWithMask<uchar3>,setToWithMask<uchar4>},
        {setToWithMask<schar>,setToWithMask<char2>,setToWithMask<char3>,setToWithMask<char4>},
        {setToWithMask<ushort>,setToWithMask<ushort2>,setToWithMask<ushort3>,setToWithMask<ushort4>},
        {setToWithMask<short>,setToWithMask<short2>,setToWithMask<short3>,setToWithMask<short4>},
        {setToWithMask<int>,setToWithMask<int2>,setToWithMask<int3>,setToWithMask<int4>},
        {setToWithMask<float>,setToWithMask<float2>,setToWithMask<float3>,setToWithMask<float4>},
        {setToWithMask<double>,setToWithMask<double2>,setToWithMask<double3>,setToWithMask<double4>}
    };

    funcs[depth()][channels() - 1](*this, mask, value, stream);

    return *this;
}
/////////////////////////////////////////////////////
/// convertTo
namespace
{
    // Transform policy for convertTo; double-typed conversions use a narrower
    // unroll ("shift" — see cudev's DefaultTransformPolicy for its semantics).
    template <typename T> struct ConvertToPolicy : DefaultTransformPolicy
    {
    };
    template <> struct ConvertToPolicy<double> : DefaultTransformPolicy
    {
        enum {
            shift = 1
        };
    };

    // Element-wise saturating conversion T -> D without scaling.
    template <typename T, typename D>
    void convertToNoScale(const GpuMat& src, const GpuMat& dst, Stream& stream)
    {
        typedef typename VecTraits<T>::elem_type src_elem_type;
        typedef typename VecTraits<D>::elem_type dst_elem_type;
        // scalar_type is at least float; double only when either side is double.
        typedef typename LargerType<src_elem_type, float>::type larger_elem_type;
        typedef typename LargerType<float, dst_elem_type>::type scalar_type;

        gridTransformUnary_< ConvertToPolicy<scalar_type> >(globPtr<T>(src), globPtr<D>(dst), saturate_cast_func<T, D>(), stream);
    }

    // Device functor computing saturate_cast<D>(alpha * src + beta) in S.
    template <typename T, typename D, typename S> struct Convertor : unary_function<T, D>
    {
        S alpha;
        S beta;

        __device__ __forceinline__ D operator ()(typename TypeTraits<T>::parameter_type src) const
        {
            return cudev::saturate_cast<D>(alpha * src + beta);
        }
    };

    // Element-wise scaled conversion: dst = saturate_cast<D>(alpha * src + beta).
    template <typename T, typename D>
    void convertToScale(const GpuMat& src, const GpuMat& dst, double alpha, double beta, Stream& stream)
    {
        typedef typename VecTraits<T>::elem_type src_elem_type;
        typedef typename VecTraits<D>::elem_type dst_elem_type;
        typedef typename LargerType<src_elem_type, float>::type larger_elem_type;
        typedef typename LargerType<float, dst_elem_type>::type scalar_type;

        Convertor<T, D, scalar_type> op;
        op.alpha = cv::saturate_cast<scalar_type>(alpha);
        op.beta = cv::saturate_cast<scalar_type>(beta);

        gridTransformUnary_< ConvertToPolicy<scalar_type> >(globPtr<T>(src), globPtr<D>(dst), op, stream);
    }
}
// Converts the element depth (channel count is preserved). Same-depth requests
// degenerate to a copy; otherwise a saturating per-element conversion kernel
// runs on single-channel views.
void cv::cuda::GpuMat::convertTo(OutputArray _dst, int rtype, Stream& stream) const
{
    if (rtype < 0)
        rtype = type();
    else
        rtype = CV_MAKE_TYPE(CV_MAT_DEPTH(rtype), channels());

    const int sdepth = depth();
    const int ddepth = CV_MAT_DEPTH(rtype);
    if (sdepth == ddepth)
    {
        if (stream)
            copyTo(_dst, stream);
        else
            copyTo(_dst);

        return;
    }

    CV_DbgAssert( sdepth <= CV_64F && ddepth <= CV_64F );

    // Hold an extra reference so the source buffer survives even if _dst
    // aliases *this and create() releases/reallocates it.
    GpuMat src = *this;

    _dst.create(size(), rtype);
    GpuMat dst = _dst.getGpuMat();

    // Dispatch on (source depth, destination depth); diagonal is unreachable.
    typedef void (*func_t)(const GpuMat& src, const GpuMat& dst, Stream& stream);
    static const func_t funcs[7][7] =
    {
        {0, convertToNoScale<uchar, schar>, convertToNoScale<uchar, ushort>, convertToNoScale<uchar, short>, convertToNoScale<uchar, int>, convertToNoScale<uchar, float>, convertToNoScale<uchar, double>},
        {convertToNoScale<schar, uchar>, 0, convertToNoScale<schar, ushort>, convertToNoScale<schar, short>, convertToNoScale<schar, int>, convertToNoScale<schar, float>, convertToNoScale<schar, double>},
        {convertToNoScale<ushort, uchar>, convertToNoScale<ushort, schar>, 0, convertToNoScale<ushort, short>, convertToNoScale<ushort, int>, convertToNoScale<ushort, float>, convertToNoScale<ushort, double>},
        {convertToNoScale<short, uchar>, convertToNoScale<short, schar>, convertToNoScale<short, ushort>, 0, convertToNoScale<short, int>, convertToNoScale<short, float>, convertToNoScale<short, double>},
        {convertToNoScale<int, uchar>, convertToNoScale<int, schar>, convertToNoScale<int, ushort>, convertToNoScale<int, short>, 0, convertToNoScale<int, float>, convertToNoScale<int, double>},
        {convertToNoScale<float, uchar>, convertToNoScale<float, schar>, convertToNoScale<float, ushort>, convertToNoScale<float, short>, convertToNoScale<float, int>, 0, convertToNoScale<float, double>},
        {convertToNoScale<double, uchar>, convertToNoScale<double, schar>, convertToNoScale<double, ushort>, convertToNoScale<double, short>, convertToNoScale<double, int>, convertToNoScale<double, float>, 0}
    };

    funcs[sdepth][ddepth](reshape(1), dst.reshape(1), stream);
}
// Scaled conversion: dst = saturate_cast(alpha * src + beta), per element.
// Unlike the unscaled overload, same-depth conversions still run the kernel
// (alpha/beta must be applied), hence the fully populated dispatch table.
void cv::cuda::GpuMat::convertTo(OutputArray _dst, int rtype, double alpha, double beta, Stream& stream) const
{
    if (rtype < 0)
        rtype = type();
    else
        rtype = CV_MAKETYPE(CV_MAT_DEPTH(rtype), channels());

    const int sdepth = depth();
    const int ddepth = CV_MAT_DEPTH(rtype);

    // Keep the source buffer alive in case _dst aliases *this.
    GpuMat src = *this;

    _dst.create(size(), rtype);
    GpuMat dst = _dst.getGpuMat();

    typedef void (*func_t)(const GpuMat& src, const GpuMat& dst, double alpha, double beta, Stream& stream);
    static const func_t funcs[7][7] =
    {
        {convertToScale<uchar, uchar>, convertToScale<uchar, schar>, convertToScale<uchar, ushort>, convertToScale<uchar, short>, convertToScale<uchar, int>, convertToScale<uchar, float>, convertToScale<uchar, double>},
        {convertToScale<schar, uchar>, convertToScale<schar, schar>, convertToScale<schar, ushort>, convertToScale<schar, short>, convertToScale<schar, int>, convertToScale<schar, float>, convertToScale<schar, double>},
        {convertToScale<ushort, uchar>, convertToScale<ushort, schar>, convertToScale<ushort, ushort>, convertToScale<ushort, short>, convertToScale<ushort, int>, convertToScale<ushort, float>, convertToScale<ushort, double>},
        {convertToScale<short, uchar>, convertToScale<short, schar>, convertToScale<short, ushort>, convertToScale<short, short>, convertToScale<short, int>, convertToScale<short, float>, convertToScale<short, double>},
        {convertToScale<int, uchar>, convertToScale<int, schar>, convertToScale<int, ushort>, convertToScale<int, short>, convertToScale<int, int>, convertToScale<int, float>, convertToScale<int, double>},
        {convertToScale<float, uchar>, convertToScale<float, schar>, convertToScale<float, ushort>, convertToScale<float, short>, convertToScale<float, int>, convertToScale<float, float>, convertToScale<float, double>},
        {convertToScale<double, uchar>, convertToScale<double, schar>, convertToScale<double, ushort>, convertToScale<double, short>, convertToScale<double, int>, convertToScale<double, float>, convertToScale<double, double>}
    };

    funcs[sdepth][ddepth](reshape(1), dst.reshape(1), alpha, beta, stream);
}
#endif
| 80b573dd3c71c055ec0a5a37bb830314a59484e1.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "opencv2/opencv_modules.hpp"
#ifndef HAVE_OPENCV_CUDEV
#error "opencv_cudev is required"
#else
#include "opencv2/core/cuda.hpp"
#include "opencv2/cudev.hpp"
using namespace cv;
using namespace cv::cuda;
using namespace cv::cudev;
namespace
{
    // Default device-memory allocator used by GpuMat when no custom allocator
    // is installed.
    class DefaultAllocator : public GpuMat::Allocator
    {
    public:
        bool allocate(GpuMat* mat, int rows, int cols, size_t elemSize);
        void free(GpuMat* mat);
    };

    // True 2-D matrices get pitched memory so each row is aligned; single
    // rows/columns get a plain allocation so they stay continuous.
    bool DefaultAllocator::allocate(GpuMat* mat, int rows, int cols, size_t elemSize)
    {
        if (rows > 1 && cols > 1)
        {
            CV_CUDEV_SAFE_CALL( cudaMallocPitch(&mat->data, &mat->step, elemSize * cols, rows) );
        }
        else
        {
            // Single row or single column must be continuous
            CV_CUDEV_SAFE_CALL( cudaMalloc(&mat->data, elemSize * cols * rows) );
            mat->step = elemSize * cols;
        }

        // Reference counter lives in host memory; GpuMat::create() sets it to 1.
        mat->refcount = (int*) fastMalloc(sizeof(int));

        return true;
    }

    void DefaultAllocator::free(GpuMat* mat)
    {
        cudaFree(mat->datastart);
        fastFree(mat->refcount);
    }

    DefaultAllocator cudaDefaultAllocator;
    GpuMat::Allocator* g_defaultAllocator = &cudaDefaultAllocator;
}
// Returns the process-wide default allocator (initially the built-in one).
GpuMat::Allocator* cv::cuda::GpuMat::defaultAllocator()
{
    return g_defaultAllocator;
}
// Replaces the process-wide default allocator. The caller retains ownership;
// the pointer must stay valid for the lifetime of its allocations.
// NOTE(review): not synchronized — confirm callers set this before any
// concurrent GpuMat use.
void cv::cuda::GpuMat::setDefaultAllocator(Allocator* allocator)
{
    CV_Assert( allocator != 0 );
    g_defaultAllocator = allocator;
}
/////////////////////////////////////////////////////
/// create
// Allocates device storage for a _rows x _cols matrix of the given type.
// No-op when the current allocation already matches; otherwise releases the
// old buffer and allocates via the installed allocator (falling back to the
// default allocator on failure).
void cv::cuda::GpuMat::create(int _rows, int _cols, int _type)
{
    CV_DbgAssert( _rows >= 0 && _cols >= 0 );

    _type &= Mat::TYPE_MASK;

    // Fast path: shape, type and an existing buffer all match.
    if (rows == _rows && cols == _cols && type() == _type && data)
        return;

    if (data)
        release();

    if (_rows > 0 && _cols > 0)
    {
        flags = Mat::MAGIC_VAL + _type;
        rows = _rows;
        cols = _cols;

        const size_t esz = elemSize();

        bool allocSuccess = allocator->allocate(this, rows, cols, esz);
        if (!allocSuccess)
        {
            // custom allocator fails, try default allocator
            allocator = defaultAllocator();
            allocSuccess = allocator->allocate(this, rows, cols, esz);
            CV_Assert( allocSuccess );
        }

        // Pitched allocation may pad rows; mark continuous only when it didn't.
        if (esz * cols == step)
            flags |= Mat::CONTINUOUS_FLAG;

        // Compute the total footprint in 64-bit to avoid int overflow.
        int64 _nettosize = static_cast<int64>(step) * rows;
        size_t nettosize = static_cast<size_t>(_nettosize);

        datastart = data;
        dataend = data + nettosize;

        if (refcount)
            *refcount = 1;
    }
}
/////////////////////////////////////////////////////
/// release
// Drops one reference; frees the buffer when it was the last, then resets the
// header. (CUDA twin of the HIP version above.)
void cv::cuda::GpuMat::release()
{
    CV_DbgAssert( allocator != 0 );

    if (refcount && CV_XADD(refcount, -1) == 1)
        allocator->free(this);

    data = datastart = dataend = 0;
    step = rows = cols = 0;
    refcount = 0;
}
/////////////////////////////////////////////////////
/// upload
// Synchronous host -> device upload (see HIP twin for details).
void cv::cuda::GpuMat::upload(InputArray arr)
{
    Mat mat = arr.getMat();

    CV_DbgAssert( !mat.empty() );

    create(mat.size(), mat.type());

    CV_CUDEV_SAFE_CALL( cudaMemcpy2D(data, step, mat.data, mat.step, cols * elemSize(), rows, cudaMemcpyHostToDevice) );
}
// Asynchronous host -> device upload; source Mat should be pinned for overlap.
void cv::cuda::GpuMat::upload(InputArray arr, Stream& _stream)
{
    Mat mat = arr.getMat();

    CV_DbgAssert( !mat.empty() );

    create(mat.size(), mat.type());

    cudaStream_t stream = StreamAccessor::getStream(_stream);
    CV_CUDEV_SAFE_CALL( cudaMemcpy2DAsync(data, step, mat.data, mat.step, cols * elemSize(), rows, cudaMemcpyHostToDevice, stream) );
}
/////////////////////////////////////////////////////
/// download
// Synchronous device -> host download.
void cv::cuda::GpuMat::download(OutputArray _dst) const
{
    CV_DbgAssert( !empty() );

    _dst.create(size(), type());
    Mat dst = _dst.getMat();

    CV_CUDEV_SAFE_CALL( cudaMemcpy2D(dst.data, dst.step, data, step, cols * elemSize(), rows, cudaMemcpyDeviceToHost) );
}
// Asynchronous device -> host download; caller must sync before reading dst.
void cv::cuda::GpuMat::download(OutputArray _dst, Stream& _stream) const
{
    CV_DbgAssert( !empty() );

    _dst.create(size(), type());
    Mat dst = _dst.getMat();

    cudaStream_t stream = StreamAccessor::getStream(_stream);
    CV_CUDEV_SAFE_CALL( cudaMemcpy2DAsync(dst.data, dst.step, data, step, cols * elemSize(), rows, cudaMemcpyDeviceToHost, stream) );
}
/////////////////////////////////////////////////////
/// copyTo
// Synchronous device -> device copy.
void cv::cuda::GpuMat::copyTo(OutputArray _dst) const
{
    CV_DbgAssert( !empty() );

    _dst.create(size(), type());
    GpuMat dst = _dst.getGpuMat();

    CV_CUDEV_SAFE_CALL( cudaMemcpy2D(dst.data, dst.step, data, step, cols * elemSize(), rows, cudaMemcpyDeviceToDevice) );
}
// Asynchronous device -> device copy on the given stream.
void cv::cuda::GpuMat::copyTo(OutputArray _dst, Stream& _stream) const
{
    CV_DbgAssert( !empty() );

    _dst.create(size(), type());
    GpuMat dst = _dst.getGpuMat();

    cudaStream_t stream = StreamAccessor::getStream(_stream);
    CV_CUDEV_SAFE_CALL( cudaMemcpy2DAsync(dst.data, dst.step, data, step, cols * elemSize(), rows, cudaMemcpyDeviceToDevice, stream) );
}
namespace
{
    // Transform policy for copyWithMask keyed on element size; "shift" tunes
    // the cudev grid transform (semantics defined by DefaultTransformPolicy).
    template <size_t size> struct CopyToPolicy : DefaultTransformPolicy
    {
    };
    template <> struct CopyToPolicy<4> : DefaultTransformPolicy
    {
        enum {
            shift = 2
        };
    };
    template <> struct CopyToPolicy<8> : DefaultTransformPolicy
    {
        enum {
            shift = 1
        };
    };

    // Masked element-wise copy: dst[i] = src[i] wherever mask[i] != 0.
    template <typename T>
    void copyWithMask(const GpuMat& src, const GpuMat& dst, const GpuMat& mask, Stream& stream)
    {
        gridTransformUnary_< CopyToPolicy<sizeof(typename VecTraits<T>::elem_type)> >(globPtr<T>(src), globPtr<T>(dst), identity<T>(), globPtr<uchar>(mask), stream);
    }
}
// Masked device -> device copy (CUDA twin of the HIP version above).
void cv::cuda::GpuMat::copyTo(OutputArray _dst, InputArray _mask, Stream& stream) const
{
    CV_DbgAssert( !empty() );
    CV_DbgAssert( depth() <= CV_64F && channels() <= 4 );

    GpuMat mask = _mask.getGpuMat();
    CV_DbgAssert( size() == mask.size() && mask.depth() == CV_8U && (mask.channels() == 1 || mask.channels() == channels()) );

    // Old buffer pointer: zero-fill only when create() reallocated.
    uchar* data0 = _dst.getGpuMat().data;

    _dst.create(size(), type());
    GpuMat dst = _dst.getGpuMat();

    // do not leave dst uninitialized
    if (dst.data != data0)
        dst.setTo(Scalar::all(0), stream);

    typedef void (*func_t)(const GpuMat& src, const GpuMat& dst, const GpuMat& mask, Stream& stream);
    // Rows indexed by per-channel element size in bytes (1, 2, 4, 8).
    static const func_t funcs[9][4] =
    {
        {0,0,0,0},
        {copyWithMask<uchar>, copyWithMask<uchar2>, copyWithMask<uchar3>, copyWithMask<uchar4>},
        {copyWithMask<ushort>, copyWithMask<ushort2>, copyWithMask<ushort3>, copyWithMask<ushort4>},
        {0,0,0,0},
        {copyWithMask<int>, copyWithMask<int2>, copyWithMask<int3>, copyWithMask<int4>},
        {0,0,0,0},
        {0,0,0,0},
        {0,0,0,0},
        {copyWithMask<double>, copyWithMask<double2>, copyWithMask<double3>, copyWithMask<double4>}
    };

    if (mask.channels() == channels())
    {
        // Per-channel mask: operate on single-channel views.
        const func_t func = funcs[elemSize1()][0];
        CV_DbgAssert( func != 0 );
        func(reshape(1), dst.reshape(1), mask.reshape(1), stream);
    }
    else
    {
        // Single-channel mask gating whole pixels.
        const func_t func = funcs[elemSize1()][channels() - 1];
        CV_DbgAssert( func != 0 );
        func(*this, dst, mask, stream);
    }
}
/////////////////////////////////////////////////////
/// setTo
namespace
{
    // Unmasked scalar fill via a constant-source grid transform.
    template <typename T>
    void setToWithOutMask(const GpuMat& mat, Scalar _scalar, Stream& stream)
    {
        Scalar_<typename VecTraits<T>::elem_type> scalar = _scalar;
        gridTransformUnary(constantPtr(VecTraits<T>::make(scalar.val), mat.rows, mat.cols), globPtr<T>(mat), identity<T>(), stream);
    }

    // Masked scalar fill: writes only where mask != 0.
    template <typename T>
    void setToWithMask(const GpuMat& mat, const GpuMat& mask, Scalar _scalar, Stream& stream)
    {
        Scalar_<typename VecTraits<T>::elem_type> scalar = _scalar;
        gridTransformUnary(constantPtr(VecTraits<T>::make(scalar.val), mat.rows, mat.cols), globPtr<T>(mat), identity<T>(), globPtr<uchar>(mask), stream);
    }
}
// Scalar fill with memset fast paths (CUDA twin of the HIP version above).
GpuMat& cv::cuda::GpuMat::setTo(Scalar value, Stream& stream)
{
    CV_DbgAssert( !empty() );
    CV_DbgAssert( depth() <= CV_64F && channels() <= 4 );

    // Fast path 1: all-zero scalar is a byte memset for any depth.
    if (value[0] == 0.0 && value[1] == 0.0 && value[2] == 0.0 && value[3] == 0.0)
    {
        // Zero fill
        if (stream)
            CV_CUDEV_SAFE_CALL( cudaMemset2DAsync(data, step, 0, cols * elemSize(), rows, StreamAccessor::getStream(stream)) );
        else
            CV_CUDEV_SAFE_CALL( cudaMemset2D(data, step, 0, cols * elemSize(), rows) );

        return *this;
    }

    // Fast path 2: uniform 8-bit value across channels is also a memset.
    if (depth() == CV_8U)
    {
        const int cn = channels();

        if (cn == 1
            || (cn == 2 && value[0] == value[1])
            || (cn == 3 && value[0] == value[1] && value[0] == value[2])
            || (cn == 4 && value[0] == value[1] && value[0] == value[2] && value[0] == value[3]))
        {
            const int val = cv::saturate_cast<uchar>(value[0]);

            if (stream)
                CV_CUDEV_SAFE_CALL( cudaMemset2DAsync(data, step, val, cols * elemSize(), rows, StreamAccessor::getStream(stream)) );
            else
                CV_CUDEV_SAFE_CALL( cudaMemset2D(data, step, val, cols * elemSize(), rows) );

            return *this;
        }
    }

    // General path: fill kernel dispatched on depth x channels.
    typedef void (*func_t)(const GpuMat& mat, Scalar scalar, Stream& stream);
    static const func_t funcs[7][4] =
    {
        {setToWithOutMask<uchar>,setToWithOutMask<uchar2>,setToWithOutMask<uchar3>,setToWithOutMask<uchar4>},
        {setToWithOutMask<schar>,setToWithOutMask<char2>,setToWithOutMask<char3>,setToWithOutMask<char4>},
        {setToWithOutMask<ushort>,setToWithOutMask<ushort2>,setToWithOutMask<ushort3>,setToWithOutMask<ushort4>},
        {setToWithOutMask<short>,setToWithOutMask<short2>,setToWithOutMask<short3>,setToWithOutMask<short4>},
        {setToWithOutMask<int>,setToWithOutMask<int2>,setToWithOutMask<int3>,setToWithOutMask<int4>},
        {setToWithOutMask<float>,setToWithOutMask<float2>,setToWithOutMask<float3>,setToWithOutMask<float4>},
        {setToWithOutMask<double>,setToWithOutMask<double2>,setToWithOutMask<double3>,setToWithOutMask<double4>}
    };

    funcs[depth()][channels() - 1](*this, value, stream);

    return *this;
}
// Masked scalar fill: writes only where the CV_8UC1 mask is non-zero.
GpuMat& cv::cuda::GpuMat::setTo(Scalar value, InputArray _mask, Stream& stream)
{
    CV_DbgAssert( !empty() );
    CV_DbgAssert( depth() <= CV_64F && channels() <= 4 );

    GpuMat mask = _mask.getGpuMat();
    CV_DbgAssert( size() == mask.size() && mask.type() == CV_8UC1 );

    // Dispatch on depth x channels.
    typedef void (*func_t)(const GpuMat& mat, const GpuMat& mask, Scalar scalar, Stream& stream);
    static const func_t funcs[7][4] =
    {
        {setToWithMask<uchar>,setToWithMask<uchar2>,setToWithMask<uchar3>,setToWithMask<uchar4>},
        {setToWithMask<schar>,setToWithMask<char2>,setToWithMask<char3>,setToWithMask<char4>},
        {setToWithMask<ushort>,setToWithMask<ushort2>,setToWithMask<ushort3>,setToWithMask<ushort4>},
        {setToWithMask<short>,setToWithMask<short2>,setToWithMask<short3>,setToWithMask<short4>},
        {setToWithMask<int>,setToWithMask<int2>,setToWithMask<int3>,setToWithMask<int4>},
        {setToWithMask<float>,setToWithMask<float2>,setToWithMask<float3>,setToWithMask<float4>},
        {setToWithMask<double>,setToWithMask<double2>,setToWithMask<double3>,setToWithMask<double4>}
    };

    funcs[depth()][channels() - 1](*this, mask, value, stream);

    return *this;
}
/////////////////////////////////////////////////////
/// convertTo
namespace
{
    // Transform policy for convertTo; double conversions use a narrower unroll.
    template <typename T> struct ConvertToPolicy : DefaultTransformPolicy
    {
    };
    template <> struct ConvertToPolicy<double> : DefaultTransformPolicy
    {
        enum {
            shift = 1
        };
    };

    // Element-wise saturating conversion T -> D without scaling.
    template <typename T, typename D>
    void convertToNoScale(const GpuMat& src, const GpuMat& dst, Stream& stream)
    {
        typedef typename VecTraits<T>::elem_type src_elem_type;
        typedef typename VecTraits<D>::elem_type dst_elem_type;
        typedef typename LargerType<src_elem_type, float>::type larger_elem_type;
        typedef typename LargerType<float, dst_elem_type>::type scalar_type;

        gridTransformUnary_< ConvertToPolicy<scalar_type> >(globPtr<T>(src), globPtr<D>(dst), saturate_cast_func<T, D>(), stream);
    }

    // Device functor: saturate_cast<D>(alpha * src + beta) computed in S.
    template <typename T, typename D, typename S> struct Convertor : unary_function<T, D>
    {
        S alpha;
        S beta;

        __device__ __forceinline__ D operator ()(typename TypeTraits<T>::parameter_type src) const
        {
            return cudev::saturate_cast<D>(alpha * src + beta);
        }
    };

    // Element-wise scaled conversion helper.
    template <typename T, typename D>
    void convertToScale(const GpuMat& src, const GpuMat& dst, double alpha, double beta, Stream& stream)
    {
        typedef typename VecTraits<T>::elem_type src_elem_type;
        typedef typename VecTraits<D>::elem_type dst_elem_type;
        typedef typename LargerType<src_elem_type, float>::type larger_elem_type;
        typedef typename LargerType<float, dst_elem_type>::type scalar_type;

        Convertor<T, D, scalar_type> op;
        op.alpha = cv::saturate_cast<scalar_type>(alpha);
        op.beta = cv::saturate_cast<scalar_type>(beta);

        gridTransformUnary_< ConvertToPolicy<scalar_type> >(globPtr<T>(src), globPtr<D>(dst), op, stream);
    }
}
// Depth conversion (channel count preserved); same depth degenerates to copy.
void cv::cuda::GpuMat::convertTo(OutputArray _dst, int rtype, Stream& stream) const
{
    if (rtype < 0)
        rtype = type();
    else
        rtype = CV_MAKE_TYPE(CV_MAT_DEPTH(rtype), channels());

    const int sdepth = depth();
    const int ddepth = CV_MAT_DEPTH(rtype);
    if (sdepth == ddepth)
    {
        if (stream)
            copyTo(_dst, stream);
        else
            copyTo(_dst);

        return;
    }

    CV_DbgAssert( sdepth <= CV_64F && ddepth <= CV_64F );

    // Keep the source buffer alive if _dst aliases *this.
    GpuMat src = *this;

    _dst.create(size(), rtype);
    GpuMat dst = _dst.getGpuMat();

    // Dispatch on (source depth, destination depth); diagonal is unreachable.
    typedef void (*func_t)(const GpuMat& src, const GpuMat& dst, Stream& stream);
    static const func_t funcs[7][7] =
    {
        {0, convertToNoScale<uchar, schar>, convertToNoScale<uchar, ushort>, convertToNoScale<uchar, short>, convertToNoScale<uchar, int>, convertToNoScale<uchar, float>, convertToNoScale<uchar, double>},
        {convertToNoScale<schar, uchar>, 0, convertToNoScale<schar, ushort>, convertToNoScale<schar, short>, convertToNoScale<schar, int>, convertToNoScale<schar, float>, convertToNoScale<schar, double>},
        {convertToNoScale<ushort, uchar>, convertToNoScale<ushort, schar>, 0, convertToNoScale<ushort, short>, convertToNoScale<ushort, int>, convertToNoScale<ushort, float>, convertToNoScale<ushort, double>},
        {convertToNoScale<short, uchar>, convertToNoScale<short, schar>, convertToNoScale<short, ushort>, 0, convertToNoScale<short, int>, convertToNoScale<short, float>, convertToNoScale<short, double>},
        {convertToNoScale<int, uchar>, convertToNoScale<int, schar>, convertToNoScale<int, ushort>, convertToNoScale<int, short>, 0, convertToNoScale<int, float>, convertToNoScale<int, double>},
        {convertToNoScale<float, uchar>, convertToNoScale<float, schar>, convertToNoScale<float, ushort>, convertToNoScale<float, short>, convertToNoScale<float, int>, 0, convertToNoScale<float, double>},
        {convertToNoScale<double, uchar>, convertToNoScale<double, schar>, convertToNoScale<double, ushort>, convertToNoScale<double, short>, convertToNoScale<double, int>, convertToNoScale<double, float>, 0}
    };

    funcs[sdepth][ddepth](reshape(1), dst.reshape(1), stream);
}
// Scaled conversion dst = saturate_cast(alpha * src + beta); same-depth
// conversions still run the kernel, so the table's diagonal is populated.
void cv::cuda::GpuMat::convertTo(OutputArray _dst, int rtype, double alpha, double beta, Stream& stream) const
{
    if (rtype < 0)
        rtype = type();
    else
        rtype = CV_MAKETYPE(CV_MAT_DEPTH(rtype), channels());

    const int sdepth = depth();
    const int ddepth = CV_MAT_DEPTH(rtype);

    // Keep the source buffer alive if _dst aliases *this.
    GpuMat src = *this;

    _dst.create(size(), rtype);
    GpuMat dst = _dst.getGpuMat();

    typedef void (*func_t)(const GpuMat& src, const GpuMat& dst, double alpha, double beta, Stream& stream);
    static const func_t funcs[7][7] =
    {
        {convertToScale<uchar, uchar>, convertToScale<uchar, schar>, convertToScale<uchar, ushort>, convertToScale<uchar, short>, convertToScale<uchar, int>, convertToScale<uchar, float>, convertToScale<uchar, double>},
        {convertToScale<schar, uchar>, convertToScale<schar, schar>, convertToScale<schar, ushort>, convertToScale<schar, short>, convertToScale<schar, int>, convertToScale<schar, float>, convertToScale<schar, double>},
        {convertToScale<ushort, uchar>, convertToScale<ushort, schar>, convertToScale<ushort, ushort>, convertToScale<ushort, short>, convertToScale<ushort, int>, convertToScale<ushort, float>, convertToScale<ushort, double>},
        {convertToScale<short, uchar>, convertToScale<short, schar>, convertToScale<short, ushort>, convertToScale<short, short>, convertToScale<short, int>, convertToScale<short, float>, convertToScale<short, double>},
        {convertToScale<int, uchar>, convertToScale<int, schar>, convertToScale<int, ushort>, convertToScale<int, short>, convertToScale<int, int>, convertToScale<int, float>, convertToScale<int, double>},
        {convertToScale<float, uchar>, convertToScale<float, schar>, convertToScale<float, ushort>, convertToScale<float, short>, convertToScale<float, int>, convertToScale<float, float>, convertToScale<float, double>},
        {convertToScale<double, uchar>, convertToScale<double, schar>, convertToScale<double, ushort>, convertToScale<double, short>, convertToScale<double, int>, convertToScale<double, float>, convertToScale<double, double>}
    };

    funcs[sdepth][ddepth](reshape(1), dst.reshape(1), alpha, beta, stream);
}
#endif
|
8f8027eb5254781746dda106fce4cde567c81da8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/user/kernels/slice_util.h"
#include "oneflow/core/common/switch_func.h"
namespace oneflow {
namespace {
// Slice gather kernel: for each flat index i of the sliced (output) tensor,
// maps i back to the matching offset in the entire (input) tensor and copies
// one element. CUDA_1D_KERNEL_LOOP iterates i over [0, n) across the grid.
template<typename T, int NDIM>
__global__ void SliceForwardGpu(const int n, SliceParams params,
                                SliceIndexHelper<NDIM> entire_idx_cvtr,
                                SliceIndexHelper<NDIM> sliced_idx_cvtr, const T* entire,
                                T* sliced) {
  CUDA_1D_KERNEL_LOOP(i, n) {
    int64_t offset = SliceOffsetToEntireOffset<NDIM>(i, params, entire_idx_cvtr, sliced_idx_cvtr);
    sliced[i] = entire[offset];
  }
}
// Slice scatter kernel (backward of the gather above): writes each sliced
// element back to its position in the entire tensor. No atomics are needed
// because distinct sliced indices map to distinct entire offsets.
template<typename T, int NDIM>
__global__ void SliceBackwardGpu(const int n, SliceParams params,
                                 SliceIndexHelper<NDIM> entire_idx_cvtr,
                                 SliceIndexHelper<NDIM> sliced_idx_cvtr, T* entire,
                                 const T* sliced) {
  CUDA_1D_KERNEL_LOOP(i, n) {
    int64_t offset = SliceOffsetToEntireOffset<NDIM>(i, params, entire_idx_cvtr, sliced_idx_cvtr);
    entire[offset] = sliced[i];
  }
}
// Host launcher for the forward (gather) slice kernel on the ctx stream.
// Guards against an empty slice: a zero-element launch would request a grid
// of zero blocks, which is an invalid configuration.
// NOTE(review): elem_cnt is int64_t but the kernel takes int n — confirm
// callers never slice more than INT_MAX elements.
template<typename T, int NDIM>
void LaunchSliceForward(DeviceCtx* ctx, const SliceParams& params, const T* entire, T* sliced) {
  CHECK_EQ(params.ndim, NDIM);
  int64_t elem_cnt = params.elem_cnt();
  SliceIndexHelper<NDIM> entire_idx_cvtr(params.dims);
  SliceIndexHelper<NDIM> sliced_idx_cvtr(params.size);
  if (elem_cnt == 0) { return; }
  hipLaunchKernelGGL(( SliceForwardGpu<T, NDIM>)
      , dim3(BlocksNum4ThreadsNum(elem_cnt)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(),
      elem_cnt, params, entire_idx_cvtr, sliced_idx_cvtr, entire, sliced);
}
// Host launcher for the backward (scatter) slice kernel on the ctx stream.
// Fix: mirror LaunchSliceForward's empty-slice guard. Without it, an empty
// slice (elem_cnt == 0) launches a grid of zero blocks, which fails with an
// invalid-configuration error instead of being a harmless no-op.
// NOTE(review): elem_cnt is int64_t but the kernel takes int n — confirm
// callers never slice more than INT_MAX elements.
template<typename T, int NDIM>
void LaunchSliceBackward(DeviceCtx* ctx, const SliceParams& params, const T* sliced, T* entire) {
  CHECK_EQ(params.ndim, NDIM);
  int64_t elem_cnt = params.elem_cnt();
  SliceIndexHelper<NDIM> entire_idx_cvtr(params.dims);
  SliceIndexHelper<NDIM> sliced_idx_cvtr(params.size);
  if (elem_cnt == 0) { return; }
  hipLaunchKernelGGL(( SliceBackwardGpu<T, NDIM>)
      , dim3(BlocksNum4ThreadsNum(elem_cnt)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(),
      elem_cnt, params, entire_idx_cvtr, sliced_idx_cvtr, entire, sliced);
}
template<typename T>
struct SliceSwitchUtil final {
#define MAKE_SLICE_SWITCH_ENTRY(func_name, N) func_name<T, N>
#define DEFINE_SLICE_SWITCH_UTIL_STATIC_METHOD(func_name) \
DEFINE_STATIC_SWITCH_FUNC(void, func_name, MAKE_SLICE_SWITCH_ENTRY, MAKE_NDIM_CTRV_SEQ(DIM_SEQ))
DEFINE_SLICE_SWITCH_UTIL_STATIC_METHOD(LaunchSliceForward)
DEFINE_SLICE_SWITCH_UTIL_STATIC_METHOD(LaunchSliceBackward)
#undef DEFINE_SLICE_SWITCH_UTIL_STATIC_METHOD
#undef MAKE_SLICE_SWITCH_ENTRY
};
// Determines the widest vectorized access (in bytes: 16/8/4/2/1) usable for
// the innermost dimension. OR-ing the byte extents of the last dimension's
// dim/start/size together with both base addresses yields a mask whose low
// bits are zero only if every quantity shares that alignment, so testing the
// mask from the widest alignment downward gives the largest safe pack size.
template<typename T>
size_t GetPackSize(const SliceParams& params, const T* entire, const T* sliced) {
  CHECK_GT(params.ndim, 0);
  const int64_t last_dim = params.ndim - 1;
  // Low bits of any term break the corresponding alignment for all terms.
  const int64_t mask = (params.dims[last_dim] * sizeof(T)) | (params.start[last_dim] * sizeof(T))
                       | (params.size[last_dim] * sizeof(T))
                       | static_cast<int64_t>(reinterpret_cast<uintptr_t>(entire))
                       | static_cast<int64_t>(reinterpret_cast<uintptr_t>(sliced));
  if ((mask & 0xF) == 0) {
    return 16;
  } else if ((mask & 0x7) == 0) {
    return 8;
  } else if ((mask & 0x3) == 0) {
    return 4;
  } else if ((mask & 0x1) == 0) {
    return 2;
  } else {
    return 1;
  }
}
// Rewrites `params` so the innermost dimension is expressed in units of
// `pack_size`-byte packs instead of single T elements, enabling vectorized
// loads/stores. Packing is only possible when the last dimension is
// contiguous (step == 1); otherwise the params are copied unchanged and the
// pack size degenerates to sizeof(T).
template<typename T>
void GetPackedParams(const SliceParams& params, const T* entire, const T* sliced, size_t* pack_size,
                     SliceParams* packed_params) {
  CHECK_GT(params.ndim, 0);
  const int64_t last_dim = params.ndim - 1;
  if (params.step[last_dim] == 1) {
    *pack_size = GetPackSize<T>(params, entire, sliced);
    CHECK_GE(*pack_size, sizeof(T));
    const int64_t elem_per_pack = *pack_size / sizeof(T);
    *packed_params = params;
    // dim/start/size are all divisible by elem_per_pack by construction of
    // the alignment mask in GetPackSize.
    packed_params->dims[last_dim] /= elem_per_pack;
    packed_params->start[last_dim] /= elem_per_pack;
    packed_params->size[last_dim] /= elem_per_pack;
  } else {
    *pack_size = sizeof(T);
    *packed_params = params;
  }
}
} // namespace
// GPU specialization of SliceKernelUtil. Both directions first fold
// contiguous fully-sliced dimensions, then re-launch the kernel on an
// unsigned integer type whose width equals the computed pack size, so the
// copy is vectorized regardless of the logical element type T.
template<typename T>
struct SliceKernelUtil<DeviceType::kGPU, T> {
  // Gather: entire -> sliced.
  static void Forward(DeviceCtx* ctx, const SliceParams& params, const T* entire, T* sliced) {
    SliceParams fold_slice_params = FoldContiguousFullSliceDimensions(params);
    size_t pack_size;
    SliceParams packed_params{};
    GetPackedParams<T>(fold_slice_params, entire, sliced, &pack_size, &packed_params);
    // Dispatch on pack size: pointers are reinterpreted as the matching
    // unsigned type; GetPackSize guarantees the required alignment.
    if (pack_size == 1) {
      SliceSwitchUtil<uint8_t>::SwitchLaunchSliceForward(
          SwitchCase(packed_params.ndim), ctx, packed_params,
          reinterpret_cast<const uint8_t*>(entire), reinterpret_cast<uint8_t*>(sliced));
    } else if (pack_size == 2) {
      SliceSwitchUtil<uint16_t>::SwitchLaunchSliceForward(
          SwitchCase(packed_params.ndim), ctx, packed_params,
          reinterpret_cast<const uint16_t*>(entire), reinterpret_cast<uint16_t*>(sliced));
    } else if (pack_size == 4) {
      SliceSwitchUtil<uint32_t>::SwitchLaunchSliceForward(
          SwitchCase(packed_params.ndim), ctx, packed_params,
          reinterpret_cast<const uint32_t*>(entire), reinterpret_cast<uint32_t*>(sliced));
    } else if (pack_size == 8) {
      SliceSwitchUtil<uint64_t>::SwitchLaunchSliceForward(
          SwitchCase(packed_params.ndim), ctx, packed_params,
          reinterpret_cast<const uint64_t*>(entire), reinterpret_cast<uint64_t*>(sliced));
    } else if (pack_size == 16) {
      SliceSwitchUtil<ulonglong2>::SwitchLaunchSliceForward(
          SwitchCase(packed_params.ndim), ctx, packed_params,
          reinterpret_cast<const ulonglong2*>(entire), reinterpret_cast<ulonglong2*>(sliced));
    } else {
      UNIMPLEMENTED();
    }
  }
  // Scatter: sliced -> entire. Mirrors Forward with the copy direction reversed.
  static void Backward(DeviceCtx* ctx, const SliceParams& params, const T* sliced, T* entire) {
    SliceParams fold_slice_params = FoldContiguousFullSliceDimensions(params);
    size_t pack_size;
    SliceParams packed_params{};
    GetPackedParams<T>(fold_slice_params, entire, sliced, &pack_size, &packed_params);
    if (pack_size == 1) {
      SliceSwitchUtil<uint8_t>::SwitchLaunchSliceBackward(
          SwitchCase(packed_params.ndim), ctx, packed_params,
          reinterpret_cast<const uint8_t*>(sliced), reinterpret_cast<uint8_t*>(entire));
    } else if (pack_size == 2) {
      SliceSwitchUtil<uint16_t>::SwitchLaunchSliceBackward(
          SwitchCase(packed_params.ndim), ctx, packed_params,
          reinterpret_cast<const uint16_t*>(sliced), reinterpret_cast<uint16_t*>(entire));
    } else if (pack_size == 4) {
      SliceSwitchUtil<uint32_t>::SwitchLaunchSliceBackward(
          SwitchCase(packed_params.ndim), ctx, packed_params,
          reinterpret_cast<const uint32_t*>(sliced), reinterpret_cast<uint32_t*>(entire));
    } else if (pack_size == 8) {
      SliceSwitchUtil<uint64_t>::SwitchLaunchSliceBackward(
          SwitchCase(packed_params.ndim), ctx, packed_params,
          reinterpret_cast<const uint64_t*>(sliced), reinterpret_cast<uint64_t*>(entire));
    } else if (pack_size == 16) {
      SliceSwitchUtil<ulonglong2>::SwitchLaunchSliceBackward(
          SwitchCase(packed_params.ndim), ctx, packed_params,
          reinterpret_cast<const ulonglong2*>(sliced), reinterpret_cast<ulonglong2*>(entire));
    } else {
      UNIMPLEMENTED();
    }
  }
};
INSTANTIATE_SLICE_KERNEL_UTIL_WITH_DEVICE(DeviceType::kGPU)
INSTANTIATE_SLICE_KERNEL_UTIL(DeviceType::kGPU, float16)
} // namespace oneflow
| 8f8027eb5254781746dda106fce4cde567c81da8.cu | /*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/user/kernels/slice_util.h"
#include "oneflow/core/common/switch_func.h"
namespace oneflow {
namespace {
template<typename T, int NDIM>
__global__ void SliceForwardGpu(const int n, SliceParams params,
SliceIndexHelper<NDIM> entire_idx_cvtr,
SliceIndexHelper<NDIM> sliced_idx_cvtr, const T* entire,
T* sliced) {
CUDA_1D_KERNEL_LOOP(i, n) {
int64_t offset = SliceOffsetToEntireOffset<NDIM>(i, params, entire_idx_cvtr, sliced_idx_cvtr);
sliced[i] = entire[offset];
}
}
template<typename T, int NDIM>
__global__ void SliceBackwardGpu(const int n, SliceParams params,
SliceIndexHelper<NDIM> entire_idx_cvtr,
SliceIndexHelper<NDIM> sliced_idx_cvtr, T* entire,
const T* sliced) {
CUDA_1D_KERNEL_LOOP(i, n) {
int64_t offset = SliceOffsetToEntireOffset<NDIM>(i, params, entire_idx_cvtr, sliced_idx_cvtr);
entire[offset] = sliced[i];
}
}
template<typename T, int NDIM>
void LaunchSliceForward(DeviceCtx* ctx, const SliceParams& params, const T* entire, T* sliced) {
CHECK_EQ(params.ndim, NDIM);
int64_t elem_cnt = params.elem_cnt();
SliceIndexHelper<NDIM> entire_idx_cvtr(params.dims);
SliceIndexHelper<NDIM> sliced_idx_cvtr(params.size);
if (elem_cnt == 0) { return; }
SliceForwardGpu<T, NDIM>
<<<BlocksNum4ThreadsNum(elem_cnt), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>(
elem_cnt, params, entire_idx_cvtr, sliced_idx_cvtr, entire, sliced);
}
// Launches the scatter (backward) slice kernel on the device stream of `ctx`.
// Preconditions: params.ndim must equal the compile-time NDIM.
template<typename T, int NDIM>
void LaunchSliceBackward(DeviceCtx* ctx, const SliceParams& params, const T* sliced, T* entire) {
  CHECK_EQ(params.ndim, NDIM);
  int64_t elem_cnt = params.elem_cnt();
  SliceIndexHelper<NDIM> entire_idx_cvtr(params.dims);
  SliceIndexHelper<NDIM> sliced_idx_cvtr(params.size);
  // Guard against empty slices, consistent with LaunchSliceForward: launching
  // a zero-dimension grid is an invalid kernel configuration.
  if (elem_cnt == 0) { return; }
  SliceBackwardGpu<T, NDIM>
      <<<BlocksNum4ThreadsNum(elem_cnt), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>(
          elem_cnt, params, entire_idx_cvtr, sliced_idx_cvtr, entire, sliced);
}
template<typename T>
struct SliceSwitchUtil final {
#define MAKE_SLICE_SWITCH_ENTRY(func_name, N) func_name<T, N>
#define DEFINE_SLICE_SWITCH_UTIL_STATIC_METHOD(func_name) \
DEFINE_STATIC_SWITCH_FUNC(void, func_name, MAKE_SLICE_SWITCH_ENTRY, MAKE_NDIM_CTRV_SEQ(DIM_SEQ))
DEFINE_SLICE_SWITCH_UTIL_STATIC_METHOD(LaunchSliceForward)
DEFINE_SLICE_SWITCH_UTIL_STATIC_METHOD(LaunchSliceBackward)
#undef DEFINE_SLICE_SWITCH_UTIL_STATIC_METHOD
#undef MAKE_SLICE_SWITCH_ENTRY
};
template<typename T>
size_t GetPackSize(const SliceParams& params, const T* entire, const T* sliced) {
CHECK_GT(params.ndim, 0);
const int64_t last_dim = params.ndim - 1;
const int64_t mask = (params.dims[last_dim] * sizeof(T)) | (params.start[last_dim] * sizeof(T))
| (params.size[last_dim] * sizeof(T))
| static_cast<int64_t>(reinterpret_cast<uintptr_t>(entire))
| static_cast<int64_t>(reinterpret_cast<uintptr_t>(sliced));
if ((mask & 0xF) == 0) {
return 16;
} else if ((mask & 0x7) == 0) {
return 8;
} else if ((mask & 0x3) == 0) {
return 4;
} else if ((mask & 0x1) == 0) {
return 2;
} else {
return 1;
}
}
template<typename T>
void GetPackedParams(const SliceParams& params, const T* entire, const T* sliced, size_t* pack_size,
SliceParams* packed_params) {
CHECK_GT(params.ndim, 0);
const int64_t last_dim = params.ndim - 1;
if (params.step[last_dim] == 1) {
*pack_size = GetPackSize<T>(params, entire, sliced);
CHECK_GE(*pack_size, sizeof(T));
const int64_t elem_per_pack = *pack_size / sizeof(T);
*packed_params = params;
packed_params->dims[last_dim] /= elem_per_pack;
packed_params->start[last_dim] /= elem_per_pack;
packed_params->size[last_dim] /= elem_per_pack;
} else {
*pack_size = sizeof(T);
*packed_params = params;
}
}
} // namespace
template<typename T>
struct SliceKernelUtil<DeviceType::kGPU, T> {
static void Forward(DeviceCtx* ctx, const SliceParams& params, const T* entire, T* sliced) {
SliceParams fold_slice_params = FoldContiguousFullSliceDimensions(params);
size_t pack_size;
SliceParams packed_params{};
GetPackedParams<T>(fold_slice_params, entire, sliced, &pack_size, &packed_params);
if (pack_size == 1) {
SliceSwitchUtil<uint8_t>::SwitchLaunchSliceForward(
SwitchCase(packed_params.ndim), ctx, packed_params,
reinterpret_cast<const uint8_t*>(entire), reinterpret_cast<uint8_t*>(sliced));
} else if (pack_size == 2) {
SliceSwitchUtil<uint16_t>::SwitchLaunchSliceForward(
SwitchCase(packed_params.ndim), ctx, packed_params,
reinterpret_cast<const uint16_t*>(entire), reinterpret_cast<uint16_t*>(sliced));
} else if (pack_size == 4) {
SliceSwitchUtil<uint32_t>::SwitchLaunchSliceForward(
SwitchCase(packed_params.ndim), ctx, packed_params,
reinterpret_cast<const uint32_t*>(entire), reinterpret_cast<uint32_t*>(sliced));
} else if (pack_size == 8) {
SliceSwitchUtil<uint64_t>::SwitchLaunchSliceForward(
SwitchCase(packed_params.ndim), ctx, packed_params,
reinterpret_cast<const uint64_t*>(entire), reinterpret_cast<uint64_t*>(sliced));
} else if (pack_size == 16) {
SliceSwitchUtil<ulonglong2>::SwitchLaunchSliceForward(
SwitchCase(packed_params.ndim), ctx, packed_params,
reinterpret_cast<const ulonglong2*>(entire), reinterpret_cast<ulonglong2*>(sliced));
} else {
UNIMPLEMENTED();
}
}
static void Backward(DeviceCtx* ctx, const SliceParams& params, const T* sliced, T* entire) {
SliceParams fold_slice_params = FoldContiguousFullSliceDimensions(params);
size_t pack_size;
SliceParams packed_params{};
GetPackedParams<T>(fold_slice_params, entire, sliced, &pack_size, &packed_params);
if (pack_size == 1) {
SliceSwitchUtil<uint8_t>::SwitchLaunchSliceBackward(
SwitchCase(packed_params.ndim), ctx, packed_params,
reinterpret_cast<const uint8_t*>(sliced), reinterpret_cast<uint8_t*>(entire));
} else if (pack_size == 2) {
SliceSwitchUtil<uint16_t>::SwitchLaunchSliceBackward(
SwitchCase(packed_params.ndim), ctx, packed_params,
reinterpret_cast<const uint16_t*>(sliced), reinterpret_cast<uint16_t*>(entire));
} else if (pack_size == 4) {
SliceSwitchUtil<uint32_t>::SwitchLaunchSliceBackward(
SwitchCase(packed_params.ndim), ctx, packed_params,
reinterpret_cast<const uint32_t*>(sliced), reinterpret_cast<uint32_t*>(entire));
} else if (pack_size == 8) {
SliceSwitchUtil<uint64_t>::SwitchLaunchSliceBackward(
SwitchCase(packed_params.ndim), ctx, packed_params,
reinterpret_cast<const uint64_t*>(sliced), reinterpret_cast<uint64_t*>(entire));
} else if (pack_size == 16) {
SliceSwitchUtil<ulonglong2>::SwitchLaunchSliceBackward(
SwitchCase(packed_params.ndim), ctx, packed_params,
reinterpret_cast<const ulonglong2*>(sliced), reinterpret_cast<ulonglong2*>(entire));
} else {
UNIMPLEMENTED();
}
}
};
INSTANTIATE_SLICE_KERNEL_UTIL_WITH_DEVICE(DeviceType::kGPU)
INSTANTIATE_SLICE_KERNEL_UTIL(DeviceType::kGPU, float16)
} // namespace oneflow
|
c821146129d576b91e69d2e70a9d5a03342cd5e3.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <iostream>
// Positive modulo for floats: the result lies in [0, b) even for negative a
// (plain fmod keeps the sign of its first argument).
__device__ float cuda_mymod(float a, float b) {
    return fmod(fmod(a, b) + b, b);
}
// Mixed-type minimum: returns the int a when b >= a, otherwise b rounded to
// the nearest int. NOTE(review): with !(b<a), ties (b == a) return a; callers
// in this file use it to clamp a floating cell coordinate to a max index.
__device__ int cuda_mymin(int a, double b) {
    return !(b<a)?a:round(b);
}
// Floored-division remainder: numer - floor(numer/denom) * denom.
// Unlike std::fmod (which follows the sign of numer), the result takes the
// sign of denom, so for positive denom it is always non-negative.
__device__ double cuda_fmod(double numer, double denom){
    const double quotient = floor(numer / denom);
    return numer - quotient * denom;
}
// Maps a point (nominally in [0, 1]) to its cell index on a uniform 1-D grid
// of ncx cells; out-of-range points clamp to the first/last cell.
__device__ int cuda_findcellidx_1D(const float* p, const int ncx) {
    const int cell = floor(p[0] * ncx);
    return max(0, min(cell, ncx - 1));
}
// Maps a 2-D point to a triangle index on an ncx-by-ncy grid where each square
// cell is split into 4 triangles (hence the *4 on the base index). Triangle
// offsets within a cell: 0 = top, 1 = right, 2 = bottom, 3 = left — inferred
// from the x/y comparisons below; confirm against the tessellation definition.
// Out-of-bounds points are assigned to the boundary triangle facing them.
__device__ int cuda_findcellidx_2D(const float* p, const int ncx, const int ncy) {
    // Copy point
    double point[2];
    point[0] = p[0];
    point[1] = p[1];
    // Cell size
    const float inc_x = 1.0 / ncx;
    const float inc_y = 1.0 / ncy;
    // Find initial row, col placement (coordinates clamped into the grid;
    // the -1e-9 keeps a point exactly on the upper edge inside the last cell)
    double p0 = min((ncx * inc_x - 0.000000001), max(0.0, point[0]));
    double p1 = min((ncy * inc_y - 0.000000001), max(0.0, point[1]));
    double xmod = cuda_fmod((double)p0, (double)inc_x);
    double ymod = cuda_fmod((double)p1, (double)inc_y);
    // (x, y) are the point's coordinates relative to its cell, each in [0, 1)
    double x = xmod / inc_x;
    double y = ymod / inc_y;
    int cell_idx =  cuda_mymin(ncx-1, (p0 - xmod) / inc_x) +
                    cuda_mymin(ncy-1, (p1 - ymod) / inc_y) * ncx;
    cell_idx *= 4;
    // Out of bound (left)
    if(point[0]<=0){
        if(point[1] <= 0 && point[1]/inc_y<point[0]/inc_x){
            // Nothing to do here (top-left corner: keep triangle 0)
        } else if(point[1] >= ncy * inc_y && point[1]/inc_y-ncy > -point[0]/inc_x) {
            cell_idx += 2;
        } else {
            cell_idx += 3;
        }
        return cell_idx;
    }
    // Out of bound (right)
    if(point[0] >= ncx*inc_x){
        if(point[1]<=0 && -point[1]/inc_y > point[0]/inc_x - ncx){
            // Nothing to do here (top-right corner: keep triangle 0)
        } else if(point[1] >= ncy*inc_y && point[1]/inc_y - ncy > point[0]/inc_x-ncx){
            cell_idx += 2;
        } else {
            cell_idx += 1;
        }
        return cell_idx;
    }
    // Out of bound (up)
    if(point[1] <= 0){
        return cell_idx;
    }
    // Out of bound (bottom)
    if(point[1] >= ncy*inc_y){
        cell_idx += 2;
        return cell_idx;
    }
    // OK, we are inbound: pick the triangle by comparing against the two
    // diagonals of the cell (y = x and y = 1 - x)
    if(x<y){
        if(1-x<y){
            cell_idx += 2;
        } else {
            cell_idx += 3;
        }
    } else if(1-x<y) {
        cell_idx += 1;
    }
    return cell_idx;
    // NOTE(review): the block below is an unreachable, commented-out
    // alternative implementation kept for reference by the original authors.
    /*
    // Cell size
    const float inc_x = 1.0 / nx;
    const float inc_y = 1.0 / ny;
    // Copy point
    float point[2];
    point[0] = p[0];
    point[1] = p[1];
    // If point is outside [0, 1]x[0, 1] then we push it inside
    if (point[0] < 0.0 || point[0] > 1.0 || point[1] < 0.0 || point[1] > 1.0) {
        const float half = 0.5;
        point[0] -= half;
        point[1] -= half;
        const float abs_x = abs(point[0]);
        const float abs_y = abs(point[1]);
        const float push_x = (abs_x < abs_y) ? half*inc_x : 0.0;
        const float push_y = (abs_y < abs_x) ? half*inc_y : 0.0;
        if (abs_x > half) {
            point[0] = copysign(half - push_x, point[0]);
        }
        if (abs_y > half) {
            point[1] = copysign(half - push_y, point[1]);
        }
        point[0] += half;
        point[1] += half;
    }
    // Find initial row, col placement
    const float p0 = min((float)(1.0 - 1e-8), point[0]);
    const float p1 = min((float)(1.0 - 1e-8), point[1]);
    const float p0ncx = p0*nx;
    const float p1ncy = p1*ny;
    const int ip0ncx = p0ncx; // rounds down
    const int ip1ncy = p1ncy; // rounds down
    int cell_idx = 4 * (ip0ncx + ip1ncy * nx);
    // Find (sub)triangle
    const float x = p0ncx - ip0ncx;
    const float y = p1ncy - ip1ncy;
    if (x < y) {
        if (1-x < y) {
            cell_idx += 2;
        } else {
            cell_idx += 3;
        }
    } else if (1-x < y) {
        cell_idx += 1;
    }
    return cell_idx;
    */
}
// Maps a 3-D point to a tetrahedron index on an nx-by-ny-by-nz grid where each
// box cell is split into 5 tetrahedra (hence the *5 on the base index).
// Out-of-bounds points are first pushed to the closest valid position.
// Fixes vs. the previous revision:
//   * the out-of-bounds test now also checks the z coordinate;
//   * push_y/push_z now compare abs_y/abs_z against the other two axes
//     (mirroring push_x), instead of re-comparing abs_x;
//   * the z clamp uses inc_z (was inc_x, which mis-clamped z whenever
//     nx != nz).
__device__ int cuda_findcellidx_3D(const float* p, const int nx, const int ny, const int nz) {
    // Cell size
    const float inc_x = 1.0 / nx;
    const float inc_y = 1.0 / ny;
    const float inc_z = 1.0 / nz;
    // Copy point
    float point[3];
    point[0] = p[0];
    point[1] = p[1];
    point[2] = p[2];
    // If point is outside [0, 1]x[0, 1]x[0, 1] then we push it inside
    if(point[0] < 0.0 || point[0] > 1.0 || point[1] < 0.0 || point[1] > 1.0
       || point[2] < 0.0 || point[2] > 1.0) {
        const float half = 0.5;
        point[0] -= half;
        point[1] -= half;
        point[2] -= half;
        const float abs_x = abs(point[0]);
        const float abs_y = abs(point[1]);
        const float abs_z = abs(point[2]);
        // Push along the axis that is strictly closest to the boundary
        const float push_x = (abs_x < abs_y && abs_x < abs_z) ? half*inc_x : 0.0;
        const float push_y = (abs_y < abs_x && abs_y < abs_z) ? half*inc_y : 0.0;
        const float push_z = (abs_z < abs_x && abs_z < abs_y) ? half*inc_z : 0.0;
        if(abs_x > half){point[0] = copysign(half - push_x, point[0]);}
        if(abs_y > half){point[1] = copysign(half - push_y, point[1]);}
        if(abs_z > half){point[2] = copysign(half - push_z, point[2]);}
        point[0] += half;
        point[1] += half;
        point[2] += half;
    }
    // Clamp each coordinate into its own axis extent (1e-8 keeps a point on
    // the upper face inside the last cell)
    float zero = 0.0;
    float p0 = min((float)(nx*inc_x-1e-8),max(zero, point[0]));
    float p1 = min((float)(ny*inc_y-1e-8),max(zero, point[1]));
    float p2 = min((float)(nz*inc_z-1e-8),max(zero, point[2]));
    double xmod = cuda_fmod(p0,inc_x);
    double ymod = cuda_fmod(p1,inc_y);
    double zmod = cuda_fmod(p2,inc_z);
    // (i, j, k): box-cell coordinates
    int i = cuda_mymin(nx-1,((p0 - xmod)/inc_x));
    int j = cuda_mymin(ny-1,((p1 - ymod)/inc_y));
    int k = cuda_mymin(nz-1,((p2 - zmod)/inc_z));
    int cell_idx = 5*(i + j * nx + k * nx * ny);
    // (x, y, z): coordinates relative to the box cell, each in [0, 1)
    double x = xmod/inc_x;
    double y = ymod/inc_y;
    double z = zmod/inc_z;
    // Alternate the tetrahedral split on a 3-D checkerboard so adjacent cells
    // share faces; tf marks cells whose split is mirrored
    bool tf = false;
    if (k%2==0){
        if ((i%2==0 && j%2==1) || (i%2==1 && j%2==0)){
            tf = true;
        }
    }
    else if((i%2==0 && j%2==0) || (i%2==1 && j%2==1)){
        tf = true;
    }
    if (tf){
        // Rotate local coordinates 90 degrees for mirrored cells
        double tmp = x;
        x = y;
        y = 1-tmp;
    }
    // Pick one of the 4 corner tetrahedra; the central one is offset 0
    if (-x -y +z >= 0){
        cell_idx+=1;
    }
    else if (x+y+z - 2 >= 0){
        cell_idx+=2;
    }
    else if (-x+y-z >= 0){
        cell_idx+=3;
    }
    else if (x-y-z >= 0){
        cell_idx+=4;
    }
    return cell_idx;
}
// x = A*b + t for a 1-D affine map stored as [a, t]: x[0] = a*b[0] + t.
__device__ void A_times_b_1D(float x[], const float* A, float* b) {
    x[0] = A[0]*b[0] + A[1];
    return;
}
// x = A*b + t for a 2-D affine map stored row-major as a 2x3 matrix
// [a00 a01 t0; a10 a11 t1] (6 floats, translation in columns 2 and 5).
__device__ void A_times_b_2D(float x[], const float* A, float* b) {
    x[0] = A[0]*b[0] + A[1]*b[1] + A[2];
    x[1] = A[3]*b[0] + A[4]*b[1] + A[5];
    return;
}
// x = A*b + t for a 3-D affine map stored row-major as a 3x4 matrix
// (12 floats, translation in columns 3, 7 and 11).
__device__ void A_times_b_3D(float x[], const float* A, float* b) {
    x[0] = A[0]*b[0] + A[1]*b[1] + A[2]*b[2] + A[3];
    x[1] = A[4]*b[0] + A[5]*b[1] + A[6]*b[2] + A[7];
    x[2] = A[8]*b[0] + A[9]*b[1] + A[10]*b[2] + A[11];
    return;
}
// Linear part only of the 1-D affine map: x = a*b (translation dropped).
__device__ void A_times_b_linear_1D(float x[], const float* A, float* b) {
    x[0] = A[0]*b[0];
    return;
}
// Linear part only of the 2x3 affine map: translation entries A[2]/A[5]
// are skipped, hence the jump from A[1] to A[3].
__device__ void A_times_b_linear_2D(float x[], const float* A, float* b) {
    x[0] = A[0]*b[0] + A[1]*b[1];
    x[1] = A[3]*b[0] + A[4]*b[1];
    return;
}
// Linear part only of the 3x4 affine map: translation entries
// A[3]/A[7]/A[11] are skipped.
__device__ void A_times_b_linear_3D(float x[], const float* A, float* b) {
    x[0] = A[0]*b[0] + A[1]*b[1] + A[2]*b[2];
    x[1] = A[4]*b[0] + A[5]*b[1] + A[6]*b[2];
    x[2] = A[8]*b[0] + A[9]*b[1] + A[10]*b[2];
    return;
}
// Kernel declaration
__global__ void cpab_cuda_kernel_forward_1D(const int nP, const int batch_size,
float* newpoints, const float* points,
const float* Trels, const int* nStepSolver,
const int* nc, const int broadcast, const int* circularity) {
int point_index = blockIdx.x * blockDim.x + threadIdx.x;
int batch_index = blockIdx.y * blockDim.y + threadIdx.y;
if(point_index < nP && batch_index < batch_size) {
// Get point
float point[1];
point[0] = points[broadcast*batch_index*nP*1+point_index];
// Define start index for the matrices belonging to this batch
// batch * 2 params pr cell * cell in x
int start_idx = batch_index * 2 * nc[0];
// Iterate in nStepSolver
int cellidx;
for(int n = 0; n < nStepSolver[0]; n++){
// Find cell idx
cellidx = cuda_findcellidx_1D(point, nc[0]);
// Extract the mapping in the cell
const float* Trels_idx = Trels + 2*cellidx + start_idx;
// Calculate trajectory of point
float point_updated[1];
A_times_b_1D(point_updated, Trels_idx, point);
point[0] = point_updated[0];
if(circularity[0] == 1)
point[0] = cuda_mymod(point[0],1);
}
// Copy to output
newpoints[nP * batch_index + point_index] = point[0];
}
return;
}
// 2-D variant of the forward integration kernel. Layout: 6 affine params per
// triangle, 4 triangles per grid cell. Output is stored with the x
// coordinates of all points first, then the y coordinates ([2, nP] layout
// per batch). NOTE(review): `circularity` is accepted but unused here —
// wrapping is only implemented in the 1-D kernel; confirm this is intended.
__global__ void cpab_cuda_kernel_forward_2D(const int nP, const int batch_size,
                                            float* newpoints, const float* points,
                                            const float* Trels, const int* nStepSolver,
                                            const int* nc, const int broadcast, const int* circularity) {
    int point_index = blockIdx.x * blockDim.x + threadIdx.x;
    int batch_index = blockIdx.y * blockDim.y + threadIdx.y;
    if(point_index < nP && batch_index < batch_size) {
        // Get point
        float point[2];
        point[0] = points[broadcast*batch_index*nP*2+point_index];
        point[1] = points[broadcast*batch_index*nP*2+point_index + nP];
        // Define start index for the matrices belonging to this batch
        // batch * num_elem * 4 triangles pr cell * cell in x * cell in y
        int start_idx = batch_index * 6 * 4 * nc[0] * nc[1];
        // Iterate in nStepSolver
        int cellidx;
        for(int n = 0; n < nStepSolver[0]; n++){
            // Find cell idx
            cellidx = cuda_findcellidx_2D(point, nc[0], nc[1]);
            // Extract the mapping in the cell
            const float* Trels_idx = Trels + 6*cellidx + start_idx;
            // Calculate trajectory of point
            float point_updated[2];
            A_times_b_2D(point_updated, Trels_idx, point);
            point[0] = point_updated[0];
            point[1] = point_updated[1];
        }
        // Copy to output
        newpoints[2 * nP * batch_index + point_index] = point[0];
        newpoints[2 * nP * batch_index + point_index + nP] = point[1];
    }
    return;
}
// 3-D variant of the forward integration kernel. Layout: 12 affine params
// per tetrahedron, 5 tetrahedra per grid cell. Output stores x, then y,
// then z coordinates ([3, nP] layout per batch). NOTE(review): as in 2-D,
// `circularity` is unused here.
__global__ void cpab_cuda_kernel_forward_3D(const int nP, const int batch_size,
                                            float* newpoints, const float* points,
                                            const float* Trels, const int* nStepSolver,
                                            const int* nc, const int broadcast, const int* circularity) {
    int point_index = blockIdx.x * blockDim.x + threadIdx.x;
    int batch_index = blockIdx.y * blockDim.y + threadIdx.y;
    if(point_index < nP && batch_index < batch_size) {
        // Get point
        float point[3];
        point[0] = points[broadcast*batch_index*nP*3+point_index];
        point[1] = points[broadcast*batch_index*nP*3+point_index + nP];
        point[2] = points[broadcast*batch_index*nP*3+point_index + 2*nP];
        // Define start index for the matrices belonging to this batch
        // batch * 12 params pr cell * 5 triangles pr cell * cell in x * cell in y * cell in z
        int start_idx = batch_index * 12 * 5 * nc[0] * nc[1] * nc[2];
        // Iterate in nStepSolver
        int cellidx;
        for(int n = 0; n < nStepSolver[0]; n++){
            // Find cell idx
            cellidx = cuda_findcellidx_3D(point, nc[0], nc[1], nc[2]);
            // Extract the mapping in the cell
            const float* Trels_idx = Trels + 12*cellidx + start_idx;
            // Calculate trajectory of point
            float point_updated[3];
            A_times_b_3D(point_updated, Trels_idx, point);
            point[0] = point_updated[0];
            point[1] = point_updated[1];
            point[2] = point_updated[2];
        }
        // Copy to output
        newpoints[3 * nP * batch_index + point_index] = point[0];
        newpoints[3 * nP * batch_index + point_index + nP] = point[1];
        newpoints[3 * nP * batch_index + point_index + 2 * nP] = point[2];
    }
    return;
}
// Backward (gradient) kernel, 1-D: jointly integrates the point trajectory p
// and its sensitivity q w.r.t. each of the d transformation parameters,
// using midpoint (RK2-style) updates. Thread grid: x = points, y = batch,
// z = parameter dimension. `grad` holds q, laid out as [d, n_theta, nP];
// it is read as the initial sensitivity and overwritten with the result.
__global__ void  cpab_cuda_kernel_backward_1D(dim3 nthreads, const int n_theta, const int d, const int nP, const int nC,
                                             float* grad, const float* points, const float* As, const float* Bs,
                                             const int* nStepSolver, const int* nc, const int broadcast, const int* circularity) {
    // Allocate memory for computations
    float p[1], v[1], pMid[1], vMid[1], q[1], qMid[1];
    float B_times_T[1], A_times_dTdAlpha[1], u[1], uMid[1];
    float Alocal[2], Blocal[2], AlocalMid[2], BlocalMid[2];
    int cellidx, cellidxMid;
    // Thread index
    int point_index = threadIdx.x + blockIdx.x * blockDim.x;
    int batch_index = threadIdx.y + blockIdx.y * blockDim.y;
    int dim_index = threadIdx.z + blockIdx.z * blockDim.z;
    // Make sure we are within bounds
    if(point_index < nP && batch_index < n_theta && dim_index < d){
        int index = nP * batch_index + point_index;
        int boxsize = nP * n_theta;
        // Define start index for the matrices belonging to this batch
        // batch * 2 params pr cell * cell in x
        int start_idx = batch_index * 2 * nc[0];
        // Get point
        p[0] = points[broadcast*batch_index*nP*1+point_index];
        // Step size for solver
        double h = (1.0 / nStepSolver[0]);
        // Iterate a number of times
        for(int t=0; t<nStepSolver[0]; t++) {
            // Get current cell
            cellidx = cuda_findcellidx_1D(p, nc[0]);
            // Get index of A
            int As_idx = 2*cellidx;
            // Extract local A
            for(int i = 0; i < 2; i++){
                Alocal[i] = (As + As_idx + start_idx)[i];
            }
            // Compute velocity at current location
            A_times_b_1D(v, Alocal, p);
            // Compute midpoint
            pMid[0] = p[0] + h*v[0]/2.0;
            if(circularity[0] == 1)
            {
                // Periodic domain: wrap the midpoint and look up the cell it
                // lands in, since it may differ from the cell of p.
                pMid[0] = cuda_mymod(pMid[0],1);
                cellidxMid = cuda_findcellidx_1D(pMid, nc[0]);
                // Get index of A
                int AsMid_idx = 2*cellidxMid;
                // Extract local mid A
                for(int i = 0; i < 2; i++){
                    AlocalMid[i] = (As + AsMid_idx + start_idx)[i];
                }
                A_times_b_1D(vMid, AlocalMid, pMid);
                // Get index of B
                int Bs_midIdx = 2 * dim_index * nC + AsMid_idx;
                // Get local B
                for(int i = 0; i < 2; i++){
                    BlocalMid[i] = (Bs + Bs_midIdx)[i];
                }
            }
            else{
                A_times_b_1D(vMid, Alocal, pMid);
            }
            // Compute velocity at midpoint
            // NOTE(review): this unconditional recomputation overwrites the
            // circular-case vMid computed above with AlocalMid, and BlocalMid
            // is loaded but never used below (Blocal is used for both steps).
            // This looks like an incomplete circular-boundary implementation;
            // verify against the reference CPU/CPAB implementation.
            A_times_b_1D(vMid, Alocal, pMid);
            // Get index of B
            int Bs_idx = 2 * dim_index * nC + As_idx;
            // Get local B
            for(int i = 0; i < 2; i++){
                Blocal[i] = (Bs + Bs_idx)[i];
            }
            // Copy q
            q[0] = grad[dim_index*boxsize + index];
            // Step 1: Compute u using the old location
            // Find current RHS (term 1 + term 2)
            A_times_b_1D(B_times_T, Blocal, p); // Term 1
            A_times_b_linear_1D(A_times_dTdAlpha, Alocal, q); // Term 2
            // Sum both terms
            u[0] = B_times_T[0] + A_times_dTdAlpha[0];
            // Step 2: Compute mid "point"
            qMid[0] = q[0] + h * u[0]/2.0;
            // Step 3: Compute uMid
            A_times_b_1D(B_times_T, Blocal, pMid); // Term 1
            A_times_b_linear_1D(A_times_dTdAlpha, Alocal, qMid); // Term 2
            // Sum both terms
            uMid[0] = B_times_T[0] + A_times_dTdAlpha[0];
            // Update q
            q[0] += uMid[0] * h;
            // Update gradient
            grad[dim_index * boxsize + index] = q[0];
            // Update p
            p[0] += vMid[0]*h;
            if(circularity[0] == 1)
                p[0] = cuda_mymod(p[0],1);
        }
    }
    return;
}
// Backward (gradient) kernel, 2-D: jointly integrates the point trajectory p
// and its sensitivity q per transformation parameter with midpoint updates.
// Thread grid: x = points, y = batch, z = parameter dimension.
// `grad` ([d, n_theta, 2, nP] flat) is read as the initial sensitivity and
// overwritten in place. `circularity` is accepted but unused in 2-D.
__global__ void  cpab_cuda_kernel_backward_2D(dim3 nthreads, const int n_theta, const int d, const int nP, const int nC,
                                             float* grad, const float* points, const float* As, const float* Bs,
                                             const int* nStepSolver, const int* nc, const int broadcast, const int* circularity) {
    // Allocate memory for computations
    float p[2], v[2], pMid[2], vMid[2], q[2], qMid[2];
    float B_times_T[2], A_times_dTdAlpha[2], u[2], uMid[2];
    float Alocal[6], Blocal[6];
    int cellidx;
    // Thread index
    int point_index = threadIdx.x + blockIdx.x * blockDim.x;
    int batch_index = threadIdx.y + blockIdx.y * blockDim.y;
    int dim_index = threadIdx.z + blockIdx.z * blockDim.z;
    // Make sure we are within bounds
    if(point_index < nP && batch_index < n_theta && dim_index < d){
        int index = 2 * nP * batch_index + point_index;
        int boxsize = 2 * nP * n_theta;
        // Define start index for the matrices belonging to this batch
        // batch * num_elem * 4 triangles pr cell * cell in x * cell in y
        int start_idx = batch_index * 6 * 4 * nc[0] * nc[1];
        // Get point
        p[0] = points[broadcast*batch_index*nP*2+point_index];
        p[1] = points[broadcast*batch_index*nP*2+point_index + nP];
        // Step size for solver
        double h = (1.0 / nStepSolver[0]);
        // Iterate a number of times
        for(int t=0; t<nStepSolver[0]; t++) {
            // Get current cell
            cellidx = cuda_findcellidx_2D(p, nc[0], nc[1]);
            // Get index of A
            int As_idx = 6*cellidx;
            // Extract local A
            for(int i = 0; i < 6; i++){
                Alocal[i] = (As + As_idx + start_idx)[i];
            }
            // Compute velocity at current location
            A_times_b_2D(v, Alocal, p);
            // Compute midpoint
            pMid[0] = p[0] + h*v[0]/2.0;
            pMid[1] = p[1] + h*v[1]/2.0;
            // Compute velocity at midpoint (same cell's map is reused)
            A_times_b_2D(vMid, Alocal, pMid);
            // Get index of B
            int Bs_idx = 6 * dim_index * nC + As_idx;
            // Get local B
            for(int i = 0; i < 6; i++){
                Blocal[i] = (Bs + Bs_idx)[i];
            }
            // Copy q
            q[0] = grad[dim_index*boxsize + index];
            q[1] = grad[dim_index*boxsize + index + nP];
            // Step 1: Compute u using the old location
            // Find current RHS (term 1 + term 2)
            A_times_b_2D(B_times_T, Blocal, p); // Term 1
            A_times_b_linear_2D(A_times_dTdAlpha, Alocal, q); // Term 2
            // Sum both terms
            u[0] = B_times_T[0] + A_times_dTdAlpha[0];
            u[1] = B_times_T[1] + A_times_dTdAlpha[1];
            // Step 2: Compute mid "point"
            qMid[0] = q[0] + h * u[0]/2.0;
            qMid[1] = q[1] + h * u[1]/2.0;
            // Step 3: Compute uMid
            A_times_b_2D(B_times_T, Blocal, pMid); // Term 1
            A_times_b_linear_2D(A_times_dTdAlpha, Alocal, qMid); // Term 2
            // Sum both terms
            uMid[0] = B_times_T[0] + A_times_dTdAlpha[0];
            uMid[1] = B_times_T[1] + A_times_dTdAlpha[1];
            // Update q
            q[0] += uMid[0] * h;
            q[1] += uMid[1] * h;
            // Update gradient
            grad[dim_index * boxsize + index] = q[0];
            grad[dim_index * boxsize + index + nP] = q[1];
            // Update p
            p[0] += vMid[0]*h;
            p[1] += vMid[1]*h;
        }
    }
    return;
}
// Backward (gradient) kernel, 3-D: jointly integrates the point trajectory p
// and its sensitivity q per transformation parameter with midpoint updates.
// Thread grid: x = points, y = batch, z = parameter dimension.
// `grad` ([d, n_theta, 3, nP] flat) is read as the initial sensitivity and
// overwritten in place. `circularity` is accepted but unused in 3-D.
__global__ void  cpab_cuda_kernel_backward_3D(dim3 nthreads, const int n_theta, const int d, const int nP, const int nC,
                                             float* grad, const float* points, const float* As, const float* Bs,
                                             const int* nStepSolver, const int* nc, const int broadcast, const int* circularity) {
    // Allocate memory for computations
    float p[3], v[3], pMid[3], vMid[3], q[3], qMid[3];
    float B_times_T[3], A_times_dTdAlpha[3], u[3], uMid[3];
    float Alocal[12], Blocal[12];
    int cellidx;
    // Thread index
    int point_index = threadIdx.x + blockIdx.x * blockDim.x;
    int batch_index = threadIdx.y + blockIdx.y * blockDim.y;
    int dim_index = threadIdx.z + blockIdx.z * blockDim.z;
    // Make sure we are within bounds
    if(point_index < nP && batch_index < n_theta && dim_index < d){
        int index = 3 * nP * batch_index + point_index;
        int boxsize = 3 * nP * n_theta;
        // Define start index for the matrices belonging to this batch
        // batch * 12 params pr cell * 5 tetrahedra pr cell * cell in x * cell in y * cell in z
        int start_idx = batch_index * 12 * 5 * nc[0] * nc[1] * nc[2];
        // Get point
        p[0] = points[broadcast*batch_index*nP*3+point_index];
        p[1] = points[broadcast*batch_index*nP*3+point_index + nP];
        p[2] = points[broadcast*batch_index*nP*3+point_index + 2 * nP];
        // Step size for solver
        double h = (1.0 / nStepSolver[0]);
        // Iterate a number of times
        for(int t=0; t<nStepSolver[0]; t++) {
            // Get current cell
            cellidx = cuda_findcellidx_3D(p, nc[0], nc[1], nc[2]);
            // Get index of A
            int As_idx = 12*cellidx;
            // Extract local A
            for(int i = 0; i < 12; i++){
                Alocal[i] = (As + As_idx + start_idx)[i];
            }
            // Compute velocity at current location
            A_times_b_3D(v, Alocal, p);
            // Compute midpoint
            pMid[0] = p[0] + h*v[0]/2.0;
            pMid[1] = p[1] + h*v[1]/2.0;
            pMid[2] = p[2] + h*v[2]/2.0;
            // Compute velocity at midpoint (same cell's map is reused)
            A_times_b_3D(vMid, Alocal, pMid);
            // Get index of B
            int Bs_idx = 12 * dim_index * nC + As_idx;
            // Get local B
            for(int i = 0; i < 12; i++){
                Blocal[i] = (Bs + Bs_idx)[i];
            }
            // Copy q
            q[0] = grad[dim_index * boxsize + index];
            q[1] = grad[dim_index * boxsize + index + nP];
            q[2] = grad[dim_index * boxsize + index + 2*nP];
            // Step 1: Compute u using the old location
            // Find current RHS (term 1 + term 2)
            A_times_b_3D(B_times_T, Blocal, p); // Term 1
            A_times_b_linear_3D(A_times_dTdAlpha, Alocal, q); // Term 2
            // Sum both terms
            u[0] = B_times_T[0] + A_times_dTdAlpha[0];
            u[1] = B_times_T[1] + A_times_dTdAlpha[1];
            u[2] = B_times_T[2] + A_times_dTdAlpha[2];
            // Step 2: Compute mid "point"
            qMid[0] = q[0] + h * u[0]/2.0;
            qMid[1] = q[1] + h * u[1]/2.0;
            qMid[2] = q[2] + h * u[2]/2.0;
            // Step 3: Compute uMid
            A_times_b_3D(B_times_T, Blocal, pMid); // Term 1
            A_times_b_linear_3D(A_times_dTdAlpha, Alocal, qMid); // Term 2
            // Sum both terms
            uMid[0] = B_times_T[0] + A_times_dTdAlpha[0];
            uMid[1] = B_times_T[1] + A_times_dTdAlpha[1];
            uMid[2] = B_times_T[2] + A_times_dTdAlpha[2];
            // Update q
            q[0] += uMid[0] * h;
            q[1] += uMid[1] * h;
            q[2] += uMid[2] * h;
            // Update gradient
            grad[dim_index * boxsize + index] = q[0];
            grad[dim_index * boxsize + index + nP] = q[1];
            grad[dim_index * boxsize + index + 2 * nP] = q[2];
            // Update p
            p[0] += vMid[0]*h;
            p[1] += vMid[1]*h;
            p[2] += vMid[2]*h;
        }
    }
    return;
}
| c821146129d576b91e69d2e70a9d5a03342cd5e3.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include <iostream>
__device__ float cuda_mymod(float a, float b) {
float mod_1 = fmod(a,b) + b;
return fmod(mod_1, b);
}
__device__ int cuda_mymin(int a, double b) {
return !(b<a)?a:round(b);
}
__device__ double cuda_fmod(double numer, double denom){
double tquou = floor(numer / denom);
return numer - tquou * denom;
}
// Map a point in [0,1] to the index of its 1D cell; points outside the
// domain are clamped to the first/last cell.
__device__ int cuda_findcellidx_1D(const float* p, const int ncx) {
    const int cell = floor(p[0] * ncx);
    if (cell < 0) return 0;
    if (cell > ncx - 1) return ncx - 1;
    return cell;
}
// Map a 2D point to the index of the triangle cell containing it.
// The unit square is split into ncx*ncy rectangles, each divided into
// 4 triangles; the returned index is 4*rect_index + triangle offset.
// Points outside [0,1]x[0,1] are assigned to the boundary triangle of
// the nearest edge (the offsets 0..3 correspond to the four triangles;
// their exact orientation follows the tessellation convention of the
// surrounding library — not derivable from this function alone).
__device__ int cuda_findcellidx_2D(const float* p, const int ncx, const int ncy) {
    // Work on a local (double) copy of the point
    double point[2];
    point[0] = p[0];
    point[1] = p[1];
    // Cell size
    const float inc_x = 1.0 / ncx;
    const float inc_y = 1.0 / ncy;
    // Clamp into the domain (minus an epsilon so the max coordinate
    // still falls in the last cell)
    double p0 = min((ncx * inc_x - 0.000000001), max(0.0, point[0]));
    double p1 = min((ncy * inc_y - 0.000000001), max(0.0, point[1]));
    // Offset within the rectangle
    double xmod = cuda_fmod((double)p0, (double)inc_x);
    double ymod = cuda_fmod((double)p1, (double)inc_y);
    // Offset scaled to [0,1) for the triangle test below
    double x = xmod / inc_x;
    double y = ymod / inc_y;
    // Rectangle index, then 4 triangles per rectangle
    int cell_idx = cuda_mymin(ncx-1, (p0 - xmod) / inc_x) +
                   cuda_mymin(ncy-1, (p1 - ymod) / inc_y) * ncx;
    cell_idx *= 4;
    // Out of bound (left)
    if(point[0]<=0){
        if(point[1] <= 0 && point[1]/inc_y<point[0]/inc_x){
            // Nothing to do here: corner handled by offset 0
        } else if(point[1] >= ncy * inc_y && point[1]/inc_y-ncy > -point[0]/inc_x) {
            cell_idx += 2;
        } else {
            cell_idx += 3;
        }
        return cell_idx;
    }
    // Out of bound (right)
    if(point[0] >= ncx*inc_x){
        if(point[1]<=0 && -point[1]/inc_y > point[0]/inc_x - ncx){
            // Nothing to do here: corner handled by offset 0
        } else if(point[1] >= ncy*inc_y && point[1]/inc_y - ncy > point[0]/inc_x-ncx){
            cell_idx += 2;
        } else {
            cell_idx += 1;
        }
        return cell_idx;
    }
    // Out of bound (up)
    if(point[1] <= 0){
        return cell_idx;
    }
    // Out of bound (bottom)
    if(point[1] >= ncy*inc_y){
        cell_idx += 2;
        return cell_idx;
    }
    // Inbound: pick the triangle by comparing against the two diagonals
    if(x<y){
        if(1-x<y){
            cell_idx += 2;
        } else {
            cell_idx += 3;
        }
    } else if(1-x<y) {
        cell_idx += 1;
    }
    return cell_idx;
    // (A commented-out alternative implementation that pushed
    // out-of-domain points back inside the unit square was previously
    // kept here; removed as dead text.)
}
// Map a 3D point to the index of the tetrahedron cell containing it.
// The unit cube is split into nx*ny*nz boxes, each divided into 5
// tetrahedra (index = 5*box_index + tetra offset 0..4). Out-of-domain
// points are first pushed back inside the cube.
//
// Fixes relative to the original:
//  - the out-of-domain guard now also tests point[2] (the body adjusts
//    z, so the guard clearly intended to include it);
//  - push_y/push_z conditions now compare abs_y resp. abs_z against the
//    other two axes (the original repeated abs_x — copy-paste typo);
//  - the p2 clamp now uses inc_z instead of inc_x (wrong when nx != nz).
__device__ int cuda_findcellidx_3D(const float* p, const int nx, const int ny, const int nz) {
    // Cell size
    const float inc_x = 1.0 / nx;
    const float inc_y = 1.0 / ny;
    const float inc_z = 1.0 / nz;
    // Copy point
    float point[3];
    point[0] = p[0];
    point[1] = p[1];
    point[2] = p[2];
    // If point is outside [0, 1]x[0, 1]x[0, 1] then we push it inside
    if(point[0] < 0.0 || point[0] > 1.0 || point[1] < 0.0 || point[1] > 1.0 ||
       point[2] < 0.0 || point[2] > 1.0) {
        const float half = 0.5;
        // Center the cube on the origin so clamping is symmetric
        point[0] -= half;
        point[1] -= half;
        point[2] -= half;
        const float abs_x = abs(point[0]);
        const float abs_y = abs(point[1]);
        const float abs_z = abs(point[2]);
        // Push half a cell inward along the axis of smallest excursion
        const float push_x = (abs_x < abs_y && abs_x < abs_z) ? half*inc_x : 0.0;
        const float push_y = (abs_y < abs_x && abs_y < abs_z) ? half*inc_y : 0.0;
        const float push_z = (abs_z < abs_x && abs_z < abs_y) ? half*inc_z : 0.0;
        if(abs_x > half){point[0] = copysign(half - push_x, point[0]);}
        if(abs_y > half){point[1] = copysign(half - push_y, point[1]);}
        if(abs_z > half){point[2] = copysign(half - push_z, point[2]);}
        point[0] += half;
        point[1] += half;
        point[2] += half;
    }
    // Clamp into the domain (minus epsilon so max coords land in the last cell)
    float zero = 0.0;
    float p0 = min((float)(nx*inc_x-1e-8),max(zero, point[0]));
    float p1 = min((float)(ny*inc_y-1e-8),max(zero, point[1]));
    float p2 = min((float)(nz*inc_z-1e-8),max(zero, point[2]));
    // Offsets within the box
    double xmod = cuda_fmod(p0,inc_x);
    double ymod = cuda_fmod(p1,inc_y);
    double zmod = cuda_fmod(p2,inc_z);
    // Box coordinates and linear box index (5 tetrahedra per box)
    int i = cuda_mymin(nx-1,((p0 - xmod)/inc_x));
    int j = cuda_mymin(ny-1,((p1 - ymod)/inc_y));
    int k = cuda_mymin(nz-1,((p2 - zmod)/inc_z));
    int cell_idx = 5*(i + j * nx + k * nx * ny);
    double x = xmod/inc_x;
    double y = ymod/inc_y;
    double z = zmod/inc_z;
    // Alternate the tetrahedral split on a 3D checkerboard so faces match
    bool tf = false;
    if (k%2==0){
        if ((i%2==0 && j%2==1) || (i%2==1 && j%2==0)){
            tf = true;
        }
    }
    else if((i%2==0 && j%2==0) || (i%2==1 && j%2==1)){
        tf = true;
    }
    if (tf){
        // Mirror the box: rotate (x,y) a quarter turn
        double tmp = x;
        x = y;
        y = 1-tmp;
    }
    // Classify against the four cutting planes; offset 0 is the center tetra
    if (-x -y +z >= 0){
        cell_idx+=1;
    }
    else if (x+y+z - 2 >= 0){
        cell_idx+=2;
    }
    else if (-x+y-z >= 0){
        cell_idx+=3;
    }
    else if (x-y-z >= 0){
        cell_idx+=4;
    }
    return cell_idx;
}
// 1D affine map: x = a*b0 + t, with A laid out as [a, t].
__device__ void A_times_b_1D(float x[], const float* A, float* b) {
    const float b0 = b[0];
    x[0] = A[0]*b0 + A[1];
}
// 2D affine map x = M*b + t; A is the 2x3 row-major matrix [M | t].
__device__ void A_times_b_2D(float x[], const float* A, float* b) {
    const float b0 = b[0], b1 = b[1];
    x[0] = A[0]*b0 + A[1]*b1 + A[2];
    x[1] = A[3]*b0 + A[4]*b1 + A[5];
}
// 3D affine map x = M*b + t; A is the 3x4 row-major matrix [M | t].
__device__ void A_times_b_3D(float x[], const float* A, float* b) {
    const float b0 = b[0], b1 = b[1], b2 = b[2];
    x[0] = A[0]*b0 + A[1]*b1 + A[2]*b2  + A[3];
    x[1] = A[4]*b0 + A[5]*b1 + A[6]*b2  + A[7];
    x[2] = A[8]*b0 + A[9]*b1 + A[10]*b2 + A[11];
}
// Linear part only of the 1D affine map (translation A[1] skipped).
__device__ void A_times_b_linear_1D(float x[], const float* A, float* b) {
    x[0] = A[0] * b[0];
}
// Linear part only of the 2D affine map: translation column (A[2], A[5])
// is skipped, but the row stride of the 2x3 layout is preserved.
__device__ void A_times_b_linear_2D(float x[], const float* A, float* b) {
    const float b0 = b[0], b1 = b[1];
    x[0] = A[0]*b0 + A[1]*b1;
    x[1] = A[3]*b0 + A[4]*b1;
}
// Linear part only of the 3D affine map: translation column
// (A[3], A[7], A[11]) is skipped; row stride of the 3x4 layout preserved.
__device__ void A_times_b_linear_3D(float x[], const float* A, float* b) {
    const float b0 = b[0], b1 = b[1], b2 = b[2];
    x[0] = A[0]*b0 + A[1]*b1 + A[2]*b2;
    x[1] = A[4]*b0 + A[5]*b1 + A[6]*b2;
    x[2] = A[8]*b0 + A[9]*b1 + A[10]*b2;
}
// Kernel declaration
// Forward CPAB integration in 1D: for each (point, batch) pair, integrate
// the cell-wise affine velocity field with nStepSolver[0] explicit steps
// and write the transformed point to `newpoints`.
// Grid layout: x indexes points, y indexes batches (one thread per pair).
// `broadcast` is used as a 0/1 flag: 0 means all batches share the same
// point slice, 1 means each batch has its own nP-point slice in `points`.
__global__ void cpab_cuda_kernel_forward_1D(const int nP, const int batch_size,
                                            float* newpoints, const float* points,
                                            const float* Trels, const int* nStepSolver,
                                            const int* nc, const int broadcast, const int* circularity) {
    int point_index = blockIdx.x * blockDim.x + threadIdx.x;
    int batch_index = blockIdx.y * blockDim.y + threadIdx.y;
    if(point_index < nP && batch_index < batch_size) {
        // Get point
        float point[1];
        point[0] = points[broadcast*batch_index*nP*1+point_index];
        // Define start index for the matrices belonging to this batch
        // batch * 2 params pr cell * cell in x
        int start_idx = batch_index * 2 * nc[0];
        // Iterate in nStepSolver
        int cellidx;
        for(int n = 0; n < nStepSolver[0]; n++){
            // Find cell idx
            cellidx = cuda_findcellidx_1D(point, nc[0]);
            // Extract the mapping in the cell
            const float* Trels_idx = Trels + 2*cellidx + start_idx;
            // Calculate trajectory of point
            float point_updated[1];
            A_times_b_1D(point_updated, Trels_idx, point);
            point[0] = point_updated[0];
            // Circular domain: wrap back into [0, 1) after every step
            if(circularity[0] == 1)
                point[0] = cuda_mymod(point[0],1);
        }
        // Copy to output
        newpoints[nP * batch_index + point_index] = point[0];
    }
    return;
}
// Forward CPAB integration in 2D: integrates each point through the
// piecewise-affine velocity field (4 triangles per grid rectangle,
// 6 affine parameters per triangle). Grid layout: x -> point, y -> batch.
// NOTE(review): unlike the 1D kernel, `circularity` is accepted but never
// used here — confirm whether circular wrapping is meant to be 1D-only.
__global__ void cpab_cuda_kernel_forward_2D(const int nP, const int batch_size,
                                            float* newpoints, const float* points,
                                            const float* Trels, const int* nStepSolver,
                                            const int* nc, const int broadcast, const int* circularity) {
    int point_index = blockIdx.x * blockDim.x + threadIdx.x;
    int batch_index = blockIdx.y * blockDim.y + threadIdx.y;
    if(point_index < nP && batch_index < batch_size) {
        // Get point (x in the first nP entries, y in the next nP)
        float point[2];
        point[0] = points[broadcast*batch_index*nP*2+point_index];
        point[1] = points[broadcast*batch_index*nP*2+point_index + nP];
        // Define start index for the matrices belonging to this batch
        // batch * 6 params pr triangle * 4 triangles pr cell * cell in x * cell in y
        int start_idx = batch_index * 6 * 4 * nc[0] * nc[1];
        // Iterate in nStepSolver
        int cellidx;
        for(int n = 0; n < nStepSolver[0]; n++){
            // Find cell idx
            cellidx = cuda_findcellidx_2D(point, nc[0], nc[1]);
            // Extract the mapping in the cell
            const float* Trels_idx = Trels + 6*cellidx + start_idx;
            // Calculate trajectory of point
            float point_updated[2];
            A_times_b_2D(point_updated, Trels_idx, point);
            point[0] = point_updated[0];
            point[1] = point_updated[1];
        }
        // Copy to output (planar layout: all x's, then all y's)
        newpoints[2 * nP * batch_index + point_index] = point[0];
        newpoints[2 * nP * batch_index + point_index + nP] = point[1];
    }
    return;
}
// Forward CPAB integration in 3D: integrates each point through the
// piecewise-affine velocity field (5 tetrahedra per grid box, 12 affine
// parameters per tetrahedron). Grid layout: x -> point, y -> batch.
// NOTE(review): `circularity` is accepted but never used here — confirm
// whether circular wrapping is meant to be 1D-only.
__global__ void cpab_cuda_kernel_forward_3D(const int nP, const int batch_size,
                                            float* newpoints, const float* points,
                                            const float* Trels, const int* nStepSolver,
                                            const int* nc, const int broadcast, const int* circularity) {
    int point_index = blockIdx.x * blockDim.x + threadIdx.x;
    int batch_index = blockIdx.y * blockDim.y + threadIdx.y;
    if(point_index < nP && batch_index < batch_size) {
        // Get point (planar layout: x block, then y block, then z block)
        float point[3];
        point[0] = points[broadcast*batch_index*nP*3+point_index];
        point[1] = points[broadcast*batch_index*nP*3+point_index + nP];
        point[2] = points[broadcast*batch_index*nP*3+point_index + 2*nP];
        // Define start index for the matrices belonging to this batch
        // batch * 12 params pr cell * 5 tetrahedra pr cell * cell in x * cell in y * cell in z
        int start_idx = batch_index * 12 * 5 * nc[0] * nc[1] * nc[2];
        // Iterate in nStepSolver
        int cellidx;
        for(int n = 0; n < nStepSolver[0]; n++){
            // Find cell idx
            cellidx = cuda_findcellidx_3D(point, nc[0], nc[1], nc[2]);
            // Extract the mapping in the cell
            const float* Trels_idx = Trels + 12*cellidx + start_idx;
            // Calculate trajectory of point
            float point_updated[3];
            A_times_b_3D(point_updated, Trels_idx, point);
            point[0] = point_updated[0];
            point[1] = point_updated[1];
            point[2] = point_updated[2];
        }
        // Copy to output
        newpoints[3 * nP * batch_index + point_index] = point[0];
        newpoints[3 * nP * batch_index + point_index + nP] = point[1];
        newpoints[3 * nP * batch_index + point_index + 2 * nP] = point[2];
    }
    return;
}
// Backward CPAB pass in 1D: midpoint (RK2) co-integration of the point
// trajectory p and of q = d(transformed point)/d(theta_dim) for every
// (point, batch, theta-dimension) triple; q is accumulated in `grad`.
// Grid layout: x -> point, y -> batch, z -> theta dimension.
//
// FIX: the original code unconditionally recomputed
// A_times_b_1D(vMid, Alocal, pMid) AFTER the circularity branch, which
// clobbered the circular-case velocity computed with AlocalMid (making
// that branch's computation dead) and was redundant in the non-circular
// case. The duplicate call is removed; non-circular behavior unchanged.
__global__ void cpab_cuda_kernel_backward_1D(dim3 nthreads, const int n_theta, const int d, const int nP, const int nC,
                                             float* grad, const float* points, const float* As, const float* Bs,
                                             const int* nStepSolver, const int* nc, const int broadcast, const int* circularity) {
    // Allocate memory for computations
    float p[1], v[1], pMid[1], vMid[1], q[1], qMid[1];
    float B_times_T[1], A_times_dTdAlpha[1], u[1], uMid[1];
    float Alocal[2], Blocal[2], AlocalMid[2], BlocalMid[2];
    int cellidx, cellidxMid;
    // Thread index
    int point_index = threadIdx.x + blockIdx.x * blockDim.x;
    int batch_index = threadIdx.y + blockIdx.y * blockDim.y;
    int dim_index = threadIdx.z + blockIdx.z * blockDim.z;
    // Make sure we are within bounds
    if(point_index < nP && batch_index < n_theta && dim_index < d){
        int index = nP * batch_index + point_index;
        int boxsize = nP * n_theta;
        // Define start index for the matrices belonging to this batch
        // batch * 2 params pr cell * cell in x
        int start_idx = batch_index * 2 * nc[0];
        // Get point
        p[0] = points[broadcast*batch_index*nP*1+point_index];
        // Step size for solver
        double h = (1.0 / nStepSolver[0]);
        // Iterate a number of times
        for(int t=0; t<nStepSolver[0]; t++) {
            // Get current cell
            cellidx = cuda_findcellidx_1D(p, nc[0]);
            // Get index of A
            int As_idx = 2*cellidx;
            // Extract local A
            for(int i = 0; i < 2; i++){
                Alocal[i] = (As + As_idx + start_idx)[i];
            }
            // Compute velocity at current location
            A_times_b_1D(v, Alocal, p);
            // Compute midpoint
            pMid[0] = p[0] + h*v[0]/2.0;
            if(circularity[0] == 1)
            {
                // Circular domain: wrap the midpoint and evaluate the
                // velocity with the A-matrix of the wrapped midpoint's cell
                pMid[0] = cuda_mymod(pMid[0],1);
                cellidxMid = cuda_findcellidx_1D(pMid, nc[0]);
                // Get index of A
                int AsMid_idx = 2*cellidxMid;
                // Extract local mid A
                for(int i = 0; i < 2; i++){
                    AlocalMid[i] = (As + AsMid_idx + start_idx)[i];
                }
                A_times_b_1D(vMid, AlocalMid, pMid);
                // Get index of B
                int Bs_midIdx = 2 * dim_index * nC + AsMid_idx;
                // Get local B
                // NOTE(review): BlocalMid is extracted but never read below;
                // step 3 still uses Blocal/Alocal from the start-of-step
                // cell. Possibly intended for the midpoint RHS — verify
                // against the reference implementation.
                for(int i = 0; i < 2; i++){
                    BlocalMid[i] = (Bs + Bs_midIdx)[i];
                }
            }
            else{
                // Non-circular: velocity at the midpoint with the same cell's A
                A_times_b_1D(vMid, Alocal, pMid);
            }
            // Get index of B
            int Bs_idx = 2 * dim_index * nC + As_idx;
            // Get local B
            for(int i = 0; i < 2; i++){
                Blocal[i] = (Bs + Bs_idx)[i];
            }
            // Copy q
            q[0] = grad[dim_index*boxsize + index];
            // Step 1: Compute u using the old location
            // Find current RHS (term 1 + term 2)
            A_times_b_1D(B_times_T, Blocal, p); // Term 1
            A_times_b_linear_1D(A_times_dTdAlpha, Alocal, q); // Term 2
            // Sum both terms
            u[0] = B_times_T[0] + A_times_dTdAlpha[0];
            // Step 2: Compute mid "point"
            qMid[0] = q[0] + h * u[0]/2.0;
            // Step 3: Compute uMid
            A_times_b_1D(B_times_T, Blocal, pMid); // Term 1
            A_times_b_linear_1D(A_times_dTdAlpha, Alocal, qMid); // Term 2
            // Sum both terms
            uMid[0] = B_times_T[0] + A_times_dTdAlpha[0];
            // Update q
            q[0] += uMid[0] * h;
            // Update gradient
            grad[dim_index * boxsize + index] = q[0];
            // Update p
            p[0] += vMid[0]*h;
            if(circularity[0] == 1)
                p[0] = cuda_mymod(p[0],1);
        }
    }
    return;
}
// Backward CPAB pass in 2D: midpoint (RK2) co-integration of the point
// trajectory p and of q = d(transformed point)/d(theta_dim) for every
// (point, batch, theta-dimension) triple; q is accumulated in `grad`.
// Grid layout: x -> point, y -> batch, z -> theta dimension.
// NOTE(review): `circularity` is accepted but never used here, unlike the
// 1D backward kernel — confirm wrapping is intentionally 1D-only.
__global__ void cpab_cuda_kernel_backward_2D(dim3 nthreads, const int n_theta, const int d, const int nP, const int nC,
                                             float* grad, const float* points, const float* As, const float* Bs,
                                             const int* nStepSolver, const int* nc, const int broadcast, const int* circularity) {
    // Allocate memory for computations
    float p[2], v[2], pMid[2], vMid[2], q[2], qMid[2];
    float B_times_T[2], A_times_dTdAlpha[2], u[2], uMid[2];
    float Alocal[6], Blocal[6];
    int cellidx;
    // Thread index
    int point_index = threadIdx.x + blockIdx.x * blockDim.x;
    int batch_index = threadIdx.y + blockIdx.y * blockDim.y;
    int dim_index = threadIdx.z + blockIdx.z * blockDim.z;
    // Make sure we are within bounds
    if(point_index < nP && batch_index < n_theta && dim_index < d){
        int index = 2 * nP * batch_index + point_index;
        int boxsize = 2 * nP * n_theta;
        // Define start index for the matrices belonging to this batch
        // batch * 6 params pr triangle * 4 triangles pr cell * cell in x * cell in y
        int start_idx = batch_index * 6 * 4 * nc[0] * nc[1];
        // Get point (planar layout: x block, then y block)
        p[0] = points[broadcast*batch_index*nP*2+point_index];
        p[1] = points[broadcast*batch_index*nP*2+point_index + nP];
        // Step size for solver
        double h = (1.0 / nStepSolver[0]);
        // Iterate a number of times
        for(int t=0; t<nStepSolver[0]; t++) {
            // Get current cell
            cellidx = cuda_findcellidx_2D(p, nc[0], nc[1]);
            // Get index of A
            int As_idx = 6*cellidx;
            // Extract local A
            for(int i = 0; i < 6; i++){
                Alocal[i] = (As + As_idx + start_idx)[i];
            }
            // Compute velocity at current location
            A_times_b_2D(v, Alocal, p);
            // Compute midpoint
            pMid[0] = p[0] + h*v[0]/2.0;
            pMid[1] = p[1] + h*v[1]/2.0;
            // Compute velocity at midpoint (same cell's A)
            A_times_b_2D(vMid, Alocal, pMid);
            // Get index of B
            int Bs_idx = 6 * dim_index * nC + As_idx;
            // Get local B
            for(int i = 0; i < 6; i++){
                Blocal[i] = (Bs + Bs_idx)[i];
            }
            // Copy q
            q[0] = grad[dim_index*boxsize + index];
            q[1] = grad[dim_index*boxsize + index + nP];
            // Step 1: Compute u using the old location
            // Find current RHS (term 1 + term 2)
            A_times_b_2D(B_times_T, Blocal, p); // Term 1
            A_times_b_linear_2D(A_times_dTdAlpha, Alocal, q); // Term 2
            // Sum both terms
            u[0] = B_times_T[0] + A_times_dTdAlpha[0];
            u[1] = B_times_T[1] + A_times_dTdAlpha[1];
            // Step 2: Compute mid "point"
            qMid[0] = q[0] + h * u[0]/2.0;
            qMid[1] = q[1] + h * u[1]/2.0;
            // Step 3: Compute uMid
            A_times_b_2D(B_times_T, Blocal, pMid); // Term 1
            A_times_b_linear_2D(A_times_dTdAlpha, Alocal, qMid); // Term 2
            // Sum both terms
            uMid[0] = B_times_T[0] + A_times_dTdAlpha[0];
            uMid[1] = B_times_T[1] + A_times_dTdAlpha[1];
            // Update q
            q[0] += uMid[0] * h;
            q[1] += uMid[1] * h;
            // Update gradient
            grad[dim_index * boxsize + index] = q[0];
            grad[dim_index * boxsize + index + nP] = q[1];
            // Update p
            p[0] += vMid[0]*h;
            p[1] += vMid[1]*h;
        }
    }
    return;
}
// Backward CPAB pass in 3D: midpoint (RK2) co-integration of the point
// trajectory p and of q = d(transformed point)/d(theta_dim) for every
// (point, batch, theta-dimension) triple; q is accumulated in `grad`.
// Grid layout: x -> point, y -> batch, z -> theta dimension.
// NOTE(review): `circularity` is accepted but never used here — confirm
// wrapping is intentionally 1D-only.
__global__ void cpab_cuda_kernel_backward_3D(dim3 nthreads, const int n_theta, const int d, const int nP, const int nC,
                                             float* grad, const float* points, const float* As, const float* Bs,
                                             const int* nStepSolver, const int* nc, const int broadcast, const int* circularity) {
    // Allocate memory for computations
    float p[3], v[3], pMid[3], vMid[3], q[3], qMid[3];
    float B_times_T[3], A_times_dTdAlpha[3], u[3], uMid[3];
    float Alocal[12], Blocal[12];
    int cellidx;
    // Thread index
    int point_index = threadIdx.x + blockIdx.x * blockDim.x;
    int batch_index = threadIdx.y + blockIdx.y * blockDim.y;
    int dim_index = threadIdx.z + blockIdx.z * blockDim.z;
    // Make sure we are within bounds
    if(point_index < nP && batch_index < n_theta && dim_index < d){
        int index = 3 * nP * batch_index + point_index;
        int boxsize = 3 * nP * n_theta;
        // Define start index for the matrices belonging to this batch
        // batch * 12 params pr cell * 5 tetrahedra pr cell * cell in x * cell in y * cell in z
        int start_idx = batch_index * 12 * 5 * nc[0] * nc[1] * nc[2];
        // Get point (planar layout: x block, then y block, then z block)
        p[0] = points[broadcast*batch_index*nP*3+point_index];
        p[1] = points[broadcast*batch_index*nP*3+point_index + nP];
        p[2] = points[broadcast*batch_index*nP*3+point_index + 2 * nP];
        // Step size for solver
        double h = (1.0 / nStepSolver[0]);
        // Iterate a number of times
        for(int t=0; t<nStepSolver[0]; t++) {
            // Get current cell
            cellidx = cuda_findcellidx_3D(p, nc[0], nc[1], nc[2]);
            // Get index of A
            int As_idx = 12*cellidx;
            // Extract local A
            for(int i = 0; i < 12; i++){
                Alocal[i] = (As + As_idx + start_idx)[i];
            }
            // Compute velocity at current location
            A_times_b_3D(v, Alocal, p);
            // Compute midpoint
            pMid[0] = p[0] + h*v[0]/2.0;
            pMid[1] = p[1] + h*v[1]/2.0;
            pMid[2] = p[2] + h*v[2]/2.0;
            // Compute velocity at midpoint (same cell's A)
            A_times_b_3D(vMid, Alocal, pMid);
            // Get index of B
            int Bs_idx = 12 * dim_index * nC + As_idx;
            // Get local B
            for(int i = 0; i < 12; i++){
                Blocal[i] = (Bs + Bs_idx)[i];
            }
            // Copy q
            q[0] = grad[dim_index * boxsize + index];
            q[1] = grad[dim_index * boxsize + index + nP];
            q[2] = grad[dim_index * boxsize + index + 2*nP];
            // Step 1: Compute u using the old location
            // Find current RHS (term 1 + term 2)
            A_times_b_3D(B_times_T, Blocal, p); // Term 1
            A_times_b_linear_3D(A_times_dTdAlpha, Alocal, q); // Term 2
            // Sum both terms
            u[0] = B_times_T[0] + A_times_dTdAlpha[0];
            u[1] = B_times_T[1] + A_times_dTdAlpha[1];
            u[2] = B_times_T[2] + A_times_dTdAlpha[2];
            // Step 2: Compute mid "point"
            qMid[0] = q[0] + h * u[0]/2.0;
            qMid[1] = q[1] + h * u[1]/2.0;
            qMid[2] = q[2] + h * u[2]/2.0;
            // Step 3: Compute uMid
            A_times_b_3D(B_times_T, Blocal, pMid); // Term 1
            A_times_b_linear_3D(A_times_dTdAlpha, Alocal, qMid); // Term 2
            // Sum both terms
            uMid[0] = B_times_T[0] + A_times_dTdAlpha[0];
            uMid[1] = B_times_T[1] + A_times_dTdAlpha[1];
            uMid[2] = B_times_T[2] + A_times_dTdAlpha[2];
            // Update q
            q[0] += uMid[0] * h;
            q[1] += uMid[1] * h;
            q[2] += uMid[2] * h;
            // Update gradient
            grad[dim_index * boxsize + index] = q[0];
            grad[dim_index * boxsize + index + nP] = q[1];
            grad[dim_index * boxsize + index + 2 * nP] = q[2];
            // Update p
            p[0] += vMid[0]*h;
            p[1] += vMid[1]*h;
            p[2] += vMid[2]*h;
        }
    }
    return;
}
|
9a5d09cbdcc8d3f384768168fdb7b3a0016f83d4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright 2020 Facebook
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "model.h"
#include "cuda_helper.h"
// Adds a Transpose layer that permutes `input`'s dimensions by `perm`
// (given in user/C order) and returns its output tensor. The layer
// object is owned by the model's layer list.
Tensor FFModel::transpose(const Tensor& input,
                          const std::vector<int>& perm)
{
  Transpose* transpose = new Transpose(*this, input, perm);
  layers.push_back(transpose);
  return transpose->outputs[0];
}
// Constructs the op. `_perm` is given in user (C, row-major) order; Legion
// stores dimensions reversed, so each entry is mirrored (both index and
// value) into this->perm. Output dim i then takes the size of input dim
// perm[i].
Transpose::Transpose(FFModel& model,
                     const Tensor& input,
                     const std::vector<int>& _perm)
: Op(model, OP_TRANSPOSE, "Transpose_", input)
{
  assert(_perm.size() == input.numDim);
  // Use Legion indexing to store perm (reverse both index and value)
  for (int i = 0; i < input.numDim; i++)
    perm[i] = input.numDim - 1 - _perm[input.numDim - 1 - i];
  outputs[0].numDim = input.numDim;
  for (int i = 0; i < outputs[0].numDim; i++)
    outputs[0].adim[i] = input.adim[perm[i]];
  numOutputs = 1;
  numWeights = 0;
}
// Binds `input` as this op's input, creates the partitioned output
// tensor, and returns it (legacy wiring entry point).
Tensor Transpose::init_inout(FFModel& model,
                             const Tensor& input)
{
  inputs[0] = input;
  create_output_and_partition(model);
  return outputs[0];
}
// Transpose has no trainable parameters, so there is nothing to create.
void Transpose::create_weights(FFModel& model)
{
  // Do nothing
}
// Dispatches output/partition creation to the dimension-templated helper
// based on the input's runtime rank.
void Transpose::create_output_and_partition(FFModel& model)
{
  int dim = inputs[0].numDim;
  switch (dim) {
#define DIMFUNC(DIM) \
    case DIM: \
    { \
      task_is = model.get_or_create_task_is(DIM, name); \
      create_output_and_partition_with_dim<DIM>(model); \
      break; \
    }
    LEGION_FOREACH_N(DIMFUNC)
#undef DIMFUNC
    default:
    {
      // Unsupported rank: LEGION_FOREACH_N covers every supported DIM
      assert(false);
    }
  }
}
// Creates the NDIM-dimensional output tensor and the input logical
// partitions. Dimensions that are actually permuted (i != perm[i]) must
// not be partitioned across tasks, since a transposed element may land
// in another partition.
template<int NDIM>
void Transpose::create_output_and_partition_with_dim(FFModel& model)
{
  // Retrieve the task index space for the op
  task_is = IndexSpaceT<NDIM>(model.get_or_create_task_is(NDIM, name));
  Context ctx = model.config.lg_ctx;
  Runtime* runtime = model.config.lg_hlr;
  Rect<NDIM> part_rect = runtime->get_index_space_domain(ctx, task_is);
  // Currently require all dimensions being transposed to be unpartitioned
  for (int i = 0; i < NDIM; i++)
    if (i != perm[i])
      assert(part_rect.hi[i] == part_rect.lo[i]);
  // adim is stored reversed relative to the dims array expected here
  int dims[NDIM];
  for (int i = 0; i < NDIM; i++)
    dims[i] = outputs[0].adim[NDIM-1-i];
  outputs[0] = model.create_tensor<NDIM>(dims, DT_FLOAT, this);
  outputs[0].owner_op = this;
  outputs[0].owner_idx = 0;
  // Reuse the input's partition when its color space matches ours,
  // otherwise create a fresh disjoint partition
  Rect<NDIM> input_rect;
  input_rect = runtime->get_index_partition_color_space(
      ctx, inputs[0].part.get_index_partition());
  if (input_rect == part_rect) {
    input_lps[0] = inputs[0].part;
    input_grad_lps[0] = inputs[0].part_grad;
  } else {
    model.create_disjoint_partition<NDIM>(
        inputs[0], IndexSpaceT<NDIM>(task_is), input_lps[0], input_grad_lps[0]);
  }
}
// Transpose needs no per-device metadata (no cuDNN handles or scratch),
// so the init task returns NULL.
OpMeta* Transpose::init_task(const Task *task,
                             const std::vector<PhysicalRegion> &regions,
                             Context ctx, Runtime *runtime)
{
  return NULL;
}
// Launches the (no-op) init task across the op's index space.
// regions[0]: input (RO); regions[1]: output (WO).
void Transpose::init(const FFModel& ff)
{
  ArgumentMap argmap;
  Context ctx = ff.config.lg_ctx;
  Runtime* runtime = ff.config.lg_hlr;
  // FIX: the serialized task argument is this Transpose op; the original
  // passed sizeof(ElementBinary) (copy-paste from another op), which
  // serializes the wrong number of bytes.
  IndexLauncher launcher(TRANSPOSE_INIT_TASK_ID, task_is,
                         TaskArgument(this, sizeof(Transpose)), argmap,
                         Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
                         FFConfig::get_hash_id(std::string(name)));
  launcher.add_region_requirement(
      RegionRequirement(input_lps[0], 0/*projection id*/,
                        READ_ONLY, EXCLUSIVE, inputs[0].region));
  launcher.add_field(0, FID_DATA);
  launcher.add_region_requirement(
      RegionRequirement(outputs[0].part, 0/*projection id*/,
                        WRITE_ONLY, EXCLUSIVE, outputs[0].region));
  launcher.add_field(1, FID_DATA);
  runtime->execute_index_space(ctx, launcher);
}
// Per-launch stride table passed by value to the transpose kernel:
// linear strides of the input and output tensors plus the permutation
// mapping output dims to input dims.
struct TransposeStrides
{
  int num_dim;
  int in_strides[MAX_TENSOR_DIM], out_strides[MAX_TENSOR_DIM], perm[MAX_TENSOR_DIM];
};
// Element-wise transpose with blend: out[o] = beta*out[o] + in[perm(o)].
// Each thread decomposes its output offset into multi-dim coordinates via
// out_strides and re-linearizes them through in_strides[perm[...]].
__global__
void transpose_simple_kernel(coord_t volume,
                             const float* in_ptr,
                             float* out_ptr,
                             const TransposeStrides info,
                             const float beta)
{
  CUDA_KERNEL_LOOP(o_idx, volume)
  {
    coord_t i_idx = 0;
    coord_t t = o_idx;
    // Peel off one coordinate per dimension, most-significant first
    for (int i = info.num_dim-1; i >= 0; i--) {
      coord_t ratio = t / info.out_strides[i];
      t -= ratio * info.out_strides[i];
      i_idx += ratio * info.in_strides[info.perm[i]];
    }
    // FIX: blend instead of accumulate-on-top. The original `+=` computed
    // out*(1+beta) + in, which reads uninitialized memory when beta==0
    // (forward writes a WRITE_ONLY region) and double-counts the existing
    // gradient when beta==1 (backward accumulation).
    out_ptr[o_idx] = out_ptr[o_idx] * beta + in_ptr[i_idx];
  }
}
// Forward task body: builds the stride/permutation table from the region
// domains and launches the transpose kernel with beta=0 (overwrite).
// regions[0]: input (RO); regions[1]: output (WO).
// NOTE(review): no hipGetLastError()/sync check after the launch — launch
// failures would surface only at a later synchronizing call.
__host__
void Transpose::forward_task(const Task* task,
                             const std::vector<PhysicalRegion> &regions,
                             Context ctx, Runtime* runtime)
{
  assert(regions.size() == 2);
  assert(task->regions.size() == 2);
  const Transpose* transpose = (const Transpose*) task->args;
  Domain in_domain = runtime->get_index_space_domain(
      ctx, task->regions[0].region.get_index_space());
  Domain out_domain = runtime->get_index_space_domain(
      ctx, task->regions[1].region.get_index_space());
  // Output dim i must match input dim perm[i]
  for (int i = 0; i < out_domain.get_dim(); i++) {
    assert(out_domain.hi()[i] == in_domain.hi()[transpose->perm[i]]);
    assert(out_domain.lo()[i] == in_domain.lo()[transpose->perm[i]]);
  }
  const float* in_ptr = helperGetTensorPointerRO<float>(
      regions[0], task->regions[0], FID_DATA, ctx, runtime);
  float* out_ptr = helperGetTensorPointerWO<float>(
      regions[1], task->regions[1], FID_DATA, ctx, runtime);
  // Row-minor cumulative strides for both tensors
  TransposeStrides info;
  info.num_dim = out_domain.get_dim();
  for (int i = 0; i < info.num_dim; i++) {
    int in_dim_size = (in_domain.hi()[i] - in_domain.lo()[i] + 1);
    int out_dim_size = (out_domain.hi()[i] - out_domain.lo()[i] + 1);
    info.in_strides[i] = (i == 0) ? 1 : info.in_strides[i-1] * in_dim_size;
    info.out_strides[i] = (i == 0) ? 1 : info.out_strides[i-1] * out_dim_size;
    info.perm[i] = transpose->perm[i];
  }
  hipLaunchKernelGGL(( transpose_simple_kernel), dim3(GET_BLOCKS(out_domain.get_volume())), dim3(CUDA_NUM_THREADS), 0, 0,
      out_domain.get_volume(), in_ptr, out_ptr, info, 0.0f/*beta*/);
}
// Launches the forward task across the op's index space.
// regions[0]: input (RO); regions[1]: output (WO).
void Transpose::forward(const FFModel& ff)
{
  ArgumentMap argmap;
  Context ctx = ff.config.lg_ctx;
  Runtime* runtime = ff.config.lg_hlr;
  IndexLauncher launcher(TRANSPOSE_FWD_TASK_ID, task_is,
                         TaskArgument(this, sizeof(Transpose)), argmap,
                         Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
                         FFConfig::get_hash_id(std::string(name)));
  launcher.add_region_requirement(
      RegionRequirement(input_lps[0], 0/*projection id*/,
                        READ_ONLY, EXCLUSIVE, inputs[0].region));
  launcher.add_field(0, FID_DATA);
  launcher.add_region_requirement(
      RegionRequirement(outputs[0].part, 0/*projection id*/,
                        WRITE_ONLY, EXCLUSIVE, outputs[0].region));
  launcher.add_field(1, FID_DATA);
  runtime->execute_index_space(ctx, launcher);
}
// Backward task body: the gradient of a transpose is the inverse
// transpose, so out_grad is scattered back into in_grad using the
// INVERSE permutation (note info.perm[transpose->perm[i]] = i below).
// The kernel is launched with beta=1 so the incoming gradient is
// accumulated into in_grad rather than overwriting it.
// regions[0]: output_grad (RO); regions[1]: input_grad (RW).
__host__
void Transpose::backward_task(const Task* task,
                              const std::vector<PhysicalRegion> &regions,
                              Context ctx, Runtime* runtime)
{
  assert(regions.size() == 2);
  assert(task->regions.size() == 2);
  const Transpose* transpose = (const Transpose*) task->args;
  Domain out_grad_domain = runtime->get_index_space_domain(
      ctx, task->regions[0].region.get_index_space());
  Domain in_grad_domain = runtime->get_index_space_domain(
      ctx, task->regions[1].region.get_index_space());
  for (int i = 0; i < out_grad_domain.get_dim(); i++) {
    assert(out_grad_domain.hi()[i] == in_grad_domain.hi()[transpose->perm[i]]);
    assert(out_grad_domain.lo()[i] == in_grad_domain.lo()[transpose->perm[i]]);
  }
  const float* out_grad_ptr = helperGetTensorPointerRO<float>(
      regions[0], task->regions[0], FID_DATA, ctx, runtime);
  float* in_grad_ptr = helperGetTensorPointerRW<float>(
      regions[1], task->regions[1], FID_DATA, ctx, runtime);
  // From the kernel's point of view, out_grad is the "input" and in_grad
  // the "output"; hence in_strides come from out_grad_domain and the
  // permutation is inverted.
  TransposeStrides info;
  info.num_dim = in_grad_domain.get_dim();
  for (int i = 0; i < info.num_dim; i++) {
    int in_dim_size = (out_grad_domain.hi()[i] - out_grad_domain.lo()[i] + 1);
    int out_dim_size = (in_grad_domain.hi()[i] - in_grad_domain.lo()[i] + 1);
    info.in_strides[i] = (i == 0) ? 1 : info.in_strides[i-1] * in_dim_size;
    info.out_strides[i] = (i == 0) ? 1 : info.out_strides[i-1] * out_dim_size;
    info.perm[transpose->perm[i]] = i;
  }
  hipLaunchKernelGGL(( transpose_simple_kernel), dim3(GET_BLOCKS(in_grad_domain.get_volume())), dim3(CUDA_NUM_THREADS), 0, 0,
      in_grad_domain.get_volume(), out_grad_ptr, in_grad_ptr, info, 1.0f/*beta*/);
}
// Launches the backward task across the op's index space.
void Transpose::backward(const FFModel& ff)
{
  ArgumentMap argmap;
  Context ctx = ff.config.lg_ctx;
  Runtime* runtime = ff.config.lg_hlr;
  IndexLauncher launcher(TRANSPOSE_BWD_TASK_ID, task_is,
                         TaskArgument(this, sizeof(Transpose)), argmap,
                         Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
                         FFConfig::get_hash_id(std::string(name)));
  // regions[0](I): output_grad
  launcher.add_region_requirement(
      RegionRequirement(outputs[0].part_grad, 0/*projection id*/,
                        READ_ONLY, EXCLUSIVE, outputs[0].region_grad));
  launcher.add_field(0, FID_DATA);
  // regions[1](I/O): input_grad
  launcher.add_region_requirement(
      RegionRequirement(input_grad_lps[0], 0/*projection id*/,
                        READ_WRITE, EXCLUSIVE, inputs[0].region_grad));
  launcher.add_field(1, FID_DATA);
  runtime->execute_index_space(ctx, launcher);
}
// Simulator cost-model hook; not implemented for Transpose, so it
// reports failure (false) and leaves forward_time/backward_time unset.
bool Transpose::measure_compute_time(Simulator* sim,
                                     const ParallelConfig& pc,
                                     float& forward_time,
                                     float& backward_time)
{
  //TODO: implement measure_forward
  return false;
}
| 9a5d09cbdcc8d3f384768168fdb7b3a0016f83d4.cu | /* Copyright 2020 Facebook
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "model.h"
#include "cuda_helper.h"
Tensor FFModel::transpose(const Tensor& input,
const std::vector<int>& perm)
{
Transpose* transpose = new Transpose(*this, input, perm);
layers.push_back(transpose);
return transpose->outputs[0];
}
Transpose::Transpose(FFModel& model,
const Tensor& input,
const std::vector<int>& _perm)
: Op(model, OP_TRANSPOSE, "Transpose_", input)
{
assert(_perm.size() == input.numDim);
// Use Legion indexing to store perm
for (int i = 0; i < input.numDim; i++)
perm[i] = input.numDim - 1 - _perm[input.numDim - 1 - i];
outputs[0].numDim = input.numDim;
for (int i = 0; i < outputs[0].numDim; i++)
outputs[0].adim[i] = input.adim[perm[i]];
numOutputs = 1;
numWeights = 0;
}
Tensor Transpose::init_inout(FFModel& model,
const Tensor& input)
{
inputs[0] = input;
create_output_and_partition(model);
return outputs[0];
}
void Transpose::create_weights(FFModel& model)
{
// Do nothing
}
void Transpose::create_output_and_partition(FFModel& model)
{
int dim = inputs[0].numDim;
switch (dim) {
#define DIMFUNC(DIM) \
case DIM: \
{ \
task_is = model.get_or_create_task_is(DIM, name); \
create_output_and_partition_with_dim<DIM>(model); \
break; \
}
LEGION_FOREACH_N(DIMFUNC)
#undef DIMFUNC
default:
{
// Unsupported dim for ElementWiseUnary operator
assert(false);
}
}
}
template<int NDIM>
void Transpose::create_output_and_partition_with_dim(FFModel& model)
{
// Retrive the task indexspace for the op
task_is = IndexSpaceT<NDIM>(model.get_or_create_task_is(NDIM, name));
Context ctx = model.config.lg_ctx;
Runtime* runtime = model.config.lg_hlr;
Rect<NDIM> part_rect = runtime->get_index_space_domain(ctx, task_is);
// Current require all dimensions being transposed should not be partitioned
for (int i = 0; i < NDIM; i++)
if (i != perm[i])
assert(part_rect.hi[i] == part_rect.lo[i]);
int dims[NDIM];
for (int i = 0; i < NDIM; i++)
dims[i] = outputs[0].adim[NDIM-1-i];
outputs[0] = model.create_tensor<NDIM>(dims, DT_FLOAT, this);
outputs[0].owner_op = this;
outputs[0].owner_idx = 0;
Rect<NDIM> input_rect;
input_rect = runtime->get_index_partition_color_space(
ctx, inputs[0].part.get_index_partition());
if (input_rect == part_rect) {
input_lps[0] = inputs[0].part;
input_grad_lps[0] = inputs[0].part_grad;
} else {
model.create_disjoint_partition<NDIM>(
inputs[0], IndexSpaceT<NDIM>(task_is), input_lps[0], input_grad_lps[0]);
}
}
OpMeta* Transpose::init_task(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime *runtime)
{
return NULL;
}
// Launches the (no-op) init task across the op's index space.
// regions[0]: input (RO); regions[1]: output (WO).
void Transpose::init(const FFModel& ff)
{
  ArgumentMap argmap;
  Context ctx = ff.config.lg_ctx;
  Runtime* runtime = ff.config.lg_hlr;
  // FIX: the serialized task argument is this Transpose op; the original
  // passed sizeof(ElementBinary) (copy-paste from another op), which
  // serializes the wrong number of bytes.
  IndexLauncher launcher(TRANSPOSE_INIT_TASK_ID, task_is,
                         TaskArgument(this, sizeof(Transpose)), argmap,
                         Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
                         FFConfig::get_hash_id(std::string(name)));
  launcher.add_region_requirement(
      RegionRequirement(input_lps[0], 0/*projection id*/,
                        READ_ONLY, EXCLUSIVE, inputs[0].region));
  launcher.add_field(0, FID_DATA);
  launcher.add_region_requirement(
      RegionRequirement(outputs[0].part, 0/*projection id*/,
                        WRITE_ONLY, EXCLUSIVE, outputs[0].region));
  launcher.add_field(1, FID_DATA);
  runtime->execute_index_space(ctx, launcher);
}
// Per-launch stride table passed by value to the transpose kernel:
// linear strides of the input and output tensors plus the permutation
// mapping output dims to input dims.
struct TransposeStrides
{
  int num_dim;
  int in_strides[MAX_TENSOR_DIM], out_strides[MAX_TENSOR_DIM], perm[MAX_TENSOR_DIM];
};
// Element-wise transpose with blend: out[o] = beta*out[o] + in[perm(o)].
// Each thread decomposes its output offset into multi-dim coordinates via
// out_strides and re-linearizes them through in_strides[perm[...]].
__global__
void transpose_simple_kernel(coord_t volume,
                             const float* in_ptr,
                             float* out_ptr,
                             const TransposeStrides info,
                             const float beta)
{
  CUDA_KERNEL_LOOP(o_idx, volume)
  {
    coord_t i_idx = 0;
    coord_t t = o_idx;
    // Peel off one coordinate per dimension, most-significant first
    for (int i = info.num_dim-1; i >= 0; i--) {
      coord_t ratio = t / info.out_strides[i];
      t -= ratio * info.out_strides[i];
      i_idx += ratio * info.in_strides[info.perm[i]];
    }
    // FIX: blend instead of accumulate-on-top. The original `+=` computed
    // out*(1+beta) + in, which reads uninitialized memory when beta==0
    // (forward writes a WRITE_ONLY region) and double-counts the existing
    // gradient when beta==1 (backward accumulation).
    out_ptr[o_idx] = out_ptr[o_idx] * beta + in_ptr[i_idx];
  }
}
// Forward task body: builds the stride/permutation table from the region
// domains and launches the transpose kernel with beta=0 (overwrite).
// regions[0]: input (RO); regions[1]: output (WO).
// NOTE(review): no cudaGetLastError()/sync check after the launch —
// launch failures would surface only at a later synchronizing call.
__host__
void Transpose::forward_task(const Task* task,
                             const std::vector<PhysicalRegion> &regions,
                             Context ctx, Runtime* runtime)
{
  assert(regions.size() == 2);
  assert(task->regions.size() == 2);
  const Transpose* transpose = (const Transpose*) task->args;
  Domain in_domain = runtime->get_index_space_domain(
      ctx, task->regions[0].region.get_index_space());
  Domain out_domain = runtime->get_index_space_domain(
      ctx, task->regions[1].region.get_index_space());
  // Output dim i must match input dim perm[i]
  for (int i = 0; i < out_domain.get_dim(); i++) {
    assert(out_domain.hi()[i] == in_domain.hi()[transpose->perm[i]]);
    assert(out_domain.lo()[i] == in_domain.lo()[transpose->perm[i]]);
  }
  const float* in_ptr = helperGetTensorPointerRO<float>(
      regions[0], task->regions[0], FID_DATA, ctx, runtime);
  float* out_ptr = helperGetTensorPointerWO<float>(
      regions[1], task->regions[1], FID_DATA, ctx, runtime);
  // Row-minor cumulative strides for both tensors
  TransposeStrides info;
  info.num_dim = out_domain.get_dim();
  for (int i = 0; i < info.num_dim; i++) {
    int in_dim_size = (in_domain.hi()[i] - in_domain.lo()[i] + 1);
    int out_dim_size = (out_domain.hi()[i] - out_domain.lo()[i] + 1);
    info.in_strides[i] = (i == 0) ? 1 : info.in_strides[i-1] * in_dim_size;
    info.out_strides[i] = (i == 0) ? 1 : info.out_strides[i-1] * out_dim_size;
    info.perm[i] = transpose->perm[i];
  }
  transpose_simple_kernel<<<GET_BLOCKS(out_domain.get_volume()), CUDA_NUM_THREADS>>>(
      out_domain.get_volume(), in_ptr, out_ptr, info, 0.0f/*beta*/);
}
// Launches the forward tasks over the index space.
// regions[0](I): input tensor; regions[1](O): output tensor.
void Transpose::forward(const FFModel& ff)
{
  ArgumentMap argmap;
  Context ctx = ff.config.lg_ctx;
  Runtime* runtime = ff.config.lg_hlr;
  IndexLauncher launcher(TRANSPOSE_FWD_TASK_ID, task_is,
      TaskArgument(this, sizeof(Transpose)), argmap,
      Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
      FFConfig::get_hash_id(std::string(name)));
  launcher.add_region_requirement(
      RegionRequirement(input_lps[0], 0/*projection id*/,
          READ_ONLY, EXCLUSIVE, inputs[0].region));
  launcher.add_field(0, FID_DATA);
  launcher.add_region_requirement(
      RegionRequirement(outputs[0].part, 0/*projection id*/,
          WRITE_ONLY, EXCLUSIVE, outputs[0].region));
  launcher.add_field(1, FID_DATA);
  runtime->execute_index_space(ctx, launcher);
}
/**
 * Backward task body, run once per shard.
 * regions[0](I): output gradient; regions[1](I/O): input gradient.
 * Reuses transpose_simple_kernel with the inverse permutation: the kernel's
 * "input" is out_grad and its "output" is in_grad.
 * (Fix: the parameter list contained the mis-encoded token `®ions`,
 * which is HTML-entity corruption of `&regions`.)
 */
__host__
void Transpose::backward_task(const Task* task,
                              const std::vector<PhysicalRegion> &regions,
                              Context ctx, Runtime* runtime)
{
  assert(regions.size() == 2);
  assert(task->regions.size() == 2);
  const Transpose* transpose = (const Transpose*) task->args;
  Domain out_grad_domain = runtime->get_index_space_domain(
      ctx, task->regions[0].region.get_index_space());
  Domain in_grad_domain = runtime->get_index_space_domain(
      ctx, task->regions[1].region.get_index_space());
  // Same shape relation as forward: out dim i matches in dim perm[i].
  for (int i = 0; i < out_grad_domain.get_dim(); i++) {
    assert(out_grad_domain.hi()[i] == in_grad_domain.hi()[transpose->perm[i]]);
    assert(out_grad_domain.lo()[i] == in_grad_domain.lo()[transpose->perm[i]]);
  }
  const float* out_grad_ptr = helperGetTensorPointerRO<float>(
      regions[0], task->regions[0], FID_DATA, ctx, runtime);
  float* in_grad_ptr = helperGetTensorPointerRW<float>(
      regions[1], task->regions[1], FID_DATA, ctx, runtime);
  TransposeStrides info;
  info.num_dim = in_grad_domain.get_dim();
  for (int i = 0; i < info.num_dim; i++) {
    // From the kernel's point of view the input is out_grad and the output
    // is in_grad, hence the swapped domains below.
    int in_dim_size = (out_grad_domain.hi()[i] - out_grad_domain.lo()[i] + 1);
    int out_dim_size = (in_grad_domain.hi()[i] - in_grad_domain.lo()[i] + 1);
    info.in_strides[i] = (i == 0) ? 1 : info.in_strides[i-1] * in_dim_size;
    info.out_strides[i] = (i == 0) ? 1 : info.out_strides[i-1] * out_dim_size;
    // Invert the permutation for the reverse mapping.
    info.perm[transpose->perm[i]] = i;
  }
  // beta = 1: accumulate into the existing input gradient.
  transpose_simple_kernel<<<GET_BLOCKS(in_grad_domain.get_volume()), CUDA_NUM_THREADS>>>(
      in_grad_domain.get_volume(), out_grad_ptr, in_grad_ptr, info, 1.0f/*beta*/);
}
// Launches the backward tasks over the index space.
void Transpose::backward(const FFModel& ff)
{
  ArgumentMap argmap;
  Context ctx = ff.config.lg_ctx;
  Runtime* runtime = ff.config.lg_hlr;
  IndexLauncher launcher(TRANSPOSE_BWD_TASK_ID, task_is,
      TaskArgument(this, sizeof(Transpose)), argmap,
      Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
      FFConfig::get_hash_id(std::string(name)));
  // regions[0](I): output_grad
  launcher.add_region_requirement(
      RegionRequirement(outputs[0].part_grad, 0/*projection id*/,
          READ_ONLY, EXCLUSIVE, outputs[0].region_grad));
  launcher.add_field(0, FID_DATA);
  // regions[1](I/O): input_grad
  launcher.add_region_requirement(
      RegionRequirement(input_grad_lps[0], 0/*projection id*/,
          READ_WRITE, EXCLUSIVE, inputs[0].region_grad));
  launcher.add_field(1, FID_DATA);
  runtime->execute_index_space(ctx, launcher);
}
// Simulator hook for estimating forward/backward runtime. Not implemented:
// always returns false (no timing estimate available), leaving the output
// parameters untouched.
bool Transpose::measure_compute_time(Simulator* sim,
                                     const ParallelConfig& pc,
                                     float& forward_time,
                                     float& backward_time)
{
  //TODO: implement measure_forward
  return false;
}
|
220815db1c6a4ff32f346a75fcabd32683c4c97a.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/Dispatch.h>
#include <ATen/native/ForeachUtils.h>
#include <ATen/native/hip/ForeachFunctors.cuh>
namespace at { namespace native {
// Out-of-place foreach variant: allocates an empty_like result for every
// input tensor and applies BinaryOpScalarListFunctor (one scalar per tensor)
// over the (inputs, results) pair of lists via multi_tensor_apply.
// Returns the list of freshly allocated result tensors.
template<template<class> class Op>
std::vector<Tensor> foreach_binary_op(TensorList tensors, at::ArrayRef<double> scalars) {
    std::vector<std::vector<at::Tensor>> tensor_lists;
    std::vector<at::Tensor> vec_res;
    for (const auto& t: tensors) {
        vec_res.emplace_back(at::native::empty_like(t));
    }
    // tensor_lists[0] = inputs, tensor_lists[1] = outputs.
    tensor_lists.emplace_back(tensors.vec());
    tensor_lists.emplace_back(vec_res);
    // Dispatch on the first tensor's dtype; the fast-route check in the
    // callers guarantees the lists are homogeneous.
    AT_DISPATCH_ALL_TYPES_AND2(kBFloat16, kHalf, tensors[0].scalar_type(), "foreach_binary_op_scalarlist_cuda", [&]() {
        multi_tensor_apply<2>(tensor_lists, scalars, BinaryOpScalarListFunctor<scalar_t, Op>());
    });
    return tensor_lists[1];
}
// In-place foreach variant: applies BinaryOpScalarListFunctor_ directly to
// the input tensors (single tensor list, depth 1) via multi_tensor_apply.
template<template<class> class Op>
void foreach_binary_op_(TensorList tensors, at::ArrayRef<double> scalars) {
    std::vector<std::vector<at::Tensor>> tensor_lists;
    tensor_lists.emplace_back(tensors.vec());
    AT_DISPATCH_ALL_TYPES_AND2(kBFloat16, kHalf, tensors[0].scalar_type(), "foreach_binary_op_scalarlist_cuda_", [&]() {
        multi_tensor_apply<1>(tensor_lists, scalars, BinaryOpScalarListFunctor_<scalar_t, Op>());
    });
}
// Generates the CUDA entry points for a foreach binary op that takes one
// scalar per tensor ("scalar list"):
//   foreach_tensor_<NAME>_scalarlist_kernel_cuda_  -- in-place
//   foreach_tensor_<NAME>_scalarlist_kernel_cuda   -- out-of-place
// Each entry point validates the API restrictions, then either takes the
// fused multi-tensor fast path or falls back to the slow per-tensor
// implementation when the fast route is unavailable.
#define FOREACH_BINARY_OP_SCALARLIST(NAME, OP)                                                                         \
    void foreach_tensor_##NAME##_scalarlist_kernel_cuda_(TensorList tensors, at::ArrayRef<double> scalars) {           \
        check_foreach_api_restrictions(tensors);                                                                       \
        if (!can_use_fast_route(tensors, scalars)) {                                                                   \
            return at::native::foreach_tensor_##NAME##_scalarlist_kernel_slow_(tensors, scalars);                      \
        }                                                                                                              \
        foreach_binary_op_<OP>(tensors, scalars);                                                                      \
    }                                                                                                                  \
                                                                                                                       \
    std::vector<Tensor> foreach_tensor_##NAME##_scalarlist_kernel_cuda(TensorList tensors,                             \
                                                                       at::ArrayRef<double> scalars) {                 \
        check_foreach_api_restrictions(tensors);                                                                       \
        if (!can_use_fast_route(tensors, scalars)) {                                                                   \
            return at::native::foreach_tensor_##NAME##_scalarlist_kernel_slow(tensors, scalars);                       \
        }                                                                                                              \
        return foreach_binary_op<OP>(tensors, scalars);                                                                \
    }

// Instantiate the four arithmetic entry points.
FOREACH_BINARY_OP_SCALARLIST(add, std::plus);
FOREACH_BINARY_OP_SCALARLIST(sub, std::minus);
FOREACH_BINARY_OP_SCALARLIST(mul, std::multiplies);
FOREACH_BINARY_OP_SCALARLIST(div, std::divides);
}} // namespace at::native
| 220815db1c6a4ff32f346a75fcabd32683c4c97a.cu | #include <ATen/Dispatch.h>
#include <ATen/native/ForeachUtils.h>
#include <ATen/native/cuda/ForeachFunctors.cuh>
namespace at { namespace native {
// Out-of-place foreach variant: allocates an empty_like result for every
// input tensor and applies BinaryOpScalarListFunctor (one scalar per tensor)
// over the (inputs, results) pair of lists via multi_tensor_apply.
// Returns the list of freshly allocated result tensors.
template<template<class> class Op>
std::vector<Tensor> foreach_binary_op(TensorList tensors, at::ArrayRef<double> scalars) {
    std::vector<std::vector<at::Tensor>> tensor_lists;
    std::vector<at::Tensor> vec_res;
    for (const auto& t: tensors) {
        vec_res.emplace_back(at::native::empty_like(t));
    }
    // tensor_lists[0] = inputs, tensor_lists[1] = outputs.
    tensor_lists.emplace_back(tensors.vec());
    tensor_lists.emplace_back(vec_res);
    // Dispatch on the first tensor's dtype; the fast-route check in the
    // callers guarantees the lists are homogeneous.
    AT_DISPATCH_ALL_TYPES_AND2(kBFloat16, kHalf, tensors[0].scalar_type(), "foreach_binary_op_scalarlist_cuda", [&]() {
        multi_tensor_apply<2>(tensor_lists, scalars, BinaryOpScalarListFunctor<scalar_t, Op>());
    });
    return tensor_lists[1];
}
// In-place foreach variant: applies BinaryOpScalarListFunctor_ directly to
// the input tensors (single tensor list, depth 1) via multi_tensor_apply.
template<template<class> class Op>
void foreach_binary_op_(TensorList tensors, at::ArrayRef<double> scalars) {
    std::vector<std::vector<at::Tensor>> tensor_lists;
    tensor_lists.emplace_back(tensors.vec());
    AT_DISPATCH_ALL_TYPES_AND2(kBFloat16, kHalf, tensors[0].scalar_type(), "foreach_binary_op_scalarlist_cuda_", [&]() {
        multi_tensor_apply<1>(tensor_lists, scalars, BinaryOpScalarListFunctor_<scalar_t, Op>());
    });
}
// Generates the CUDA entry points for a foreach binary op that takes one
// scalar per tensor ("scalar list"):
//   foreach_tensor_<NAME>_scalarlist_kernel_cuda_  -- in-place
//   foreach_tensor_<NAME>_scalarlist_kernel_cuda   -- out-of-place
// Each entry point validates the API restrictions, then either takes the
// fused multi-tensor fast path or falls back to the slow per-tensor
// implementation when the fast route is unavailable.
#define FOREACH_BINARY_OP_SCALARLIST(NAME, OP)                                                                         \
    void foreach_tensor_##NAME##_scalarlist_kernel_cuda_(TensorList tensors, at::ArrayRef<double> scalars) {           \
        check_foreach_api_restrictions(tensors);                                                                       \
        if (!can_use_fast_route(tensors, scalars)) {                                                                   \
            return at::native::foreach_tensor_##NAME##_scalarlist_kernel_slow_(tensors, scalars);                      \
        }                                                                                                              \
        foreach_binary_op_<OP>(tensors, scalars);                                                                      \
    }                                                                                                                  \
                                                                                                                       \
    std::vector<Tensor> foreach_tensor_##NAME##_scalarlist_kernel_cuda(TensorList tensors,                             \
                                                                       at::ArrayRef<double> scalars) {                 \
        check_foreach_api_restrictions(tensors);                                                                       \
        if (!can_use_fast_route(tensors, scalars)) {                                                                   \
            return at::native::foreach_tensor_##NAME##_scalarlist_kernel_slow(tensors, scalars);                       \
        }                                                                                                              \
        return foreach_binary_op<OP>(tensors, scalars);                                                                \
    }

// Instantiate the four arithmetic entry points.
FOREACH_BINARY_OP_SCALARLIST(add, std::plus);
FOREACH_BINARY_OP_SCALARLIST(sub, std::minus);
FOREACH_BINARY_OP_SCALARLIST(mul, std::multiplies);
FOREACH_BINARY_OP_SCALARLIST(div, std::divides);
}} // namespace at::native
|
102254e26dfeb4af3d8481c4381a5fe0ed341cf1.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "dual.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *err = NULL;
hipMalloc(&err, XSIZE*YSIZE);
float *Y = NULL;
hipMalloc(&Y, XSIZE*YSIZE);
float *X = NULL;
hipMalloc(&X, XSIZE*YSIZE);
float *Z = NULL;
hipMalloc(&Z, XSIZE*YSIZE);
unsigned int size = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
dual), dim3(gridBlock),dim3(threadBlock), 0, 0, err,Y,X,Z,size);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
dual), dim3(gridBlock),dim3(threadBlock), 0, 0, err,Y,X,Z,size);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
dual), dim3(gridBlock),dim3(threadBlock), 0, 0, err,Y,X,Z,size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 102254e26dfeb4af3d8481c4381a5fe0ed341cf1.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "dual.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Auto-generated benchmark driver for the `dual` kernel: sweeps 20 block
// shapes over the first argv[1] matrix sizes from matrices_, timing 1000
// launches after a 10-launch warmup and printing
// [microseconds,(BLOCKX,BLOCKY),(XSIZE,YSIZE)] per configuration.
int main(int argc, char **argv) {
cudaSetDevice(0);
// argv[1] = number of matrix sizes to benchmark (no argc check — crashes if missing).
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
// NOTE(review): allocations are XSIZE*YSIZE *bytes*, not
// XSIZE*YSIZE*sizeof(float) — confirm the `dual` kernel (size arg = 1)
// never indexes beyond this; buffers are also never freed (leaks per
// iteration, tolerated in this throwaway benchmark).
float *err = NULL;
cudaMalloc(&err, XSIZE*YSIZE);
float *Y = NULL;
cudaMalloc(&Y, XSIZE*YSIZE);
float *X = NULL;
cudaMalloc(&X, XSIZE*YSIZE);
float *Z = NULL;
cudaMalloc(&Z, XSIZE*YSIZE);
unsigned int size = 1;
// Round grid extents up to a multiple of the block shape.
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0); // presumably a no-op free to force CUDA context creation — verify
dual<<<gridBlock,threadBlock>>>(err,Y,X,Z,size);
cudaDeviceSynchronize();
// Warmup launches (untimed).
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
dual<<<gridBlock,threadBlock>>>(err,Y,X,Z,size);
}
// Timed region: 1000 asynchronous launches; no final sync, so the measured
// time covers launch overhead plus whatever work completes before now().
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
dual<<<gridBlock,threadBlock>>>(err,Y,X,Z,size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
13547fd3b7094c7d958e31bc4c393f90b22e67fe.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2020-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <hip/hip_cooperative_groups.h>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/iterator.cuh>
#include <cudf/detail/sequence.hpp>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/detail/utilities/integer_utils.hpp>
#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/lists/lists_column_device_view.cuh>
#include <cudf/scalar/scalar_factories.hpp>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/table/table.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/bit.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/traits.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_buffer.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/binary_search.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/scan.h>
#include <type_traits>
#include "row_conversion.hpp"
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700
#define ASYNC_MEMCPY_SUPPORTED
#endif
#if !defined(__CUDA_ARCH__) || defined(ASYNC_MEMCPY_SUPPORTED)
#include <cuda/barrier>
#endif // #if !defined(__CUDA_ARCH__) || defined(ASYNC_MEMCPY_SUPPORTED)
#include <algorithm>
#include <cstdarg>
#include <cstdint>
#include <iostream>
#include <iterator>
#include <limits>
#include <optional>
#include <tuple>
namespace {
// Byte alignment of every JCUDF row (rows are padded up to a multiple of this).
constexpr auto JCUDF_ROW_ALIGNMENT = 8;
// Upper bound on rows per batch: batch output must fit in a size_type-indexed column.
constexpr auto MAX_BATCH_SIZE = std::numeric_limits<cudf::size_type>::max();
// Number of rows each block processes in the two kernels. Tuned via nsight
constexpr auto NUM_STRING_ROWS_PER_BLOCK_TO_ROWS = 1024;
constexpr auto NUM_STRING_ROWS_PER_BLOCK_FROM_ROWS = 64;
// Grid-size clamp for the string-copy kernels.
constexpr auto MIN_STRING_BLOCKS = 32;
constexpr auto MAX_STRING_BLOCKS = MAX_BATCH_SIZE;
constexpr auto NUM_WARPS_IN_BLOCK = 32;
} // anonymous namespace
// needed to suppress warning about cuda::barrier
#pragma nv_diag_suppress static_var_with_dynamic_init
using namespace cudf;
using detail::make_device_uvector_async;
using rmm::device_uvector;
#ifdef ASYNC_MEMCPY_SUPPORTED
using cuda::aligned_size_t;
#else
template <std::size_t> using aligned_size_t = size_t; // Local stub for cuda::aligned_size_t.
#endif // ASYNC_MEMCPY_SUPPORTED
namespace cudf {
namespace jni {
namespace detail {
/*
* This module converts data from row-major to column-major and from column-major to row-major. It
* is a transpose of the data of sorts, but there are a few complicating factors. They are spelled
* out below:
*
* Row Batches:
* The row data has to fit inside a cuDF column, which limits it to 2 gigs currently. The calling
* code attempts to keep the data size under 2 gigs, but due to padding this isn't always the case,
* so being able to break this up into multiple columns is necessary. Internally, this is referred
* to as the row batch, which is a group of rows that will fit into this 2 gig space requirement.
* There are typically 1 of these batches, but there can be 2.
*
* Async Memcpy:
* The CUDA blocks are using memcpy_async, which allows for the device to schedule memcpy operations
* and then wait on them to complete at a later time with a barrier. On Ampere or later hardware
* there is dedicated hardware to do this copy and on pre-Ampere it should generate the same code
* that a hand-rolled loop would generate, so performance should be the same or better than a
* hand-rolled kernel.
*
* Tile Info:
* Each CUDA block will work on a single tile info before exiting. This single tile consumes all
* available shared memory. The kernel reads data into shared memory and then back out from shared
* memory to device memory via memcpy_async. This kernel is completely memory bound.
*
* Batch Data:
* This structure contains all the row batches and some book-keeping data necessary for the batches
* such as row numbers for the batches.
*
* Tiles:
* The tile info describes a tile of data to process. In a GPU with 48KB this equates to about 221
* bytes in each direction of a table. The tiles are kept as square as possible to attempt to
* coalesce memory operations. The taller a tile is the better coalescing of columns, but row
* coalescing suffers. The wider a tile is the better the row coalescing, but columns coalescing
* suffers. The code attempts to produce a square tile to balance the coalescing. It starts by
* figuring out the optimal byte length and then adding columns to the data until the tile is too
* large. Since rows are different width with different alignment requirements, this isn't typically
* exact. Once a width is found the tiles are generated vertically with that width and height and
* then the process repeats. This means all the tiles will be the same height, but will have
* different widths based on what columns they encompass. Tiles in a vertical row will all have the
* same dimensions.
*
* --------------------------------
* | 4 5.0f || True 8 3 1 |
* | 3 6.0f || False 3 1 1 |
* | 2 7.0f || True 7 4 1 |
* | 1 8.0f || False 2 5 1 |
* --------------------------------
* | 0 9.0f || True 6 7 1 |
* ...
*/
/**
* @brief The CUDA blocks work on one tile_info struct of data.
* This structure defines the workspaces for the blocks.
*
*/
struct tile_info {
  int start_col; // inclusive first column covered by this tile
  int start_row; // inclusive first row covered by this tile
  int end_col;   // inclusive last column
  int end_row;   // inclusive last row
  int batch_number; // which row batch this tile's rows belong to
  // Bytes of shared memory one row of this tile occupies, padded to
  // JCUDF_ROW_ALIGNMENT.
  __device__ inline size_type get_shared_row_size(size_type const *const col_offsets,
                                                  size_type const *const col_sizes) const {
    // this calculation is invalid if there are holes in the data such as a variable-width column.
    // It is wrong in a safe way in that it will say this row size is larger than it should be, so
    // we are not losing data we are just not as efficient as we could be with shared memory. This
    // may be a problem if the tile is computed without regard to variable width offset/length sizes
    // in that we overrun shared memory.
    return util::round_up_unsafe(col_offsets[end_col] + col_sizes[end_col] - col_offsets[start_col],
                                 JCUDF_ROW_ALIGNMENT);
  }
  // Inclusive extents, hence the +1.
  __device__ inline size_type num_cols() const { return end_col - start_col + 1; }
  __device__ inline size_type num_rows() const { return end_row - start_row + 1; }
};
/**
* @brief Returning rows is done in a byte cudf column. This is limited in size by
* `size_type` and so output is broken into batches of rows that fit inside
* this limit.
*
*/
// One batch of rows destined for a single output cudf byte column.
struct row_batch {
  size_type num_bytes;                     // number of bytes in this batch
  size_type row_count;                     // number of rows in the batch
  device_uvector<offset_type> row_offsets; // offsets column of output cudf column
};
/**
* @brief Holds information about the batches of data to be processed
*
*/
// Aggregate book-keeping for all row batches of one conversion.
struct batch_data {
  device_uvector<size_type> batch_row_offsets;      // offsets to each row in incoming data
  device_uvector<size_type> d_batch_row_boundaries; // row numbers for the start of each batch (device copy)
  std::vector<size_type>
      batch_row_boundaries; // row numbers for the start of each batch: 0, 1500, 2700
  std::vector<row_batch> row_batches; // information about each batch such as byte count
};
/**
* @brief builds row size information for tables that contain strings
*
* @param tbl table from which to compute row size information
* @param fixed_width_and_validity_size size of fixed-width and validity data in this table
* @param stream cuda stream on which to operate
* @return pair of device vector of size_types of the row sizes of the table and a device vector of
* offsets into the string column
*/
std::pair<rmm::device_uvector<size_type>, rmm::device_uvector<strings_column_view::offset_iterator>>
build_string_row_offsets(table_view const &tbl, size_type fixed_width_and_validity_size,
                         rmm::cuda_stream_view stream) {
  auto const num_rows = tbl.num_rows();
  // Per-row byte totals, accumulated below; must start at zero.
  rmm::device_uvector<size_type> d_row_sizes(num_rows, stream);
  thrust::uninitialized_fill(rmm::exec_policy(stream), d_row_sizes.begin(), d_row_sizes.end(), 0);
  // Gather a device-side array of offset iterators, one per string column;
  // fixed-width columns are filtered out (any other variable-width type throws).
  auto d_offsets_iterators = [&]() {
    std::vector<strings_column_view::offset_iterator> offsets_iterators;
    auto offsets_iter = thrust::make_transform_iterator(
        tbl.begin(), [](auto const &col) -> strings_column_view::offset_iterator {
          if (!is_fixed_width(col.type())) {
            CUDF_EXPECTS(col.type().id() == type_id::STRING, "only string columns are supported!");
            return strings_column_view(col).offsets_begin();
          } else {
            return nullptr;
          }
        });
    std::copy_if(offsets_iter, offsets_iter + tbl.num_columns(),
                 std::back_inserter(offsets_iterators),
                 [](auto const &offset_ptr) { return offset_ptr != nullptr; });
    return make_device_uvector_async(offsets_iterators, stream);
  }();
  auto const num_columns = static_cast<size_type>(d_offsets_iterators.size());
  // For every (string column, row) pair add that string's length
  // (offsets[row+1] - offsets[row]) into the row's running total.
  thrust::for_each(rmm::exec_policy(stream), thrust::make_counting_iterator(0),
                   thrust::make_counting_iterator(num_columns * num_rows),
                   [d_offsets_iterators = d_offsets_iterators.data(), num_columns, num_rows,
                    d_row_sizes = d_row_sizes.data()] __device__(auto element_idx) {
                     auto const row = element_idx % num_rows;
                     auto const col = element_idx / num_rows;
                     auto const val =
                         d_offsets_iterators[col][row + 1] - d_offsets_iterators[col][row];
                     atomicAdd(&d_row_sizes[row], val);
                   });
  // transform the row sizes to include fixed width size and alignment
  thrust::transform(rmm::exec_policy(stream), d_row_sizes.begin(), d_row_sizes.end(),
                    d_row_sizes.begin(), [fixed_width_and_validity_size] __device__(auto row_size) {
                      return util::round_up_unsafe(fixed_width_and_validity_size + row_size,
                                                   JCUDF_ROW_ALIGNMENT);
                    });
  return {std::move(d_row_sizes), std::move(d_offsets_iterators)};
}
/**
* @brief functor to return the offset of a row in a table with string columns
*
*/
struct string_row_offset_functor {
  string_row_offset_functor(device_span<size_type const> d_row_offsets)
      : d_row_offsets(d_row_offsets){};
  // Variable-width rows: offset is a direct lookup; the second (tile row
  // start) argument is unused.
  __device__ inline size_type operator()(int row_number, int) const {
    return d_row_offsets[row_number];
  }
  device_span<size_type const> d_row_offsets;
};
/**
* @brief functor to return the offset of a row in a table with only fixed-width columns
*
*/
struct fixed_width_row_offset_functor {
  fixed_width_row_offset_functor(size_type fixed_width_only_row_size)
      : _fixed_width_only_row_size(fixed_width_only_row_size){};
  // Fixed-width rows: offset is relative to the tile's first row, so every
  // row is a constant stride apart.
  __device__ inline size_type operator()(int row_number, int tile_row_start) const {
    return (row_number - tile_row_start) * _fixed_width_only_row_size;
  }
  size_type _fixed_width_only_row_size;
};
/**
* @brief Copies data from row-based JCUDF format to column-based cudf format.
*
* This optimized version of the conversion is faster for fixed-width tables that do not have more
* than 100 columns.
*
* @param num_rows number of rows in the incoming table
* @param num_columns number of columns in the incoming table
* @param row_size length in bytes of each row
* @param input_offset_in_row offset to each row of data
* @param num_bytes total number of bytes in the incoming data
* @param output_data array of pointers to the output data
* @param output_nm array of pointers to the output null masks
* @param input_data pointing to the incoming row data
*/
__global__ void
copy_from_rows_fixed_width_optimized(const size_type num_rows, const size_type num_columns,
                                     const size_type row_size, const size_type *input_offset_in_row,
                                     const size_type *num_bytes, int8_t **output_data,
                                     bitmask_type **output_nm, const int8_t *input_data) {
  // We are going to copy the data in two passes.
  // The first pass copies a chunk of data into shared memory.
  // The second pass copies that chunk from shared memory out to the final location.
  // Because shared memory is limited we copy a subset of the rows at a time.
  // For simplicity we will refer to this as a row_group
  // In practice we have found writing more than 4 columns of data per thread
  // results in performance loss. As such we are using a 2 dimensional
  // kernel in terms of threads, but not in terms of blocks. Columns are
  // controlled by the y dimension (there is no y dimension in blocks). Rows
  // are controlled by the x dimension (there are multiple blocks in the x
  // dimension).
  size_type const rows_per_group = blockDim.x;
  size_type const row_group_start = blockIdx.x;
  size_type const row_group_stride = gridDim.x;
  // NOTE: ceil-div plus one extra group; the trailing group is a no-op thanks
  // to the row_index < num_rows guard below.
  size_type const row_group_end = (num_rows + rows_per_group - 1) / rows_per_group + 1;
  extern __shared__ int8_t shared_data[];
  // Because we are copying fixed width only data and we stride the rows
  // this thread will always start copying from shared data in the same place
  int8_t *row_tmp = &shared_data[row_size * threadIdx.x];
  // Validity bytes live immediately after the last fixed-width column's data.
  int8_t *row_vld_tmp = &row_tmp[input_offset_in_row[num_columns - 1] + num_bytes[num_columns - 1]];
  for (auto row_group_index = row_group_start; row_group_index < row_group_end;
       row_group_index += row_group_stride) {
    // Step 1: Copy the data into shared memory
    // We know row_size is always aligned with and a multiple of int64_t;
    int64_t *long_shared = reinterpret_cast<int64_t *>(shared_data);
    int64_t const *long_input = reinterpret_cast<int64_t const *>(input_data);
    auto const shared_output_index = threadIdx.x + (threadIdx.y * blockDim.x);
    auto const shared_output_stride = blockDim.x * blockDim.y;
    auto const row_index_end = std::min(num_rows, ((row_group_index + 1) * rows_per_group));
    auto const num_rows_in_group = row_index_end - (row_group_index * rows_per_group);
    auto const shared_length = row_size * num_rows_in_group;
    size_type const shared_output_end = shared_length / sizeof(int64_t);
    auto const start_input_index = (row_size * row_group_index * rows_per_group) / sizeof(int64_t);
    // All threads of the block cooperate on the 8-byte-wide staging copy.
    for (size_type shared_index = shared_output_index; shared_index < shared_output_end;
         shared_index += shared_output_stride) {
      long_shared[shared_index] = long_input[start_input_index + shared_index];
    }
    // Wait for all of the data to be in shared memory
    __syncthreads();
    // Step 2 copy the data back out
    // Within the row group there should be 1 thread for each row. This is a
    // requirement for launching the kernel
    auto const row_index = (row_group_index * rows_per_group) + threadIdx.x;
    // But we might not use all of the threads if the number of rows does not go
    // evenly into the thread count. We don't want those threads to exit yet
    // because we may need them to copy data in for the next row group.
    uint32_t active_mask = __ballot_sync(0xffff'ffffu, row_index < num_rows);
    if (row_index < num_rows) {
      auto const col_index_start = threadIdx.y;
      auto const col_index_stride = blockDim.y;
      for (auto col_index = col_index_start; col_index < num_columns;
           col_index += col_index_stride) {
        auto const col_size = num_bytes[col_index];
        int8_t const *col_tmp = &(row_tmp[input_offset_in_row[col_index]]);
        int8_t *col_output = output_data[col_index];
        // Scatter this row's value into the column buffer using a width-typed
        // store so the write is a single aligned transaction.
        switch (col_size) {
          case 1: {
            col_output[row_index] = *col_tmp;
            break;
          }
          case 2: {
            int16_t *short_col_output = reinterpret_cast<int16_t *>(col_output);
            short_col_output[row_index] = *reinterpret_cast<const int16_t *>(col_tmp);
            break;
          }
          case 4: {
            int32_t *int_col_output = reinterpret_cast<int32_t *>(col_output);
            int_col_output[row_index] = *reinterpret_cast<const int32_t *>(col_tmp);
            break;
          }
          case 8: {
            int64_t *long_col_output = reinterpret_cast<int64_t *>(col_output);
            long_col_output[row_index] = *reinterpret_cast<const int64_t *>(col_tmp);
            break;
          }
          default: {
            auto const output_offset = col_size * row_index;
            // TODO this should just not be supported for fixed width columns, but just in case...
            for (auto b = 0; b < col_size; b++) {
              col_output[b + output_offset] = col_tmp[b];
            }
            break;
          }
        }
        // Rebuild the null mask: each lane contributes its row's validity bit
        // via ballot, and the lane owning a word boundary writes the word.
        bitmask_type *nm = output_nm[col_index];
        int8_t *valid_byte = &row_vld_tmp[col_index / 8];
        size_type byte_bit_offset = col_index % 8;
        int predicate = *valid_byte & (1 << byte_bit_offset);
        uint32_t bitmask = __ballot_sync(active_mask, predicate);
        if (row_index % 32 == 0) {
          nm[word_index(row_index)] = bitmask;
        }
      } // end column loop
    }   // end row copy
    // wait for the row_group to be totally copied before starting on the next row group
    __syncthreads();
  }
}
__global__ void copy_to_rows_fixed_width_optimized(
const size_type start_row, const size_type num_rows, const size_type num_columns,
const size_type row_size, const size_type *output_offset_in_row, const size_type *num_bytes,
const int8_t **input_data, const bitmask_type **input_nm, int8_t *output_data) {
// We are going to copy the data in two passes.
// The first pass copies a chunk of data into shared memory.
// The second pass copies that chunk from shared memory out to the final location.
// Because shared memory is limited we copy a subset of the rows at a time.
// We do not support copying a subset of the columns in a row yet, so we don't
// currently support a row that is wider than shared memory.
// For simplicity we will refer to this as a row_group
// In practice we have found reading more than 4 columns of data per thread
// results in performance loss. As such we are using a 2 dimensional
// kernel in terms of threads, but not in terms of blocks. Columns are
// controlled by the y dimension (there is no y dimension in blocks). Rows
// are controlled by the x dimension (there are multiple blocks in the x
// dimension).
size_type rows_per_group = blockDim.x;
size_type row_group_start = blockIdx.x;
size_type row_group_stride = gridDim.x;
size_type row_group_end = (num_rows + rows_per_group - 1) / rows_per_group + 1;
extern __shared__ int8_t shared_data[];
// Because we are copying fixed width only data and we stride the rows
// this thread will always start copying to shared data in the same place
int8_t *row_tmp = &shared_data[row_size * threadIdx.x];
int8_t *row_vld_tmp =
&row_tmp[output_offset_in_row[num_columns - 1] + num_bytes[num_columns - 1]];
for (size_type row_group_index = row_group_start; row_group_index < row_group_end;
row_group_index += row_group_stride) {
// Within the row group there should be 1 thread for each row. This is a
// requirement for launching the kernel
size_type row_index = start_row + (row_group_index * rows_per_group) + threadIdx.x;
// But we might not use all of the threads if the number of rows does not go
// evenly into the thread count. We don't want those threads to exit yet
// because we may need them to copy data back out.
if (row_index < (start_row + num_rows)) {
size_type col_index_start = threadIdx.y;
size_type col_index_stride = blockDim.y;
for (size_type col_index = col_index_start; col_index < num_columns;
col_index += col_index_stride) {
size_type col_size = num_bytes[col_index];
int8_t *col_tmp = &(row_tmp[output_offset_in_row[col_index]]);
const int8_t *col_input = input_data[col_index];
switch (col_size) {
case 1: {
*col_tmp = col_input[row_index];
break;
}
case 2: {
const int16_t *short_col_input = reinterpret_cast<const int16_t *>(col_input);
*reinterpret_cast<int16_t *>(col_tmp) = short_col_input[row_index];
break;
}
case 4: {
const int32_t *int_col_input = reinterpret_cast<const int32_t *>(col_input);
*reinterpret_cast<int32_t *>(col_tmp) = int_col_input[row_index];
break;
}
case 8: {
const int64_t *long_col_input = reinterpret_cast<const int64_t *>(col_input);
*reinterpret_cast<int64_t *>(col_tmp) = long_col_input[row_index];
break;
}
default: {
size_type input_offset = col_size * row_index;
// TODO this should just not be supported for fixed width columns, but just in case...
for (size_type b = 0; b < col_size; b++) {
col_tmp[b] = col_input[b + input_offset];
}
break;
}
}
// atomicOr only works on 32 bit or 64 bit aligned values, and not byte aligned
// so we have to rewrite the addresses to make sure that it is 4 byte aligned
int8_t *valid_byte = &row_vld_tmp[col_index / 8];
size_type byte_bit_offset = col_index % 8;
uint64_t fixup_bytes = reinterpret_cast<uint64_t>(valid_byte) % 4;
int32_t *valid_int = reinterpret_cast<int32_t *>(valid_byte - fixup_bytes);
size_type int_bit_offset = byte_bit_offset + (fixup_bytes * 8);
// Now copy validity for the column
if (input_nm[col_index]) {
if (bit_is_set(input_nm[col_index], row_index)) {
atomicOr_block(valid_int, 1 << int_bit_offset);
} else {
atomicAnd_block(valid_int, ~(1 << int_bit_offset));
}
} else {
// It is valid so just set the bit
atomicOr_block(valid_int, 1 << int_bit_offset);
}
} // end column loop
} // end row copy
// wait for the row_group to be totally copied into shared memory
__syncthreads();
// Step 2: Copy the data back out
// We know row_size is always aligned with and a multiple of int64_t;
int64_t *long_shared = reinterpret_cast<int64_t *>(shared_data);
int64_t *long_output = reinterpret_cast<int64_t *>(output_data);
size_type shared_input_index = threadIdx.x + (threadIdx.y * blockDim.x);
size_type shared_input_stride = blockDim.x * blockDim.y;
size_type row_index_end = ((row_group_index + 1) * rows_per_group);
if (row_index_end > num_rows) {
row_index_end = num_rows;
}
size_type num_rows_in_group = row_index_end - (row_group_index * rows_per_group);
size_type shared_length = row_size * num_rows_in_group;
size_type shared_input_end = shared_length / sizeof(int64_t);
size_type start_output_index = (row_size * row_group_index * rows_per_group) / sizeof(int64_t);
for (size_type shared_index = shared_input_index; shared_index < shared_input_end;
shared_index += shared_input_stride) {
long_output[start_output_index + shared_index] = long_shared[shared_index];
}
__syncthreads();
// Go for the next round
}
}
#ifdef ASYNC_MEMCPY_SUPPORTED
#define MEMCPY(dst, src, size, barrier) cuda::memcpy_async(dst, src, size, barrier)
#else
#define MEMCPY(dst, src, size, barrier) memcpy(dst, src, size)
#endif // ASYNC_MEMCPY_SUPPORTED
/**
* @brief copy data from cudf columns into JCUDF format, which is row-based
*
* @tparam RowOffsetFunctor iterator that gives the size of a specific row of the table.
* @param num_rows total number of rows in the table
* @param num_columns total number of columns in the table
* @param shmem_used_per_tile shared memory amount each `tile_info` is using
 * @param tile_infos span of `tile_info` structs that define the work
* @param input_data pointer to raw table data
* @param col_sizes array of sizes for each element in a column - one per column
* @param col_offsets offset into input data row for each column's start
* @param row_offsets offset to a specific row in the output data
* @param batch_row_boundaries row numbers for batch starts
* @param output_data pointer to output data
*
*/
template <typename RowOffsetFunctor>
__global__ void copy_to_rows(const size_type num_rows, const size_type num_columns,
                             const size_type shmem_used_per_tile,
                             device_span<const tile_info> tile_infos, const int8_t **input_data,
                             const size_type *col_sizes, const size_type *col_offsets,
                             RowOffsetFunctor row_offsets, size_type const *batch_row_boundaries,
                             int8_t **output_data) {
  // We are going to copy the data in two passes.
  // The first pass copies a chunk of data into shared memory.
  // The second pass copies that chunk from shared memory out to the final location.
  // Because shared memory is limited we copy a subset of the rows at a time.
  // This has been broken up for us in the tile_info struct, so we don't have
  // any calculation to do here, but it is important to note.
  auto const group = cooperative_groups::this_thread_block();
  auto const warp = cooperative_groups::tiled_partition<cudf::detail::warp_size>(group);
  extern __shared__ int8_t shared_data[];
#ifdef ASYNC_MEMCPY_SUPPORTED
  // block-scope barrier used to track completion of the async row copies below
  __shared__ cuda::barrier<cuda::thread_scope_block> tile_barrier;
  if (group.thread_rank() == 0) {
    init(&tile_barrier, group.size());
  }
  group.sync();
#endif // ASYNC_MEMCPY_SUPPORTED
  // one tile of work per block
  auto const tile = tile_infos[blockIdx.x];
  auto const num_tile_cols = tile.num_cols();
  auto const num_tile_rows = tile.num_rows();
  auto const tile_row_size = tile.get_shared_row_size(col_offsets, col_sizes);
  auto const starting_column_offset = col_offsets[tile.start_col];
  // to do the copy we need to do n column copies followed by m element copies OR we have to do m
  // element copies followed by r row copies. When going from column to row it is much easier to
  // copy by elements first otherwise we would need a running total of the column sizes for our
  // tile, which isn't readily available. This makes it more appealing to copy element-wise from
  // input data into shared matching the end layout and do row-based memcopies out.
  // read each column across the tile
  // each warp takes a column with each thread of a warp taking a row this is done with cooperative
  // groups where each column is chosen by the tiled partition and each thread in that partition
  // works on a row
  for (int relative_col = warp.meta_group_rank(); relative_col < num_tile_cols;
       relative_col += warp.meta_group_size()) {
    auto const absolute_col = relative_col + tile.start_col;
    auto const col_size = col_sizes[absolute_col];
    auto const col_offset = col_offsets[absolute_col];
    auto const relative_col_offset = col_offset - starting_column_offset;
    auto const col_ptr = input_data[absolute_col];
    if (col_ptr == nullptr) {
      // variable-width data column - handled by a separate kernel
      continue;
    }
    // loop condition already guarantees relative_row < num_tile_rows, so no
    // additional bounds check is needed inside the loop body
    for (int relative_row = warp.thread_rank(); relative_row < num_tile_rows;
         relative_row += warp.size()) {
      auto const absolute_row = relative_row + tile.start_row;
      auto const shared_offset = relative_row * tile_row_size + relative_col_offset;
      auto const input_src = col_ptr + col_size * absolute_row;
      // copy the element from global memory, using a single load/store of the
      // element's natural width where possible
      switch (col_size) {
        case 2: {
          const int16_t *short_col_input = reinterpret_cast<const int16_t *>(input_src);
          *reinterpret_cast<int16_t *>(&shared_data[shared_offset]) = *short_col_input;
          break;
        }
        case 4: {
          const int32_t *int_col_input = reinterpret_cast<const int32_t *>(input_src);
          *reinterpret_cast<int32_t *>(&shared_data[shared_offset]) = *int_col_input;
          break;
        }
        case 8: {
          const int64_t *long_col_input = reinterpret_cast<const int64_t *>(input_src);
          *reinterpret_cast<int64_t *>(&shared_data[shared_offset]) = *long_col_input;
          break;
        }
        case 1: shared_data[shared_offset] = *input_src; break;
        default: {
          // byte-wise fallback for unusual fixed-width sizes.
          // BUG FIX: the previous code copied *input_src to shared_data[shared_offset]
          // col_size times, duplicating the first byte instead of copying the element.
          for (int i = 0; i < col_size; ++i) {
            shared_data[shared_offset + i] = input_src[i];
          }
          break;
        }
      }
    }
  }
  auto const tile_output_buffer = output_data[tile.batch_number];
  auto const row_batch_start = tile.batch_number == 0 ? 0 : batch_row_boundaries[tile.batch_number];
  // no async copies above waiting on the barrier, so we sync the group here to ensure all copies to
  // shared memory are completed before copying data out
  group.sync();
  // each warp takes a row and copies the assembled row bytes out to global memory
  for (int copy_row = warp.meta_group_rank(); copy_row < tile.num_rows();
       copy_row += warp.meta_group_size()) {
    auto const src = &shared_data[tile_row_size * copy_row];
    auto const dst = tile_output_buffer + row_offsets(copy_row + tile.start_row, row_batch_start) +
                     starting_column_offset;
#ifdef ASYNC_MEMCPY_SUPPORTED
    cuda::memcpy_async(warp, dst, src, tile_row_size, tile_barrier);
#else
    for (int b = warp.thread_rank(); b < tile_row_size; b += warp.size()) {
      dst[b] = src[b];
    }
#endif
  }
#ifdef ASYNC_MEMCPY_SUPPORTED
  // wait on the last copies to complete
  tile_barrier.arrive_and_wait();
#else
  group.sync();
#endif // ASYNC_MEMCPY_SUPPORTED
}
/**
 * @brief copy validity data from cudf columns into the JCUDF row format
*
* @tparam RowOffsetFunctor iterator that gives the size of a specific row of the table.
* @param num_rows total number of rows in the table
* @param num_columns total number of columns in the table
* @param shmem_used_per_tile amount of shared memory that is used by a tile
* @param row_offsets offset to a specific row in the output data
* @param batch_row_boundaries row numbers for batch starts
* @param output_data pointer to output data, partitioned by data size
* @param validity_offsets offset into input data row for validity data
* @param tile_infos information about the tiles of work
* @param input_nm pointer to input data
*
*/
template <typename RowOffsetFunctor>
__global__ void
copy_validity_to_rows(const size_type num_rows, const size_type num_columns,
                      const size_type shmem_used_per_tile, RowOffsetFunctor row_offsets,
                      size_type const *batch_row_boundaries, int8_t **output_data,
                      const size_type validity_offset, device_span<const tile_info> tile_infos,
                      const bitmask_type **input_nm) {
  extern __shared__ int8_t shared_data[];
  // each thread of warp reads a single int32 of validity - so we read 128 bytes then ballot_sync
  // the bits and write the result to shmem after we fill shared mem memcpy it out in a blob.
  auto const group = cooperative_groups::this_thread_block();
  auto const warp = cooperative_groups::tiled_partition<cudf::detail::warp_size>(group);
#ifdef ASYNC_MEMCPY_SUPPORTED
  // Initialize cuda barriers for each tile.
  __shared__ cuda::barrier<cuda::thread_scope_block> shared_tile_barrier;
  if (group.thread_rank() == 0) {
    init(&shared_tile_barrier, group.size());
  }
  group.sync();
#endif // ASYNC_MEMCPY_SUPPORTED
  // one tile of validity work per block
  auto tile = tile_infos[blockIdx.x];
  auto const num_tile_cols = tile.num_cols();
  auto const num_tile_rows = tile.num_rows();
  auto const threads_per_warp = warp.size();
  // one bitmask_type word of a column's null mask covers this many rows
  auto const rows_per_read = cudf::detail::size_in_bits<bitmask_type>();
  auto const num_sections_x = util::div_rounding_up_unsafe(num_tile_cols, threads_per_warp);
  auto const num_sections_y = util::div_rounding_up_unsafe(num_tile_rows, rows_per_read);
  // bytes of validity per output row in shared memory, padded to the JCUDF row alignment
  auto const validity_data_row_length = util::round_up_unsafe(
    util::div_rounding_up_unsafe(num_tile_cols, CHAR_BIT), JCUDF_ROW_ALIGNMENT);
  auto const total_sections = num_sections_x * num_sections_y;
  // the tile is divided into sections. A warp operates on a section at a time.
  for (int my_section_idx = warp.meta_group_rank(); my_section_idx < total_sections;
       my_section_idx += warp.meta_group_size()) {
    // convert to rows and cols: each lane of the warp owns one column in the section,
    // and each section spans rows_per_read rows
    auto const section_x = my_section_idx % num_sections_x;
    auto const section_y = my_section_idx / num_sections_x;
    auto const relative_col = section_x * threads_per_warp + warp.thread_rank();
    auto const relative_row = section_y * rows_per_read;
    auto const absolute_col = relative_col + tile.start_col;
    auto const absolute_row = relative_row + tile.start_row;
    auto const participating = absolute_col < num_columns && absolute_row < num_rows;
    auto const participation_mask = __ballot_sync(0xFFFF'FFFFu, participating);
    if (participating) {
      // a null input_nm pointer means the column has no null mask: treat all rows as valid
      auto my_data = input_nm[absolute_col] != nullptr ?
                       input_nm[absolute_col][word_index(absolute_row)] :
                       std::numeric_limits<uint32_t>::max();
      // every thread that is participating in the warp has 4 bytes, but it's column-based data and
      // we need it in row-based. So we shuffle the bits around with ballot_sync to make the bytes
      // we actually write.
      bitmask_type dw_mask = 0x1;
      for (int i = 0; i < threads_per_warp && relative_row + i < num_rows; ++i, dw_mask <<= 1) {
        // each ballot gathers bit i (row relative_row + i) from every lane's word,
        // producing one row's worth of validity bits across 32 columns
        auto validity_data = __ballot_sync(participation_mask, my_data & dw_mask);
        // lead thread in each warp writes data
        auto const validity_write_offset =
          validity_data_row_length * (relative_row + i) + (relative_col / CHAR_BIT);
        if (warp.thread_rank() == 0) {
          *reinterpret_cast<bitmask_type *>(&shared_data[validity_write_offset]) = validity_data;
        }
      }
    }
  }
  // validity lives after the fixed-width data region of each output row
  auto const output_data_base =
    output_data[tile.batch_number] + validity_offset + tile.start_col / CHAR_BIT;
  // each warp copies a row at a time
  auto const row_bytes = util::div_rounding_up_unsafe(num_tile_cols, CHAR_BIT);
  auto const row_batch_start = tile.batch_number == 0 ? 0 : batch_row_boundaries[tile.batch_number];
  // make sure entire tile has finished copy
  // Note that this was copied from above just under the for loop due to nsight complaints about
  // divergent threads
  group.sync();
  for (int relative_row = warp.meta_group_rank(); relative_row < num_tile_rows;
       relative_row += warp.meta_group_size()) {
    auto const src = &shared_data[validity_data_row_length * relative_row];
    auto const dst = output_data_base + row_offsets(relative_row + tile.start_row, row_batch_start);
#ifdef ASYNC_MEMCPY_SUPPORTED
    cuda::memcpy_async(warp, dst, src, row_bytes, shared_tile_barrier);
#else
    // synchronous fallback: lanes stride across the row's validity bytes
    for (int b = warp.thread_rank(); b < row_bytes; b += warp.size()) {
      dst[b] = src[b];
    }
#endif
  }
#ifdef ASYNC_MEMCPY_SUPPORTED
  // wait for tile of data to arrive
  shared_tile_barrier.arrive_and_wait();
#else
  group.sync();
#endif // ASYNC_MEMCPY_SUPPORTED
}
/**
* @brief kernel to copy string data to JCUDF row format
*
* @tparam RowOffsetFunctor iterator for row offsets into the destination data
* @param num_rows number of rows in this portion of the table
* @param num_variable_columns number of columns of variable-width data
* @param variable_input_data variable width data column pointers
* @param variable_col_output_offsets output offset information for variable-width columns
* @param variable_col_offsets input offset information for variable-width columns
* @param fixed_width_row_size offset to variable-width data in a row
* @param row_offsets offsets for each row in output data
* @param batch_row_offset row start for this batch
* @param output_data pointer to output data for this batch
*
*/
template <typename RowOffsetFunctor>
__global__ void copy_strings_to_rows(size_type const num_rows, size_type const num_variable_columns,
                                     int8_t const **variable_input_data,
                                     size_type const *variable_col_output_offsets,
                                     size_type const **variable_col_offsets,
                                     size_type fixed_width_row_size, RowOffsetFunctor row_offsets,
                                     size_type const batch_row_offset, int8_t *output_data) {
  // Each block will take a group of rows controlled by NUM_STRING_ROWS_PER_BLOCK_TO_ROWS. Each warp
  // will copy a row at a time. The base thread will first go through column data and fill out
  // offset/length information for the column. Then all threads of the warp will participate in the
  // memcpy of the string data.
  auto const my_block = cooperative_groups::this_thread_block();
  auto const warp = cooperative_groups::tiled_partition<cudf::detail::warp_size>(my_block);
#ifdef ASYNC_MEMCPY_SUPPORTED
  cuda::barrier<cuda::thread_scope_block> block_barrier;
#endif
  // each warp in the block starts on a different row within this block's row group
  auto const start_row =
    blockIdx.x * NUM_STRING_ROWS_PER_BLOCK_TO_ROWS + warp.meta_group_rank() + batch_row_offset;
  auto const end_row =
    ::min(num_rows, static_cast<size_type>(start_row + NUM_STRING_ROWS_PER_BLOCK_TO_ROWS));
  for (int row = start_row; row < end_row; row += warp.meta_group_size()) {
    // variable-width data starts right after the fixed-width portion of the row
    auto offset = fixed_width_row_size; // initial offset to variable-width data
    auto const base_row_offset = row_offsets(row, 0);
    for (int col = 0; col < num_variable_columns; ++col) {
      // this row's string spans [offsets[row], offsets[row + 1]) in the input column
      auto const string_start_offset = variable_col_offsets[col][row];
      auto const string_length = variable_col_offsets[col][row + 1] - string_start_offset;
      if (warp.thread_rank() == 0) {
        // write the offset/length to column
        uint32_t *output_dest = reinterpret_cast<uint32_t *>(
          &output_data[base_row_offset + variable_col_output_offsets[col]]);
        output_dest[0] = offset;
        output_dest[1] = string_length;
      }
      auto string_output_dest = &output_data[base_row_offset + offset];
      auto string_output_src = &variable_input_data[col][string_start_offset];
      // bring the warp back together before all lanes cooperate on the byte copy
      warp.sync();
#ifdef ASYNC_MEMCPY_SUPPORTED
      cuda::memcpy_async(warp, string_output_dest, string_output_src, string_length, block_barrier);
#else
      // synchronous fallback: lanes stride across the string's bytes
      for (int c = warp.thread_rank(); c < string_length; c += warp.size()) {
        string_output_dest[c] = string_output_src[c];
      }
#endif
      offset += string_length;
    }
  }
}
/**
* @brief copy data from row-based format to cudf columns
*
* @tparam RowOffsetFunctor iterator that gives the size of a specific row of the table.
* @param num_rows total number of rows in the table
* @param num_columns total number of columns in the table
* @param shmem_used_per_tile amount of shared memory that is used by a tile
* @param row_offsets offset to a specific row in the input data
* @param batch_row_boundaries row numbers for batch starts
* @param output_data pointers to column data
* @param col_sizes array of sizes for each element in a column - one per column
* @param col_offsets offset into input data row for each column's start
* @param tile_infos information about the tiles of work
* @param input_data pointer to input data
*
*/
template <typename RowOffsetFunctor>
__global__ void copy_from_rows(const size_type num_rows, const size_type num_columns,
                               const size_type shmem_used_per_tile, RowOffsetFunctor row_offsets,
                               size_type const *batch_row_boundaries, int8_t **output_data,
                               const size_type *col_sizes, const size_type *col_offsets,
                               device_span<const tile_info> tile_infos, const int8_t *input_data) {
  // We are going to copy the data in two passes.
  // The first pass copies a chunk of data into shared memory.
  // The second pass copies that chunk from shared memory out to the final location.
  // Because shared memory is limited we copy a subset of the rows at a time. This has been broken
  // up for us in the tile_info struct, so we don't have any calculation to do here, but it is
  // important to note.
  // To speed up some of the random access memory we do, we copy col_sizes and col_offsets to shared
  // memory for each of the tiles that we work on
  auto const group = cooperative_groups::this_thread_block();
  auto const warp = cooperative_groups::tiled_partition<cudf::detail::warp_size>(group);
  extern __shared__ int8_t shared[];
#ifdef ASYNC_MEMCPY_SUPPORTED
  // Initialize cuda barriers for each tile.
  __shared__ cuda::barrier<cuda::thread_scope_block> tile_barrier;
  if (group.thread_rank() == 0) {
    init(&tile_barrier, group.size());
  }
  group.sync();
#endif // ASYNC_MEMCPY_SUPPORTED
  // Pass 1: fetch whole rows from the input row data into shared memory,
  // one warp per row
  {
    auto const fetch_tile = tile_infos[blockIdx.x];
    auto const fetch_tile_start_row = fetch_tile.start_row;
    auto const starting_col_offset = col_offsets[fetch_tile.start_col];
    auto const fetch_tile_row_size = fetch_tile.get_shared_row_size(col_offsets, col_sizes);
    auto const row_batch_start =
      fetch_tile.batch_number == 0 ? 0 : batch_row_boundaries[fetch_tile.batch_number];
    // note: end_row is inclusive here (<=)
    for (int absolute_row = warp.meta_group_rank() + fetch_tile.start_row;
         absolute_row <= fetch_tile.end_row; absolute_row += warp.meta_group_size()) {
      warp.sync();
      auto shared_offset = (absolute_row - fetch_tile_start_row) * fetch_tile_row_size;
      auto dst = &shared[shared_offset];
      auto src = &input_data[row_offsets(absolute_row, row_batch_start) + starting_col_offset];
      // copy the data
#ifdef ASYNC_MEMCPY_SUPPORTED
      cuda::memcpy_async(warp, dst, src, fetch_tile_row_size, tile_barrier);
#else
      // synchronous fallback: lanes stride across the row's bytes
      for (int b = warp.thread_rank(); b < fetch_tile_row_size; b += warp.size()) {
        dst[b] = src[b];
      }
#endif
    }
  }
  // Pass 2: scatter the staged rows out to the destination columns
  {
    auto const tile = tile_infos[blockIdx.x];
    auto const rows_in_tile = tile.num_rows();
    auto const cols_in_tile = tile.num_cols();
    auto const tile_row_size = tile.get_shared_row_size(col_offsets, col_sizes);
#ifdef ASYNC_MEMCPY_SUPPORTED
    // ensure our data is ready
    tile_barrier.arrive_and_wait();
#else
    group.sync();
#endif // ASYNC_MEMCPY_SUPPORTED
    // Now we copy from shared memory to final destination. The data is laid out in rows in shared
    // memory, so the reads for a column will be "vertical". Because of this and the different sizes
    // for each column, this portion is handled on row/column basis. to prevent each thread working
    // on a single row and also to ensure that all threads can do work in the case of more threads
    // than rows, we do a global index instead of a double for loop with col/row.
    for (int relative_row = warp.thread_rank(); relative_row < rows_in_tile;
         relative_row += warp.size()) {
      auto const absolute_row = relative_row + tile.start_row;
      auto const shared_memory_row_offset = tile_row_size * relative_row;
      // each warp owns a column; each lane of that warp handles one row of it
      for (int relative_col = warp.meta_group_rank(); relative_col < cols_in_tile;
           relative_col += warp.meta_group_size()) {
        auto const absolute_col = relative_col + tile.start_col;
        auto const shared_memory_offset =
          col_offsets[absolute_col] - col_offsets[tile.start_col] + shared_memory_row_offset;
        auto const column_size = col_sizes[absolute_col];
        int8_t *shmem_src = &shared[shared_memory_offset];
        int8_t *dst = &output_data[absolute_col][absolute_row * column_size];
        // per-element copy; async when supported, tracked by tile_barrier
        MEMCPY(dst, shmem_src, column_size, tile_barrier);
      }
    }
  }
#ifdef ASYNC_MEMCPY_SUPPORTED
  // wait on the last copies to complete
  tile_barrier.arrive_and_wait();
#else
  group.sync();
#endif // ASYNC_MEMCPY_SUPPORTED
}
/**
* @brief copy data from row-based format to cudf columns
*
* @tparam RowOffsetFunctor iterator that gives the size of a specific row of the table.
* @param num_rows total number of rows in the table
* @param num_columns total number of columns in the table
* @param shmem_used_per_tile amount of shared memory that is used by a tile
* @param row_offsets offset to the first column a specific row in the input data
* @param batch_row_boundaries row numbers for batch starts
* @param output_nm pointers to null masks for columns
* @param validity_offsets offset into input data row for validity data
* @param tile_infos information about the tiles of work
* @param input_data pointer to input data
*
*/
template <typename RowOffsetFunctor>
__global__ void
copy_validity_from_rows(const size_type num_rows, const size_type num_columns,
                        const size_type shmem_used_per_tile, RowOffsetFunctor row_offsets,
                        size_type const *batch_row_boundaries, bitmask_type **output_nm,
                        const size_type validity_offset, device_span<const tile_info> tile_infos,
                        const int8_t *input_data) {
  extern __shared__ int8_t shared[];
  using cudf::detail::warp_size;
  // each thread of warp reads a single byte of validity - so we read 32 bytes then ballot_sync the
  // bits and write the result to shmem after we fill shared mem memcpy it out in a blob. Probably
  // need knobs for number of rows vs columns to balance read/write
  // C0 C1 C2 C3 C4 C5 C6 C7
  // R0 1 0 1 0 0 1 1 0 <-- thread 0 reads byte r0
  // R1 1 1 1 1 1 1 1 0 <-- thread 1 reads byte r1
  // R2 0 0 1 0 0 1 1 0 <-- thread 2 reads byte r2
  // ...
  // R31 1 1 1 1 1 1 1 1 <-- thread 31 reads byte r31
  // ^
  // | 1 bit of each input byte, by column, are swizzled into a single 32 bit word via
  // __ballot_sync, representing 32 rows of that column.
  auto const group = cooperative_groups::this_thread_block();
  auto const warp = cooperative_groups::tiled_partition<cudf::detail::warp_size>(group);
#ifdef ASYNC_MEMCPY_SUPPORTED
  // Initialize cuda barriers for each tile.
  __shared__ cuda::barrier<cuda::thread_scope_block> shared_tile_barrier;
  if (group.thread_rank() == 0) {
    init(&shared_tile_barrier, group.size());
  }
  group.sync();
#endif // ASYNC_MEMCPY_SUPPORTED
  // one tile of validity work per block
  auto const tile = tile_infos[blockIdx.x];
  auto const tile_start_col = tile.start_col;
  auto const tile_start_row = tile.start_row;
  auto const num_tile_cols = tile.num_cols();
  auto const num_tile_rows = tile.num_rows();
  auto const threads_per_warp = warp.size();
  // each input byte holds CHAR_BIT columns of validity for a single row
  auto const cols_per_read = CHAR_BIT;
  auto const rows_per_read = static_cast<size_type>(threads_per_warp);
  auto const num_sections_x = util::div_rounding_up_safe(num_tile_cols, cols_per_read);
  auto const num_sections_y = util::div_rounding_up_safe(num_tile_rows, rows_per_read);
  auto const validity_data_col_length = num_sections_y * 4; // words to bytes
  auto const total_sections = num_sections_x * num_sections_y;
  // the tile is divided into sections. A warp operates on a section at a time.
  for (int my_section_idx = warp.meta_group_rank(); my_section_idx < total_sections;
       my_section_idx += warp.meta_group_size()) {
    // convert section to row and col: each lane of the warp reads one row's validity byte
    auto const section_x = my_section_idx % num_sections_x;
    auto const section_y = my_section_idx / num_sections_x;
    auto const relative_col = section_x * cols_per_read;
    auto const relative_row = section_y * rows_per_read + warp.thread_rank();
    auto const absolute_col = relative_col + tile_start_col;
    auto const absolute_row = relative_row + tile_start_row;
    auto const row_batch_start =
      tile.batch_number == 0 ? 0 : batch_row_boundaries[tile.batch_number];
    auto const participation_mask = __ballot_sync(0xFFFF'FFFFu, absolute_row < num_rows);
    if (absolute_row < num_rows) {
      auto const my_byte = input_data[row_offsets(absolute_row, row_batch_start) + validity_offset +
                                      (absolute_col / cols_per_read)];
      // so every thread that is participating in the warp has a byte, but it's row-based data and
      // we need it in column-based. So we shuffle the bits around to make the bytes we actually
      // write.
      for (int i = 0, byte_mask = 0x1; (i < cols_per_read) && ((relative_col + i) < num_columns);
           ++i, byte_mask <<= 1) {
        // each ballot gathers bit i (column relative_col + i) from every lane's byte,
        // producing one 32-row word of that column's validity
        auto const validity_data = __ballot_sync(participation_mask, my_byte & byte_mask);
        // lead thread in each warp writes data
        if (warp.thread_rank() == 0) {
          auto const validity_write_offset =
            validity_data_col_length * (relative_col + i) + relative_row / cols_per_read;
          *reinterpret_cast<bitmask_type *>(&shared[validity_write_offset]) = validity_data;
        }
      }
    }
  }
  // now memcpy the shared memory out to the final destination
  auto const col_words = util::div_rounding_up_unsafe(num_tile_rows, CHAR_BIT * 4);
  // make sure entire tile has finished copy
  group.sync();
  // each warp copies one column's worth of validity words out to its null mask
  for (int relative_col = warp.meta_group_rank(); relative_col < num_tile_cols;
       relative_col += warp.meta_group_size()) {
    auto const absolute_col = relative_col + tile_start_col;
    auto dst = output_nm[absolute_col] + word_index(tile_start_row);
    auto const src =
      reinterpret_cast<bitmask_type *>(&shared[validity_data_col_length * relative_col]);
#ifdef ASYNC_MEMCPY_SUPPORTED
    cuda::memcpy_async(warp, dst, src, aligned_size_t<4>(validity_data_col_length),
                       shared_tile_barrier);
#else
    // synchronous fallback: lanes stride across the column's validity words
    for (int b = warp.thread_rank(); b < col_words; b += warp.size()) {
      dst[b] = src[b];
    }
#endif
  }
#ifdef ASYNC_MEMCPY_SUPPORTED
  // wait for tile of data to arrive
  shared_tile_barrier.arrive_and_wait();
#else
  group.sync();
#endif // ASYNC_MEMCPY_SUPPORTED
}
/**
* @brief copies string data from jcudf row format to cudf columns
*
* @tparam RowOffsetFunctor iterator for row offsets into the destination data
* @param row_offsets offsets for each row in input data
* @param string_row_offsets offset data into jcudf row data for each string
* @param string_lengths length of each incoming string in each column
* @param string_column_offsets offset column data for cudf column
* @param string_col_data output cudf string column data
* @param row_data jcudf row data
* @param num_rows number of rows in data
* @param num_string_columns number of string columns in the table
*/
template <typename RowOffsetFunctor>
__global__ void copy_strings_from_rows(RowOffsetFunctor row_offsets, int32_t **string_row_offsets,
                                       int32_t **string_lengths, size_type **string_column_offsets,
                                       char **string_col_data, int8_t const *row_data,
                                       size_type const num_rows,
                                       size_type const num_string_columns) {
  // Each warp takes a tile, which is a single column and up to ROWS_PER_BLOCK rows. A tile will not
  // wrap around the bottom of the table. The warp will copy the strings for each row in the tile.
  // Traversing in row-major order to coalesce the offsets and size reads.
  auto my_block = cooperative_groups::this_thread_block();
  auto warp = cooperative_groups::tiled_partition<cudf::detail::warp_size>(my_block);
#ifdef ASYNC_MEMCPY_SUPPORTED
  cuda::barrier<cuda::thread_scope_block> block_barrier;
#endif
  // workaround for not being able to take a reference to a constexpr host variable
  auto const ROWS_PER_BLOCK = NUM_STRING_ROWS_PER_BLOCK_FROM_ROWS;
  auto const tiles_per_col = util::div_rounding_up_unsafe(num_rows, ROWS_PER_BLOCK);
  auto const starting_tile = blockIdx.x * warp.meta_group_size() + warp.meta_group_rank();
  auto const num_tiles = tiles_per_col * num_string_columns;
  // stride covers every warp across every block in the grid
  auto const tile_stride = warp.meta_group_size() * gridDim.x;
  // Each warp will copy strings in its tile. This is handled by all the threads of a warp passing
  // the same parameters to async_memcpy and all threads in the warp participating in the copy.
  for (auto my_tile = starting_tile; my_tile < num_tiles; my_tile += tile_stride) {
    // decode the flat tile index into (column, starting row)
    auto const starting_row = (my_tile % tiles_per_col) * ROWS_PER_BLOCK;
    auto const col = my_tile / tiles_per_col;
    auto const str_len = string_lengths[col];
    auto const str_row_off = string_row_offsets[col];
    auto const str_col_off = string_column_offsets[col];
    auto str_col_data = string_col_data[col];
    for (int row = starting_row; row < starting_row + ROWS_PER_BLOCK && row < num_rows; ++row) {
      // source is within the packed row; destination is the cudf chars buffer
      auto const src = &row_data[row_offsets(row, 0) + str_row_off[row]];
      auto dst = &str_col_data[str_col_off[row]];
#ifdef ASYNC_MEMCPY_SUPPORTED
      cuda::memcpy_async(warp, dst, src, str_len[row], block_barrier);
#else
      // synchronous fallback: lanes stride across the string's bytes
      for (int c = warp.thread_rank(); c < str_len[row]; c += warp.size()) {
        dst[c] = src[c];
      }
#endif
    }
  }
}
/**
* @brief Calculate the dimensions of the kernel for fixed width only columns.
*
* @param [in] num_columns the number of columns being copied.
* @param [in] num_rows the number of rows being copied.
* @param [in] size_per_row the size each row takes up when padded.
* @param [out] blocks the size of the blocks for the kernel
* @param [out] threads the size of the threads for the kernel
* @return the size in bytes of shared memory needed for each block.
*/
static int calc_fixed_width_kernel_dims(const size_type num_columns, const size_type num_rows,
                                        const size_type size_per_row, dim3 &blocks, dim3 &threads) {
  // Measurements show performance drops once a thread handles more than 4 columns,
  // so the y dimension (columns) is capped at 32 threads. That cap guarantees at
  // least one full warp in the x dimension (rows), which improves coalescing and
  // keeps the block-level validity atomics from splitting a word across blocks.
  int const column_threads = min(util::div_rounding_up_safe(num_columns, 4), 32);
  int const row_thread_budget = 1024 / column_threads;
  // 48KB is the default shared-memory-per-block setting per the CUDA tutorials.
  // A GPU configured down to 16KB may not satisfy this assumption.
  int const max_shared_size = 48 * 1024;
  // Threads beyond what shared memory can feed would simply sit idle, so cap
  // the row threads by how many whole rows fit in the shared-memory budget.
  auto const shared_limited_threads = ::min(row_thread_budget, max_shared_size / size_per_row);
  // Round down to a multiple of 32 (one warp). Besides coalescing, this lets
  // validity be written with warp-level ballots that line up with 32-bit words:
  // each y-thread then maps onto whole warps and thus whole validity words.
  int const block_size = (shared_limited_threads / 32) * 32;
  CUDF_EXPECTS(block_size != 0, "Row size is too large to fit in shared memory");
  // Grid x supports up to 2^31 - 1 blocks, but very large grids add overhead.
  // ~600 blocks saturate memory on a V100, so clamp an order of magnitude above
  // that to stay future proof without oversubscribing.
  int const num_blocks = std::clamp((num_rows + block_size - 1) / block_size, 1, 10240);
  blocks.x = num_blocks;
  blocks.y = 1;
  blocks.z = 1;
  threads.x = block_size;
  threads.y = column_threads;
  threads.z = 1;
  // shared memory: one padded row per thread in the x dimension
  return size_per_row * block_size;
}
/**
* When converting to rows it is possible that the size of the table was too big to fit
* in a single column. This creates an output column for a subset of the rows in a table
* going from start row and containing the next num_rows. Most of the parameters passed
* into this function are common between runs and should be calculated once.
*/
static std::unique_ptr<column> fixed_width_convert_to_rows(
    const size_type start_row, const size_type num_rows, const size_type num_columns,
    const size_type size_per_row, rmm::device_uvector<size_type> &column_start,
    rmm::device_uvector<size_type> &column_size, rmm::device_uvector<const int8_t *> &input_data,
    rmm::device_uvector<const bitmask_type *> &input_nm, const scalar &zero,
    const scalar &scalar_size_per_row, rmm::cuda_stream_view stream,
    rmm::mr::device_memory_resource *mr) {
  // Widen BEFORE multiplying: size_per_row * num_rows evaluated in 32-bit
  // size_type can overflow for large tables, which would let the check below
  // pass with a garbage (possibly negative) value.
  int64_t const total_allocation = static_cast<int64_t>(size_per_row) * num_rows;
  // We made a mistake in the split somehow
  CUDF_EXPECTS(total_allocation < std::numeric_limits<size_type>::max(),
               "Table is too large to fit!");
  // Allocate and set the offsets row for the byte array; rows are fixed width,
  // so offsets form an arithmetic sequence starting at zero.
  std::unique_ptr<column> offsets =
      cudf::detail::sequence(num_rows + 1, zero, scalar_size_per_row, stream);
  std::unique_ptr<column> data =
      make_numeric_column(data_type(type_id::INT8), static_cast<size_type>(total_allocation),
                          mask_state::UNALLOCATED, stream, mr);
  dim3 blocks;
  dim3 threads;
  int shared_size =
      detail::calc_fixed_width_kernel_dims(num_columns, num_rows, size_per_row, blocks, threads);
  // Copy fixed-width data and validity for rows [start_row, start_row + num_rows)
  // into the flat byte buffer, one encoded row per table row.
  hipLaunchKernelGGL((copy_to_rows_fixed_width_optimized), dim3(blocks), dim3(threads), shared_size,
                     stream.value(), start_row, num_rows, num_columns, size_per_row,
                     column_start.data(), column_size.data(), input_data.data(), input_nm.data(),
                     data->mutable_view().data<int8_t>());
  // Wrap the byte buffer as a LIST<INT8> column: one list element per row.
  return make_lists_column(num_rows, std::move(offsets), std::move(data), 0,
                           rmm::device_buffer{0, stream, mr}, stream, mr);
}
static inline bool are_all_fixed_width(std::vector<data_type> const &schema) {
return std::all_of(schema.begin(), schema.end(),
[](const data_type &t) { return is_fixed_width(t); });
}
/**
* @brief Given a set of fixed width columns, calculate how the data will be laid out in memory.
*
* @param [in] schema the types of columns that need to be laid out.
* @param [out] column_start the byte offset where each column starts in the row.
* @param [out] column_size the size in bytes of the data for each columns in the row.
* @return the size in bytes each row needs.
*/
static inline int32_t compute_fixed_width_layout(std::vector<data_type> const &schema,
std::vector<size_type> &column_start,
std::vector<size_type> &column_size) {
// We guarantee that the start of each column is 64-bit aligned so anything can go
// there, but to make the code simple we will still do an alignment for it.
int32_t at_offset = 0;
for (auto col = schema.begin(); col < schema.end(); col++) {
size_type s = size_of(*col);
column_size.emplace_back(s);
std::size_t allocation_needed = s;
std::size_t alignment_needed = allocation_needed; // They are the same for fixed width types
at_offset = util::round_up_unsafe(at_offset, static_cast<int32_t>(alignment_needed));
column_start.emplace_back(at_offset);
at_offset += allocation_needed;
}
// Now we need to add in space for validity
// Eventually we can think about nullable vs not nullable, but for now we will just always add
// it in
int32_t const validity_bytes_needed =
util::div_rounding_up_safe<int32_t>(schema.size(), CHAR_BIT);
// validity comes at the end and is byte aligned so we can pack more in.
at_offset += validity_bytes_needed;
// Now we need to pad the end so all rows are 64 bit aligned
return util::round_up_unsafe(at_offset, JCUDF_ROW_ALIGNMENT);
}
/**
* @brief column sizes and column start offsets for a table
*/
struct column_info_s {
  // total bytes of fixed-width data plus validity needed for one encoded row
  size_type size_per_row;
  // byte offset of each column within a row; the final extra entry is the
  // offset where validity bits begin
  std::vector<size_type> column_starts;
  // size in bytes of each column's fixed-width slot within the row
  std::vector<size_type> column_sizes;
  // row offsets of the offset/length slots for variable-width (compound) columns
  std::vector<size_type> variable_width_column_starts;
  // assignment is deleted: instances are built once (aggregate-initialized)
  // and thereafter only copied/moved via construction
  column_info_s &operator=(column_info_s const &other) = delete;
  column_info_s &operator=(column_info_s &&other) = delete;
};
/**
* @brief Compute information about a table such as bytes per row and offsets.
*
* @tparam iterator iterator of column schema data
* @param begin starting iterator of column schema
* @param end ending iterator of column schema
* @param column_starts column start offsets
* @param column_sizes size in bytes of each column
* @return size of the fixed_width data portion of a row.
*/
template <typename iterator>
column_info_s compute_column_information(iterator begin, iterator end) {
  auto const num_columns = std::distance(begin, end);
  size_type row_size = 0;
  std::vector<size_type> starts;
  std::vector<size_type> sizes;
  std::vector<size_type> variable_starts;
  starts.reserve(num_columns + 1);
  sizes.reserve(num_columns);
  for (auto it = begin; it != end; ++it) {
    bool const is_variable = is_compound(*it);
    // a string or list column stores an offset/length pair (two uint32 values)
    // in the fixed-width region of the row
    size_type const col_size =
        is_variable ? sizeof(uint32_t) + sizeof(uint32_t) : size_of(*it);
    // fixed width types align to their own size; the offset/length pair aligns
    // to 4 bytes
    size_type const alignment = is_variable ? __alignof(uint32_t) : col_size;
    row_size = util::round_up_unsafe(row_size, alignment);
    if (is_variable) {
      variable_starts.push_back(row_size);
    }
    starts.push_back(row_size);
    sizes.push_back(col_size);
    row_size += col_size;
  }
  // record where the validity bits begin, at the end of the fixed-width data
  starts.push_back(row_size);
  // one validity bit per column, byte-aligned in the JCUDF format
  row_size +=
      util::div_rounding_up_safe(static_cast<size_type>(num_columns), CHAR_BIT);
  return {row_size, std::move(starts), std::move(sizes), std::move(variable_starts)};
}
/**
* @brief Build `tile_info` for the validity data to break up the work.
*
* @param num_columns number of columns in the table
* @param num_rows number of rows in the table
* @param shmem_limit_per_tile size of shared memory available to a single gpu tile
* @param row_batches batched row information for multiple output locations
* @return vector of `tile_info` structs for validity data
*/
std::vector<detail::tile_info>
build_validity_tile_infos(size_type const &num_columns, size_type const &num_rows,
                          size_type const &shmem_limit_per_tile,
                          std::vector<row_batch> const &row_batches) {
  // Aim for a roughly square tile in bytes: sqrt(shared mem budget) per side.
  auto const desired_rows_and_columns = static_cast<int>(sqrt(shmem_limit_per_tile));
  auto const column_stride = util::round_up_unsafe(
      [&]() {
        if (desired_rows_and_columns > num_columns) {
          // not many columns, build a single tile for table width and ship it off
          return num_columns;
        } else {
          return util::round_down_safe(desired_rows_and_columns, CHAR_BIT);
        }
      }(),
      JCUDF_ROW_ALIGNMENT);
  // we fit as much as we can given the column stride. Note that an element in the
  // table takes just 1 bit, but a row with a single element still takes 8 bytes!
  auto const bytes_per_row = util::round_up_safe(
      util::div_rounding_up_unsafe(column_stride, CHAR_BIT), JCUDF_ROW_ALIGNMENT);
  // rows per tile, limited by the table and rounded to a 64-row boundary
  auto const row_stride =
      ::min(num_rows, util::round_down_safe(shmem_limit_per_tile / bytes_per_row, 64));
  std::vector<detail::tile_info> validity_tile_infos;
  // approximate reservation only; integer division may under-reserve, harmless
  validity_tile_infos.reserve(num_columns / column_stride * num_rows / row_stride);
  for (int col = 0; col < num_columns; col += column_stride) {
    int current_tile_row_batch = 0;
    int rows_left_in_batch = row_batches[current_tile_row_batch].row_count;
    int row = 0;
    while (row < num_rows) {
      // advance to the next row batch once the current one is consumed;
      // a tile is never allowed to span two batches
      if (rows_left_in_batch == 0) {
        current_tile_row_batch++;
        rows_left_in_batch = row_batches[current_tile_row_batch].row_count;
      }
      int const tile_height = ::min(row_stride, rows_left_in_batch);
      // emit a tile clamped to the table's last column and this batch's rows
      validity_tile_infos.emplace_back(
          detail::tile_info{col, row, ::min(col + column_stride - 1, num_columns - 1),
                            row + tile_height - 1, current_tile_row_batch});
      row += tile_height;
      rows_left_in_batch -= tile_height;
    }
  }
  return validity_tile_infos;
}
/**
 * @brief functor that returns the size of a row, or 0 if the row index is greater than the
 * number of rows in the table
 *
 * @tparam RowSize iterator that returns the size of a specific row
 */
template <typename RowSize> struct row_size_functor {
  row_size_functor(size_type row_end, RowSize row_sizes, size_type last_row_end)
      : _row_end(row_end), _row_sizes(row_sizes), _last_row_end(last_row_end) {}
  // Size of row (i + _last_row_end), or 0 once i reaches _row_end.
  __device__ inline uint64_t operator()(int i) const {
    if (i >= _row_end) {
      return 0;
    }
    return _row_sizes[i + _last_row_end];
  }
  size_type _row_end;      // first relative index that is out of range
  RowSize _row_sizes;      // iterator over absolute row sizes
  size_type _last_row_end; // offset added to i to form an absolute row index
};
/**
* @brief Builds batches of rows that will fit in the size limit of a column.
*
* @tparam RowSize iterator that gives the size of a specific row of the table.
* @param num_rows Total number of rows in the table
* @param row_sizes iterator that gives the size of a specific row of the table.
* @param all_fixed_width bool indicating all data in this table is fixed width
* @param stream stream to operate on for this work
* @param mr memory resource used to allocate any returned data
* @returns vector of size_type's that indicate row numbers for batch boundaries and a
* device_uvector of row offsets
*/
template <typename RowSize>
batch_data build_batches(size_type num_rows, RowSize row_sizes, bool all_fixed_width,
                         rmm::cuda_stream_view stream, rmm::mr::device_memory_resource *mr) {
  // Total encoded size of the table determines how many ~2GB batches are needed.
  auto const total_size = thrust::reduce(rmm::exec_policy(stream), row_sizes, row_sizes + num_rows);
  auto const num_batches = static_cast<int32_t>(
      util::div_rounding_up_safe(total_size, static_cast<uint64_t>(MAX_BATCH_SIZE)));
  auto const num_offsets = num_batches + 1;
  std::vector<row_batch> row_batches;
  std::vector<size_type> batch_row_boundaries;
  // per-row offsets are only materialized for variable-width tables
  device_uvector<size_type> batch_row_offsets(all_fixed_width ? 0 : num_rows, stream);
  // at most max gpu memory / 2GB iterations.
  batch_row_boundaries.reserve(num_offsets);
  batch_row_boundaries.push_back(0);
  size_type last_row_end = 0;
  device_uvector<uint64_t> cumulative_row_sizes(num_rows, stream);
  thrust::inclusive_scan(rmm::exec_policy(stream), row_sizes, row_sizes + num_rows,
                         cumulative_row_sizes.begin());
  // This needs to be split into 2 gig batches. Care must be taken to avoid a batch larger than
  // 2 gigs. Imagine a table with 900 meg rows. The batches should occur every 2 rows, but if a
  // lower bound is run at 2 gigs, 4 gigs, 6 gigs. the batches will be 2 rows, 2 rows, 3 rows, which
  // will be invalid. The previous batch size must be taken into account when building a new batch.
  // One way is to pull the batch size back to the host and add it to MAX_BATCH_SIZE for the lower
  // bound search. The other method involves keeping everything on device, but subtracting the
  // previous batch from cumulative_row_sizes based on index. This involves no synchronization
  // between GPU and CPU, but involves more work on the GPU. These further need to be broken on a
  // 32-row boundary to match the fixed_width optimized versions.
  while (last_row_end < num_rows) {
    // sizes re-based to the start of this batch so the lower_bound below
    // searches bytes accumulated since the previous boundary
    auto offset_row_sizes = thrust::make_transform_iterator(
        cumulative_row_sizes.begin(),
        [last_row_end, cumulative_row_sizes = cumulative_row_sizes.data()] __device__(auto i) {
          return i - cumulative_row_sizes[last_row_end];
        });
    auto search_start = offset_row_sizes + last_row_end;
    auto search_end = offset_row_sizes + num_rows;
    // find the next MAX_BATCH_SIZE boundary
    auto const lb =
        thrust::lower_bound(rmm::exec_policy(stream), search_start, search_end, MAX_BATCH_SIZE);
    size_type const batch_size = lb - search_start;
    // non-final batches are rounded down to a 32-row boundary so validity words
    // never straddle a batch
    size_type const row_end = lb == search_end ?
                                  batch_size + last_row_end :
                                  last_row_end + util::round_down_safe(batch_size, 32);
    // build offset list for each row in this batch
    auto const num_rows_in_batch = row_end - last_row_end;
    auto const num_entries = row_end - last_row_end + 1;
    device_uvector<size_type> output_batch_row_offsets(num_entries, stream, mr);
    auto row_size_iter_bounded = cudf::detail::make_counting_transform_iterator(
        0, row_size_functor(row_end, row_sizes, last_row_end));
    thrust::exclusive_scan(rmm::exec_policy(stream), row_size_iter_bounded,
                           row_size_iter_bounded + num_entries, output_batch_row_offsets.begin());
    // last scan entry is the total byte count for this batch
    auto const batch_bytes = output_batch_row_offsets.element(num_rows_in_batch, stream);
    // The output_batch_row_offsets vector is used as the offset column of the returned data. This
    // needs to be individually allocated, but the kernel needs a contiguous array of offsets or
    // more global lookups are necessary.
    if (!all_fixed_width) {
      // check the copy's result like every other CUDA call in this file; an
      // unchecked failure here would surface later as corrupt offsets
      CUDF_CUDA_TRY(hipMemcpy(batch_row_offsets.data() + last_row_end,
                              output_batch_row_offsets.data(),
                              num_rows_in_batch * sizeof(size_type), hipMemcpyDeviceToDevice));
    }
    batch_row_boundaries.push_back(row_end);
    row_batches.push_back({batch_bytes, num_rows_in_batch, std::move(output_batch_row_offsets)});
    last_row_end = row_end;
  }
  return {std::move(batch_row_offsets), make_device_uvector_async(batch_row_boundaries, stream),
          std::move(batch_row_boundaries), std::move(row_batches)};
}
/**
* @brief Computes the number of tiles necessary given a tile height and batch offsets
*
* @param batch_row_boundaries row boundaries for each batch
* @param desired_tile_height height of each tile in the table
* @param stream stream to use
* @return number of tiles necessary
*/
int compute_tile_counts(device_span<size_type const> const &batch_row_boundaries,
                        int desired_tile_height, rmm::cuda_stream_view stream) {
  // One entry per batch: how many tiles of the requested height it needs.
  size_type const batch_count = batch_row_boundaries.size() - 1;
  device_uvector<size_type> tiles_per_batch(batch_count, stream);
  auto counting = thrust::make_counting_iterator(0);
  thrust::transform(rmm::exec_policy(stream), counting, counting + batch_count,
                    tiles_per_batch.begin(),
                    [desired_tile_height,
                     boundaries = batch_row_boundaries.data()] __device__(
                        auto batch_index) -> size_type {
                      return util::div_rounding_up_unsafe(
                          boundaries[batch_index + 1] - boundaries[batch_index],
                          desired_tile_height);
                    });
  // Sum across batches for the grand total.
  return thrust::reduce(rmm::exec_policy(stream), tiles_per_batch.begin(), tiles_per_batch.end());
}
/**
* @brief Builds the `tile_info` structs for a given table.
*
* @param tiles span of tiles to populate
* @param batch_row_boundaries boundary to row batches
* @param column_start starting column of the tile
* @param column_end ending column of the tile
* @param desired_tile_height height of the tile
* @param total_number_of_rows total number of rows in the table
* @param stream stream to use
* @return number of tiles created
*/
size_type
build_tiles(device_span<tile_info> tiles,
            device_uvector<size_type> const &batch_row_boundaries, // comes from build_batches
            int column_start, int column_end, int desired_tile_height, int total_number_of_rows,
            rmm::cuda_stream_view stream) {
  size_type const num_batches = batch_row_boundaries.size() - 1;
  device_uvector<size_type> num_tiles(num_batches, stream);
  auto iter = thrust::make_counting_iterator(0);
  // tiles required per batch: ceil(rows in batch / tile height)
  thrust::transform(rmm::exec_policy(stream), iter, iter + num_batches, num_tiles.begin(),
                    [desired_tile_height,
                     batch_row_boundaries =
                         batch_row_boundaries.data()] __device__(auto batch_index) -> size_type {
                      return util::div_rounding_up_unsafe(batch_row_boundaries[batch_index + 1] -
                                                              batch_row_boundaries[batch_index],
                                                          desired_tile_height);
                    });
  size_type const total_tiles =
      thrust::reduce(rmm::exec_policy(stream), num_tiles.begin(), num_tiles.end());
  // exclusive scan of per-batch counts yields each batch's first tile index
  device_uvector<size_type> tile_starts(num_batches + 1, stream);
  auto tile_iter = cudf::detail::make_counting_transform_iterator(
      0, [num_tiles = num_tiles.data(), num_batches] __device__(auto i) {
        return (i < num_batches) ? num_tiles[i] : 0;
      });
  thrust::exclusive_scan(rmm::exec_policy(stream), tile_iter, tile_iter + num_batches + 1,
                         tile_starts.begin()); // in tiles
  // fill each tile_info from its global tile index
  thrust::transform(
      rmm::exec_policy(stream), iter, iter + total_tiles, tiles.begin(),
      [=, tile_starts = tile_starts.data(),
       batch_row_boundaries = batch_row_boundaries.data()] __device__(size_type tile_index) {
        // what batch this tile falls in
        auto const batch_index_iter =
            thrust::upper_bound(thrust::seq, tile_starts, tile_starts + num_batches, tile_index);
        auto const batch_index = std::distance(tile_starts, batch_index_iter) - 1;
        // local index within the tile
        int const local_tile_index = tile_index - tile_starts[batch_index];
        // the start row for this batch.
        int const batch_row_start = batch_row_boundaries[batch_index];
        // the start row for this tile
        int const tile_row_start = batch_row_start + (local_tile_index * desired_tile_height);
        // the end row for this tile, clamped to the table end and to the end of
        // this batch (tiles never span batches)
        int const max_row =
            ::min(total_number_of_rows - 1,
                  batch_index + 1 > num_batches ?
                      std::numeric_limits<size_type>::max() :
                      static_cast<int>(batch_row_boundaries[batch_index + 1]) - 1);
        int const tile_row_end =
            ::min(batch_row_start + ((local_tile_index + 1) * desired_tile_height) - 1, max_row);
        // stuff the tile
        return tile_info{column_start, tile_row_start, column_end, tile_row_end,
                         static_cast<int>(batch_index)};
      });
  return total_tiles;
}
/**
* @brief Determines what data should be operated on by each tile for the incoming table.
*
* @tparam TileCallback Callback that receives the start and end columns of tiles
* @param column_sizes vector of the size of each column
* @param column_starts vector of the offset of each column
* @param first_row_batch_size size of the first row batch to limit max tile size since a tile
* is unable to span batches
* @param total_number_of_rows total number of rows in the table
* @param shmem_limit_per_tile shared memory allowed per tile
* @param f callback function called when building a tile
*/
template <typename TileCallback>
void determine_tiles(std::vector<size_type> const &column_sizes,
                     std::vector<size_type> const &column_starts,
                     size_type const first_row_batch_size, size_type const total_number_of_rows,
                     size_type const &shmem_limit_per_tile, TileCallback f) {
  // tile infos are organized with the tile going "down" the columns; this
  // provides the most coalescing of memory access
  int current_tile_width = 0;
  int current_tile_start_col = 0;
  // the ideal tile height has lots of 8-byte reads and 8-byte writes. The optimal read/write would
  // be memory cache line sized access, but since other tiles will read/write the edges this may not
  // turn out to be overly important. For now, we will attempt to build a square tile as far as byte
  // sizes. x * y = shared_mem_size. Which translates to x^2 = shared_mem_size since we want them
  // equal, so height and width are sqrt(shared_mem_size). The trick is that it's in bytes, not rows
  // or columns.
  auto const square_bias = 32; // bias towards columns for performance reasons
  auto const optimal_square_len = static_cast<size_type>(sqrt(shmem_limit_per_tile));
  // height rounded up to a warp multiple...
  auto const desired_tile_height = util::round_up_safe<int>(
      ::min(optimal_square_len / square_bias, total_number_of_rows), cudf::detail::warp_size);
  // ...but a tile cannot span row batches, so the first batch caps the height
  auto const tile_height = std::clamp(desired_tile_height, 1, first_row_batch_size);
  int row_size = 0;
  // march each column and build the tiles of appropriate sizes
  for (uint col = 0; col < column_sizes.size(); ++col) {
    auto const col_size = column_sizes[col];
    // align size for this type
    auto const alignment_needed = col_size; // They are the same for fixed width types
    auto const row_size_aligned = util::round_up_unsafe(row_size, alignment_needed);
    auto const row_size_with_this_col = row_size_aligned + col_size;
    auto const row_size_with_end_pad =
        util::round_up_unsafe(row_size_with_this_col, JCUDF_ROW_ALIGNMENT);
    if (row_size_with_end_pad * tile_height > shmem_limit_per_tile) {
      // too large, close this tile, generate vertical tiles and restart
      f(current_tile_start_col, col == 0 ? col : col - 1, tile_height);
      // NOTE(review): row_size is restarted from the sub-8-byte phase of this
      // column's end offset (`& 7`) — presumably so the shared-memory tile
      // boundary keeps the same alignment phase as the output row; confirm
      // against the copy kernels' expectations
      row_size =
          util::round_up_unsafe((column_starts[col] + column_sizes[col]) & 7, alignment_needed);
      row_size += col_size; // alignment required for shared memory tile boundary to match alignment
                            // of output row
      current_tile_start_col = col;
      current_tile_width = 0;
    } else {
      row_size = row_size_with_this_col;
      current_tile_width++;
    }
  }
  // build last set of tiles
  if (current_tile_width > 0) {
    f(current_tile_start_col, static_cast<int>(column_sizes.size()) - 1, tile_height);
  }
}
/**
* @brief convert cudf table into JCUDF row format
*
* @tparam offsetFunctor functor type for offset functor
* @param tbl table to convert to JCUDF row format
* @param batch_info information about the batches of data
* @param offset_functor functor that returns the starting offset of each row
* @param column_info information about incoming columns
 * @param variable_width_offsets optional vector of offsets for variable-width columns
* @param stream stream used
* @param mr selected memory resource for returned data
* @return vector of list columns containing byte columns of the JCUDF row data
*/
template <typename offsetFunctor>
std::vector<std::unique_ptr<column>> convert_to_rows(
    table_view const &tbl, batch_data &batch_info, offsetFunctor offset_functor,
    column_info_s const &column_info,
    std::optional<rmm::device_uvector<strings_column_view::offset_iterator>> variable_width_offsets,
    rmm::cuda_stream_view stream, rmm::mr::device_memory_resource *mr) {
  // Query how much shared memory a single tile may use on the current device.
  int device_id;
  CUDF_CUDA_TRY(hipGetDevice(&device_id));
  int total_shmem_in_bytes;
  CUDF_CUDA_TRY(
      hipDeviceGetAttribute(&total_shmem_in_bytes, hipDeviceAttributeMaxSharedMemoryPerBlock, device_id));
#ifndef __CUDA_ARCH__ // __host__ code.
  // Need to reduce total shmem available by the size of barriers in the kernel's shared memory
  total_shmem_in_bytes -=
      util::round_up_unsafe(sizeof(cuda::barrier<cuda::thread_scope_block>), 16ul);
#endif // __CUDA_ARCH__
  auto const shmem_limit_per_tile = total_shmem_in_bytes;
  auto const num_rows = tbl.num_rows();
  auto const fixed_width_only = !variable_width_offsets.has_value();
  // helper: build a table_view over the columns matching a predicate
  auto select_columns = [](auto const &tbl, auto column_predicate) {
    std::vector<column_view> cols;
    std::copy_if(tbl.begin(), tbl.end(), std::back_inserter(cols),
                 [&](auto c) { return column_predicate(c); });
    return table_view(cols);
  };
  auto dev_col_sizes = make_device_uvector_async(column_info.column_sizes, stream);
  auto dev_col_starts = make_device_uvector_async(column_info.column_starts, stream);
  // Get the pointers to the input columnar data ready.
  // Compound (string/list) columns carry no fixed-width payload here, so pass
  // nullptr for them; their data is handled by the strings kernel below.
  auto const data_begin = thrust::make_transform_iterator(tbl.begin(), [](auto const &c) {
    return is_compound(c.type()) ? nullptr : c.template data<int8_t>();
  });
  std::vector<int8_t const *> input_data(data_begin, data_begin + tbl.num_columns());
  // validity code handles variable and fixed-width data, so give it everything
  auto const nm_begin =
      thrust::make_transform_iterator(tbl.begin(), [](auto const &c) { return c.null_mask(); });
  std::vector<bitmask_type const *> input_nm(nm_begin, nm_begin + tbl.num_columns());
  auto dev_input_data = make_device_uvector_async(input_data, stream);
  auto dev_input_nm = make_device_uvector_async(input_nm, stream);
  // the first batch always exists unless we were sent an empty table
  auto const first_batch_size = batch_info.row_batches[0].row_count;
  // one output buffer per row batch; each becomes a distinct LIST<INT8> column
  std::vector<rmm::device_buffer> output_buffers;
  std::vector<int8_t *> output_data;
  output_data.reserve(batch_info.row_batches.size());
  output_buffers.reserve(batch_info.row_batches.size());
  std::transform(batch_info.row_batches.begin(), batch_info.row_batches.end(),
                 std::back_inserter(output_buffers), [&](auto const &batch) {
                   return rmm::device_buffer(batch.num_bytes, stream, mr);
                 });
  std::transform(output_buffers.begin(), output_buffers.end(), std::back_inserter(output_data),
                 [](auto &buf) { return static_cast<int8_t *>(buf.data()); });
  auto dev_output_data = make_device_uvector_async(output_data, stream, mr);
  // first pass over the layout: count tiles so the buffer can be sized exactly
  int info_count = 0;
  detail::determine_tiles(
      column_info.column_sizes, column_info.column_starts, first_batch_size, num_rows,
      shmem_limit_per_tile,
      [&gpu_batch_row_boundaries = batch_info.d_batch_row_boundaries, &info_count,
       &stream](int const start_col, int const end_col, int const tile_height) {
        int i = detail::compute_tile_counts(gpu_batch_row_boundaries, tile_height, stream);
        info_count += i;
      });
  // allocate space for tiles
  device_uvector<detail::tile_info> gpu_tile_infos(info_count, stream);
  // second pass: populate the tile_info entries
  int tile_offset = 0;
  detail::determine_tiles(
      column_info.column_sizes, column_info.column_starts, first_batch_size, num_rows,
      shmem_limit_per_tile,
      [&gpu_batch_row_boundaries = batch_info.d_batch_row_boundaries, &gpu_tile_infos, num_rows,
       &tile_offset, stream](int const start_col, int const end_col, int const tile_height) {
        tile_offset += detail::build_tiles(
            {gpu_tile_infos.data() + tile_offset, gpu_tile_infos.size() - tile_offset},
            gpu_batch_row_boundaries, start_col, end_col, tile_height, num_rows, stream);
      });
  // build validity tiles for ALL columns, variable and fixed width.
  auto validity_tile_infos = detail::build_validity_tile_infos(
      tbl.num_columns(), num_rows, shmem_limit_per_tile, batch_info.row_batches);
  auto dev_validity_tile_infos = make_device_uvector_async(validity_tile_infos, stream);
  // last entry of column_starts is where validity bits begin in each row
  auto const validity_offset = column_info.column_starts.back();
  // blast through the entire table and convert it
  hipLaunchKernelGGL(( detail::copy_to_rows), dim3(gpu_tile_infos.size()), dim3(NUM_WARPS_IN_BLOCK * cudf::detail::warp_size),
                     total_shmem_in_bytes, stream.value(),
      num_rows, tbl.num_columns(), shmem_limit_per_tile, gpu_tile_infos, dev_input_data.data(),
      dev_col_sizes.data(), dev_col_starts.data(), offset_functor,
      batch_info.d_batch_row_boundaries.data(),
      reinterpret_cast<int8_t **>(dev_output_data.data()));
  // note that validity gets the entire table and not the fixed-width portion
  hipLaunchKernelGGL(( detail::copy_validity_to_rows), dim3(validity_tile_infos.size()),
                     dim3(NUM_WARPS_IN_BLOCK * cudf::detail::warp_size),
                     total_shmem_in_bytes, stream.value(),
      num_rows, tbl.num_columns(), shmem_limit_per_tile, offset_functor,
      batch_info.d_batch_row_boundaries.data(), dev_output_data.data(), validity_offset,
      dev_validity_tile_infos, dev_input_nm.data());
  if (!fixed_width_only) {
    // build table view for variable-width data only
    auto const variable_width_table =
        select_columns(tbl, [](auto col) { return is_compound(col.type()); });
    CUDF_EXPECTS(!variable_width_table.is_empty(), "No variable-width columns when expected!");
    CUDF_EXPECTS(variable_width_offsets.has_value(), "No variable width offset data!");
    // character-data pointers for each string column (nullptr for non-compound)
    auto const variable_data_begin =
        thrust::make_transform_iterator(variable_width_table.begin(), [](auto const &c) {
          strings_column_view const scv{c};
          return is_compound(c.type()) ? scv.chars().template data<int8_t>() : nullptr;
        });
    std::vector<int8_t const *> variable_width_input_data(
        variable_data_begin, variable_data_begin + variable_width_table.num_columns());
    auto dev_variable_input_data = make_device_uvector_async(variable_width_input_data, stream);
    auto dev_variable_col_output_offsets =
        make_device_uvector_async(column_info.variable_width_column_starts, stream);
    // one launch per batch; each batch owns its own output buffer
    for (uint i = 0; i < batch_info.row_batches.size(); i++) {
      auto const batch_row_offset = batch_info.batch_row_boundaries[i];
      auto const batch_num_rows = batch_info.row_batches[i].row_count;
      dim3 const string_blocks(::min(
          MAX_STRING_BLOCKS,
          util::div_rounding_up_unsafe(batch_num_rows, NUM_STRING_ROWS_PER_BLOCK_TO_ROWS)));
      hipLaunchKernelGGL(( detail::copy_strings_to_rows), dim3(string_blocks), dim3(NUM_WARPS_IN_BLOCK * cudf::detail::warp_size), 0,
                         stream.value(),
          batch_num_rows, variable_width_table.num_columns(), dev_variable_input_data.data(),
          dev_variable_col_output_offsets.data(), variable_width_offsets->data(),
          column_info.size_per_row, offset_functor, batch_row_offset,
          reinterpret_cast<int8_t *>(output_data[i]));
    }
  }
  // split up the output buffer into multiple buffers based on row batch sizes and create list of
  // byte columns
  std::vector<std::unique_ptr<column>> ret;
  ret.reserve(batch_info.row_batches.size());
  auto counting_iter = thrust::make_counting_iterator(0);
  std::transform(counting_iter, counting_iter + batch_info.row_batches.size(),
                 std::back_inserter(ret), [&](auto batch) {
                   auto const offset_count = batch_info.row_batches[batch].row_offsets.size();
                   auto offsets = std::make_unique<column>(
                       data_type{type_id::INT32}, (size_type)offset_count,
                       batch_info.row_batches[batch].row_offsets.release());
                   auto data = std::make_unique<column>(data_type{type_id::INT8},
                                                        batch_info.row_batches[batch].num_bytes,
                                                        std::move(output_buffers[batch]));
                   // NOTE(review): the empty null-mask buffer is created on
                   // cudf::get_default_stream() while everything else uses
                   // `stream` — confirm this mismatch is intentional
                   return make_lists_column(
                       batch_info.row_batches[batch].row_count, std::move(offsets), std::move(data),
                       0, rmm::device_buffer{0, cudf::get_default_stream(), mr}, stream, mr);
                 });
  return ret;
}
} // namespace detail
/**
* @brief convert a cudf table to JCUDF row format
*
* @param tbl incoming table to convert
* @param stream stream to use for operations
* @param mr memory resource used for returned data
* @return vector of list columns containing byte columns of the JCUDF row data
*/
std::vector<std::unique_ptr<column>> convert_to_rows(table_view const &tbl,
                                                     rmm::cuda_stream_view stream,
                                                     rmm::mr::device_memory_resource *mr) {
  auto const num_columns = tbl.num_columns();
  auto const num_rows = tbl.num_rows();
  // fixed-width-only tables take the simpler constant-row-size path below
  auto const fixed_width_only = std::all_of(
      tbl.begin(), tbl.end(), [](column_view const &c) { return is_fixed_width(c.type()); });
  // Break up the work into tiles, which are a starting and ending row/col #. This tile size is
  // calculated based on the shared memory size available we want a single tile to fill up the
  // entire shared memory space available for the transpose-like conversion.
  // There are two different processes going on here. The GPU conversion of the data and the writing
  // of the data into the list of byte columns that are a maximum of 2 gigs each due to offset
  // maximum size. The GPU conversion portion has to understand this limitation because the column
  // must own the data inside and as a result it must be a distinct allocation for that column.
  // Copying the data into these final buffers would be prohibitively expensive, so care is taken to
  // ensure the GPU writes to the proper buffer. The tiles are broken at the boundaries of specific
  // rows based on the row sizes up to that point. These are row batches and they are decided first
  // before building the tiles so the tiles can be properly cut around them.
  auto schema_column_iter =
      thrust::make_transform_iterator(tbl.begin(), [](auto const &i) { return i.type(); });
  // per-column sizes/offsets plus the fixed-width bytes needed per row
  auto column_info =
      detail::compute_column_information(schema_column_iter, schema_column_iter + num_columns);
  auto const size_per_row = column_info.size_per_row;
  if (fixed_width_only) {
    // total encoded row size. This includes fixed-width data and validity only. It does not include
    // variable-width data since it isn't copied with the fixed-width and validity kernel.
    auto row_size_iter = thrust::make_constant_iterator<uint64_t>(
        util::round_up_unsafe(size_per_row, JCUDF_ROW_ALIGNMENT));
    auto batch_info = detail::build_batches(num_rows, row_size_iter, fixed_width_only, stream, mr);
    // every row is the same size, so a row's offset is a simple multiply
    detail::fixed_width_row_offset_functor offset_functor(
        util::round_up_unsafe(size_per_row, JCUDF_ROW_ALIGNMENT));
    return detail::convert_to_rows(tbl, batch_info, offset_functor, std::move(column_info),
                                   std::nullopt, stream, mr);
  } else {
    // variable-width rows: compute per-row sizes, then look each offset up
    auto offset_data = detail::build_string_row_offsets(tbl, size_per_row, stream);
    auto &row_sizes = std::get<0>(offset_data);
    auto row_size_iter = cudf::detail::make_counting_transform_iterator(
        0, detail::row_size_functor(num_rows, row_sizes.data(), 0));
    auto batch_info = detail::build_batches(num_rows, row_size_iter, fixed_width_only, stream, mr);
    detail::string_row_offset_functor offset_functor(batch_info.batch_row_offsets);
    return detail::convert_to_rows(tbl, batch_info, offset_functor, std::move(column_info),
                                   std::make_optional(std::move(std::get<1>(offset_data))), stream,
                                   mr);
  }
}
std::vector<std::unique_ptr<column>>
convert_to_rows_fixed_width_optimized(table_view const &tbl, rmm::cuda_stream_view stream,
                                      rmm::mr::device_memory_resource *mr) {
  // Gather the data type of every incoming column so we can validate and lay out the row.
  auto const column_count = tbl.num_columns();
  std::vector<data_type> types;
  types.reserve(column_count);
  std::transform(tbl.begin(), tbl.end(), std::back_inserter(types),
                 [](auto const &col) -> data_type { return col.type(); });
  if (!detail::are_all_fixed_width(types)) {
    CUDF_FAIL("Only fixed width types are currently supported");
  }
  // Compute the packed row layout: per-column byte offsets/sizes and the total row width.
  std::vector<size_type> col_starts;
  std::vector<size_type> col_sizes;
  int32_t const bytes_per_row = detail::compute_fixed_width_layout(types, col_starts, col_sizes);
  auto dev_col_starts = make_device_uvector_async(col_starts, stream, mr);
  auto dev_col_sizes = make_device_uvector_async(col_sizes, stream, mr);
  // Make the number of rows per batch a multiple of 32 so we don't have to worry about
  // splitting validity at a specific row offset. This might change in the future.
  auto const batch_rows =
      util::round_down_safe(std::numeric_limits<size_type>::max() / bytes_per_row, 32);
  auto const total_rows = tbl.num_rows();
  // Collect raw device pointers to the columnar input data and null masks.
  std::vector<const int8_t *> in_data;
  std::vector<bitmask_type const *> in_nm;
  in_data.reserve(column_count);
  in_nm.reserve(column_count);
  for (auto const &cv : tbl) {
    in_data.emplace_back(cv.data<int8_t>());
    in_nm.emplace_back(cv.null_mask());
  }
  auto dev_in_data = make_device_uvector_async(in_data, stream, mr);
  auto dev_in_nm = make_device_uvector_async(in_nm, stream, mr);
  // Device scalars used by the conversion: the starting offset (0) and the stride between rows.
  using ScalarType = scalar_type_t<size_type>;
  auto zero = make_numeric_scalar(data_type(type_id::INT32), stream.value());
  zero->set_valid_async(true, stream);
  static_cast<ScalarType *>(zero.get())->set_value(0, stream);
  auto step = make_numeric_scalar(data_type(type_id::INT32), stream.value());
  step->set_valid_async(true, stream);
  static_cast<ScalarType *>(step.get())->set_value(static_cast<size_type>(bytes_per_row), stream);
  // Convert one batch of rows at a time; each batch becomes its own output column.
  std::vector<std::unique_ptr<column>> result;
  for (size_type first_row = 0; first_row < total_rows; first_row += batch_rows) {
    auto const rows_in_batch = std::min(batch_rows, total_rows - first_row);
    result.emplace_back(detail::fixed_width_convert_to_rows(
        first_row, rows_in_batch, column_count, bytes_per_row, dev_col_starts, dev_col_sizes,
        dev_in_data, dev_in_nm, *zero, *step, stream, mr));
  }
  return result;
}
/**
* @brief convert from JCUDF row format to cudf columns
*
* @param input vector of list columns containing byte columns of the JCUDF row data
* @param schema incoming schema of the data
* @param stream stream to use for compute
* @param mr memory resource for returned data
* @return cudf table of the data
*/
std::unique_ptr<table> convert_from_rows(lists_column_view const &input,
                                         std::vector<data_type> const &schema,
                                         rmm::cuda_stream_view stream,
                                         rmm::mr::device_memory_resource *mr) {
  // verify that the types are what we expect
  column_view child = input.child();
  auto const list_type = child.type().id();
  CUDF_EXPECTS(list_type == type_id::INT8 || list_type == type_id::UINT8,
               "Only a list of bytes is supported as input");
  // convert any strings in the schema to two int32 columns
  // This allows us to leverage the fixed-width copy code to fill in our offset and string length
  // data.
  std::vector<data_type> string_schema;
  string_schema.reserve(schema.size());
  for (auto i : schema) {
    if (i.id() == type_id::STRING) {
      string_schema.push_back(data_type(type_id::INT32));
      string_schema.push_back(data_type(type_id::INT32));
    } else {
      string_schema.push_back(i);
    }
  }
  auto const num_columns = string_schema.size();
  auto const num_rows = input.parent().size();
  int device_id;
  CUDF_CUDA_TRY(hipGetDevice(&device_id));
  int total_shmem_in_bytes;
  CUDF_CUDA_TRY(
      hipDeviceGetAttribute(&total_shmem_in_bytes, hipDeviceAttributeMaxSharedMemoryPerBlock, device_id));
#ifndef __CUDA_ARCH__ // __host__ code.
  // Need to reduce total shmem available by the size of barriers in the kernel's shared memory
  total_shmem_in_bytes -=
      util::round_up_unsafe(sizeof(cuda::barrier<cuda::thread_scope_block>), 16ul);
#endif // __CUDA_ARCH__
  auto const shmem_limit_per_tile = total_shmem_in_bytes;
  auto column_info = detail::compute_column_information(string_schema.begin(), string_schema.end());
  auto const size_per_row = util::round_up_unsafe(column_info.size_per_row, JCUDF_ROW_ALIGNMENT);
  // Ideally we would check that the offsets are all the same, etc. but for now this is probably
  // fine. Widen to 64 bits before multiplying: size_per_row * num_rows can overflow 32-bit
  // size_type for large inputs, which would make this sanity check undefined behavior.
  CUDF_EXPECTS(static_cast<int64_t>(size_per_row) * num_rows <= child.size(),
               "The layout of the data appears to be off");
  auto dev_col_starts = make_device_uvector_async(column_info.column_starts, stream);
  auto dev_col_sizes = make_device_uvector_async(column_info.column_sizes, stream);
  // Allocate the columns we are going to write into
  std::vector<std::unique_ptr<column>> output_columns;
  std::vector<std::unique_ptr<column>> string_row_offset_columns;
  std::vector<std::unique_ptr<column>> string_length_columns;
  std::vector<int8_t *> output_data;
  std::vector<bitmask_type *> output_nm;
  std::vector<int32_t *> string_row_offsets;
  std::vector<int32_t *> string_lengths;
  for (auto i : schema) {
    // Creates a fixed-width output column and records its data pointer (and null-mask pointer
    // when requested) for the copy kernels.
    auto make_col = [&output_data, &output_nm](data_type type, size_type num_rows, bool include_nm,
                                               rmm::cuda_stream_view stream,
                                               rmm::mr::device_memory_resource *mr) {
      auto column = make_fixed_width_column(
          type, num_rows, include_nm ? mask_state::UNINITIALIZED : mask_state::UNALLOCATED, stream,
          mr);
      auto mut = column->mutable_view();
      output_data.emplace_back(mut.data<int8_t>());
      if (include_nm) {
        output_nm.emplace_back(mut.null_mask());
      }
      return column;
    };
    if (i.id() == type_id::STRING) {
      // Strings are materialized later; build int32 offset/length scratch columns now so the
      // fixed-width kernels can fill them in.
      auto const int32type = data_type(type_id::INT32);
      auto offset_col =
          make_col(int32type, num_rows, true, stream, rmm::mr::get_current_device_resource());
      string_row_offsets.push_back(offset_col->mutable_view().data<int32_t>());
      string_row_offset_columns.emplace_back(std::move(offset_col));
      auto length_col =
          make_col(int32type, num_rows, false, stream, rmm::mr::get_current_device_resource());
      string_lengths.push_back(length_col->mutable_view().data<int32_t>());
      string_length_columns.emplace_back(std::move(length_col));
      // placeholder
      output_columns.emplace_back(make_empty_column(type_id::STRING));
    } else {
      output_columns.emplace_back(make_col(i, num_rows, true, stream, mr));
    }
  }
  auto dev_string_row_offsets = make_device_uvector_async(string_row_offsets, stream);
  auto dev_string_lengths = make_device_uvector_async(string_lengths, stream);
  // build the row_batches from the passed in list column
  std::vector<detail::row_batch> row_batches;
  row_batches.push_back(
      {detail::row_batch{child.size(), num_rows, device_uvector<size_type>(0, stream)}});
  auto dev_output_data = make_device_uvector_async(output_data, stream);
  auto dev_output_nm = make_device_uvector_async(output_nm, stream);
  // only ever get a single batch when going from rows, so boundaries are 0, num_rows
  constexpr auto num_batches = 2;
  device_uvector<size_type> gpu_batch_row_boundaries(num_batches, stream);
  thrust::transform(rmm::exec_policy(stream), thrust::make_counting_iterator(0),
                    thrust::make_counting_iterator(num_batches), gpu_batch_row_boundaries.begin(),
                    [num_rows] __device__(auto i) { return i == 0 ? 0 : num_rows; });
  // First pass over the tile layout only counts tiles so we can size the tile-info allocation.
  int info_count = 0;
  detail::determine_tiles(
      column_info.column_sizes, column_info.column_starts, num_rows, num_rows, shmem_limit_per_tile,
      [&gpu_batch_row_boundaries, &info_count, &stream](int const start_col, int const end_col,
                                                        int const tile_height) {
        info_count += detail::compute_tile_counts(gpu_batch_row_boundaries, tile_height, stream);
      });
  // allocate space for tiles
  device_uvector<detail::tile_info> gpu_tile_infos(info_count, stream);
  int tile_offset = 0;
  detail::determine_tiles(
      column_info.column_sizes, column_info.column_starts, num_rows, num_rows, shmem_limit_per_tile,
      [&gpu_batch_row_boundaries, &gpu_tile_infos, num_rows, &tile_offset,
       stream](int const start_col, int const end_col, int const tile_height) {
        tile_offset += detail::build_tiles(
            {gpu_tile_infos.data() + tile_offset, gpu_tile_infos.size() - tile_offset},
            gpu_batch_row_boundaries, start_col, end_col, tile_height, num_rows, stream);
      });
  // validity needs to be calculated based on the actual number of final table columns
  auto validity_tile_infos =
      detail::build_validity_tile_infos(schema.size(), num_rows, shmem_limit_per_tile, row_batches);
  auto dev_validity_tile_infos = make_device_uvector_async(validity_tile_infos, stream);
  if (dev_string_row_offsets.size() == 0) {
    // Fixed-width-only table: every row is the same width, so offsets are computed on the fly.
    detail::fixed_width_row_offset_functor offset_functor(size_per_row);
    hipLaunchKernelGGL(( detail::copy_from_rows), dim3(gpu_tile_infos.size()), dim3(NUM_WARPS_IN_BLOCK * cudf::detail::warp_size),
                       total_shmem_in_bytes, stream.value(),
        num_rows, num_columns, shmem_limit_per_tile, offset_functor,
        gpu_batch_row_boundaries.data(), dev_output_data.data(), dev_col_sizes.data(),
        dev_col_starts.data(), gpu_tile_infos, child.data<int8_t>());
    hipLaunchKernelGGL(( detail::copy_validity_from_rows), dim3(validity_tile_infos.size()),
                       dim3(NUM_WARPS_IN_BLOCK * cudf::detail::warp_size),
                       total_shmem_in_bytes, stream.value(),
        num_rows, num_columns, shmem_limit_per_tile, offset_functor,
        gpu_batch_row_boundaries.data(), dev_output_nm.data(), column_info.column_starts.back(),
        dev_validity_tile_infos, child.data<int8_t>());
  } else {
    // Table contains strings: per-row byte offsets come from the incoming list column's offsets.
    detail::string_row_offset_functor offset_functor(device_span<size_type const>{input.offsets()});
    hipLaunchKernelGGL(( detail::copy_from_rows), dim3(gpu_tile_infos.size()), dim3(NUM_WARPS_IN_BLOCK * cudf::detail::warp_size),
                       total_shmem_in_bytes, stream.value(),
        num_rows, num_columns, shmem_limit_per_tile, offset_functor,
        gpu_batch_row_boundaries.data(), dev_output_data.data(), dev_col_sizes.data(),
        dev_col_starts.data(), gpu_tile_infos, child.data<int8_t>());
    hipLaunchKernelGGL(( detail::copy_validity_from_rows), dim3(validity_tile_infos.size()),
                       dim3(NUM_WARPS_IN_BLOCK * cudf::detail::warp_size),
                       total_shmem_in_bytes, stream.value(),
        num_rows, num_columns, shmem_limit_per_tile, offset_functor,
        gpu_batch_row_boundaries.data(), dev_output_nm.data(), column_info.column_starts.back(),
        dev_validity_tile_infos, child.data<int8_t>());
    // Build the character data for each string column from the copied offsets/lengths.
    std::vector<device_uvector<size_type>> string_col_offsets;
    std::vector<rmm::device_uvector<char>> string_data_cols;
    std::vector<size_type *> string_col_offset_ptrs;
    std::vector<char *> string_data_col_ptrs;
    for (auto &col_string_lengths : string_lengths) {
      device_uvector<size_type> output_string_offsets(num_rows + 1, stream, mr);
      // Read 0 one past the end so the exclusive scan yields the total size as the last offset.
      auto tmp = [num_rows, col_string_lengths] __device__(auto const &i) {
        return i < num_rows ? col_string_lengths[i] : 0;
      };
      auto bounded_iter = cudf::detail::make_counting_transform_iterator(0, tmp);
      thrust::exclusive_scan(rmm::exec_policy(stream), bounded_iter, bounded_iter + num_rows + 1,
                             output_string_offsets.begin());
      // allocate destination string column
      rmm::device_uvector<char> string_data(output_string_offsets.element(num_rows, stream), stream,
                                            mr);
      string_col_offset_ptrs.push_back(output_string_offsets.data());
      string_data_col_ptrs.push_back(string_data.data());
      string_col_offsets.push_back(std::move(output_string_offsets));
      string_data_cols.push_back(std::move(string_data));
    }
    auto dev_string_col_offsets = make_device_uvector_async(string_col_offset_ptrs, stream);
    auto dev_string_data_cols = make_device_uvector_async(string_data_col_ptrs, stream);
    // Use std::min/std::max rather than the non-standard global ::min/::max in host code.
    dim3 const string_blocks(
        std::min(std::max(MIN_STRING_BLOCKS, num_rows / NUM_STRING_ROWS_PER_BLOCK_FROM_ROWS),
                 MAX_STRING_BLOCKS));
    hipLaunchKernelGGL(( detail::copy_strings_from_rows), dim3(string_blocks), dim3(NUM_WARPS_IN_BLOCK * cudf::detail::warp_size), 0,
                       stream.value(),
        offset_functor, dev_string_row_offsets.data(), dev_string_lengths.data(),
        dev_string_col_offsets.data(), dev_string_data_cols.data(), child.data<int8_t>(), num_rows,
        static_cast<cudf::size_type>(string_col_offsets.size()));
    // merge strings back into output_columns
    int string_idx = 0;
    for (int i = 0; i < static_cast<int>(schema.size()); ++i) {
      if (schema[i].id() == type_id::STRING) {
        // Take ownership of the scratch offsets column so the column object itself is freed.
        // The previous `.release()->release()` chain leaked the column object (and the
        // null-mask device_buffer wrapper) because unique_ptr::release() abandons ownership.
        auto offsets_col = std::move(string_row_offset_columns[string_idx]);
        auto string_data = offsets_col->release();
        output_columns[i] = make_strings_column(num_rows, std::move(string_col_offsets[string_idx]),
                                                std::move(string_data_cols[string_idx]),
                                                std::move(*string_data.null_mask),
                                                cudf::UNKNOWN_NULL_COUNT);
        string_idx++;
      }
    }
  }
  return std::make_unique<table>(std::move(output_columns));
}
std::unique_ptr<table> convert_from_rows_fixed_width_optimized(
    lists_column_view const &input, std::vector<data_type> const &schema,
    rmm::cuda_stream_view stream, rmm::mr::device_memory_resource *mr) {
  // verify that the types are what we expect
  column_view child = input.child();
  auto const list_type = child.type().id();
  CUDF_EXPECTS(list_type == type_id::INT8 || list_type == type_id::UINT8,
               "Only a list of bytes is supported as input");
  auto const num_columns = schema.size();
  if (detail::are_all_fixed_width(schema)) {
    std::vector<size_type> column_start;
    std::vector<size_type> column_size;
    auto const num_rows = input.parent().size();
    auto const size_per_row = detail::compute_fixed_width_layout(schema, column_start, column_size);
    // Ideally we would check that the offsets are all the same, etc. but for now this is probably
    // fine. Widen to 64 bits before multiplying: size_per_row * num_rows can overflow 32-bit
    // size_type for large inputs, which would make this sanity check undefined behavior.
    CUDF_EXPECTS(static_cast<int64_t>(size_per_row) * num_rows == child.size(),
                 "The layout of the data appears to be off");
    auto dev_column_start = make_device_uvector_async(column_start, stream);
    auto dev_column_size = make_device_uvector_async(column_size, stream);
    // Allocate the columns we are going to write into
    std::vector<std::unique_ptr<column>> output_columns;
    std::vector<int8_t *> output_data;
    std::vector<bitmask_type *> output_nm;
    for (int i = 0; i < static_cast<int>(num_columns); i++) {
      auto column =
          make_fixed_width_column(schema[i], num_rows, mask_state::UNINITIALIZED, stream, mr);
      auto mut = column->mutable_view();
      output_data.emplace_back(mut.data<int8_t>());
      output_nm.emplace_back(mut.null_mask());
      output_columns.emplace_back(std::move(column));
    }
    auto dev_output_data = make_device_uvector_async(output_data, stream, mr);
    auto dev_output_nm = make_device_uvector_async(output_nm, stream, mr);
    // Compute a launch configuration whose shared memory can stage a group of whole rows.
    dim3 blocks;
    dim3 threads;
    int shared_size =
        detail::calc_fixed_width_kernel_dims(num_columns, num_rows, size_per_row, blocks, threads);
    hipLaunchKernelGGL(( detail::copy_from_rows_fixed_width_optimized), dim3(blocks), dim3(threads), shared_size, stream.value(),
        num_rows, num_columns, size_per_row, dev_column_start.data(), dev_column_size.data(),
        dev_output_data.data(), dev_output_nm.data(), child.data<int8_t>());
    return std::make_unique<table>(std::move(output_columns));
  } else {
    CUDF_FAIL("Only fixed width types are currently supported");
  }
}
} // namespace jni
} // namespace cudf
/*
* Copyright (c) 2020-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cooperative_groups.h>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/iterator.cuh>
#include <cudf/detail/sequence.hpp>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/detail/utilities/integer_utils.hpp>
#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/lists/lists_column_device_view.cuh>
#include <cudf/scalar/scalar_factories.hpp>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/table/table.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/bit.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/traits.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_buffer.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/binary_search.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/scan.h>
#include <type_traits>
#include "row_conversion.hpp"
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700
#define ASYNC_MEMCPY_SUPPORTED
#endif
#if !defined(__CUDA_ARCH__) || defined(ASYNC_MEMCPY_SUPPORTED)
#include <cuda/barrier>
#endif // #if !defined(__CUDA_ARCH__) || defined(ASYNC_MEMCPY_SUPPORTED)
#include <algorithm>
#include <cstdarg>
#include <cstdint>
#include <iostream>
#include <iterator>
#include <limits>
#include <optional>
#include <tuple>
namespace {
// JCUDF rows are padded out to this byte alignment.
constexpr auto JCUDF_ROW_ALIGNMENT = 8;
// A row batch must fit inside a single cudf byte column, whose size is bounded by size_type.
constexpr auto MAX_BATCH_SIZE = std::numeric_limits<cudf::size_type>::max();
// Number of rows each block processes in the two kernels. Tuned via nsight
constexpr auto NUM_STRING_ROWS_PER_BLOCK_TO_ROWS = 1024;
constexpr auto NUM_STRING_ROWS_PER_BLOCK_FROM_ROWS = 64;
// Lower/upper bounds on the grid size used for the string-copy kernels.
constexpr auto MIN_STRING_BLOCKS = 32;
constexpr auto MAX_STRING_BLOCKS = MAX_BATCH_SIZE;
// Each tile-copy block is launched with this many warps of threads.
constexpr auto NUM_WARPS_IN_BLOCK = 32;
} // anonymous namespace
// needed to suppress warning about cuda::barrier
#pragma nv_diag_suppress static_var_with_dynamic_init
using namespace cudf;
using detail::make_device_uvector_async;
using rmm::device_uvector;
#ifdef ASYNC_MEMCPY_SUPPORTED
using cuda::aligned_size_t;
#else
template <std::size_t> using aligned_size_t = size_t; // Local stub for cuda::aligned_size_t.
#endif // ASYNC_MEMCPY_SUPPORTED
namespace cudf {
namespace jni {
namespace detail {
/*
* This module converts data from row-major to column-major and from column-major to row-major. It
* is a transpose of the data of sorts, but there are a few complicating factors. They are spelled
* out below:
*
* Row Batches:
* The row data has to fit inside a cuDF column, which limits it to 2 gigs currently. The calling
* code attempts to keep the data size under 2 gigs, but due to padding this isn't always the case,
* so being able to break this up into multiple columns is necessary. Internally, this is referred
* to as the row batch, which is a group of rows that will fit into this 2 gig space requirement.
 * There is typically 1 of these batches, but there can be 2.
*
* Async Memcpy:
* The CUDA blocks are using memcpy_async, which allows for the device to schedule memcpy operations
* and then wait on them to complete at a later time with a barrier. On Ampere or later hardware
* there is dedicated hardware to do this copy and on pre-Ampere it should generate the same code
* that a hand-rolled loop would generate, so performance should be the same or better than a
* hand-rolled kernel.
*
* Tile Info:
* Each CUDA block will work on a single tile info before exiting. This single tile consumes all
* available shared memory. The kernel reads data into shared memory and then back out from shared
* memory to device memory via memcpy_async. This kernel is completely memory bound.
*
* Batch Data:
* This structure contains all the row batches and some book-keeping data necessary for the batches
* such as row numbers for the batches.
*
* Tiles:
* The tile info describes a tile of data to process. In a GPU with 48KB this equates to about 221
* bytes in each direction of a table. The tiles are kept as square as possible to attempt to
* coalesce memory operations. The taller a tile is the better coalescing of columns, but row
* coalescing suffers. The wider a tile is the better the row coalescing, but columns coalescing
* suffers. The code attempts to produce a square tile to balance the coalescing. It starts by
* figuring out the optimal byte length and then adding columns to the data until the tile is too
* large. Since rows are different width with different alignment requirements, this isn't typically
* exact. Once a width is found the tiles are generated vertically with that width and height and
* then the process repeats. This means all the tiles will be the same height, but will have
* different widths based on what columns they encompass. Tiles in a vertical row will all have the
* same dimensions.
*
* --------------------------------
* | 4 5.0f || True 8 3 1 |
* | 3 6.0f || False 3 1 1 |
* | 2 7.0f || True 7 4 1 |
* | 1 8.0f || False 2 5 1 |
* --------------------------------
* | 0 9.0f || True 6 7 1 |
* ...
*/
/**
* @brief The CUDA blocks work on one tile_info struct of data.
* This structure defines the workspaces for the blocks.
*
*/
struct tile_info {
  int start_col; // first column covered by this tile (inclusive)
  int start_row; // first row covered by this tile (inclusive)
  int end_col;   // last column covered by this tile (inclusive)
  int end_row;   // last row covered by this tile (inclusive)
  int batch_number; // index of the row batch this tile belongs to
  // Byte width of one tile row in shared memory: the span from the first column's offset to the
  // end of the last column, rounded up to JCUDF row alignment.
  __device__ inline size_type get_shared_row_size(size_type const *const col_offsets,
                                                  size_type const *const col_sizes) const {
    // this calculation is invalid if there are holes in the data such as a variable-width column.
    // It is wrong in a safe way in that it will say this row size is larger than it should be, so
    // we are not losing data we are just not as efficient as we could be with shared memory. This
    // may be a problem if the tile is computed without regard to variable width offset/length sizes
    // in that we overrun shared memory.
    return util::round_up_unsafe(col_offsets[end_col] + col_sizes[end_col] - col_offsets[start_col],
                                 JCUDF_ROW_ALIGNMENT);
  }
  // Extent of the tile; both bounds are inclusive, hence the +1.
  __device__ inline size_type num_cols() const { return end_col - start_col + 1; }
  __device__ inline size_type num_rows() const { return end_row - start_row + 1; }
};
/**
* @brief Returning rows is done in a byte cudf column. This is limited in size by
* `size_type` and so output is broken into batches of rows that fit inside
* this limit.
*
*/
struct row_batch {
  size_type num_bytes; // number of bytes in this batch
  size_type row_count; // number of rows in the batch
  // NOTE(review): appears to hold one offset per row of this batch; some callers construct it
  // empty (size 0) — confirm the contract at the fill sites.
  device_uvector<offset_type> row_offsets; // offsets column of output cudf column
};
/**
* @brief Holds information about the batches of data to be processed
*
*/
struct batch_data {
  device_uvector<size_type> batch_row_offsets; // offsets to each row in incoming data
  // presumably the device-side mirror of batch_row_boundaries below — confirm at the fill site
  device_uvector<size_type> d_batch_row_boundaries; // row numbers for the start of each batch
  std::vector<size_type>
      batch_row_boundaries; // row numbers for the start of each batch: 0, 1500, 2700
  std::vector<row_batch> row_batches; // information about each batch such as byte count
};
/**
* @brief builds row size information for tables that contain strings
*
* @param tbl table from which to compute row size information
* @param fixed_width_and_validity_size size of fixed-width and validity data in this table
* @param stream cuda stream on which to operate
* @return pair of device vector of size_types of the row sizes of the table and a device vector of
* offsets into the string column
*/
std::pair<rmm::device_uvector<size_type>, rmm::device_uvector<strings_column_view::offset_iterator>>
build_string_row_offsets(table_view const &tbl, size_type fixed_width_and_validity_size,
                         rmm::cuda_stream_view stream) {
  auto const num_rows = tbl.num_rows();
  // Per-row byte sizes; start at zero and accumulate string lengths below.
  rmm::device_uvector<size_type> d_row_sizes(num_rows, stream);
  thrust::uninitialized_fill(rmm::exec_policy(stream), d_row_sizes.begin(), d_row_sizes.end(), 0);
  // Gather the offsets iterator of every string column. Fixed-width columns map to nullptr and
  // are filtered out, so only string columns end up in the device vector.
  auto d_offsets_iterators = [&]() {
    std::vector<strings_column_view::offset_iterator> offsets_iterators;
    auto offsets_iter = thrust::make_transform_iterator(
        tbl.begin(), [](auto const &col) -> strings_column_view::offset_iterator {
          if (!is_fixed_width(col.type())) {
            CUDF_EXPECTS(col.type().id() == type_id::STRING, "only string columns are supported!");
            return strings_column_view(col).offsets_begin();
          } else {
            return nullptr;
          }
        });
    std::copy_if(offsets_iter, offsets_iter + tbl.num_columns(),
                 std::back_inserter(offsets_iterators),
                 [](auto const &offset_ptr) { return offset_ptr != nullptr; });
    return make_device_uvector_async(offsets_iterators, stream);
  }();
  auto const num_columns = static_cast<size_type>(d_offsets_iterators.size());
  // One device thread per (string column, row) pair adds that string's length (difference of
  // adjacent offsets) into the row's size. atomicAdd is required because several columns update
  // the same row concurrently.
  thrust::for_each(rmm::exec_policy(stream), thrust::make_counting_iterator(0),
                   thrust::make_counting_iterator(num_columns * num_rows),
                   [d_offsets_iterators = d_offsets_iterators.data(), num_columns, num_rows,
                    d_row_sizes = d_row_sizes.data()] __device__(auto element_idx) {
                     auto const row = element_idx % num_rows;
                     auto const col = element_idx / num_rows;
                     auto const val =
                         d_offsets_iterators[col][row + 1] - d_offsets_iterators[col][row];
                     atomicAdd(&d_row_sizes[row], val);
                   });
  // transform the row sizes to include fixed width size and alignment
  thrust::transform(rmm::exec_policy(stream), d_row_sizes.begin(), d_row_sizes.end(),
                    d_row_sizes.begin(), [fixed_width_and_validity_size] __device__(auto row_size) {
                      return util::round_up_unsafe(fixed_width_and_validity_size + row_size,
                                                   JCUDF_ROW_ALIGNMENT);
                    });
  return {std::move(d_row_sizes), std::move(d_offsets_iterators)};
}
/**
* @brief functor to return the offset of a row in a table with string columns
*
*/
struct string_row_offset_functor {
  string_row_offset_functor(device_span<size_type const> d_row_offsets)
      : d_row_offsets(d_row_offsets){};
  // Return the precomputed byte offset of `row_number`. The second argument (the tile's first
  // row, used by the fixed-width functor) is unused here since offsets were computed per row.
  __device__ inline size_type operator()(int row_number, int) const {
    return d_row_offsets[row_number];
  }
  device_span<size_type const> d_row_offsets; // byte offset of each row in the row data
};
/**
* @brief functor to return the offset of a row in a table with only fixed-width columns
*
*/
struct fixed_width_row_offset_functor {
  fixed_width_row_offset_functor(size_type fixed_width_only_row_size)
      : _fixed_width_only_row_size(fixed_width_only_row_size){};
  // Offset of `row_number` relative to the start of its tile: every row has the same width,
  // so it is simply (rows into the tile) * (row size).
  __device__ inline size_type operator()(int row_number, int tile_row_start) const {
    return (row_number - tile_row_start) * _fixed_width_only_row_size;
  }
  size_type _fixed_width_only_row_size; // byte width of every row
};
/**
* @brief Copies data from row-based JCUDF format to column-based cudf format.
*
* This optimized version of the conversion is faster for fixed-width tables that do not have more
* than 100 columns.
*
* @param num_rows number of rows in the incoming table
* @param num_columns number of columns in the incoming table
* @param row_size length in bytes of each row
* @param input_offset_in_row offset to each row of data
* @param num_bytes total number of bytes in the incoming data
* @param output_data array of pointers to the output data
* @param output_nm array of pointers to the output null masks
* @param input_data pointing to the incoming row data
*/
__global__ void
copy_from_rows_fixed_width_optimized(const size_type num_rows, const size_type num_columns,
                                     const size_type row_size, const size_type *input_offset_in_row,
                                     const size_type *num_bytes, int8_t **output_data,
                                     bitmask_type **output_nm, const int8_t *input_data) {
  // We are going to copy the data in two passes.
  // The first pass copies a chunk of data into shared memory.
  // The second pass copies that chunk from shared memory out to the final location.
  // Because shared memory is limited we copy a subset of the rows at a time.
  // For simplicity we will refer to this as a row_group
  // In practice we have found writing more than 4 columns of data per thread
  // results in performance loss. As such we are using a 2 dimensional
  // kernel in terms of threads, but not in terms of blocks. Columns are
  // controlled by the y dimension (there is no y dimension in blocks). Rows
  // are controlled by the x dimension (there are multiple blocks in the x
  // dimension).
  size_type const rows_per_group = blockDim.x;
  size_type const row_group_start = blockIdx.x;
  size_type const row_group_stride = gridDim.x;
  size_type const row_group_end = (num_rows + rows_per_group - 1) / rows_per_group + 1;
  extern __shared__ int8_t shared_data[];
  // Because we are copying fixed width only data and we stride the rows
  // this thread will always start copying from shared data in the same place
  int8_t *row_tmp = &shared_data[row_size * threadIdx.x];
  // Validity bytes are stored after the last fixed-width column of the row.
  int8_t *row_vld_tmp = &row_tmp[input_offset_in_row[num_columns - 1] + num_bytes[num_columns - 1]];
  for (auto row_group_index = row_group_start; row_group_index < row_group_end;
       row_group_index += row_group_stride) {
    // Step 1: Copy the data into shared memory
    // We know row_size is always aligned with and a multiple of int64_t;
    int64_t *long_shared = reinterpret_cast<int64_t *>(shared_data);
    int64_t const *long_input = reinterpret_cast<int64_t const *>(input_data);
    // All threads of the block cooperate on the 8-byte-wide staging copy.
    auto const shared_output_index = threadIdx.x + (threadIdx.y * blockDim.x);
    auto const shared_output_stride = blockDim.x * blockDim.y;
    // NOTE(review): std::min in device code relies on relaxed-constexpr support being enabled
    // in the build — confirm compile flags.
    auto const row_index_end = std::min(num_rows, ((row_group_index + 1) * rows_per_group));
    auto const num_rows_in_group = row_index_end - (row_group_index * rows_per_group);
    auto const shared_length = row_size * num_rows_in_group;
    size_type const shared_output_end = shared_length / sizeof(int64_t);
    auto const start_input_index = (row_size * row_group_index * rows_per_group) / sizeof(int64_t);
    for (size_type shared_index = shared_output_index; shared_index < shared_output_end;
         shared_index += shared_output_stride) {
      long_shared[shared_index] = long_input[start_input_index + shared_index];
    }
    // Wait for all of the data to be in shared memory
    __syncthreads();
    // Step 2 copy the data back out
    // Within the row group there should be 1 thread for each row. This is a
    // requirement for launching the kernel
    auto const row_index = (row_group_index * rows_per_group) + threadIdx.x;
    // But we might not use all of the threads if the number of rows does not go
    // evenly into the thread count. We don't want those threads to exit yet
    // because we may need them to copy data in for the next row group.
    uint32_t active_mask = __ballot_sync(0xffff'ffffu, row_index < num_rows);
    if (row_index < num_rows) {
      auto const col_index_start = threadIdx.y;
      auto const col_index_stride = blockDim.y;
      for (auto col_index = col_index_start; col_index < num_columns;
           col_index += col_index_stride) {
        auto const col_size = num_bytes[col_index];
        // Source of this column's value inside the staged row in shared memory.
        int8_t const *col_tmp = &(row_tmp[input_offset_in_row[col_index]]);
        int8_t *col_output = output_data[col_index];
        // Scatter the value into the columnar output; sizes 1/2/4/8 use one aligned store.
        switch (col_size) {
          case 1: {
            col_output[row_index] = *col_tmp;
            break;
          }
          case 2: {
            int16_t *short_col_output = reinterpret_cast<int16_t *>(col_output);
            short_col_output[row_index] = *reinterpret_cast<const int16_t *>(col_tmp);
            break;
          }
          case 4: {
            int32_t *int_col_output = reinterpret_cast<int32_t *>(col_output);
            int_col_output[row_index] = *reinterpret_cast<const int32_t *>(col_tmp);
            break;
          }
          case 8: {
            int64_t *long_col_output = reinterpret_cast<int64_t *>(col_output);
            long_col_output[row_index] = *reinterpret_cast<const int64_t *>(col_tmp);
            break;
          }
          default: {
            auto const output_offset = col_size * row_index;
            // TODO this should just not be supported for fixed width columns, but just in case...
            for (auto b = 0; b < col_size; b++) {
              col_output[b + output_offset] = col_tmp[b];
            }
            break;
          }
        }
        // Copy this column's validity bit: each active thread contributes its row's bit via a
        // warp ballot and the thread whose row index starts a 32-row word writes the packed word.
        bitmask_type *nm = output_nm[col_index];
        int8_t *valid_byte = &row_vld_tmp[col_index / 8];
        size_type byte_bit_offset = col_index % 8;
        int predicate = *valid_byte & (1 << byte_bit_offset);
        uint32_t bitmask = __ballot_sync(active_mask, predicate);
        if (row_index % 32 == 0) {
          nm[word_index(row_index)] = bitmask;
        }
      } // end column loop
    }   // end row copy
    // wait for the row_group to be totally copied before starting on the next row group
    __syncthreads();
  }
}
/**
 * @brief Copy a window of fixed-width column data into JCUDF row format.
 *
 * Launch expectations (from the code below): a 2D thread block where threadIdx.x
 * selects the row within a row group and threadIdx.y strides over columns, a 1D
 * grid striding over row groups, and dynamic shared memory of at least
 * blockDim.x * row_size bytes. row_size must be a multiple of sizeof(int64_t)
 * (the write-out pass copies 8 bytes at a time).
 */
__global__ void copy_to_rows_fixed_width_optimized(
    const size_type start_row, const size_type num_rows, const size_type num_columns,
    const size_type row_size, const size_type *output_offset_in_row, const size_type *num_bytes,
    const int8_t **input_data, const bitmask_type **input_nm, int8_t *output_data) {
  // We are going to copy the data in two passes.
  // The first pass copies a chunk of data into shared memory.
  // The second pass copies that chunk from shared memory out to the final location.
  // Because shared memory is limited we copy a subset of the rows at a time.
  // We do not support copying a subset of the columns in a row yet, so we don't
  // currently support a row that is wider than shared memory.
  // For simplicity we will refer to this as a row_group
  // In practice we have found reading more than 4 columns of data per thread
  // results in performance loss. As such we are using a 2 dimensional
  // kernel in terms of threads, but not in terms of blocks. Columns are
  // controlled by the y dimension (there is no y dimension in blocks). Rows
  // are controlled by the x dimension (there are multiple blocks in the x
  // dimension).
  size_type rows_per_group = blockDim.x;
  size_type row_group_start = blockIdx.x;
  size_type row_group_stride = gridDim.x;
  // NOTE(review): this is ceil(num_rows / rows_per_group) + 1, i.e. one extra
  // trailing group. That group copies nothing (num_rows_in_group <= 0 below),
  // so it is harmless but costs one idle pass — confirm the +1 is intentional.
  size_type row_group_end = (num_rows + rows_per_group - 1) / rows_per_group + 1;
  extern __shared__ int8_t shared_data[];
  // Because we are copying fixed width only data and we stride the rows
  // this thread will always start copying to shared data in the same place
  int8_t *row_tmp = &shared_data[row_size * threadIdx.x];
  // Validity bytes live immediately after the last fixed-width column of the row.
  int8_t *row_vld_tmp =
      &row_tmp[output_offset_in_row[num_columns - 1] + num_bytes[num_columns - 1]];
  for (size_type row_group_index = row_group_start; row_group_index < row_group_end;
       row_group_index += row_group_stride) {
    // Within the row group there should be 1 thread for each row. This is a
    // requirement for launching the kernel
    size_type row_index = start_row + (row_group_index * rows_per_group) + threadIdx.x;
    // But we might not use all of the threads if the number of rows does not go
    // evenly into the thread count. We don't want those threads to exit yet
    // because we may need them to copy data back out.
    if (row_index < (start_row + num_rows)) {
      size_type col_index_start = threadIdx.y;
      size_type col_index_stride = blockDim.y;
      for (size_type col_index = col_index_start; col_index < num_columns;
           col_index += col_index_stride) {
        size_type col_size = num_bytes[col_index];
        int8_t *col_tmp = &(row_tmp[output_offset_in_row[col_index]]);
        const int8_t *col_input = input_data[col_index];
        // Copy one element column->row, using the widest load the size allows.
        switch (col_size) {
          case 1: {
            *col_tmp = col_input[row_index];
            break;
          }
          case 2: {
            const int16_t *short_col_input = reinterpret_cast<const int16_t *>(col_input);
            *reinterpret_cast<int16_t *>(col_tmp) = short_col_input[row_index];
            break;
          }
          case 4: {
            const int32_t *int_col_input = reinterpret_cast<const int32_t *>(col_input);
            *reinterpret_cast<int32_t *>(col_tmp) = int_col_input[row_index];
            break;
          }
          case 8: {
            const int64_t *long_col_input = reinterpret_cast<const int64_t *>(col_input);
            *reinterpret_cast<int64_t *>(col_tmp) = long_col_input[row_index];
            break;
          }
          default: {
            size_type input_offset = col_size * row_index;
            // TODO this should just not be supported for fixed width columns, but just in case...
            for (size_type b = 0; b < col_size; b++) {
              col_tmp[b] = col_input[b + input_offset];
            }
            break;
          }
        }
        // atomicOr only works on 32 bit or 64 bit aligned values, and not byte aligned
        // so we have to rewrite the addresses to make sure that it is 4 byte aligned
        int8_t *valid_byte = &row_vld_tmp[col_index / 8];
        size_type byte_bit_offset = col_index % 8;
        uint64_t fixup_bytes = reinterpret_cast<uint64_t>(valid_byte) % 4;
        int32_t *valid_int = reinterpret_cast<int32_t *>(valid_byte - fixup_bytes);
        size_type int_bit_offset = byte_bit_offset + (fixup_bytes * 8);
        // Now copy validity for the column
        if (input_nm[col_index]) {
          if (bit_is_set(input_nm[col_index], row_index)) {
            atomicOr_block(valid_int, 1 << int_bit_offset);
          } else {
            atomicAnd_block(valid_int, ~(1 << int_bit_offset));
          }
        } else {
          // It is valid so just set the bit
          atomicOr_block(valid_int, 1 << int_bit_offset);
        }
      } // end column loop
    }   // end row copy
    // wait for the row_group to be totally copied into shared memory
    __syncthreads();
    // Step 2: Copy the data back out
    // We know row_size is always aligned with and a multiple of int64_t;
    int64_t *long_shared = reinterpret_cast<int64_t *>(shared_data);
    int64_t *long_output = reinterpret_cast<int64_t *>(output_data);
    // Flatten the 2D thread block into a 1D id for the 8-byte-chunk write-out.
    size_type shared_input_index = threadIdx.x + (threadIdx.y * blockDim.x);
    size_type shared_input_stride = blockDim.x * blockDim.y;
    size_type row_index_end = ((row_group_index + 1) * rows_per_group);
    if (row_index_end > num_rows) {
      row_index_end = num_rows;
    }
    size_type num_rows_in_group = row_index_end - (row_group_index * rows_per_group);
    size_type shared_length = row_size * num_rows_in_group;
    size_type shared_input_end = shared_length / sizeof(int64_t);
    size_type start_output_index = (row_size * row_group_index * rows_per_group) / sizeof(int64_t);
    for (size_type shared_index = shared_input_index; shared_index < shared_input_end;
         shared_index += shared_input_stride) {
      long_output[start_output_index + shared_index] = long_shared[shared_index];
    }
    // Ensure shared memory is fully drained before the next group overwrites it.
    __syncthreads();
    // Go for the next round
  }
}
// MEMCPY abstracts the per-element copy used inside these kernels: when the
// toolkit/arch supports cuda::memcpy_async, the copy is tracked by the supplied
// block-scoped barrier; otherwise it degrades to a plain synchronous memcpy and
// the barrier argument is dropped by the macro expansion.
#ifdef ASYNC_MEMCPY_SUPPORTED
#define MEMCPY(dst, src, size, barrier) cuda::memcpy_async(dst, src, size, barrier)
#else
#define MEMCPY(dst, src, size, barrier) memcpy(dst, src, size)
#endif // ASYNC_MEMCPY_SUPPORTED
/**
* @brief copy data from cudf columns into JCUDF format, which is row-based
*
* @tparam RowOffsetFunctor iterator that gives the size of a specific row of the table.
* @param num_rows total number of rows in the table
* @param num_columns total number of columns in the table
* @param shmem_used_per_tile shared memory amount each `tile_info` is using
* @param tile_infos span of `tile_info` structs the define the work
* @param input_data pointer to raw table data
* @param col_sizes array of sizes for each element in a column - one per column
* @param col_offsets offset into input data row for each column's start
* @param row_offsets offset to a specific row in the output data
* @param batch_row_boundaries row numbers for batch starts
* @param output_data pointer to output data
*
*/
template <typename RowOffsetFunctor>
__global__ void copy_to_rows(const size_type num_rows, const size_type num_columns,
                             const size_type shmem_used_per_tile,
                             device_span<const tile_info> tile_infos, const int8_t **input_data,
                             const size_type *col_sizes, const size_type *col_offsets,
                             RowOffsetFunctor row_offsets, size_type const *batch_row_boundaries,
                             int8_t **output_data) {
  // We are going to copy the data in two passes.
  // The first pass copies a chunk of data into shared memory.
  // The second pass copies that chunk from shared memory out to the final location.
  // Because shared memory is limited we copy a subset of the rows at a time.
  // This has been broken up for us in the tile_info struct, so we don't have
  // any calculation to do here, but it is important to note.
  auto const group = cooperative_groups::this_thread_block();
  auto const warp = cooperative_groups::tiled_partition<cudf::detail::warp_size>(group);
  extern __shared__ int8_t shared_data[];
#ifdef ASYNC_MEMCPY_SUPPORTED
  // A single block-scoped barrier tracks completion of all async row copies
  // issued in the write-out loop below.
  __shared__ cuda::barrier<cuda::thread_scope_block> tile_barrier;
  if (group.thread_rank() == 0) {
    init(&tile_barrier, group.size());
  }
  group.sync();
#endif // ASYNC_MEMCPY_SUPPORTED
  // One tile of work per block.
  auto const tile = tile_infos[blockIdx.x];
  auto const num_tile_cols = tile.num_cols();
  auto const num_tile_rows = tile.num_rows();
  auto const tile_row_size = tile.get_shared_row_size(col_offsets, col_sizes);
  auto const starting_column_offset = col_offsets[tile.start_col];
  // to do the copy we need to do n column copies followed by m element copies OR we have to do m
  // element copies followed by r row copies. When going from column to row it is much easier to
  // copy by elements first otherwise we would need a running total of the column sizes for our
  // tile, which isn't readily available. This makes it more appealing to copy element-wise from
  // input data into shared matching the end layout and do row-based memcopies out.
  // read each column across the tile
  // each warp takes a column with each thread of a warp taking a row this is done with cooperative
  // groups where each column is chosen by the tiled partition and each thread in that partition
  // works on a row
  for (int relative_col = warp.meta_group_rank(); relative_col < num_tile_cols;
       relative_col += warp.meta_group_size()) {
    auto const absolute_col = relative_col + tile.start_col;
    auto const col_size = col_sizes[absolute_col];
    auto const col_offset = col_offsets[absolute_col];
    auto const relative_col_offset = col_offset - starting_column_offset;
    auto const col_ptr = input_data[absolute_col];
    if (col_ptr == nullptr) {
      // variable-width data column
      continue;
    }
    for (int relative_row = warp.thread_rank(); relative_row < num_tile_rows;
         relative_row += warp.size()) {
      // (A redundant `relative_row >= num_tile_rows` guard was removed here; the
      // loop condition already bounds relative_row.)
      auto const absolute_row = relative_row + tile.start_row;
      auto const shared_offset = relative_row * tile_row_size + relative_col_offset;
      auto const input_src = col_ptr + col_size * absolute_row;
      // copy the element from global memory, using the widest load/store the
      // element size allows
      switch (col_size) {
        case 2: {
          const int16_t *short_col_input = reinterpret_cast<const int16_t *>(input_src);
          *reinterpret_cast<int16_t *>(&shared_data[shared_offset]) = *short_col_input;
          break;
        }
        case 4: {
          const int32_t *int_col_input = reinterpret_cast<const int32_t *>(input_src);
          *reinterpret_cast<int32_t *>(&shared_data[shared_offset]) = *int_col_input;
          break;
        }
        case 8: {
          const int64_t *long_col_input = reinterpret_cast<const int64_t *>(input_src);
          *reinterpret_cast<int64_t *>(&shared_data[shared_offset]) = *long_col_input;
          break;
        }
        case 1: shared_data[shared_offset] = *input_src; break;
        default: {
          // Sizes other than 1/2/4/8 should not occur for fixed-width columns,
          // but just in case fall back to a byte-by-byte copy of the element.
          // Fixed: the previous loop rewrote the same byte (shared_data[shared_offset]
          // = *input_src) col_size times instead of copying col_size bytes.
          for (int i = 0; i < col_size; ++i) {
            shared_data[shared_offset + i] = input_src[i];
          }
          break;
        }
      }
    }
  }
  auto const tile_output_buffer = output_data[tile.batch_number];
  auto const row_batch_start = tile.batch_number == 0 ? 0 : batch_row_boundaries[tile.batch_number];
  // no async copies above waiting on the barrier, so we sync the group here to ensure all copies to
  // shared memory are completed before copying data out
  group.sync();
  // each warp takes a row
  for (int copy_row = warp.meta_group_rank(); copy_row < tile.num_rows();
       copy_row += warp.meta_group_size()) {
    auto const src = &shared_data[tile_row_size * copy_row];
    auto const dst = tile_output_buffer + row_offsets(copy_row + tile.start_row, row_batch_start) +
                     starting_column_offset;
#ifdef ASYNC_MEMCPY_SUPPORTED
    cuda::memcpy_async(warp, dst, src, tile_row_size, tile_barrier);
#else
    for (int b = warp.thread_rank(); b < tile_row_size; b += warp.size()) {
      dst[b] = src[b];
    }
#endif
  }
#ifdef ASYNC_MEMCPY_SUPPORTED
  // wait on the last copies to complete
  tile_barrier.arrive_and_wait();
#else
  group.sync();
#endif // ASYNC_MEMCPY_SUPPORTED
}
/**
* @brief copy data from row-based format to cudf columns
*
* @tparam RowOffsetFunctor iterator that gives the size of a specific row of the table.
* @param num_rows total number of rows in the table
* @param num_columns total number of columns in the table
* @param shmem_used_per_tile amount of shared memory that is used by a tile
* @param row_offsets offset to a specific row in the output data
* @param batch_row_boundaries row numbers for batch starts
* @param output_data pointer to output data, partitioned by data size
* @param validity_offsets offset into input data row for validity data
* @param tile_infos information about the tiles of work
* @param input_nm pointer to input data
*
*/
template <typename RowOffsetFunctor>
__global__ void
copy_validity_to_rows(const size_type num_rows, const size_type num_columns,
                      const size_type shmem_used_per_tile, RowOffsetFunctor row_offsets,
                      size_type const *batch_row_boundaries, int8_t **output_data,
                      const size_type validity_offset, device_span<const tile_info> tile_infos,
                      const bitmask_type **input_nm) {
  extern __shared__ int8_t shared_data[];
  // each thread of warp reads a single int32 of validity - so we read 128 bytes then ballot_sync
  // the bits and write the result to shmem after we fill shared mem memcpy it out in a blob.
  auto const group = cooperative_groups::this_thread_block();
  auto const warp = cooperative_groups::tiled_partition<cudf::detail::warp_size>(group);
#ifdef ASYNC_MEMCPY_SUPPORTED
  // Initialize cuda barriers for each tile.
  __shared__ cuda::barrier<cuda::thread_scope_block> shared_tile_barrier;
  if (group.thread_rank() == 0) {
    init(&shared_tile_barrier, group.size());
  }
  group.sync();
#endif // ASYNC_MEMCPY_SUPPORTED
  // One tile of work per block.
  auto tile = tile_infos[blockIdx.x];
  auto const num_tile_cols = tile.num_cols();
  auto const num_tile_rows = tile.num_rows();
  auto const threads_per_warp = warp.size();
  auto const rows_per_read = cudf::detail::size_in_bits<bitmask_type>();
  // The tile is split into sections of (threads_per_warp cols) x (rows_per_read rows).
  auto const num_sections_x = util::div_rounding_up_unsafe(num_tile_cols, threads_per_warp);
  auto const num_sections_y = util::div_rounding_up_unsafe(num_tile_rows, rows_per_read);
  // Bytes of validity per output row, padded to the JCUDF row alignment.
  auto const validity_data_row_length = util::round_up_unsafe(
      util::div_rounding_up_unsafe(num_tile_cols, CHAR_BIT), JCUDF_ROW_ALIGNMENT);
  auto const total_sections = num_sections_x * num_sections_y;
  // the tile is divided into sections. A warp operates on a section at a time.
  for (int my_section_idx = warp.meta_group_rank(); my_section_idx < total_sections;
       my_section_idx += warp.meta_group_size()) {
    // convert to rows and cols
    auto const section_x = my_section_idx % num_sections_x;
    auto const section_y = my_section_idx / num_sections_x;
    // Each lane of the warp owns one column of the section.
    auto const relative_col = section_x * threads_per_warp + warp.thread_rank();
    auto const relative_row = section_y * rows_per_read;
    auto const absolute_col = relative_col + tile.start_col;
    auto const absolute_row = relative_row + tile.start_row;
    auto const participating = absolute_col < num_columns && absolute_row < num_rows;
    // Explicit mask of in-bounds lanes, required for the ballots below.
    auto const participation_mask = __ballot_sync(0xFFFF'FFFFu, participating);
    if (participating) {
      // A null input mask means "all valid" — substitute an all-ones word.
      auto my_data = input_nm[absolute_col] != nullptr ?
                         input_nm[absolute_col][word_index(absolute_row)] :
                         std::numeric_limits<uint32_t>::max();
      // every thread that is participating in the warp has 4 bytes, but it's column-based data and
      // we need it in row-based. So we shuffle the bits around with ballot_sync to make the bytes
      // we actually write.
      bitmask_type dw_mask = 0x1;
      for (int i = 0; i < threads_per_warp && relative_row + i < num_rows; ++i, dw_mask <<= 1) {
        // Ballot gathers bit i of each lane's word: one output word covers 32 columns of row i.
        auto validity_data = __ballot_sync(participation_mask, my_data & dw_mask);
        // lead thread in each warp writes data
        auto const validity_write_offset =
            validity_data_row_length * (relative_row + i) + (relative_col / CHAR_BIT);
        if (warp.thread_rank() == 0) {
          *reinterpret_cast<bitmask_type *>(&shared_data[validity_write_offset]) = validity_data;
        }
      }
    }
  }
  auto const output_data_base =
      output_data[tile.batch_number] + validity_offset + tile.start_col / CHAR_BIT;
  // each warp copies a row at a time
  auto const row_bytes = util::div_rounding_up_unsafe(num_tile_cols, CHAR_BIT);
  auto const row_batch_start = tile.batch_number == 0 ? 0 : batch_row_boundaries[tile.batch_number];
  // make sure entire tile has finished copy
  // Note that this was copied from above just under the for loop due to nsight complaints about
  // divergent threads
  group.sync();
  for (int relative_row = warp.meta_group_rank(); relative_row < num_tile_rows;
       relative_row += warp.meta_group_size()) {
    auto const src = &shared_data[validity_data_row_length * relative_row];
    auto const dst = output_data_base + row_offsets(relative_row + tile.start_row, row_batch_start);
#ifdef ASYNC_MEMCPY_SUPPORTED
    cuda::memcpy_async(warp, dst, src, row_bytes, shared_tile_barrier);
#else
    for (int b = warp.thread_rank(); b < row_bytes; b += warp.size()) {
      dst[b] = src[b];
    }
#endif
  }
#ifdef ASYNC_MEMCPY_SUPPORTED
  // wait for tile of data to arrive
  shared_tile_barrier.arrive_and_wait();
#else
  group.sync();
#endif // ASYNC_MEMCPY_SUPPORTED
}
/**
* @brief kernel to copy string data to JCUDF row format
*
* @tparam RowOffsetFunctor iterator for row offsets into the destination data
* @param num_rows number of rows in this portion of the table
* @param num_variable_columns number of columns of variable-width data
* @param variable_input_data variable width data column pointers
* @param variable_col_output_offsets output offset information for variable-width columns
* @param variable_col_offsets input offset information for variable-width columns
* @param fixed_width_row_size offset to variable-width data in a row
* @param row_offsets offsets for each row in output data
* @param batch_row_offset row start for this batch
* @param output_data pointer to output data for this batch
*
*/
template <typename RowOffsetFunctor>
__global__ void copy_strings_to_rows(size_type const num_rows, size_type const num_variable_columns,
                                     int8_t const **variable_input_data,
                                     size_type const *variable_col_output_offsets,
                                     size_type const **variable_col_offsets,
                                     size_type fixed_width_row_size, RowOffsetFunctor row_offsets,
                                     size_type const batch_row_offset, int8_t *output_data) {
  // Each block will take a group of rows controlled by NUM_STRING_ROWS_PER_BLOCK_TO_ROWS. Each warp
  // will copy a row at a time. The base thread will first go through column data and fill out
  // offset/length information for the column. Then all threads of the warp will participate in the
  // memcpy of the string data.
  auto const my_block = cooperative_groups::this_thread_block();
  auto const warp = cooperative_groups::tiled_partition<cudf::detail::warp_size>(my_block);
#ifdef ASYNC_MEMCPY_SUPPORTED
  cuda::barrier<cuda::thread_scope_block> block_barrier;
#endif
  // Each warp starts at a distinct row within the block's row group.
  auto const start_row =
      blockIdx.x * NUM_STRING_ROWS_PER_BLOCK_TO_ROWS + warp.meta_group_rank() + batch_row_offset;
  auto const end_row =
      std::min(num_rows, static_cast<size_type>(start_row + NUM_STRING_ROWS_PER_BLOCK_TO_ROWS));
  for (int row = start_row; row < end_row; row += warp.meta_group_size()) {
    auto offset = fixed_width_row_size; // initial offset to variable-width data
    auto const base_row_offset = row_offsets(row, 0);
    for (int col = 0; col < num_variable_columns; ++col) {
      // String extent from the column's offsets: [start, next_start).
      auto const string_start_offset = variable_col_offsets[col][row];
      auto const string_length = variable_col_offsets[col][row + 1] - string_start_offset;
      if (warp.thread_rank() == 0) {
        // write the offset/length to column
        uint32_t *output_dest = reinterpret_cast<uint32_t *>(
            &output_data[base_row_offset + variable_col_output_offsets[col]]);
        output_dest[0] = offset;
        output_dest[1] = string_length;
      }
      auto string_output_dest = &output_data[base_row_offset + offset];
      auto string_output_src = &variable_input_data[col][string_start_offset];
      // All lanes must observe the offset/length write before the cooperative copy.
      warp.sync();
#ifdef ASYNC_MEMCPY_SUPPORTED
      cuda::memcpy_async(warp, string_output_dest, string_output_src, string_length, block_barrier);
#else
      for (int c = warp.thread_rank(); c < string_length; c += warp.size()) {
        string_output_dest[c] = string_output_src[c];
      }
#endif
      // Variable-width data is packed back-to-back after the fixed-width section.
      offset += string_length;
    }
  }
}
/**
* @brief copy data from row-based format to cudf columns
*
* @tparam RowOffsetFunctor iterator that gives the size of a specific row of the table.
* @param num_rows total number of rows in the table
* @param num_columns total number of columns in the table
* @param shmem_used_per_tile amount of shared memory that is used by a tile
* @param row_offsets offset to a specific row in the input data
* @param batch_row_boundaries row numbers for batch starts
* @param output_data pointers to column data
* @param col_sizes array of sizes for each element in a column - one per column
* @param col_offsets offset into input data row for each column's start
* @param tile_infos information about the tiles of work
* @param input_data pointer to input data
*
*/
template <typename RowOffsetFunctor>
__global__ void copy_from_rows(const size_type num_rows, const size_type num_columns,
                               const size_type shmem_used_per_tile, RowOffsetFunctor row_offsets,
                               size_type const *batch_row_boundaries, int8_t **output_data,
                               const size_type *col_sizes, const size_type *col_offsets,
                               device_span<const tile_info> tile_infos, const int8_t *input_data) {
  // We are going to copy the data in two passes.
  // The first pass copies a chunk of data into shared memory.
  // The second pass copies that chunk from shared memory out to the final location.
  // Because shared memory is limited we copy a subset of the rows at a time. This has been broken
  // up for us in the tile_info struct, so we don't have any calculation to do here, but it is
  // important to note.
  // To speed up some of the random access memory we do, we copy col_sizes and col_offsets to shared
  // memory for each of the tiles that we work on
  auto const group = cooperative_groups::this_thread_block();
  auto const warp = cooperative_groups::tiled_partition<cudf::detail::warp_size>(group);
  extern __shared__ int8_t shared[];
#ifdef ASYNC_MEMCPY_SUPPORTED
  // Initialize cuda barriers for each tile.
  __shared__ cuda::barrier<cuda::thread_scope_block> tile_barrier;
  if (group.thread_rank() == 0) {
    init(&tile_barrier, group.size());
  }
  group.sync();
#endif // ASYNC_MEMCPY_SUPPORTED
  // Phase 1: stage this block's tile (one JCUDF row per shared-memory row) into shared memory.
  {
    auto const fetch_tile = tile_infos[blockIdx.x];
    auto const fetch_tile_start_row = fetch_tile.start_row;
    auto const starting_col_offset = col_offsets[fetch_tile.start_col];
    auto const fetch_tile_row_size = fetch_tile.get_shared_row_size(col_offsets, col_sizes);
    auto const row_batch_start =
        fetch_tile.batch_number == 0 ? 0 : batch_row_boundaries[fetch_tile.batch_number];
    // Each warp copies whole rows; end_row is inclusive here.
    for (int absolute_row = warp.meta_group_rank() + fetch_tile.start_row;
         absolute_row <= fetch_tile.end_row; absolute_row += warp.meta_group_size()) {
      warp.sync();
      auto shared_offset = (absolute_row - fetch_tile_start_row) * fetch_tile_row_size;
      auto dst = &shared[shared_offset];
      auto src = &input_data[row_offsets(absolute_row, row_batch_start) + starting_col_offset];
      // copy the data
#ifdef ASYNC_MEMCPY_SUPPORTED
      cuda::memcpy_async(warp, dst, src, fetch_tile_row_size, tile_barrier);
#else
      for (int b = warp.thread_rank(); b < fetch_tile_row_size; b += warp.size()) {
        dst[b] = src[b];
      }
#endif
    }
  }
  // Phase 2: scatter elements from the staged rows out to the destination columns.
  {
    auto const tile = tile_infos[blockIdx.x];
    auto const rows_in_tile = tile.num_rows();
    auto const cols_in_tile = tile.num_cols();
    auto const tile_row_size = tile.get_shared_row_size(col_offsets, col_sizes);
#ifdef ASYNC_MEMCPY_SUPPORTED
    // ensure our data is ready
    tile_barrier.arrive_and_wait();
#else
    group.sync();
#endif // ASYNC_MEMCPY_SUPPORTED
    // Now we copy from shared memory to final destination. The data is laid out in rows in shared
    // memory, so the reads for a column will be "vertical". Because of this and the different sizes
    // for each column, this portion is handled on row/column basis. to prevent each thread working
    // on a single row and also to ensure that all threads can do work in the case of more threads
    // than rows, we do a global index instead of a double for loop with col/row.
    for (int relative_row = warp.thread_rank(); relative_row < rows_in_tile;
         relative_row += warp.size()) {
      auto const absolute_row = relative_row + tile.start_row;
      auto const shared_memory_row_offset = tile_row_size * relative_row;
      for (int relative_col = warp.meta_group_rank(); relative_col < cols_in_tile;
           relative_col += warp.meta_group_size()) {
        auto const absolute_col = relative_col + tile.start_col;
        auto const shared_memory_offset =
            col_offsets[absolute_col] - col_offsets[tile.start_col] + shared_memory_row_offset;
        auto const column_size = col_sizes[absolute_col];
        int8_t *shmem_src = &shared[shared_memory_offset];
        int8_t *dst = &output_data[absolute_col][absolute_row * column_size];
        // MEMCPY ignores the barrier argument in non-async builds, so this is
        // valid even though tile_barrier only exists when ASYNC_MEMCPY_SUPPORTED.
        MEMCPY(dst, shmem_src, column_size, tile_barrier);
      }
    }
  }
#ifdef ASYNC_MEMCPY_SUPPORTED
  // wait on the last copies to complete
  tile_barrier.arrive_and_wait();
#else
  group.sync();
#endif // ASYNC_MEMCPY_SUPPORTED
}
/**
* @brief copy data from row-based format to cudf columns
*
* @tparam RowOffsetFunctor iterator that gives the size of a specific row of the table.
* @param num_rows total number of rows in the table
* @param num_columns total number of columns in the table
* @param shmem_used_per_tile amount of shared memory that is used by a tile
* @param row_offsets offset to the first column a specific row in the input data
* @param batch_row_boundaries row numbers for batch starts
* @param output_nm pointers to null masks for columns
* @param validity_offsets offset into input data row for validity data
* @param tile_infos information about the tiles of work
* @param input_data pointer to input data
*
*/
template <typename RowOffsetFunctor>
__global__ void
copy_validity_from_rows(const size_type num_rows, const size_type num_columns,
                        const size_type shmem_used_per_tile, RowOffsetFunctor row_offsets,
                        size_type const *batch_row_boundaries, bitmask_type **output_nm,
                        const size_type validity_offset, device_span<const tile_info> tile_infos,
                        const int8_t *input_data) {
  extern __shared__ int8_t shared[];
  using cudf::detail::warp_size;
  // each thread of warp reads a single byte of validity - so we read 32 bytes then ballot_sync the
  // bits and write the result to shmem after we fill shared mem memcpy it out in a blob. Probably
  // need knobs for number of rows vs columns to balance read/write
  // C0  C1  C2  C3  C4  C5  C6  C7
  // R0 1   0   1   0   0   1   1   0       <-- thread 0 reads byte r0
  // R1 1   1   1   1   1   1   1   0       <-- thread 1 reads byte r1
  // R2 0   0   1   0   0   1   1   0       <-- thread 2 reads byte r2
  // ...
  // R31 1  1   1   1   1   1   1   1       <-- thread 31 reads byte r31
  //     ^
  //     |  1 bit of each input byte, by column, are swizzled into a single 32 bit word via
  //     __ballot_sync, representing 32 rows of that column.
  auto const group = cooperative_groups::this_thread_block();
  auto const warp = cooperative_groups::tiled_partition<cudf::detail::warp_size>(group);
#ifdef ASYNC_MEMCPY_SUPPORTED
  // Initialize cuda barriers for each tile.
  __shared__ cuda::barrier<cuda::thread_scope_block> shared_tile_barrier;
  if (group.thread_rank() == 0) {
    init(&shared_tile_barrier, group.size());
  }
  group.sync();
#endif // ASYNC_MEMCPY_SUPPORTED
  // One tile of work per block.
  auto const tile = tile_infos[blockIdx.x];
  auto const tile_start_col = tile.start_col;
  auto const tile_start_row = tile.start_row;
  auto const num_tile_cols = tile.num_cols();
  auto const num_tile_rows = tile.num_rows();
  auto const threads_per_warp = warp.size();
  // A section is (cols_per_read columns) x (rows_per_read rows): each lane reads
  // one validity byte, i.e. 8 column bits, of its own row.
  auto const cols_per_read = CHAR_BIT;
  auto const rows_per_read = static_cast<size_type>(threads_per_warp);
  auto const num_sections_x = util::div_rounding_up_safe(num_tile_cols, cols_per_read);
  auto const num_sections_y = util::div_rounding_up_safe(num_tile_rows, rows_per_read);
  auto const validity_data_col_length = num_sections_y * 4; // words to bytes
  auto const total_sections = num_sections_x * num_sections_y;
  // the tile is divided into sections. A warp operates on a section at a time.
  for (int my_section_idx = warp.meta_group_rank(); my_section_idx < total_sections;
       my_section_idx += warp.meta_group_size()) {
    // convert section to row and col
    auto const section_x = my_section_idx % num_sections_x;
    auto const section_y = my_section_idx / num_sections_x;
    auto const relative_col = section_x * cols_per_read;
    auto const relative_row = section_y * rows_per_read + warp.thread_rank();
    auto const absolute_col = relative_col + tile_start_col;
    auto const absolute_row = relative_row + tile_start_row;
    auto const row_batch_start =
        tile.batch_number == 0 ? 0 : batch_row_boundaries[tile.batch_number];
    // Explicit mask of in-bounds lanes for the ballots below.
    auto const participation_mask = __ballot_sync(0xFFFF'FFFFu, absolute_row < num_rows);
    if (absolute_row < num_rows) {
      auto const my_byte = input_data[row_offsets(absolute_row, row_batch_start) + validity_offset +
                                      (absolute_col / cols_per_read)];
      // so every thread that is participating in the warp has a byte, but it's row-based data and
      // we need it in column-based. So we shuffle the bits around to make the bytes we actually
      // write.
      for (int i = 0, byte_mask = 0x1; (i < cols_per_read) && ((relative_col + i) < num_columns);
           ++i, byte_mask <<= 1) {
        // Ballot gathers column bit i across 32 rows into one bitmask word.
        auto const validity_data = __ballot_sync(participation_mask, my_byte & byte_mask);
        // lead thread in each warp writes data
        if (warp.thread_rank() == 0) {
          auto const validity_write_offset =
              validity_data_col_length * (relative_col + i) + relative_row / cols_per_read;
          *reinterpret_cast<bitmask_type *>(&shared[validity_write_offset]) = validity_data;
        }
      }
    }
  }
  // now memcpy the shared memory out to the final destination
  auto const col_words = util::div_rounding_up_unsafe(num_tile_rows, CHAR_BIT * 4);
  // make sure entire tile has finished copy
  group.sync();
  for (int relative_col = warp.meta_group_rank(); relative_col < num_tile_cols;
       relative_col += warp.meta_group_size()) {
    auto const absolute_col = relative_col + tile_start_col;
    // NOTE(review): assumes tile_start_row is a multiple of 32 so the tile's
    // validity lands on a word boundary of the output mask — confirm tiles are
    // built that way.
    auto dst = output_nm[absolute_col] + word_index(tile_start_row);
    auto const src =
        reinterpret_cast<bitmask_type *>(&shared[validity_data_col_length * relative_col]);
#ifdef ASYNC_MEMCPY_SUPPORTED
    cuda::memcpy_async(warp, dst, src, aligned_size_t<4>(validity_data_col_length),
                       shared_tile_barrier);
#else
    for (int b = warp.thread_rank(); b < col_words; b += warp.size()) {
      dst[b] = src[b];
    }
#endif
  }
#ifdef ASYNC_MEMCPY_SUPPORTED
  // wait for tile of data to arrive
  shared_tile_barrier.arrive_and_wait();
#else
  group.sync();
#endif // ASYNC_MEMCPY_SUPPORTED
}
/**
* @brief copies string data from jcudf row format to cudf columns
*
* @tparam RowOffsetFunctor iterator for row offsets into the destination data
* @param row_offsets offsets for each row in input data
* @param string_row_offsets offset data into jcudf row data for each string
* @param string_lengths length of each incoming string in each column
* @param string_column_offsets offset column data for cudf column
* @param string_col_data output cudf string column data
* @param row_data jcudf row data
* @param num_rows number of rows in data
* @param num_string_columns number of string columns in the table
*/
template <typename RowOffsetFunctor>
__global__ void copy_strings_from_rows(RowOffsetFunctor row_offsets, int32_t **string_row_offsets,
                                       int32_t **string_lengths, size_type **string_column_offsets,
                                       char **string_col_data, int8_t const *row_data,
                                       size_type const num_rows,
                                       size_type const num_string_columns) {
  // Each warp takes a tile, which is a single column and up to ROWS_PER_BLOCK rows. A tile will not
  // wrap around the bottom of the table. The warp will copy the strings for each row in the tile.
  // Traversing in row-major order to coalesce the offsets and size reads.
  auto my_block = cooperative_groups::this_thread_block();
  auto warp = cooperative_groups::tiled_partition<cudf::detail::warp_size>(my_block);
#ifdef ASYNC_MEMCPY_SUPPORTED
  cuda::barrier<cuda::thread_scope_block> block_barrier;
#endif
  // workaround for not being able to take a reference to a constexpr host variable
  auto const ROWS_PER_BLOCK = NUM_STRING_ROWS_PER_BLOCK_FROM_ROWS;
  auto const tiles_per_col = util::div_rounding_up_unsafe(num_rows, ROWS_PER_BLOCK);
  auto const starting_tile = blockIdx.x * warp.meta_group_size() + warp.meta_group_rank();
  auto const num_tiles = tiles_per_col * num_string_columns;
  auto const tile_stride = warp.meta_group_size() * gridDim.x;
  // Each warp will copy strings in its tile. This is handled by all the threads of a warp passing
  // the same parameters to async_memcpy and all threads in the warp participating in the copy.
  for (auto my_tile = starting_tile; my_tile < num_tiles; my_tile += tile_stride) {
    // Tiles are numbered column-major: my_tile selects (column, row group).
    auto const starting_row = (my_tile % tiles_per_col) * ROWS_PER_BLOCK;
    auto const col = my_tile / tiles_per_col;
    auto const str_len = string_lengths[col];
    auto const str_row_off = string_row_offsets[col];
    auto const str_col_off = string_column_offsets[col];
    auto str_col_data = string_col_data[col];
    for (int row = starting_row; row < starting_row + ROWS_PER_BLOCK && row < num_rows; ++row) {
      // src: string bytes inside the JCUDF row; dst: the cudf chars buffer at
      // this row's character offset.
      auto const src = &row_data[row_offsets(row, 0) + str_row_off[row]];
      auto dst = &str_col_data[str_col_off[row]];
#ifdef ASYNC_MEMCPY_SUPPORTED
      cuda::memcpy_async(warp, dst, src, str_len[row], block_barrier);
#else
      for (int c = warp.thread_rank(); c < str_len[row]; c += warp.size()) {
        dst[c] = src[c];
      }
#endif
    }
  }
}
/**
* @brief Calculate the dimensions of the kernel for fixed width only columns.
*
* @param [in] num_columns the number of columns being copied.
* @param [in] num_rows the number of rows being copied.
* @param [in] size_per_row the size each row takes up when padded.
* @param [out] blocks the size of the blocks for the kernel
* @param [out] threads the size of the threads for the kernel
* @return the size in bytes of shared memory needed for each block.
*/
static int calc_fixed_width_kernel_dims(const size_type num_columns, const size_type num_rows,
                                        const size_type size_per_row, dim3 &blocks, dim3 &threads) {
  // We have found speed degrades when a thread handles more than 4 columns.
  // Each block is 2 dimensional. The y dimension indicates the columns.
  // We limit this to 32 threads in the y dimension so we can still
  // have at least 32 threads in the x dimension (1 warp) which should
  // result in better coalescing of memory operations. We also
  // want to guarantee that we are processing a multiple of 32 threads
  // in the x dimension because we use atomic operations at the block
  // level when writing validity data out to main memory, and that would
  // need to change if we split a word of validity data between blocks.
  int const y_block_size = min(util::div_rounding_up_safe(num_columns, 4), 32);
  int const x_possible_block_size = 1024 / y_block_size;
  // 48KB is the default setting for shared memory per block according to the cuda tutorials
  // If someone configures the GPU to only have 16 KB this might not work.
  int const max_shared_size = 48 * 1024;
  // If we don't have enough shared memory there is no point in having more threads
  // per block that will just sit idle
  auto const max_block_size = std::min(x_possible_block_size, max_shared_size / size_per_row);
  // Make sure that the x dimension is a multiple of 32 this not only helps
  // coalesce memory access it also lets us do a ballot sync for validity to write
  // the data back out at the warp level. If x is a multiple of 32 then each thread in the y
  // dimension is associated with one or more warps, that should correspond to the validity
  // words directly.
  int const block_size = (max_block_size / 32) * 32;
  CUDF_EXPECTS(block_size != 0, "Row size is too large to fit in shared memory");
  // The maximum number of blocks supported in the x dimension is 2 ^ 31 - 1
  // but in practice having too many can cause some overhead that I don't totally
  // understand. Playing around with this having as little as 600 blocks appears
  // to be able to saturate memory on V100, so this is an order of magnitude higher
  // to try and future proof this a bit.
  // Use the overflow-safe ceiling divide: the naive `(num_rows + block_size - 1)`
  // form can wrap around when num_rows is near the maximum of size_type.
  int const num_blocks = std::clamp(util::div_rounding_up_safe(num_rows, block_size), 1, 10240);
  blocks.x = num_blocks;
  blocks.y = 1;
  blocks.z = 1;
  threads.x = block_size;
  threads.y = y_block_size;
  threads.z = 1;
  // Shared memory holds one padded row per thread in the x dimension.
  return size_per_row * block_size;
}
/**
* When converting to rows it is possible that the size of the table was too big to fit
* in a single column. This creates an output column for a subset of the rows in a table
* going from start row and containing the next num_rows. Most of the parameters passed
* into this function are common between runs and should be calculated once.
*/
static std::unique_ptr<column> fixed_width_convert_to_rows(
    const size_type start_row, const size_type num_rows, const size_type num_columns,
    const size_type size_per_row, rmm::device_uvector<size_type> &column_start,
    rmm::device_uvector<size_type> &column_size, rmm::device_uvector<const int8_t *> &input_data,
    rmm::device_uvector<const bitmask_type *> &input_nm, const scalar &zero,
    const scalar &scalar_size_per_row, rmm::cuda_stream_view stream,
    rmm::mr::device_memory_resource *mr) {
  // Widen BEFORE multiplying: `size_per_row * num_rows` is a 32-bit * 32-bit
  // product and can overflow before it is stored into the 64-bit result.
  int64_t const total_allocation = static_cast<int64_t>(size_per_row) * num_rows;
  // We made a mistake in the split somehow
  CUDF_EXPECTS(total_allocation < std::numeric_limits<size_type>::max(),
               "Table is too large to fit!");
  // Allocate and set the offsets row for the byte array
  std::unique_ptr<column> offsets =
      cudf::detail::sequence(num_rows + 1, zero, scalar_size_per_row, stream);
  std::unique_ptr<column> data =
      make_numeric_column(data_type(type_id::INT8), static_cast<size_type>(total_allocation),
                          mask_state::UNALLOCATED, stream, mr);
  dim3 blocks;
  dim3 threads;
  int shared_size =
      detail::calc_fixed_width_kernel_dims(num_columns, num_rows, size_per_row, blocks, threads);
  copy_to_rows_fixed_width_optimized<<<blocks, threads, shared_size, stream.value()>>>(
      start_row, num_rows, num_columns, size_per_row, column_start.data(), column_size.data(),
      input_data.data(), input_nm.data(), data->mutable_view().data<int8_t>());
  // Surface launch-configuration errors here rather than silently at the next
  // synchronizing call.
  CUDF_CUDA_TRY(cudaGetLastError());
  return make_lists_column(num_rows, std::move(offsets), std::move(data), 0,
                           rmm::device_buffer{0, stream, mr}, stream, mr);
}
/**
 * @brief Check whether every type in the schema is fixed width.
 */
static inline bool are_all_fixed_width(std::vector<data_type> const &schema) {
  for (auto const &col_type : schema) {
    if (!is_fixed_width(col_type)) {
      return false;
    }
  }
  return true;
}
/**
* @brief Given a set of fixed width columns, calculate how the data will be laid out in memory.
*
* @param [in] schema the types of columns that need to be laid out.
* @param [out] column_start the byte offset where each column starts in the row.
* @param [out] column_size the size in bytes of the data for each columns in the row.
* @return the size in bytes each row needs.
*/
static inline int32_t compute_fixed_width_layout(std::vector<data_type> const &schema,
                                                 std::vector<size_type> &column_start,
                                                 std::vector<size_type> &column_size) {
  // Lay out each column aligned to its own size (fixed-width types are
  // self-aligned). The start of each column ends up 64-bit-aligned-compatible,
  // but we still align individually to keep the logic simple.
  int32_t running_offset = 0;
  for (auto const &col_type : schema) {
    size_type const type_bytes = size_of(col_type);
    column_size.emplace_back(type_bytes);
    // For fixed-width types the required alignment equals the allocation size.
    running_offset = util::round_up_unsafe(running_offset, static_cast<int32_t>(type_bytes));
    column_start.emplace_back(running_offset);
    running_offset += type_bytes;
  }
  // Validity is always written, nullable or not: one bit per column, packed
  // into bytes at the end of the fixed-width data (byte aligned so we can
  // pack more in).
  running_offset += util::div_rounding_up_safe<int32_t>(schema.size(), CHAR_BIT);
  // Pad the end so every row is 64-bit aligned.
  return util::round_up_unsafe(running_offset, JCUDF_ROW_ALIGNMENT);
}
/**
* @brief column sizes and column start offsets for a table
*/
struct column_info_s {
  // Bytes of fixed-width data plus validity that one row occupies.
  size_type size_per_row;
  // Byte offset within a row where each column's data begins; one extra
  // trailing entry records where the validity bytes start.
  std::vector<size_type> column_starts;
  // Per-row size in bytes of each column's fixed-width slot.
  std::vector<size_type> column_sizes;
  // Row offsets of the offset/length slots for each variable-width
  // (compound) column.
  std::vector<size_type> variable_width_column_starts;
  // Assignment is deleted: instances are built once (see
  // compute_column_information) and only ever moved.
  column_info_s &operator=(column_info_s const &other) = delete;
  column_info_s &operator=(column_info_s &&other) = delete;
};
/**
* @brief Compute information about a table such as bytes per row and offsets.
*
* @tparam iterator iterator of column schema data
* @param begin starting iterator of column schema
* @param end ending iterator of column schema
* @param column_starts column start offsets
* @param column_sizes size in bytes of each column
* @return size of the fixed_width data portion of a row.
*/
template <typename iterator>
column_info_s compute_column_information(iterator begin, iterator end) {
  auto const num_cols = std::distance(begin, end);
  std::vector<size_type> column_starts;
  std::vector<size_type> column_sizes;
  std::vector<size_type> variable_width_column_starts;
  column_starts.reserve(num_cols + 1);
  column_sizes.reserve(num_cols);
  // Running byte count of the fixed-width portion of a row.
  size_type row_bytes = 0;
  for (auto it = begin; it != end; ++it) {
    bool const is_variable = is_compound(*it);
    // A list or string column writes a single offset/length pair (two uint32
    // values) into the fixed-width region; other types write their own value.
    auto const col_size = is_variable ? sizeof(uint32_t) + sizeof(uint32_t) : size_of(*it);
    // Fixed-width types align to their own size; the offset/length pair
    // aligns to 4 bytes.
    size_type const col_alignment = is_variable ? __alignof(uint32_t) : col_size;
    row_bytes = util::round_up_unsafe(row_bytes, col_alignment);
    if (is_variable) {
      variable_width_column_starts.push_back(row_bytes);
    }
    column_starts.push_back(row_bytes);
    column_sizes.push_back(col_size);
    row_bytes += col_size;
  }
  // Record where validity starts as an extra trailing entry in column_starts.
  column_starts.push_back(row_bytes);
  // Validity is byte-aligned in the JCUDF format: one bit per column,
  // rounded up to whole bytes.
  row_bytes += util::div_rounding_up_safe(static_cast<size_type>(num_cols), CHAR_BIT);
  return {row_bytes, std::move(column_starts), std::move(column_sizes),
          std::move(variable_width_column_starts)};
}
/**
* @brief Build `tile_info` for the validity data to break up the work.
*
* @param num_columns number of columns in the table
* @param num_rows number of rows in the table
* @param shmem_limit_per_tile size of shared memory available to a single gpu tile
* @param row_batches batched row information for multiple output locations
* @return vector of `tile_info` structs for validity data
*/
std::vector<detail::tile_info>
build_validity_tile_infos(size_type const &num_columns, size_type const &num_rows,
                          size_type const &shmem_limit_per_tile,
                          std::vector<row_batch> const &row_batches) {
  // Aim for a roughly square tile (measured in bytes) within the shared
  // memory budget.
  auto const desired_rows_and_columns = static_cast<int>(sqrt(shmem_limit_per_tile));
  auto const column_stride = util::round_up_unsafe(
      [&]() {
        if (desired_rows_and_columns > num_columns) {
          // not many columns, build a single tile for table width and ship it off
          return num_columns;
        } else {
          return util::round_down_safe(desired_rows_and_columns, CHAR_BIT);
        }
      }(),
      JCUDF_ROW_ALIGNMENT);
  // we fit as much as we can given the column stride note that an element in the table takes just 1
  // bit, but a row with a single element still takes 8 bytes!
  auto const bytes_per_row = util::round_up_safe(
      util::div_rounding_up_unsafe(column_stride, CHAR_BIT), JCUDF_ROW_ALIGNMENT);
  // Rows per tile: as many as fit in shared memory, rounded down to a
  // multiple of 64, capped at the table height.
  auto const row_stride =
      std::min(num_rows, util::round_down_safe(shmem_limit_per_tile / bytes_per_row, 64));
  std::vector<detail::tile_info> validity_tile_infos;
  validity_tile_infos.reserve(num_columns / column_stride * num_rows / row_stride);
  for (int col = 0; col < num_columns; col += column_stride) {
    int current_tile_row_batch = 0;
    int rows_left_in_batch = row_batches[current_tile_row_batch].row_count;
    int row = 0;
    while (row < num_rows) {
      // A tile must not span row batches; advance to the next batch once the
      // current one is exhausted.
      if (rows_left_in_batch == 0) {
        current_tile_row_batch++;
        rows_left_in_batch = row_batches[current_tile_row_batch].row_count;
      }
      int const tile_height = std::min(row_stride, rows_left_in_batch);
      // Tile covers [col, end_col] x [row, end_row], clamped to the table edge.
      validity_tile_infos.emplace_back(
          detail::tile_info{col, row, std::min(col + column_stride - 1, num_columns - 1),
                            row + tile_height - 1, current_tile_row_batch});
      row += tile_height;
      rows_left_in_batch -= tile_height;
    }
  }
  return validity_tile_infos;
}
/**
 * @brief functor that returns the size of a row, or 0 if the row index is greater than the number of rows in
* the table
*
* @tparam RowSize iterator that returns the size of a specific row
*/
template <typename RowSize> struct row_size_functor {
  row_size_functor(size_type row_end, RowSize row_sizes, size_type last_row_end)
      : _row_end(row_end), _row_sizes(row_sizes), _last_row_end(last_row_end) {}
  __device__ inline uint64_t operator()(int i) const {
    // Rows at or beyond _row_end contribute no bytes; otherwise look up the
    // size shifted by _last_row_end.
    if (i >= _row_end) {
      return 0;
    }
    return _row_sizes[i + _last_row_end];
  }
  size_type _row_end;       // first index treated as past-the-end
  RowSize _row_sizes;       // iterator over per-row sizes
  size_type _last_row_end;  // offset added to the incoming index
};
/**
* @brief Builds batches of rows that will fit in the size limit of a column.
*
* @tparam RowSize iterator that gives the size of a specific row of the table.
* @param num_rows Total number of rows in the table
* @param row_sizes iterator that gives the size of a specific row of the table.
* @param all_fixed_width bool indicating all data in this table is fixed width
* @param stream stream to operate on for this work
* @param mr memory resource used to allocate any returned data
* @returns vector of size_type's that indicate row numbers for batch boundaries and a
* device_uvector of row offsets
*/
template <typename RowSize>
batch_data build_batches(size_type num_rows, RowSize row_sizes, bool all_fixed_width,
                         rmm::cuda_stream_view stream, rmm::mr::device_memory_resource *mr) {
  auto const total_size = thrust::reduce(rmm::exec_policy(stream), row_sizes, row_sizes + num_rows);
  auto const num_batches = static_cast<int32_t>(
      util::div_rounding_up_safe(total_size, static_cast<uint64_t>(MAX_BATCH_SIZE)));
  auto const num_offsets = num_batches + 1;
  std::vector<row_batch> row_batches;
  std::vector<size_type> batch_row_boundaries;
  // Contiguous per-row offsets are only needed for variable-width data.
  device_uvector<size_type> batch_row_offsets(all_fixed_width ? 0 : num_rows, stream);
  // at most max gpu memory / 2GB iterations.
  batch_row_boundaries.reserve(num_offsets);
  batch_row_boundaries.push_back(0);
  size_type last_row_end = 0;
  device_uvector<uint64_t> cumulative_row_sizes(num_rows, stream);
  thrust::inclusive_scan(rmm::exec_policy(stream), row_sizes, row_sizes + num_rows,
                         cumulative_row_sizes.begin());
  // This needs to be split into 2 gig batches. Care must be taken to avoid a batch larger than
  // 2 gigs. Imagine a table with 900 meg rows. The batches should occur every 2 rows, but if a
  // lower bound is run at 2 gigs, 4 gigs, 6 gigs. the batches will be 2 rows, 2 rows, 3 rows, which
  // will be invalid. The previous batch size must be taken into account when building a new batch.
  // One way is to pull the batch size back to the host and add it to MAX_BATCH_SIZE for the lower
  // bound search. The other method involves keeping everything on device, but subtracting the
  // previous batch from cumulative_row_sizes based on index. This involves no synchronization
  // between GPU and CPU, but involves more work on the GPU. These further need to be broken on a
  // 32-row boundary to match the fixed_width optimized versions.
  while (last_row_end < num_rows) {
    // Rebase the cumulative sizes so the search measures bytes from the start
    // of the current batch instead of from row 0.
    auto offset_row_sizes = thrust::make_transform_iterator(
        cumulative_row_sizes.begin(),
        [last_row_end, cumulative_row_sizes = cumulative_row_sizes.data()] __device__(auto i) {
          return i - cumulative_row_sizes[last_row_end];
        });
    auto search_start = offset_row_sizes + last_row_end;
    auto search_end = offset_row_sizes + num_rows;
    // find the next MAX_BATCH_SIZE boundary
    auto const lb =
        thrust::lower_bound(rmm::exec_policy(stream), search_start, search_end, MAX_BATCH_SIZE);
    size_type const batch_size = lb - search_start;
    // Interior batch boundaries are rounded down to a 32-row multiple to match
    // the fixed-width optimized kernels; the final batch keeps its exact size.
    size_type const row_end = lb == search_end ?
                                  batch_size + last_row_end :
                                  last_row_end + util::round_down_safe(batch_size, 32);
    auto const num_rows_in_batch = row_end - last_row_end;
    // build offset list for each row in this batch; one extra entry so the
    // exclusive scan's tail element is the batch's total byte count
    auto const num_entries = row_end - last_row_end + 1;
    device_uvector<size_type> output_batch_row_offsets(num_entries, stream, mr);
    auto row_size_iter_bounded = cudf::detail::make_counting_transform_iterator(
        0, row_size_functor(row_end, row_sizes, last_row_end));
    thrust::exclusive_scan(rmm::exec_policy(stream), row_size_iter_bounded,
                           row_size_iter_bounded + num_entries, output_batch_row_offsets.begin());
    auto const batch_bytes = output_batch_row_offsets.element(num_rows_in_batch, stream);
    // The output_batch_row_offsets vector is used as the offset column of the returned data. This
    // needs to be individually allocated, but the kernel needs a contiguous array of offsets or
    // more global lookups are necessary.
    if (!all_fixed_width) {
      // Stream-ordered, error-checked copy: the previous plain cudaMemcpy ran
      // on the default stream (not ordered with the scans above on `stream`)
      // and silently discarded its error code.
      CUDF_CUDA_TRY(cudaMemcpyAsync(batch_row_offsets.data() + last_row_end,
                                    output_batch_row_offsets.data(),
                                    num_rows_in_batch * sizeof(size_type),
                                    cudaMemcpyDeviceToDevice, stream.value()));
    }
    batch_row_boundaries.push_back(row_end);
    row_batches.push_back({batch_bytes, num_rows_in_batch, std::move(output_batch_row_offsets)});
    last_row_end = row_end;
  }
  return {std::move(batch_row_offsets), make_device_uvector_async(batch_row_boundaries, stream),
          std::move(batch_row_boundaries), std::move(row_batches)};
}
/**
* @brief Computes the number of tiles necessary given a tile height and batch offsets
*
* @param batch_row_boundaries row boundaries for each batch
* @param desired_tile_height height of each tile in the table
* @param stream stream to use
* @return number of tiles necessary
*/
int compute_tile_counts(device_span<size_type const> const &batch_row_boundaries,
                        int desired_tile_height, rmm::cuda_stream_view stream) {
  size_type const num_batches = batch_row_boundaries.size() - 1;
  device_uvector<size_type> tiles_per_batch(num_batches, stream);
  auto const batch_indices = thrust::make_counting_iterator(0);
  // For each batch, count how many tiles of `desired_tile_height` rows are
  // required to cover its row range.
  thrust::transform(
      rmm::exec_policy(stream), batch_indices, batch_indices + num_batches,
      tiles_per_batch.begin(),
      [desired_tile_height,
       boundaries = batch_row_boundaries.data()] __device__(auto batch_index) -> size_type {
        auto const rows_in_batch = boundaries[batch_index + 1] - boundaries[batch_index];
        return util::div_rounding_up_unsafe(rows_in_batch, desired_tile_height);
      });
  // Sum across batches to get the total tile count.
  return thrust::reduce(rmm::exec_policy(stream), tiles_per_batch.begin(), tiles_per_batch.end());
}
/**
* @brief Builds the `tile_info` structs for a given table.
*
* @param tiles span of tiles to populate
* @param batch_row_boundaries boundary to row batches
* @param column_start starting column of the tile
* @param column_end ending column of the tile
* @param desired_tile_height height of the tile
* @param total_number_of_rows total number of rows in the table
* @param stream stream to use
* @return number of tiles created
*/
size_type
build_tiles(device_span<tile_info> tiles,
            device_uvector<size_type> const &batch_row_boundaries, // comes from build_batches
            int column_start, int column_end, int desired_tile_height, int total_number_of_rows,
            rmm::cuda_stream_view stream) {
  size_type const num_batches = batch_row_boundaries.size() - 1;
  device_uvector<size_type> num_tiles(num_batches, stream);
  auto iter = thrust::make_counting_iterator(0);
  // Per batch: how many desired_tile_height-row tiles cover its row range.
  thrust::transform(rmm::exec_policy(stream), iter, iter + num_batches, num_tiles.begin(),
                    [desired_tile_height,
                     batch_row_boundaries =
                         batch_row_boundaries.data()] __device__(auto batch_index) -> size_type {
                      return util::div_rounding_up_unsafe(batch_row_boundaries[batch_index + 1] -
                                                              batch_row_boundaries[batch_index],
                                                          desired_tile_height);
                    });
  size_type const total_tiles =
      thrust::reduce(rmm::exec_policy(stream), num_tiles.begin(), num_tiles.end());
  // Exclusive scan of per-batch tile counts gives each batch's first flat
  // tile index (tile_iter pads a trailing zero for the scan's extra slot).
  device_uvector<size_type> tile_starts(num_batches + 1, stream);
  auto tile_iter = cudf::detail::make_counting_transform_iterator(
      0, [num_tiles = num_tiles.data(), num_batches] __device__(auto i) {
        return (i < num_batches) ? num_tiles[i] : 0;
      });
  thrust::exclusive_scan(rmm::exec_policy(stream), tile_iter, tile_iter + num_batches + 1,
                         tile_starts.begin()); // in tiles
  // Populate one tile_info per flat tile index, mapping it back to its batch
  // and row range.
  thrust::transform(
      rmm::exec_policy(stream), iter, iter + total_tiles, tiles.begin(),
      [=, tile_starts = tile_starts.data(),
       batch_row_boundaries = batch_row_boundaries.data()] __device__(size_type tile_index) {
        // what batch this tile falls in
        auto const batch_index_iter =
            thrust::upper_bound(thrust::seq, tile_starts, tile_starts + num_batches, tile_index);
        auto const batch_index = std::distance(tile_starts, batch_index_iter) - 1;
        // local index within the tile
        int const local_tile_index = tile_index - tile_starts[batch_index];
        // the start row for this batch.
        int const batch_row_start = batch_row_boundaries[batch_index];
        // the start row for this tile
        int const tile_row_start = batch_row_start + (local_tile_index * desired_tile_height);
        // the end row for this tile, clamped to both the table and the batch
        int const max_row =
            std::min(total_number_of_rows - 1,
                     batch_index + 1 > num_batches ?
                         std::numeric_limits<size_type>::max() :
                         static_cast<int>(batch_row_boundaries[batch_index + 1]) - 1);
        int const tile_row_end =
            std::min(batch_row_start + ((local_tile_index + 1) * desired_tile_height) - 1, max_row);
        // stuff the tile
        return tile_info{column_start, tile_row_start, column_end, tile_row_end,
                         static_cast<int>(batch_index)};
      });
  return total_tiles;
}
/**
* @brief Determines what data should be operated on by each tile for the incoming table.
*
* @tparam TileCallback Callback that receives the start and end columns of tiles
* @param column_sizes vector of the size of each column
* @param column_starts vector of the offset of each column
* @param first_row_batch_size size of the first row batch to limit max tile size since a tile
* is unable to span batches
* @param total_number_of_rows total number of rows in the table
* @param shmem_limit_per_tile shared memory allowed per tile
* @param f callback function called when building a tile
*/
template <typename TileCallback>
void determine_tiles(std::vector<size_type> const &column_sizes,
                     std::vector<size_type> const &column_starts,
                     size_type const first_row_batch_size, size_type const total_number_of_rows,
                     size_type const &shmem_limit_per_tile, TileCallback f) {
  // tile infos are organized with the tile going "down" the columns this provides the most
  // coalescing of memory access
  int current_tile_width = 0;
  int current_tile_start_col = 0;
  // the ideal tile height has lots of 8-byte reads and 8-byte writes. The optimal read/write would
  // be memory cache line sized access, but since other tiles will read/write the edges this may not
  // turn out to be overly important. For now, we will attempt to build a square tile as far as byte
  // sizes. x * y = shared_mem_size. Which translates to x^2 = shared_mem_size since we want them
  // equal, so height and width are sqrt(shared_mem_size). The trick is that it's in bytes, not rows
  // or columns.
  auto const square_bias = 32; // bias towards columns for performance reasons
  auto const optimal_square_len = static_cast<size_type>(sqrt(shmem_limit_per_tile));
  // Height rounded up to a warp multiple, then clamped so a tile never spans
  // past the first row batch (a tile is unable to span batches).
  auto const desired_tile_height = util::round_up_safe<int>(
      std::min(optimal_square_len / square_bias, total_number_of_rows), cudf::detail::warp_size);
  auto const tile_height = std::clamp(desired_tile_height, 1, first_row_batch_size);
  int row_size = 0;
  // march each column and build the tiles of appropriate sizes
  for (uint col = 0; col < column_sizes.size(); ++col) {
    auto const col_size = column_sizes[col];
    // align size for this type
    auto const alignment_needed = col_size; // They are the same for fixed width types
    auto const row_size_aligned = util::round_up_unsafe(row_size, alignment_needed);
    auto const row_size_with_this_col = row_size_aligned + col_size;
    auto const row_size_with_end_pad =
        util::round_up_unsafe(row_size_with_this_col, JCUDF_ROW_ALIGNMENT);
    if (row_size_with_end_pad * tile_height > shmem_limit_per_tile) {
      // too large, close this tile, generate vertical tiles and restart
      f(current_tile_start_col, col == 0 ? col : col - 1, tile_height);
      // NOTE(review): the new tile's row size is seeded from this column's
      // output offset modulo 8 (`& 7`), apparently so the shared-memory tile
      // boundary keeps the same alignment as the output row — confirm this
      // matches the copy kernels' expectations.
      row_size =
          util::round_up_unsafe((column_starts[col] + column_sizes[col]) & 7, alignment_needed);
      row_size += col_size; // alignment required for shared memory tile boundary to match alignment
                            // of output row
      current_tile_start_col = col;
      current_tile_width = 0;
    } else {
      row_size = row_size_with_this_col;
      current_tile_width++;
    }
  }
  // build last set of tiles
  if (current_tile_width > 0) {
    f(current_tile_start_col, static_cast<int>(column_sizes.size()) - 1, tile_height);
  }
}
/**
* @brief convert cudf table into JCUDF row format
*
* @tparam offsetFunctor functor type for offset functor
* @param tbl table to convert to JCUDF row format
* @param batch_info information about the batches of data
* @param offset_functor functor that returns the starting offset of each row
* @param column_info information about incoming columns
 * @param variable_width_offsets optional vector of offsets for variable-width columns
* @param stream stream used
* @param mr selected memory resource for returned data
* @return vector of list columns containing byte columns of the JCUDF row data
*/
template <typename offsetFunctor>
std::vector<std::unique_ptr<column>> convert_to_rows(
    table_view const &tbl, batch_data &batch_info, offsetFunctor offset_functor,
    column_info_s const &column_info,
    std::optional<rmm::device_uvector<strings_column_view::offset_iterator>> variable_width_offsets,
    rmm::cuda_stream_view stream, rmm::mr::device_memory_resource *mr) {
  // Query the per-block shared memory budget for the current device.
  int device_id;
  CUDF_CUDA_TRY(cudaGetDevice(&device_id));
  int total_shmem_in_bytes;
  CUDF_CUDA_TRY(
      cudaDeviceGetAttribute(&total_shmem_in_bytes, cudaDevAttrMaxSharedMemoryPerBlock, device_id));
#ifndef __CUDA_ARCH__ // __host__ code.
  // Need to reduce total shmem available by the size of barriers in the kernel's shared memory
  total_shmem_in_bytes -=
      util::round_up_unsafe(sizeof(cuda::barrier<cuda::thread_scope_block>), 16ul);
#endif // __CUDA_ARCH__
  auto const shmem_limit_per_tile = total_shmem_in_bytes;
  auto const num_rows = tbl.num_rows();
  // Variable-width offsets are only supplied when the table has string/list data.
  auto const fixed_width_only = !variable_width_offsets.has_value();
  // Helper: build a table_view of only the columns matching the predicate.
  auto select_columns = [](auto const &tbl, auto column_predicate) {
    std::vector<column_view> cols;
    std::copy_if(tbl.begin(), tbl.end(), std::back_inserter(cols),
                 [&](auto c) { return column_predicate(c); });
    return table_view(cols);
  };
  auto dev_col_sizes = make_device_uvector_async(column_info.column_sizes, stream);
  auto dev_col_starts = make_device_uvector_async(column_info.column_starts, stream);
  // Get the pointers to the input columnar data ready; compound (variable
  // width) columns get a nullptr here — their data is copied separately below.
  auto const data_begin = thrust::make_transform_iterator(tbl.begin(), [](auto const &c) {
    return is_compound(c.type()) ? nullptr : c.template data<int8_t>();
  });
  std::vector<int8_t const *> input_data(data_begin, data_begin + tbl.num_columns());
  // validity code handles variable and fixed-width data, so give it everything
  auto const nm_begin =
      thrust::make_transform_iterator(tbl.begin(), [](auto const &c) { return c.null_mask(); });
  std::vector<bitmask_type const *> input_nm(nm_begin, nm_begin + tbl.num_columns());
  auto dev_input_data = make_device_uvector_async(input_data, stream);
  auto dev_input_nm = make_device_uvector_async(input_nm, stream);
  // the first batch always exists unless we were sent an empty table
  auto const first_batch_size = batch_info.row_batches[0].row_count;
  // One output buffer per row batch; each becomes a byte column at the end.
  std::vector<rmm::device_buffer> output_buffers;
  std::vector<int8_t *> output_data;
  output_data.reserve(batch_info.row_batches.size());
  output_buffers.reserve(batch_info.row_batches.size());
  std::transform(batch_info.row_batches.begin(), batch_info.row_batches.end(),
                 std::back_inserter(output_buffers), [&](auto const &batch) {
                   return rmm::device_buffer(batch.num_bytes, stream, mr);
                 });
  std::transform(output_buffers.begin(), output_buffers.end(), std::back_inserter(output_data),
                 [](auto &buf) { return static_cast<int8_t *>(buf.data()); });
  auto dev_output_data = make_device_uvector_async(output_data, stream, mr);
  // First pass over the tiling only counts tiles so we can size the array.
  int info_count = 0;
  detail::determine_tiles(
      column_info.column_sizes, column_info.column_starts, first_batch_size, num_rows,
      shmem_limit_per_tile,
      [&gpu_batch_row_boundaries = batch_info.d_batch_row_boundaries, &info_count,
       &stream](int const start_col, int const end_col, int const tile_height) {
        int i = detail::compute_tile_counts(gpu_batch_row_boundaries, tile_height, stream);
        info_count += i;
      });
  // allocate space for tiles
  device_uvector<detail::tile_info> gpu_tile_infos(info_count, stream);
  // Second pass actually fills in the tile_info entries.
  int tile_offset = 0;
  detail::determine_tiles(
      column_info.column_sizes, column_info.column_starts, first_batch_size, num_rows,
      shmem_limit_per_tile,
      [&gpu_batch_row_boundaries = batch_info.d_batch_row_boundaries, &gpu_tile_infos, num_rows,
       &tile_offset, stream](int const start_col, int const end_col, int const tile_height) {
        tile_offset += detail::build_tiles(
            {gpu_tile_infos.data() + tile_offset, gpu_tile_infos.size() - tile_offset},
            gpu_batch_row_boundaries, start_col, end_col, tile_height, num_rows, stream);
      });
  // build validity tiles for ALL columns, variable and fixed width.
  auto validity_tile_infos = detail::build_validity_tile_infos(
      tbl.num_columns(), num_rows, shmem_limit_per_tile, batch_info.row_batches);
  auto dev_validity_tile_infos = make_device_uvector_async(validity_tile_infos, stream);
  auto const validity_offset = column_info.column_starts.back();
  // blast through the entire table and convert it
  detail::copy_to_rows<<<gpu_tile_infos.size(), NUM_WARPS_IN_BLOCK * cudf::detail::warp_size,
                         total_shmem_in_bytes, stream.value()>>>(
      num_rows, tbl.num_columns(), shmem_limit_per_tile, gpu_tile_infos, dev_input_data.data(),
      dev_col_sizes.data(), dev_col_starts.data(), offset_functor,
      batch_info.d_batch_row_boundaries.data(),
      reinterpret_cast<int8_t **>(dev_output_data.data()));
  // note that validity gets the entire table and not the fixed-width portion
  detail::copy_validity_to_rows<<<validity_tile_infos.size(),
                                  NUM_WARPS_IN_BLOCK * cudf::detail::warp_size,
                                  total_shmem_in_bytes, stream.value()>>>(
      num_rows, tbl.num_columns(), shmem_limit_per_tile, offset_functor,
      batch_info.d_batch_row_boundaries.data(), dev_output_data.data(), validity_offset,
      dev_validity_tile_infos, dev_input_nm.data());
  if (!fixed_width_only) {
    // build table view for variable-width data only
    auto const variable_width_table =
        select_columns(tbl, [](auto col) { return is_compound(col.type()); });
    CUDF_EXPECTS(!variable_width_table.is_empty(), "No variable-width columns when expected!");
    CUDF_EXPECTS(variable_width_offsets.has_value(), "No variable width offset data!");
    // Gather device pointers to each string column's character data.
    auto const variable_data_begin =
        thrust::make_transform_iterator(variable_width_table.begin(), [](auto const &c) {
          strings_column_view const scv{c};
          return is_compound(c.type()) ? scv.chars().template data<int8_t>() : nullptr;
        });
    std::vector<int8_t const *> variable_width_input_data(
        variable_data_begin, variable_data_begin + variable_width_table.num_columns());
    auto dev_variable_input_data = make_device_uvector_async(variable_width_input_data, stream);
    auto dev_variable_col_output_offsets =
        make_device_uvector_async(column_info.variable_width_column_starts, stream);
    // Launch one string-copy kernel per row batch, writing into that batch's
    // output buffer.
    for (uint i = 0; i < batch_info.row_batches.size(); i++) {
      auto const batch_row_offset = batch_info.batch_row_boundaries[i];
      auto const batch_num_rows = batch_info.row_batches[i].row_count;
      dim3 const string_blocks(std::min(
          MAX_STRING_BLOCKS,
          util::div_rounding_up_unsafe(batch_num_rows, NUM_STRING_ROWS_PER_BLOCK_TO_ROWS)));
      detail::copy_strings_to_rows<<<string_blocks, NUM_WARPS_IN_BLOCK * cudf::detail::warp_size, 0,
                                     stream.value()>>>(
          batch_num_rows, variable_width_table.num_columns(), dev_variable_input_data.data(),
          dev_variable_col_output_offsets.data(), variable_width_offsets->data(),
          column_info.size_per_row, offset_functor, batch_row_offset,
          reinterpret_cast<int8_t *>(output_data[i]));
    }
  }
  // split up the output buffer into multiple buffers based on row batch sizes and create list of
  // byte columns
  std::vector<std::unique_ptr<column>> ret;
  ret.reserve(batch_info.row_batches.size());
  auto counting_iter = thrust::make_counting_iterator(0);
  std::transform(counting_iter, counting_iter + batch_info.row_batches.size(),
                 std::back_inserter(ret), [&](auto batch) {
                   auto const offset_count = batch_info.row_batches[batch].row_offsets.size();
                   auto offsets = std::make_unique<column>(
                       data_type{type_id::INT32}, (size_type)offset_count,
                       batch_info.row_batches[batch].row_offsets.release());
                   auto data = std::make_unique<column>(data_type{type_id::INT8},
                                                        batch_info.row_batches[batch].num_bytes,
                                                        std::move(output_buffers[batch]));
                   // NOTE(review): the null-mask buffer is created on
                   // cudf::get_default_stream() while everything else uses
                   // `stream` — confirm this mixed-stream use is intended.
                   return make_lists_column(
                       batch_info.row_batches[batch].row_count, std::move(offsets), std::move(data),
                       0, rmm::device_buffer{0, cudf::get_default_stream(), mr}, stream, mr);
                 });
  return ret;
}
} // namespace detail
/**
* @brief convert a cudf table to JCUDF row format
*
* @param tbl incoming table to convert
* @param stream stream to use for operations
* @param mr memory resource used for returned data
* @return vector of list columns containing byte columns of the JCUDF row data
*/
std::vector<std::unique_ptr<column>> convert_to_rows(table_view const &tbl,
                                                     rmm::cuda_stream_view stream,
                                                     rmm::mr::device_memory_resource *mr) {
  auto const num_columns = tbl.num_columns();
  auto const num_rows = tbl.num_rows();
  // Fixed-width-only tables take a simpler path: every row has the same size.
  auto const fixed_width_only = std::all_of(
      tbl.begin(), tbl.end(), [](column_view const &c) { return is_fixed_width(c.type()); });
  // Break up the work into tiles, which are a starting and ending row/col #. This tile size is
  // calculated based on the shared memory size available we want a single tile to fill up the
  // entire shared memory space available for the transpose-like conversion.
  // There are two different processes going on here. The GPU conversion of the data and the writing
  // of the data into the list of byte columns that are a maximum of 2 gigs each due to offset
  // maximum size. The GPU conversion portion has to understand this limitation because the column
  // must own the data inside and as a result it must be a distinct allocation for that column.
  // Copying the data into these final buffers would be prohibitively expensive, so care is taken to
  // ensure the GPU writes to the proper buffer. The tiles are broken at the boundaries of specific
  // rows based on the row sizes up to that point. These are row batches and they are decided first
  // before building the tiles so the tiles can be properly cut around them.
  auto schema_column_iter =
      thrust::make_transform_iterator(tbl.begin(), [](auto const &i) { return i.type(); });
  // Per-column offsets/sizes and the fixed-width bytes-per-row for the table.
  auto column_info =
      detail::compute_column_information(schema_column_iter, schema_column_iter + num_columns);
  auto const size_per_row = column_info.size_per_row;
  if (fixed_width_only) {
    // total encoded row size. This includes fixed-width data and validity only. It does not include
    // variable-width data since it isn't copied with the fixed-width and validity kernel.
    auto row_size_iter = thrust::make_constant_iterator<uint64_t>(
        util::round_up_unsafe(size_per_row, JCUDF_ROW_ALIGNMENT));
    auto batch_info = detail::build_batches(num_rows, row_size_iter, fixed_width_only, stream, mr);
    // Every row starts at a fixed multiple of the aligned row size.
    detail::fixed_width_row_offset_functor offset_functor(
        util::round_up_unsafe(size_per_row, JCUDF_ROW_ALIGNMENT));
    return detail::convert_to_rows(tbl, batch_info, offset_functor, std::move(column_info),
                                   std::nullopt, stream, mr);
  } else {
    // Variable-width data: compute per-row sizes (fixed portion + string
    // bytes) and per-row offsets for the copy kernels.
    auto offset_data = detail::build_string_row_offsets(tbl, size_per_row, stream);
    auto &row_sizes = std::get<0>(offset_data);
    auto row_size_iter = cudf::detail::make_counting_transform_iterator(
        0, detail::row_size_functor(num_rows, row_sizes.data(), 0));
    auto batch_info = detail::build_batches(num_rows, row_size_iter, fixed_width_only, stream, mr);
    detail::string_row_offset_functor offset_functor(batch_info.batch_row_offsets);
    return detail::convert_to_rows(tbl, batch_info, offset_functor, std::move(column_info),
                                   std::make_optional(std::move(std::get<1>(offset_data))), stream,
                                   mr);
  }
}
// Fixed-width-only fast path of the table->JCUDF row conversion. Fails for any
// non-fixed-width column type.
std::vector<std::unique_ptr<column>>
convert_to_rows_fixed_width_optimized(table_view const &tbl, rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource *mr) {
auto const num_columns = tbl.num_columns();
std::vector<data_type> schema;
schema.resize(num_columns);
std::transform(tbl.begin(), tbl.end(), schema.begin(),
[](auto i) -> data_type { return i.type(); });
if (detail::are_all_fixed_width(schema)) {
std::vector<size_type> column_start;
std::vector<size_type> column_size;
// Computes the packed row layout: per-column byte offsets/sizes and total row size.
int32_t const size_per_row =
detail::compute_fixed_width_layout(schema, column_start, column_size);
auto dev_column_start = make_device_uvector_async(column_start, stream, mr);
auto dev_column_size = make_device_uvector_async(column_size, stream, mr);
// Make the number of rows per batch a multiple of 32 so we don't have to worry about splitting
// validity at a specific row offset. This might change in the future.
// Each output list column is capped so its byte child fits in a size_type.
auto const max_rows_per_batch =
util::round_down_safe(std::numeric_limits<size_type>::max() / size_per_row, 32);
auto const num_rows = tbl.num_rows();
// Get the pointers to the input columnar data ready
std::vector<const int8_t *> input_data;
std::vector<bitmask_type const *> input_nm;
for (size_type column_number = 0; column_number < num_columns; column_number++) {
column_view cv = tbl.column(column_number);
input_data.emplace_back(cv.data<int8_t>());
input_nm.emplace_back(cv.null_mask());
}
auto dev_input_data = make_device_uvector_async(input_data, stream, mr);
auto dev_input_nm = make_device_uvector_async(input_nm, stream, mr);
// Device scalars describing the offsets sequence of the output list column:
// offsets[i] = zero + i * step (step == bytes per row).
using ScalarType = scalar_type_t<size_type>;
auto zero = make_numeric_scalar(data_type(type_id::INT32), stream.value());
zero->set_valid_async(true, stream);
static_cast<ScalarType *>(zero.get())->set_value(0, stream);
auto step = make_numeric_scalar(data_type(type_id::INT32), stream.value());
step->set_valid_async(true, stream);
static_cast<ScalarType *>(step.get())->set_value(static_cast<size_type>(size_per_row), stream);
std::vector<std::unique_ptr<column>> ret;
// One output list column per batch of at most max_rows_per_batch rows.
for (size_type row_start = 0; row_start < num_rows; row_start += max_rows_per_batch) {
size_type row_count = num_rows - row_start;
row_count = row_count > max_rows_per_batch ? max_rows_per_batch : row_count;
ret.emplace_back(detail::fixed_width_convert_to_rows(
row_start, row_count, num_columns, size_per_row, dev_column_start, dev_column_size,
dev_input_data, dev_input_nm, *zero, *step, stream, mr));
}
return ret;
} else {
CUDF_FAIL("Only fixed width types are currently supported");
}
}
/**
* @brief convert from JCUDF row format to cudf columns
*
* @param input vector of list columns containing byte columns of the JCUDF row data
* @param schema incoming schema of the data
* @param stream stream to use for compute
* @param mr memory resource for returned data
* @return cudf table of the data
*/
std::unique_ptr<table> convert_from_rows(lists_column_view const &input,
std::vector<data_type> const &schema,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource *mr) {
// verify that the types are what we expect
column_view child = input.child();
auto const list_type = child.type().id();
CUDF_EXPECTS(list_type == type_id::INT8 || list_type == type_id::UINT8,
"Only a list of bytes is supported as input");
// convert any strings in the schema to two int32 columns
// This allows us to leverage the fixed-width copy code to fill in our offset and string length
// data.
std::vector<data_type> string_schema;
string_schema.reserve(schema.size());
for (auto i : schema) {
if (i.id() == type_id::STRING) {
// One INT32 for the string's row offset, one for its length.
string_schema.push_back(data_type(type_id::INT32));
string_schema.push_back(data_type(type_id::INT32));
} else {
string_schema.push_back(i);
}
}
// num_columns counts the expanded (string -> 2x int32) schema, not the caller's schema.
auto const num_columns = string_schema.size();
auto const num_rows = input.parent().size();
// Query the per-block shared memory limit to size the copy tiles.
int device_id;
CUDF_CUDA_TRY(cudaGetDevice(&device_id));
int total_shmem_in_bytes;
CUDF_CUDA_TRY(
cudaDeviceGetAttribute(&total_shmem_in_bytes, cudaDevAttrMaxSharedMemoryPerBlock, device_id));
#ifndef __CUDA_ARCH__ // __host__ code.
// Need to reduce total shmem available by the size of barriers in the kernel's shared memory
total_shmem_in_bytes -=
util::round_up_unsafe(sizeof(cuda::barrier<cuda::thread_scope_block>), 16ul);
#endif // __CUDA_ARCH__
auto const shmem_limit_per_tile = total_shmem_in_bytes;
auto column_info = detail::compute_column_information(string_schema.begin(), string_schema.end());
auto const size_per_row = util::round_up_unsafe(column_info.size_per_row, JCUDF_ROW_ALIGNMENT);
// Ideally we would check that the offsets are all the same, etc. but for now this is probably
// fine
CUDF_EXPECTS(size_per_row * num_rows <= child.size(), "The layout of the data appears to be off");
auto dev_col_starts = make_device_uvector_async(column_info.column_starts, stream);
auto dev_col_sizes = make_device_uvector_async(column_info.column_sizes, stream);
// Allocate the columns we are going to write into
std::vector<std::unique_ptr<column>> output_columns;
std::vector<std::unique_ptr<column>> string_row_offset_columns;
std::vector<std::unique_ptr<column>> string_length_columns;
std::vector<int8_t *> output_data;
std::vector<bitmask_type *> output_nm;
std::vector<int32_t *> string_row_offsets;
std::vector<int32_t *> string_lengths;
for (auto i : schema) {
// Helper: allocate a fixed-width column and record its data (and optionally
// null-mask) pointer for the copy kernels.
auto make_col = [&output_data, &output_nm](data_type type, size_type num_rows, bool include_nm,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource *mr) {
auto column = make_fixed_width_column(
type, num_rows, include_nm ? mask_state::UNINITIALIZED : mask_state::UNALLOCATED, stream,
mr);
auto mut = column->mutable_view();
output_data.emplace_back(mut.data<int8_t>());
if (include_nm) {
output_nm.emplace_back(mut.null_mask());
}
return column;
};
if (i.id() == type_id::STRING) {
// Temporary int32 offset/length columns are allocated on the default
// resource; the real string column is built later on `mr`.
auto const int32type = data_type(type_id::INT32);
auto offset_col =
make_col(int32type, num_rows, true, stream, rmm::mr::get_current_device_resource());
string_row_offsets.push_back(offset_col->mutable_view().data<int32_t>());
string_row_offset_columns.emplace_back(std::move(offset_col));
auto length_col =
make_col(int32type, num_rows, false, stream, rmm::mr::get_current_device_resource());
string_lengths.push_back(length_col->mutable_view().data<int32_t>());
string_length_columns.emplace_back(std::move(length_col));
// placeholder
output_columns.emplace_back(make_empty_column(type_id::STRING));
} else {
output_columns.emplace_back(make_col(i, num_rows, true, stream, mr));
}
}
auto dev_string_row_offsets = make_device_uvector_async(string_row_offsets, stream);
auto dev_string_lengths = make_device_uvector_async(string_lengths, stream);
// build the row_batches from the passed in list column
std::vector<detail::row_batch> row_batches;
row_batches.push_back(
{detail::row_batch{child.size(), num_rows, device_uvector<size_type>(0, stream)}});
auto dev_output_data = make_device_uvector_async(output_data, stream);
auto dev_output_nm = make_device_uvector_async(output_nm, stream);
// only ever get a single batch when going from rows, so boundaries are 0, num_rows
constexpr auto num_batches = 2;
device_uvector<size_type> gpu_batch_row_boundaries(num_batches, stream);
thrust::transform(rmm::exec_policy(stream), thrust::make_counting_iterator(0),
thrust::make_counting_iterator(num_batches), gpu_batch_row_boundaries.begin(),
[num_rows] __device__(auto i) { return i == 0 ? 0 : num_rows; });
// First pass: count tiles so the tile-info buffer can be sized.
int info_count = 0;
detail::determine_tiles(
column_info.column_sizes, column_info.column_starts, num_rows, num_rows, shmem_limit_per_tile,
[&gpu_batch_row_boundaries, &info_count, &stream](int const start_col, int const end_col,
int const tile_height) {
info_count += detail::compute_tile_counts(gpu_batch_row_boundaries, tile_height, stream);
});
// allocate space for tiles
device_uvector<detail::tile_info> gpu_tile_infos(info_count, stream);
// Second pass: actually build the tile descriptors into the buffer.
int tile_offset = 0;
detail::determine_tiles(
column_info.column_sizes, column_info.column_starts, num_rows, num_rows, shmem_limit_per_tile,
[&gpu_batch_row_boundaries, &gpu_tile_infos, num_rows, &tile_offset,
stream](int const start_col, int const end_col, int const tile_height) {
tile_offset += detail::build_tiles(
{gpu_tile_infos.data() + tile_offset, gpu_tile_infos.size() - tile_offset},
gpu_batch_row_boundaries, start_col, end_col, tile_height, num_rows, stream);
});
dim3 const blocks(gpu_tile_infos.size());
// validity needs to be calculated based on the actual number of final table columns
auto validity_tile_infos =
detail::build_validity_tile_infos(schema.size(), num_rows, shmem_limit_per_tile, row_batches);
auto dev_validity_tile_infos = make_device_uvector_async(validity_tile_infos, stream);
dim3 const validity_blocks(validity_tile_infos.size());
// No string columns: rows are a constant stride apart, so the fixed-width offset
// functor suffices and only the data/validity kernels run.
if (dev_string_row_offsets.size() == 0) {
detail::fixed_width_row_offset_functor offset_functor(size_per_row);
detail::copy_from_rows<<<gpu_tile_infos.size(), NUM_WARPS_IN_BLOCK * cudf::detail::warp_size,
total_shmem_in_bytes, stream.value()>>>(
num_rows, num_columns, shmem_limit_per_tile, offset_functor,
gpu_batch_row_boundaries.data(), dev_output_data.data(), dev_col_sizes.data(),
dev_col_starts.data(), gpu_tile_infos, child.data<int8_t>());
detail::copy_validity_from_rows<<<validity_tile_infos.size(),
NUM_WARPS_IN_BLOCK * cudf::detail::warp_size,
total_shmem_in_bytes, stream.value()>>>(
num_rows, num_columns, shmem_limit_per_tile, offset_functor,
gpu_batch_row_boundaries.data(), dev_output_nm.data(), column_info.column_starts.back(),
dev_validity_tile_infos, child.data<int8_t>());
} else {
// String path: row starts come from the input list's offsets column.
detail::string_row_offset_functor offset_functor(device_span<size_type const>{input.offsets()});
detail::copy_from_rows<<<gpu_tile_infos.size(), NUM_WARPS_IN_BLOCK * cudf::detail::warp_size,
total_shmem_in_bytes, stream.value()>>>(
num_rows, num_columns, shmem_limit_per_tile, offset_functor,
gpu_batch_row_boundaries.data(), dev_output_data.data(), dev_col_sizes.data(),
dev_col_starts.data(), gpu_tile_infos, child.data<int8_t>());
detail::copy_validity_from_rows<<<validity_tile_infos.size(),
NUM_WARPS_IN_BLOCK * cudf::detail::warp_size,
total_shmem_in_bytes, stream.value()>>>(
num_rows, num_columns, shmem_limit_per_tile, offset_functor,
gpu_batch_row_boundaries.data(), dev_output_nm.data(), column_info.column_starts.back(),
dev_validity_tile_infos, child.data<int8_t>());
// Build per-string-column character offsets by exclusive-scanning the lengths
// the copy kernel just wrote, then allocate the destination char buffers.
std::vector<device_uvector<size_type>> string_col_offsets;
std::vector<rmm::device_uvector<char>> string_data_cols;
std::vector<size_type *> string_col_offset_ptrs;
std::vector<char *> string_data_col_ptrs;
for (auto &col_string_lengths : string_lengths) {
device_uvector<size_type> output_string_offsets(num_rows + 1, stream, mr);
auto tmp = [num_rows, col_string_lengths] __device__(auto const &i) {
return i < num_rows ? col_string_lengths[i] : 0;
};
auto bounded_iter = cudf::detail::make_counting_transform_iterator(0, tmp);
thrust::exclusive_scan(rmm::exec_policy(stream), bounded_iter, bounded_iter + num_rows + 1,
output_string_offsets.begin());
// allocate destination string column
rmm::device_uvector<char> string_data(output_string_offsets.element(num_rows, stream), stream,
mr);
string_col_offset_ptrs.push_back(output_string_offsets.data());
string_data_col_ptrs.push_back(string_data.data());
string_col_offsets.push_back(std::move(output_string_offsets));
string_data_cols.push_back(std::move(string_data));
}
auto dev_string_col_offsets = make_device_uvector_async(string_col_offset_ptrs, stream);
auto dev_string_data_cols = make_device_uvector_async(string_data_col_ptrs, stream);
dim3 const string_blocks(
std::min(std::max(MIN_STRING_BLOCKS, num_rows / NUM_STRING_ROWS_PER_BLOCK_FROM_ROWS),
MAX_STRING_BLOCKS));
detail::copy_strings_from_rows<<<string_blocks, NUM_WARPS_IN_BLOCK * cudf::detail::warp_size, 0,
stream.value()>>>(
offset_functor, dev_string_row_offsets.data(), dev_string_lengths.data(),
dev_string_col_offsets.data(), dev_string_data_cols.data(), child.data<int8_t>(), num_rows,
static_cast<cudf::size_type>(string_col_offsets.size()));
// merge strings back into output_columns
int string_idx = 0;
for (int i = 0; i < static_cast<int>(schema.size()); ++i) {
if (schema[i].id() == type_id::STRING) {
// stuff real string column
// The null mask is taken from the temporary offset column, which was
// populated with the row's validity by the copy kernels.
auto string_data = string_row_offset_columns[string_idx].release()->release();
output_columns[i] = make_strings_column(num_rows, std::move(string_col_offsets[string_idx]),
std::move(string_data_cols[string_idx]),
std::move(*string_data.null_mask.release()),
cudf::UNKNOWN_NULL_COUNT);
string_idx++;
}
}
}
return std::make_unique<table>(std::move(output_columns));
}
// Fixed-width-only fast path of the JCUDF row -> table conversion. Fails for any
// non-fixed-width column type in `schema`.
std::unique_ptr<table> convert_from_rows_fixed_width_optimized(
lists_column_view const &input, std::vector<data_type> const &schema,
rmm::cuda_stream_view stream, rmm::mr::device_memory_resource *mr) {
// verify that the types are what we expect
column_view child = input.child();
auto const list_type = child.type().id();
CUDF_EXPECTS(list_type == type_id::INT8 || list_type == type_id::UINT8,
"Only a list of bytes is supported as input");
auto const num_columns = schema.size();
if (detail::are_all_fixed_width(schema)) {
std::vector<size_type> column_start;
std::vector<size_type> column_size;
auto const num_rows = input.parent().size();
auto const size_per_row = detail::compute_fixed_width_layout(schema, column_start, column_size);
// Ideally we would check that the offsets are all the same, etc. but for now this is probably
// fine
// Fixed layout means the byte child must be exactly rows * row_size.
CUDF_EXPECTS(size_per_row * num_rows == child.size(),
"The layout of the data appears to be off");
auto dev_column_start = make_device_uvector_async(column_start, stream);
auto dev_column_size = make_device_uvector_async(column_size, stream);
// Allocate the columns we are going to write into
std::vector<std::unique_ptr<column>> output_columns;
std::vector<int8_t *> output_data;
std::vector<bitmask_type *> output_nm;
for (int i = 0; i < static_cast<int>(num_columns); i++) {
auto column =
make_fixed_width_column(schema[i], num_rows, mask_state::UNINITIALIZED, stream, mr);
auto mut = column->mutable_view();
output_data.emplace_back(mut.data<int8_t>());
output_nm.emplace_back(mut.null_mask());
output_columns.emplace_back(std::move(column));
}
auto dev_output_data = make_device_uvector_async(output_data, stream, mr);
auto dev_output_nm = make_device_uvector_async(output_nm, stream, mr);
// Kernel launch geometry and shared-memory size are derived from the layout.
dim3 blocks;
dim3 threads;
int shared_size =
detail::calc_fixed_width_kernel_dims(num_columns, num_rows, size_per_row, blocks, threads);
detail::copy_from_rows_fixed_width_optimized<<<blocks, threads, shared_size, stream.value()>>>(
num_rows, num_columns, size_per_row, dev_column_start.data(), dev_column_size.data(),
dev_output_data.data(), dev_output_nm.data(), child.data<int8_t>());
return std::make_unique<table>(std::move(output_columns));
} else {
CUDF_FAIL("Only fixed width types are currently supported");
}
}
} // namespace jni
} // namespace cudf
|
3e518b95a940c23c2151223af0299ad5287a5b0e.hip | // !!! This is a file automatically generated by hipify!!!
// 20181201
// Yuqiong Li
// a basic CUDA function to test working with device constant memory
#include <stdio.h>
#include <hip/hip_runtime.h>
const unsigned int N = 10; // size of vectors
__constant__ float const_d_a[N * sizeof(float)]; // filter in device const memory
// function declarations
__global__ void vecAddConstantKernel(float * b, float * c, unsigned int n);
__global__ void vecAddConstantKernel2(float * a, float * b, float * c, unsigned int n);
// main function
// Demonstrates adding two float vectors on the GPU where one operand lives in
// device __constant__ memory: first read implicitly via the global symbol,
// then passed explicitly as a kernel parameter. Prints host and device sums.
int main()
{
    float * a, * b, * c; // a and b are input vectors. c receives the device result
    a = (float *)calloc(N, sizeof(float));
    b = (float *)calloc(N, sizeof(float));
    /**************************** Exp 1: sequential ***************************/
    int i;
    int size = N * sizeof(float);
    float sum = 0;
    for (i = 0; i < N; i++){
        a[i] = (float)i / 0.23 + 1;
        b[i] = (float)i / 5.89 + 9;
        sum += a[i] + b[i];
    }
    c = (float*) malloc(size);
    printf("Results from host :%.2f\n", sum);
    /********************** Exp 2: CUDA with const mem ************************/
    // 1. allocate device memory for the second operand and the result
    float * d_b, * d_c; // device memory
    hipError_t err2 = hipMalloc((void **) & d_b, size);
    hipError_t err3 = hipMalloc((void **) & d_c, size);
    if (err2 != hipSuccess){
        printf("%s in %s at line %d\n", hipGetErrorString(err2), __FILE__, __LINE__);
        exit(EXIT_FAILURE);
    }
    if (err3 != hipSuccess){
        printf("%s in %s at line %d\n", hipGetErrorString(err3), __FILE__, __LINE__);
        exit(EXIT_FAILURE);
    }
    // copy vector a into constant memory; __constant__ symbols must be written
    // through MemcpyToSymbol, not plain hipMemcpy
    hipError_t err4 = hipMemcpyToSymbol(const_d_a, a, size);
    if (err4 != hipSuccess){
        printf("%s in %s at line %d\n", hipGetErrorString(err4), __FILE__, __LINE__);
        exit(EXIT_FAILURE);
    }
    hipMemcpy(d_b, b, size, hipMemcpyHostToDevice);
    // 2. operate on kernels
    hipLaunchKernelGGL(( vecAddConstantKernel), dim3(ceil(N/256.0)), dim3(256), 0, 0, d_b, d_c, N); // no need to pass const_d_a as a parameter as it's global
    // 3. copy the results back to host
    hipMemcpy(c, d_c, size, hipMemcpyDeviceToHost);
    float cuda_res = 0;
    for(i = 0; i < N; i++){
        cuda_res += c[i];
    }
    printf("Results from device :%.2f\n", cuda_res);
    // 2. do it again but passing the constant array as a kernel parameter
    // NOTE(review): referencing a __constant__ symbol's address from host code is
    // not portable; hipGetSymbolAddress is the documented way — confirm on target toolchain.
    float * d_c1; // device memory
    hipError_t err5 = hipMalloc((void **) & d_c1, size);
    if (err5 != hipSuccess){
        printf("%s in %s at line %d\n", hipGetErrorString(err5), __FILE__, __LINE__);
        exit(EXIT_FAILURE);
    }
    hipLaunchKernelGGL(( vecAddConstantKernel2), dim3(ceil(N/256.0)), dim3(256), 0, 0, const_d_a, d_b, d_c1, N);
    // 3. copy the results back to host
    hipMemcpy(c, d_c1, size, hipMemcpyDeviceToHost);
    cuda_res = 0;
    for(i = 0; i < N; i++){
        cuda_res += c[i];
    }
    printf("Results from host but pass const var as parameter:%.2f\n", cuda_res);
    // release every device and host allocation
    hipFree(d_b);
    hipFree(d_c);
    hipFree(d_c1); // BUG FIX: d_c1 was allocated but never freed (leak)
    free(a);
    free(b);
    free(c);
    return 0;
}
// Element-wise c[i] = const_d_a[i] + b[i], one element per thread, with the
// first operand read from device __constant__ memory (the global const_d_a).
// NOTE(review): the parameter N shadows the file-scope constant N.
__global__ void vecAddConstantKernel(float * b, float * c, unsigned int N){
// flat global thread index
int i = threadIdx.x + blockDim.x * blockIdx.x;
// bounds guard: the grid may be larger than N
if (i<N) c[i] = const_d_a[i] + b[i];
}
// Element-wise c[i] = a[i] + b[i]; here the constant-memory array is received
// as an ordinary pointer parameter instead of via the global symbol.
// NOTE(review): the parameter N shadows the file-scope constant N.
__global__ void vecAddConstantKernel2(float * a, float * b, float * c, unsigned int N){
// flat global thread index
int i = threadIdx.x + blockDim.x * blockIdx.x;
// bounds guard: the grid may be larger than N
if (i<N) c[i] = a[i] + b[i];
}
| 3e518b95a940c23c2151223af0299ad5287a5b0e.cu | // 20181201
// Yuqiong Li
// a basic CUDA function to test working with device constant memory
#include <stdio.h>
#include <cuda.h>
const unsigned int N = 10; // size of vectors
__constant__ float const_d_a[N * sizeof(float)]; // filter in device const memory
// function declarations
__global__ void vecAddConstantKernel(float * b, float * c, unsigned int n);
__global__ void vecAddConstantKernel2(float * a, float * b, float * c, unsigned int n);
// main function
// Demonstrates adding two float vectors on the GPU where one operand lives in
// device __constant__ memory: first read implicitly via the global symbol,
// then passed explicitly as a kernel parameter. Prints host and device sums.
int main()
{
    float * a, * b, * c; // a and b are input vectors. c receives the device result
    a = (float *)calloc(N, sizeof(float));
    b = (float *)calloc(N, sizeof(float));
    /**************************** Exp 1: sequential ***************************/
    int i;
    int size = N * sizeof(float);
    float sum = 0;
    for (i = 0; i < N; i++){
        a[i] = (float)i / 0.23 + 1;
        b[i] = (float)i / 5.89 + 9;
        sum += a[i] + b[i];
    }
    c = (float*) malloc(size);
    printf("Results from host :%.2f\n", sum);
    /********************** Exp 2: CUDA with const mem ************************/
    // 1. allocate device memory for the second operand and the result
    float * d_b, * d_c; // device memory
    cudaError_t err2 = cudaMalloc((void **) & d_b, size);
    cudaError_t err3 = cudaMalloc((void **) & d_c, size);
    if (err2 != cudaSuccess){
        printf("%s in %s at line %d\n", cudaGetErrorString(err2), __FILE__, __LINE__);
        exit(EXIT_FAILURE);
    }
    if (err3 != cudaSuccess){
        printf("%s in %s at line %d\n", cudaGetErrorString(err3), __FILE__, __LINE__);
        exit(EXIT_FAILURE);
    }
    // copy vector a into constant memory; __constant__ symbols must be written
    // through cudaMemcpyToSymbol, not plain cudaMemcpy
    cudaError_t err4 = cudaMemcpyToSymbol(const_d_a, a, size);
    if (err4 != cudaSuccess){
        printf("%s in %s at line %d\n", cudaGetErrorString(err4), __FILE__, __LINE__);
        exit(EXIT_FAILURE);
    }
    cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
    // 2. operate on kernels
    vecAddConstantKernel<<<ceil(N/256.0), 256>>>(d_b, d_c, N); // no need to pass const_d_a as a parameter as it's global
    // 3. copy the results back to host
    cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
    float cuda_res = 0;
    for(i = 0; i < N; i++){
        cuda_res += c[i];
    }
    printf("Results from device :%.2f\n", cuda_res);
    // 2. do it again but passing the constant array as a kernel parameter
    // NOTE(review): referencing a __constant__ symbol's address from host code is
    // not portable; cudaGetSymbolAddress is the documented way — confirm on target toolchain.
    float * d_c1; // device memory
    cudaError_t err5 = cudaMalloc((void **) & d_c1, size);
    if (err5 != cudaSuccess){
        printf("%s in %s at line %d\n", cudaGetErrorString(err5), __FILE__, __LINE__);
        exit(EXIT_FAILURE);
    }
    vecAddConstantKernel2<<<ceil(N/256.0), 256>>>(const_d_a, d_b, d_c1, N);
    // 3. copy the results back to host
    cudaMemcpy(c, d_c1, size, cudaMemcpyDeviceToHost);
    cuda_res = 0;
    for(i = 0; i < N; i++){
        cuda_res += c[i];
    }
    printf("Results from host but pass const var as parameter:%.2f\n", cuda_res);
    // release every device and host allocation
    cudaFree(d_b);
    cudaFree(d_c);
    cudaFree(d_c1); // BUG FIX: d_c1 was allocated but never freed (leak)
    free(a);
    free(b);
    free(c);
    return 0;
}
// Element-wise c[i] = const_d_a[i] + b[i], one element per thread, with the
// first operand read from device __constant__ memory (the global const_d_a).
// NOTE(review): the parameter N shadows the file-scope constant N.
__global__ void vecAddConstantKernel(float * b, float * c, unsigned int N){
// flat global thread index
int i = threadIdx.x + blockDim.x * blockIdx.x;
// bounds guard: the grid may be larger than N
if (i<N) c[i] = const_d_a[i] + b[i];
}
// Element-wise c[i] = a[i] + b[i]; here the constant-memory array is received
// as an ordinary pointer parameter instead of via the global symbol.
// NOTE(review): the parameter N shadows the file-scope constant N.
__global__ void vecAddConstantKernel2(float * a, float * b, float * c, unsigned int N){
// flat global thread index
int i = threadIdx.x + blockDim.x * blockIdx.x;
// bounds guard: the grid may be larger than N
if (i<N) c[i] = a[i] + b[i];
}
|
54f740bfc6ac5d9c6f7d07374c2477d79a0131f7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author sgazeos@gmail.com
//
#include <ops/declarable/helpers/image_suppression.h>
#include <NDArrayFactory.h>
#include <NativeOps.h>
#include <cuda_exception.h>
namespace nd4j {
namespace ops {
namespace helpers {
// Returns true when the intersection-over-union (IoU) of box `previousIndex`
// and box `nextIndex` exceeds `threshold`, i.e. the next box should be
// suppressed. Boxes are rows of the 2-D `boxes` array with four coordinates
// (y1, x1, y2, x2); corner order is not assumed, hence the min/max below.
template <typename T>
static __device__ bool needToSuppressWithThreshold(T* boxes, Nd4jLong* boxesShape, int previousIndex, int nextIndex, T threshold) {
// 2-D coordinates {row, col} used to compute element offsets via the shape info.
Nd4jLong previous0[] = {previousIndex, 0};
Nd4jLong previous1[] = {previousIndex, 1};
Nd4jLong previous2[] = {previousIndex, 2};
Nd4jLong previous3[] = {previousIndex, 3};
Nd4jLong next0[] = {nextIndex, 0};
Nd4jLong next1[] = {nextIndex, 1};
Nd4jLong next2[] = {nextIndex, 2};
Nd4jLong next3[] = {nextIndex, 3};
Nd4jLong* shapeOf = shape::shapeOf(boxesShape);
Nd4jLong* strideOf = shape::stride(boxesShape);
// Normalize each box to (minY, minX) / (maxY, maxX) corners.
T minYPrev = nd4j::math::nd4j_min(boxes[shape::getOffset(0, shapeOf, strideOf, previous0, 2)], boxes[shape::getOffset(0, shapeOf, strideOf, previous2, 2)]);
T minXPrev = nd4j::math::nd4j_min(boxes[shape::getOffset(0, shapeOf, strideOf, previous1, 2)], boxes[shape::getOffset(0, shapeOf, strideOf, previous3, 2)]);
T maxYPrev = nd4j::math::nd4j_max(boxes[shape::getOffset(0, shapeOf, strideOf, previous0, 2)], boxes[shape::getOffset(0, shapeOf, strideOf, previous2, 2)]);
T maxXPrev = nd4j::math::nd4j_max(boxes[shape::getOffset(0, shapeOf, strideOf, previous1, 2)], boxes[shape::getOffset(0, shapeOf, strideOf, previous3, 2)]);
T minYNext = nd4j::math::nd4j_min(boxes[shape::getOffset(0, shapeOf, strideOf, next0, 2)], boxes[shape::getOffset(0, shapeOf, strideOf, next2, 2)]);
T minXNext = nd4j::math::nd4j_min(boxes[shape::getOffset(0, shapeOf, strideOf, next1, 2)], boxes[shape::getOffset(0, shapeOf, strideOf, next3, 2)]);
T maxYNext = nd4j::math::nd4j_max(boxes[shape::getOffset(0, shapeOf, strideOf, next0, 2)], boxes[shape::getOffset(0, shapeOf, strideOf, next2, 2)]);
T maxXNext = nd4j::math::nd4j_max(boxes[shape::getOffset(0, shapeOf, strideOf, next1, 2)], boxes[shape::getOffset(0, shapeOf, strideOf, next3, 2)]);
T areaPrev = (maxYPrev - minYPrev) * (maxXPrev - minXPrev);
T areaNext = (maxYNext - minYNext) * (maxXNext - minXNext);
// Degenerate (zero-area) boxes never suppress anything.
if (areaNext <= T(0.f) || areaPrev <= T(0.f)) return false;
T minIntersectionY = nd4j::math::nd4j_max(minYPrev, minYNext);
T minIntersectionX = nd4j::math::nd4j_max(minXPrev, minXNext);
T maxIntersectionY = nd4j::math::nd4j_min(maxYPrev, maxYNext);
T maxIntersectionX = nd4j::math::nd4j_min(maxXPrev, maxXNext);
// Clamp to zero so non-overlapping boxes yield zero intersection area.
T intersectionArea =
nd4j::math::nd4j_max(T(maxIntersectionY - minIntersectionY), T(0.0f)) *
nd4j::math::nd4j_max(T(maxIntersectionX - minIntersectionX), T(0.0f));
// IoU = intersection / union.
T intersectionValue = intersectionArea / (areaPrev + areaNext - intersectionArea);
return intersectionValue > threshold;
};
// Decides whether candidate box `i` survives non-max suppression against the
// `numSelected` boxes already chosen. `*shouldSelect` is set true by the host
// before launch; the kernel clears it when any already-selected box overlaps
// the candidate above `threshold`. Threads grid-stride over the selected set.
template <typename T, typename I>
static __global__ void shouldSelectKernel(T* boxesBuf, Nd4jLong* boxesShape, I* indexBuf, I* selectedIndicesData, double threshold, int numSelected, int i, bool* shouldSelect) {
auto tid = blockIdx.x * blockDim.x + threadIdx.x;
auto step = gridDim.x * blockDim.x;
// Block-local verdict flag; unsigned int so atomicCAS can operate on it.
__shared__ unsigned int shouldSelectShared;
if (threadIdx.x == 0) {
shouldSelectShared = (unsigned int)shouldSelect[0];
}
__syncthreads();
// Each thread checks a stripe of the already-selected boxes.
for (int j = numSelected - 1 - tid; j >= 0; j -= step) {
if (shouldSelectShared) {
if (needToSuppressWithThreshold(boxesBuf, boxesShape, indexBuf[i],
indexBuf[selectedIndicesData[j]], T(threshold)))
atomicCAS(&shouldSelectShared, 1, 0); // clear the block's flag once
}
}
__syncthreads();
// BUG FIX: the global flag must only ever be CLEARED here. The old code wrote
// the block-local verdict unconditionally, so with multiple blocks a block
// that saw no overlap could overwrite another block's "suppress" verdict
// (last-writer-wins race). All writers storing `false` is race-free.
if (threadIdx.x == 0 && shouldSelectShared == 0) {
*shouldSelect = false;
}
}
// Narrowing copy of `len` Nd4jLong indices in `indicesLong` into the index
// type I in `indices`, using a grid-stride loop.
template <typename I>
static __global__ void copyIndices(void* indices, void* indicesLong, Nd4jLong len) {
// Typed views of the raw buffers, cast once per block by thread 0.
__shared__ I* indexBuf;
__shared__ Nd4jLong* srcBuf;
if (threadIdx.x == 0) {
indexBuf = reinterpret_cast<I*>(indices);
srcBuf = reinterpret_cast<Nd4jLong*>(indicesLong);
}
// BUG FIX: every thread reads the shared pointers below, so all threads must
// wait for thread 0's writes; without this barrier the reads race and other
// warps may see uninitialized pointers.
__syncthreads();
auto tid = threadIdx.x + blockIdx.x * blockDim.x;
auto step = blockDim.x * gridDim.x;
for (auto i = tid; i < len; i += step)
indexBuf[i] = (I)srcBuf[i];
}
// Greedy non-max suppression: sorts box indices by score (descending), then
// walks candidates in score order, keeping each box whose IoU with every
// previously kept box is <= threshold, until `output` is full.
template <typename T, typename I>
static void nonMaxSuppressionV2_(nd4j::LaunchContext* context, NDArray* boxes, NDArray* scales, int maxSize, double threshold, NDArray* output) {
auto stream = context->getCudaStream();
NDArray::prepareSpecialUse({output}, {boxes, scales});
std::unique_ptr<NDArray> indices(NDArrayFactory::create_<I>('c', {scales->lengthOf()})); // - 1, scales->lengthOf()); //, scales->getContext());
indices->linspace(0);
indices->syncToDevice(); // linspace only on CPU, so sync to Device as well
// Work on a copy of the scores so the caller's array is not reordered.
NDArray scores(*scales);
Nd4jPointer extras[2] = {nullptr, stream};
// Sort (index, score) pairs by score, descending (last arg = true).
sortByValue(extras, indices->buffer(), indices->shapeInfo(), indices->specialBuffer(), indices->specialShapeInfo(), scores.buffer(), scores.shapeInfo(), scores.specialBuffer(), scores.specialShapeInfo(), true);
// TO DO: sort indices using scales as value row
//std::sort(indices.begin(), indices.end(), [scales](int i, int j) {return scales->e<T>(i) > scales->e<T>(j);});
auto indexBuf = reinterpret_cast<I*>(indices->specialBuffer());
NDArray selectedIndices = NDArrayFactory::create<I>('c', {output->lengthOf()});
int numSelected = 0;
// NOTE(review): numBoxes and outputBuf are computed but unused below.
int numBoxes = boxes->sizeAt(0);
auto boxesBuf = reinterpret_cast<T*>(boxes->specialBuffer());
auto selectedIndicesData = reinterpret_cast<I*>(selectedIndices.specialBuffer());
auto outputBuf = reinterpret_cast<I*>(output->specialBuffer());
// Single device-side bool flag shared between host loop and kernel.
bool* shouldSelectD;
auto err = hipMalloc(&shouldSelectD, sizeof(bool));
if (err) {
throw cuda_exception::build("helpers::nonMaxSuppressionV2: Cannot allocate memory for bool flag", err);
}
for (I i = 0; i < boxes->sizeAt(0); ++i) {
// Stop selecting once the output is full.
bool shouldSelect = numSelected < output->lengthOf();
if (shouldSelect) {
err = hipMemcpy(shouldSelectD, &shouldSelect, sizeof(bool), hipMemcpyHostToDevice);
if (err) {
throw cuda_exception::build("helpers::nonMaxSuppressionV2: Cannot set up bool flag to device", err);
}
// Kernel clears the flag if any already-selected box suppresses candidate i.
// NOTE(review): synchronous memcpys per candidate serialize host and device;
// consider batching if this becomes a bottleneck.
hipLaunchKernelGGL(( shouldSelectKernel<T,I>), dim3(128), dim3(256), 1024, *stream, boxesBuf, boxes->specialShapeInfo(), indexBuf, selectedIndicesData, threshold, numSelected, i, shouldSelectD);
err = hipMemcpy(&shouldSelect, shouldSelectD, sizeof(bool), hipMemcpyDeviceToHost);
if (err) {
throw cuda_exception::build("helpers::nonMaxSuppressionV2: Cannot set up bool flag to host", err);
}
}
if (shouldSelect) {
// Record the surviving box: its original index into output, its sorted
// position into selectedIndices for later suppression tests.
hipMemcpy(reinterpret_cast<I*>(output->specialBuffer()) + numSelected, indexBuf + i, sizeof(I), hipMemcpyDeviceToDevice);
hipMemcpy(selectedIndicesData + numSelected, &i, sizeof(I), hipMemcpyHostToDevice);
numSelected++;
}
}
err = hipFree(shouldSelectD);
if (err) {
throw cuda_exception::build("helpers::nonMaxSuppressionV2: Cannot deallocate memory for bool flag", err);
}
}
// Public entry point: dispatches nonMaxSuppressionV2_ on the (float box type,
// integer output index type) combination of the inputs.
void nonMaxSuppressionV2(nd4j::LaunchContext * context, NDArray* boxes, NDArray* scales, int maxSize, double threshold, NDArray* output) {
BUILD_DOUBLE_SELECTOR(boxes->dataType(), output->dataType(), nonMaxSuppressionV2_, (context, boxes, scales, maxSize, threshold, output), FLOAT_TYPES, INDEXING_TYPES);
}
}
}
}
| 54f740bfc6ac5d9c6f7d07374c2477d79a0131f7.cu | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author sgazeos@gmail.com
//
#include <ops/declarable/helpers/image_suppression.h>
#include <NDArrayFactory.h>
#include <NativeOps.h>
#include <cuda_exception.h>
namespace nd4j {
namespace ops {
namespace helpers {
template <typename T>
static __device__ bool needToSuppressWithThreshold(T* boxes, Nd4jLong* boxesShape, int previousIndex, int nextIndex, T threshold) {
Nd4jLong previous0[] = {previousIndex, 0};
Nd4jLong previous1[] = {previousIndex, 1};
Nd4jLong previous2[] = {previousIndex, 2};
Nd4jLong previous3[] = {previousIndex, 3};
Nd4jLong next0[] = {nextIndex, 0};
Nd4jLong next1[] = {nextIndex, 1};
Nd4jLong next2[] = {nextIndex, 2};
Nd4jLong next3[] = {nextIndex, 3};
Nd4jLong* shapeOf = shape::shapeOf(boxesShape);
Nd4jLong* strideOf = shape::stride(boxesShape);
T minYPrev = nd4j::math::nd4j_min(boxes[shape::getOffset(0, shapeOf, strideOf, previous0, 2)], boxes[shape::getOffset(0, shapeOf, strideOf, previous2, 2)]);
T minXPrev = nd4j::math::nd4j_min(boxes[shape::getOffset(0, shapeOf, strideOf, previous1, 2)], boxes[shape::getOffset(0, shapeOf, strideOf, previous3, 2)]);
T maxYPrev = nd4j::math::nd4j_max(boxes[shape::getOffset(0, shapeOf, strideOf, previous0, 2)], boxes[shape::getOffset(0, shapeOf, strideOf, previous2, 2)]);
T maxXPrev = nd4j::math::nd4j_max(boxes[shape::getOffset(0, shapeOf, strideOf, previous1, 2)], boxes[shape::getOffset(0, shapeOf, strideOf, previous3, 2)]);
T minYNext = nd4j::math::nd4j_min(boxes[shape::getOffset(0, shapeOf, strideOf, next0, 2)], boxes[shape::getOffset(0, shapeOf, strideOf, next2, 2)]);
T minXNext = nd4j::math::nd4j_min(boxes[shape::getOffset(0, shapeOf, strideOf, next1, 2)], boxes[shape::getOffset(0, shapeOf, strideOf, next3, 2)]);
T maxYNext = nd4j::math::nd4j_max(boxes[shape::getOffset(0, shapeOf, strideOf, next0, 2)], boxes[shape::getOffset(0, shapeOf, strideOf, next2, 2)]);
T maxXNext = nd4j::math::nd4j_max(boxes[shape::getOffset(0, shapeOf, strideOf, next1, 2)], boxes[shape::getOffset(0, shapeOf, strideOf, next3, 2)]);
T areaPrev = (maxYPrev - minYPrev) * (maxXPrev - minXPrev);
T areaNext = (maxYNext - minYNext) * (maxXNext - minXNext);
if (areaNext <= T(0.f) || areaPrev <= T(0.f)) return false;
T minIntersectionY = nd4j::math::nd4j_max(minYPrev, minYNext);
T minIntersectionX = nd4j::math::nd4j_max(minXPrev, minXNext);
T maxIntersectionY = nd4j::math::nd4j_min(maxYPrev, maxYNext);
T maxIntersectionX = nd4j::math::nd4j_min(maxXPrev, maxXNext);
T intersectionArea =
nd4j::math::nd4j_max(T(maxIntersectionY - minIntersectionY), T(0.0f)) *
nd4j::math::nd4j_max(T(maxIntersectionX - minIntersectionX), T(0.0f));
T intersectionValue = intersectionArea / (areaPrev + areaNext - intersectionArea);
return intersectionValue > threshold;
};
template <typename T, typename I>
static __global__ void shouldSelectKernel(T* boxesBuf, Nd4jLong* boxesShape, I* indexBuf, I* selectedIndicesData, double threshold, int numSelected, int i, bool* shouldSelect) {
auto tid = blockIdx.x * blockDim.x + threadIdx.x;
auto step = gridDim.x * blockDim.x;
__shared__ unsigned int shouldSelectShared;
if (threadIdx.x == 0) {
shouldSelectShared = (unsigned int)shouldSelect[0];
}
__syncthreads();
for (int j = numSelected - 1 - tid; j >= 0; j -= step) {
if (shouldSelectShared) {
if (needToSuppressWithThreshold(boxesBuf, boxesShape, indexBuf[i],
indexBuf[selectedIndicesData[j]], T(threshold)))
atomicCAS(&shouldSelectShared, 1, 0);
}
}
__syncthreads();
if (threadIdx.x == 0) {
*shouldSelect = shouldSelectShared > 0;
}
}
template <typename I>
static __global__ void copyIndices(void* indices, void* indicesLong, Nd4jLong len) {
__shared__ I* indexBuf;
__shared__ Nd4jLong* srcBuf;
if (threadIdx.x == 0) {
indexBuf = reinterpret_cast<I*>(indices);
srcBuf = reinterpret_cast<Nd4jLong*>(indicesLong);
}
auto tid = threadIdx.x + blockIdx.x * blockDim.x;
auto step = blockDim.x * gridDim.x;
for (auto i = tid; i < len; i += step)
indexBuf[i] = (I)srcBuf[i];
}
template <typename T, typename I>
static void nonMaxSuppressionV2_(nd4j::LaunchContext* context, NDArray* boxes, NDArray* scales, int maxSize, double threshold, NDArray* output) {
auto stream = context->getCudaStream();
NDArray::prepareSpecialUse({output}, {boxes, scales});
std::unique_ptr<NDArray> indices(NDArrayFactory::create_<I>('c', {scales->lengthOf()})); // - 1, scales->lengthOf()); //, scales->getContext());
indices->linspace(0);
indices->syncToDevice(); // linspace only on CPU, so sync to Device as well
NDArray scores(*scales);
Nd4jPointer extras[2] = {nullptr, stream};
sortByValue(extras, indices->buffer(), indices->shapeInfo(), indices->specialBuffer(), indices->specialShapeInfo(), scores.buffer(), scores.shapeInfo(), scores.specialBuffer(), scores.specialShapeInfo(), true);
// TO DO: sort indices using scales as value row
//std::sort(indices.begin(), indices.end(), [scales](int i, int j) {return scales->e<T>(i) > scales->e<T>(j);});
auto indexBuf = reinterpret_cast<I*>(indices->specialBuffer());
NDArray selectedIndices = NDArrayFactory::create<I>('c', {output->lengthOf()});
int numSelected = 0;
int numBoxes = boxes->sizeAt(0);
auto boxesBuf = reinterpret_cast<T*>(boxes->specialBuffer());
auto selectedIndicesData = reinterpret_cast<I*>(selectedIndices.specialBuffer());
auto outputBuf = reinterpret_cast<I*>(output->specialBuffer());
bool* shouldSelectD;
auto err = cudaMalloc(&shouldSelectD, sizeof(bool));
if (err) {
throw cuda_exception::build("helpers::nonMaxSuppressionV2: Cannot allocate memory for bool flag", err);
}
for (I i = 0; i < boxes->sizeAt(0); ++i) {
bool shouldSelect = numSelected < output->lengthOf();
if (shouldSelect) {
err = cudaMemcpy(shouldSelectD, &shouldSelect, sizeof(bool), cudaMemcpyHostToDevice);
if (err) {
throw cuda_exception::build("helpers::nonMaxSuppressionV2: Cannot set up bool flag to device", err);
}
shouldSelectKernel<T,I><<<128, 256, 1024, *stream>>>(boxesBuf, boxes->specialShapeInfo(), indexBuf, selectedIndicesData, threshold, numSelected, i, shouldSelectD);
err = cudaMemcpy(&shouldSelect, shouldSelectD, sizeof(bool), cudaMemcpyDeviceToHost);
if (err) {
throw cuda_exception::build("helpers::nonMaxSuppressionV2: Cannot set up bool flag to host", err);
}
}
if (shouldSelect) {
cudaMemcpy(reinterpret_cast<I*>(output->specialBuffer()) + numSelected, indexBuf + i, sizeof(I), cudaMemcpyDeviceToDevice);
cudaMemcpy(selectedIndicesData + numSelected, &i, sizeof(I), cudaMemcpyHostToDevice);
numSelected++;
}
}
err = cudaFree(shouldSelectD);
if (err) {
throw cuda_exception::build("helpers::nonMaxSuppressionV2: Cannot deallocate memory for bool flag", err);
}
}
void nonMaxSuppressionV2(nd4j::LaunchContext * context, NDArray* boxes, NDArray* scales, int maxSize, double threshold, NDArray* output) {
BUILD_DOUBLE_SELECTOR(boxes->dataType(), output->dataType(), nonMaxSuppressionV2_, (context, boxes, scales, maxSize, threshold, output), FLOAT_TYPES, INDEXING_TYPES);
}
}
}
}
|
443692106de98efb0a4ef6dce9eed7131e4cf27d.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <hip/hip_runtime.h>
__global__ void glob()
{
return;
}
int main()
{
float time;
hipEvent_t start;
hipEvent_t stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
hipLaunchKernelGGL(( glob), dim3(13), dim3(128), 0, 0, );
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
std::cout << time << std::endl;
return 0;
}
| 443692106de98efb0a4ef6dce9eed7131e4cf27d.cu | #include <iostream>
#include <cuda.h>
__global__ void glob()
{
return;
}
int main()
{
float time;
cudaEvent_t start;
cudaEvent_t stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
glob<<<13, 128>>>();
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
std::cout << time << std::endl;
return 0;
}
|
d03ad7180541bcb28ffd40dc24223edc41707bc2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Image.h"
#include "PPM.h"
#include <cstdio>
#include <cassert>
#include <iostream>
#include <sys/time.h>
#define Mask_Width 5
#define Mask_Height 5
#define Mask_Radius (Mask_Width/2)
#define Thread_Block_Dim 16
#define Dim_Shared (Thread_Block_Dim + (Mask_Radius*2))
__global__ void functionConv(float *N, float *M, float *P, int width, int height, int channels){
__shared__ float N_ds[Thread_Block_Dim + Mask_Radius*2][Thread_Block_Dim + Mask_Radius*2]; //Product Matrix
int y = (blockIdx.y * blockDim.y + threadIdx.y); //Thread row identification
int x = (blockIdx.x * blockDim.x + threadIdx.x); //Thread column identification
//Cycle inside the channels
for(int k=0 ; k<channels;k++){
//Each pixel in the image read 4 pixel of the original image
//Case 1: UP_SX
int xx = x - Mask_Radius;
int yy = y - Mask_Radius;
if(xx< 0 || yy < 0){
N_ds[threadIdx.y][threadIdx.x] = 0;
}else{
N_ds[threadIdx.y][threadIdx.x] = N[(yy*width+xx)*channels+k];
}
//Case 2: UP_DX
xx = x + Mask_Radius;
yy = y - Mask_Radius;
if(xx>=width-1 || yy < 0){
N_ds[threadIdx.y][threadIdx.x + 2*Mask_Radius] = 0;
}else{
N_ds[threadIdx.y][threadIdx.x + 2*Mask_Radius] = N[(yy*width+xx)*channels+k];
}
//Case 3: DOWN_SX
xx = x - Mask_Radius;
yy = y + Mask_Radius;
if(xx<0 || yy > height-1){
N_ds[threadIdx.y + 2*Mask_Radius][threadIdx.x] = 0;
}else{
N_ds[threadIdx.y + 2*Mask_Radius][threadIdx.x] = N[(yy*width+xx)*channels+k];
}
//Case 4: DOWN_DX
xx = x + Mask_Radius;
yy = y + Mask_Radius;
if(xx>width-1 || yy > height-1){
N_ds[threadIdx.y + 2*Mask_Radius][threadIdx.x + 2*Mask_Radius] = 0;
}else{
N_ds[threadIdx.y + 2*Mask_Radius][threadIdx.x + 2*Mask_Radius] = N[(yy*width+xx)*channels+k];
}
__syncthreads();
//Calculate the sum of the elements of the product matrix
float valPi = 0;
for(int i = -Mask_Radius ; i <= Mask_Radius ; i++){
for(int j = -Mask_Radius ; j <= Mask_Radius; j++){
valPi += N_ds[i+Mask_Radius+threadIdx.y][j+Mask_Radius+threadIdx.x]*M[(i+Mask_Radius)*Mask_Width+(j+Mask_Radius)];
}
}
//Set the new value in the output matrix
if(y < height && x < width){
P[(y*width+x)*channels + k] = valPi;
}
}
__syncthreads();
}
int main() {
int imageChannels;
int imageWidth;
int imageHeight;
int sizeAllocImage;
int sizeAllocMask;
Image_t* inputImage;
Image_t* outputImage;
float *hostDataImageInput;
float *deviceDataImageInput;
float *hostDataImageOutput;
float *deviceDataImageOutput;
float *deviceDataMask;
//float hostDataMask[Mask_Height * Mask_Width] = {-1,-1,-1,-1,8,-1,-1,-1,-1};
//float hostDataMask[Mask_Height * Mask_Width] = {0,-1,0,-1,5,-1,0,-1,0};
float hostDataMask[Mask_Height * Mask_Width]={(float)1/256, (float)4/256, (float)6/256, (float)4/256,(float)1/256, (float)4/256, (float)16/256, (float)24/256, (float)16/256, (float)4/256, (float)6/256, (float)24/256, (float)36/256, (float)24/256, (float)6/256, (float)4/256, (float)16/256, (float)24/256, (float)16/256, (float)4/256, (float)1/256, (float)4/256, (float)1/256, (float)4/256, (float)1/256};
const char* pathIn = "/home/salvatore/Scrivania/ImgPPM/1.ppm";
const char* pathOut = "/home/salvatore/Scrivania/out.ppm";
inputImage = PPM_import(pathIn);
imageWidth = Image_getWidth(inputImage);
imageHeight = Image_getHeight(inputImage);
imageChannels = Image_getChannels(inputImage);
sizeAllocImage = imageHeight * imageChannels * imageWidth * sizeof(float);
sizeAllocMask = Mask_Width * Mask_Height * sizeof(float);
outputImage = Image_new(imageWidth, imageHeight, imageChannels);
hostDataImageInput = Image_getData(inputImage);
hostDataImageOutput = Image_getData(outputImage);
timeval t1, t2;
hipDeviceReset();
//Device memory allocation
hipMalloc((void**) &deviceDataImageInput, sizeAllocImage);
hipMalloc((void**) &deviceDataImageOutput, sizeAllocImage);
hipMalloc((void**) &deviceDataMask, sizeAllocMask);
//Copying data from the host to the device
hipMemcpy(deviceDataImageInput, hostDataImageInput, sizeAllocImage, hipMemcpyHostToDevice);
hipMemcpy(deviceDataImageOutput, hostDataImageOutput, sizeAllocImage, hipMemcpyHostToDevice);
hipMemcpy(deviceDataMask, hostDataMask, sizeAllocMask, hipMemcpyHostToDevice);
//Setting the dimensions of the grid and blocks
dim3 dimGrid(ceil((float) imageWidth/Thread_Block_Dim),ceil((float) imageHeight/Thread_Block_Dim), 1);
dim3 dimBlock(Thread_Block_Dim,Thread_Block_Dim, 1);
gettimeofday(&t1,NULL);
//Kernel call
hipLaunchKernelGGL(( functionConv), dim3(dimGrid), dim3(dimBlock), 0, 0, deviceDataImageInput, deviceDataMask, deviceDataImageOutput, imageWidth, imageHeight, imageChannels);
hipDeviceSynchronize();
gettimeofday(&t2,NULL);
//Copying data from the device to the host
hipMemcpy(hostDataImageOutput, deviceDataImageOutput, sizeAllocImage, hipMemcpyDeviceToHost);
PPM_export(pathOut, outputImage);
//Free space from both the host and the device
hipFree(deviceDataImageInput);
hipFree(deviceDataImageOutput);
hipFree(deviceDataMask);
Image_delete(outputImage);
Image_delete(inputImage);
double elapsedTime=(t2.tv_usec - t1.tv_usec)/1000.0;
elapsedTime += (t2.tv_sec - t1.tv_sec) * 1000.0;
printf("Elaboration time: %f ms", elapsedTime);
return 0;
}
| d03ad7180541bcb28ffd40dc24223edc41707bc2.cu | #include "Image.h"
#include "PPM.h"
#include <cstdio>
#include <cassert>
#include <iostream>
#include <sys/time.h>
#define Mask_Width 5
#define Mask_Height 5
#define Mask_Radius (Mask_Width/2)
#define Thread_Block_Dim 16
#define Dim_Shared (Thread_Block_Dim + (Mask_Radius*2))
__global__ void functionConv(float *N, float *M, float *P, int width, int height, int channels){
__shared__ float N_ds[Thread_Block_Dim + Mask_Radius*2][Thread_Block_Dim + Mask_Radius*2]; //Product Matrix
int y = (blockIdx.y * blockDim.y + threadIdx.y); //Thread row identification
int x = (blockIdx.x * blockDim.x + threadIdx.x); //Thread column identification
//Cycle inside the channels
for(int k=0 ; k<channels;k++){
//Each pixel in the image read 4 pixel of the original image
//Case 1: UP_SX
int xx = x - Mask_Radius;
int yy = y - Mask_Radius;
if(xx< 0 || yy < 0){
N_ds[threadIdx.y][threadIdx.x] = 0;
}else{
N_ds[threadIdx.y][threadIdx.x] = N[(yy*width+xx)*channels+k];
}
//Case 2: UP_DX
xx = x + Mask_Radius;
yy = y - Mask_Radius;
if(xx>=width-1 || yy < 0){
N_ds[threadIdx.y][threadIdx.x + 2*Mask_Radius] = 0;
}else{
N_ds[threadIdx.y][threadIdx.x + 2*Mask_Radius] = N[(yy*width+xx)*channels+k];
}
//Case 3: DOWN_SX
xx = x - Mask_Radius;
yy = y + Mask_Radius;
if(xx<0 || yy > height-1){
N_ds[threadIdx.y + 2*Mask_Radius][threadIdx.x] = 0;
}else{
N_ds[threadIdx.y + 2*Mask_Radius][threadIdx.x] = N[(yy*width+xx)*channels+k];
}
//Case 4: DOWN_DX
xx = x + Mask_Radius;
yy = y + Mask_Radius;
if(xx>width-1 || yy > height-1){
N_ds[threadIdx.y + 2*Mask_Radius][threadIdx.x + 2*Mask_Radius] = 0;
}else{
N_ds[threadIdx.y + 2*Mask_Radius][threadIdx.x + 2*Mask_Radius] = N[(yy*width+xx)*channels+k];
}
__syncthreads();
//Calculate the sum of the elements of the product matrix
float valPi = 0;
for(int i = -Mask_Radius ; i <= Mask_Radius ; i++){
for(int j = -Mask_Radius ; j <= Mask_Radius; j++){
valPi += N_ds[i+Mask_Radius+threadIdx.y][j+Mask_Radius+threadIdx.x]*M[(i+Mask_Radius)*Mask_Width+(j+Mask_Radius)];
}
}
//Set the new value in the output matrix
if(y < height && x < width){
P[(y*width+x)*channels + k] = valPi;
}
}
__syncthreads();
}
int main() {
int imageChannels;
int imageWidth;
int imageHeight;
int sizeAllocImage;
int sizeAllocMask;
Image_t* inputImage;
Image_t* outputImage;
float *hostDataImageInput;
float *deviceDataImageInput;
float *hostDataImageOutput;
float *deviceDataImageOutput;
float *deviceDataMask;
//float hostDataMask[Mask_Height * Mask_Width] = {-1,-1,-1,-1,8,-1,-1,-1,-1};
//float hostDataMask[Mask_Height * Mask_Width] = {0,-1,0,-1,5,-1,0,-1,0};
float hostDataMask[Mask_Height * Mask_Width]={(float)1/256, (float)4/256, (float)6/256, (float)4/256,(float)1/256, (float)4/256, (float)16/256, (float)24/256, (float)16/256, (float)4/256, (float)6/256, (float)24/256, (float)36/256, (float)24/256, (float)6/256, (float)4/256, (float)16/256, (float)24/256, (float)16/256, (float)4/256, (float)1/256, (float)4/256, (float)1/256, (float)4/256, (float)1/256};
const char* pathIn = "/home/salvatore/Scrivania/ImgPPM/1.ppm";
const char* pathOut = "/home/salvatore/Scrivania/out.ppm";
inputImage = PPM_import(pathIn);
imageWidth = Image_getWidth(inputImage);
imageHeight = Image_getHeight(inputImage);
imageChannels = Image_getChannels(inputImage);
sizeAllocImage = imageHeight * imageChannels * imageWidth * sizeof(float);
sizeAllocMask = Mask_Width * Mask_Height * sizeof(float);
outputImage = Image_new(imageWidth, imageHeight, imageChannels);
hostDataImageInput = Image_getData(inputImage);
hostDataImageOutput = Image_getData(outputImage);
timeval t1, t2;
cudaDeviceReset();
//Device memory allocation
cudaMalloc((void**) &deviceDataImageInput, sizeAllocImage);
cudaMalloc((void**) &deviceDataImageOutput, sizeAllocImage);
cudaMalloc((void**) &deviceDataMask, sizeAllocMask);
//Copying data from the host to the device
cudaMemcpy(deviceDataImageInput, hostDataImageInput, sizeAllocImage, cudaMemcpyHostToDevice);
cudaMemcpy(deviceDataImageOutput, hostDataImageOutput, sizeAllocImage, cudaMemcpyHostToDevice);
cudaMemcpy(deviceDataMask, hostDataMask, sizeAllocMask, cudaMemcpyHostToDevice);
//Setting the dimensions of the grid and blocks
dim3 dimGrid(ceil((float) imageWidth/Thread_Block_Dim),ceil((float) imageHeight/Thread_Block_Dim), 1);
dim3 dimBlock(Thread_Block_Dim,Thread_Block_Dim, 1);
gettimeofday(&t1,NULL);
//Kernel call
functionConv<<<dimGrid, dimBlock>>>(deviceDataImageInput, deviceDataMask, deviceDataImageOutput, imageWidth, imageHeight, imageChannels);
cudaDeviceSynchronize();
gettimeofday(&t2,NULL);
//Copying data from the device to the host
cudaMemcpy(hostDataImageOutput, deviceDataImageOutput, sizeAllocImage, cudaMemcpyDeviceToHost);
PPM_export(pathOut, outputImage);
//Free space from both the host and the device
cudaFree(deviceDataImageInput);
cudaFree(deviceDataImageOutput);
cudaFree(deviceDataMask);
Image_delete(outputImage);
Image_delete(inputImage);
double elapsedTime=(t2.tv_usec - t1.tv_usec)/1000.0;
elapsedTime += (t2.tv_sec - t1.tv_sec) * 1000.0;
printf("Elaboration time: %f ms", elapsedTime);
return 0;
}
|
9fb293e4c7efea5ab811214a25c8b4b99b17a2a3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C"
#define BLOCK_WIDTH 16
#define BLOCK_HEIGHT 16
__global__ void filter(int *Input_Image, int *Output_Image, int Image_Width, int Image_Height)
{
const int tx_l = threadIdx.x; // --- Local thread x index
const int ty_l = threadIdx.y; // --- Local thread y index
const int tx_g = blockIdx.x * blockDim.x + tx_l; // --- Global thread x index
const int ty_g = blockIdx.y * blockDim.y + ty_l; // --- Global thread y index
__shared__ int smem[BLOCK_WIDTH+2][BLOCK_HEIGHT+2];
// --- Fill the shared memory border with zeros
if (tx_l == 0) smem[tx_l] [ty_l+1] = 0; // --- left border
else if (tx_l == BLOCK_WIDTH-1) smem[tx_l+2][ty_l+1] = 0; // --- right border
if (ty_l == 0) { smem[tx_l+1][ty_l] = 0; // --- upper border
if (tx_l == 0) smem[tx_l] [ty_l] = 0; // --- top-left corner
else if (tx_l == BLOCK_WIDTH-1) smem[tx_l+2][ty_l] = 0; // --- top-right corner
} else if (ty_l == BLOCK_HEIGHT-1) {smem[tx_l+1][ty_l+2] = 0; // --- bottom border
if (tx_l == 0) smem[tx_l] [ty_l+2] = 0; // --- bottom-left corder
else if (tx_l == BLOCK_WIDTH-1) smem[tx_l+2][ty_l+2] = 0; // --- bottom-right corner
}
// --- Fill shared memory
smem[tx_l+1][ty_l+1] = Input_Image[ty_g*Image_Width + tx_g]; // --- center
if ((tx_l == 0)&&((tx_g > 0))) smem[tx_l] [ty_l+1] = Input_Image[ty_g*Image_Width + tx_g-1]; // --- left border
else if ((tx_l == BLOCK_WIDTH-1)&&(tx_g < Image_Width - 1)) smem[tx_l+2][ty_l+1] = Input_Image[ty_g*Image_Width + tx_g+1]; // --- right border
if ((ty_l == 0)&&(ty_g > 0)) { smem[tx_l+1][ty_l] = Input_Image[(ty_g-1)*Image_Width + tx_g]; // --- upper border
if ((tx_l == 0)&&((tx_g > 0))) smem[tx_l] [ty_l] = Input_Image[(ty_g-1)*Image_Width + tx_g-1]; // --- top-left corner
else if ((tx_l == BLOCK_WIDTH-1)&&(tx_g < Image_Width - 1)) smem[tx_l+2][ty_l] = Input_Image[(ty_g-1)*Image_Width + tx_g+1]; // --- top-right corner
} else if ((ty_l == BLOCK_HEIGHT-1)&&(ty_g < Image_Height - 1)) { smem[tx_l+1][ty_l+2] = Input_Image[(ty_g+1)*Image_Width + tx_g]; // --- bottom border
if ((tx_l == 0)&&((tx_g > 0))) smem[tx_l] [ty_l+2] = Input_Image[(ty_g-1)*Image_Width + tx_g-1]; // --- bottom-left corder
else if ((tx_l == BLOCK_WIDTH-1)&&(tx_g < Image_Width - 1)) smem[tx_l+2][ty_l+2] = Input_Image[(ty_g+1)*Image_Width + tx_g+1]; // --- bottom-right corner
}
__syncthreads();
// --- Pull the 3x3 window in a local array
long v[9] = { smem[tx_l][ty_l], smem[tx_l+1][ty_l], smem[tx_l+2][ty_l],
smem[tx_l][ty_l+1], smem[tx_l+1][ty_l+1], smem[tx_l+2][ty_l+1],
smem[tx_l][ty_l+2], smem[tx_l+1][ty_l+2], smem[tx_l+2][ty_l+2] };
// --- Bubble-sort
for (int i = 0; i < 5; i++) {
for (int j = i + 1; j < 9; j++) {
if (v[i] > v[j]) { // swap?
int tmp = v[i];
v[i] = v[j];
v[j] = tmp;
}
}
}
// --- Pick the middle one
Output_Image[ty_g*Image_Width + tx_g] = v[4];
} | 9fb293e4c7efea5ab811214a25c8b4b99b17a2a3.cu | extern "C"
#define BLOCK_WIDTH 16
#define BLOCK_HEIGHT 16
__global__ void filter(int *Input_Image, int *Output_Image, int Image_Width, int Image_Height)
{
const int tx_l = threadIdx.x; // --- Local thread x index
const int ty_l = threadIdx.y; // --- Local thread y index
const int tx_g = blockIdx.x * blockDim.x + tx_l; // --- Global thread x index
const int ty_g = blockIdx.y * blockDim.y + ty_l; // --- Global thread y index
__shared__ int smem[BLOCK_WIDTH+2][BLOCK_HEIGHT+2];
// --- Fill the shared memory border with zeros
if (tx_l == 0) smem[tx_l] [ty_l+1] = 0; // --- left border
else if (tx_l == BLOCK_WIDTH-1) smem[tx_l+2][ty_l+1] = 0; // --- right border
if (ty_l == 0) { smem[tx_l+1][ty_l] = 0; // --- upper border
if (tx_l == 0) smem[tx_l] [ty_l] = 0; // --- top-left corner
else if (tx_l == BLOCK_WIDTH-1) smem[tx_l+2][ty_l] = 0; // --- top-right corner
} else if (ty_l == BLOCK_HEIGHT-1) {smem[tx_l+1][ty_l+2] = 0; // --- bottom border
if (tx_l == 0) smem[tx_l] [ty_l+2] = 0; // --- bottom-left corder
else if (tx_l == BLOCK_WIDTH-1) smem[tx_l+2][ty_l+2] = 0; // --- bottom-right corner
}
// --- Fill shared memory
smem[tx_l+1][ty_l+1] = Input_Image[ty_g*Image_Width + tx_g]; // --- center
if ((tx_l == 0)&&((tx_g > 0))) smem[tx_l] [ty_l+1] = Input_Image[ty_g*Image_Width + tx_g-1]; // --- left border
else if ((tx_l == BLOCK_WIDTH-1)&&(tx_g < Image_Width - 1)) smem[tx_l+2][ty_l+1] = Input_Image[ty_g*Image_Width + tx_g+1]; // --- right border
if ((ty_l == 0)&&(ty_g > 0)) { smem[tx_l+1][ty_l] = Input_Image[(ty_g-1)*Image_Width + tx_g]; // --- upper border
if ((tx_l == 0)&&((tx_g > 0))) smem[tx_l] [ty_l] = Input_Image[(ty_g-1)*Image_Width + tx_g-1]; // --- top-left corner
else if ((tx_l == BLOCK_WIDTH-1)&&(tx_g < Image_Width - 1)) smem[tx_l+2][ty_l] = Input_Image[(ty_g-1)*Image_Width + tx_g+1]; // --- top-right corner
} else if ((ty_l == BLOCK_HEIGHT-1)&&(ty_g < Image_Height - 1)) { smem[tx_l+1][ty_l+2] = Input_Image[(ty_g+1)*Image_Width + tx_g]; // --- bottom border
if ((tx_l == 0)&&((tx_g > 0))) smem[tx_l] [ty_l+2] = Input_Image[(ty_g-1)*Image_Width + tx_g-1]; // --- bottom-left corder
else if ((tx_l == BLOCK_WIDTH-1)&&(tx_g < Image_Width - 1)) smem[tx_l+2][ty_l+2] = Input_Image[(ty_g+1)*Image_Width + tx_g+1]; // --- bottom-right corner
}
__syncthreads();
// --- Pull the 3x3 window in a local array
long v[9] = { smem[tx_l][ty_l], smem[tx_l+1][ty_l], smem[tx_l+2][ty_l],
smem[tx_l][ty_l+1], smem[tx_l+1][ty_l+1], smem[tx_l+2][ty_l+1],
smem[tx_l][ty_l+2], smem[tx_l+1][ty_l+2], smem[tx_l+2][ty_l+2] };
// --- Bubble-sort
for (int i = 0; i < 5; i++) {
for (int j = i + 1; j < 9; j++) {
if (v[i] > v[j]) { // swap?
int tmp = v[i];
v[i] = v[j];
v[j] = tmp;
}
}
}
// --- Pick the middle one
Output_Image[ty_g*Image_Width + tx_g] = v[4];
} |
de69a0fcd3b7c14ca51e0f03203f662568de5c79.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define REAL double
__global__ void diffusion_kernel(
const REAL * __restrict__ gf1, REAL *gf2, int nx, int ny, int nz,
REAL ce, REAL cw, REAL cn, REAL cs, REAL ct, REAL cb, REAL cc){
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j = blockDim.y * blockIdx.y + threadIdx.y;
int c = i + j*nx;
int xy = nx*ny;
int w = (i == 0) ? c : c - 1;
int e = (i == nx-1) ? c : c + 1;
int n = (j == 0) ? c : c - nx;
int s = (j == ny-1) ? c : c + nx;
REAL t1, t2, t3;
t1 = t2 = gf1[c];
t3 = gf1[c + xy];
gf2[c] = cc*t2 + cw*gf1[w] + ce*gf1[e] + cs*gf1[s]
+ cn*gf1[n] + cb*t1 + ct*t3;
c += xy;
w += xy;
e += xy;
n += xy;
s += xy;
for(int k=1; k<nz-1; k++){
t1 = t2;
t2 = t3;
t3 = gf1[c+xy];
gf2[c] = cc*t2 + cw*gf1[w] + ce*gf1[e] + cs*gf1[s]
+ cn*gf1[n] + cb*t1 + ct*t3;
c += xy;
w += xy;
e += xy;
n += xy;
s += xy;
}
t1 = t2;
t2 = t3;
gf2[c] = cc*t2 + cw*gf1[w] + ce*gf1[e] + cs*gf1[s]
+ cn*gf1[n] + cb*t1 + ct*t3;
return;
}
extern "C" {
void diffusion_cuda_host(REAL *f1, REAL *f2, int nx, int ny, int nz,
REAL ce, REAL cw, REAL cn, REAL cs, REAL ct,
REAL cb, REAL cc, REAL dt,
REAL **f_ret, REAL *time_ret, int *count_ret) {
REAL time = 0.0;
int count = 0;
int blockDim_x = 128;
int blockDim_y = 1;
int blockDim_z = 1;
int grid_x = (nx-1) / blockDim_x + 1;
int grid_y = (ny-1) / blockDim_y + 1;
int grid_z = 1;
dim3 grid(grid_x, grid_y, grid_z);
dim3 threads(blockDim_x, blockDim_y, blockDim_z);
//hipFuncSetCacheConfig(diffusion_kernel, hipFuncCachePreferL1);
do {
hipLaunchKernelGGL(( diffusion_kernel), dim3(grid), dim3(threads), 0, 0, f1, f2, nx, ny, nz, ce, cw, cn, cs, ct, cb, cc);
REAL *t = f1;
f1 = f2;
f2 = t;
time += dt;
count++;
} while (time + 0.5*dt < 0.1);
//*f_ret = f1;
*time_ret = time;
*count_ret = count;
return;
}
}
| de69a0fcd3b7c14ca51e0f03203f662568de5c79.cu | #define REAL double
__global__ void diffusion_kernel(
const REAL * __restrict__ gf1, REAL *gf2, int nx, int ny, int nz,
REAL ce, REAL cw, REAL cn, REAL cs, REAL ct, REAL cb, REAL cc){
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j = blockDim.y * blockIdx.y + threadIdx.y;
int c = i + j*nx;
int xy = nx*ny;
int w = (i == 0) ? c : c - 1;
int e = (i == nx-1) ? c : c + 1;
int n = (j == 0) ? c : c - nx;
int s = (j == ny-1) ? c : c + nx;
REAL t1, t2, t3;
t1 = t2 = gf1[c];
t3 = gf1[c + xy];
gf2[c] = cc*t2 + cw*gf1[w] + ce*gf1[e] + cs*gf1[s]
+ cn*gf1[n] + cb*t1 + ct*t3;
c += xy;
w += xy;
e += xy;
n += xy;
s += xy;
for(int k=1; k<nz-1; k++){
t1 = t2;
t2 = t3;
t3 = gf1[c+xy];
gf2[c] = cc*t2 + cw*gf1[w] + ce*gf1[e] + cs*gf1[s]
+ cn*gf1[n] + cb*t1 + ct*t3;
c += xy;
w += xy;
e += xy;
n += xy;
s += xy;
}
t1 = t2;
t2 = t3;
gf2[c] = cc*t2 + cw*gf1[w] + ce*gf1[e] + cs*gf1[s]
+ cn*gf1[n] + cb*t1 + ct*t3;
return;
}
extern "C" {
void diffusion_cuda_host(REAL *f1, REAL *f2, int nx, int ny, int nz,
REAL ce, REAL cw, REAL cn, REAL cs, REAL ct,
REAL cb, REAL cc, REAL dt,
REAL **f_ret, REAL *time_ret, int *count_ret) {
REAL time = 0.0;
int count = 0;
int blockDim_x = 128;
int blockDim_y = 1;
int blockDim_z = 1;
int grid_x = (nx-1) / blockDim_x + 1;
int grid_y = (ny-1) / blockDim_y + 1;
int grid_z = 1;
dim3 grid(grid_x, grid_y, grid_z);
dim3 threads(blockDim_x, blockDim_y, blockDim_z);
//cudaFuncSetCacheConfig(diffusion_kernel, cudaFuncCachePreferL1);
do {
diffusion_kernel<<<grid, threads>>>(f1, f2, nx, ny, nz, ce, cw, cn, cs, ct, cb, cc);
REAL *t = f1;
f1 = f2;
f2 = t;
time += dt;
count++;
} while (time + 0.5*dt < 0.1);
//*f_ret = f1;
*time_ret = time;
*count_ret = count;
return;
}
}
|
b05f98aa07afa99762a2325cba98df74d49edf3e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//----------------------------------------------------------------------
/*!\file gpu_algorithms/filterKernels.cu
*
* \author Felix Laufer
*
*
* CUDA: Collection of filter kernels
*
*/
//----------------------------------------------------------------------
#include "gpu_algorithms/basicComplexMath.cu"
namespace gpu_algorithms
{
namespace cuda
{
//----------------------------------------------------------------------
// Helper functions
//----------------------------------------------------------------------
static inline __device__ __host__ float GaussianLowPass(const unsigned int x, const unsigned int y, const float sigma)
{
const float s = 2 * sigma * sigma;
return 1.0f / (M_PI * s) * (expf(-(x * x + y * y) / s));
}
static inline __device__ __host__ float GaussianHighPass(const unsigned int x, const unsigned int y, const float sigma)
{
const float gaussian_lowpass = GaussianLowPass(x, y, sigma);
return (x == 0 && y == 0) ? 2.0f - gaussian_lowpass : -gaussian_lowpass;
}
//----------------------------------------------------------------------
// Kernel functions
//----------------------------------------------------------------------
template<bool param_shift>
static __global__ void GaussianHighPassKernel(Complex *odata, const unsigned int size, const unsigned int nx, const float sigma)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const unsigned int threadID = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int offset = (nx - 1) / 2;
for (unsigned int i = threadID; i < size; i += numThreads)
{
int y = i / nx;
int x = i - y * nx;
float gaussian_highpass = GaussianHighPass(x, y, sigma);
unsigned int index = i;
if (param_shift)
{
index = SequentialIndex2DFFTShift(x, y, nx);
}
odata[index] = (Complex) {gaussian_highpass, 0.0f};
}
}
static __global__ void RaisedCosineWindow(Real *odata, const unsigned int nx, const unsigned int ny)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const unsigned int threadID = blockIdx.x * blockDim.x + threadIdx.x;
for (unsigned int i = threadID; i < nx * ny; i += numThreads)
{
const unsigned int y = i / nx;
const unsigned int x = i - y * nx;
odata[i] = 0.5f * (1.0f - cosf((2.0f * M_PI * x) / (nx - 1))) * 0.5f * (1.0f - cosf((2.0f * M_PI * y) / (ny - 1)));
}
}
}
}
| b05f98aa07afa99762a2325cba98df74d49edf3e.cu | //----------------------------------------------------------------------
/*!\file gpu_algorithms/filterKernels.cu
*
* \author Felix Laufer
*
*
* CUDA: Collection of filter kernels
*
*/
//----------------------------------------------------------------------
#include "gpu_algorithms/basicComplexMath.cu"
namespace gpu_algorithms
{
namespace cuda
{
//----------------------------------------------------------------------
// Helper functions
//----------------------------------------------------------------------
static inline __device__ __host__ float GaussianLowPass(const unsigned int x, const unsigned int y, const float sigma)
{
const float s = 2 * sigma * sigma;
return 1.0f / (M_PI * s) * (expf(-(x * x + y * y) / s));
}
static inline __device__ __host__ float GaussianHighPass(const unsigned int x, const unsigned int y, const float sigma)
{
const float gaussian_lowpass = GaussianLowPass(x, y, sigma);
return (x == 0 && y == 0) ? 2.0f - gaussian_lowpass : -gaussian_lowpass;
}
//----------------------------------------------------------------------
// Kernel functions
//----------------------------------------------------------------------
template<bool param_shift>
static __global__ void GaussianHighPassKernel(Complex *odata, const unsigned int size, const unsigned int nx, const float sigma)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const unsigned int threadID = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int offset = (nx - 1) / 2;
for (unsigned int i = threadID; i < size; i += numThreads)
{
int y = i / nx;
int x = i - y * nx;
float gaussian_highpass = GaussianHighPass(x, y, sigma);
unsigned int index = i;
if (param_shift)
{
index = SequentialIndex2DFFTShift(x, y, nx);
}
odata[index] = (Complex) {gaussian_highpass, 0.0f};
}
}
static __global__ void RaisedCosineWindow(Real *odata, const unsigned int nx, const unsigned int ny)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const unsigned int threadID = blockIdx.x * blockDim.x + threadIdx.x;
for (unsigned int i = threadID; i < nx * ny; i += numThreads)
{
const unsigned int y = i / nx;
const unsigned int x = i - y * nx;
odata[i] = 0.5f * (1.0f - cosf((2.0f * M_PI * x) / (nx - 1))) * 0.5f * (1.0f - cosf((2.0f * M_PI * y) / (ny - 1)));
}
}
}
}
|
bb9818b3b7187c7c625d02b875def4f5305fd603.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <primitiv/config.h>
#include <primitiv/devices/cuda16/device.h>
#include <primitiv/devices/cuda16/ops/common.h>
#include <primitiv/internal/cuda/utils.h>
namespace {
template<std::uint32_t BLOCK_SIZE>
__global__ void sum_fw_dev(
const half *px, std::uint32_t skip, std::uint32_t n, half *py) {
__shared__ float temp[BLOCK_SIZE];
const std::uint32_t bid = blockIdx.x;
const std::uint32_t tid = threadIdx.x;
px += bid % skip + (bid / skip) * skip * n;
temp[tid] = 0;
for (std::uint32_t i = tid; i < n; i += BLOCK_SIZE) {
temp[tid] += ::__half2float(px[i * skip]);
}
::__syncthreads();
#define REDUCE(k) \
if (BLOCK_SIZE >= k << 1) { \
if (tid < k) temp[tid] += temp[tid + k]; \
::__syncthreads(); \
}
REDUCE(512)
REDUCE(256)
REDUCE(128)
REDUCE(64)
REDUCE(32)
REDUCE(16)
REDUCE(8)
REDUCE(4)
REDUCE(2)
REDUCE(1)
#undef REDUCE
if (tid == 0) py[bid] = ::__float2half(temp[0]);
}
} // namespace
namespace primitiv {
namespace devices {
void CUDA16::sum_fw_impl(const Tensor &x, std::uint32_t dim, Tensor &y) {
const std::uint32_t n = x.shape()[dim];
const std::uint32_t r = y.shape().size();
const std::uint32_t s = y.shape().lower_volume(dim);
std::uint32_t block_size = dim1_x_;
while (block_size >> 1 >= n) block_size >>= 1;
CUDA_CALL(::hipSetDevice(dev_id_));
switch (block_size) {
#define CASE(k) \
case k: \
hipLaunchKernelGGL(( ::sum_fw_dev<k>), dim3(r), dim3(k), 0, 0, CDATA(half, x), s, n, MDATA(half, y)); \
break;
CASE(1024);
CASE(512);
CASE(256);
CASE(128);
CASE(64);
CASE(32);
CASE(16);
CASE(8);
CASE(4);
CASE(2);
CASE(1);
#undef CASE
}
}
} // namespace devices
} // namespace primitiv
| bb9818b3b7187c7c625d02b875def4f5305fd603.cu | #include <primitiv/config.h>
#include <primitiv/devices/cuda16/device.h>
#include <primitiv/devices/cuda16/ops/common.h>
#include <primitiv/internal/cuda/utils.h>
namespace {
template<std::uint32_t BLOCK_SIZE>
__global__ void sum_fw_dev(
const half *px, std::uint32_t skip, std::uint32_t n, half *py) {
__shared__ float temp[BLOCK_SIZE];
const std::uint32_t bid = blockIdx.x;
const std::uint32_t tid = threadIdx.x;
px += bid % skip + (bid / skip) * skip * n;
temp[tid] = 0;
for (std::uint32_t i = tid; i < n; i += BLOCK_SIZE) {
temp[tid] += ::__half2float(px[i * skip]);
}
::__syncthreads();
#define REDUCE(k) \
if (BLOCK_SIZE >= k << 1) { \
if (tid < k) temp[tid] += temp[tid + k]; \
::__syncthreads(); \
}
REDUCE(512)
REDUCE(256)
REDUCE(128)
REDUCE(64)
REDUCE(32)
REDUCE(16)
REDUCE(8)
REDUCE(4)
REDUCE(2)
REDUCE(1)
#undef REDUCE
if (tid == 0) py[bid] = ::__float2half(temp[0]);
}
} // namespace
namespace primitiv {
namespace devices {
void CUDA16::sum_fw_impl(const Tensor &x, std::uint32_t dim, Tensor &y) {
const std::uint32_t n = x.shape()[dim];
const std::uint32_t r = y.shape().size();
const std::uint32_t s = y.shape().lower_volume(dim);
std::uint32_t block_size = dim1_x_;
while (block_size >> 1 >= n) block_size >>= 1;
CUDA_CALL(::cudaSetDevice(dev_id_));
switch (block_size) {
#define CASE(k) \
case k: \
::sum_fw_dev<k><<<r, k>>>(CDATA(half, x), s, n, MDATA(half, y)); \
break;
CASE(1024);
CASE(512);
CASE(256);
CASE(128);
CASE(64);
CASE(32);
CASE(16);
CASE(8);
CASE(4);
CASE(2);
CASE(1);
#undef CASE
}
}
} // namespace devices
} // namespace primitiv
|
a5ba6d2a263484ee079568b20977a86f694a34d0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright 2020,2021 Sony Corporation.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <nbla/array.hpp>
#include <nbla/cuda/common.hpp>
#include <nbla/cuda/function/adaptive_separable_convolution.hpp>
#include <nbla/cuda/utils/atomic_add.cuh>
#include <nbla/variable.hpp>
// TODO: Remove these #includes. Only for debug.
#include <iostream>
#include <typeinfo>
namespace nbla {
__device__ int4 idx_to_4d(int idx, int4 strides) {
auto b = idx / strides.x;
idx -= b * strides.x;
auto c = idx / strides.y;
idx -= c * strides.y;
auto h = idx / strides.z;
idx -= h * strides.z;
auto w = idx;
return make_int4(b, c, h, w);
}
template <typename T>
__global__ void kernel_adaptive_separable_convolution_forward(
const int osize, T *y, const T *x, const T *kv, const T *kh,
const int4 y_strides, const int4 x_strides, const int4 kv_strides,
const int4 kh_strides, const int kv_filters, const int kh_filters) {
NBLA_CUDA_KERNEL_LOOP(idx, osize) {
auto bchw = idx_to_4d(idx, y_strides);
auto b = bchw.x;
auto c = bchw.y;
auto h = bchw.z;
auto w = bchw.w;
auto x_bc = x + (x_strides.x * b + x_strides.y * c);
auto kv_b = kv + (kv_strides.x * b);
auto kh_b = kh + (kh_strides.x * b);
// sum_{i, j} K_h(i, h, w) * K_v(j, h, w) * I(c, h+j, w+i)
T val = T(0.0);
for (int j = 0; j < kv_filters; ++j) {
for (int i = 0; i < kh_filters; ++i) {
auto kval = kv_b[kv_strides.y * j + kv_strides.z * h + w] *
kh_b[kh_strides.y * i + kh_strides.z * h + w];
auto pval = x_bc[x_strides.z * (h + j) + (w + i)];
val += kval * pval;
}
}
y[idx] = val;
}
}
template <typename T>
__global__ void kernel_adaptive_separable_convolution_input_backward(
const int osize, const T *g_y, T *g_x, const T *kv, const T *kh,
const int4 y_strides, const int4 x_strides, const int4 kv_strides,
const int4 kh_strides, const int kv_filters, const int kh_filters) {
NBLA_CUDA_KERNEL_LOOP(idx, osize) {
auto bchw = idx_to_4d(idx, y_strides);
auto b = bchw.x;
auto c = bchw.y;
auto h = bchw.z;
auto w = bchw.w;
auto g_y_bchw = g_y[idx];
auto kv_b = kv + (kv_strides.x * b);
auto kh_b = kh + (kh_strides.x * b);
auto g_x_bc = g_x + (x_strides.x * b + x_strides.y * c);
// g_y(c, h, w) * Kv(j, h, w) * Kh(i, h, w)
for (int j = 0; j < kv_filters; ++j) {
for (int i = 0; i < kh_filters; ++i) {
auto kv_val = kv_b[kv_strides.y * j + kv_strides.z * h + w];
auto kh_val = kh_b[kh_strides.y * i + kh_strides.z * h + w];
auto val = g_y_bchw * kv_val * kh_val;
atomic_add(&g_x_bc[x_strides.z * (h + j) + (w + i)], val);
}
}
}
}
template <typename T, bool accum>
__global__ void kernel_adaptive_separable_convolution_vertical_weight_backward(
const int kv_size, const T *g_y, T *g_kv, const T *x, const T *kh,
const int4 y_strides, const int4 x_strides, const int4 kv_strides,
const int4 kh_strides, const int imaps, const int kh_filters,
const int2 o_sshape) {
NBLA_CUDA_KERNEL_LOOP(idx, kv_size) {
auto bchw = idx_to_4d(idx, kv_strides);
auto b = bchw.x;
auto j = bchw.y;
auto h = bchw.z;
auto w = bchw.w;
auto oH = o_sshape.x;
auto oW = o_sshape.y;
if (h >= oH || w >= oW)
return;
// sum_{c} (sum_{i} K_h(i, h, w) * I(c, h+j, w+i)) * g_y(c, h, w))
auto kh_b = kh + kh_strides.x * b;
auto x_b = x + x_strides.x * b;
auto g_y_b = g_y + y_strides.x * b;
auto osum = T(0.0);
for (int c = 0; c < imaps; ++c) {
auto isum = T(0.0);
for (int i = 0; i < kh_filters; ++i) {
auto kval = kh_b[kh_strides.y * i + kh_strides.z * h + w];
auto pval = x_b[x_strides.y * c + x_strides.z * (h + j) + (w + i)];
isum += kval * pval;
}
osum += g_y_b[y_strides.y * c + y_strides.z * h + w] * isum;
}
g_kv[idx] = accum ? g_kv[idx] + osum : osum;
}
}
template <typename T, bool accum>
__global__ void
kernel_adaptive_separable_convolution_horizontal_weight_backward(
const int kh_size, const T *g_y, T *g_kh, const T *x, const T *kv,
const int4 y_strides, const int4 x_strides, const int4 kv_strides,
const int4 kh_strides, const int imaps, const int kv_filters,
const int2 o_sshape) {
NBLA_CUDA_KERNEL_LOOP(idx, kh_size) {
auto bchw = idx_to_4d(idx, kh_strides);
auto b = bchw.x;
auto i = bchw.y;
auto h = bchw.z;
auto w = bchw.w;
auto oH = o_sshape.x;
auto oW = o_sshape.y;
if (h >= oH || w >= oW)
return;
// sum_{c} (sum_{j} K_v(j, h, w) * I(c, h+j, w+i)) * g_y(c, h, w))
auto kv_b = kv + kv_strides.x * b;
auto x_b = x + x_strides.x * b;
auto g_y_b = g_y + y_strides.x * b;
auto osum = T(0.0);
for (int c = 0; c < imaps; ++c) {
auto isum = T(0.0);
for (int j = 0; j < kv_filters; ++j) {
auto kval = kv_b[kv_strides.y * j + kv_strides.z * h + w];
auto pval = x_b[x_strides.y * c + x_strides.z * (h + j) + (w + i)];
isum += kval * pval;
}
osum += g_y_b[y_strides.y * c + y_strides.z * h + w] * isum;
}
g_kh[idx] = accum ? g_kh[idx] + osum : osum;
}
}
template <typename T>
void AdaptiveSeparableConvolutionCuda<T>::setup_impl(const Variables &inputs,
const Variables &outputs) {
AdaptiveSeparableConvolution<T>::setup_impl(inputs, outputs);
cuda_set_device(this->device_);
}
template <typename T>
void AdaptiveSeparableConvolutionCuda<T>::forward_impl(
const Variables &inputs, const Variables &outputs) {
// TODO: it could be optimized
cuda_set_device(this->device_);
auto osize = outputs[0]->size();
Tcu *y = outputs[0]->cast_data_and_get_pointer<Tcu>(this->ctx_, true);
const Tcu *x = inputs[0]->get_data_pointer<Tcu>(this->ctx_);
const Tcu *kv = inputs[1]->get_data_pointer<Tcu>(this->ctx_);
const Tcu *kh = inputs[2]->get_data_pointer<Tcu>(this->ctx_);
auto y_strides =
make_int4(outputs[0]->strides()[0], outputs[0]->strides()[1],
outputs[0]->strides()[2], outputs[0]->strides()[3]);
auto x_strides = make_int4(inputs[0]->strides()[0], inputs[0]->strides()[1],
inputs[0]->strides()[2], inputs[0]->strides()[3]);
auto kv_strides = make_int4(inputs[1]->strides()[0], inputs[1]->strides()[1],
inputs[1]->strides()[2], inputs[1]->strides()[3]);
auto kh_strides = make_int4(inputs[2]->strides()[0], inputs[2]->strides()[1],
inputs[2]->strides()[2], inputs[2]->strides()[3]);
auto kv_filters = inputs[1]->shape()[1];
auto kh_filters = inputs[2]->shape()[1];
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(
kernel_adaptive_separable_convolution_forward<Tcu>, osize, y, x, kv, kh,
y_strides, x_strides, kv_strides, kh_strides, kv_filters, kh_filters);
}
template <typename T>
void AdaptiveSeparableConvolutionCuda<T>::backward_impl(
const Variables &inputs, const Variables &outputs,
const vector<bool> &propagate_down, const vector<bool> &accum) {
// TODO: it could be optimized
if (!(propagate_down[0] || propagate_down[1] || propagate_down[2])) {
return;
}
cuda_set_device(this->device_);
const Tcu *g_y = outputs[0]->get_grad_pointer<Tcu>(this->ctx_);
Tcu *g_x{nullptr};
Tcu *g_kv{nullptr};
Tcu *g_kh{nullptr};
const Tcu *x = inputs[0]->get_data_pointer<Tcu>(this->ctx_);
const Tcu *kv = inputs[1]->get_data_pointer<Tcu>(this->ctx_);
const Tcu *kh = inputs[2]->get_data_pointer<Tcu>(this->ctx_);
auto osize = outputs[0]->size();
auto kv_size = inputs[1]->size();
auto kh_size = inputs[2]->size();
auto y_strides =
make_int4(outputs[0]->strides()[0], outputs[0]->strides()[1],
outputs[0]->strides()[2], outputs[0]->strides()[3]);
auto x_strides = make_int4(inputs[0]->strides()[0], inputs[0]->strides()[1],
inputs[0]->strides()[2], inputs[0]->strides()[3]);
auto kv_strides = make_int4(inputs[1]->strides()[0], inputs[1]->strides()[1],
inputs[1]->strides()[2], inputs[1]->strides()[3]);
auto kh_strides = make_int4(inputs[2]->strides()[0], inputs[2]->strides()[1],
inputs[2]->strides()[2], inputs[2]->strides()[3]);
const auto kv_filters = inputs[1]->shape()[1];
const auto kh_filters = inputs[2]->shape()[1];
const auto imaps = inputs[0]->shape()[1];
auto o_sshape = make_int2(outputs[0]->shape()[2], outputs[0]->shape()[3]);
if (propagate_down[0]) {
g_x = inputs[0]->cast_grad_and_get_pointer<Tcu>(this->ctx_, false);
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(
kernel_adaptive_separable_convolution_input_backward, osize, g_y, g_x,
kv, kh, y_strides, x_strides, kv_strides, kh_strides, kv_filters,
kh_filters);
}
if (propagate_down[1]) {
g_kv = inputs[1]->cast_grad_and_get_pointer<Tcu>(this->ctx_, !accum[1]);
auto kernel =
accum[1]
? (kernel_adaptive_separable_convolution_vertical_weight_backward<
Tcu, true>)
: (kernel_adaptive_separable_convolution_vertical_weight_backward<
Tcu, false>);
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, kv_size, g_y, g_kv, x, kh, y_strides,
x_strides, kv_strides, kh_strides, imaps,
kh_filters, o_sshape);
}
if (propagate_down[2]) {
g_kh = inputs[2]->cast_grad_and_get_pointer<Tcu>(this->ctx_, !accum[2]);
auto kernel =
accum[2]
? (kernel_adaptive_separable_convolution_horizontal_weight_backward<
Tcu, true>)
: (kernel_adaptive_separable_convolution_horizontal_weight_backward<
Tcu, false>);
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, kh_size, g_y, g_kh, x, kv, y_strides,
x_strides, kv_strides, kh_strides, imaps,
kv_filters, o_sshape);
}
}
} // namespace nbla
| a5ba6d2a263484ee079568b20977a86f694a34d0.cu | // Copyright 2020,2021 Sony Corporation.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <nbla/array.hpp>
#include <nbla/cuda/common.hpp>
#include <nbla/cuda/function/adaptive_separable_convolution.hpp>
#include <nbla/cuda/utils/atomic_add.cuh>
#include <nbla/variable.hpp>
// TODO: Remove these #includes. Only for debug.
#include <iostream>
#include <typeinfo>
namespace nbla {
__device__ int4 idx_to_4d(int idx, int4 strides) {
auto b = idx / strides.x;
idx -= b * strides.x;
auto c = idx / strides.y;
idx -= c * strides.y;
auto h = idx / strides.z;
idx -= h * strides.z;
auto w = idx;
return make_int4(b, c, h, w);
}
template <typename T>
__global__ void kernel_adaptive_separable_convolution_forward(
const int osize, T *y, const T *x, const T *kv, const T *kh,
const int4 y_strides, const int4 x_strides, const int4 kv_strides,
const int4 kh_strides, const int kv_filters, const int kh_filters) {
NBLA_CUDA_KERNEL_LOOP(idx, osize) {
auto bchw = idx_to_4d(idx, y_strides);
auto b = bchw.x;
auto c = bchw.y;
auto h = bchw.z;
auto w = bchw.w;
auto x_bc = x + (x_strides.x * b + x_strides.y * c);
auto kv_b = kv + (kv_strides.x * b);
auto kh_b = kh + (kh_strides.x * b);
// sum_{i, j} K_h(i, h, w) * K_v(j, h, w) * I(c, h+j, w+i)
T val = T(0.0);
for (int j = 0; j < kv_filters; ++j) {
for (int i = 0; i < kh_filters; ++i) {
auto kval = kv_b[kv_strides.y * j + kv_strides.z * h + w] *
kh_b[kh_strides.y * i + kh_strides.z * h + w];
auto pval = x_bc[x_strides.z * (h + j) + (w + i)];
val += kval * pval;
}
}
y[idx] = val;
}
}
template <typename T>
__global__ void kernel_adaptive_separable_convolution_input_backward(
const int osize, const T *g_y, T *g_x, const T *kv, const T *kh,
const int4 y_strides, const int4 x_strides, const int4 kv_strides,
const int4 kh_strides, const int kv_filters, const int kh_filters) {
NBLA_CUDA_KERNEL_LOOP(idx, osize) {
auto bchw = idx_to_4d(idx, y_strides);
auto b = bchw.x;
auto c = bchw.y;
auto h = bchw.z;
auto w = bchw.w;
auto g_y_bchw = g_y[idx];
auto kv_b = kv + (kv_strides.x * b);
auto kh_b = kh + (kh_strides.x * b);
auto g_x_bc = g_x + (x_strides.x * b + x_strides.y * c);
// g_y(c, h, w) * Kv(j, h, w) * Kh(i, h, w)
for (int j = 0; j < kv_filters; ++j) {
for (int i = 0; i < kh_filters; ++i) {
auto kv_val = kv_b[kv_strides.y * j + kv_strides.z * h + w];
auto kh_val = kh_b[kh_strides.y * i + kh_strides.z * h + w];
auto val = g_y_bchw * kv_val * kh_val;
atomic_add(&g_x_bc[x_strides.z * (h + j) + (w + i)], val);
}
}
}
}
template <typename T, bool accum>
__global__ void kernel_adaptive_separable_convolution_vertical_weight_backward(
const int kv_size, const T *g_y, T *g_kv, const T *x, const T *kh,
const int4 y_strides, const int4 x_strides, const int4 kv_strides,
const int4 kh_strides, const int imaps, const int kh_filters,
const int2 o_sshape) {
NBLA_CUDA_KERNEL_LOOP(idx, kv_size) {
auto bchw = idx_to_4d(idx, kv_strides);
auto b = bchw.x;
auto j = bchw.y;
auto h = bchw.z;
auto w = bchw.w;
auto oH = o_sshape.x;
auto oW = o_sshape.y;
if (h >= oH || w >= oW)
return;
// sum_{c} (sum_{i} K_h(i, h, w) * I(c, h+j, w+i)) * g_y(c, h, w))
auto kh_b = kh + kh_strides.x * b;
auto x_b = x + x_strides.x * b;
auto g_y_b = g_y + y_strides.x * b;
auto osum = T(0.0);
for (int c = 0; c < imaps; ++c) {
auto isum = T(0.0);
for (int i = 0; i < kh_filters; ++i) {
auto kval = kh_b[kh_strides.y * i + kh_strides.z * h + w];
auto pval = x_b[x_strides.y * c + x_strides.z * (h + j) + (w + i)];
isum += kval * pval;
}
osum += g_y_b[y_strides.y * c + y_strides.z * h + w] * isum;
}
g_kv[idx] = accum ? g_kv[idx] + osum : osum;
}
}
template <typename T, bool accum>
__global__ void
kernel_adaptive_separable_convolution_horizontal_weight_backward(
const int kh_size, const T *g_y, T *g_kh, const T *x, const T *kv,
const int4 y_strides, const int4 x_strides, const int4 kv_strides,
const int4 kh_strides, const int imaps, const int kv_filters,
const int2 o_sshape) {
NBLA_CUDA_KERNEL_LOOP(idx, kh_size) {
auto bchw = idx_to_4d(idx, kh_strides);
auto b = bchw.x;
auto i = bchw.y;
auto h = bchw.z;
auto w = bchw.w;
auto oH = o_sshape.x;
auto oW = o_sshape.y;
if (h >= oH || w >= oW)
return;
// sum_{c} (sum_{j} K_v(j, h, w) * I(c, h+j, w+i)) * g_y(c, h, w))
auto kv_b = kv + kv_strides.x * b;
auto x_b = x + x_strides.x * b;
auto g_y_b = g_y + y_strides.x * b;
auto osum = T(0.0);
for (int c = 0; c < imaps; ++c) {
auto isum = T(0.0);
for (int j = 0; j < kv_filters; ++j) {
auto kval = kv_b[kv_strides.y * j + kv_strides.z * h + w];
auto pval = x_b[x_strides.y * c + x_strides.z * (h + j) + (w + i)];
isum += kval * pval;
}
osum += g_y_b[y_strides.y * c + y_strides.z * h + w] * isum;
}
g_kh[idx] = accum ? g_kh[idx] + osum : osum;
}
}
template <typename T>
void AdaptiveSeparableConvolutionCuda<T>::setup_impl(const Variables &inputs,
const Variables &outputs) {
AdaptiveSeparableConvolution<T>::setup_impl(inputs, outputs);
cuda_set_device(this->device_);
}
template <typename T>
void AdaptiveSeparableConvolutionCuda<T>::forward_impl(
const Variables &inputs, const Variables &outputs) {
// TODO: it could be optimized
cuda_set_device(this->device_);
auto osize = outputs[0]->size();
Tcu *y = outputs[0]->cast_data_and_get_pointer<Tcu>(this->ctx_, true);
const Tcu *x = inputs[0]->get_data_pointer<Tcu>(this->ctx_);
const Tcu *kv = inputs[1]->get_data_pointer<Tcu>(this->ctx_);
const Tcu *kh = inputs[2]->get_data_pointer<Tcu>(this->ctx_);
auto y_strides =
make_int4(outputs[0]->strides()[0], outputs[0]->strides()[1],
outputs[0]->strides()[2], outputs[0]->strides()[3]);
auto x_strides = make_int4(inputs[0]->strides()[0], inputs[0]->strides()[1],
inputs[0]->strides()[2], inputs[0]->strides()[3]);
auto kv_strides = make_int4(inputs[1]->strides()[0], inputs[1]->strides()[1],
inputs[1]->strides()[2], inputs[1]->strides()[3]);
auto kh_strides = make_int4(inputs[2]->strides()[0], inputs[2]->strides()[1],
inputs[2]->strides()[2], inputs[2]->strides()[3]);
auto kv_filters = inputs[1]->shape()[1];
auto kh_filters = inputs[2]->shape()[1];
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(
kernel_adaptive_separable_convolution_forward<Tcu>, osize, y, x, kv, kh,
y_strides, x_strides, kv_strides, kh_strides, kv_filters, kh_filters);
}
template <typename T>
void AdaptiveSeparableConvolutionCuda<T>::backward_impl(
const Variables &inputs, const Variables &outputs,
const vector<bool> &propagate_down, const vector<bool> &accum) {
// TODO: it could be optimized
if (!(propagate_down[0] || propagate_down[1] || propagate_down[2])) {
return;
}
cuda_set_device(this->device_);
const Tcu *g_y = outputs[0]->get_grad_pointer<Tcu>(this->ctx_);
Tcu *g_x{nullptr};
Tcu *g_kv{nullptr};
Tcu *g_kh{nullptr};
const Tcu *x = inputs[0]->get_data_pointer<Tcu>(this->ctx_);
const Tcu *kv = inputs[1]->get_data_pointer<Tcu>(this->ctx_);
const Tcu *kh = inputs[2]->get_data_pointer<Tcu>(this->ctx_);
auto osize = outputs[0]->size();
auto kv_size = inputs[1]->size();
auto kh_size = inputs[2]->size();
auto y_strides =
make_int4(outputs[0]->strides()[0], outputs[0]->strides()[1],
outputs[0]->strides()[2], outputs[0]->strides()[3]);
auto x_strides = make_int4(inputs[0]->strides()[0], inputs[0]->strides()[1],
inputs[0]->strides()[2], inputs[0]->strides()[3]);
auto kv_strides = make_int4(inputs[1]->strides()[0], inputs[1]->strides()[1],
inputs[1]->strides()[2], inputs[1]->strides()[3]);
auto kh_strides = make_int4(inputs[2]->strides()[0], inputs[2]->strides()[1],
inputs[2]->strides()[2], inputs[2]->strides()[3]);
const auto kv_filters = inputs[1]->shape()[1];
const auto kh_filters = inputs[2]->shape()[1];
const auto imaps = inputs[0]->shape()[1];
auto o_sshape = make_int2(outputs[0]->shape()[2], outputs[0]->shape()[3]);
if (propagate_down[0]) {
g_x = inputs[0]->cast_grad_and_get_pointer<Tcu>(this->ctx_, false);
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(
kernel_adaptive_separable_convolution_input_backward, osize, g_y, g_x,
kv, kh, y_strides, x_strides, kv_strides, kh_strides, kv_filters,
kh_filters);
}
if (propagate_down[1]) {
g_kv = inputs[1]->cast_grad_and_get_pointer<Tcu>(this->ctx_, !accum[1]);
auto kernel =
accum[1]
? (kernel_adaptive_separable_convolution_vertical_weight_backward<
Tcu, true>)
: (kernel_adaptive_separable_convolution_vertical_weight_backward<
Tcu, false>);
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, kv_size, g_y, g_kv, x, kh, y_strides,
x_strides, kv_strides, kh_strides, imaps,
kh_filters, o_sshape);
}
if (propagate_down[2]) {
g_kh = inputs[2]->cast_grad_and_get_pointer<Tcu>(this->ctx_, !accum[2]);
auto kernel =
accum[2]
? (kernel_adaptive_separable_convolution_horizontal_weight_backward<
Tcu, true>)
: (kernel_adaptive_separable_convolution_horizontal_weight_backward<
Tcu, false>);
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, kh_size, g_y, g_kh, x, kv, y_strides,
x_strides, kv_strides, kh_strides, imaps,
kv_filters, o_sshape);
}
}
} // namespace nbla
|
d6002fcefdd9b75e7f90073d32e1a8622e9c8a3f.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "vec_thetest.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int n = XSIZE*YSIZE;
float *kz_is_imag = NULL;
hipMalloc(&kz_is_imag, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
vec_thetest), dim3(gridBlock),dim3(threadBlock), 0, 0, n,kz_is_imag);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
vec_thetest), dim3(gridBlock),dim3(threadBlock), 0, 0, n,kz_is_imag);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
vec_thetest), dim3(gridBlock),dim3(threadBlock), 0, 0, n,kz_is_imag);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | d6002fcefdd9b75e7f90073d32e1a8622e9c8a3f.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "vec_thetest.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int n = XSIZE*YSIZE;
float *kz_is_imag = NULL;
cudaMalloc(&kz_is_imag, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
vec_thetest<<<gridBlock,threadBlock>>>(n,kz_is_imag);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
vec_thetest<<<gridBlock,threadBlock>>>(n,kz_is_imag);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
vec_thetest<<<gridBlock,threadBlock>>>(n,kz_is_imag);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
2e72e73ce7e571674c9eb902ae923bbeeabecd56.hip | // !!! This is a file automatically generated by hipify!!!
// Initialize Memory using a Kernel - Two-Dimensional Data
// matrix_thread_id.cu
#include <iostream>
#include <cstdlib>
#include <hip/hip_runtime.h>
const unsigned ntpb = 32;
__global__ void initialize(float* a, float v, int n) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i < n && j < n)
a[j * n + i] = v;
}
int main(int argc, char* argv[]) {
if (argc != 3) {
std::cerr << "***Incorrect number of arguments***\n";
return 1;
}
unsigned n = atoi(argv[1]);
float v = atof(argv[2]);
int nb = (n + ntpb - 1) / ntpb;
std::cout << "n = " << n << ", No of Blocks = " << nb
<< ", No of Threads Per Block = " << ntpb << std::endl;
float* d_a = nullptr;
hipMalloc((void**)&d_a, n * n * sizeof(float));
if (!d_a) {
std::cerr << "***Out of Memory***\n";
return 2;
}
float* h_a = new float[n * n];
// launch
dim3 dGrid(nb, nb, 1);
dim3 dBlock(ntpb, ntpb, 1);
initialize << <dGrid, dBlock >> >(d_a, v, n);
// copy from device to host memory
hipMemcpy(h_a, d_a, n * n * sizeof(float), hipMemcpyDeviceToHost);
// check correctness
for (int i = 0; i < n * n; i++)
if (h_a[i] != v) std::cout << h_a[i] << "" << v << std::endl;
std::cout << "done" << std::endl;
hipFree(d_a);
delete[] h_a;
hipDeviceReset();
} | 2e72e73ce7e571674c9eb902ae923bbeeabecd56.cu | // Initialize Memory using a Kernel - Two-Dimensional Data
// matrix_thread_id.cu
#include <iostream>
#include <cstdlib>
#include <cuda_runtime.h>
const unsigned ntpb = 32;
__global__ void initialize(float* a, float v, int n) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i < n && j < n)
a[j * n + i] = v;
}
int main(int argc, char* argv[]) {
if (argc != 3) {
std::cerr << "***Incorrect number of arguments***\n";
return 1;
}
unsigned n = atoi(argv[1]);
float v = atof(argv[2]);
int nb = (n + ntpb - 1) / ntpb;
std::cout << "n = " << n << ", No of Blocks = " << nb
<< ", No of Threads Per Block = " << ntpb << std::endl;
float* d_a = nullptr;
cudaMalloc((void**)&d_a, n * n * sizeof(float));
if (!d_a) {
std::cerr << "***Out of Memory***\n";
return 2;
}
float* h_a = new float[n * n];
// launch
dim3 dGrid(nb, nb, 1);
dim3 dBlock(ntpb, ntpb, 1);
initialize << <dGrid, dBlock >> >(d_a, v, n);
// copy from device to host memory
cudaMemcpy(h_a, d_a, n * n * sizeof(float), cudaMemcpyDeviceToHost);
// check correctness
for (int i = 0; i < n * n; i++)
if (h_a[i] != v) std::cout << h_a[i] << "" << v << std::endl;
std::cout << "done" << std::endl;
cudaFree(d_a);
delete[] h_a;
cudaDeviceReset();
} |
8ff55976f68de6ec279ac1e06fa90d1811bacf23.hip | // !!! This is a file automatically generated by hipify!!!
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THH/generic/THHTensorMathCompareT.hip"
#else
void THCTensor_(gtTensor)(THCState *state, THCudaBoolTensor *self_, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THC_logicalTensor<bool, scalar_t>(state, self_, src1, src2,
TensorGTOp<scalar_t,
bool>());
}
void THCTensor_(leTensor)(THCState *state, THCudaBoolTensor *self_, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THC_logicalTensor<bool, scalar_t>(state, self_, src1, src2,
TensorLEOp<scalar_t,
bool>());
}
void THCTensor_(geTensor)(THCState *state, THCudaBoolTensor *self_, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THC_logicalTensor<bool, scalar_t>(state, self_, src1, src2,
TensorGEOp<scalar_t,
bool>());
}
void THCTensor_(eqTensor)(THCState *state, THCudaBoolTensor *self_, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THC_logicalTensor<bool, scalar_t>(state, self_, src1, src2,
TensorEQOp<scalar_t,
bool>());
}
void THCTensor_(neTensor)(THCState *state, THCudaBoolTensor *self_, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THC_logicalTensor<bool, scalar_t>(state, self_, src1, src2,
TensorNEOp<scalar_t,
bool>());
}
// Element-wise tensor comparison wrappers, instantiated once per scalar type
// via the THC "generic file" mechanism (THCTensor_(name) expands to a
// type-specific symbol). Each wrapper asserts that all three tensors live on
// the same GPU, then delegates to THC_logicalTensor, which applies the
// comparison functor pointwise to (src1, src2) and writes the result into
// self_. "...TensorT" variants store the result using the input scalar type;
// "...TensorByte" variants store an unsigned char (0/1) mask.

// self_[i] = (scalar_t)(src1[i] > src2[i])
void THCTensor_(gtTensorT)(THCState *state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THC_logicalTensor<scalar_t, scalar_t>(state, self_, src1, src2,
TensorGTOp<scalar_t,
scalar_t>());
}
// self_[i] = (scalar_t)(src1[i] <= src2[i])
void THCTensor_(leTensorT)(THCState *state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THC_logicalTensor<scalar_t, scalar_t>(state, self_, src1, src2,
TensorLEOp<scalar_t,
scalar_t>());
}
// self_[i] = (scalar_t)(src1[i] >= src2[i])
void THCTensor_(geTensorT)(THCState *state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THC_logicalTensor<scalar_t, scalar_t>(state, self_, src1, src2,
TensorGEOp<scalar_t,
scalar_t>());
}
// self_[i] = (scalar_t)(src1[i] == src2[i])
void THCTensor_(eqTensorT)(THCState *state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THC_logicalTensor<scalar_t, scalar_t>(state, self_, src1, src2,
TensorEQOp<scalar_t,
scalar_t>());
}
// self_[i] = (scalar_t)(src1[i] != src2[i])
void THCTensor_(neTensorT)(THCState *state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THC_logicalTensor<scalar_t, scalar_t>(state, self_, src1, src2,
TensorNEOp<scalar_t,
scalar_t>());
}
// self_[i] = (unsigned char)(src1[i] > src2[i])
void THCTensor_(gtTensorByte)(THCState *state, THCudaByteTensor *self_, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THC_logicalTensor<unsigned char, scalar_t>(state, self_, src1, src2,
TensorGTOp<scalar_t,
unsigned char>());
}
// self_[i] = (unsigned char)(src1[i] <= src2[i])
void THCTensor_(leTensorByte)(THCState *state, THCudaByteTensor *self_, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THC_logicalTensor<unsigned char, scalar_t>(state, self_, src1, src2,
TensorLEOp<scalar_t,
unsigned char>());
}
// self_[i] = (unsigned char)(src1[i] >= src2[i])
void THCTensor_(geTensorByte)(THCState *state, THCudaByteTensor *self_, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THC_logicalTensor<unsigned char, scalar_t>(state, self_, src1, src2,
TensorGEOp<scalar_t,
unsigned char>());
}
// self_[i] = (unsigned char)(src1[i] == src2[i])
void THCTensor_(eqTensorByte)(THCState *state, THCudaByteTensor *self_, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THC_logicalTensor<unsigned char, scalar_t>(state, self_, src1, src2,
TensorEQOp<scalar_t,
unsigned char>());
}
// self_[i] = (unsigned char)(src1[i] != src2[i])
void THCTensor_(neTensorByte)(THCState *state, THCudaByteTensor *self_, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THC_logicalTensor<unsigned char, scalar_t>(state, self_, src1, src2,
TensorNEOp<scalar_t,
unsigned char>());
}
#endif
| 8ff55976f68de6ec279ac1e06fa90d1811bacf23.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THC/generic/THCTensorMathCompareT.cu"
#else
// Element-wise tensor comparison wrappers (CUDA side), instantiated once per
// scalar type via the THC "generic file" mechanism. Each wrapper asserts the
// three tensors share a GPU, then delegates to THC_logicalTensor, which
// applies the comparison functor pointwise to (src1, src2) and writes the
// result into self_. Plain "...Tensor" variants produce a bool tensor,
// "...TensorT" variants produce the input scalar type, and "...TensorByte"
// variants produce an unsigned char (0/1) mask.

// self_[i] = (bool)(src1[i] > src2[i])
void THCTensor_(gtTensor)(THCState *state, THCudaBoolTensor *self_, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THC_logicalTensor<bool, scalar_t>(state, self_, src1, src2,
TensorGTOp<scalar_t,
bool>());
}
// self_[i] = (bool)(src1[i] <= src2[i])
void THCTensor_(leTensor)(THCState *state, THCudaBoolTensor *self_, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THC_logicalTensor<bool, scalar_t>(state, self_, src1, src2,
TensorLEOp<scalar_t,
bool>());
}
// self_[i] = (bool)(src1[i] >= src2[i])
void THCTensor_(geTensor)(THCState *state, THCudaBoolTensor *self_, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THC_logicalTensor<bool, scalar_t>(state, self_, src1, src2,
TensorGEOp<scalar_t,
bool>());
}
// self_[i] = (bool)(src1[i] == src2[i])
void THCTensor_(eqTensor)(THCState *state, THCudaBoolTensor *self_, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THC_logicalTensor<bool, scalar_t>(state, self_, src1, src2,
TensorEQOp<scalar_t,
bool>());
}
// self_[i] = (bool)(src1[i] != src2[i])
void THCTensor_(neTensor)(THCState *state, THCudaBoolTensor *self_, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THC_logicalTensor<bool, scalar_t>(state, self_, src1, src2,
TensorNEOp<scalar_t,
bool>());
}
// self_[i] = (scalar_t)(src1[i] > src2[i])
void THCTensor_(gtTensorT)(THCState *state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THC_logicalTensor<scalar_t, scalar_t>(state, self_, src1, src2,
TensorGTOp<scalar_t,
scalar_t>());
}
// self_[i] = (scalar_t)(src1[i] <= src2[i])
void THCTensor_(leTensorT)(THCState *state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THC_logicalTensor<scalar_t, scalar_t>(state, self_, src1, src2,
TensorLEOp<scalar_t,
scalar_t>());
}
// self_[i] = (scalar_t)(src1[i] >= src2[i])
void THCTensor_(geTensorT)(THCState *state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THC_logicalTensor<scalar_t, scalar_t>(state, self_, src1, src2,
TensorGEOp<scalar_t,
scalar_t>());
}
// self_[i] = (scalar_t)(src1[i] == src2[i])
void THCTensor_(eqTensorT)(THCState *state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THC_logicalTensor<scalar_t, scalar_t>(state, self_, src1, src2,
TensorEQOp<scalar_t,
scalar_t>());
}
// self_[i] = (scalar_t)(src1[i] != src2[i])
void THCTensor_(neTensorT)(THCState *state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THC_logicalTensor<scalar_t, scalar_t>(state, self_, src1, src2,
TensorNEOp<scalar_t,
scalar_t>());
}
// self_[i] = (unsigned char)(src1[i] > src2[i])
void THCTensor_(gtTensorByte)(THCState *state, THCudaByteTensor *self_, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THC_logicalTensor<unsigned char, scalar_t>(state, self_, src1, src2,
TensorGTOp<scalar_t,
unsigned char>());
}
// self_[i] = (unsigned char)(src1[i] <= src2[i])
void THCTensor_(leTensorByte)(THCState *state, THCudaByteTensor *self_, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THC_logicalTensor<unsigned char, scalar_t>(state, self_, src1, src2,
TensorLEOp<scalar_t,
unsigned char>());
}
// self_[i] = (unsigned char)(src1[i] >= src2[i])
void THCTensor_(geTensorByte)(THCState *state, THCudaByteTensor *self_, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THC_logicalTensor<unsigned char, scalar_t>(state, self_, src1, src2,
TensorGEOp<scalar_t,
unsigned char>());
}
// self_[i] = (unsigned char)(src1[i] == src2[i])
void THCTensor_(eqTensorByte)(THCState *state, THCudaByteTensor *self_, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THC_logicalTensor<unsigned char, scalar_t>(state, self_, src1, src2,
TensorEQOp<scalar_t,
unsigned char>());
}
// self_[i] = (unsigned char)(src1[i] != src2[i])
void THCTensor_(neTensorByte)(THCState *state, THCudaByteTensor *self_, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THC_logicalTensor<unsigned char, scalar_t>(state, self_, src1, src2,
TensorNEOp<scalar_t,
unsigned char>());
}
#endif
|
d6e85ebfc938094ee14a965789301beb43efc045.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "pbf_xsph.h"
#include "../../kernel/cuda/pbf_kernel.h"
#include "../../util/pbf_cuda_util.h"
#include "../../interaction/cuda/pbf_grid.h"
#include <thrust/device_vector.h>
#include <thrust/device_ptr.h>
#include <thrust/sort.h>
#include <device_launch_parameters.h>
#include <sm_35_intrinsics.h>
using namespace std;
namespace {
// XSPH contribution from one neighbour particle: the neighbour's velocity
// relative to this particle, weighted by the smoothing kernel evaluated at
// the pair distance (smoothing length h).
template<typename kernel_t>
__device__ dom_dim calcXSPHEach(uint32_t helper_index, const dom_dim& self_pos, const dom_dim& self_vel,
const dom_dim* helper_pos, const dom_dim* helper_vel, scalar_t h) {
	const auto dist = glm::length(helper_pos[helper_index] - self_pos);
	const auto w = pbf::kernel::cuda::weight<kernel_t>(dist, 1.f / h);
	return (helper_vel[helper_index] - self_vel) * w;
}
// Accumulates the XSPH contributions of all particles stored in a single
// uniform-grid cell. [start_index, end_index) index the sorted particle
// arrays; start_index == 0xFFFFFFFF marks an empty cell.
template<typename kernel_t>
__device__
dom_dim calcContributionWithinCell(
uint32_t start_index,
uint32_t end_index,
const dom_dim& self_pos, const dom_dim& self_vel,
const dom_dim* helper_pos, const dom_dim* helper_vel,
scalar_t h
)
{
auto sum_prop = dom_dim(0.f);
if (start_index != 0xFFFFFFFF) {
// iterate over particles in this cell (the particle itself included;
// its own term is zero since vel_diff is zero)
for (auto i = start_index; i < end_index; ++i) {
sum_prop += calcXSPHEach<kernel_t>(i, self_pos, self_vel, helper_pos, helper_vel, h);
}
}
return sum_prop;
}
// Sums the XSPH term over the 3x3x3 block of uniform-grid cells surrounding
// the particle's own cell (fixed-radius neighbour search; assumes the kernel
// support fits in one cell width).
// NOTE(review): neighbour cell coordinates are not clamped here; presumably
// calcGridHash handles out-of-range/wrapping cells -- confirm.
template<typename kernel_t>
__device__ dom_dim calcContribution(
const dom_dim& self_pos,
const uint32_t* cell_start,
const uint32_t* cell_end,
scalar_t cell_width,
const dom_udim& grid_size,
const dom_dim& self_vel,
const dom_dim* helper_pos, const dom_dim* helper_vel,
scalar_t h
)
{
auto grid = pbf::cuda::calcGridPos(self_pos, cell_width);
auto sum_prop = dom_dim(0.f);
#pragma unroll
for (int z = -1; z <= 1; ++z) {
#pragma unroll
for (int y = -1; y <= 1; ++y) {
#pragma unroll
for (int x = -1; x <= 1; ++x) {
dom_idim neighbor_grid(grid.x + x, grid.y + y, grid.z + z);
auto neighbor_grid_hash = pbf::cuda::calcGridHash(neighbor_grid, grid_size);
auto start_index = cell_start[neighbor_grid_hash];
auto end_index = cell_end[neighbor_grid_hash];
sum_prop += calcContributionWithinCell<kernel_t>(start_index, end_index, self_pos, self_vel, helper_pos, helper_vel, h);
}
}
}
return sum_prop;
}
// One thread per particle: applies the XSPH velocity smoothing
//   v' = v + xsph_param * m * (1/rho0) * sum_j W(|x_j - x_i|, h) * (v_j - v_i)
// reading pos/old_vel (sorted particle order) and writing new_vel.
// A NaN correction leaves the particle's velocity unchanged.
template<typename kernel_t>
__global__ void calcXSPHCUDA(
dom_dim* new_vel,
const dom_dim* pos,
const dom_dim* old_vel,
const uint32_t* cell_start,
const uint32_t* cell_end,
scalar_t cell_width, dom_udim grid_size,
scalar_t h, scalar_t m, scalar_t inv_rho0,
scalar_t xsph_param,
int num_particle
)
{
const uint32_t index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= num_particle) return;
auto self_pos = pos[index];
auto self_vel = old_vel[index];
// Contribution Calculation
auto v = calcContribution<kernel_t>(self_pos, cell_start, cell_end, cell_width, grid_size, self_vel, pos, old_vel, h);
if (isnan(v.x) || isnan(v.y) || isnan(v.z)) {
new_vel[index] = self_vel;
}
else {
new_vel[index] = self_vel + xsph_param * m * inv_rho0 * v;
}
}
} // end of unnamed ns
namespace pbf {
namespace cuda {
;
// Host-side launcher for the XSPH smoothing pass: 128 threads per block, one
// thread per particle. All particle buffers must be in the sorted order
// produced by the neighbor_search structure (its cell_start/cell_end tables
// index into them).
// NOTE(review): launch errors are only checked in _DEBUG builds.
void applyXSPH(
dom_dim* new_velocity,
const dom_dim* sorted_position,
const dom_dim* old_sorted_velocity,
std::shared_ptr<neighbor_search>& ns,
scalar_t smoothing_length,
scalar_t xsph_parameter,
scalar_t inv_stable_density,
scalar_t particle_mass,
int num_particle
)
{
typedef kernel::cuda::PBFKERNEL kernel_t;
uint32_t num_thread, num_block;
computeGridSize(num_particle, 128, num_block, num_thread);
using namespace std;
const auto& cell_start = ns->getCellStart();
const auto& cell_end = ns->getCellEnd();
const auto& cell_width = ns->getCellWidth();
auto& grid_size = ns->getGridSize();
if (num_block > 0)
calcXSPHCUDA<kernel_t> << < num_block, num_thread >> >
(new_velocity, sorted_position, old_sorted_velocity, cell_start, cell_end, cell_width, grid_size,
smoothing_length, particle_mass, inv_stable_density, xsph_parameter, num_particle);
#ifdef _DEBUG
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
#endif
}
} // end of cuda ns
} // end of pbf ns
| d6e85ebfc938094ee14a965789301beb43efc045.cu | #include "pbf_xsph.h"
#include "../../kernel/cuda/pbf_kernel.h"
#include "../../util/pbf_cuda_util.h"
#include "../../interaction/cuda/pbf_grid.h"
#include <thrust/device_vector.h>
#include <thrust/device_ptr.h>
#include <thrust/sort.h>
#include <device_launch_parameters.h>
#include <sm_35_intrinsics.h>
using namespace std;
namespace {
// XSPH contribution from one neighbour particle: the neighbour's velocity
// relative to this particle, weighted by the smoothing kernel evaluated at
// the pair distance (smoothing length h).
template<typename kernel_t>
__device__ dom_dim calcXSPHEach(uint32_t helper_index, const dom_dim& self_pos, const dom_dim& self_vel,
const dom_dim* helper_pos, const dom_dim* helper_vel, scalar_t h) {
	const auto dist = glm::length(helper_pos[helper_index] - self_pos);
	const auto w = pbf::kernel::cuda::weight<kernel_t>(dist, 1.f / h);
	return (helper_vel[helper_index] - self_vel) * w;
}
// Accumulates the XSPH contributions of all particles stored in a single
// uniform-grid cell. [start_index, end_index) index the sorted particle
// arrays; start_index == 0xFFFFFFFF marks an empty cell.
template<typename kernel_t>
__device__
dom_dim calcContributionWithinCell(
uint32_t start_index,
uint32_t end_index,
const dom_dim& self_pos, const dom_dim& self_vel,
const dom_dim* helper_pos, const dom_dim* helper_vel,
scalar_t h
)
{
	dom_dim acc(0.f);
	// 0xFFFFFFFF marks a cell with no particles hashed into it.
	if (start_index == 0xFFFFFFFF)
		return acc;
	for (auto idx = start_index; idx < end_index; ++idx)
		acc += calcXSPHEach<kernel_t>(idx, self_pos, self_vel, helper_pos, helper_vel, h);
	return acc;
}
// Sums the XSPH term over the 3x3x3 block of uniform-grid cells surrounding
// the particle's own cell (fixed-radius neighbour search; assumes the kernel
// support fits in one cell width).
// NOTE(review): neighbour cell coordinates are not clamped here; presumably
// calcGridHash handles out-of-range/wrapping cells -- confirm.
template<typename kernel_t>
__device__ dom_dim calcContribution(
const dom_dim& self_pos,
const uint32_t* cell_start,
const uint32_t* cell_end,
scalar_t cell_width,
const dom_udim& grid_size,
const dom_dim& self_vel,
const dom_dim* helper_pos, const dom_dim* helper_vel,
scalar_t h
)
{
auto grid = pbf::cuda::calcGridPos(self_pos, cell_width);
auto sum_prop = dom_dim(0.f);
#pragma unroll
for (int z = -1; z <= 1; ++z) {
#pragma unroll
for (int y = -1; y <= 1; ++y) {
#pragma unroll
for (int x = -1; x <= 1; ++x) {
dom_idim neighbor_grid(grid.x + x, grid.y + y, grid.z + z);
auto neighbor_grid_hash = pbf::cuda::calcGridHash(neighbor_grid, grid_size);
auto start_index = cell_start[neighbor_grid_hash];
auto end_index = cell_end[neighbor_grid_hash];
sum_prop += calcContributionWithinCell<kernel_t>(start_index, end_index, self_pos, self_vel, helper_pos, helper_vel, h);
}
}
}
return sum_prop;
}
// One thread per particle: applies the XSPH velocity smoothing
//   v' = v + xsph_param * m * (1/rho0) * sum_j W(|x_j - x_i|, h) * (v_j - v_i)
// reading pos/old_vel (sorted particle order) and writing new_vel.
// A NaN correction leaves the particle's velocity unchanged.
template<typename kernel_t>
__global__ void calcXSPHCUDA(
dom_dim* new_vel,
const dom_dim* pos,
const dom_dim* old_vel,
const uint32_t* cell_start,
const uint32_t* cell_end,
scalar_t cell_width, dom_udim grid_size,
scalar_t h, scalar_t m, scalar_t inv_rho0,
scalar_t xsph_param,
int num_particle
)
{
const uint32_t index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= num_particle) return;
auto self_pos = pos[index];
auto self_vel = old_vel[index];
// Contribution Calculation
auto v = calcContribution<kernel_t>(self_pos, cell_start, cell_end, cell_width, grid_size, self_vel, pos, old_vel, h);
if (isnan(v.x) || isnan(v.y) || isnan(v.z)) {
new_vel[index] = self_vel;
}
else {
new_vel[index] = self_vel + xsph_param * m * inv_rho0 * v;
}
}
} // end of unnamed ns
namespace pbf {
namespace cuda {
;
void applyXSPH(
dom_dim* new_velocity,
const dom_dim* sorted_position,
const dom_dim* old_sorted_velocity,
std::shared_ptr<neighbor_search>& ns,
scalar_t smoothing_length,
scalar_t xsph_parameter,
scalar_t inv_stable_density,
scalar_t particle_mass,
int num_particle
)
{
typedef kernel::cuda::PBFKERNEL kernel_t;
uint32_t num_thread, num_block;
computeGridSize(num_particle, 128, num_block, num_thread);
using namespace std;
const auto& cell_start = ns->getCellStart();
const auto& cell_end = ns->getCellEnd();
const auto& cell_width = ns->getCellWidth();
auto& grid_size = ns->getGridSize();
if (num_block > 0)
calcXSPHCUDA<kernel_t> << < num_block, num_thread >> >
(new_velocity, sorted_position, old_sorted_velocity, cell_start, cell_end, cell_width, grid_size,
smoothing_length, particle_mass, inv_stable_density, xsph_parameter, num_particle);
#ifdef _DEBUG
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
#endif
}
} // end of cuda ns
} // end of pbf ns
|
64eb60c93a6f63061b09d2e1081a75ab2e2152b8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/******************************************************************************
* Copyright 2019 The Apollo Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*****************************************************************************/
#include "planning_block.h"
#include <iostream>
namespace apollo {
namespace planning {
// Selects device 0 and prints its name.
// Returns true on success (CUDA_CHECK reports/aborts on API failure).
// Fix: the original unconditionally returned false, which made every caller
// of the form `if (!InitialCuda()) return false;` bail out before doing any
// GPU work at all.
bool InitialCuda() {
  int dev = 0;
  hipDeviceProp_t deviceProp;
  CUDA_CHECK(hipGetDeviceProperties(&deviceProp, dev));
  printf("Using device %d: %s\n", dev, deviceProp.name);
  CUDA_CHECK(hipSetDevice(dev));
  return true;
}
// Copies the lower-triangle sparsity pattern (rind_L/cind_L) into the
// solver's (iRow, jCol) arrays, one element per thread.
// Fix: use the global thread index. The original used threadIdx.x alone, so
// with more than one block only the first blockDim.x entries were written
// (redundantly, by every block).
__global__ void fill_lower_left_gpu(int *iRow, int *jCol,
    unsigned int *rind_L, unsigned int *cind_L, const int nnz_L) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < nnz_L) {
    iRow[i] = rind_L[i];
    jCol[i] = cind_L[i];
  }
}
// Element-wise device-to-device copy: dst[i] = src[i], one element per thread.
// Fix: use the global thread index. The original used threadIdx.x alone, so
// with more than one block only the first blockDim.x elements were copied.
template<typename T>
__global__ void data_transfer_gpu(T *dst, const T* src,
    const int size) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < size) {
    dst[i] = src[i];
  }
}
// Expands the (rind_L, cind_L) sparsity pattern into (iRow, jCol) on the GPU.
// Returns true on success, false if device init fails.
// Fixes vs. the original:
//  * the inputs rind_L/cind_L are now uploaded before the kernel runs (the
//    original launched the kernel against uninitialized device buffers);
//  * the outputs iRow/jCol are downloaded after the kernel (the original
//    copied the input buffers back instead, clobbering rind_L/cind_L with
//    uninitialized device memory).
// NOTE(review): hipDeviceReset() tears down the whole context on every call;
// kept for behavioral parity, but confirm it is intended.
bool fill_lower_left(int *iRow, int *jCol,
    unsigned int *rind_L, unsigned int *cind_L, const int nnz_L) {
  if (!InitialCuda())
    return false;
  int *d_iRow, *d_jCol;
  unsigned int *d_rind_L, *d_cind_L;
  unsigned int nBytes = nnz_L * sizeof(int);
  unsigned int nUBytes = nnz_L * sizeof(unsigned int);
  hipMalloc((void**)&d_iRow, nBytes);
  hipMalloc((void**)&d_jCol, nBytes);
  hipMalloc((void**)&d_rind_L, nUBytes);
  hipMalloc((void**)&d_cind_L, nUBytes);
  // upload the sparsity pattern (kernel inputs)
  hipMemcpy(d_rind_L, rind_L, nUBytes, hipMemcpyHostToDevice);
  hipMemcpy(d_cind_L, cind_L, nUBytes, hipMemcpyHostToDevice);
  dim3 block(BLOCK_1);
  dim3 grid((nnz_L + block.x - 1)/block.x);
  hipLaunchKernelGGL(( fill_lower_left_gpu), dim3(grid), dim3(block), 0, 0, d_iRow, d_jCol,
      d_rind_L, d_cind_L, nnz_L);
  hipDeviceSynchronize();
  // download the filled row/column index arrays (kernel outputs)
  hipMemcpy(iRow, d_iRow, nBytes, hipMemcpyDeviceToHost);
  hipMemcpy(jCol, d_jCol, nBytes, hipMemcpyDeviceToHost);
  hipFree(d_iRow);
  hipFree(d_jCol);
  hipFree(d_rind_L);
  hipFree(d_cind_L);
  hipDeviceReset();
  return true;
}
// Copies size elements from host array src to host array dst by round-tripping
// through the GPU. Returns true on success, false if device init fails.
// Fix: the original launched the kernel on the *host* pointers (dst, src),
// which the device then dereferenced (illegal address); it must receive the
// device copies d_dst/d_src. The pointless upload of dst's prior contents was
// also dropped -- the kernel overwrites every element i < size.
template<typename T>
bool data_transfer(T *dst, const T *src, const int size) {
  if (!InitialCuda())
    return false;
  T *d_dst, *d_src;
  size_t nBytes = size * sizeof(T);
  hipMalloc((void**)&d_dst, nBytes);
  hipMalloc((void**)&d_src, nBytes);
  hipMemcpy(d_src, src, nBytes, hipMemcpyHostToDevice);
  dim3 block(BLOCK_1);
  dim3 grid((size + block.x -1) / block.x);
  hipLaunchKernelGGL(( data_transfer_gpu), dim3(grid), dim3(block), 0, 0, d_dst, d_src, size);
  hipDeviceSynchronize();
  hipMemcpy(dst, d_dst, nBytes, hipMemcpyDeviceToHost);
  hipFree(d_dst);
  hipFree(d_src);
  hipDeviceReset();
  return true;
}
DATA_TRANSFER_INST(int);
DATA_TRANSFER_INST(double);
DATA_TRANSFER_INST(float);
} // namespace planning
} // namespace apollo
| 64eb60c93a6f63061b09d2e1081a75ab2e2152b8.cu | /******************************************************************************
* Copyright 2019 The Apollo Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*****************************************************************************/
#include "planning_block.h"
#include <iostream>
namespace apollo {
namespace planning {
// Selects device 0 and prints its name.
// Returns true on success (CUDA_CHECK reports/aborts on API failure).
// Fix: the original unconditionally returned false, which made every caller
// of the form `if (!InitialCuda()) return false;` bail out before doing any
// GPU work at all.
bool InitialCuda() {
  int dev = 0;
  cudaDeviceProp deviceProp;
  CUDA_CHECK(cudaGetDeviceProperties(&deviceProp, dev));
  printf("Using device %d: %s\n", dev, deviceProp.name);
  CUDA_CHECK(cudaSetDevice(dev));
  return true;
}
// Copies the lower-triangle sparsity pattern (rind_L/cind_L) into the
// solver's (iRow, jCol) arrays, one element per thread.
// Fix: use the global thread index. The original used threadIdx.x alone, so
// with more than one block only the first blockDim.x entries were written
// (redundantly, by every block).
__global__ void fill_lower_left_gpu(int *iRow, int *jCol,
    unsigned int *rind_L, unsigned int *cind_L, const int nnz_L) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < nnz_L) {
    iRow[i] = rind_L[i];
    jCol[i] = cind_L[i];
  }
}
// Element-wise device-to-device copy: dst[i] = src[i], one element per thread.
// Fix: use the global thread index. The original used threadIdx.x alone, so
// with more than one block only the first blockDim.x elements were copied.
template<typename T>
__global__ void data_transfer_gpu(T *dst, const T* src,
    const int size) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < size) {
    dst[i] = src[i];
  }
}
// Expands the (rind_L, cind_L) sparsity pattern into (iRow, jCol) on the GPU.
// Returns true on success, false if device init fails.
// Fixes vs. the original:
//  * the inputs rind_L/cind_L are now uploaded before the kernel runs (the
//    original launched the kernel against uninitialized device buffers);
//  * the outputs iRow/jCol are downloaded after the kernel (the original
//    copied the input buffers back instead, clobbering rind_L/cind_L with
//    uninitialized device memory).
// NOTE(review): cudaDeviceReset() tears down the whole context on every call;
// kept for behavioral parity, but confirm it is intended.
bool fill_lower_left(int *iRow, int *jCol,
    unsigned int *rind_L, unsigned int *cind_L, const int nnz_L) {
  if (!InitialCuda())
    return false;
  int *d_iRow, *d_jCol;
  unsigned int *d_rind_L, *d_cind_L;
  unsigned int nBytes = nnz_L * sizeof(int);
  unsigned int nUBytes = nnz_L * sizeof(unsigned int);
  cudaMalloc((void**)&d_iRow, nBytes);
  cudaMalloc((void**)&d_jCol, nBytes);
  cudaMalloc((void**)&d_rind_L, nUBytes);
  cudaMalloc((void**)&d_cind_L, nUBytes);
  // upload the sparsity pattern (kernel inputs)
  cudaMemcpy(d_rind_L, rind_L, nUBytes, cudaMemcpyHostToDevice);
  cudaMemcpy(d_cind_L, cind_L, nUBytes, cudaMemcpyHostToDevice);
  dim3 block(BLOCK_1);
  dim3 grid((nnz_L + block.x - 1)/block.x);
  fill_lower_left_gpu<<<grid, block>>>(d_iRow, d_jCol,
      d_rind_L, d_cind_L, nnz_L);
  cudaDeviceSynchronize();
  // download the filled row/column index arrays (kernel outputs)
  cudaMemcpy(iRow, d_iRow, nBytes, cudaMemcpyDeviceToHost);
  cudaMemcpy(jCol, d_jCol, nBytes, cudaMemcpyDeviceToHost);
  cudaFree(d_iRow);
  cudaFree(d_jCol);
  cudaFree(d_rind_L);
  cudaFree(d_cind_L);
  cudaDeviceReset();
  return true;
}
// Copies size elements from host array src to host array dst by round-tripping
// through the GPU. Returns true on success, false if device init fails.
// Fix: the original launched the kernel on the *host* pointers (dst, src),
// which the device then dereferenced (illegal address); it must receive the
// device copies d_dst/d_src. The pointless upload of dst's prior contents was
// also dropped -- the kernel overwrites every element i < size.
template<typename T>
bool data_transfer(T *dst, const T *src, const int size) {
  if (!InitialCuda())
    return false;
  T *d_dst, *d_src;
  size_t nBytes = size * sizeof(T);
  cudaMalloc((void**)&d_dst, nBytes);
  cudaMalloc((void**)&d_src, nBytes);
  cudaMemcpy(d_src, src, nBytes, cudaMemcpyHostToDevice);
  dim3 block(BLOCK_1);
  dim3 grid((size + block.x -1) / block.x);
  data_transfer_gpu<<<grid, block>>>(d_dst, d_src, size);
  cudaDeviceSynchronize();
  cudaMemcpy(dst, d_dst, nBytes, cudaMemcpyDeviceToHost);
  cudaFree(d_dst);
  cudaFree(d_src);
  cudaDeviceReset();
  return true;
}
DATA_TRANSFER_INST(int);
DATA_TRANSFER_INST(double);
DATA_TRANSFER_INST(float);
} // namespace planning
} // namespace apollo
|
075d9780aba748fd46c7f52986cb8ae6fa25254b.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <cutil.h>
// Includes
#include <stdio.h>
// includes, project
#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
// includes CUDA
#include <hip/hip_runtime.h>
//NI DAQ
#include "../include/ContAcq-IntClk.h"
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 60
#define ITERATIONS REPLACE_ITERATIONS
// Variables
bool noprompt = false;
unsigned int my_timer;
texture<float,1,hipReadModeElementType> texmem1;
texture<float,1,hipReadModeElementType> texmem2;
texture<float,1,hipReadModeElementType> texmem3;
texture<float,1,hipReadModeElementType> texmem4;
__constant__ float ConstArray1[THREADS_PER_BLOCK];
__constant__ float ConstArray2[THREADS_PER_BLOCK];
// Functions
void CleanupResources(void);
void RandomInit_int(unsigned*, int);
void RandomInit_fp(float*, int);
void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
// Reports a runtime-API failure with file/line context and aborts the
// process; no-op on hipSuccess. Invoked through the checkCudaErrors macro.
inline void __checkCudaErrors(hipError_t err, const char *file, const int line )
{
	if (err == hipSuccess)
		return;
	fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, hipGetErrorString( err ) );
	exit(-1);
}
// This will output the proper error string when calling hipGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
// Checks (and clears) the sticky last-error state -- typically called right
// after a kernel launch, which itself returns no status. Prints the caller's
// errorMessage plus file/line and aborts on any pending error.
// Invoked through the getLastCudaError macro.
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
hipError_t err = hipGetLastError();
if (hipSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
// Device code
// Power/stress kernel: mixes texture fetches, constant-memory reads,
// shared-memory reads and transcendental math to exercise several GPU
// datapaths at once. The numeric results are not meaningful; only the
// activity pattern matters.
__global__ void PowerKernal1(float *A, float *B, int N)
{
	// Fix: global thread id. The original computed blockIdx.x*blockIdx.x
	// (block index squared) instead of blockIdx.x*blockDim.x, producing
	// colliding ids across blocks instead of one id per thread.
	int tid = blockIdx.x*blockDim.x + threadIdx.x;
	float Value1=0;
	float Value2=0;
	__device__ __shared__ float I1[THREADS_PER_BLOCK];
	__device__ __shared__ float I2[THREADS_PER_BLOCK];
	I1[tid%THREADS_PER_BLOCK] = A[tid];
	I2[tid%THREADS_PER_BLOCK] = B[tid];
	__syncthreads();
	float sum = 0.0;
	if(tid < N){
		for(unsigned i=0; i<ITERATIONS; ++i){
			sum = tex1Dfetch(texmem1,tid)+B[tid];
			for(unsigned j=1; j<=2; ++j){
				sum+=tex1Dfetch(texmem1,tid*j);
				Value1 *=ConstArray1[(tid+5)%THREADS_PER_BLOCK];
				Value2 += A[tid]* I1[(i+j)%THREADS_PER_BLOCK]+Value1;
				sum+=B[tid]+Value2;
				Value1 += I2[(i+j+5)%THREADS_PER_BLOCK]+A[tid];
				Value2 *= ConstArray2[(tid)%THREADS_PER_BLOCK]* I2[(i+j)%THREADS_PER_BLOCK];
				sum/=log2(ConstArray1[(tid+10)%THREADS_PER_BLOCK]);
			}
			A[tid*2] = sum+Value1;
			B[tid] = A[tid*2]+A[tid]+Value2;
		}
	}
}
// Busy-wait "empty" kernel used as a power-measurement baseline: each thread
// spins through ITERATIONS*(blockDim.x + 299) loop iterations doing no real
// work (the PTX branch chain below is commented out), then writes its global
// id to C. Parameter N is unused.
__global__ void PowerKernalEmpty(unsigned* C, int N)
{
unsigned id = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
__syncthreads();
// Excessive Mod/Div Operations
for(unsigned long k=0; k<ITERATIONS*(blockDim.x + 299);k++) {
//Value1=(I1)+k;
//Value2=(I2)+k;
//Value3=(Value2)+k;
//Value2=(Value1)+k;
/*
__asm volatile (
"B0: bra.uni B1;\n\t"
"B1: bra.uni B2;\n\t"
"B2: bra.uni B3;\n\t"
"B3: bra.uni B4;\n\t"
"B4: bra.uni B5;\n\t"
"B5: bra.uni B6;\n\t"
"B6: bra.uni B7;\n\t"
"B7: bra.uni B8;\n\t"
"B8: bra.uni B9;\n\t"
"B9: bra.uni B10;\n\t"
"B10: bra.uni B11;\n\t"
"B11: bra.uni B12;\n\t"
"B12: bra.uni B13;\n\t"
"B13: bra.uni B14;\n\t"
"B14: bra.uni B15;\n\t"
"B15: bra.uni B16;\n\t"
"B16: bra.uni B17;\n\t"
"B17: bra.uni B18;\n\t"
"B18: bra.uni B19;\n\t"
"B19: bra.uni B20;\n\t"
"B20: bra.uni B21;\n\t"
"B21: bra.uni B22;\n\t"
"B22: bra.uni B23;\n\t"
"B23: bra.uni B24;\n\t"
"B24: bra.uni B25;\n\t"
"B25: bra.uni B26;\n\t"
"B26: bra.uni B27;\n\t"
"B27: bra.uni B28;\n\t"
"B28: bra.uni B29;\n\t"
"B29: bra.uni B30;\n\t"
"B30: bra.uni B31;\n\t"
"B31: bra.uni LOOP;\n\t"
"LOOP:"
);
*/
}
C[id]=id;
__syncthreads();
}
// Host code
float *h_A1, *h_A2, *h_A3;
float *d_A1, *d_A2, *d_A3;
// Driver for the power microbenchmark: fills constant memory and four
// textures, allocates/initializes the A/B working buffers, then runs
// PowerKernal1 bracketed by a cutil timer and an external NI-DAQ power
// capture (LaunchDAQ/TurnOffDAQ).
int main()
{
printf("Power Microbenchmarks\n");
float array1[THREADS_PER_BLOCK];
for(int i=0; i<THREADS_PER_BLOCK;i++){
// NOTE(review): srand(time(0)) is re-seeded every iteration, so all values
// drawn within the same second are identical; and rand() / RAND_MAX is
// *integer* division, which yields 0 for almost every draw. Likely intended:
// seed once and divide by (float)RAND_MAX.
srand(time(0));
array1[i] = rand() / RAND_MAX;
}
float array2[THREADS_PER_BLOCK];
for(int i=0; i<THREADS_PER_BLOCK;i++){
// NOTE(review): same re-seeding / integer-division issue as above.
srand(time(0));
array2[i] = rand() / RAND_MAX;
}
// NOTE(review): symbol lookup by string name is deprecated/removed in newer
// toolkits; confirm this still resolves ConstArray1/ConstArray2 -- the
// address-of-symbol form is the supported spelling.
hipMemcpyToSymbol("ConstArray1", array1, sizeof(float) * THREADS_PER_BLOCK );
hipMemcpyToSymbol("ConstArray2", array2, sizeof(float) * THREADS_PER_BLOCK );
int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS*2;
// Allocate input vectors h_A and h_B in host memory
size_t size1 = N * sizeof(float);
h_A1 = (float*)malloc(size1);
if (h_A1 == 0) CleanupResources();
h_A2 = (float*)malloc(size1);
if (h_A2 == 0) CleanupResources();
// Host staging buffer for the texture contents (ramp 0..N-1).
// NOTE(review): host_texture1 and the four device_texture buffers are never
// freed -- acceptable for a one-shot benchmark, but it is a leak.
float *host_texture1 = (float*) malloc(size1);
for (int i=0; i< N; i++) {
host_texture1[i] = i;
}
float *device_texture1;
float *device_texture2;
float *device_texture3;
float *device_texture4;
hipMalloc((void**) &device_texture1, size1);
hipMalloc((void**) &device_texture2, size1);
hipMalloc((void**) &device_texture3, size1);
hipMalloc((void**) &device_texture4, size1);
hipMemcpy(device_texture1, host_texture1, size1, hipMemcpyHostToDevice);
hipMemcpy(device_texture2, host_texture1, size1, hipMemcpyHostToDevice);
hipMemcpy(device_texture3, host_texture1, size1, hipMemcpyHostToDevice);
hipMemcpy(device_texture4, host_texture1, size1, hipMemcpyHostToDevice);
hipBindTexture(0, texmem1, device_texture1, size1);
hipBindTexture(0, texmem2, device_texture2, size1);
hipBindTexture(0, texmem3, device_texture3, size1);
hipBindTexture(0, texmem4, device_texture4, size1);
// 1x1 launch config for the (currently commented-out) baseline kernel.
dim3 dimGrid2(1,1);
dim3 dimBlock2(1,1);
// Initialize input vectors
RandomInit_fp(h_A1, N);
RandomInit_fp(h_A2, N);
// Allocate vectors in device memory
checkCudaErrors( hipMalloc((void**)&d_A1, size1) );
checkCudaErrors( hipMalloc((void**)&d_A2, size1) );
// Copy vectors from host memory to device memory
checkCudaErrors( hipMemcpy(d_A1, h_A1, size1, hipMemcpyHostToDevice) );
checkCudaErrors( hipMemcpy(d_A2, h_A2, size1, hipMemcpyHostToDevice) );
dim3 dimGrid(NUM_OF_BLOCKS,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
// Start the wall-clock timer and the external power-capture session.
CUT_SAFE_CALL(cutCreateTimer(&my_timer));
TaskHandle taskhandle = LaunchDAQ();
CUT_SAFE_CALL(cutStartTimer(my_timer));
CUDA_SAFE_CALL( hipDeviceSynchronize() );
//PowerKernalEmpty<<<dimGrid2,dimBlock2>>>(d_A3, N);
CUDA_SAFE_CALL( hipDeviceSynchronize() );
printf("execution time = %f\n", cutGetTimerValue(my_timer));
hipLaunchKernelGGL(( PowerKernal1), dim3(dimGrid),dim3(dimBlock), 0, 0, d_A1, d_A2, N);
CUDA_SAFE_CALL( hipDeviceSynchronize() );
printf("execution time = %f\n", cutGetTimerValue(my_timer));
//PowerKernalEmpty<<<dimGrid2,dimBlock2>>>(d_A3, N);
CUDA_SAFE_CALL( hipDeviceSynchronize() );
printf("execution time = %f\n", cutGetTimerValue(my_timer));
getLastCudaError("kernel launch failure");
CUDA_SAFE_CALL( hipDeviceSynchronize() );
CUT_SAFE_CALL(cutStopTimer(my_timer));
TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer));
printf("execution time = %f\n", cutGetTimerValue(my_timer));
CUT_SAFE_CALL(cutDeleteTimer(my_timer));
#ifdef _DEBUG
checkCudaErrors( hipDeviceSynchronize() );
#endif
// Copy result from device memory to host memory
CleanupResources();
return 0;
}
// Releases the global host/device working buffers. Null-checked, so calling
// it before the buffers are allocated is safe (file-scope pointers are
// zero-initialized). NOTE(review): the pointers are not reset to null after
// freeing, so calling this twice would double-free.
void CleanupResources(void)
{
// Free device memory
if (d_A1)
hipFree(d_A1);
if (d_A2)
hipFree(d_A2);
if (d_A3)
hipFree(d_A3);
// Free host memory
if (h_A1)
free(h_A1);
if (h_A2)
free(h_A2);
if (h_A3)
free(h_A3);
}
// Fills data[0..n) with pseudo-random values in [0, 1].
// Fixes vs. the original:
//  * rand() / RAND_MAX was *integer* division and produced 0 for almost
//    every draw; divide by (float)RAND_MAX instead;
//  * srand(time(0)) was re-seeded inside the loop, making every element
//    drawn within the same second identical; seed once before the loop.
// NOTE(review): the forward declaration at the top of the file says
// RandomInit_int(unsigned*, int); this float* definition is a distinct
// overload (the unsigned* one is never defined or called) -- confirm which
// signature is intended.
void RandomInit_int(float* data, int n)
{
	srand((unsigned)time(0));
	for (int i = 0; i < n; ++i){
		data[i] = rand() / (float)RAND_MAX;
	}
}
// Fills data[0..n) with pseudo-random values in [0, 1] using the current
// rand() stream (no seeding here; the caller controls the seed).
// Fix: rand() / RAND_MAX was *integer* division, which produced 0 for almost
// every draw; divide by (float)RAND_MAX to get the intended [0, 1] floats.
void RandomInit_fp(float* data, int n)
{
	for (int i = 0; i < n; ++i){
		data[i] = rand() / (float)RAND_MAX;
	}
}
| 075d9780aba748fd46c7f52986cb8ae6fa25254b.cu | #include <stdio.h>
#include <stdlib.h>
#include <cutil.h>
// Includes
#include <stdio.h>
// includes, project
#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
// includes CUDA
#include <cuda_runtime.h>
//NI DAQ
#include "../include/ContAcq-IntClk.h"
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 60
#define ITERATIONS REPLACE_ITERATIONS
// Variables
bool noprompt = false;
unsigned int my_timer;
texture<float,1,cudaReadModeElementType> texmem1;
texture<float,1,cudaReadModeElementType> texmem2;
texture<float,1,cudaReadModeElementType> texmem3;
texture<float,1,cudaReadModeElementType> texmem4;
__constant__ float ConstArray1[THREADS_PER_BLOCK];
__constant__ float ConstArray2[THREADS_PER_BLOCK];
// Functions
void CleanupResources(void);
void RandomInit_int(unsigned*, int);
void RandomInit_fp(float*, int);
void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
// Reports a runtime-API failure with file/line context and aborts the
// process; no-op on cudaSuccess. Invoked through the checkCudaErrors macro.
inline void __checkCudaErrors(cudaError err, const char *file, const int line )
{
	if (err == cudaSuccess)
		return;
	fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) );
	exit(-1);
}
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
// Checks (and clears) the sticky last-error state -- typically called right
// after a kernel launch, which itself returns no status. Prints the caller's
// errorMessage plus file/line and aborts on any pending error.
// Invoked through the getLastCudaError macro.
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
// Device code
// Power/stress kernel: mixes texture fetches, constant-memory reads,
// shared-memory reads and transcendental math to exercise several GPU
// datapaths at once. The numeric results are not meaningful; only the
// activity pattern matters.
__global__ void PowerKernal1(float *A, float *B, int N)
{
	// Fix: global thread id. The original computed blockIdx.x*blockIdx.x
	// (block index squared) instead of blockIdx.x*blockDim.x, producing
	// colliding ids across blocks instead of one id per thread.
	int tid = blockIdx.x*blockDim.x + threadIdx.x;
	float Value1=0;
	float Value2=0;
	__device__ __shared__ float I1[THREADS_PER_BLOCK];
	__device__ __shared__ float I2[THREADS_PER_BLOCK];
	I1[tid%THREADS_PER_BLOCK] = A[tid];
	I2[tid%THREADS_PER_BLOCK] = B[tid];
	__syncthreads();
	float sum = 0.0;
	if(tid < N){
		for(unsigned i=0; i<ITERATIONS; ++i){
			sum = tex1Dfetch(texmem1,tid)+B[tid];
			for(unsigned j=1; j<=2; ++j){
				sum+=tex1Dfetch(texmem1,tid*j);
				Value1 *=ConstArray1[(tid+5)%THREADS_PER_BLOCK];
				Value2 += A[tid]* I1[(i+j)%THREADS_PER_BLOCK]+Value1;
				sum+=B[tid]+Value2;
				Value1 += I2[(i+j+5)%THREADS_PER_BLOCK]+A[tid];
				Value2 *= ConstArray2[(tid)%THREADS_PER_BLOCK]* I2[(i+j)%THREADS_PER_BLOCK];
				sum/=log2(ConstArray1[(tid+10)%THREADS_PER_BLOCK]);
			}
			A[tid*2] = sum+Value1;
			B[tid] = A[tid*2]+A[tid]+Value2;
		}
	}
}
// Busy-wait "empty" kernel used as a power-measurement baseline: each thread
// spins through ITERATIONS*(blockDim.x + 299) loop iterations doing no real
// work (the PTX branch chain below is commented out), then writes its global
// id to C. Parameter N is unused.
__global__ void PowerKernalEmpty(unsigned* C, int N)
{
unsigned id = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
__syncthreads();
// Excessive Mod/Div Operations
for(unsigned long k=0; k<ITERATIONS*(blockDim.x + 299);k++) {
//Value1=(I1)+k;
//Value2=(I2)+k;
//Value3=(Value2)+k;
//Value2=(Value1)+k;
/*
__asm volatile (
"B0: bra.uni B1;\n\t"
"B1: bra.uni B2;\n\t"
"B2: bra.uni B3;\n\t"
"B3: bra.uni B4;\n\t"
"B4: bra.uni B5;\n\t"
"B5: bra.uni B6;\n\t"
"B6: bra.uni B7;\n\t"
"B7: bra.uni B8;\n\t"
"B8: bra.uni B9;\n\t"
"B9: bra.uni B10;\n\t"
"B10: bra.uni B11;\n\t"
"B11: bra.uni B12;\n\t"
"B12: bra.uni B13;\n\t"
"B13: bra.uni B14;\n\t"
"B14: bra.uni B15;\n\t"
"B15: bra.uni B16;\n\t"
"B16: bra.uni B17;\n\t"
"B17: bra.uni B18;\n\t"
"B18: bra.uni B19;\n\t"
"B19: bra.uni B20;\n\t"
"B20: bra.uni B21;\n\t"
"B21: bra.uni B22;\n\t"
"B22: bra.uni B23;\n\t"
"B23: bra.uni B24;\n\t"
"B24: bra.uni B25;\n\t"
"B25: bra.uni B26;\n\t"
"B26: bra.uni B27;\n\t"
"B27: bra.uni B28;\n\t"
"B28: bra.uni B29;\n\t"
"B29: bra.uni B30;\n\t"
"B30: bra.uni B31;\n\t"
"B31: bra.uni LOOP;\n\t"
"LOOP:"
);
*/
}
C[id]=id;
__syncthreads();
}
// Host code
float *h_A1, *h_A2, *h_A3;
float *d_A1, *d_A2, *d_A3;
// Benchmark driver: seeds the RNG, fills the two constant arrays and four
// texture-backed device buffers, allocates and initializes the input
// vectors, then launches PowerKernal1 under the cutil timer while a DAQ
// session (LaunchDAQ/TurnOffDAQ) records power draw.
int main()
{
	printf("Power Microbenchmarks\n");
	// Fix: seed ONCE.  The original called srand(time(0)) inside every loop
	// iteration; time(0) has one-second resolution, so each entry was
	// generated from the same seed and all entries came out identical.
	srand((unsigned)time(0));
	float array1[THREADS_PER_BLOCK];
	for (int i = 0; i < THREADS_PER_BLOCK; i++) {
		// Fix: rand()/RAND_MAX was integer division (always 0 except when
		// rand()==RAND_MAX); divide in floating point for values in [0,1].
		array1[i] = rand() / (float)RAND_MAX;
	}
	float array2[THREADS_PER_BLOCK];
	for (int i = 0; i < THREADS_PER_BLOCK; i++) {
		array2[i] = rand() / (float)RAND_MAX;
	}
	// NOTE(review): the string-name form of cudaMemcpyToSymbol is the legacy
	// (pre-CUDA 5) API; modern toolkits require passing the symbol itself.
	cudaMemcpyToSymbol("ConstArray1", array1, sizeof(float) * THREADS_PER_BLOCK );
	cudaMemcpyToSymbol("ConstArray2", array2, sizeof(float) * THREADS_PER_BLOCK );
	// Factor of 2 because PowerKernal1 stores to A[tid*2].
	int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS*2;
	// Allocate input vectors h_A and h_B in host memory
	size_t size1 = N * sizeof(float);
	h_A1 = (float*)malloc(size1);
	if (h_A1 == 0) CleanupResources();
	h_A2 = (float*)malloc(size1);
	if (h_A2 == 0) CleanupResources();
	// Staging buffer for the four texture-backed device arrays.
	float *host_texture1 = (float*) malloc(size1);
	for (int i=0; i< N; i++) {
		host_texture1[i] = i;
	}
	float *device_texture1;
	float *device_texture2;
	float *device_texture3;
	float *device_texture4;
	cudaMalloc((void**) &device_texture1, size1);
	cudaMalloc((void**) &device_texture2, size1);
	cudaMalloc((void**) &device_texture3, size1);
	cudaMalloc((void**) &device_texture4, size1);
	cudaMemcpy(device_texture1, host_texture1, size1, cudaMemcpyHostToDevice);
	cudaMemcpy(device_texture2, host_texture1, size1, cudaMemcpyHostToDevice);
	cudaMemcpy(device_texture3, host_texture1, size1, cudaMemcpyHostToDevice);
	cudaMemcpy(device_texture4, host_texture1, size1, cudaMemcpyHostToDevice);
	cudaBindTexture(0, texmem1, device_texture1, size1);
	cudaBindTexture(0, texmem2, device_texture2, size1);
	cudaBindTexture(0, texmem3, device_texture3, size1);
	cudaBindTexture(0, texmem4, device_texture4, size1);
	dim3 dimGrid2(1,1);
	dim3 dimBlock2(1,1);
	// Initialize input vectors
	RandomInit_fp(h_A1, N);
	RandomInit_fp(h_A2, N);
	// Allocate vectors in device memory
	checkCudaErrors( cudaMalloc((void**)&d_A1, size1) );
	checkCudaErrors( cudaMalloc((void**)&d_A2, size1) );
	// Copy vectors from host memory to device memory
	checkCudaErrors( cudaMemcpy(d_A1, h_A1, size1, cudaMemcpyHostToDevice) );
	checkCudaErrors( cudaMemcpy(d_A2, h_A2, size1, cudaMemcpyHostToDevice) );
	dim3 dimGrid(NUM_OF_BLOCKS,1);
	dim3 dimBlock(THREADS_PER_BLOCK,1);
	CUT_SAFE_CALL(cutCreateTimer(&my_timer));
	TaskHandle taskhandle = LaunchDAQ();
	CUT_SAFE_CALL(cutStartTimer(my_timer));
	CUDA_SAFE_CALL( cudaThreadSynchronize() );
	//PowerKernalEmpty<<<dimGrid2,dimBlock2>>>(d_A3, N);
	CUDA_SAFE_CALL( cudaThreadSynchronize() );
	printf("execution time = %f\n", cutGetTimerValue(my_timer));
	PowerKernal1<<<dimGrid,dimBlock>>>(d_A1, d_A2, N);
	CUDA_SAFE_CALL( cudaThreadSynchronize() );
	printf("execution time = %f\n", cutGetTimerValue(my_timer));
	//PowerKernalEmpty<<<dimGrid2,dimBlock2>>>(d_A3, N);
	CUDA_SAFE_CALL( cudaThreadSynchronize() );
	printf("execution time = %f\n", cutGetTimerValue(my_timer));
	getLastCudaError("kernel launch failure");
	CUDA_SAFE_CALL( cudaThreadSynchronize() );
	CUT_SAFE_CALL(cutStopTimer(my_timer));
	TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer));
	printf("execution time = %f\n", cutGetTimerValue(my_timer));
	CUT_SAFE_CALL(cutDeleteTimer(my_timer));
#ifdef _DEBUG
	checkCudaErrors( cudaDeviceSynchronize() );
#endif
	// Fix: host_texture1 was leaked; release it along with the globals.
	free(host_texture1);
	CleanupResources();
	return 0;
}
// Release every buffer allocated by main().  Null pointers are skipped, so
// this is safe to call at any point, including from main()'s early-exit
// allocation-failure paths (the globals are zero-initialized).
void CleanupResources(void)
{
	// Device buffers first.
	if (d_A1) cudaFree(d_A1);
	if (d_A2) cudaFree(d_A2);
	if (d_A3) cudaFree(d_A3);
	// Then host buffers.
	if (h_A1) free(h_A1);
	if (h_A2) free(h_A2);
	if (h_A3) free(h_A3);
}
// Fill data[0..n-1] with pseudo-random values in [0, 1].
// Fixes over the original:
//  * rand() / RAND_MAX was integer division -> always 0 (except when
//    rand() == RAND_MAX); divide in floating point instead.
//  * srand(time(0)) was called on EVERY iteration; time(0) has one-second
//    resolution, so all entries were generated from the same seed and came
//    out identical.  Seed once before the loop.
// (The "_int" name is kept for interface compatibility; it has always
// filled a float array.)
void RandomInit_int(float* data, int n)
{
	srand((unsigned)time(0));
	for (int i = 0; i < n; ++i) {
		data[i] = rand() / (float)RAND_MAX;
	}
}
// Fill data[0..n-1] with pseudo-random values in [0, 1].
// Fix: rand() / RAND_MAX was integer division, truncating to 0 for every
// draw except rand() == RAND_MAX; divide in floating point instead.
// Deliberately does not reseed, so the caller controls the sequence.
void RandomInit_fp(float* data, int n)
{
	for (int i = 0; i < n; ++i) {
		data[i] = rand() / (float)RAND_MAX;
	}
}
|
f238aa5de6184d8503d6c777c07dba288136d699.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "CudaWrappers.h"
#include "CudaDeviceDataMan.h"
#include "DepthCamera.h"
#include "hip/device_functions.h"
// Build one row of the SDF pose-alignment system for the camera-frame point
// p.  output[0..2] receive central-difference derivatives of the
// interpolated SDF w.r.t. the three rotational perturbations (step w_h),
// output[3..5] w.r.t. the three translational axes (step v_h), and
// output[6] the SDF value at the unperturbed pose.  Returns false (leaving
// output untouched) if any of the 13 trilinear SDF lookups fails; all
// lookups are still attempted, as in the original.
__device__ bool buildSDFSolverRows(const tsdfvolume& volume,const float3& p, const Mat44& cur_transform, const Mat44& plus_cur_w1, const Mat44& minus_cur_w1, const Mat44& plus_cur_w2, const Mat44& minus_cur_w2, const Mat44& plus_cur_w3, const Mat44& minus_cur_w3,
	float w_h, float v_h, float output[7])
{
	bool ok = true;
	const float4 hp = make_float4(p.x, p.y, p.z, 1.0);
	// SDF at the unperturbed pose; its world-space point is also the center
	// of the translational stencils below.
	float sdf0;
	float4 pw = cur_transform * hp;
	if (!volume.interpolateSDF(make_float3(pw.x, pw.y, pw.z), sdf0)) ok = false;
	// Rotational stencil: sample the SDF under each +/- perturbed pose.
	const Mat44* rotPlus[3]  = { &plus_cur_w1,  &plus_cur_w2,  &plus_cur_w3 };
	const Mat44* rotMinus[3] = { &minus_cur_w1, &minus_cur_w2, &minus_cur_w3 };
	float sdfRot[3][2];   // [axis][0]=plus sample, [1]=minus sample
	for (int a = 0; a < 3; ++a) {
		float4 q = (*rotPlus[a]) * hp;
		if (!volume.interpolateSDF(make_float3(q.x, q.y, q.z), sdfRot[a][0])) ok = false;
		q = (*rotMinus[a]) * hp;
		if (!volume.interpolateSDF(make_float3(q.x, q.y, q.z), sdfRot[a][1])) ok = false;
	}
	// Translational stencil: shift the world point by +/- v_h along each axis.
	float sdfTrn[3][2];
	for (int a = 0; a < 3; ++a) {
		const float dx = (a == 0) ? v_h : 0.f;
		const float dy = (a == 1) ? v_h : 0.f;
		const float dz = (a == 2) ? v_h : 0.f;
		if (!volume.interpolateSDF(make_float3(pw.x + dx, pw.y + dy, pw.z + dz), sdfTrn[a][0])) ok = false;
		if (!volume.interpolateSDF(make_float3(pw.x - dx, pw.y - dy, pw.z - dz), sdfTrn[a][1])) ok = false;
	}
	if (!ok) return false;
	// Central differences (f(+h) - f(-h)) / 2h, then the residual term.
	for (int a = 0; a < 3; ++a) {
		output[a]     = (sdfRot[a][0] - sdfRot[a][1]) / (2 * w_h);
		output[3 + a] = (sdfTrn[a][0] - sdfTrn[a][1]) / (2 * v_h);
	}
	output[6] = sdf0;
	return true;
}
/* One thread per depth pixel: builds the 7-entry per-pixel row of the SDF
   alignment system (via buildSDFSolverRows) and block-reduces the 27
   upper-triangular products row[i]*row[j] into gbuf, one partial sum per
   thread block.  Expects a 2D launch of BLOCK_SIZE_2D_X x BLOCK_SIZE_2D_Y
   threads per block.
   NOTE(review): x,y are not checked against the depth-map extent here;
   presumably depths.at() tolerates (or the grid exactly covers)
   out-of-range pixels -- confirm. */
__global__ void computeSDFSolverbufKernel(const tsdfvolume volume, const DepthfMap2D depths, const CameraParams depth_camera_params, const Mat44 curTrans,CudaMap2D<float> gbuf,
const Mat44 plus_cur_w1, const Mat44 minus_cur_w1, const Mat44 plus_cur_w2, const Mat44 minus_cur_w2, const Mat44 plus_cur_w3, const Mat44 minus_cur_w3,
float w_h, float v_h)
{
const unsigned x = blockDim.x*blockIdx.x + threadIdx.x;
const unsigned y = blockDim.y*blockIdx.y + threadIdx.y;
__shared__ float smem[BLOCK_SIZE_2D_X*BLOCK_SIZE_2D_Y];
unsigned tid = flattenedThreadId();
smem[tid] = 0;
// row stays all-zero for invalid pixels (d == 0) or failed SDF lookups, so
// those pixels contribute nothing to the sums below.
float row[7] = { 0, 0, 0, 0, 0, 0, 0 };
float d = depths.at(x, y);
if (d == 0)
{
}
else
{
// Back-project the pixel to a 3D camera-frame point.
float3 p=DepthCamera::depthToSkeleton(x, y, d,depth_camera_params);
buildSDFSolverRows(volume, p, curTrans, plus_cur_w1, minus_cur_w1, plus_cur_w2, minus_cur_w2, plus_cur_w3, minus_cur_w3, w_h, v_h, row);
}
int blockId = blockIdx.x + gridDim.x * blockIdx.y;
// 27 = 6*7 - lower triangle: the 21 upper-triangular matrix entries plus
// the 6 right-hand-side entries (j == 6).
int shift = 0;
for (int i = 0; i < 6; ++i) //rows
{
#pragma unroll
for (int j = i; j < 7; ++j) // cols + b
{
// Barrier before the write: reduce() below may still be reading smem
// from the previous (i,j) iteration.
__syncthreads();
smem[tid] = row[i] * row[j] ;
__syncthreads();
reduce(smem, BLOCK_SIZE_2D_X*BLOCK_SIZE_2D_Y);// block-wide sum into smem[0]
if (tid == 0)
{
gbuf.at(blockId, shift++)=smem[0];
}
}
}
}
/* Host entry: assemble per-block partial sums of the SDF pose-alignment
   system for the current pose, then reduce them into buf_reduced.
   The six Mat44 perturbations set +/-w_h in a pair of skew-symmetric
   off-diagonal entries -- i.e. small-angle rotations, premultiplied onto
   cur_transform (presumably mRC is row R / col C so each pair perturbs one
   rotation axis; confirm against the Mat44 convention).  v_h, one voxel
   edge length, is the translational finite-difference step.
   NOTE(review): no error/sync check after either kernel launch -- failures
   surface only at the next synchronizing call. */
void cudaCalSDFSolverParams(const CameraParams& depth_camera_params,const Mat44& cur_transform)
{
tsdfvolume volume=CudaDeviceDataMan::instance()->volume;
DepthfMap2D depths=CudaDeviceDataMan::instance()->trunced_depth;
CudaMap2D<float> buf_temp=CudaDeviceDataMan::instance()->rigid_align_buf_temp_pyramid[0];
CudaMap1D<float> buf_reduced=CudaDeviceDataMan::instance()->rigid_align_buf_reduced;
float w_h = 0.001;
// One voxel edge length: finite-difference step for translation.
float v_h = volume.size().x / volume.resolution().x;
Mat44 plus_cur_w1, minus_cur_w1, plus_cur_w2, minus_cur_w2, plus_cur_w3, minus_cur_w3
/*,plus_cur_v1, minus_cur_v1, plus_cur_v2, minus_cur_v2, plus_cur_v3, minus_cur_v3*/;
Mat44 delta = Mat44::getIdentity();
// Perturb one skew pair at a time, resetting it to zero before moving on.
delta.m23 = -w_h; delta.m32 = w_h; plus_cur_w1 = delta*cur_transform;
delta.m23 = w_h; delta.m32 = -w_h; minus_cur_w1 = delta*cur_transform;
delta.m23 = 0; delta.m32 = 0;
delta.m13 = w_h; delta.m31 = -w_h; plus_cur_w2 = delta*cur_transform;
delta.m13 = -w_h; delta.m31 = w_h; minus_cur_w2 = delta*cur_transform;
delta.m13 = 0; delta.m31 = 0;
delta.m12 = -w_h; delta.m21 = w_h; plus_cur_w3 = delta*cur_transform;
delta.m12 = w_h; delta.m21 = -w_h; minus_cur_w3 = delta*cur_transform;
delta.m12 = 0; delta.m21 = 0;
const dim3 blockSize(BLOCK_SIZE_2D_X, BLOCK_SIZE_2D_Y);
const dim3 gridSize(divUp(depths.cols(), BLOCK_SIZE_2D_X), divUp(depths.rows(), BLOCK_SIZE_2D_Y));
computeSDFSolverbufKernel << <gridSize, blockSize >> >(volume, depths, depth_camera_params, cur_transform, buf_temp, plus_cur_w1, minus_cur_w1, plus_cur_w2, minus_cur_w2, plus_cur_w3, minus_cur_w3, w_h, v_h);
// Second pass: fold the per-block partials in buf_temp down to one value
// per system entry (one block of 512 threads per output element).
reduceGbufKernel << <buf_reduced.count(), 512, 512 * sizeof(float) >> >(buf_temp.ptr(),buf_temp.cols(),buf_temp.rows(), buf_reduced.ptr(), 512);
}
| f238aa5de6184d8503d6c777c07dba288136d699.cu | #include "CudaWrappers.h"
#include "CudaDeviceDataMan.h"
#include "DepthCamera.h"
#include "device_functions.h"
// Build one row of the SDF pose-alignment system for the camera-frame point
// p.  output[0..2] receive central-difference derivatives of the
// interpolated SDF w.r.t. the three rotational perturbations (step w_h),
// output[3..5] w.r.t. the three translational axes (step v_h), and
// output[6] the SDF value at the unperturbed pose.  Returns false (leaving
// output untouched) if any of the 13 trilinear SDF lookups fails; all
// lookups are still attempted, as in the original.
__device__ bool buildSDFSolverRows(const tsdfvolume& volume,const float3& p, const Mat44& cur_transform, const Mat44& plus_cur_w1, const Mat44& minus_cur_w1, const Mat44& plus_cur_w2, const Mat44& minus_cur_w2, const Mat44& plus_cur_w3, const Mat44& minus_cur_w3,
	float w_h, float v_h, float output[7])
{
	bool ok = true;
	const float4 hp = make_float4(p.x, p.y, p.z, 1.0);
	// SDF at the unperturbed pose; its world-space point is also the center
	// of the translational stencils below.
	float sdf0;
	float4 pw = cur_transform * hp;
	if (!volume.interpolateSDF(make_float3(pw.x, pw.y, pw.z), sdf0)) ok = false;
	// Rotational stencil: sample the SDF under each +/- perturbed pose.
	const Mat44* rotPlus[3]  = { &plus_cur_w1,  &plus_cur_w2,  &plus_cur_w3 };
	const Mat44* rotMinus[3] = { &minus_cur_w1, &minus_cur_w2, &minus_cur_w3 };
	float sdfRot[3][2];   // [axis][0]=plus sample, [1]=minus sample
	for (int a = 0; a < 3; ++a) {
		float4 q = (*rotPlus[a]) * hp;
		if (!volume.interpolateSDF(make_float3(q.x, q.y, q.z), sdfRot[a][0])) ok = false;
		q = (*rotMinus[a]) * hp;
		if (!volume.interpolateSDF(make_float3(q.x, q.y, q.z), sdfRot[a][1])) ok = false;
	}
	// Translational stencil: shift the world point by +/- v_h along each axis.
	float sdfTrn[3][2];
	for (int a = 0; a < 3; ++a) {
		const float dx = (a == 0) ? v_h : 0.f;
		const float dy = (a == 1) ? v_h : 0.f;
		const float dz = (a == 2) ? v_h : 0.f;
		if (!volume.interpolateSDF(make_float3(pw.x + dx, pw.y + dy, pw.z + dz), sdfTrn[a][0])) ok = false;
		if (!volume.interpolateSDF(make_float3(pw.x - dx, pw.y - dy, pw.z - dz), sdfTrn[a][1])) ok = false;
	}
	if (!ok) return false;
	// Central differences (f(+h) - f(-h)) / 2h, then the residual term.
	for (int a = 0; a < 3; ++a) {
		output[a]     = (sdfRot[a][0] - sdfRot[a][1]) / (2 * w_h);
		output[3 + a] = (sdfTrn[a][0] - sdfTrn[a][1]) / (2 * v_h);
	}
	output[6] = sdf0;
	return true;
}
/* One thread per depth pixel: builds the 7-entry per-pixel row of the SDF
   alignment system (via buildSDFSolverRows) and block-reduces the 27
   upper-triangular products row[i]*row[j] into gbuf, one partial sum per
   thread block.  Expects a 2D launch of BLOCK_SIZE_2D_X x BLOCK_SIZE_2D_Y
   threads per block.
   NOTE(review): x,y are not checked against the depth-map extent here;
   presumably depths.at() tolerates (or the grid exactly covers)
   out-of-range pixels -- confirm. */
__global__ void computeSDFSolverbufKernel(const tsdfvolume volume, const DepthfMap2D depths, const CameraParams depth_camera_params, const Mat44 curTrans,CudaMap2D<float> gbuf,
const Mat44 plus_cur_w1, const Mat44 minus_cur_w1, const Mat44 plus_cur_w2, const Mat44 minus_cur_w2, const Mat44 plus_cur_w3, const Mat44 minus_cur_w3,
float w_h, float v_h)
{
const unsigned x = blockDim.x*blockIdx.x + threadIdx.x;
const unsigned y = blockDim.y*blockIdx.y + threadIdx.y;
__shared__ float smem[BLOCK_SIZE_2D_X*BLOCK_SIZE_2D_Y];
unsigned tid = flattenedThreadId();
smem[tid] = 0;
// row stays all-zero for invalid pixels (d == 0) or failed SDF lookups, so
// those pixels contribute nothing to the sums below.
float row[7] = { 0, 0, 0, 0, 0, 0, 0 };
float d = depths.at(x, y);
if (d == 0)
{
}
else
{
// Back-project the pixel to a 3D camera-frame point.
float3 p=DepthCamera::depthToSkeleton(x, y, d,depth_camera_params);
buildSDFSolverRows(volume, p, curTrans, plus_cur_w1, minus_cur_w1, plus_cur_w2, minus_cur_w2, plus_cur_w3, minus_cur_w3, w_h, v_h, row);
}
int blockId = blockIdx.x + gridDim.x * blockIdx.y;
// 27 = 6*7 - lower triangle: the 21 upper-triangular matrix entries plus
// the 6 right-hand-side entries (j == 6).
int shift = 0;
for (int i = 0; i < 6; ++i) //rows
{
#pragma unroll
for (int j = i; j < 7; ++j) // cols + b
{
// Barrier before the write: reduce() below may still be reading smem
// from the previous (i,j) iteration.
__syncthreads();
smem[tid] = row[i] * row[j] ;
__syncthreads();
reduce(smem, BLOCK_SIZE_2D_X*BLOCK_SIZE_2D_Y);// sum (block-wide reduction; result lands in smem[0])
if (tid == 0)
{
gbuf.at(blockId, shift++)=smem[0];
}
}
}
}
/* Host entry: assemble per-block partial sums of the SDF pose-alignment
   system for the current pose, then reduce them into buf_reduced.
   The six Mat44 perturbations set +/-w_h in a pair of skew-symmetric
   off-diagonal entries -- i.e. small-angle rotations, premultiplied onto
   cur_transform (presumably mRC is row R / col C so each pair perturbs one
   rotation axis; confirm against the Mat44 convention).  v_h, one voxel
   edge length, is the translational finite-difference step.
   NOTE(review): no error/sync check after either kernel launch -- failures
   surface only at the next synchronizing call. */
void cudaCalSDFSolverParams(const CameraParams& depth_camera_params,const Mat44& cur_transform)
{
tsdfvolume volume=CudaDeviceDataMan::instance()->volume;
DepthfMap2D depths=CudaDeviceDataMan::instance()->trunced_depth;
CudaMap2D<float> buf_temp=CudaDeviceDataMan::instance()->rigid_align_buf_temp_pyramid[0];
CudaMap1D<float> buf_reduced=CudaDeviceDataMan::instance()->rigid_align_buf_reduced;
float w_h = 0.001;
// One voxel edge length: finite-difference step for translation.
float v_h = volume.size().x / volume.resolution().x;
Mat44 plus_cur_w1, minus_cur_w1, plus_cur_w2, minus_cur_w2, plus_cur_w3, minus_cur_w3
/*,plus_cur_v1, minus_cur_v1, plus_cur_v2, minus_cur_v2, plus_cur_v3, minus_cur_v3*/;
Mat44 delta = Mat44::getIdentity();
// Perturb one skew pair at a time, resetting it to zero before moving on.
delta.m23 = -w_h; delta.m32 = w_h; plus_cur_w1 = delta*cur_transform;
delta.m23 = w_h; delta.m32 = -w_h; minus_cur_w1 = delta*cur_transform;
delta.m23 = 0; delta.m32 = 0;
delta.m13 = w_h; delta.m31 = -w_h; plus_cur_w2 = delta*cur_transform;
delta.m13 = -w_h; delta.m31 = w_h; minus_cur_w2 = delta*cur_transform;
delta.m13 = 0; delta.m31 = 0;
delta.m12 = -w_h; delta.m21 = w_h; plus_cur_w3 = delta*cur_transform;
delta.m12 = w_h; delta.m21 = -w_h; minus_cur_w3 = delta*cur_transform;
delta.m12 = 0; delta.m21 = 0;
const dim3 blockSize(BLOCK_SIZE_2D_X, BLOCK_SIZE_2D_Y);
const dim3 gridSize(divUp(depths.cols(), BLOCK_SIZE_2D_X), divUp(depths.rows(), BLOCK_SIZE_2D_Y));
computeSDFSolverbufKernel << <gridSize, blockSize >> >(volume, depths, depth_camera_params, cur_transform, buf_temp, plus_cur_w1, minus_cur_w1, plus_cur_w2, minus_cur_w2, plus_cur_w3, minus_cur_w3, w_h, v_h);
// Second pass: fold the per-block partials in buf_temp down to one value
// per system entry (one block of 512 threads per output element).
reduceGbufKernel << <buf_reduced.count(), 512, 512 * sizeof(float) >> >(buf_temp.ptr(),buf_temp.cols(),buf_temp.rows(), buf_reduced.ptr(), 512);
}
|
463625426b490a804c2b4fef9e79763270aa7f52.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <math.h>
#include <helper_cuda.h>
#include <hip/hip_runtime.h>
#include "../../finufft/utils.h"
#include "../spreadinterp.h"
using namespace std;
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600
/* Host compilation pass, or SM60+ device pass: the toolkit already provides
   a native double-precision atomicAdd, so no fallback is needed. */
#else
/* Pre-Pascal (SM < 60) fallback: emulate atomicAdd on double with a
   compare-and-swap loop on the 64-bit bit pattern -- the canonical recipe
   from the CUDA C++ Programming Guide.  Returns the value at *address
   before the addition, matching the builtin's contract. */
static __inline__ __device__ double atomicAdd(double* address, double val)
{
unsigned long long int* address_as_ull =
(unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val +
__longlong_as_double(assumed)));
// Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
} while (assumed != old);
return __longlong_as_double(old);
}
#endif
static __forceinline__ __device__
FLT evaluate_kernel(FLT x, FLT es_c, FLT es_beta)
/* ES ("exp sqrt") kernel evaluation at a single real argument:
      phi(x) = exp(beta.sqrt(1 - (2x/n_s)^2)),    for |x| < nspread/2
   related to an asymptotic approximation to the Kaiser--Bessel, itself an
   approximation to the prolate spheroidal wavefunction (PSWF) of order 0.
   This is the "reference implementation", used by eg common/onedim_* 2/17/17 */
{
	// Kept in double (as the literal 1.0 forced in the original expression)
	// before the sqrt, so the numerics are unchanged.
	const double arg = 1.0 - es_c * x * x;
	return exp(es_beta * sqrt(arg));
}
static __inline__ __device__
void eval_kernel_vec_Horner(FLT *ker, const FLT x, const int w,
const double upsampfac)
/* Fill ker[] with Horner piecewise poly approx to [-w/2,w/2] ES kernel eval at
x_j = x + j, for j=0,..,w-1. Thus x in [-w/2,-w/2+1]. w is aka ns.
This is the current evaluation method, since it's faster (except i7 w=16).
Two upsampfacs implemented. Params must match ref formula. Barnett 4/24/18 */
{
FLT z = 2*x + w - 1.0; // scale so local grid offset z in [-1,1]
// insert the auto-generated code which expects z, w args, writes to ker...
if (upsampfac==2.0) { // floating point equality is fine here
#include "../../finufft/ker_horner_allw_loop.c"
}
// NOTE(review): for upsampfac != 2.0 this leaves ker[] unwritten; callers
// in this file pass sigma straight through, so presumably only
// sigma == 2.0 reaches the Horner path -- confirm.
}
/* Fill ker[0..w-1] with direct ES kernel evaluations at the offsets
   |x|, |x+1|, ..., |x+w-1| (slower reference alternative to
   eval_kernel_vec_Horner). */
static __inline__ __device__
void eval_kernel_vec(FLT *ker, const FLT x, const double w, const double es_c,
	const double es_beta)
{
	for (int j = 0; j < w; j++)
		ker[j] = evaluate_kernel(abs(x + j), es_c, es_beta);
}
/* Common Kernels */
/* Map a global bin coordinate (xidx,yidx,zidx) to its storage index in the
   obin-major layout: bins are grouped by the outer bin ("obin", of
   bnx*bny*bnz inner bins) that contains them, with each obin's bins stored
   contiguously in x-fastest order.
   Fix: the obin linear index used stride ony*onz for the z term, which is
   not a valid row-major linearization -- whenever onx != onz, distinct
   obins could map to the same index.  The z stride must be onx*ony so the
   obin index is unique and dense. */
__device__
int CalcGlobalIdx(int xidx, int yidx, int zidx, int onx, int ony, int onz,
	int bnx, int bny, int bnz){
	int oix,oiy,oiz;
	oix = xidx/bnx;
	oiy = yidx/bny;
	oiz = zidx/bnz;
	return (oix + oiy*onx + oiz*onx*ony)*(bnx*bny*bnz) +
		(xidx%bnx+yidx%bny*bnx+zidx%bnz*bny*bnx);
}
/* Plain row-major (x fastest, then y, then z) linearization of a 3D bin
   coordinate over an nbinx x nbiny x nbinz grid. */
__device__
int CalcGlobalIdx_V2(int xidx, int yidx, int zidx, int nbinx, int nbiny, int nbinz){
	return xidx + nbinx * (yidx + nbiny * zidx);
}
#if 0
/* Disabled: coordinates are instead rescaled on the fly with the RESCALE
   macro inside each kernel (see the pirange argument there), avoiding this
   extra pass over the points. */
__global__
void RescaleXY_3d(int M, int nf1, int nf2, int nf3, FLT* x, FLT* y, FLT* z)
{
for(int i=blockDim.x*blockIdx.x+threadIdx.x; i<M; i+=blockDim.x*gridDim.x){
x[i] = RESCALE(x[i], nf1, 1);
y[i] = RESCALE(y[i], nf2, 1);
z[i] = RESCALE(z[i], nf3, 1);
}
}
#endif
/* ---------------------- 3d Spreading Kernels -------------------------------*/
/* Kernels for bin sort NUpts */
/* Histogram pass of the bin sort (no ghost bins): grid-stride over the M
   nonuniform points; each point's rescaled coordinate is mapped to a
   uniform bin (points on the upper edge are pulled into range), the bin's
   counter is bumped atomically, and the point's rank within its bin (the
   counter's pre-increment value) is recorded in sortidx. */
__global__
void CalcBinSize_noghost_3d(int M, int nf1, int nf2, int nf3, int bin_size_x,
	int bin_size_y, int bin_size_z, int nbinx, int nbiny, int nbinz,
	int* bin_size, FLT *x, FLT *y, FLT *z, int* sortidx, int pirange)
{
	const int stride = gridDim.x * blockDim.x;
	for (int i = threadIdx.x + blockIdx.x*blockDim.x; i < M; i += stride) {
		FLT px = RESCALE(x[i], nf1, pirange);
		FLT py = RESCALE(y[i], nf2, pirange);
		FLT pz = RESCALE(z[i], nf3, pirange);
		int bx = floor(px / bin_size_x);
		int by = floor(py / bin_size_y);
		int bz = floor(pz / bin_size_z);
		// Edge clamp (matches the original's binx-1 form).
		if (bx >= nbinx) bx = bx - 1;
		if (by >= nbiny) by = by - 1;
		if (bz >= nbinz) bz = bz - 1;
		int b = bx + nbinx * (by + nbiny * bz);
		sortidx[i] = atomicAdd(&bin_size[b], 1);
	}
}
/* Scatter pass of the bin sort: with bin_startpts holding the exclusive
   prefix sum of bin sizes and sortidx each point's rank within its bin
   (from CalcBinSize_noghost_3d), write every point id into its final
   sorted slot so index[] lists point ids grouped by bin. */
__global__
void CalcInvertofGlobalSortIdx_3d(int M, int bin_size_x, int bin_size_y,
	int bin_size_z, int nbinx, int nbiny, int nbinz, int* bin_startpts,
	int* sortidx, FLT *x, FLT *y, FLT *z, int* index, int pirange, int nf1,
	int nf2, int nf3)
{
	const int stride = gridDim.x * blockDim.x;
	for (int i = threadIdx.x + blockIdx.x*blockDim.x; i < M; i += stride) {
		FLT px = RESCALE(x[i], nf1, pirange);
		FLT py = RESCALE(y[i], nf2, pirange);
		FLT pz = RESCALE(z[i], nf3, pirange);
		int bx = floor(px / bin_size_x);
		int by = floor(py / bin_size_y);
		int bz = floor(pz / bin_size_z);
		// Same edge clamp as the histogram pass, so both passes agree.
		if (bx >= nbinx) bx = bx - 1;
		if (by >= nbiny) by = by - 1;
		if (bz >= nbinz) bz = bz - 1;
		int b = CalcGlobalIdx_V2(bx, by, bz, nbinx, nbiny, nbinz);
		index[bin_startpts[b] + sortidx[i]] = i;
	}
}
/* Identity permutation: used when the NU points are left in input order
   (no bin sort). */
__global__
void TrivialGlobalSortIdx_3d(int M, int* index)
{
	const int stride = gridDim.x * blockDim.x;
	for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < M; i += stride)
		index[i] = i;
}
/* Kernels for NUptsdriven method */
/* 3D spreading, NU-points-driven, Horner kernel evaluation.
   Grid-stride loop over the M nonuniform points, visited in the order given
   by the idxnupts permutation.  For each point: find the ns^3 window of
   affected fine-grid cells, evaluate the separable ES kernel per axis with
   the Horner piecewise-polynomial approximation (upsampling factor sigma),
   and scatter strength c into the interleaved-complex grid fw with
   atomicAdd (threads may hit the same cell).  Out-of-range indices wrap
   periodically into [0,nf*). */
__global__
void Spread_3d_NUptsdriven_Horner(FLT *x, FLT *y, FLT *z, CUCPX *c, CUCPX *fw,
int M, const int ns, int nf1, int nf2, int nf3, FLT sigma, int* idxnupts,
int pirange)
{
int xx, yy, zz, ix, iy, iz;
int outidx;
FLT ker1[MAX_NSPREAD];
FLT ker2[MAX_NSPREAD];
FLT ker3[MAX_NSPREAD];
FLT ker1val, ker2val, ker3val;
FLT x_rescaled, y_rescaled, z_rescaled;
for(int i=blockDim.x*blockIdx.x+threadIdx.x; i<M; i+=blockDim.x*gridDim.x){
x_rescaled=RESCALE(x[idxnupts[i]], nf1, pirange);
y_rescaled=RESCALE(y[idxnupts[i]], nf2, pirange);
z_rescaled=RESCALE(z[idxnupts[i]], nf3, pirange);
// Support window [ceil(p-ns/2), floor(p+ns/2)] covers exactly ns cells.
int xstart = ceil(x_rescaled - ns/2.0);
int ystart = ceil(y_rescaled - ns/2.0);
int zstart = ceil(z_rescaled - ns/2.0);
int xend = floor(x_rescaled + ns/2.0);
int yend = floor(y_rescaled + ns/2.0);
int zend = floor(z_rescaled + ns/2.0);
// Offset of the first window cell from the point; kernel values for the
// whole window follow at unit spacing.
FLT x1=(FLT)xstart-x_rescaled;
FLT y1=(FLT)ystart-y_rescaled;
FLT z1=(FLT)zstart-z_rescaled;
eval_kernel_vec_Horner(ker1,x1,ns,sigma);
eval_kernel_vec_Horner(ker2,y1,ns,sigma);
eval_kernel_vec_Horner(ker3,z1,ns,sigma);
for(zz=zstart; zz<=zend; zz++){
ker3val=ker3[zz-zstart];
for(yy=ystart; yy<=yend; yy++){
ker2val=ker2[yy-ystart];
for(xx=xstart; xx<=xend; xx++){
// Periodic wrap of each index into [0, nf).
ix = xx < 0 ? xx+nf1 : (xx>nf1-1 ? xx-nf1 : xx);
iy = yy < 0 ? yy+nf2 : (yy>nf2-1 ? yy-nf2 : yy);
iz = zz < 0 ? zz+nf3 : (zz>nf3-1 ? zz-nf3 : zz);
outidx = ix+iy*nf1+iz*nf1*nf2;
ker1val=ker1[xx-xstart];
FLT kervalue=ker1val*ker2val*ker3val;
atomicAdd(&fw[outidx].x, c[idxnupts[i]].x*kervalue);
atomicAdd(&fw[outidx].y, c[idxnupts[i]].y*kervalue);
}
}
}
}
}
/* 3D spreading, NU-points-driven, direct ES ("exp sqrt") kernel evaluation
   (slower reference path; see Spread_3d_NUptsdriven_Horner for the Horner
   variant).  Same structure: grid-stride over points via the idxnupts
   permutation, separable per-axis kernel values, atomicAdd scatter into the
   interleaved-complex grid fw with periodic index wrapping. */
__global__
void Spread_3d_NUptsdriven(FLT *x, FLT *y, FLT *z, CUCPX *c, CUCPX *fw, int M,
const int ns, int nf1, int nf2, int nf3, FLT es_c, FLT es_beta,
int* idxnupts, int pirange)
{
int xx, yy, zz, ix, iy, iz;
int outidx;
FLT ker1[MAX_NSPREAD];
FLT ker2[MAX_NSPREAD];
FLT ker3[MAX_NSPREAD];
FLT x_rescaled, y_rescaled, z_rescaled;
FLT ker1val, ker2val, ker3val;
for(int i=blockDim.x*blockIdx.x+threadIdx.x; i<M; i+=blockDim.x*gridDim.x){
x_rescaled=RESCALE(x[idxnupts[i]], nf1, pirange);
y_rescaled=RESCALE(y[idxnupts[i]], nf2, pirange);
z_rescaled=RESCALE(z[idxnupts[i]], nf3, pirange);
// Support window [ceil(p-ns/2), floor(p+ns/2)] covers exactly ns cells.
int xstart = ceil(x_rescaled - ns/2.0);
int ystart = ceil(y_rescaled - ns/2.0);
int zstart = ceil(z_rescaled - ns/2.0);
int xend = floor(x_rescaled + ns/2.0);
int yend = floor(y_rescaled + ns/2.0);
int zend = floor(z_rescaled + ns/2.0);
FLT x1=(FLT)xstart-x_rescaled;
FLT y1=(FLT)ystart-y_rescaled;
FLT z1=(FLT)zstart-z_rescaled;
eval_kernel_vec(ker1,x1,ns,es_c,es_beta);
eval_kernel_vec(ker2,y1,ns,es_c,es_beta);
eval_kernel_vec(ker3,z1,ns,es_c,es_beta);
for(zz=zstart; zz<=zend; zz++){
ker3val=ker3[zz-zstart];
for(yy=ystart; yy<=yend; yy++){
ker2val=ker2[yy-ystart];
for(xx=xstart; xx<=xend; xx++){
// Periodic wrap of each index into [0, nf).
ix = xx < 0 ? xx+nf1 : (xx>nf1-1 ? xx-nf1 : xx);
iy = yy < 0 ? yy+nf2 : (yy>nf2-1 ? yy-nf2 : yy);
iz = zz < 0 ? zz+nf3 : (zz>nf3-1 ? zz-nf3 : zz);
outidx = ix+iy*nf1+iz*nf1*nf2;
ker1val=ker1[xx-xstart];
FLT kervalue=ker1val*ker2val*ker3val;
atomicAdd(&fw[outidx].x, c[idxnupts[i]].x*kervalue);
atomicAdd(&fw[outidx].y, c[idxnupts[i]].y*kervalue);
}
}
}
}
}
/* Kernels for Subprob method */
/* Number of subproblems needed per bin: ceil(bin_size / maxsubprobsize),
   computed in a grid-stride loop over the bins. */
__global__
void CalcSubProb_3d_v2(int* bin_size, int* num_subprob, int maxsubprobsize,
	int numbins)
{
	const int stride = gridDim.x * blockDim.x;
	for (int b = threadIdx.x + blockIdx.x*blockDim.x; b < numbins; b += stride)
		num_subprob[b] = ceil(bin_size[b] / (float) maxsubprobsize);
}
/* Inverse map for the subproblem scheme: for each bin, write its id into
   every one of its subproblem slots, so a spreading block can look up its
   owning bin as subprob_to_bin[blockIdx.x] in O(1). */
__global__
void MapBintoSubProb_3d_v2(int* d_subprob_to_bin,int* d_subprobstartpts,
	int* d_numsubprob,int numbins)
{
	const int stride = gridDim.x * blockDim.x;
	for (int b = threadIdx.x + blockIdx.x*blockDim.x; b < numbins; b += stride) {
		const int first = d_subprobstartpts[b];
		for (int k = 0; k < d_numsubprob[b]; k++)
			d_subprob_to_bin[first + k] = b;
	}
}
/* 3D spreading, subproblem method, Horner kernel evaluation.
   Each thread block processes one subproblem: up to maxsubprobsize points
   of a single bin (subprob_to_bin maps blockIdx.x to its bin; binsubp_idx
   selects which chunk of the bin this block owns).  Points are spread into
   a dynamic shared-memory tile covering the bin plus a ceil(ns/2) halo on
   every side; after a barrier the tile is added into the global grid fw
   with periodic wrapping.  Requires tile-size * sizeof(CUCPX) bytes of
   dynamic shared memory at launch. */
__global__
void Spread_3d_Subprob_Horner(FLT *x, FLT *y, FLT *z, CUCPX *c, CUCPX *fw, int M,
const int ns, int nf1, int nf2, int nf3, FLT sigma, int* binstartpts,
int* bin_size, int bin_size_x, int bin_size_y, int bin_size_z,
int* subprob_to_bin, int* subprobstartpts, int* numsubprob,
int maxsubprobsize, int nbinx, int nbiny, int nbinz, int* idxnupts,
int pirange)
{
extern __shared__ CUCPX fwshared[];
int xstart,ystart,xend,yend,zstart,zend;
int bidx=subprob_to_bin[blockIdx.x];
int binsubp_idx=blockIdx.x-subprobstartpts[bidx];
int ix,iy,iz,outidx;
// First point of this block's chunk, and how many points it actually has
// (the last chunk of a bin may be short).
int ptstart=binstartpts[bidx]+binsubp_idx*maxsubprobsize;
int nupts=min(maxsubprobsize, bin_size[bidx]-binsubp_idx*maxsubprobsize);
// Fine-grid origin of this bin.
int xoffset=(bidx % nbinx)*bin_size_x;
int yoffset=((bidx / nbinx)%nbiny)*bin_size_y;
int zoffset=(bidx/ (nbinx*nbiny))*bin_size_z;
// Tile size: bin extent plus a ceil(ns/2) halo on each side.
int N = (bin_size_x+2*ceil(ns/2.0))*(bin_size_y+2*ceil(ns/2.0))*
(bin_size_z+2*ceil(ns/2.0));
for(int i=threadIdx.x; i<N; i+=blockDim.x){
fwshared[i].x = 0.0;
fwshared[i].y = 0.0;
}
// Tile must be fully zeroed before any thread accumulates into it.
__syncthreads();
FLT x_rescaled, y_rescaled, z_rescaled;
CUCPX cnow;
for(int i=threadIdx.x; i<nupts; i+=blockDim.x){
FLT ker1[MAX_NSPREAD];
FLT ker2[MAX_NSPREAD];
FLT ker3[MAX_NSPREAD];
int nuptsidx = idxnupts[ptstart+i];
x_rescaled = RESCALE(x[nuptsidx],nf1,pirange);
y_rescaled = RESCALE(y[nuptsidx],nf2,pirange);
z_rescaled = RESCALE(z[nuptsidx],nf3,pirange);
cnow = c[nuptsidx];
// Support window in bin-local coordinates.
xstart = ceil(x_rescaled - ns/2.0)-xoffset;
ystart = ceil(y_rescaled - ns/2.0)-yoffset;
zstart = ceil(z_rescaled - ns/2.0)-zoffset;
xend = floor(x_rescaled + ns/2.0)-xoffset;
yend = floor(y_rescaled + ns/2.0)-yoffset;
zend = floor(z_rescaled + ns/2.0)-zoffset;
eval_kernel_vec_Horner(ker1,xstart+xoffset-x_rescaled,ns,sigma);
eval_kernel_vec_Horner(ker2,ystart+yoffset-y_rescaled,ns,sigma);
eval_kernel_vec_Horner(ker3,zstart+zoffset-z_rescaled,ns,sigma);
for (int zz=zstart; zz<=zend; zz++){
FLT kervalue3 = ker3[zz-zstart];
// Shift into tile coordinates (halo origin at -ceil(ns/2)).
iz = zz+ceil(ns/2.0);
if(iz >= (bin_size_z + (int) ceil(ns/2.0)*2)) break;
for(int yy=ystart; yy<=yend; yy++){
FLT kervalue2 = ker2[yy-ystart];
iy = yy+ceil(ns/2.0);
if(iy >= (bin_size_y + (int) ceil(ns/2.0)*2)) break;
for(int xx=xstart; xx<=xend; xx++){
ix = xx+ceil(ns/2.0);
if(ix >= (bin_size_x + (int) ceil(ns/2.0)*2)) break;
outidx = ix+iy*(bin_size_x+ceil(ns/2.0)*2)+
iz*(bin_size_x+ceil(ns/2.0)*2)*
(bin_size_y+ceil(ns/2.0)*2);
FLT kervalue1 = ker1[xx-xstart];
// Shared-memory atomics: threads of this block may hit the same cell.
atomicAdd(&fwshared[outidx].x, cnow.x*kervalue1*kervalue2*
kervalue3);
atomicAdd(&fwshared[outidx].y, cnow.y*kervalue1*kervalue2*
kervalue3);
}
}
}
}
// All accumulation must finish before the tile is flushed.
__syncthreads();
/* write to global memory */
for(int n=threadIdx.x; n<N; n+=blockDim.x){
// Decompose the flat tile index n into tile coordinates (i,j,k).
int i = n % (int) (bin_size_x+2*ceil(ns/2.0) );
int j = (int) (n /(bin_size_x+2*ceil(ns/2.0))) %
(int) (bin_size_y+2*ceil(ns/2.0));
int k = n / ((bin_size_x+2*ceil(ns/2.0))*(bin_size_y+2*ceil(ns/2.0)));
// Back to fine-grid coordinates, then wrap periodically into [0, nf).
ix = xoffset-ceil(ns/2.0)+i;
iy = yoffset-ceil(ns/2.0)+j;
iz = zoffset-ceil(ns/2.0)+k;
if(ix<(nf1+ceil(ns/2.0)) &&
iy<(nf2+ceil(ns/2.0)) &&
iz<(nf3+ceil(ns/2.0))){
ix = ix < 0 ? ix+nf1 : (ix>nf1-1 ? ix-nf1 : ix);
iy = iy < 0 ? iy+nf2 : (iy>nf2-1 ? iy-nf2 : iy);
iz = iz < 0 ? iz+nf3 : (iz>nf3-1 ? iz-nf3 : iz);
outidx = ix+iy*nf1+iz*nf1*nf2;
int sharedidx=i+j*(bin_size_x+ceil(ns/2.0)*2)+
k*(bin_size_x+ceil(ns/2.0)*2)*(bin_size_y+ceil(ns/2.0)*2);
// Global atomics: different blocks' halos overlap on the fine grid.
atomicAdd(&fw[outidx].x, fwshared[sharedidx].x);
atomicAdd(&fw[outidx].y, fwshared[sharedidx].y);
}
}
}
/* 3D spreading, subproblem method, direct ES kernel evaluation (reference
   counterpart of Spread_3d_Subprob_Horner; same tiling scheme).  One thread
   block per subproblem: up to maxsubprobsize points of one bin are spread
   into a shared-memory tile (bin extent + ceil(ns/2) halo per side), which
   is then flushed to the global grid fw with periodic wrapping.  Requires
   tile-size * sizeof(CUCPX) bytes of dynamic shared memory at launch. */
__global__
void Spread_3d_Subprob(FLT *x, FLT *y, FLT *z, CUCPX *c, CUCPX *fw, int M,
const int ns, int nf1, int nf2, int nf3, FLT es_c, FLT es_beta, int* binstartpts,
int* bin_size, int bin_size_x, int bin_size_y, int bin_size_z,
int* subprob_to_bin, int* subprobstartpts, int* numsubprob, int maxsubprobsize,
int nbinx, int nbiny, int nbinz, int* idxnupts, int pirange)
{
extern __shared__ CUCPX fwshared[];
int xstart,ystart,xend,yend,zstart,zend;
int subpidx=blockIdx.x;
int bidx=subprob_to_bin[subpidx];
int binsubp_idx=subpidx-subprobstartpts[bidx];
int ix, iy, iz, outidx;
// First point of this block's chunk and its (possibly short) length.
int ptstart=binstartpts[bidx]+binsubp_idx*maxsubprobsize;
int nupts=min(maxsubprobsize, bin_size[bidx]-binsubp_idx*maxsubprobsize);
// Fine-grid origin of this bin.
int xoffset=(bidx % nbinx)*bin_size_x;
int yoffset=((bidx / nbinx)%nbiny)*bin_size_y;
int zoffset=(bidx/ (nbinx*nbiny))*bin_size_z;
// Tile size: bin extent plus a ceil(ns/2) halo on each side.
int N = (bin_size_x+2*ceil(ns/2.0))*(bin_size_y+2*ceil(ns/2.0))*
(bin_size_z+2*ceil(ns/2.0));
for(int i=threadIdx.x; i<N; i+=blockDim.x){
fwshared[i].x = 0.0;
fwshared[i].y = 0.0;
}
// Tile must be fully zeroed before any thread accumulates into it.
__syncthreads();
FLT x_rescaled, y_rescaled, z_rescaled;
CUCPX cnow;
for(int i=threadIdx.x; i<nupts; i+=blockDim.x){
FLT ker1[MAX_NSPREAD];
FLT ker2[MAX_NSPREAD];
FLT ker3[MAX_NSPREAD];
int idx = ptstart+i;
x_rescaled=RESCALE(x[idxnupts[idx]], nf1, pirange);
y_rescaled=RESCALE(y[idxnupts[idx]], nf2, pirange);
z_rescaled=RESCALE(z[idxnupts[idx]], nf3, pirange);
cnow = c[idxnupts[idx]];
// Support window in bin-local coordinates.
xstart = ceil(x_rescaled - ns/2.0)-xoffset;
ystart = ceil(y_rescaled - ns/2.0)-yoffset;
zstart = ceil(z_rescaled - ns/2.0)-zoffset;
xend = floor(x_rescaled + ns/2.0)-xoffset;
yend = floor(y_rescaled + ns/2.0)-yoffset;
zend = floor(z_rescaled + ns/2.0)-zoffset;
FLT x1=(FLT)xstart+xoffset-x_rescaled;
FLT y1=(FLT)ystart+yoffset-y_rescaled;
FLT z1=(FLT)zstart+zoffset-z_rescaled;
eval_kernel_vec(ker1,x1,ns,es_c,es_beta);
eval_kernel_vec(ker2,y1,ns,es_c,es_beta);
eval_kernel_vec(ker3,z1,ns,es_c,es_beta);
#if 1
for(int zz=zstart; zz<=zend; zz++){
FLT kervalue3 = ker3[zz-zstart];
// Shift into tile coordinates (halo origin at -ceil(ns/2)).
iz = zz+ceil(ns/2.0);
for(int yy=ystart; yy<=yend; yy++){
FLT kervalue2 = ker2[yy-ystart];
iy = yy+ceil(ns/2.0);
for(int xx=xstart; xx<=xend; xx++){
FLT kervalue1 = ker1[xx-xstart];
ix = xx+ceil(ns/2.0);
// Clip to the tile bounds.
if(ix >= (bin_size_x + (int) ceil(ns/2.0)*2)) break;
if(iy >= (bin_size_y + (int) ceil(ns/2.0)*2)) break;
if(iz >= (bin_size_z + (int) ceil(ns/2.0)*2)) break;
outidx = ix+iy*(bin_size_x+ceil(ns/2.0)*2)+
iz*(bin_size_x+ceil(ns/2.0)*2)*
(bin_size_y+ceil(ns/2.0)*2);
#if 1
// Shared-memory atomics: threads of this block may hit the same cell.
atomicAdd(&fwshared[outidx].x, cnow.x*kervalue1*kervalue2*
kervalue3);
atomicAdd(&fwshared[outidx].y, cnow.y*kervalue1*kervalue2*
kervalue3);
#endif
}
}
}
#endif
}
// All accumulation must finish before the tile is flushed.
__syncthreads();
/* write to global memory */
for(int n=threadIdx.x; n<N; n+=blockDim.x){
// Decompose the flat tile index n into tile coordinates (i,j,k).
int i = n % (int) (bin_size_x+2*ceil(ns/2.0) );
int j = (int) (n /(bin_size_x+2*ceil(ns/2.0))) % (int) (bin_size_y+2*ceil(ns/2.0));
int k = n / ((bin_size_x+2*ceil(ns/2.0))*(bin_size_y+2*ceil(ns/2.0)));
// Back to fine-grid coordinates, then wrap periodically into [0, nf).
ix = xoffset-ceil(ns/2.0)+i;
iy = yoffset-ceil(ns/2.0)+j;
iz = zoffset-ceil(ns/2.0)+k;
if(ix<(nf1+ceil(ns/2.0)) && iy<(nf2+ceil(ns/2.0)) && iz<(nf3+ceil(ns/2.0))){
ix = ix < 0 ? ix+nf1 : (ix>nf1-1 ? ix-nf1 : ix);
iy = iy < 0 ? iy+nf2 : (iy>nf2-1 ? iy-nf2 : iy);
iz = iz < 0 ? iz+nf3 : (iz>nf3-1 ? iz-nf3 : iz);
outidx = ix+iy*nf1+iz*nf1*nf2;
int sharedidx=i+j*(bin_size_x+ceil(ns/2.0)*2)+
k*(bin_size_x+ceil(ns/2.0)*2)*(bin_size_y+ceil(ns/2.0)*2);
// Global atomics: different blocks' halos overlap on the fine grid.
atomicAdd(&fw[outidx].x, fwshared[sharedidx].x);
atomicAdd(&fw[outidx].y, fwshared[sharedidx].y);
}
}
}
/* Kernels for Block BlockGather Method */
__global__
void Temp(int binsperobinx, int binsperobiny, int binsperobinz,
	int nobinx, int nobiny, int nobinz, int* binsize)
{
	/* Debug/scratch kernel: for every interior (non-ghost) fine bin, store
	   the bin's own global index into binsize[]. One thread per fine bin,
	   3D launch over the ghost-padded bin grid. */
	const int bx = threadIdx.x + blockIdx.x*blockDim.x;
	const int by = threadIdx.y + blockIdx.y*blockDim.y;
	const int bz = threadIdx.z + blockIdx.z*blockDim.z;
	const int gidx = CalcGlobalIdx(bx,by,bz,nobinx,nobiny,nobinz,
		binsperobinx,binsperobiny,binsperobinz);
	// in-range on all three axes
	const bool inrange = bx < nobinx*binsperobinx &&
	                     by < nobiny*binsperobiny &&
	                     bz < nobinz*binsperobinz;
	// interior = not in the one-bin ghost shell of its outer bin, per axis
	const bool interior =
		(bx%binsperobinx > 0 && bx%binsperobinx < binsperobinx-1) &&
		(by%binsperobiny > 0 && by%binsperobiny < binsperobiny-1) &&
		(bz%binsperobinz > 0 && bz%binsperobinz < binsperobinz-1);
	if(inrange && interior)
		binsize[gidx] = gidx;
}
/* Bin-count NU points on the ghost-padded bin grid used by the BlockGather
   method.  Each outer bin consists of binsperobin{x,y,z} fine bins whose
   outermost layer is a ghost shell, so a point's raw fine-bin coordinate is
   remapped into the interior (the "+1" below skips the lower ghost layer).
   Outputs: bin_size[] (per-bin counts, accumulated atomically) and
   sortidx[i] (point i's arrival rank within its bin). */
__global__
void LocateNUptstoBins_ghost(int M, int bin_size_x, int bin_size_y,
	int bin_size_z, int nobinx, int nobiny, int nobinz, int binsperobinx,
	int binsperobiny, int binsperobinz, int* bin_size, FLT *x, FLT *y, FLT *z,
	int* sortidx, int pirange, int nf1, int nf2, int nf3)
{
	int binidx,binx,biny,binz;
	int oldidx;
	FLT x_rescaled,y_rescaled,z_rescaled;
	// grid-stride loop over the M nonuniform points
	for(int i=threadIdx.x+blockIdx.x*blockDim.x; i<M; i+=gridDim.x*blockDim.x){
		// fold coordinates into [0,nf) on each axis
		x_rescaled=RESCALE(x[i], nf1, pirange);
		y_rescaled=RESCALE(y[i], nf2, pirange);
		z_rescaled=RESCALE(z[i], nf3, pirange);
		binx = floor(x_rescaled/bin_size_x);
		biny = floor(y_rescaled/bin_size_y);
		binz = floor(z_rescaled/bin_size_z);
		// remap raw bin coords onto the ghost-padded grid: each outer bin has
		// binsperobin*-2 interior bins; shift past the lower ghost layer
		binx = binx/(binsperobinx-2)*binsperobinx + (binx%(binsperobinx-2)+1);
		biny = biny/(binsperobiny-2)*binsperobiny + (biny%(binsperobiny-2)+1);
		binz = binz/(binsperobinz-2)*binsperobinz + (binz%(binsperobinz-2)+1);
		binidx = CalcGlobalIdx(binx,biny,binz,nobinx,nobiny,nobinz,binsperobinx,
			binsperobiny, binsperobinz);
		oldidx = atomicAdd(&bin_size[binidx], 1);
		sortidx[i] = oldidx;
	}
}
/* Propagate interior bin sizes into the ghost (halo) bins of neighboring
   outer bins.  A fine bin whose per-axis index is 1 (resp. binsperobin-2)
   is the first (resp. last) interior bin of its outer bin; its size is
   copied into the ghost bin two slots away, wrapping periodically across
   the whole fine-bin grid.  One thread per fine bin, 3D launch. */
__global__
void FillGhostBins(int binsperobinx, int binsperobiny, int binsperobinz,
	int nobinx, int nobiny, int nobinz, int* binsize)
{
	int binx =threadIdx.x+blockIdx.x*blockDim.x;
	int biny =threadIdx.y+blockIdx.y*blockDim.y;
	int binz =threadIdx.z+blockIdx.z*blockDim.z;
	int nbinx = nobinx*binsperobinx;
	int nbiny = nobiny*binsperobiny;
	int nbinz = nobinz*binsperobinz;
	if(binx < nbinx && biny < nbiny && binz < nbinz){
		int binidx = CalcGlobalIdx(binx,biny,binz,nobinx,nobiny,nobinz,
			binsperobinx,binsperobiny, binsperobinz);
		// first interior bin in x: copy into the ghost bin below (wrap)
		if(binx % binsperobinx == 1){
			int i = binx - 2;
			i = i<0 ? i+nbinx : i;
			int idxtoupdate = CalcGlobalIdx(i,biny,binz,nobinx,nobiny,nobinz,
				binsperobinx,binsperobiny, binsperobinz);
			binsize[idxtoupdate] = binsize[binidx];
		}
		// last interior bin in x: copy into the ghost bin above (wrap)
		if(binx % binsperobinx == binsperobinx-2){
			int i = binx + 2;
			i = (i==nbinx) ? i-nbinx : i;
			int idxtoupdate = CalcGlobalIdx(i,biny,binz,nobinx,nobiny,nobinz,
				binsperobinx,binsperobiny, binsperobinz);
			binsize[idxtoupdate] = binsize[binidx];
		}
		if(biny % binsperobiny == 1){
			int i = biny - 2;
			i = i<0 ? i+nbiny : i;
			int idxtoupdate = CalcGlobalIdx(binx,i,binz,nobinx,nobiny,nobinz,
				binsperobinx,binsperobiny, binsperobinz);
			binsize[idxtoupdate] = binsize[binidx];
		}
		// bug fix: modulus was taken with binsperobinx; the y axis must use
		// binsperobiny (mirrors the five sibling cases above/below)
		if(biny % binsperobiny == binsperobiny-2){
			int i = biny + 2;
			i = (i==nbiny) ? i-nbiny : i;
			int idxtoupdate = CalcGlobalIdx(binx,i,binz,nobinx,nobiny,nobinz,
				binsperobinx,binsperobiny, binsperobinz);
			binsize[idxtoupdate] = binsize[binidx];
		}
		if(binz % binsperobinz == 1){
			int i = binz - 2;
			i = i<0 ? i+nbinz : i;
			int idxtoupdate = CalcGlobalIdx(binx,biny,i,nobinx,nobiny,nobinz,
				binsperobinx,binsperobiny, binsperobinz);
			binsize[idxtoupdate] = binsize[binidx];
		}
		if(binz % binsperobinz == binsperobinz-2){
			int i = binz + 2;
			i = (i==nbinz) ? i-nbinz : i;
			int idxtoupdate = CalcGlobalIdx(binx,biny,i,nobinx,nobiny,nobinz,
				binsperobinx,binsperobiny, binsperobinz);
			binsize[idxtoupdate] = binsize[binidx];
		}
	}
}
/* Scatter each NU point id into its sorted slot on the ghost-padded bin
   grid: index[bin_startpts[bin] + rank] = i, where rank is the per-bin
   arrival order previously recorded in sortidx[] by
   LocateNUptstoBins_ghost.  The bin remapping below must match that kernel
   exactly. */
__global__
void CalcInvertofGlobalSortIdx_ghost(int M, int bin_size_x,
	int bin_size_y, int bin_size_z, int nobinx, int nobiny, int nobinz,
	int binsperobinx, int binsperobiny, int binsperobinz, int* bin_startpts,
	int* sortidx, FLT *x, FLT *y, FLT *z, int* index, int pirange, int nf1,
	int nf2, int nf3)
{
	int binx,biny,binz;
	int binidx;
	FLT x_rescaled,y_rescaled,z_rescaled;
	// grid-stride loop over the M nonuniform points
	for(int i=threadIdx.x+blockIdx.x*blockDim.x; i<M; i+=gridDim.x*blockDim.x){
		x_rescaled=RESCALE(x[i], nf1, pirange);
		y_rescaled=RESCALE(y[i], nf2, pirange);
		z_rescaled=RESCALE(z[i], nf3, pirange);
		binx = floor(x_rescaled/bin_size_x);
		biny = floor(y_rescaled/bin_size_y);
		binz = floor(z_rescaled/bin_size_z);
		// same ghost-padded remapping as LocateNUptstoBins_ghost
		binx = binx/(binsperobinx-2)*binsperobinx + (binx%(binsperobinx-2)+1);
		biny = biny/(binsperobiny-2)*binsperobiny + (biny%(binsperobiny-2)+1);
		binz = binz/(binsperobinz-2)*binsperobinz + (binz%(binsperobinz-2)+1);
		binidx = CalcGlobalIdx(binx,biny,binz,nobinx,nobiny,nobinz,binsperobinx,
			binsperobiny, binsperobinz);
		index[bin_startpts[binidx]+sortidx[i]] = i;
	}
}
/* For every ghost bin, populate its slice of index[] by copying the point
   indices of the interior bin it mirrors across the periodic fine-bin grid.
   The wrap direction per axis is recorded in box[] (0 = no wrap,
   1 = wrapped below, 2 = wrapped above) and folded into the stored index
   as a base-3 code times M; the BlockGather spread kernels decode it via
   idxnupts[idx]/M.  One thread per fine bin, 3D launch. */
__global__
void GhostBinPtsIdx(int binsperobinx, int binsperobiny, int binsperobinz,
	int nobinx, int nobiny, int nobinz, int* binsize, int* index,
	int* binstartpts, int M)
{
	int binx =threadIdx.x+blockIdx.x*blockDim.x;
	int biny =threadIdx.y+blockIdx.y*blockDim.y;
	int binz =threadIdx.z+blockIdx.z*blockDim.z;
	int nbinx = nobinx*binsperobinx;
	int nbiny = nobiny*binsperobiny;
	int nbinz = nobinz*binsperobinz;
	int i,j,k;          // coordinates of the source (mirrored) bin
	int w = 0;          // set to 1 iff this thread's bin is a ghost bin
	int box[3];         // per-axis wrap code, see header comment
	if(binx < nbinx && biny < nbiny && binz < nbinz){
		box[0] = box[1] = box[2] = 0;
		i = binx;
		j = biny;
		k = binz;
		int binidx = CalcGlobalIdx(binx,biny,binz,nobinx,nobiny,nobinz,
			binsperobinx,binsperobiny,binsperobinz);
		// low-side ghost in x: source is two bins below, wrapping if needed
		if(binx % binsperobinx == 0){
			i = binx - 2;
			box[0] = (i<0);
			i = i<0 ? i+nbinx : i;
			w=1;
		}
		// high-side ghost in x
		if(binx % binsperobinx == binsperobinx-1){
			i = binx + 2;
			box[0] = (i>nbinx)*2;
			i = (i>nbinx) ? i-nbinx : i;
			w=1;
		}
		if(biny % binsperobiny == 0){
			j = biny - 2;
			box[1] = (j<0);
			j = j<0 ? j+nbiny : j;
			w=1;
		}
		if(biny % binsperobiny == binsperobiny-1){
			j = biny + 2;
			box[1] = (j>nbiny)*2;
			j = (j>nbiny) ? j-nbiny : j;
			w=1;
		}
		if(binz % binsperobinz == 0){
			k = binz - 2;
			box[2] = (k<0);
			k = k<0 ? k+nbinz : k;
			w=1;
		}
		if(binz % binsperobinz == binsperobinz-1){
			k = binz + 2;
			box[2] = (k>nbinz)*2;
			k = (k>nbinz) ? k-nbinz : k;
			w=1;
		}
		// interior bin that this ghost bin mirrors
		int corbinidx = CalcGlobalIdx(i,j,k,nobinx,nobiny,nobinz,
			binsperobinx,binsperobiny, binsperobinz);
		if(w==1){
			// copy the source bin's point ids, tagging the wrap code as
			// a multiple of M so point id and wrap can both be recovered
			for(int n = 0; n<binsize[binidx];n++){
				index[binstartpts[binidx]+n] = M*(box[0]+box[1]*3+box[2]*9) +
					index[binstartpts[corbinidx]+n];
			}
		}
	}
}
__global__
void CalcSubProb_3d_v1(int binsperobinx, int binsperobiny, int binsperobinz,
	int* bin_size, int* num_subprob, int maxsubprobsize, int numbins)
{
	/* For each outer bin i, sum the NU-point counts of all its fine bins
	   (ghost layers included) and record how many subproblems of at most
	   maxsubprobsize points are needed to cover them. */
	const int binsperobin = binsperobinx*binsperobiny*binsperobinz;
	const int stride = gridDim.x*blockDim.x;
	for(int i = threadIdx.x + blockIdx.x*blockDim.x; i < numbins; i += stride){
		int total = 0;
		for(int b = 0; b < binsperobin; b++)
			total += bin_size[binsperobin*i + b];
		num_subprob[i] = ceil(total/(float)maxsubprobsize);
	}
}
__global__
void MapBintoSubProb_3d_v1(int* d_subprob_to_obin, int* d_subprobstartpts,
	int* d_numsubprob,int numbins)
{
	/* Invert the per-bin subproblem counts: for every subproblem j of outer
	   bin i, write i into d_subprob_to_obin so each CUDA block can look up
	   which outer bin it serves. */
	const int stride = gridDim.x*blockDim.x;
	for(int i = threadIdx.x + blockIdx.x*blockDim.x; i < numbins; i += stride){
		const int start = d_subprobstartpts[i];
		for(int j = 0; j < d_numsubprob[i]; j++)
			d_subprob_to_obin[start+j] = i;
	}
}
/* Spread NU points onto the fine grid (BlockGather method, direct kernel
   evaluation).  One CUDA block handles one subproblem: up to maxsubprobsize
   points of one outer bin.  Contributions are accumulated into a shared-
   memory tile covering the outer bin, then flushed to global fw with
   atomics.  Each idxnupts entry encodes both the point id (idx % M) and,
   for ghost-bin copies, the periodic wrap direction (idx / M as a base-3
   code per axis, written by GhostBinPtsIdx).  Requires dynamic shared
   memory of obin_size_x*obin_size_y*obin_size_z CUCPX. */
__global__
void Spread_3d_BlockGather(FLT *x, FLT *y, FLT *z, CUCPX *c, CUCPX *fw, int M,
	const int ns, int nf1, int nf2, int nf3, FLT es_c, FLT es_beta, FLT sigma,
	int* binstartpts, int obin_size_x, int obin_size_y, int obin_size_z,
	int binsperobin, int* subprob_to_bin, int* subprobstartpts,
	int maxsubprobsize, int nobinx, int nobiny, int nobinz, int* idxnupts,
	int pirange)
{
	extern __shared__ CUCPX fwshared[];
	int xstart,ystart,zstart,xend,yend,zend;
	int subpidx=blockIdx.x;
	int obidx=subprob_to_bin[subpidx];
	int bidx = obidx*binsperobin;
	int obinsubp_idx=subpidx-subprobstartpts[obidx];
	int ix, iy, iz;
	int outidx;
	// range of points this subproblem owns within the outer bin
	int ptstart=binstartpts[bidx]+obinsubp_idx*maxsubprobsize;
	int nupts=min(maxsubprobsize, binstartpts[bidx+binsperobin]-binstartpts[bidx]
			-obinsubp_idx*maxsubprobsize);
	// fine-grid origin of this outer bin
	int xoffset=(obidx % nobinx)*obin_size_x;
	int yoffset=(obidx / nobinx)%nobiny*obin_size_y;
	int zoffset=(obidx / (nobinx*nobiny))*obin_size_z;
	int N = obin_size_x*obin_size_y*obin_size_z;
	// zero the shared tile
	for(int i=threadIdx.x; i<N; i+=blockDim.x){
		fwshared[i].x = 0.0;
		fwshared[i].y = 0.0;
	}
	__syncthreads();
	FLT x_rescaled, y_rescaled, z_rescaled;
	CUCPX cnow;
	for(int i=threadIdx.x; i<nupts; i+=blockDim.x){
		int idx = ptstart+i;
		int b = idxnupts[idx]/M;
		int box[3];
		// decode per-axis wrap: 0 -> no shift, 1 -> -nf (wrapped below),
		// 2 -> +nf (wrapped above).  Bug fix: code 2 was previously left
		// undecoded, shifting high-side ghost copies by 2*nf instead of nf.
		for(int d=0;d<3;d++){
			box[d] = b%3;
			if(box[d] == 1)
				box[d] = -1;
			if(box[d] == 2)
				box[d] = 1;
			b=b/3;
		}
		int ii = idxnupts[idx]%M;
		x_rescaled = RESCALE(x[ii],nf1,pirange) + box[0]*nf1;
		y_rescaled = RESCALE(y[ii],nf2,pirange) + box[1]*nf2;
		z_rescaled = RESCALE(z[ii],nf3,pirange) + box[2]*nf3;
		cnow = c[ii];
		// support of the ns-wide kernel, clipped to the outer-bin tile
		xstart = ceil(x_rescaled - ns/2.0)-xoffset;
		xstart = xstart < 0 ? 0 : xstart;
		ystart = ceil(y_rescaled - ns/2.0)-yoffset;
		ystart = ystart < 0 ? 0 : ystart;
		zstart = ceil(z_rescaled - ns/2.0)-zoffset;
		zstart = zstart < 0 ? 0 : zstart;
		xend   = floor(x_rescaled + ns/2.0)-xoffset;
		xend   = xend >= obin_size_x ? obin_size_x-1 : xend;
		yend   = floor(y_rescaled + ns/2.0)-yoffset;
		yend   = yend >= obin_size_y ? obin_size_y-1 : yend;
		zend   = floor(z_rescaled + ns/2.0)-zoffset;
		zend   = zend >= obin_size_z ? obin_size_z-1 : zend;
		// accumulate the separable ES kernel into the shared tile
		for(int zz=zstart; zz<=zend; zz++){
			FLT disz=abs(z_rescaled-(zz+zoffset));
			FLT kervalue3 = evaluate_kernel(disz, es_c, es_beta);
			for(int yy=ystart; yy<=yend; yy++){
				FLT disy=abs(y_rescaled-(yy+yoffset));
				FLT kervalue2 = evaluate_kernel(disy, es_c, es_beta);
				for(int xx=xstart; xx<=xend; xx++){
					outidx = xx+yy*obin_size_x+zz*obin_size_y*obin_size_x;
					FLT disx=abs(x_rescaled-(xx+xoffset));
					FLT kervalue1 = evaluate_kernel(disx, es_c, es_beta);
					atomicAdd(&fwshared[outidx].x, cnow.x*kervalue1*kervalue2*
						kervalue3);
					atomicAdd(&fwshared[outidx].y, cnow.y*kervalue1*kervalue2*
						kervalue3);
				}
			}
		}
	}
	__syncthreads();
	/* flush the shared tile to global memory (tile never wraps, so indices
	   are in range by construction) */
	for(int n=threadIdx.x; n<N; n+=blockDim.x){
		int i = n%obin_size_x;
		int j = (n/obin_size_x)%obin_size_y;
		int k = n/(obin_size_x*obin_size_y);
		ix = xoffset+i;
		iy = yoffset+j;
		iz = zoffset+k;
		outidx = ix+iy*nf1+iz*nf1*nf2;
		atomicAdd(&fw[outidx].x, fwshared[n].x);
		atomicAdd(&fw[outidx].y, fwshared[n].y);
	}
}
/* Spread NU points onto the fine grid (BlockGather method, Horner piecewise-
   polynomial kernel evaluation).  Same structure as Spread_3d_BlockGather:
   one CUDA block per subproblem, accumulation into a shared tile of
   obin_size_x*obin_size_y*obin_size_z CUCPX, then an atomic flush to global
   fw.  idxnupts entries encode point id (idx % M) plus the periodic wrap
   direction (idx / M, base-3 per axis, written by GhostBinPtsIdx). */
__global__
void Spread_3d_BlockGather_Horner(FLT *x, FLT *y, FLT *z, CUCPX *c, CUCPX *fw, int M,
	const int ns, int nf1, int nf2, int nf3, FLT es_c, FLT es_beta, FLT sigma,
	int* binstartpts, int obin_size_x, int obin_size_y, int obin_size_z,
	int binsperobin, int* subprob_to_bin, int* subprobstartpts,
	int maxsubprobsize, int nobinx, int nobiny, int nobinz, int* idxnupts,
	int pirange)
{
	extern __shared__ CUCPX fwshared[];
	int xstart,ystart,zstart,xend,yend,zend;
	int subpidx=blockIdx.x;
	int obidx=subprob_to_bin[subpidx];
	int bidx = obidx*binsperobin;
	int obinsubp_idx=subpidx-subprobstartpts[obidx];
	int ix, iy, iz;
	int outidx;
	// range of points this subproblem owns within the outer bin
	int ptstart=binstartpts[bidx]+obinsubp_idx*maxsubprobsize;
	int nupts=min(maxsubprobsize, binstartpts[bidx+binsperobin]-binstartpts[bidx]
			-obinsubp_idx*maxsubprobsize);
	// fine-grid origin of this outer bin
	int xoffset=(obidx%nobinx)*obin_size_x;
	int yoffset=(obidx/nobinx)%nobiny*obin_size_y;
	int zoffset=(obidx/(nobinx*nobiny))*obin_size_z;
	int N = obin_size_x*obin_size_y*obin_size_z;
	FLT ker1[MAX_NSPREAD];
	FLT ker2[MAX_NSPREAD];
	FLT ker3[MAX_NSPREAD];
	// zero the shared tile
	for(int i=threadIdx.x; i<N; i+=blockDim.x){
		fwshared[i].x = 0.0;
		fwshared[i].y = 0.0;
	}
	__syncthreads();
	FLT x_rescaled, y_rescaled, z_rescaled;
	CUCPX cnow;
	for(int i=threadIdx.x; i<nupts; i+=blockDim.x){
		int nidx = idxnupts[ptstart+i];
		int b = nidx/M;
		int box[3];
		// decode per-axis wrap: 0 -> no shift, 1 -> -nf (wrapped below),
		// 2 -> +nf (wrapped above).  Bug fix: code 2 was previously left
		// undecoded, shifting high-side ghost copies by 2*nf instead of nf.
		for(int d=0;d<3;d++){
			box[d] = b%3;
			if(box[d] == 1)
				box[d] = -1;
			if(box[d] == 2)
				box[d] = 1;
			b=b/3;
		}
		int ii = nidx%M;
		x_rescaled = RESCALE(x[ii],nf1,pirange) + box[0]*nf1;
		y_rescaled = RESCALE(y[ii],nf2,pirange) + box[1]*nf2;
		z_rescaled = RESCALE(z[ii],nf3,pirange) + box[2]*nf3;
		cnow = c[ii];
		// support of the ns-wide kernel, clipped to the outer-bin tile
		xstart = ceil(x_rescaled - ns/2.0)-xoffset;
		xstart = xstart < 0 ? 0 : xstart;
		ystart = ceil(y_rescaled - ns/2.0)-yoffset;
		ystart = ystart < 0 ? 0 : ystart;
		zstart = ceil(z_rescaled - ns/2.0)-zoffset;
		zstart = zstart < 0 ? 0 : zstart;
		xend   = floor(x_rescaled + ns/2.0)-xoffset;
		xend   = xend >= obin_size_x ? obin_size_x-1 : xend;
		yend   = floor(y_rescaled + ns/2.0)-yoffset;
		yend   = yend >= obin_size_y ? obin_size_y-1 : yend;
		zend   = floor(z_rescaled + ns/2.0)-zoffset;
		zend   = zend >= obin_size_z ? obin_size_z-1 : zend;
		// tabulate the 1D kernel along each axis once per point
		eval_kernel_vec_Horner(ker1,xstart+xoffset-x_rescaled,ns,sigma);
		eval_kernel_vec_Horner(ker2,ystart+yoffset-y_rescaled,ns,sigma);
		eval_kernel_vec_Horner(ker3,zstart+zoffset-z_rescaled,ns,sigma);
		for(int zz=zstart; zz<=zend; zz++){
			FLT kervalue3 = ker3[zz-zstart];
			for(int yy=ystart; yy<=yend; yy++){
				FLT kervalue2 = ker2[yy-ystart];
				for(int xx=xstart; xx<=xend; xx++){
					outidx = xx+yy*obin_size_x+zz*obin_size_y*obin_size_x;
					FLT kervalue1 = ker1[xx-xstart];
					atomicAdd(&fwshared[outidx].x, cnow.x*kervalue1*kervalue2*
						kervalue3);
					atomicAdd(&fwshared[outidx].y, cnow.y*kervalue1*kervalue2*
						kervalue3);
				}
			}
		}
	}
	__syncthreads();
	/* flush the shared tile to global memory (tile never wraps, so indices
	   are in range by construction) */
	for(int n=threadIdx.x; n<N; n+=blockDim.x){
		int i = n%obin_size_x;
		int j = (n/obin_size_x)%obin_size_y;
		int k = n/(obin_size_x*obin_size_y);
		ix = xoffset+i;
		iy = yoffset+j;
		iz = zoffset+k;
		outidx = ix+iy*nf1+iz*nf1*nf2;
		atomicAdd(&fw[outidx].x, fwshared[n].x);
		atomicAdd(&fw[outidx].y, fwshared[n].y);
	}
}
/* ---------------------- 3d Interpolation Kernels ---------------------------*/
/* Kernels for NUptsdriven Method */
/* 3D interpolation (uniform grid -> NU points), NUpts-driven: one thread
   per nonuniform point (grid-stride loop over the sorted order idxnupts).
   Gathers fw values under the ns-wide separable ES kernel with periodic
   wrapping on each axis, writing the result into c. */
__global__
void Interp_3d_NUptsdriven(FLT *x, FLT *y, FLT *z, CUCPX *c, CUCPX *fw, int M,
	const int ns, int nf1, int nf2, int nf3, FLT es_c, FLT es_beta,
	int *idxnupts, int pirange)
{
	for(int i=blockDim.x*blockIdx.x+threadIdx.x; i<M; i+=blockDim.x*gridDim.x){
		FLT x_rescaled=RESCALE(x[idxnupts[i]], nf1, pirange);
		FLT y_rescaled=RESCALE(y[idxnupts[i]], nf2, pirange);
		FLT z_rescaled=RESCALE(z[idxnupts[i]], nf3, pirange);
		// support of the kernel around this point (may extend past the grid;
		// wrapped below)
		int xstart = ceil(x_rescaled - ns/2.0);
		int ystart = ceil(y_rescaled - ns/2.0);
		int zstart = ceil(z_rescaled - ns/2.0);
		int xend = floor(x_rescaled + ns/2.0);
		int yend = floor(y_rescaled + ns/2.0);
		int zend = floor(z_rescaled + ns/2.0);
		CUCPX cnow;
		cnow.x = 0.0;
		cnow.y = 0.0;
		for(int zz=zstart; zz<=zend; zz++){
			FLT disz=abs(z_rescaled-zz);
			FLT kervalue3 = evaluate_kernel(disz, es_c, es_beta);
			for(int yy=ystart; yy<=yend; yy++){
				FLT disy=abs(y_rescaled-yy);
				FLT kervalue2 = evaluate_kernel(disy, es_c, es_beta);
				for(int xx=xstart; xx<=xend; xx++){
					// fold indices periodically into [0,nf)
					int ix = xx < 0 ? xx+nf1 : (xx>nf1-1 ? xx-nf1 : xx);
					int iy = yy < 0 ? yy+nf2 : (yy>nf2-1 ? yy-nf2 : yy);
					int iz = zz < 0 ? zz+nf3 : (zz>nf3-1 ? zz-nf3 : zz);
					int inidx = ix+iy*nf1+iz*nf2*nf1;
					FLT disx=abs(x_rescaled-xx);
					FLT kervalue1 = evaluate_kernel(disx, es_c, es_beta);
					cnow.x += fw[inidx].x*kervalue1*kervalue2*kervalue3;
					cnow.y += fw[inidx].y*kervalue1*kervalue2*kervalue3;
				}
			}
		}
		c[idxnupts[i]].x = cnow.x;
		c[idxnupts[i]].y = cnow.y;
	}
}
/* 3D interpolation, NUpts-driven, with the kernel tabulated once per axis
   via the Horner piecewise-polynomial evaluator instead of per-sample
   exp/sqrt calls.  Otherwise identical in structure to
   Interp_3d_NUptsdriven: gather fw under the separable kernel with
   periodic wrapping, write into c. */
__global__
void Interp_3d_NUptsdriven_Horner(FLT *x, FLT *y, FLT *z, CUCPX *c, CUCPX *fw,
	int M, const int ns, int nf1, int nf2, int nf3, FLT sigma, int *idxnupts,
	int pirange)
{
	for(int i=blockDim.x*blockIdx.x+threadIdx.x; i<M; i+=blockDim.x*gridDim.x){
		FLT x_rescaled=RESCALE(x[idxnupts[i]], nf1, pirange);
		FLT y_rescaled=RESCALE(y[idxnupts[i]], nf2, pirange);
		FLT z_rescaled=RESCALE(z[idxnupts[i]], nf3, pirange);
		int xstart = ceil(x_rescaled - ns/2.0);
		int ystart = ceil(y_rescaled - ns/2.0);
		int zstart = ceil(z_rescaled - ns/2.0);
		int xend = floor(x_rescaled + ns/2.0);
		int yend = floor(y_rescaled + ns/2.0);
		int zend = floor(z_rescaled + ns/2.0);
		CUCPX cnow;
		cnow.x = 0.0;
		cnow.y = 0.0;
		FLT ker1[MAX_NSPREAD];
		FLT ker2[MAX_NSPREAD];
		FLT ker3[MAX_NSPREAD];
		// tabulate the 1D kernel values along each axis once per point
		eval_kernel_vec_Horner(ker1,xstart-x_rescaled,ns,sigma);
		eval_kernel_vec_Horner(ker2,ystart-y_rescaled,ns,sigma);
		eval_kernel_vec_Horner(ker3,zstart-z_rescaled,ns,sigma);
		for(int zz=zstart; zz<=zend; zz++){
			FLT kervalue3 = ker3[zz-zstart];
			// fold index periodically into [0,nf3)
			int iz = zz < 0 ? zz+nf3 : (zz>nf3-1 ? zz-nf3 : zz);
			for(int yy=ystart; yy<=yend; yy++){
				FLT kervalue2 = ker2[yy-ystart];
				int iy = yy < 0 ? yy+nf2 : (yy>nf2-1 ? yy-nf2 : yy);
				for(int xx=xstart; xx<=xend; xx++){
					int ix = xx < 0 ? xx+nf1 : (xx>nf1-1 ? xx-nf1 : xx);
					int inidx = ix+iy*nf1+iz*nf2*nf1;
					FLT kervalue1 = ker1[xx-xstart];
					cnow.x += fw[inidx].x*kervalue1*kervalue2*kervalue3;
					cnow.y += fw[inidx].y*kervalue1*kervalue2*kervalue3;
				}
			}
		}
		c[idxnupts[i]].x = cnow.x;
		c[idxnupts[i]].y = cnow.y;
	}
}
/* Kernels for SubProb Method */
/* 3D interpolation, subproblem method: one CUDA block per subproblem (a
   slice of up to maxsubprobsize points of one bin).  The block first stages
   the fw tile covering its bin, padded by ceil(ns/2) on every side (with
   periodic wrapping), into dynamic shared memory, then each thread gathers
   its points from the shared tile.  Shared-memory requirement:
   (bin_size_x+2*ceil(ns/2))*(bin_size_y+2*ceil(ns/2))*(bin_size_z+2*ceil(ns/2))
   CUCPX. */
__global__
void Interp_3d_Subprob(FLT *x, FLT *y, FLT *z, CUCPX *c, CUCPX *fw,
	int M, const int ns, int nf1, int nf2, int nf3, FLT es_c, FLT es_beta,
	int* binstartpts, int* bin_size, int bin_size_x, int bin_size_y,
	int bin_size_z, int* subprob_to_bin, int* subprobstartpts, int* numsubprob,
	int maxsubprobsize, int nbinx, int nbiny, int nbinz, int* idxnupts,
	int pirange)
{
	extern __shared__ CUCPX fwshared[];
	int xstart,ystart,xend,yend,zstart,zend;
	int subpidx=blockIdx.x;
	int bidx=subprob_to_bin[subpidx];
	int binsubp_idx=subpidx-subprobstartpts[bidx];
	int ix, iy, iz;
	int outidx;
	// range of points this subproblem owns within its bin
	int ptstart=binstartpts[bidx]+binsubp_idx*maxsubprobsize;
	int nupts=min(maxsubprobsize, bin_size[bidx]-binsubp_idx*maxsubprobsize);
	// fine-grid origin of this bin
	int xoffset=(bidx % nbinx)*bin_size_x;
	int yoffset=((bidx / nbinx)%nbiny)*bin_size_y;
	int zoffset=(bidx/ (nbinx*nbiny))*bin_size_z;
	// padded tile size (bin plus ceil(ns/2) halo on each side)
	int N = (bin_size_x+2*ceil(ns/2.0))*(bin_size_y+2*ceil(ns/2.0))*
		(bin_size_z+2*ceil(ns/2.0));
#if 1
	/* stage the padded fw tile into shared memory, wrapping periodically */
	for(int n=threadIdx.x;n<N; n+=blockDim.x){
		int i = n % (int) (bin_size_x+2*ceil(ns/2.0) );
		int j = (int) (n /(bin_size_x+2*ceil(ns/2.0))) % (int) (bin_size_y+2*ceil(ns/2.0));
		int k = n / ((bin_size_x+2*ceil(ns/2.0))*(bin_size_y+2*ceil(ns/2.0)));
		ix = xoffset-ceil(ns/2.0)+i;
		iy = yoffset-ceil(ns/2.0)+j;
		iz = zoffset-ceil(ns/2.0)+k;
		if(ix<(nf1+ceil(ns/2.0)) && iy<(nf2+ceil(ns/2.0)) && iz<(nf3+ceil(ns/2.0))){
			ix = ix < 0 ? ix+nf1 : (ix>nf1-1 ? ix-nf1 : ix);
			iy = iy < 0 ? iy+nf2 : (iy>nf2-1 ? iy-nf2 : iy);
			iz = iz < 0 ? iz+nf3 : (iz>nf3-1 ? iz-nf3 : iz);
			outidx = ix+iy*nf1+iz*nf1*nf2;
			int sharedidx=i+j*(bin_size_x+ceil(ns/2.0)*2)+
				k*(bin_size_x+ceil(ns/2.0)*2)*(bin_size_y+ceil(ns/2.0)*2);
			fwshared[sharedidx].x = fw[outidx].x;
			fwshared[sharedidx].y = fw[outidx].y;
		}
	}
#endif
	__syncthreads();
	FLT x_rescaled, y_rescaled, z_rescaled;
	CUCPX cnow;
	for(int i=threadIdx.x; i<nupts; i+=blockDim.x){
		int idx = ptstart+i;
		x_rescaled=RESCALE(x[idxnupts[idx]], nf1, pirange);
		y_rescaled=RESCALE(y[idxnupts[idx]], nf2, pirange);
		z_rescaled=RESCALE(z[idxnupts[idx]], nf3, pirange);
		cnow.x = 0.0;
		cnow.y = 0.0;
		// kernel support in bin-local coordinates
		xstart = ceil(x_rescaled - ns/2.0)-xoffset;
		ystart = ceil(y_rescaled - ns/2.0)-yoffset;
		zstart = ceil(z_rescaled - ns/2.0)-zoffset;
		xend = floor(x_rescaled + ns/2.0)-xoffset;
		yend = floor(y_rescaled + ns/2.0)-yoffset;
		zend = floor(z_rescaled + ns/2.0)-zoffset;
		for (int zz=zstart; zz<=zend; zz++){
			FLT disz=abs(z_rescaled-zz);
			FLT kervalue3 = evaluate_kernel(disz, es_c, es_beta);
			iz = zz+ceil(ns/2.0);   // shift into the padded tile
			for(int yy=ystart; yy<=yend; yy++){
				FLT disy=abs(y_rescaled-yy);
				FLT kervalue2 = evaluate_kernel(disy, es_c, es_beta);
				iy = yy+ceil(ns/2.0);
				for(int xx=xstart; xx<=xend; xx++){
					ix = xx+ceil(ns/2.0);
					outidx = ix+iy*(bin_size_x+ceil(ns/2.0)*2)+
						iz*(bin_size_x+ceil(ns/2.0)*2)*
						(bin_size_y+ceil(ns/2.0)*2);
					FLT disx=abs(x_rescaled-xx);
					FLT kervalue1 = evaluate_kernel(disx, es_c, es_beta);
					cnow.x += fwshared[outidx].x*kervalue1*kervalue2*kervalue3;
					cnow.y += fwshared[outidx].y*kervalue1*kervalue2*kervalue3;
				}
			}
		}
		c[idxnupts[idx]].x = cnow.x;
		c[idxnupts[idx]].y = cnow.y;
	}
}
/* 3D interpolation, subproblem method, Horner kernel evaluation.  Same
   structure as Interp_3d_Subprob — stage a padded fw tile into shared
   memory, then gather per point — but the 1D kernel values are tabulated
   with eval_kernel_vec_Horner instead of per-sample exp/sqrt calls.
   Shared-memory requirement identical to Interp_3d_Subprob. */
__global__
void Interp_3d_Subprob_Horner(FLT *x, FLT *y, FLT *z, CUCPX *c, CUCPX *fw,
	int M, const int ns, int nf1, int nf2, int nf3, FLT sigma, int* binstartpts,
	int* bin_size, int bin_size_x, int bin_size_y, int bin_size_z,
	int* subprob_to_bin, int* subprobstartpts, int* numsubprob,
	int maxsubprobsize, int nbinx, int nbiny, int nbinz, int* idxnupts,
	int pirange)
{
	extern __shared__ CUCPX fwshared[];
	int xstart,ystart,xend,yend,zstart,zend;
	int subpidx=blockIdx.x;
	int bidx=subprob_to_bin[subpidx];
	int binsubp_idx=subpidx-subprobstartpts[bidx];
	int ix, iy, iz;
	int outidx;
	// range of points this subproblem owns within its bin
	int ptstart=binstartpts[bidx]+binsubp_idx*maxsubprobsize;
	int nupts=min(maxsubprobsize, bin_size[bidx]-binsubp_idx*maxsubprobsize);
	// fine-grid origin of this bin
	int xoffset=(bidx % nbinx)*bin_size_x;
	int yoffset=((bidx / nbinx)%nbiny)*bin_size_y;
	int zoffset=(bidx/ (nbinx*nbiny))*bin_size_z;
	// padded tile size (bin plus ceil(ns/2) halo on each side)
	int N = (bin_size_x+2*ceil(ns/2.0))*(bin_size_y+2*ceil(ns/2.0))*
		(bin_size_z+2*ceil(ns/2.0));
	/* stage the padded fw tile into shared memory, wrapping periodically */
	for(int n=threadIdx.x;n<N; n+=blockDim.x){
		int i = n % (int) (bin_size_x+2*ceil(ns/2.0) );
		int j = (int) (n /(bin_size_x+2*ceil(ns/2.0))) % (int) (bin_size_y+2*ceil(ns/2.0));
		int k = n / ((bin_size_x+2*ceil(ns/2.0))*(bin_size_y+2*ceil(ns/2.0)));
		ix = xoffset-ceil(ns/2.0)+i;
		iy = yoffset-ceil(ns/2.0)+j;
		iz = zoffset-ceil(ns/2.0)+k;
		if(ix<(nf1+ceil(ns/2.0)) && iy<(nf2+ceil(ns/2.0)) && iz<(nf3+ceil(ns/2.0))){
			ix = ix < 0 ? ix+nf1 : (ix>nf1-1 ? ix-nf1 : ix);
			iy = iy < 0 ? iy+nf2 : (iy>nf2-1 ? iy-nf2 : iy);
			iz = iz < 0 ? iz+nf3 : (iz>nf3-1 ? iz-nf3 : iz);
			outidx = ix+iy*nf1+iz*nf1*nf2;
			int sharedidx=i+j*(bin_size_x+ceil(ns/2.0)*2)+
				k*(bin_size_x+ceil(ns/2.0)*2)*(bin_size_y+ceil(ns/2.0)*2);
			fwshared[sharedidx].x = fw[outidx].x;
			fwshared[sharedidx].y = fw[outidx].y;
		}
	}
	__syncthreads();
	FLT ker1[MAX_NSPREAD];
	FLT ker2[MAX_NSPREAD];
	FLT ker3[MAX_NSPREAD];
	FLT x_rescaled, y_rescaled, z_rescaled;
	CUCPX cnow;
	for(int i=threadIdx.x; i<nupts; i+=blockDim.x){
		int idx = ptstart+i;
		x_rescaled=RESCALE(x[idxnupts[idx]], nf1, pirange);
		y_rescaled=RESCALE(y[idxnupts[idx]], nf2, pirange);
		z_rescaled=RESCALE(z[idxnupts[idx]], nf3, pirange);
		cnow.x = 0.0;
		cnow.y = 0.0;
		// kernel support in bin-local coordinates
		xstart = ceil(x_rescaled - ns/2.0)-xoffset;
		ystart = ceil(y_rescaled - ns/2.0)-yoffset;
		zstart = ceil(z_rescaled - ns/2.0)-zoffset;
		xend = floor(x_rescaled + ns/2.0)-xoffset;
		yend = floor(y_rescaled + ns/2.0)-yoffset;
		zend = floor(z_rescaled + ns/2.0)-zoffset;
		// tabulate the 1D kernel values along each axis once per point
		eval_kernel_vec_Horner(ker1,xstart+xoffset-x_rescaled,ns,sigma);
		eval_kernel_vec_Horner(ker2,ystart+yoffset-y_rescaled,ns,sigma);
		eval_kernel_vec_Horner(ker3,zstart+zoffset-z_rescaled,ns,sigma);
		for (int zz=zstart; zz<=zend; zz++){
			FLT kervalue3 = ker3[zz-zstart];
			iz = zz+ceil(ns/2.0);   // shift into the padded tile
			for(int yy=ystart; yy<=yend; yy++){
				FLT kervalue2 = ker2[yy-ystart];
				iy = yy+ceil(ns/2.0);
				for(int xx=xstart; xx<=xend; xx++){
					ix = xx+ceil(ns/2.0);
					outidx = ix+iy*(bin_size_x+ceil(ns/2.0)*2)+
						iz*(bin_size_x+ceil(ns/2.0)*2)*
						(bin_size_y+ceil(ns/2.0)*2);
					FLT kervalue1 = ker1[xx-xstart];
					cnow.x += fwshared[outidx].x*kervalue1*kervalue2*kervalue3;
					cnow.y += fwshared[outidx].y*kervalue1*kervalue2*kervalue3;
				}
			}
		}
		c[idxnupts[idx]].x = cnow.x;
		c[idxnupts[idx]].y = cnow.y;
	}
}
#if 0
// This kernels assumes that number of bins less than #maxnumberofthreads in
// each dim
/* DISABLED CODE (inside the enclosing #if 0).  For each outer bin, count
   the NU points in its own fine bins plus a one-bin halo (with periodic
   wrapping) and derive the number of subproblems of at most maxsubprobsize
   points.  Assumes the 3D launch covers all outer bins (see the comment
   above the #if 0). */
__global__
void CalcSubProb_3d(int bin_size_x, int bin_size_y, int bin_size_z,
	int o_bin_size_x, int o_bin_size_y, int o_bin_size_z, int nbinx, int nbiny,
	int nbinz, int nobinx, int nobiny, int nobinz, int* bin_size,
	int* num_subprob, int* num_nupts, int maxsubprobsize)
{
	int numNUpts = 0;
	int xbinstart, xbinend, ybinstart, ybinend, zbinstart, zbinend;
	int xobin, yobin, zobin;
	xobin = threadIdx.x+blockIdx.x*blockDim.x;
	yobin = threadIdx.y+blockIdx.y*blockDim.y;
	zobin = threadIdx.z+blockIdx.z*blockDim.z;
	// fine bins per outer bin, per axis
	int nbins_obin_x, nbins_obin_y, nbins_obin_z;
	nbins_obin_x = o_bin_size_x/bin_size_x;
	nbins_obin_y = o_bin_size_y/bin_size_y;
	nbins_obin_z = o_bin_size_z/bin_size_z;
	if(xobin < nobinx && yobin < nobiny && zobin < nobinz){
		// fine-bin range of this outer bin, extended by one halo bin each side
		xbinstart = xobin*nbins_obin_x-1;
		xbinend = (xobin+1)*nbins_obin_x;
		ybinstart = yobin*nbins_obin_y-1;
		ybinend = (yobin+1)*nbins_obin_y;
		zbinstart = zobin*nbins_obin_z-1;
		zbinend = (zobin+1)*nbins_obin_z;
		int ix, iy, iz;
		for(int k = zbinstart; k<= zbinend; k++){
			iz = (k < 0) ? k + nbinz : k;
			iz = (k == nbinz) ? k - nbinz : iz;
			for(int j=ybinstart; j<= ybinend; j++){
				iy = (j < 0) ? j + nbiny : j;
				iy = (j == nbiny) ? j - nbiny : iy;
				for(int i=xbinstart; i<= xbinend; i++){
					ix = (i < 0) ? i + nbinx : i;
					ix = (i == nbinx) ? i - nbinx : ix;
					int binidx = ix+iy*nbinx+iz*nbiny*nbinx;
					numNUpts += bin_size[binidx];
					//numSubProbs += ceil(bin_size[binidx]/
					//(float) maxsubprobsize);
				}
			}
		}
		int obinidx = xobin + yobin*nobinx + zobin*nobiny*nobinx;
		num_subprob[obinidx] = ceil(numNUpts/ (float) maxsubprobsize);
		//num_subprob[obinidx] = numSubProbs;
		num_nupts[obinidx] = numNUpts;
	}
}
/* DISABLED CODE (inside the enclosing #if 0).  Walks the same halo-extended
   fine-bin range as CalcSubProb_3d and fills, for each subproblem of this
   outer bin, the fine bin it starts in (d_subprob_to_bin) and the running
   point offset (d_subprob_to_nupts). */
__global__
void MapBintoSubProb_3d(int* d_subprobstartpts, int* d_subprob_to_bin,
	int* d_subprob_to_nupts, int bin_size_x, int bin_size_y, int bin_size_z,
	int o_bin_size_x, int o_bin_size_y, int o_bin_size_z, int nbinx,
	int nbiny, int nbinz, int nobinx, int nobiny, int nobinz, int* bin_size,
	int* num_subprob, int* num_nupts, int maxsubprobsize)
{
	int numNUpts = 0;   // points accumulated but not yet assigned to a subproblem
	int s = 0;          // subproblem counter within this outer bin
	int xbinstart, xbinend, ybinstart, ybinend, zbinstart, zbinend;
	int xobin, yobin, zobin;
	xobin = threadIdx.x+blockIdx.x*blockDim.x;
	yobin = threadIdx.y+blockIdx.y*blockDim.y;
	zobin = threadIdx.z+blockIdx.z*blockDim.z;
	// fine bins per outer bin, per axis
	int nbins_obin_x, nbins_obin_y, nbins_obin_z;
	nbins_obin_x = o_bin_size_x/bin_size_x;
	nbins_obin_y = o_bin_size_y/bin_size_y;
	nbins_obin_z = o_bin_size_z/bin_size_z;
	if(xobin < nobinx && yobin < nobiny && zobin < nobinz){
		int obinidx = xobin + yobin*nobinx + zobin*nobiny*nobinx;
		int startsubprob = d_subprobstartpts[obinidx];
		// fine-bin range of this outer bin, extended by one halo bin each side
		xbinstart = xobin*nbins_obin_x-1;
		xbinend = (xobin+1)*nbins_obin_x;
		ybinstart = yobin*nbins_obin_y-1;
		ybinend = (yobin+1)*nbins_obin_y;
		zbinstart = zobin*nbins_obin_z-1;
		zbinend = (zobin+1)*nbins_obin_z;
		int ix, iy, iz;
		for(int k = zbinstart; k<= zbinend; k++){
			iz = (k < 0) ? k + nbinz : k;
			iz = (iz == nbinz) ? iz - nbinz : iz;
			for(int j=ybinstart; j<= ybinend; j++){
				iy = (j < 0) ? j + nbiny : j;
				iy = (iy == nbiny) ? iy - nbiny : iy;
				for(int i=xbinstart; i<= xbinend; i++){
					ix = (i < 0) ? i + nbinx : i;
					ix = (ix == nbinx) ? ix - nbinx : ix;
					int binidx = ix+iy*nbinx+iz*nbiny*nbinx;
					int numNUptsold = numNUpts - maxsubprobsize;
					numNUpts += bin_size[binidx];
					// open the first subproblem once any points appear
					if(s == 0 && numNUpts > 0){
						numNUptsold += maxsubprobsize;
						d_subprob_to_bin[startsubprob+s] = binidx;
						d_subprob_to_nupts[startsubprob+s] = 0;
						s++;
					}
					// emit a new subproblem each time maxsubprobsize is reached
					while( numNUpts >= maxsubprobsize ){
						numNUptsold += maxsubprobsize;
						d_subprob_to_bin [startsubprob+s] = binidx;
						d_subprob_to_nupts[startsubprob+s] = numNUptsold;
						numNUpts -= maxsubprobsize;
						s++;
					}
				}
			}
		}
	}
}
/* DISABLED CODE (inside the enclosing #if 0).  Plain (non-ghost) bin count:
   atomically count points per bin and record each point's rank in its bin.
   NOTE(review): references `pirange`, which is not a parameter of this
   kernel — this would not compile if the #if 0 were removed; a pirange
   argument would need to be added (compare CalcBinSize_noghost_3d). */
__global__
void LocateNUptstoBins(int M, int nf1, int nf2, int nf3, int bin_size_x,
	int bin_size_y, int bin_size_z, int nbinx, int nbiny, int nbinz,
	int* bin_size, FLT *x, FLT *y, FLT *z, int* sortidx)
{
	int binidx,binx,biny,binz;
	int oldidx;
	FLT x_rescaled,y_rescaled,z_rescaled;
	// grid-stride loop over the M nonuniform points
	for(int i=threadIdx.x+blockIdx.x*blockDim.x; i<M; i+=gridDim.x*blockDim.x){
		x_rescaled=RESCALE(x[i], nf1, pirange);
		y_rescaled=RESCALE(y[i], nf2, pirange);
		z_rescaled=RESCALE(z[i], nf3, pirange);
		binx = floor(x_rescaled/bin_size_x);
		biny = floor(y_rescaled/bin_size_y);
		binz = floor(z_rescaled/bin_size_z);
		binidx = binx+biny*nbinx+binz*nbinx*nbiny;
		oldidx = atomicAdd(&bin_size[binidx], 1);
		sortidx[i] = oldidx;
	}
}
#endif
#include <iostream>
#include <math.h>
#include <helper_cuda.h>
#include <cuda.h>
#include "../../finufft/utils.h"
#include "../spreadinterp.h"
using namespace std;
/* Double-precision atomicAdd: native on SM60+ (and for host compilation
   passes, where __CUDA_ARCH__ is undefined); otherwise provide the
   canonical compare-and-swap fallback on the 64-bit integer view. */
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600
#else
static __inline__ __device__ double atomicAdd(double* address, double val)
{
	unsigned long long int* address_as_ull =
		(unsigned long long int*)address;
	unsigned long long int old = *address_as_ull, assumed;
	do {
		assumed = old;
		old = atomicCAS(address_as_ull, assumed,
				__double_as_longlong(val +
					__longlong_as_double(assumed)));
	// Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
	} while (assumed != old);
	return __longlong_as_double(old);
}
#endif
static __forceinline__ __device__
FLT evaluate_kernel(FLT x, FLT es_c, FLT es_beta)
/* ES ("exp sqrt") kernel evaluation at single real argument:
      phi(x) = exp(beta.sqrt(1 - (2x/n_s)^2)),    for |x| < nspread/2
   related to an asymptotic approximation to the Kaiser--Bessel, itself an
   approximation to prolate spheroidal wavefunction (PSWF) of order 0.
   This is the "reference implementation", used by eg common/onedim_* 2/17/17 */
{
	const FLT arg = sqrt(1.0 - es_c*x*x);
	return exp(es_beta*arg);
}
static __inline__ __device__
void eval_kernel_vec_Horner(FLT *ker, const FLT x, const int w,
	const double upsampfac)
/* Fill ker[] with Horner piecewise poly approx to [-w/2,w/2] ES kernel eval at
   x_j = x + j,  for j=0,..,w-1.  Thus x in [-w/2,-w/2+1].   w is aka ns.
   This is the current evaluation method, since it's faster (except i7 w=16).
   Two upsampfacs implemented. Params must match ref formula. Barnett 4/24/18 */
{
	FLT z = 2*x + w - 1.0;         // scale so local grid offset z in [-1,1]
	// insert the auto-generated code which expects z, w args, writes to ker...
	// NOTE: only upsampfac==2.0 is handled here; other factors leave ker
	// untouched (callers must ensure upsampfac matches the generated table).
	if (upsampfac==2.0) {     // floating point equality is fine here
		#include "../../finufft/ker_horner_allw_loop.c"
	}
}
static __inline__ __device__
void eval_kernel_vec(FLT *ker, const FLT x, const double w, const double es_c,
	const double es_beta)
{
	/* Fill ker[] with direct ES-kernel evaluations at the w grid offsets
	   x, x+1, ..., x+w-1 (reference path; slower than the Horner variant). */
	int i = 0;
	while(i < w){
		ker[i] = evaluate_kernel(abs(x+i), es_c, es_beta);
		++i;
	}
}
/* Common Kernels */
/* Map a fine-bin coordinate (xidx,yidx,zidx) to its global index in the
   bin-major layout: outer bins (onx x ony x onz of them, each holding
   bnx*bny*bnz fine bins) are laid out x-fastest, and fine bins within an
   outer bin likewise.  Must agree with the decoding used by the BlockGather
   kernels (obidx % nobinx, (obidx/nobinx)%nobiny, obidx/(nobinx*nobiny)). */
__device__
int CalcGlobalIdx(int xidx, int yidx, int zidx, int onx, int ony, int onz,
	int bnx, int bny, int bnz){
	int oix,oiy,oiz;
	oix = xidx/bnx;
	oiy = yidx/bny;
	oiz = zidx/bnz;
	// bug fix: the z term of the outer-bin linear index must be scaled by
	// onx*ony (was ony*onz, which mis-indexes whenever onx != onz)
	return (oix + oiy*onx + oiz*onx*ony)*(bnx*bny*bnz) +
		(xidx%bnx+yidx%bny*bnx+zidx%bnz*bny*bnx);
}
__device__
int CalcGlobalIdx_V2(int xidx, int yidx, int zidx, int nbinx, int nbiny, int nbinz){
	/* Plain row-major linear bin index (x fastest, z slowest). */
	return xidx + nbinx*(yidx + nbiny*zidx);
}
#if 0
/* DISABLED CODE: fold all NU coordinates into [0,nf) in place, with
   pirange hard-coded to 1. */
__global__
void RescaleXY_3d(int M, int nf1, int nf2, int nf3, FLT* x, FLT* y, FLT* z)
{
	// grid-stride loop over the M nonuniform points
	for(int i=blockDim.x*blockIdx.x+threadIdx.x; i<M; i+=blockDim.x*gridDim.x){
		x[i] = RESCALE(x[i], nf1, 1);
		y[i] = RESCALE(y[i], nf2, 1);
		z[i] = RESCALE(z[i], nf3, 1);
	}
}
#endif
/* ---------------------- 3d Spreading Kernels -------------------------------*/
/* Kernels for bin sort NUpts */
__global__
void CalcBinSize_noghost_3d(int M, int nf1, int nf2, int nf3, int bin_size_x,
	int bin_size_y, int bin_size_z, int nbinx, int nbiny, int nbinz,
	int* bin_size, FLT *x, FLT *y, FLT *z, int* sortidx, int pirange)
{
	/* Count NU points per uniform bin (no ghost padding).  For each point i,
	   atomically bump bin_size[its bin] and record the point's arrival rank
	   within that bin in sortidx[i].  Grid-stride loop over the M points. */
	const int stride = gridDim.x*blockDim.x;
	for(int i = threadIdx.x + blockIdx.x*blockDim.x; i < M; i += stride){
		FLT xr = RESCALE(x[i], nf1, pirange);
		FLT yr = RESCALE(y[i], nf2, pirange);
		FLT zr = RESCALE(z[i], nf3, pirange);
		// pull bins that land exactly on the upper edge back into range
		int bx = floor(xr/bin_size_x); if(bx >= nbinx) bx = bx-1;
		int by = floor(yr/bin_size_y); if(by >= nbiny) by = by-1;
		int bz = floor(zr/bin_size_z); if(bz >= nbinz) bz = bz-1;
		const int bidx = bx + by*nbinx + bz*nbinx*nbiny;
		sortidx[i] = atomicAdd(&bin_size[bidx], 1);
	}
}
/* Scatter each NU point id into its sorted slot on the plain (non-ghost)
   bin grid: index[bin_startpts[bin] + rank] = i, where rank was recorded in
   sortidx[] by CalcBinSize_noghost_3d.  Binning must match that kernel,
   including the upper-edge clamp. */
__global__
void CalcInvertofGlobalSortIdx_3d(int M, int bin_size_x, int bin_size_y,
	int bin_size_z, int nbinx, int nbiny, int nbinz, int* bin_startpts,
	int* sortidx, FLT *x, FLT *y, FLT *z, int* index, int pirange, int nf1,
	int nf2, int nf3)
{
	int binx,biny,binz;
	int binidx;
	FLT x_rescaled,y_rescaled,z_rescaled;
	// grid-stride loop over the M nonuniform points
	for(int i=threadIdx.x+blockIdx.x*blockDim.x; i<M; i+=gridDim.x*blockDim.x){
		x_rescaled=RESCALE(x[i], nf1, pirange);
		y_rescaled=RESCALE(y[i], nf2, pirange);
		z_rescaled=RESCALE(z[i], nf3, pirange);
		binx = floor(x_rescaled/bin_size_x);
		binx = binx >= nbinx ? binx-1 : binx;   // upper-edge clamp
		biny = floor(y_rescaled/bin_size_y);
		biny = biny >= nbiny ? biny-1 : biny;
		binz = floor(z_rescaled/bin_size_z);
		binz = binz >= nbinz ? binz-1 : binz;
		binidx = CalcGlobalIdx_V2(binx,biny,binz,nbinx,nbiny,nbinz);
		index[bin_startpts[binidx]+sortidx[i]] = i;
	}
}
/* Identity permutation: index[i] = i (used when no bin sort is requested). */
__global__
void TrivialGlobalSortIdx_3d(int M, int* index)
{
	int stride = gridDim.x*blockDim.x;
	for(int i=threadIdx.x+blockIdx.x*blockDim.x; i<M; i+=stride)
		index[i] = i;
}
/* Kernels for NUptsdriven method */
/* Point-driven spreading, Horner (piecewise-polynomial) kernel evaluation:
 * grid-stride over the (sorted) point list idxnupts; each thread spreads
 * whole points into the fine grid fw with global atomics, wrapping output
 * indices periodically at the grid edges. */
__global__
void Spread_3d_NUptsdriven_Horner(FLT *x, FLT *y, FLT *z, CUCPX *c, CUCPX *fw,
	int M, const int ns, int nf1, int nf2, int nf3, FLT sigma, int* idxnupts,
	int pirange)
{
	FLT ker1[MAX_NSPREAD];
	FLT ker2[MAX_NSPREAD];
	FLT ker3[MAX_NSPREAD];
	for(int i=blockDim.x*blockIdx.x+threadIdx.x; i<M; i+=blockDim.x*gridDim.x){
		int pt = idxnupts[i];
		FLT px = RESCALE(x[pt], nf1, pirange);
		FLT py = RESCALE(y[pt], nf2, pirange);
		FLT pz = RESCALE(z[pt], nf3, pirange);
		// ns-wide kernel support centred at the point.
		int xstart = ceil(px - ns/2.0);
		int ystart = ceil(py - ns/2.0);
		int zstart = ceil(pz - ns/2.0);
		int xend = floor(px + ns/2.0);
		int yend = floor(py + ns/2.0);
		int zend = floor(pz + ns/2.0);
		// Tabulate the three separable 1D kernels once per point.
		eval_kernel_vec_Horner(ker1,(FLT)xstart-px,ns,sigma);
		eval_kernel_vec_Horner(ker2,(FLT)ystart-py,ns,sigma);
		eval_kernel_vec_Horner(ker3,(FLT)zstart-pz,ns,sigma);
		FLT cre = c[pt].x;
		FLT cim = c[pt].y;
		for(int zz=zstart; zz<=zend; zz++){
			int iz = zz < 0 ? zz+nf3 : (zz>nf3-1 ? zz-nf3 : zz);	// periodic wrap
			FLT kz = ker3[zz-zstart];
			for(int yy=ystart; yy<=yend; yy++){
				int iy = yy < 0 ? yy+nf2 : (yy>nf2-1 ? yy-nf2 : yy);
				FLT ky = ker2[yy-ystart];
				for(int xx=xstart; xx<=xend; xx++){
					int ix = xx < 0 ? xx+nf1 : (xx>nf1-1 ? xx-nf1 : xx);
					int outidx = ix+iy*nf1+iz*nf1*nf2;
					// Same left-to-right multiply order as before (float assoc.).
					FLT kerval = ker1[xx-xstart]*ky*kz;
					atomicAdd(&fw[outidx].x, cre*kerval);
					atomicAdd(&fw[outidx].y, cim*kerval);
				}
			}
		}
	}
}
/* Point-driven spreading, direct (exp-sqrt) kernel evaluation via
 * eval_kernel_vec: grid-stride over idxnupts; each thread spreads whole
 * points into fw with global atomics, wrapping indices periodically. */
__global__
void Spread_3d_NUptsdriven(FLT *x, FLT *y, FLT *z, CUCPX *c, CUCPX *fw, int M,
	const int ns, int nf1, int nf2, int nf3, FLT es_c, FLT es_beta,
	int* idxnupts, int pirange)
{
	FLT ker1[MAX_NSPREAD];
	FLT ker2[MAX_NSPREAD];
	FLT ker3[MAX_NSPREAD];
	for(int i=blockDim.x*blockIdx.x+threadIdx.x; i<M; i+=blockDim.x*gridDim.x){
		int pt = idxnupts[i];
		FLT px = RESCALE(x[pt], nf1, pirange);
		FLT py = RESCALE(y[pt], nf2, pirange);
		FLT pz = RESCALE(z[pt], nf3, pirange);
		// ns-wide kernel support centred at the point.
		int xstart = ceil(px - ns/2.0);
		int ystart = ceil(py - ns/2.0);
		int zstart = ceil(pz - ns/2.0);
		int xend = floor(px + ns/2.0);
		int yend = floor(py + ns/2.0);
		int zend = floor(pz + ns/2.0);
		// Tabulate the three separable 1D kernels once per point.
		eval_kernel_vec(ker1,(FLT)xstart-px,ns,es_c,es_beta);
		eval_kernel_vec(ker2,(FLT)ystart-py,ns,es_c,es_beta);
		eval_kernel_vec(ker3,(FLT)zstart-pz,ns,es_c,es_beta);
		FLT cre = c[pt].x;
		FLT cim = c[pt].y;
		for(int zz=zstart; zz<=zend; zz++){
			int iz = zz < 0 ? zz+nf3 : (zz>nf3-1 ? zz-nf3 : zz);	// periodic wrap
			FLT kz = ker3[zz-zstart];
			for(int yy=ystart; yy<=yend; yy++){
				int iy = yy < 0 ? yy+nf2 : (yy>nf2-1 ? yy-nf2 : yy);
				FLT ky = ker2[yy-ystart];
				for(int xx=xstart; xx<=xend; xx++){
					int ix = xx < 0 ? xx+nf1 : (xx>nf1-1 ? xx-nf1 : xx);
					int outidx = ix+iy*nf1+iz*nf1*nf2;
					// Same left-to-right multiply order as before (float assoc.).
					FLT kerval = ker1[xx-xstart]*ky*kz;
					atomicAdd(&fw[outidx].x, cre*kerval);
					atomicAdd(&fw[outidx].y, cim*kerval);
				}
			}
		}
	}
}
/* Kernels for Subprob method */
/* For each bin, number of subproblems = ceil(bin_size / maxsubprobsize).
 * Fix: use exact integer ceil-division; ceil() on a float ratio can round
 * the wrong way once bin_size exceeds float's 24-bit integer precision. */
__global__
void CalcSubProb_3d_v2(int* bin_size, int* num_subprob, int maxsubprobsize,
	int numbins)
{
	for(int i=threadIdx.x+blockIdx.x*blockDim.x; i<numbins;
		i+=gridDim.x*blockDim.x){
		num_subprob[i] = (bin_size[i] + maxsubprobsize - 1) / maxsubprobsize;
	}
}
/* Inverse map: for each bin i, label its d_numsubprob[i] subproblem slots
 * (starting at d_subprobstartpts[i]) with the owning bin id i. */
__global__
void MapBintoSubProb_3d_v2(int* d_subprob_to_bin,int* d_subprobstartpts,
	int* d_numsubprob,int numbins)
{
	for(int i=threadIdx.x+blockIdx.x*blockDim.x; i<numbins;
		i+=gridDim.x*blockDim.x){
		int base = d_subprobstartpts[i];
		int cnt = d_numsubprob[i];
		for(int j=0; j<cnt; j++)
			d_subprob_to_bin[base+j] = i;
	}
}
/* Subproblem spreading with Horner (piecewise-polynomial) kernel evaluation.
 * One thread block handles one subproblem: up to maxsubprobsize points from a
 * single bin. The block zeroes a shared-memory tile covering the bin plus a
 * halo of ceil(ns/2) cells per side, spreads its points into the tile with
 * shared-memory atomics, then adds the tile into the global fine grid fw
 * (periodic wrap applied at the grid edges).
 * Launch contract: gridDim.x == total subproblem count; dynamic shared memory
 * >= (bin_size_x+2*ceil(ns/2))*(bin_size_y+2*ceil(ns/2))*
 *    (bin_size_z+2*ceil(ns/2)) * sizeof(CUCPX). */
__global__
void Spread_3d_Subprob_Horner(FLT *x, FLT *y, FLT *z, CUCPX *c, CUCPX *fw, int M,
	const int ns, int nf1, int nf2, int nf3, FLT sigma, int* binstartpts,
	int* bin_size, int bin_size_x, int bin_size_y, int bin_size_z,
	int* subprob_to_bin, int* subprobstartpts, int* numsubprob,
	int maxsubprobsize, int nbinx, int nbiny, int nbinz, int* idxnupts,
	int pirange)
{
	extern __shared__ CUCPX fwshared[];
	int xstart,ystart,xend,yend,zstart,zend;
	int bidx=subprob_to_bin[blockIdx.x];	// bin owning this subproblem
	int binsubp_idx=blockIdx.x-subprobstartpts[bidx];	// subproblem rank within the bin
	int ix,iy,iz,outidx;
	int ptstart=binstartpts[bidx]+binsubp_idx*maxsubprobsize;	// first point of this chunk
	int nupts=min(maxsubprobsize, bin_size[bidx]-binsubp_idx*maxsubprobsize);
	// Lower corner of this bin in fine-grid coordinates.
	int xoffset=(bidx % nbinx)*bin_size_x;
	int yoffset=((bidx / nbinx)%nbiny)*bin_size_y;
	int zoffset=(bidx/ (nbinx*nbiny))*bin_size_z;
	// Shared-tile cell count: bin plus a ceil(ns/2) halo on each side.
	int N = (bin_size_x+2*ceil(ns/2.0))*(bin_size_y+2*ceil(ns/2.0))*
		(bin_size_z+2*ceil(ns/2.0));
	for(int i=threadIdx.x; i<N; i+=blockDim.x){
		fwshared[i].x = 0.0;
		fwshared[i].y = 0.0;
	}
	__syncthreads();	// tile fully zeroed before any accumulation
	FLT x_rescaled, y_rescaled, z_rescaled;
	CUCPX cnow;
	for(int i=threadIdx.x; i<nupts; i+=blockDim.x){
		FLT ker1[MAX_NSPREAD];
		FLT ker2[MAX_NSPREAD];
		FLT ker3[MAX_NSPREAD];
		int nuptsidx = idxnupts[ptstart+i];
		x_rescaled = RESCALE(x[nuptsidx],nf1,pirange);
		y_rescaled = RESCALE(y[nuptsidx],nf2,pirange);
		z_rescaled = RESCALE(z[nuptsidx],nf3,pirange);
		cnow = c[nuptsidx];
		// Kernel support in tile-local coordinates (may be negative: halo).
		xstart = ceil(x_rescaled - ns/2.0)-xoffset;
		ystart = ceil(y_rescaled - ns/2.0)-yoffset;
		zstart = ceil(z_rescaled - ns/2.0)-zoffset;
		xend = floor(x_rescaled + ns/2.0)-xoffset;
		yend = floor(y_rescaled + ns/2.0)-yoffset;
		zend = floor(z_rescaled + ns/2.0)-zoffset;
		// Tabulate the three separable 1D kernels once per point; the first
		// argument is (first grid sample) - (point), in global coordinates.
		eval_kernel_vec_Horner(ker1,xstart+xoffset-x_rescaled,ns,sigma);
		eval_kernel_vec_Horner(ker2,ystart+yoffset-y_rescaled,ns,sigma);
		eval_kernel_vec_Horner(ker3,zstart+zoffset-z_rescaled,ns,sigma);
		for (int zz=zstart; zz<=zend; zz++){
			FLT kervalue3 = ker3[zz-zstart];
			iz = zz+ceil(ns/2.0);	// shift into [0, bin_size_z+2*ceil(ns/2))
			if(iz >= (bin_size_z + (int) ceil(ns/2.0)*2)) break;
			for(int yy=ystart; yy<=yend; yy++){
				FLT kervalue2 = ker2[yy-ystart];
				iy = yy+ceil(ns/2.0);
				if(iy >= (bin_size_y + (int) ceil(ns/2.0)*2)) break;
				for(int xx=xstart; xx<=xend; xx++){
					ix = xx+ceil(ns/2.0);
					if(ix >= (bin_size_x + (int) ceil(ns/2.0)*2)) break;
					outidx = ix+iy*(bin_size_x+ceil(ns/2.0)*2)+
						iz*(bin_size_x+ceil(ns/2.0)*2)*
						(bin_size_y+ceil(ns/2.0)*2);
					FLT kervalue1 = ker1[xx-xstart];
					atomicAdd(&fwshared[outidx].x, cnow.x*kervalue1*kervalue2*
						kervalue3);
					atomicAdd(&fwshared[outidx].y, cnow.y*kervalue1*kervalue2*
						kervalue3);
				}
			}
		}
	}
	__syncthreads();	// all points spread before flushing the tile
	/* write to global memory */
	for(int n=threadIdx.x; n<N; n+=blockDim.x){
		// Decompose flat tile index n into local (i,j,k).
		int i = n % (int) (bin_size_x+2*ceil(ns/2.0) );
		int j = (int) (n /(bin_size_x+2*ceil(ns/2.0))) %
			(int) (bin_size_y+2*ceil(ns/2.0));
		int k = n / ((bin_size_x+2*ceil(ns/2.0))*(bin_size_y+2*ceil(ns/2.0)));
		// Fine-grid coordinates of this tile cell (halo may go negative).
		ix = xoffset-ceil(ns/2.0)+i;
		iy = yoffset-ceil(ns/2.0)+j;
		iz = zoffset-ceil(ns/2.0)+k;
		if(ix<(nf1+ceil(ns/2.0)) &&
			iy<(nf2+ceil(ns/2.0)) &&
			iz<(nf3+ceil(ns/2.0))){
			// Periodic wrap back into [0,nf).
			ix = ix < 0 ? ix+nf1 : (ix>nf1-1 ? ix-nf1 : ix);
			iy = iy < 0 ? iy+nf2 : (iy>nf2-1 ? iy-nf2 : iy);
			iz = iz < 0 ? iz+nf3 : (iz>nf3-1 ? iz-nf3 : iz);
			outidx = ix+iy*nf1+iz*nf1*nf2;
			int sharedidx=i+j*(bin_size_x+ceil(ns/2.0)*2)+
				k*(bin_size_x+ceil(ns/2.0)*2)*(bin_size_y+ceil(ns/2.0)*2);
			atomicAdd(&fw[outidx].x, fwshared[sharedidx].x);
			atomicAdd(&fw[outidx].y, fwshared[sharedidx].y);
		}
	}
}
/* Subproblem spreading with direct (exp-sqrt) kernel evaluation via
 * eval_kernel_vec. Same structure as Spread_3d_Subprob_Horner: one block per
 * subproblem, accumulate into a shared tile (bin + ceil(ns/2) halo per side),
 * then flush to fw with periodic wrap.
 * Launch contract: gridDim.x == total subproblem count; dynamic shared memory
 * >= (bin_size_x+2*ceil(ns/2))*(bin_size_y+2*ceil(ns/2))*
 *    (bin_size_z+2*ceil(ns/2)) * sizeof(CUCPX). */
__global__
void Spread_3d_Subprob(FLT *x, FLT *y, FLT *z, CUCPX *c, CUCPX *fw, int M,
	const int ns, int nf1, int nf2, int nf3, FLT es_c, FLT es_beta, int* binstartpts,
	int* bin_size, int bin_size_x, int bin_size_y, int bin_size_z,
	int* subprob_to_bin, int* subprobstartpts, int* numsubprob, int maxsubprobsize,
	int nbinx, int nbiny, int nbinz, int* idxnupts, int pirange)
{
	extern __shared__ CUCPX fwshared[];
	int xstart,ystart,xend,yend,zstart,zend;
	int subpidx=blockIdx.x;
	int bidx=subprob_to_bin[subpidx];	// bin owning this subproblem
	int binsubp_idx=subpidx-subprobstartpts[bidx];	// subproblem rank within the bin
	int ix, iy, iz, outidx;
	int ptstart=binstartpts[bidx]+binsubp_idx*maxsubprobsize;	// first point of this chunk
	int nupts=min(maxsubprobsize, bin_size[bidx]-binsubp_idx*maxsubprobsize);
	// Lower corner of this bin in fine-grid coordinates.
	int xoffset=(bidx % nbinx)*bin_size_x;
	int yoffset=((bidx / nbinx)%nbiny)*bin_size_y;
	int zoffset=(bidx/ (nbinx*nbiny))*bin_size_z;
	// Shared-tile cell count: bin plus a ceil(ns/2) halo on each side.
	int N = (bin_size_x+2*ceil(ns/2.0))*(bin_size_y+2*ceil(ns/2.0))*
		(bin_size_z+2*ceil(ns/2.0));
	for(int i=threadIdx.x; i<N; i+=blockDim.x){
		fwshared[i].x = 0.0;
		fwshared[i].y = 0.0;
	}
	__syncthreads();	// tile fully zeroed before any accumulation
	FLT x_rescaled, y_rescaled, z_rescaled;
	CUCPX cnow;
	for(int i=threadIdx.x; i<nupts; i+=blockDim.x){
		FLT ker1[MAX_NSPREAD];
		FLT ker2[MAX_NSPREAD];
		FLT ker3[MAX_NSPREAD];
		int idx = ptstart+i;
		x_rescaled=RESCALE(x[idxnupts[idx]], nf1, pirange);
		y_rescaled=RESCALE(y[idxnupts[idx]], nf2, pirange);
		z_rescaled=RESCALE(z[idxnupts[idx]], nf3, pirange);
		cnow = c[idxnupts[idx]];
		// Kernel support in tile-local coordinates (may be negative: halo).
		xstart = ceil(x_rescaled - ns/2.0)-xoffset;
		ystart = ceil(y_rescaled - ns/2.0)-yoffset;
		zstart = ceil(z_rescaled - ns/2.0)-zoffset;
		xend = floor(x_rescaled + ns/2.0)-xoffset;
		yend = floor(y_rescaled + ns/2.0)-yoffset;
		zend = floor(z_rescaled + ns/2.0)-zoffset;
		// Offsets from first grid sample to the point, global coordinates.
		FLT x1=(FLT)xstart+xoffset-x_rescaled;
		FLT y1=(FLT)ystart+yoffset-y_rescaled;
		FLT z1=(FLT)zstart+zoffset-z_rescaled;
		eval_kernel_vec(ker1,x1,ns,es_c,es_beta);
		eval_kernel_vec(ker2,y1,ns,es_c,es_beta);
		eval_kernel_vec(ker3,z1,ns,es_c,es_beta);
#if 1
		for(int zz=zstart; zz<=zend; zz++){
			FLT kervalue3 = ker3[zz-zstart];
			iz = zz+ceil(ns/2.0);	// shift into [0, bin_size_z+2*ceil(ns/2))
			for(int yy=ystart; yy<=yend; yy++){
				FLT kervalue2 = ker2[yy-ystart];
				iy = yy+ceil(ns/2.0);
				for(int xx=xstart; xx<=xend; xx++){
					FLT kervalue1 = ker1[xx-xstart];
					ix = xx+ceil(ns/2.0);
					// NOTE(review): the iy/iz bound checks sit inside the
					// innermost loop, so an out-of-range iy/iz only breaks the
					// xx loop each time (wasteful but never out of bounds);
					// the Horner variant hoists them to their own loop levels.
					if(ix >= (bin_size_x + (int) ceil(ns/2.0)*2)) break;
					if(iy >= (bin_size_y + (int) ceil(ns/2.0)*2)) break;
					if(iz >= (bin_size_z + (int) ceil(ns/2.0)*2)) break;
					outidx = ix+iy*(bin_size_x+ceil(ns/2.0)*2)+
						iz*(bin_size_x+ceil(ns/2.0)*2)*
						(bin_size_y+ceil(ns/2.0)*2);
#if 1
					atomicAdd(&fwshared[outidx].x, cnow.x*kervalue1*kervalue2*
						kervalue3);
					atomicAdd(&fwshared[outidx].y, cnow.y*kervalue1*kervalue2*
						kervalue3);
#endif
				}
			}
		}
#endif
	}
	__syncthreads();	// all points spread before flushing the tile
	/* write to global memory */
	for(int n=threadIdx.x; n<N; n+=blockDim.x){
		// Decompose flat tile index n into local (i,j,k).
		int i = n % (int) (bin_size_x+2*ceil(ns/2.0) );
		int j = (int) (n /(bin_size_x+2*ceil(ns/2.0))) % (int) (bin_size_y+2*ceil(ns/2.0));
		int k = n / ((bin_size_x+2*ceil(ns/2.0))*(bin_size_y+2*ceil(ns/2.0)));
		// Fine-grid coordinates of this tile cell (halo may go negative).
		ix = xoffset-ceil(ns/2.0)+i;
		iy = yoffset-ceil(ns/2.0)+j;
		iz = zoffset-ceil(ns/2.0)+k;
		if(ix<(nf1+ceil(ns/2.0)) && iy<(nf2+ceil(ns/2.0)) && iz<(nf3+ceil(ns/2.0))){
			// Periodic wrap back into [0,nf).
			ix = ix < 0 ? ix+nf1 : (ix>nf1-1 ? ix-nf1 : ix);
			iy = iy < 0 ? iy+nf2 : (iy>nf2-1 ? iy-nf2 : iy);
			iz = iz < 0 ? iz+nf3 : (iz>nf3-1 ? iz-nf3 : iz);
			outidx = ix+iy*nf1+iz*nf1*nf2;
			int sharedidx=i+j*(bin_size_x+ceil(ns/2.0)*2)+
				k*(bin_size_x+ceil(ns/2.0)*2)*(bin_size_y+ceil(ns/2.0)*2);
			atomicAdd(&fw[outidx].x, fwshared[sharedidx].x);
			atomicAdd(&fw[outidx].y, fwshared[sharedidx].y);
		}
	}
}
/* Kernels for Block BlockGather Method */
/* Debug/utility kernel: for every interior (non-ghost) bin, overwrite its
 * binsize entry with the bin's own global index. One thread per bin,
 * 3D launch. */
__global__
void Temp(int binsperobinx, int binsperobiny, int binsperobinz,
	int nobinx, int nobiny, int nobinz, int* binsize)
{
	int bx = threadIdx.x+blockIdx.x*blockDim.x;
	int by = threadIdx.y+blockIdx.y*blockDim.y;
	int bz = threadIdx.z+blockIdx.z*blockDim.z;
	if (bx >= nobinx*binsperobinx || by >= nobiny*binsperobiny ||
		bz >= nobinz*binsperobinz)
		return;
	// Interior = strictly inside its outer bin in all three dimensions.
	bool interior =
		(bx%binsperobinx > 0 && bx%binsperobinx < binsperobinx-1) &&
		(by%binsperobiny > 0 && by%binsperobiny < binsperobiny-1) &&
		(bz%binsperobinz > 0 && bz%binsperobinz < binsperobinz-1);
	if (interior) {
		int binidx = CalcGlobalIdx(bx,by,bz,nobinx,nobiny,nobinz,
			binsperobinx,binsperobiny,binsperobinz);
		binsize[binidx] = binidx;
	}
}
/* Count points per bin in the ghost-padded layout: each outer bin keeps one
 * ghost layer per side, so a point's interior bin coordinate is remapped
 * with a +1 shift past the low ghost layer. Atomically bump the bin count
 * and record the point's within-bin rank in sortidx. Grid-stride, one
 * thread per point. */
__global__
void LocateNUptstoBins_ghost(int M, int bin_size_x, int bin_size_y,
	int bin_size_z, int nobinx, int nobiny, int nobinz, int binsperobinx,
	int binsperobiny, int binsperobinz, int* bin_size, FLT *x, FLT *y, FLT *z,
	int* sortidx, int pirange, int nf1, int nf2, int nf3)
{
	for(int i=threadIdx.x+blockIdx.x*blockDim.x; i<M; i+=gridDim.x*blockDim.x){
		FLT px = RESCALE(x[i], nf1, pirange);
		FLT py = RESCALE(y[i], nf2, pirange);
		FLT pz = RESCALE(z[i], nf3, pirange);
		int bx = floor(px/bin_size_x);
		int by = floor(py/bin_size_y);
		int bz = floor(pz/bin_size_z);
		// (binsperobin-2) interior bins per obin; +1 skips the low ghost layer.
		bx = bx/(binsperobinx-2)*binsperobinx + (bx%(binsperobinx-2)+1);
		by = by/(binsperobiny-2)*binsperobiny + (by%(binsperobiny-2)+1);
		bz = bz/(binsperobinz-2)*binsperobinz + (bz%(binsperobinz-2)+1);
		int bin = CalcGlobalIdx(bx,by,bz,nobinx,nobiny,nobinz,binsperobinx,
			binsperobiny,binsperobinz);
		sortidx[i] = atomicAdd(&bin_size[bin], 1);
	}
}
/* Mirror interior-bin counts into ghost bins. Each outer bin has one ghost
 * layer per side; an interior bin adjacent to an obin face (local coordinate
 * 1 or binsperobin-2) has a ghost twin in the neighboring obin two slots
 * away (with periodic wrap across the whole grid). Copy this bin's count
 * into that twin. One thread per bin, 3D launch.
 * Bug fix: the y high-edge test took biny modulo binsperobinx instead of
 * binsperobiny, so it misfired whenever the per-obin bin counts differ
 * between x and y. */
__global__
void FillGhostBins(int binsperobinx, int binsperobiny, int binsperobinz,
	int nobinx, int nobiny, int nobinz, int* binsize)
{
	int binx =threadIdx.x+blockIdx.x*blockDim.x;
	int biny =threadIdx.y+blockIdx.y*blockDim.y;
	int binz =threadIdx.z+blockIdx.z*blockDim.z;
	int nbinx = nobinx*binsperobinx;
	int nbiny = nobiny*binsperobiny;
	int nbinz = nobinz*binsperobinz;
	if(binx < nbinx && biny < nbiny && binz < nbinz){
		int binidx = CalcGlobalIdx(binx,biny,binz,nobinx,nobiny,nobinz,
			binsperobinx,binsperobiny, binsperobinz);
		if(binx % binsperobinx == 1){	// low-edge interior bin in x
			int i = binx - 2;
			i = i<0 ? i+nbinx : i;	// periodic wrap
			int idxtoupdate = CalcGlobalIdx(i,biny,binz,nobinx,nobiny,nobinz,
				binsperobinx,binsperobiny, binsperobinz);
			binsize[idxtoupdate] = binsize[binidx];
		}
		if(binx % binsperobinx == binsperobinx-2){	// high-edge interior bin in x
			int i = binx + 2;
			i = (i==nbinx) ? i-nbinx : i;
			int idxtoupdate = CalcGlobalIdx(i,biny,binz,nobinx,nobiny,nobinz,
				binsperobinx,binsperobiny, binsperobinz);
			binsize[idxtoupdate] = binsize[binidx];
		}
		if(biny % binsperobiny == 1){	// low-edge interior bin in y
			int i = biny - 2;
			i = i<0 ? i+nbiny : i;
			int idxtoupdate = CalcGlobalIdx(binx,i,binz,nobinx,nobiny,nobinz,
				binsperobinx,binsperobiny, binsperobinz);
			binsize[idxtoupdate] = binsize[binidx];
		}
		if(biny % binsperobiny == binsperobiny-2){	// FIX: was biny % binsperobinx
			int i = biny + 2;
			i = (i==nbiny) ? i-nbiny : i;
			int idxtoupdate = CalcGlobalIdx(binx,i,binz,nobinx,nobiny,nobinz,
				binsperobinx,binsperobiny, binsperobinz);
			binsize[idxtoupdate] = binsize[binidx];
		}
		if(binz % binsperobinz == 1){	// low-edge interior bin in z
			int i = binz - 2;
			i = i<0 ? i+nbinz : i;
			int idxtoupdate = CalcGlobalIdx(binx,biny,i,nobinx,nobiny,nobinz,
				binsperobinx,binsperobiny, binsperobinz);
			binsize[idxtoupdate] = binsize[binidx];
		}
		if(binz % binsperobinz == binsperobinz-2){	// high-edge interior bin in z
			int i = binz + 2;
			i = (i==nbinz) ? i-nbinz : i;
			int idxtoupdate = CalcGlobalIdx(binx,biny,i,nobinx,nobiny,nobinz,
				binsperobinx,binsperobiny, binsperobinz);
			binsize[idxtoupdate] = binsize[binidx];
		}
	}
}
/* Scatter each point index into its bin-sorted slot for the ghost-padded
 * layout: recompute the ghost-shifted bin exactly as LocateNUptstoBins_ghost
 * does, then place i at bin_startpts[bin] plus its previously recorded
 * within-bin rank. Grid-stride, one thread per point. */
__global__
void CalcInvertofGlobalSortIdx_ghost(int M, int bin_size_x,
	int bin_size_y, int bin_size_z, int nobinx, int nobiny, int nobinz,
	int binsperobinx, int binsperobiny, int binsperobinz, int* bin_startpts,
	int* sortidx, FLT *x, FLT *y, FLT *z, int* index, int pirange, int nf1,
	int nf2, int nf3)
{
	for(int i=threadIdx.x+blockIdx.x*blockDim.x; i<M; i+=gridDim.x*blockDim.x){
		FLT px = RESCALE(x[i], nf1, pirange);
		FLT py = RESCALE(y[i], nf2, pirange);
		FLT pz = RESCALE(z[i], nf3, pirange);
		int bx = floor(px/bin_size_x);
		int by = floor(py/bin_size_y);
		int bz = floor(pz/bin_size_z);
		// Shift into ghost-padded coordinates (+1 skips the low ghost layer).
		bx = bx/(binsperobinx-2)*binsperobinx + (bx%(binsperobinx-2)+1);
		by = by/(binsperobiny-2)*binsperobiny + (by%(binsperobiny-2)+1);
		bz = bz/(binsperobinz-2)*binsperobinz + (bz%(binsperobinz-2)+1);
		int bin = CalcGlobalIdx(bx,by,bz,nobinx,nobiny,nobinz,binsperobinx,
			binsperobiny,binsperobinz);
		index[bin_startpts[bin]+sortidx[i]] = i;
	}
}
/* For each ghost bin (a bin on a face of an outer bin), duplicate the point
 * indices of its wrapped source bin (two slots away, periodic across the
 * grid) into the ghost bin's slot range. Each duplicate is encoded as
 * pt + M*(code0 + 3*code1 + 9*code2), where the per-dimension base-3 code is
 * 0 (no wrap), 1 (source wrapped from below the domain) or 2 (from above);
 * the BlockGather spread kernels decode this to choose the periodic image.
 * One thread per bin, 3D launch.
 * NOTE(review): assumes binsize/binstartpts of the source bin are final
 * (i.e. FillGhostBins and the scan ran first) -- confirm call order. */
__global__
void GhostBinPtsIdx(int binsperobinx, int binsperobiny, int binsperobinz,
	int nobinx, int nobiny, int nobinz, int* binsize, int* index,
	int* binstartpts, int M)
{
	int binx =threadIdx.x+blockIdx.x*blockDim.x;
	int biny =threadIdx.y+blockIdx.y*blockDim.y;
	int binz =threadIdx.z+blockIdx.z*blockDim.z;
	int nbinx = nobinx*binsperobinx;
	int nbiny = nobiny*binsperobiny;
	int nbinz = nobinz*binsperobinz;
	int i,j,k;
	int w = 0;	// becomes 1 iff this bin is a ghost bin
	int box[3];	// per-dimension wrap code (0/1/2, see header comment)
	if(binx < nbinx && biny < nbiny && binz < nbinz){
		box[0] = box[1] = box[2] = 0;
		i = binx;
		j = biny;
		k = binz;
		int binidx = CalcGlobalIdx(binx,biny,binz,nobinx,nobiny,nobinz,
			binsperobinx,binsperobiny,binsperobinz);
		if(binx % binsperobinx == 0){	// low-x face: source is 2 bins left
			i = binx - 2;
			box[0] = (i<0);	// wrapped below -> code 1
			i = i<0 ? i+nbinx : i;
			w=1;
		}
		if(binx % binsperobinx == binsperobinx-1){	// high-x face
			i = binx + 2;
			box[0] = (i>nbinx)*2;	// wrapped above -> code 2
			i = (i>nbinx) ? i-nbinx : i;	// i==nbinx cannot occur (nbinx%binsperobinx==0)
			w=1;
		}
		if(biny % binsperobiny == 0){	// low-y face
			j = biny - 2;
			box[1] = (j<0);
			j = j<0 ? j+nbiny : j;
			w=1;
		}
		if(biny % binsperobiny == binsperobiny-1){	// high-y face
			j = biny + 2;
			box[1] = (j>nbiny)*2;
			j = (j>nbiny) ? j-nbiny : j;
			w=1;
		}
		if(binz % binsperobinz == 0){	// low-z face
			k = binz - 2;
			box[2] = (k<0);
			k = k<0 ? k+nbinz : k;
			w=1;
		}
		if(binz % binsperobinz == binsperobinz-1){	// high-z face
			k = binz + 2;
			box[2] = (k>nbinz)*2;
			k = (k>nbinz) ? k-nbinz : k;
			w=1;
		}
		int corbinidx = CalcGlobalIdx(i,j,k,nobinx,nobiny,nobinz,
			binsperobinx,binsperobiny, binsperobinz);
		if(w==1){	// ghost bin: copy encoded point indices from the source bin
			for(int n = 0; n<binsize[binidx];n++){
				index[binstartpts[binidx]+n] = M*(box[0]+box[1]*3+box[2]*9) +
					index[binstartpts[corbinidx]+n];
			}
		}
	}
}
/* Number of subproblems per outer bin: total point count over the obin's
 * binsperobin inner bins, divided by maxsubprobsize, rounded up.
 * Fix: use exact integer ceil-division; ceil() on a float ratio can round
 * the wrong way once the count exceeds float's 24-bit integer precision. */
__global__
void CalcSubProb_3d_v1(int binsperobinx, int binsperobiny, int binsperobinz,
	int* bin_size, int* num_subprob, int maxsubprobsize, int numbins)
{
	for(int i=threadIdx.x+blockIdx.x*blockDim.x; i<numbins; i+=gridDim.x*
		blockDim.x){
		int numnupts = 0;
		int binsperobin = binsperobinx*binsperobiny*binsperobinz;
		for(int b = 0; b<binsperobin; b++){
			numnupts += bin_size[binsperobin*i+b];
		}
		num_subprob[i] = (numnupts + maxsubprobsize - 1) / maxsubprobsize;
	}
}
/* Inverse map for the BlockGather method: for each outer bin i, label its
 * d_numsubprob[i] subproblem slots (starting at d_subprobstartpts[i]) with
 * the owning obin id i. */
__global__
void MapBintoSubProb_3d_v1(int* d_subprob_to_obin, int* d_subprobstartpts,
	int* d_numsubprob,int numbins)
{
	for(int i=threadIdx.x+blockIdx.x*blockDim.x; i<numbins;
		i+=gridDim.x*blockDim.x){
		int base = d_subprobstartpts[i];
		int cnt = d_numsubprob[i];
		for(int j=0; j<cnt; j++)
			d_subprob_to_obin[base+j] = i;
	}
}
/* Block-gather spreading, direct (exp-sqrt) kernel evaluation.
 * One thread block per subproblem of an outer bin (obin); the gathered point
 * list (including ghost duplicates from GhostBinPtsIdx) contains every point
 * touching this obin. A duplicate index encodes its periodic image in base 3:
 * idxnupts[] = M*(code0 + 3*code1 + 9*code2) + pt, per-dimension code 0 = no
 * shift, 1 = one period down (-nf), 2 = one period up (+nf). The block
 * accumulates into a shared tile of exactly the obin (no halo; kernel support
 * clamped to the obin), then adds the tile into fw.
 * Bug fix: the decode mapped code 1 -> -1 but left code 2 as 2, i.e. a
 * +2*nf shift that pushed high-side ghost points outside the obin so their
 * contributions were clamped away; code 2 must decode to +1 period.
 * Launch: gridDim.x == total subproblems; dynamic shared memory >=
 * obin_size_x*obin_size_y*obin_size_z*sizeof(CUCPX). */
__global__
void Spread_3d_BlockGather(FLT *x, FLT *y, FLT *z, CUCPX *c, CUCPX *fw, int M,
	const int ns, int nf1, int nf2, int nf3, FLT es_c, FLT es_beta, FLT sigma,
	int* binstartpts, int obin_size_x, int obin_size_y, int obin_size_z,
	int binsperobin, int* subprob_to_bin, int* subprobstartpts,
	int maxsubprobsize, int nobinx, int nobiny, int nobinz, int* idxnupts,
	int pirange)
{
	extern __shared__ CUCPX fwshared[];
	int xstart,ystart,zstart,xend,yend,zend;
	int subpidx=blockIdx.x;
	int obidx=subprob_to_bin[subpidx];	// outer bin of this subproblem
	int bidx = obidx*binsperobin;	// first inner bin of the obin
	int obinsubp_idx=subpidx-subprobstartpts[obidx];	// subproblem rank in obin
	int ix, iy, iz;
	int outidx;
	int ptstart=binstartpts[bidx]+obinsubp_idx*maxsubprobsize;
	int nupts=min(maxsubprobsize, binstartpts[bidx+binsperobin]-binstartpts[bidx]
			-obinsubp_idx*maxsubprobsize);
	// Lower corner of this obin in fine-grid coordinates.
	int xoffset=(obidx % nobinx)*obin_size_x;
	int yoffset=(obidx / nobinx)%nobiny*obin_size_y;
	int zoffset=(obidx / (nobinx*nobiny))*obin_size_z;
	int N = obin_size_x*obin_size_y*obin_size_z;
	for(int i=threadIdx.x; i<N; i+=blockDim.x){
		fwshared[i].x = 0.0;
		fwshared[i].y = 0.0;
	}
	__syncthreads();	// tile fully zeroed before any accumulation
	FLT x_rescaled, y_rescaled, z_rescaled;
	CUCPX cnow;
	for(int i=threadIdx.x; i<nupts; i+=blockDim.x){
		int idx = ptstart+i;
		int b = idxnupts[idx]/M;	// base-3 periodic-image code
		int box[3];
		for(int d=0;d<3;d++){
			box[d] = b%3;
			if(box[d] == 1)
				box[d] = -1;	// wrapped from below: one period down
			if(box[d] == 2)
				box[d] = 1;	// wrapped from above: one period up (fix)
			b=b/3;
		}
		int ii = idxnupts[idx]%M;	// real point index
		x_rescaled = RESCALE(x[ii],nf1,pirange) + box[0]*nf1;
		y_rescaled = RESCALE(y[ii],nf2,pirange) + box[1]*nf2;
		z_rescaled = RESCALE(z[ii],nf3,pirange) + box[2]*nf3;
		cnow = c[ii];
		// Kernel support clamped to this obin (tile has no halo).
		xstart = ceil(x_rescaled - ns/2.0)-xoffset;
		xstart = xstart < 0 ? 0 : xstart;
		ystart = ceil(y_rescaled - ns/2.0)-yoffset;
		ystart = ystart < 0 ? 0 : ystart;
		zstart = ceil(z_rescaled - ns/2.0)-zoffset;
		zstart = zstart < 0 ? 0 : zstart;
		xend = floor(x_rescaled + ns/2.0)-xoffset;
		xend = xend >= obin_size_x ? obin_size_x-1 : xend;
		yend = floor(y_rescaled + ns/2.0)-yoffset;
		yend = yend >= obin_size_y ? obin_size_y-1 : yend;
		zend = floor(z_rescaled + ns/2.0)-zoffset;
		zend = zend >= obin_size_z ? obin_size_z-1 : zend;
		for(int zz=zstart; zz<=zend; zz++){
			FLT disz=abs(z_rescaled-(zz+zoffset));
			FLT kervalue3 = evaluate_kernel(disz, es_c, es_beta);
			for(int yy=ystart; yy<=yend; yy++){
				FLT disy=abs(y_rescaled-(yy+yoffset));
				FLT kervalue2 = evaluate_kernel(disy, es_c, es_beta);
				for(int xx=xstart; xx<=xend; xx++){
					outidx = xx+yy*obin_size_x+zz*obin_size_y*obin_size_x;
					FLT disx=abs(x_rescaled-(xx+xoffset));
					FLT kervalue1 = evaluate_kernel(disx, es_c, es_beta);
					atomicAdd(&fwshared[outidx].x, cnow.x*kervalue1*kervalue2*
						kervalue3);
					atomicAdd(&fwshared[outidx].y, cnow.y*kervalue1*kervalue2*
						kervalue3);
				}
			}
		}
	}
	__syncthreads();	// all points spread before flushing the tile
	/* add the obin tile into global memory; no wrap applied here, which
	 * assumes the obins tile the fine grid exactly -- TODO(review): confirm */
	for(int n=threadIdx.x; n<N; n+=blockDim.x){
		int i = n%obin_size_x;
		int j = (n/obin_size_x)%obin_size_y;
		int k = n/(obin_size_x*obin_size_y);
		ix = xoffset+i;
		iy = yoffset+j;
		iz = zoffset+k;
		outidx = ix+iy*nf1+iz*nf1*nf2;
		atomicAdd(&fw[outidx].x, fwshared[n].x);
		atomicAdd(&fw[outidx].y, fwshared[n].y);
	}
}
/* Block-gather spreading, Horner (piecewise-polynomial) kernel evaluation.
 * Same decomposition as Spread_3d_BlockGather: one block per obin
 * subproblem; points (including ghost duplicates whose index encodes the
 * periodic image in base 3) are spread into a shared tile of exactly the
 * obin, with the kernel support clamped to the obin, then the tile is added
 * into fw.
 * Bug fix: decode periodic-image code 2 as a +1 period shift (it was left
 * as 2, shifting the point two periods so its clamped support was empty and
 * the contribution silently dropped). Dead #if 0 branch removed.
 * Launch: gridDim.x == total subproblems; dynamic shared memory >=
 * obin_size_x*obin_size_y*obin_size_z*sizeof(CUCPX). */
__global__
void Spread_3d_BlockGather_Horner(FLT *x, FLT *y, FLT *z, CUCPX *c, CUCPX *fw, int M,
	const int ns, int nf1, int nf2, int nf3, FLT es_c, FLT es_beta, FLT sigma,
	int* binstartpts, int obin_size_x, int obin_size_y, int obin_size_z,
	int binsperobin, int* subprob_to_bin, int* subprobstartpts,
	int maxsubprobsize, int nobinx, int nobiny, int nobinz, int* idxnupts,
	int pirange)
{
	extern __shared__ CUCPX fwshared[];
	int xstart,ystart,zstart,xend,yend,zend;
	int subpidx=blockIdx.x;
	int obidx=subprob_to_bin[subpidx];	// outer bin of this subproblem
	int bidx = obidx*binsperobin;	// first inner bin of the obin
	int obinsubp_idx=subpidx-subprobstartpts[obidx];	// subproblem rank in obin
	int ix, iy, iz;
	int outidx;
	int ptstart=binstartpts[bidx]+obinsubp_idx*maxsubprobsize;
	int nupts=min(maxsubprobsize, binstartpts[bidx+binsperobin]-binstartpts[bidx]
			-obinsubp_idx*maxsubprobsize);
	// Lower corner of this obin in fine-grid coordinates.
	int xoffset=(obidx%nobinx)*obin_size_x;
	int yoffset=(obidx/nobinx)%nobiny*obin_size_y;
	int zoffset=(obidx/(nobinx*nobiny))*obin_size_z;
	int N = obin_size_x*obin_size_y*obin_size_z;
	FLT ker1[MAX_NSPREAD];
	FLT ker2[MAX_NSPREAD];
	FLT ker3[MAX_NSPREAD];
	for(int i=threadIdx.x; i<N; i+=blockDim.x){
		fwshared[i].x = 0.0;
		fwshared[i].y = 0.0;
	}
	__syncthreads();	// tile fully zeroed before any accumulation
	FLT x_rescaled, y_rescaled, z_rescaled;
	CUCPX cnow;
	for(int i=threadIdx.x; i<nupts; i+=blockDim.x){
		int nidx = idxnupts[ptstart+i];
		int b = nidx/M;	// base-3 periodic-image code
		int box[3];
		for(int d=0;d<3;d++){
			box[d] = b%3;
			if(box[d] == 1)
				box[d] = -1;	// wrapped from below: one period down
			if(box[d] == 2)
				box[d] = 1;	// wrapped from above: one period up (fix)
			b=b/3;
		}
		int ii = nidx%M;	// real point index
		x_rescaled = RESCALE(x[ii],nf1,pirange) + box[0]*nf1;
		y_rescaled = RESCALE(y[ii],nf2,pirange) + box[1]*nf2;
		z_rescaled = RESCALE(z[ii],nf3,pirange) + box[2]*nf3;
		cnow = c[ii];
		// Kernel support clamped to this obin (tile has no halo).
		xstart = ceil(x_rescaled - ns/2.0)-xoffset;
		xstart = xstart < 0 ? 0 : xstart;
		ystart = ceil(y_rescaled - ns/2.0)-yoffset;
		ystart = ystart < 0 ? 0 : ystart;
		zstart = ceil(z_rescaled - ns/2.0)-zoffset;
		zstart = zstart < 0 ? 0 : zstart;
		xend = floor(x_rescaled + ns/2.0)-xoffset;
		xend = xend >= obin_size_x ? obin_size_x-1 : xend;
		yend = floor(y_rescaled + ns/2.0)-yoffset;
		yend = yend >= obin_size_y ? obin_size_y-1 : yend;
		zend = floor(z_rescaled + ns/2.0)-zoffset;
		zend = zend >= obin_size_z ? obin_size_z-1 : zend;
		// 1D kernels tabulated from the (clamped) support's first sample.
		eval_kernel_vec_Horner(ker1,xstart+xoffset-x_rescaled,ns,sigma);
		eval_kernel_vec_Horner(ker2,ystart+yoffset-y_rescaled,ns,sigma);
		eval_kernel_vec_Horner(ker3,zstart+zoffset-z_rescaled,ns,sigma);
		for(int zz=zstart; zz<=zend; zz++){
			FLT kervalue3 = ker3[zz-zstart];
			for(int yy=ystart; yy<=yend; yy++){
				FLT kervalue2 = ker2[yy-ystart];
				for(int xx=xstart; xx<=xend; xx++){
					outidx = xx+yy*obin_size_x+zz*obin_size_y*obin_size_x;
					FLT kervalue1 = ker1[xx-xstart];
					atomicAdd(&fwshared[outidx].x, cnow.x*kervalue1*kervalue2*
						kervalue3);
					atomicAdd(&fwshared[outidx].y, cnow.y*kervalue1*kervalue2*
						kervalue3);
				}
			}
		}
	}
	__syncthreads();	// all points spread before flushing the tile
	/* add the obin tile into global memory; no wrap applied here, which
	 * assumes the obins tile the fine grid exactly -- TODO(review): confirm */
	for(int n=threadIdx.x; n<N; n+=blockDim.x){
		int i = n%obin_size_x;
		int j = (n/obin_size_x)%obin_size_y;
		int k = n/(obin_size_x*obin_size_y);
		ix = xoffset+i;
		iy = yoffset+j;
		iz = zoffset+k;
		outidx = ix+iy*nf1+iz*nf1*nf2;
		atomicAdd(&fw[outidx].x, fwshared[n].x);
		atomicAdd(&fw[outidx].y, fwshared[n].y);
	}
}
/* ---------------------- 3d Interpolation Kernels ---------------------------*/
/* Kernels for NUptsdriven Method */
/* Point-driven interpolation, direct (exp-sqrt) kernel evaluation: each
 * thread gathers the ns^3 neighborhood of the fine grid fw around its
 * nonuniform point (periodic wrap at grid edges) and writes the weighted sum
 * to c. No atomics: each thread owns its output point. */
__global__
void Interp_3d_NUptsdriven(FLT *x, FLT *y, FLT *z, CUCPX *c, CUCPX *fw, int M,
	const int ns, int nf1, int nf2, int nf3, FLT es_c, FLT es_beta,
	int *idxnupts, int pirange)
{
	for(int i=blockDim.x*blockIdx.x+threadIdx.x; i<M; i+=blockDim.x*gridDim.x){
		int pt = idxnupts[i];
		FLT px = RESCALE(x[pt], nf1, pirange);
		FLT py = RESCALE(y[pt], nf2, pirange);
		FLT pz = RESCALE(z[pt], nf3, pirange);
		// ns-wide kernel support centred at the point.
		int xstart = ceil(px - ns/2.0);
		int ystart = ceil(py - ns/2.0);
		int zstart = ceil(pz - ns/2.0);
		int xend = floor(px + ns/2.0);
		int yend = floor(py + ns/2.0);
		int zend = floor(pz + ns/2.0);
		FLT sumre = 0.0;
		FLT sumim = 0.0;
		for(int zz=zstart; zz<=zend; zz++){
			FLT kz = evaluate_kernel(abs(pz-zz), es_c, es_beta);
			int iz = zz < 0 ? zz+nf3 : (zz>nf3-1 ? zz-nf3 : zz);	// periodic wrap
			for(int yy=ystart; yy<=yend; yy++){
				FLT ky = evaluate_kernel(abs(py-yy), es_c, es_beta);
				int iy = yy < 0 ? yy+nf2 : (yy>nf2-1 ? yy-nf2 : yy);
				for(int xx=xstart; xx<=xend; xx++){
					int ix = xx < 0 ? xx+nf1 : (xx>nf1-1 ? xx-nf1 : xx);
					int inidx = ix+iy*nf1+iz*nf2*nf1;
					FLT kx = evaluate_kernel(abs(px-xx), es_c, es_beta);
					// Same multiply order as before (float associativity).
					sumre += fw[inidx].x*kx*ky*kz;
					sumim += fw[inidx].y*kx*ky*kz;
				}
			}
		}
		c[pt].x = sumre;
		c[pt].y = sumim;
	}
}
/* Point-driven interpolation, Horner kernel evaluation: tabulate the three
 * separable 1D kernels once per point, then gather the ns^3 fine-grid
 * neighborhood (periodic wrap) into c[idxnupts[i]]. No atomics needed. */
__global__
void Interp_3d_NUptsdriven_Horner(FLT *x, FLT *y, FLT *z, CUCPX *c, CUCPX *fw,
	int M, const int ns, int nf1, int nf2, int nf3, FLT sigma, int *idxnupts,
	int pirange)
{
	FLT ker1[MAX_NSPREAD];
	FLT ker2[MAX_NSPREAD];
	FLT ker3[MAX_NSPREAD];
	for(int i=blockDim.x*blockIdx.x+threadIdx.x; i<M; i+=blockDim.x*gridDim.x){
		int pt = idxnupts[i];
		FLT px = RESCALE(x[pt], nf1, pirange);
		FLT py = RESCALE(y[pt], nf2, pirange);
		FLT pz = RESCALE(z[pt], nf3, pirange);
		// ns-wide kernel support centred at the point.
		int xstart = ceil(px - ns/2.0);
		int ystart = ceil(py - ns/2.0);
		int zstart = ceil(pz - ns/2.0);
		int xend = floor(px + ns/2.0);
		int yend = floor(py + ns/2.0);
		int zend = floor(pz + ns/2.0);
		eval_kernel_vec_Horner(ker1,xstart-px,ns,sigma);
		eval_kernel_vec_Horner(ker2,ystart-py,ns,sigma);
		eval_kernel_vec_Horner(ker3,zstart-pz,ns,sigma);
		FLT sumre = 0.0;
		FLT sumim = 0.0;
		for(int zz=zstart; zz<=zend; zz++){
			FLT kz = ker3[zz-zstart];
			int iz = zz < 0 ? zz+nf3 : (zz>nf3-1 ? zz-nf3 : zz);	// periodic wrap
			for(int yy=ystart; yy<=yend; yy++){
				FLT ky = ker2[yy-ystart];
				int iy = yy < 0 ? yy+nf2 : (yy>nf2-1 ? yy-nf2 : yy);
				for(int xx=xstart; xx<=xend; xx++){
					int ix = xx < 0 ? xx+nf1 : (xx>nf1-1 ? xx-nf1 : xx);
					int inidx = ix+iy*nf1+iz*nf2*nf1;
					FLT kx = ker1[xx-xstart];
					// Same multiply order as before (float associativity).
					sumre += fw[inidx].x*kx*ky*kz;
					sumim += fw[inidx].y*kx*ky*kz;
				}
			}
		}
		c[pt].x = sumre;
		c[pt].y = sumim;
	}
}
/* Kernels for SubProb Method */
/* Subproblem interpolation, direct (exp-sqrt) kernel evaluation.
 * One thread block per subproblem: stage the bin's padded region of fw
 * (bin + ceil(ns/2) halo per side, with periodic wrap) into shared memory,
 * then each thread gathers the ns^3 neighborhood of its points from the
 * shared tile and writes the weighted sums to c.
 * Launch contract: gridDim.x == total subproblem count; dynamic shared memory
 * >= (bin_size_x+2*ceil(ns/2))*(bin_size_y+2*ceil(ns/2))*
 *    (bin_size_z+2*ceil(ns/2)) * sizeof(CUCPX). */
__global__
void Interp_3d_Subprob(FLT *x, FLT *y, FLT *z, CUCPX *c, CUCPX *fw,
	int M, const int ns, int nf1, int nf2, int nf3, FLT es_c, FLT es_beta,
	int* binstartpts, int* bin_size, int bin_size_x, int bin_size_y,
	int bin_size_z, int* subprob_to_bin, int* subprobstartpts, int* numsubprob,
	int maxsubprobsize, int nbinx, int nbiny, int nbinz, int* idxnupts,
	int pirange)
{
	extern __shared__ CUCPX fwshared[];
	int xstart,ystart,xend,yend,zstart,zend;
	int subpidx=blockIdx.x;
	int bidx=subprob_to_bin[subpidx];	// bin owning this subproblem
	int binsubp_idx=subpidx-subprobstartpts[bidx];	// subproblem rank within the bin
	int ix, iy, iz;
	int outidx;
	int ptstart=binstartpts[bidx]+binsubp_idx*maxsubprobsize;	// first point of this chunk
	int nupts=min(maxsubprobsize, bin_size[bidx]-binsubp_idx*maxsubprobsize);
	// Lower corner of this bin in fine-grid coordinates.
	int xoffset=(bidx % nbinx)*bin_size_x;
	int yoffset=((bidx / nbinx)%nbiny)*bin_size_y;
	int zoffset=(bidx/ (nbinx*nbiny))*bin_size_z;
	// Shared-tile cell count: bin plus a ceil(ns/2) halo on each side.
	int N = (bin_size_x+2*ceil(ns/2.0))*(bin_size_y+2*ceil(ns/2.0))*
		(bin_size_z+2*ceil(ns/2.0));
#if 1
	// Stage the padded bin region of fw into shared memory (periodic wrap).
	for(int n=threadIdx.x;n<N; n+=blockDim.x){
		int i = n % (int) (bin_size_x+2*ceil(ns/2.0) );
		int j = (int) (n /(bin_size_x+2*ceil(ns/2.0))) % (int) (bin_size_y+2*ceil(ns/2.0));
		int k = n / ((bin_size_x+2*ceil(ns/2.0))*(bin_size_y+2*ceil(ns/2.0)));
		ix = xoffset-ceil(ns/2.0)+i;
		iy = yoffset-ceil(ns/2.0)+j;
		iz = zoffset-ceil(ns/2.0)+k;
		if(ix<(nf1+ceil(ns/2.0)) && iy<(nf2+ceil(ns/2.0)) && iz<(nf3+ceil(ns/2.0))){
			ix = ix < 0 ? ix+nf1 : (ix>nf1-1 ? ix-nf1 : ix);
			iy = iy < 0 ? iy+nf2 : (iy>nf2-1 ? iy-nf2 : iy);
			iz = iz < 0 ? iz+nf3 : (iz>nf3-1 ? iz-nf3 : iz);
			outidx = ix+iy*nf1+iz*nf1*nf2;
			int sharedidx=i+j*(bin_size_x+ceil(ns/2.0)*2)+
				k*(bin_size_x+ceil(ns/2.0)*2)*(bin_size_y+ceil(ns/2.0)*2);
			fwshared[sharedidx].x = fw[outidx].x;
			fwshared[sharedidx].y = fw[outidx].y;
		}
	}
#endif
	__syncthreads();	// tile fully staged before any gathering
	FLT x_rescaled, y_rescaled, z_rescaled;
	CUCPX cnow;
	for(int i=threadIdx.x; i<nupts; i+=blockDim.x){
		int idx = ptstart+i;
		x_rescaled=RESCALE(x[idxnupts[idx]], nf1, pirange);
		y_rescaled=RESCALE(y[idxnupts[idx]], nf2, pirange);
		z_rescaled=RESCALE(z[idxnupts[idx]], nf3, pirange);
		cnow.x = 0.0;
		cnow.y = 0.0;
		// Kernel support in tile-local coordinates.
		xstart = ceil(x_rescaled - ns/2.0)-xoffset;
		ystart = ceil(y_rescaled - ns/2.0)-yoffset;
		zstart = ceil(z_rescaled - ns/2.0)-zoffset;
		xend = floor(x_rescaled + ns/2.0)-xoffset;
		yend = floor(y_rescaled + ns/2.0)-yoffset;
		zend = floor(z_rescaled + ns/2.0)-zoffset;
		for (int zz=zstart; zz<=zend; zz++){
			// NOTE(review): zz is tile-local (zoffset subtracted) but
			// z_rescaled is global; the spread counterpart evaluates at
			// zstart+zoffset-z_rescaled. Looks like a missing +zoffset here
			// (same for disy/disx below) -- verify against upstream.
			FLT disz=abs(z_rescaled-zz);
			FLT kervalue3 = evaluate_kernel(disz, es_c, es_beta);
			iz = zz+ceil(ns/2.0);	// shift into padded tile coordinates
			for(int yy=ystart; yy<=yend; yy++){
				FLT disy=abs(y_rescaled-yy);
				FLT kervalue2 = evaluate_kernel(disy, es_c, es_beta);
				iy = yy+ceil(ns/2.0);
				for(int xx=xstart; xx<=xend; xx++){
					ix = xx+ceil(ns/2.0);
					outidx = ix+iy*(bin_size_x+ceil(ns/2.0)*2)+
						iz*(bin_size_x+ceil(ns/2.0)*2)*
						(bin_size_y+ceil(ns/2.0)*2);
					FLT disx=abs(x_rescaled-xx);
					FLT kervalue1 = evaluate_kernel(disx, es_c, es_beta);
					cnow.x += fwshared[outidx].x*kervalue1*kervalue2*kervalue3;
					cnow.y += fwshared[outidx].y*kervalue1*kervalue2*kervalue3;
				}
			}
		}
		c[idxnupts[idx]].x = cnow.x;
		c[idxnupts[idx]].y = cnow.y;
	}
}
/* Type-2 (fine grid -> nonuniform points) 3D interpolation, subproblem
   method, with piecewise-polynomial (Horner) evaluation of the spreading
   kernel. One block handles one subproblem: it stages the padded tile of
   the fine grid fw for its bin into dynamic shared memory, then each
   thread accumulates the kernel-weighted sum for its points into c.
   NOTE(review): ceil(ns/2.0) returns double and is mixed into int
   arithmetic with implicit truncation throughout — benign here but worth
   confirming against the launch-side shared-memory sizing. */
__global__
void Interp_3d_Subprob_Horner(FLT *x, FLT *y, FLT *z, CUCPX *c, CUCPX *fw,
int M, const int ns, int nf1, int nf2, int nf3, FLT sigma, int* binstartpts,
int* bin_size, int bin_size_x, int bin_size_y, int bin_size_z,
int* subprob_to_bin, int* subprobstartpts, int* numsubprob,
int maxsubprobsize, int nbinx, int nbiny, int nbinz, int* idxnupts,
int pirange)
{
extern __shared__ CUCPX fwshared[];
int xstart,ystart,xend,yend,zstart,zend;
// Map this block to its (bin, subproblem-within-bin) pair.
int subpidx=blockIdx.x;
int bidx=subprob_to_bin[subpidx];
int binsubp_idx=subpidx-subprobstartpts[bidx];
int ix, iy, iz;
int outidx;
// Range of nonuniform points this subproblem owns (at most maxsubprobsize).
int ptstart=binstartpts[bidx]+binsubp_idx*maxsubprobsize;
int nupts=min(maxsubprobsize, bin_size[bidx]-binsubp_idx*maxsubprobsize);
// Origin of this bin in fine-grid coordinates.
int xoffset=(bidx % nbinx)*bin_size_x;
int yoffset=((bidx / nbinx)%nbiny)*bin_size_y;
int zoffset=(bidx/ (nbinx*nbiny))*bin_size_z;
// Padded tile: the bin plus a ceil(ns/2) halo on every side.
int N = (bin_size_x+2*ceil(ns/2.0))*(bin_size_y+2*ceil(ns/2.0))*
(bin_size_z+2*ceil(ns/2.0));
// Cooperative load of the tile into shared memory, wrapping periodically
// at the fine-grid edges.
for(int n=threadIdx.x;n<N; n+=blockDim.x){
int i = n % (int) (bin_size_x+2*ceil(ns/2.0) );
int j = (int) (n /(bin_size_x+2*ceil(ns/2.0))) % (int) (bin_size_y+2*ceil(ns/2.0));
int k = n / ((bin_size_x+2*ceil(ns/2.0))*(bin_size_y+2*ceil(ns/2.0)));
ix = xoffset-ceil(ns/2.0)+i;
iy = yoffset-ceil(ns/2.0)+j;
iz = zoffset-ceil(ns/2.0)+k;
if(ix<(nf1+ceil(ns/2.0)) && iy<(nf2+ceil(ns/2.0)) && iz<(nf3+ceil(ns/2.0))){
ix = ix < 0 ? ix+nf1 : (ix>nf1-1 ? ix-nf1 : ix);
iy = iy < 0 ? iy+nf2 : (iy>nf2-1 ? iy-nf2 : iy);
iz = iz < 0 ? iz+nf3 : (iz>nf3-1 ? iz-nf3 : iz);
outidx = ix+iy*nf1+iz*nf1*nf2;
int sharedidx=i+j*(bin_size_x+ceil(ns/2.0)*2)+
k*(bin_size_x+ceil(ns/2.0)*2)*(bin_size_y+ceil(ns/2.0)*2);
fwshared[sharedidx].x = fw[outidx].x;
fwshared[sharedidx].y = fw[outidx].y;
}
}
// All threads must see the complete tile before interpolating from it.
__syncthreads();
FLT ker1[MAX_NSPREAD];
FLT ker2[MAX_NSPREAD];
FLT ker3[MAX_NSPREAD];
FLT x_rescaled, y_rescaled, z_rescaled;
CUCPX cnow;
for(int i=threadIdx.x; i<nupts; i+=blockDim.x){
int idx = ptstart+i;
x_rescaled=RESCALE(x[idxnupts[idx]], nf1, pirange);
y_rescaled=RESCALE(y[idxnupts[idx]], nf2, pirange);
z_rescaled=RESCALE(z[idxnupts[idx]], nf3, pirange);
cnow.x = 0.0;
cnow.y = 0.0;
// Kernel support [ceil(p-ns/2), floor(p+ns/2)], shifted into tile coords.
xstart = ceil(x_rescaled - ns/2.0)-xoffset;
ystart = ceil(y_rescaled - ns/2.0)-yoffset;
zstart = ceil(z_rescaled - ns/2.0)-zoffset;
xend = floor(x_rescaled + ns/2.0)-xoffset;
yend = floor(y_rescaled + ns/2.0)-yoffset;
zend = floor(z_rescaled + ns/2.0)-zoffset;
// Evaluate the 1-D kernel weights once per axis via Horner polynomials.
eval_kernel_vec_Horner(ker1,xstart+xoffset-x_rescaled,ns,sigma);
eval_kernel_vec_Horner(ker2,ystart+yoffset-y_rescaled,ns,sigma);
eval_kernel_vec_Horner(ker3,zstart+zoffset-z_rescaled,ns,sigma);
// Accumulate the separable 3-D tensor-product sum from the shared tile.
for (int zz=zstart; zz<=zend; zz++){
FLT kervalue3 = ker3[zz-zstart];
iz = zz+ceil(ns/2.0);
for(int yy=ystart; yy<=yend; yy++){
FLT kervalue2 = ker2[yy-ystart];
iy = yy+ceil(ns/2.0);
for(int xx=xstart; xx<=xend; xx++){
ix = xx+ceil(ns/2.0);
outidx = ix+iy*(bin_size_x+ceil(ns/2.0)*2)+
iz*(bin_size_x+ceil(ns/2.0)*2)*
(bin_size_y+ceil(ns/2.0)*2);
FLT kervalue1 = ker1[xx-xstart];
cnow.x += fwshared[outidx].x*kervalue1*kervalue2*kervalue3;
cnow.y += fwshared[outidx].y*kervalue1*kervalue2*kervalue3;
}
}
}
c[idxnupts[idx]].x = cnow.x;
c[idxnupts[idx]].y = cnow.y;
}
}
#if 0
/* Disabled (dead) code: an alternative hierarchical binning scheme that
   groups fine bins into "outer" bins.
   NOTE(review): LocateNUptstoBins below references `pirange`, which is not
   a parameter or visible variable in this region, so the code cannot
   compile as written — likely the reason the region is compiled out. */
// This kernels assumes that number of bins less than #maxnumberofthreads in
// each dim
__global__
void CalcSubProb_3d(int bin_size_x, int bin_size_y, int bin_size_z,
int o_bin_size_x, int o_bin_size_y, int o_bin_size_z, int nbinx, int nbiny,
int nbinz, int nobinx, int nobiny, int nobinz, int* bin_size,
int* num_subprob, int* num_nupts, int maxsubprobsize)
{
// Per outer bin: count the nonuniform points in its fine bins plus a
// one-bin halo (wrapped periodically) and derive the subproblem count.
int numNUpts = 0;
int xbinstart, xbinend, ybinstart, ybinend, zbinstart, zbinend;
int xobin, yobin, zobin;
xobin = threadIdx.x+blockIdx.x*blockDim.x;
yobin = threadIdx.y+blockIdx.y*blockDim.y;
zobin = threadIdx.z+blockIdx.z*blockDim.z;
int nbins_obin_x, nbins_obin_y, nbins_obin_z;
nbins_obin_x = o_bin_size_x/bin_size_x;
nbins_obin_y = o_bin_size_y/bin_size_y;
nbins_obin_z = o_bin_size_z/bin_size_z;
if(xobin < nobinx && yobin < nobiny && zobin < nobinz){
xbinstart = xobin*nbins_obin_x-1;
xbinend = (xobin+1)*nbins_obin_x;
ybinstart = yobin*nbins_obin_y-1;
ybinend = (yobin+1)*nbins_obin_y;
zbinstart = zobin*nbins_obin_z-1;
zbinend = (zobin+1)*nbins_obin_z;
int ix, iy, iz;
// NOTE(review): this kernel wraps on `k == nbinz` while
// MapBintoSubProb_3d below wraps on `iz == nbinz` — the two disagree;
// verify before ever re-enabling this region.
for(int k = zbinstart; k<= zbinend; k++){
iz = (k < 0) ? k + nbinz : k;
iz = (k == nbinz) ? k - nbinz : iz;
for(int j=ybinstart; j<= ybinend; j++){
iy = (j < 0) ? j + nbiny : j;
iy = (j == nbiny) ? j - nbiny : iy;
for(int i=xbinstart; i<= xbinend; i++){
ix = (i < 0) ? i + nbinx : i;
ix = (i == nbinx) ? i - nbinx : ix;
int binidx = ix+iy*nbinx+iz*nbiny*nbinx;
numNUpts += bin_size[binidx];
//numSubProbs += ceil(bin_size[binidx]/
//(float) maxsubprobsize);
}
}
}
int obinidx = xobin + yobin*nobinx + zobin*nobiny*nobinx;
num_subprob[obinidx] = ceil(numNUpts/ (float) maxsubprobsize);
//num_subprob[obinidx] = numSubProbs;
num_nupts[obinidx] = numNUpts;
}
}
__global__
void MapBintoSubProb_3d(int* d_subprobstartpts, int* d_subprob_to_bin,
int* d_subprob_to_nupts, int bin_size_x, int bin_size_y, int bin_size_z,
int o_bin_size_x, int o_bin_size_y, int o_bin_size_z, int nbinx,
int nbiny, int nbinz, int nobinx, int nobiny, int nobinz, int* bin_size,
int* num_subprob, int* num_nupts, int maxsubprobsize)
{
// Split each outer bin's point total into subproblems of at most
// maxsubprobsize points, recording (bin, start offset) per subproblem.
int numNUpts = 0;
int s = 0;
int xbinstart, xbinend, ybinstart, ybinend, zbinstart, zbinend;
int xobin, yobin, zobin;
xobin = threadIdx.x+blockIdx.x*blockDim.x;
yobin = threadIdx.y+blockIdx.y*blockDim.y;
zobin = threadIdx.z+blockIdx.z*blockDim.z;
int nbins_obin_x, nbins_obin_y, nbins_obin_z;
nbins_obin_x = o_bin_size_x/bin_size_x;
nbins_obin_y = o_bin_size_y/bin_size_y;
nbins_obin_z = o_bin_size_z/bin_size_z;
if(xobin < nobinx && yobin < nobiny && zobin < nobinz){
int obinidx = xobin + yobin*nobinx + zobin*nobiny*nobinx;
int startsubprob = d_subprobstartpts[obinidx];
xbinstart = xobin*nbins_obin_x-1;
xbinend = (xobin+1)*nbins_obin_x;
ybinstart = yobin*nbins_obin_y-1;
ybinend = (yobin+1)*nbins_obin_y;
zbinstart = zobin*nbins_obin_z-1;
zbinend = (zobin+1)*nbins_obin_z;
int ix, iy, iz;
for(int k = zbinstart; k<= zbinend; k++){
iz = (k < 0) ? k + nbinz : k;
iz = (iz == nbinz) ? iz - nbinz : iz;
for(int j=ybinstart; j<= ybinend; j++){
iy = (j < 0) ? j + nbiny : j;
iy = (iy == nbiny) ? iy - nbiny : iy;
for(int i=xbinstart; i<= xbinend; i++){
ix = (i < 0) ? i + nbinx : i;
ix = (ix == nbinx) ? ix - nbinx : ix;
int binidx = ix+iy*nbinx+iz*nbiny*nbinx;
int numNUptsold = numNUpts - maxsubprobsize;
numNUpts += bin_size[binidx];
if(s == 0 && numNUpts > 0){
numNUptsold += maxsubprobsize;
d_subprob_to_bin[startsubprob+s] = binidx;
d_subprob_to_nupts[startsubprob+s] = 0;
s++;
}
while( numNUpts >= maxsubprobsize ){
numNUptsold += maxsubprobsize;
d_subprob_to_bin [startsubprob+s] = binidx;
d_subprob_to_nupts[startsubprob+s] = numNUptsold;
numNUpts -= maxsubprobsize;
s++;
}
}
}
}
}
}
__global__
void LocateNUptstoBins(int M, int nf1, int nf2, int nf3, int bin_size_x,
int bin_size_y, int bin_size_z, int nbinx, int nbiny, int nbinz,
int* bin_size, FLT *x, FLT *y, FLT *z, int* sortidx)
{
// Histogram pass: count points per bin (atomicAdd) and remember each
// point's slot within its bin for a later reordering pass.
int binidx,binx,biny,binz;
int oldidx;
FLT x_rescaled,y_rescaled,z_rescaled;
for(int i=threadIdx.x+blockIdx.x*blockDim.x; i<M; i+=gridDim.x*blockDim.x){
x_rescaled=RESCALE(x[i], nf1, pirange);
y_rescaled=RESCALE(y[i], nf2, pirange);
z_rescaled=RESCALE(z[i], nf3, pirange);
binx = floor(x_rescaled/bin_size_x);
biny = floor(y_rescaled/bin_size_y);
binz = floor(z_rescaled/bin_size_z);
binidx = binx+biny*nbinx+binz*nbinx*nbiny;
oldidx = atomicAdd(&bin_size[binidx], 1);
sortidx[i] = oldidx;
}
}
#endif
|
0bd64d8bb465c936dc73a867f4aa705b100cf45d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <gl/freeglut.h>
#include <stdio.h>
#include <omp.h>
#include <math.h>
#define WIDTH 800
#define HEIGHT 800
#define ITERATIONS 5000
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Report a HIP API failure with its source location; optionally terminate
// the process with the error code as exit status.
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort = true)
{
if (code == hipSuccess)
return;
fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort)
exit(code);
}
// RGB triple; components are stored in [0,255] and scaled to [0,1] in
// setUpColor before being handed to OpenGL.
typedef struct rgb_color {
float r;
float g;
float b;
}color;
color colors[16]; // 16-entry escape-time palette (filled by initColors)
color pixels[WIDTH*HEIGHT]; // RGB frame buffer passed to glDrawPixels
// Current complex-plane viewing window (panned/zoomed by the key handlers).
double real_min = -2, real_max = 1;
double img_min = -1.5, img_max = 1.5;
int MAX_ITER = ITERATIONS; int iter_step = 10; // escape cap and its per-key delta
int window; // GLUT window handle
int size = HEIGHT * WIDTH * sizeof(int); // device allocation size in bytes
int *iters = (int *)malloc(size); // host-side iteration counts
int *iters_d; // device-side iteration counts
int arr_size = WIDTH * HEIGHT; // element count of iters/iters_d
int num_threads = 512; // threads per block
int block_size = 1250; // block count (WIDTH*HEIGHT / num_threads)
double t_start, t_end; // per-frame timing via omp_get_wtime
// Populate the 16-entry Mandelbrot palette with 0-255 RGB components.
void initColors() {
static const float palette[16][3] = {
{66, 30, 15}, {25, 7, 26}, {9, 1, 47}, {4, 4, 73},
{0, 7, 100}, {12, 44, 138}, {24, 82, 177}, {57, 125, 209},
{134, 181, 229}, {211, 236, 248}, {241, 233, 191}, {248, 201, 95},
{255, 170, 0}, {204, 128, 0}, {153, 87, 0}, {106, 52, 3}
};
for (int c = 0; c < 16; ++c) {
colors[c].r = palette[c][0];
colors[c].g = palette[c][1];
colors[c].b = palette[c][2];
}
}
// Convert the per-pixel iteration counts (iters) into RGB float pixels
// for glDrawPixels.
// NOTE(review): iters is read as iters[i*WIDTH + j] but pixels are written
// at pixels[i + j*HEIGHT]; that is a transpose that is only benign because
// WIDTH == HEIGHT — confirm intent before changing either dimension.
void setUpColor() {
const int NX = WIDTH;
const int NY = HEIGHT;
int i, j, VAL;
for (i = 0; i < NX; i++) {
for (j = 0; j < NY; j++) {
VAL = iters[i*WIDTH + j];
// Escaped points (0 < VAL < MAX_ITER) cycle through the 16-color palette.
if (VAL < MAX_ITER && VAL > 0) {
int cid = VAL % 16;
pixels[i + j*HEIGHT].r = colors[cid].r / 255;
pixels[i + j*HEIGHT].g = colors[cid].g / 255;
pixels[i + j*HEIGHT].b = colors[cid].b / 255;
}
else {
// Interior of the set (or VAL == 0) is drawn black.
pixels[i + j*HEIGHT].r = 0;
pixels[i + j*HEIGHT].g = 0;
pixels[i + j*HEIGHT].b = 0;
}
}
}
}
// Mandelbrot escape-time kernel: one thread per pixel.
// iter_arr: N-element output of iteration counts; [xmin,xmax]x[ymin,ymax]:
// complex-plane window; w,h: image size; max_iters: escape cap.
__global__ void mandelbrotset(int *iter_arr, double xmin, double xmax, double ymin, double ymax, int w, int h, int max_iters, int N) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index >= N)
return;
const int NX = w;
const int NY = h;
double dx = (xmax - xmin) / NX;
double dy = (ymax - ymin) / NY;
int i = index / w;
// Fixed: the second coordinate must be taken modulo the row length w,
// not h. "index % h" only happened to work because WIDTH == HEIGHT.
// NOTE(review): i (quotient) still scales by dx while j scales by dy,
// which mixes axes for non-square images — benign for this square setup.
int j = index % w;
double x = xmin + i*dx;
double y = ymin + j*dy;
double c_i = y;
double c_r = x;
double z_r = 0, z_i = 0;
int n;
// Iterate z <- z^2 + c until |z|^2 > 4 (escape) or the cap is reached.
for ( n = 0; n < max_iters; ++n) {
double z_r2 = z_r*z_r, z_i2 = z_i*z_i;
if (z_r2 + z_i2 > 4) {
break;
}
z_i = 2 * z_r*z_i + c_i;
z_r = z_r2 - z_i2 + c_r;
}
iter_arr[index] = n;
}
// Arrow-key handler: pan the complex-plane window by 8% of its extent,
// then request a redraw.
void SpecialKeys(int key, int x, int y) {
const double panX = fabs(real_min - real_max) * 0.08;
const double panY = fabs(img_min - img_max) * 0.08;
switch (key) {
case GLUT_KEY_LEFT:  real_min -= panX; real_max -= panX; break;
case GLUT_KEY_RIGHT: real_min += panX; real_max += panX; break;
case GLUT_KEY_UP:    img_min += panY; img_max += panY; break;
case GLUT_KEY_DOWN:  img_min -= panY; img_max -= panY; break;
}
glutPostRedisplay();
}
// Keyboard handler: '+'/'-' zoom in/out, 'r'/'t' raise/lower the iteration
// cap, Esc frees resources and exits.
void KeyB(unsigned char key, int x, int y) {
double xStep = fabs(real_min - real_max) * 0.08;
double yStep = fabs(img_min - img_max) * 0.08;
switch (key) {
case '+':
// Zoom in: shrink the window by 16% on each side.
real_min += xStep * 2;
real_max -= xStep * 2;
img_min += yStep * 2;
img_max -= yStep * 2;
break;
case '-':
real_min -= xStep * 2;
real_max += xStep * 2;
img_min -= yStep * 2;
img_max += yStep * 2;
break;
case 27: // Escape key
// Fixed: iters was allocated with malloc, so it must be released with
// free(), not delete (mixing the two is undefined behavior).
free(iters);
glutDestroyWindow(window);
hipFree(iters_d);
exit(0);
break;
case 'r':
MAX_ITER += iter_step;
// Fixed: report the actual step instead of a hard-coded 10.
printf("Iterations: %d -> %d\n", MAX_ITER - iter_step, MAX_ITER);
break;
case 't':
MAX_ITER -= iter_step;
printf("Iterations: %d -> %d\n", MAX_ITER + iter_step, MAX_ITER);
break;
}
glutPostRedisplay();
}
// Render callback: launch the Mandelbrot kernel, copy counts back,
// colorize them, and present the frame.
void onDisplay() {
t_start = omp_get_wtime();
hipLaunchKernelGGL(( mandelbrotset), dim3((block_size*num_threads + num_threads - 1) / num_threads),dim3(num_threads), 0, 0, iters_d, real_min, real_max, img_min, img_max, WIDTH, HEIGHT, MAX_ITER, arr_size);
// Fixed: kernel launches do not return a status — surface launch errors
// explicitly instead of letting them appear at the next API call.
gpuErrchk(hipGetLastError());
// The blocking memcpy also synchronizes with the kernel.
gpuErrchk(hipMemcpy(iters, iters_d, size, hipMemcpyDeviceToHost));
t_end = omp_get_wtime();
printf("Render time: %lf\n", t_end - t_start);
setUpColor();
glClearColor(1, 1, 1, 0);
glClear(GL_COLOR_BUFFER_BIT);
glDrawPixels(WIDTH, HEIGHT, GL_RGB, GL_FLOAT, pixels);
glutSwapBuffers();
}
// One-time setup: GLUT window, callbacks, GL projection, device buffer,
// and the color palette.
void Init() {
glutInitWindowSize(WIDTH, HEIGHT);
glutInitDisplayMode(GLUT_RGB | GLUT_DOUBLE | GLUT_DEPTH);
glutInitWindowPosition(100, 100);
window = glutCreateWindow("Mandelbrotset");
glutKeyboardFunc(KeyB);
glutSpecialFunc(SpecialKeys);
// Fixed: glViewport takes (x, y, width, height); the arguments were
// swapped. Harmless while WIDTH == HEIGHT, wrong for any other ratio.
glViewport(0, 0, WIDTH, HEIGHT);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
gluOrtho2D(0, WIDTH, 0, HEIGHT);
gpuErrchk(hipMalloc((void **)&iters_d, size));
initColors();
}
// Program entry: GLUT init, window/device setup, then the event loop
// (glutMainLoop does not return under classic GLUT).
int main(int argc, char** argv) {
glutInit(&argc, argv);
Init();
glutDisplayFunc(onDisplay);
glutMainLoop();
return 0;
}
| 0bd64d8bb465c936dc73a867f4aa705b100cf45d.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <gl/freeglut.h>
#include <stdio.h>
#include <omp.h>
#include <math.h>
#define WIDTH 800
#define HEIGHT 800
#define ITERATIONS 5000
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Report a CUDA API failure with its source location; optionally terminate
// the process with the error code as exit status.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
if (code == cudaSuccess)
return;
fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort)
exit(code);
}
// RGB triple; components are stored in [0,255] and scaled to [0,1] in
// setUpColor before being handed to OpenGL.
typedef struct rgb_color {
float r;
float g;
float b;
}color;
color colors[16]; // 16-entry escape-time palette (filled by initColors)
color pixels[WIDTH*HEIGHT]; // RGB frame buffer passed to glDrawPixels
// Current complex-plane viewing window (panned/zoomed by the key handlers).
double real_min = -2, real_max = 1;
double img_min = -1.5, img_max = 1.5;
int MAX_ITER = ITERATIONS; int iter_step = 10; // escape cap and its per-key delta
int window; // GLUT window handle
int size = HEIGHT * WIDTH * sizeof(int); // device allocation size in bytes
int *iters = (int *)malloc(size); // host-side iteration counts
int *iters_d; // device-side iteration counts
int arr_size = WIDTH * HEIGHT; // element count of iters/iters_d
int num_threads = 512; // threads per block
int block_size = 1250; // block count (WIDTH*HEIGHT / num_threads)
double t_start, t_end; // per-frame timing via omp_get_wtime
// Populate the 16-entry Mandelbrot palette with 0-255 RGB components.
void initColors() {
static const float palette[16][3] = {
{66, 30, 15}, {25, 7, 26}, {9, 1, 47}, {4, 4, 73},
{0, 7, 100}, {12, 44, 138}, {24, 82, 177}, {57, 125, 209},
{134, 181, 229}, {211, 236, 248}, {241, 233, 191}, {248, 201, 95},
{255, 170, 0}, {204, 128, 0}, {153, 87, 0}, {106, 52, 3}
};
for (int c = 0; c < 16; ++c) {
colors[c].r = palette[c][0];
colors[c].g = palette[c][1];
colors[c].b = palette[c][2];
}
}
// Convert the per-pixel iteration counts (iters) into RGB float pixels
// for glDrawPixels.
// NOTE(review): iters is read as iters[i*WIDTH + j] but pixels are written
// at pixels[i + j*HEIGHT]; that is a transpose that is only benign because
// WIDTH == HEIGHT — confirm intent before changing either dimension.
void setUpColor() {
const int NX = WIDTH;
const int NY = HEIGHT;
int i, j, VAL;
for (i = 0; i < NX; i++) {
for (j = 0; j < NY; j++) {
VAL = iters[i*WIDTH + j];
// Escaped points (0 < VAL < MAX_ITER) cycle through the 16-color palette.
if (VAL < MAX_ITER && VAL > 0) {
int cid = VAL % 16;
pixels[i + j*HEIGHT].r = colors[cid].r / 255;
pixels[i + j*HEIGHT].g = colors[cid].g / 255;
pixels[i + j*HEIGHT].b = colors[cid].b / 255;
}
else {
// Interior of the set (or VAL == 0) is drawn black.
pixels[i + j*HEIGHT].r = 0;
pixels[i + j*HEIGHT].g = 0;
pixels[i + j*HEIGHT].b = 0;
}
}
}
}
// Mandelbrot escape-time kernel: one thread per pixel.
// iter_arr: N-element output of iteration counts; [xmin,xmax]x[ymin,ymax]:
// complex-plane window; w,h: image size; max_iters: escape cap.
__global__ void mandelbrotset(int *iter_arr, double xmin, double xmax, double ymin, double ymax, int w, int h, int max_iters, int N) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index >= N)
return;
const int NX = w;
const int NY = h;
double dx = (xmax - xmin) / NX;
double dy = (ymax - ymin) / NY;
int i = index / w;
// Fixed: the second coordinate must be taken modulo the row length w,
// not h. "index % h" only happened to work because WIDTH == HEIGHT.
// NOTE(review): i (quotient) still scales by dx while j scales by dy,
// which mixes axes for non-square images — benign for this square setup.
int j = index % w;
double x = xmin + i*dx;
double y = ymin + j*dy;
double c_i = y;
double c_r = x;
double z_r = 0, z_i = 0;
int n;
// Iterate z <- z^2 + c until |z|^2 > 4 (escape) or the cap is reached.
for ( n = 0; n < max_iters; ++n) {
double z_r2 = z_r*z_r, z_i2 = z_i*z_i;
if (z_r2 + z_i2 > 4) {
break;
}
z_i = 2 * z_r*z_i + c_i;
z_r = z_r2 - z_i2 + c_r;
}
iter_arr[index] = n;
}
// Arrow-key handler: pan the complex-plane window by 8% of its extent,
// then request a redraw.
void SpecialKeys(int key, int x, int y) {
const double panX = fabs(real_min - real_max) * 0.08;
const double panY = fabs(img_min - img_max) * 0.08;
switch (key) {
case GLUT_KEY_LEFT:  real_min -= panX; real_max -= panX; break;
case GLUT_KEY_RIGHT: real_min += panX; real_max += panX; break;
case GLUT_KEY_UP:    img_min += panY; img_max += panY; break;
case GLUT_KEY_DOWN:  img_min -= panY; img_max -= panY; break;
}
glutPostRedisplay();
}
// Keyboard handler: '+'/'-' zoom in/out, 'r'/'t' raise/lower the iteration
// cap, Esc frees resources and exits.
void KeyB(unsigned char key, int x, int y) {
double xStep = fabs(real_min - real_max) * 0.08;
double yStep = fabs(img_min - img_max) * 0.08;
switch (key) {
case '+':
// Zoom in: shrink the window by 16% on each side.
real_min += xStep * 2;
real_max -= xStep * 2;
img_min += yStep * 2;
img_max -= yStep * 2;
break;
case '-':
real_min -= xStep * 2;
real_max += xStep * 2;
img_min -= yStep * 2;
img_max += yStep * 2;
break;
case 27: // Escape key
// Fixed: iters was allocated with malloc, so it must be released with
// free(), not delete (mixing the two is undefined behavior).
free(iters);
glutDestroyWindow(window);
cudaFree(iters_d);
exit(0);
break;
case 'r':
MAX_ITER += iter_step;
// Fixed: report the actual step instead of a hard-coded 10.
printf("Iterations: %d -> %d\n", MAX_ITER - iter_step, MAX_ITER);
break;
case 't':
MAX_ITER -= iter_step;
printf("Iterations: %d -> %d\n", MAX_ITER + iter_step, MAX_ITER);
break;
}
glutPostRedisplay();
}
// Render callback: launch the Mandelbrot kernel, copy counts back,
// colorize them, and present the frame.
void onDisplay() {
t_start = omp_get_wtime();
mandelbrotset<<<(block_size*num_threads + num_threads - 1) / num_threads,num_threads>>>(iters_d, real_min, real_max, img_min, img_max, WIDTH, HEIGHT, MAX_ITER, arr_size);
// Fixed: kernel launches do not return a status — surface launch errors
// explicitly instead of letting them appear at the next API call.
gpuErrchk(cudaGetLastError());
// The blocking memcpy also synchronizes with the kernel.
gpuErrchk(cudaMemcpy(iters, iters_d, size, cudaMemcpyDeviceToHost));
t_end = omp_get_wtime();
printf("Render time: %lf\n", t_end - t_start);
setUpColor();
glClearColor(1, 1, 1, 0);
glClear(GL_COLOR_BUFFER_BIT);
glDrawPixels(WIDTH, HEIGHT, GL_RGB, GL_FLOAT, pixels);
glutSwapBuffers();
}
// One-time setup: GLUT window, callbacks, GL projection, device buffer,
// and the color palette.
void Init() {
glutInitWindowSize(WIDTH, HEIGHT);
glutInitDisplayMode(GLUT_RGB | GLUT_DOUBLE | GLUT_DEPTH);
glutInitWindowPosition(100, 100);
window = glutCreateWindow("Mandelbrotset");
glutKeyboardFunc(KeyB);
glutSpecialFunc(SpecialKeys);
// Fixed: glViewport takes (x, y, width, height); the arguments were
// swapped. Harmless while WIDTH == HEIGHT, wrong for any other ratio.
glViewport(0, 0, WIDTH, HEIGHT);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
gluOrtho2D(0, WIDTH, 0, HEIGHT);
gpuErrchk(cudaMalloc((void **)&iters_d, size));
initColors();
}
// Program entry: GLUT init, window/device setup, then the event loop
// (glutMainLoop does not return under classic GLUT).
int main(int argc, char** argv) {
glutInit(&argc, argv);
Init();
glutDisplayFunc(onDisplay);
glutMainLoop();
return 0;
}
|
85e0c972dafe6b71b9ee2c7e85cf794a9b491b65.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector arithmetic ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar arithmetic ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector comparison ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar comparison ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector math (one argument) =============================================
// Calculate the arc cosine of the input argument.
extern "C"
// Calculate the nonnegative arc hyperbolic cosine of the input argument.
extern "C"
// Calculate the arc sine of the input argument.
extern "C"
// Calculate the arc hyperbolic sine of the input argument.
extern "C"
// Calculate the arc tangent of the input argument.
extern "C"
// Calculate the arc hyperbolic tangent of the input argument.
extern "C"
// Calculate the cube root of the input argument.
extern "C"
// Calculate ceiling of the input argument.
extern "C"
// Calculate the cosine of the input argument.
extern "C"
// Calculate the hyperbolic cosine of the input argument.
extern "C"
// Calculate the cosine of the input argument p .
extern "C"
// Calculate the complementary error function of the input argument.
extern "C"
// Calculate the inverse complementary error function of the input argument.
extern "C"
// Calculate the scaled complementary error function of the input argument.
extern "C"
// Calculate the error function of the input argument.
extern "C"
// Calculate the inverse error function of the input argument.
extern "C"
// Calculate the base 10 exponential of the input argument.
extern "C"
// Calculate the base 2 exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument, minus 1.
extern "C"
// Calculate the absolute value of its argument.
extern "C"
// Calculate the largest integer less than or equal to x.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 1 for the input argument.
extern "C"
// Calculate the natural logarithm of the absolute value of the gamma function of the input argument.
extern "C"
// Calculate the base 10 logarithm of the input argument.
extern "C"
// Calculate the value of l o g e ( 1 + x ) .
extern "C"
// Calculate the base 2 logarithm of the input argument.
extern "C"
// Calculate the doubleing point representation of the exponent of the input argument.
extern "C"
// Calculate the natural logarithm of the input argument.
extern "C"
// Calculate the standard normal cumulative distribution function.
extern "C"
// Calculate the inverse of the standard normal cumulative distribution function.
extern "C"
// Calculate reciprocal cube root function.
extern "C"
// Round input to nearest integer value in doubleing-point.
extern "C"
// Round to nearest integer value in doubleing-point.
extern "C"
// Calculate the reciprocal of the square root of the input argument.
extern "C"
// Calculate the sine of the input argument.
extern "C"
// Calculate the hyperbolic sine of the input argument.
extern "C"
// Calculate the sine of the input argument p .
extern "C"
// Calculate the square root of the input argument.
extern "C"
// Calculate the tangent of the input argument.
extern "C"
// Calculate the hyperbolic tangent of the input argument.
extern "C"
// Calculate the gamma function of the input argument.
extern "C"
// Truncate input argument to the integral part.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 1 for the input argument.
extern "C"
//=== Vector math (two arguments) ============================================
// Create value with given magnitude, copying sign of second value.
extern "C"
// Compute the positive difference between x and y.
extern "C"
// Divide two doubleing point values.
extern "C"
// Determine the maximum numeric value of the arguments.
extern "C"
// Determine the minimum numeric value of the arguments.
extern "C"
// Calculate the doubleing-point remainder of x / y.
extern "C"
// Calculate the square root of the sum of squares of two arguments.
extern "C"
// Return next representable single-precision doubleing-point value afer argument.
extern "C"
// Calculate the value of first argument to the power of second argument.
extern "C"
// Compute single-precision doubleing-point remainder.
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//WARNING : device_sum size should be gridDim.x
// Element-wise standard normal CDF: result[id] = Phi(y[id]) for id < n.
// A (possibly 2-D) grid is flattened row-major into the linear index id;
// threads past n fall through.
__global__ void vec_normcdf (int n, double *result, double *y)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int idy = threadIdx.y + blockIdx.y * blockDim.y;
int id = idy * gridDim.x * blockDim.x + idx;
if (id < n)
{
result[id] = normcdf(y[id]);
}
} | 85e0c972dafe6b71b9ee2c7e85cf794a9b491b65.cu | #include "includes.h"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector arithmetic ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar arithmetic ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector comparison ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar comparison ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector math (one argument) =============================================
// Calculate the arc cosine of the input argument.
extern "C"
// Calculate the nonnegative arc hyperbolic cosine of the input argument.
extern "C"
// Calculate the arc sine of the input argument.
extern "C"
// Calculate the arc hyperbolic sine of the input argument.
extern "C"
// Calculate the arc tangent of the input argument.
extern "C"
// Calculate the arc hyperbolic tangent of the input argument.
extern "C"
// Calculate the cube root of the input argument.
extern "C"
// Calculate ceiling of the input argument.
extern "C"
// Calculate the cosine of the input argument.
extern "C"
// Calculate the hyperbolic cosine of the input argument.
extern "C"
// Calculate the cosine of the input argument × p .
extern "C"
// Calculate the complementary error function of the input argument.
extern "C"
// Calculate the inverse complementary error function of the input argument.
extern "C"
// Calculate the scaled complementary error function of the input argument.
extern "C"
// Calculate the error function of the input argument.
extern "C"
// Calculate the inverse error function of the input argument.
extern "C"
// Calculate the base 10 exponential of the input argument.
extern "C"
// Calculate the base 2 exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument, minus 1.
extern "C"
// Calculate the absolute value of its argument.
extern "C"
// Calculate the largest integer less than or equal to x.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 1 for the input argument.
extern "C"
// Calculate the natural logarithm of the absolute value of the gamma function of the input argument.
extern "C"
// Calculate the base 10 logarithm of the input argument.
extern "C"
// Calculate the value of l o g e ( 1 + x ) .
extern "C"
// Calculate the base 2 logarithm of the input argument.
extern "C"
// Calculate the doubleing point representation of the exponent of the input argument.
extern "C"
// Calculate the natural logarithm of the input argument.
extern "C"
// Calculate the standard normal cumulative distribution function.
extern "C"
// Calculate the inverse of the standard normal cumulative distribution function.
extern "C"
// Calculate reciprocal cube root function.
extern "C"
// Round input to nearest integer value in doubleing-point.
extern "C"
// Round to nearest integer value in doubleing-point.
extern "C"
// Calculate the reciprocal of the square root of the input argument.
extern "C"
// Calculate the sine of the input argument.
extern "C"
// Calculate the hyperbolic sine of the input argument.
extern "C"
// Calculate the sine of the input argument × p .
extern "C"
// Calculate the square root of the input argument.
extern "C"
// Calculate the tangent of the input argument.
extern "C"
// Calculate the hyperbolic tangent of the input argument.
extern "C"
// Calculate the gamma function of the input argument.
extern "C"
// Truncate input argument to the integral part.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 1 for the input argument.
extern "C"
//=== Vector math (two arguments) ============================================
// Create value with given magnitude, copying sign of second value.
extern "C"
// Compute the positive difference between x and y.
extern "C"
// Divide two doubleing point values.
extern "C"
// Determine the maximum numeric value of the arguments.
extern "C"
// Determine the minimum numeric value of the arguments.
extern "C"
// Calculate the doubleing-point remainder of x / y.
extern "C"
// Calculate the square root of the sum of squares of two arguments.
extern "C"
// Return next representable single-precision doubleing-point value afer argument.
extern "C"
// Calculate the value of first argument to the power of second argument.
extern "C"
// Compute single-precision doubleing-point remainder.
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//WARNING : device_sum size should be gridDim.x
// Element-wise standard normal CDF: result[id] = Phi(y[id]) for id < n.
// A (possibly 2-D) grid is flattened row-major into the linear index id;
// threads past n fall through.
__global__ void vec_normcdf (int n, double *result, double *y)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int idy = threadIdx.y + blockIdx.y * blockDim.y;
int id = idy * gridDim.x * blockDim.x + idx;
if (id < n)
{
result[id] = normcdf(y[id]);
}
} |
3cb1f211e196ee2ca2844e2cc691c09e0b451fea.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
// Exercise skeleton: per-block scan of `a`; per-block totals are expected
// in `s` (one entry per block — see the host-side accumulation in main).
__global__ void scan_cuda(double* a, double *s, int width) {
// scan kernel (to be implemented)
}
// Exercise skeleton: add each block's scanned offset from `s` back into `a`.
__global__ void add_cuda(double *a, double *s, int width) {
// addition kernel (to be implemented)
}
// Exercise driver for a two-phase (scan + add) prefix sum: the device
// allocation/copy/launch steps are intentionally left for the student.
int main()
{
int width = 40000000;
int size = width * sizeof(double);
int block_size = 1024;
int num_blocks = (width-1)/block_size+1;
int s_size = (num_blocks * sizeof(double));
double *a = (double*) malloc (size);
double *s = (double*) malloc (s_size);
for(int i = 0; i < width; i++)
a[i] = i;
// Fixed: initialize the device pointers so the hipFree calls below are
// well-defined while the allocation steps are unimplemented
// (hipFree(NULL) is a no-op; freeing an uninitialized pointer is UB).
double *d_a = NULL, *d_s = NULL;
// allocate vectors "a" and "s" on the device
// copy vector "a" to the device
// choose the number of blocks and threads (dimGrid and dimBlock)
// launch the scan kernel
// copy vector "s" back to the host
// scan of the block totals on the host (already implemented)
// NOTE(review): s is read before anything fills it — in the finished
// exercise it must first be copied back from the device.
s[0] = 0;
for (int i = 1; i < num_blocks; i++)
s[i] += s[i-1];
// copy vector "s" to the device
// launch the addition kernel
// copy vector "a" back to the host
printf("\na[%d] = %f\n",width-1,a[width-1]);
hipFree(d_a);
hipFree(d_s);
// Fixed: release the host buffers as well.
free(a);
free(s);
}
| 3cb1f211e196ee2ca2844e2cc691c09e0b451fea.cu | #include <stdio.h>
#include <stdlib.h>
// Exercise skeleton: per-block scan of `a`; per-block totals are expected
// in `s` (one entry per block — see the host-side accumulation in main).
__global__ void scan_cuda(double* a, double *s, int width) {
// scan kernel (to be implemented)
}
// Exercise skeleton: add each block's scanned offset from `s` back into `a`.
__global__ void add_cuda(double *a, double *s, int width) {
// addition kernel (to be implemented)
}
// Exercise driver for a two-phase (scan + add) prefix sum: the device
// allocation/copy/launch steps are intentionally left for the student.
int main()
{
int width = 40000000;
int size = width * sizeof(double);
int block_size = 1024;
int num_blocks = (width-1)/block_size+1;
int s_size = (num_blocks * sizeof(double));
double *a = (double*) malloc (size);
double *s = (double*) malloc (s_size);
for(int i = 0; i < width; i++)
a[i] = i;
// Fixed: initialize the device pointers so the cudaFree calls below are
// well-defined while the allocation steps are unimplemented
// (cudaFree(NULL) is a no-op; freeing an uninitialized pointer is UB).
double *d_a = NULL, *d_s = NULL;
// allocate vectors "a" and "s" on the device
// copy vector "a" to the device
// choose the number of blocks and threads (dimGrid and dimBlock)
// launch the scan kernel
// copy vector "s" back to the host
// scan of the block totals on the host (already implemented)
// NOTE(review): s is read before anything fills it — in the finished
// exercise it must first be copied back from the device.
s[0] = 0;
for (int i = 1; i < num_blocks; i++)
s[i] += s[i-1];
// copy vector "s" to the device
// launch the addition kernel
// copy vector "a" back to the host
printf("\na[%d] = %f\n",width-1,a[width-1]);
cudaFree(d_a);
cudaFree(d_s);
// Fixed: release the host buffers as well.
free(a);
free(s);
}
|
eca78e7e18a01f7e6cf5122d2625beadbeac9093.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <stdio.h>
#include <time.h>
#include <iomanip>
using namespace std;
#define PI 3.1415926535897932384
#define mu0 4*PI*1e-7
// Initialize the field profile on the rod, one thread per grid cell, and
// pin the two boundary cells by mirroring them into rod_old.
// NOTE(review): the profile (1 - r^2/(3L^2)) * 3*mu0*Imax*r / (4*pi*L^2)
// with r = i*ldr looks like an initial azimuthal B-field of a
// current-carrying rod — confirm against the accompanying write-up.
__global__ void init(double *rod_old, double *rod_new, double imax, double ldr, double rlength, int total_seg){
int i = threadIdx.x;
rod_new[i] = (1-(i*i*ldr*ldr/(3*rlength*rlength)))*3*mu0*imax*i*ldr/(4*PI*rlength*rlength);
if(i==0 || i==total_seg-1){
rod_old[i] = rod_new[i];
}
}
// Relax the field for maxSteps explicit time steps with the cylindrical
// diffusion stencil  B_i += aug * [ (1+1/(2i)) B_{i+1} + (-2-1/i^2) B_i
//   + (1-1/(2i)) B_{i-1} ],  with a special-cased axis cell i==1.
// Launch contract (unchanged from the call site): one block of rod_size
// threads and rod_size*sizeof(double) bytes of dynamic shared memory.
// rod_old is kept only for interface compatibility.
//
// Fixes relative to the original:
//  * Two "extern __shared__" arrays alias the SAME dynamic allocation, so
//    rod_old_s and rod_new_s were one buffer and the update read
//    half-updated values; neighbors are now snapshotted into registers
//    before any thread writes.
//  * 1/(2*i) and 1/(i*i) were integer divisions (0 for i >= 1, collapsing
//    the stencil coefficients); they are now evaluated in double.
//  * The thread with i == rod_size read/wrote one element past the
//    arrays; all accesses are now bounds-checked.
__global__ void run(double *rod_old, double *rod_new, double aug, long int maxSteps, int rod_size){
int i = threadIdx.x + 1;
extern __shared__ double rod_s[];
if (i < rod_size)
rod_s[i] = rod_new[i];
__syncthreads();
for (long int step = 0; step < maxSteps; ++step){
double left = 0.0, center = 0.0, right = 0.0;
// Snapshot old neighbor values before anyone overwrites them (Jacobi).
if (i < rod_size - 1){
center = rod_s[i];
right = rod_s[i+1];
left = (i > 1) ? rod_s[i-1] : 0.0;
}
__syncthreads();
if (i == 1)
rod_s[1] += aug*(2.0*right - 4.0*center);
else if (i < rod_size - 1)
rod_s[i] += aug*((1.0 + 1.0/(2.0*i))*right + (-2.0 - 1.0/((double)i*i))*center + (1.0 - 1.0/(2.0*i))*left);
__syncthreads();
}
if (i < rod_size)
rod_new[i] = rod_s[i];
}
// Interactive driver: prompt for physical parameters, initialize the field
// on the device, run the relaxation, and log r-values plus the initial and
// final profiles to results.txt.
// NOTE(review): scanf return values are unchecked — malformed input leaves
// the parameters uninitialized.
int main(){
FILE *myfile;
myfile = fopen("results.txt", "w");
double imax, rlength, eta, tstep, ldr, tottime;
int numseg;
printf("What is your I max? ");
scanf("%lf", &imax);
printf("What is the length of your rod? ");
scanf("%lf", &rlength);
printf("What is eta? ");
scanf("%lf", &eta);
printf("How many segments would you like? ");
scanf("%d", &numseg);
// Grid spacing and the explicit-scheme time step (0.25*dr^2*mu0/eta —
// presumably chosen for stability; confirm against the write-up).
ldr = rlength/(numseg+1);
tstep = 0.25*ldr*ldr*mu0/eta;
printf("How long would you like to run? ");
scanf("%lf", &tottime);
double *h_rod, *d_rod_new, *d_rod_old;
size_t rod_size = (numseg + 2) * sizeof(double);
h_rod = (double*)malloc(rod_size);
hipMalloc(&d_rod_new, rod_size);
hipMalloc(&d_rod_old, rod_size);
hipLaunchKernelGGL(( init), dim3(1),dim3(numseg+2), 0, 0, d_rod_old, d_rod_new, imax, ldr, rlength, numseg + 2);
int out;
//output r values
for(out = 0; out<numseg+1; out++){
fprintf( myfile, "%lf ", out*ldr );
}
fprintf( myfile, "%lf\n", out*ldr );
// Initial profile (the blocking memcpy also syncs with the init kernel).
hipMemcpy(h_rod, d_rod_new, rod_size, hipMemcpyDeviceToHost);
for(out = 0; out<numseg+1; out++){
fprintf( myfile, "%lf ", *(h_rod+out) );
}
fprintf( myfile, "%lf\n", *(h_rod+out) );
double aug = eta*tstep/(mu0*ldr*ldr);
long int total_steps = tottime / tstep;
printf("\nSteps: %ld\n", total_steps);
clock_t begin, end;
double time_spent;
begin = clock();
//run
// NOTE(review): only ONE array's worth of dynamic shared memory is
// allocated here, yet the run kernel declares two extern __shared__
// arrays (which alias) — keep the two sides consistent.
hipLaunchKernelGGL(( run), dim3(1),dim3(numseg + 2), (numseg+2)*sizeof(double), 0, d_rod_old, d_rod_new, aug, total_steps, numseg+2);
hipDeviceSynchronize();
end = clock();
time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
// Final profile.
hipMemcpy(h_rod, d_rod_new, rod_size, hipMemcpyDeviceToHost);
for(out = 0; out<numseg+1; out++){
fprintf( myfile, "%lf ", *(h_rod+out) );
}
fprintf( myfile, "%lf\n", *(h_rod+out) );
fprintf(myfile, "STOP\n");
fclose(myfile);
cout << "\n------------------------------------\nExecution took: "<< time_spent << " sec\n";
return 0;
}
| eca78e7e18a01f7e6cf5122d2625beadbeac9093.cu | #include <iostream>
#include <stdio.h>
#include <time.h>
#include <iomanip>
using namespace std;
#define PI 3.1415926535897932384
#define mu0 4*PI*1e-7
// Kernel: initialize the field profile along the rod.
// Launched with one block of total_seg threads; thread i owns sample i.
// Writes rod_new[i] from the analytic initial profile and copies only the two
// boundary samples (i == 0 and i == total_seg-1) into rod_old.
// NOTE(review): no bounds check -- assumes blockDim.x == total_seg exactly.
__global__ void init(double *rod_old, double *rod_new, double imax, double ldr, double rlength, int total_seg){
    int i = threadIdx.x;
    // Initial condition: (1 - r^2/(3L^2)) * 3*mu0*Imax*r / (4*pi*L^2), r = i*ldr.
    rod_new[i] = (1-(i*i*ldr*ldr/(3*rlength*rlength)))*3*mu0*imax*i*ldr/(4*PI*rlength*rlength);
    if(i==0 || i==total_seg-1){
        rod_old[i] = rod_new[i];
    }
}
// Kernel: explicit time-stepping of the rod field in shared memory.
//
// Fixes vs. the original:
//  * The two `extern __shared__` declarations aliased the SAME dynamic
//    shared buffer, so the "old"/"new" copy was a no-op and in-place updates
//    raced with neighbor reads. A register snapshot of the 3-point stencil
//    taken before a barrier removes the race while still fitting in the
//    single rod_size-double allocation the host launch provides.
//  * `1/(2*i)` and `1/(i*i)` were INTEGER divisions, truncating the
//    coordinate-correction coefficients to zero.
//  * Threads with i >= rod_size accessed one element past the arrays.
//
// rod_old is kept in the signature for interface compatibility (the original
// never read it from global memory either).
__global__ void run(double *rod_old, double *rod_new, double aug, long int maxSteps, int rod_size){
    int i = threadIdx.x + 1;               // sample index owned by this thread
    extern __shared__ double rod_s[];      // rod_size doubles (host-provided)

    bool owns = (i < rod_size);            // last launched thread is out of range
    bool interior = owns && (i < rod_size - 1);  // samples that actually update
    if (owns) rod_s[i] = rod_new[i];
    if (threadIdx.x == 0) rod_s[0] = rod_new[0];  // boundary sample has no owner thread
    __syncthreads();

    for (long int step = 0; step < maxSteps; ++step) {
        // Snapshot previous-iteration neighbors into registers, then barrier,
        // so the in-place writes below cannot race with neighbor reads.
        double vm = 0.0, v0 = 0.0, vp = 0.0;
        if (interior) {
            vm = rod_s[i-1];
            v0 = rod_s[i];
            vp = rod_s[i+1];
        }
        __syncthreads();
        if (interior) {
            if (i == 1)   // first interior sample: special-cased stencil
                rod_s[1] += aug*(2.0*vp - 4.0*v0);
            else          // general finite-difference stencil
                rod_s[i] += aug*((1.0 + 1.0/(2.0*i))*vp
                               + (-2.0 - 1.0/((double)i*i))*v0
                               + (1.0 - 1.0/(2.0*i))*vm);
        }
        __syncthreads();
    }
    if (owns) rod_new[i] = rod_s[i];       // publish the final profile
}
// Host driver: interactive 1-D diffusion simulation on a rod.
// Reads physical parameters from stdin, initializes the field on the device,
// time-steps it with the `run` kernel (single block, dynamic shared memory),
// and writes three lines to results.txt: the r coordinates, the initial
// profile, and the final profile.
int main(){
    FILE *myfile;
    myfile = fopen("results.txt", "w");  // NOTE(review): return value not checked
    double imax, rlength, eta, tstep, ldr, tottime;
    int numseg;
    printf("What is your I max? ");
    scanf("%lf", &imax);
    printf("What is the length of your rod? ");
    scanf("%lf", &rlength);
    printf("What is eta? ");
    scanf("%lf", &eta);
    printf("How many segments would you like? ");
    scanf("%d", &numseg);
    ldr = rlength/(numseg+1);        // spatial step
    tstep = 0.25*ldr*ldr*mu0/eta;    // time step for the explicit scheme
    printf("How long would you like to run? ");
    scanf("%lf", &tottime);
    double *h_rod, *d_rod_new, *d_rod_old;
    size_t rod_size = (numseg + 2) * sizeof(double);  // interior + 2 boundary samples
    h_rod = (double*)malloc(rod_size);
    cudaMalloc(&d_rod_new, rod_size);  // NOTE(review): cudaMalloc results unchecked
    cudaMalloc(&d_rod_old, rod_size);
    // One block of numseg+2 threads, one thread per rod sample.
    init<<<1,numseg+2>>>(d_rod_old, d_rod_new, imax, ldr, rlength, numseg + 2);
    int out;
    //output r values
    for(out = 0; out<numseg+1; out++){
        fprintf( myfile, "%lf ", out*ldr );
    }
    fprintf( myfile, "%lf\n", out*ldr );
    // Initial profile (this blocking copy also waits for the launch above).
    cudaMemcpy(h_rod, d_rod_new, rod_size, cudaMemcpyDeviceToHost);
    for(out = 0; out<numseg+1; out++){
        fprintf( myfile, "%lf ", *(h_rod+out) );
    }
    fprintf( myfile, "%lf\n", *(h_rod+out) );
    double aug = eta*tstep/(mu0*ldr*ldr);  // dimensionless update coefficient
    long int total_steps = tottime / tstep;
    printf("\nSteps: %ld\n", total_steps);
    clock_t begin, end;
    double time_spent;
    begin = clock();
    //run
    // Dynamic shared memory: one double per rod sample.
    run<<<1,numseg + 2, (numseg+2)*sizeof(double)>>>(d_rod_old, d_rod_new, aug, total_steps, numseg+2);
    cudaDeviceSynchronize();
    end = clock();
    time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
    // Final profile.
    cudaMemcpy(h_rod, d_rod_new, rod_size, cudaMemcpyDeviceToHost);
    for(out = 0; out<numseg+1; out++){
        fprintf( myfile, "%lf ", *(h_rod+out) );
    }
    fprintf( myfile, "%lf\n", *(h_rod+out) );
    fprintf(myfile, "STOP\n");
    fclose(myfile);
    // NOTE(review): h_rod / d_rod_new / d_rod_old are never freed (leak at exit).
    cout << "\n------------------------------------\nExecution took: "<< time_spent << " sec\n";
    return 0;
}
|
d34f46e2877a63f6f69a7d7d79bf3e57e0773ad1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright (c) 2013-2015, Gregory P. Meyer
University of Illinois Board of Trustees
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the copyright holder(s) nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <dip/common/error.h>
#include <dip/common/types.h>
#define FILTER_HALF_WIDTH 3
#define BLOCK_WIDTH 16
namespace dip {
// Kernel: windowed variance / standard deviation of a depth image.
// Launch: 2-D grid of BLOCK_WIDTH x BLOCK_WIDTH blocks, one thread per pixel.
// For each pixel with depth > 0, accumulates the sum and squared sum of the
// positive depths in a (2*FILTER_HALF_WIDTH+1)^2 window and writes variance,
// std = sqrt(variance), valid = 1. Pixels with no valid data (or degenerate
// variance) get all three outputs zeroed.
// Fix vs. original: sqrtf() instead of sqrt() -- the float argument was being
// promoted to a double-precision sqrt inside a single-precision kernel.
__global__ void Variance(int width, int height, const Depth *depth,
                         float *variance, float *std, float *valid) {
  // Cooperative tile of this block's depth values (no halo: neighbors that
  // fall outside the tile are fetched from global memory below).
  __shared__ Depth ds[BLOCK_WIDTH][BLOCK_WIDTH];

  int tx = threadIdx.x; int ty = threadIdx.y;
  int col = tx + blockIdx.x * BLOCK_WIDTH;
  int row = ty + blockIdx.y * BLOCK_WIDTH;

  // Every thread stores something so the whole tile is initialized before
  // any thread reads it after the barrier.
  ds[ty][tx] = (col < width && row < height) ? depth[col + row * width] : 0;
  __syncthreads();

  if (col >= width || row >= height)
    return;  // out-of-image threads only helped fill the tile

  float sum = 0.0f, squared_sum = 0.0f;
  int count = 0;  // number of positive-depth samples in the window
  for (int dy = -FILTER_HALF_WIDTH; dy <= FILTER_HALF_WIDTH; dy++) {
    for (int dx = -FILTER_HALF_WIDTH; dx <= FILTER_HALF_WIDTH; dx++) {
      int x = col + dx;
      int y = row + dy;
      if (x < 0 || x >= width || y < 0 || y >= height)
        continue;  // window clipped at the image border
      int i = tx + dx;
      int j = ty + dy;
      // Prefer the shared tile; fall back to global memory for halo pixels.
      float depth_value;
      if (i >= 0 && i < BLOCK_WIDTH && j >= 0 && j < BLOCK_WIDTH)
        depth_value = ds[j][i];
      else
        depth_value = depth[x + y * width];
      if (depth_value > 0) {
        sum += depth_value;
        squared_sum += depth_value * depth_value;
        count++;
      }
    }
  }

  int out = col + row * width;
  if (ds[ty][tx] > 0 && count > 0) {
    float mean = sum / count;
    float squared_mean = squared_sum / count;
    float var = squared_mean - (mean * mean);
    if (var > 0.0f) {
      variance[out] = var;
      std[out] = sqrtf(var);  // single-precision sqrt (was double sqrt())
      valid[out] = 1.0f;
      return;
    }
  }
  // No valid samples, invalid center pixel, or non-positive variance.
  variance[out] = 0.0f;
  std[out] = 0.0f;
  valid[out] = 0.0f;
}
// Kernel: keep depth samples whose windowed std-dev is below `threshold`,
// zero the rest. One thread per pixel on a BLOCK_WIDTH x BLOCK_WIDTH grid;
// threads outside the image exit early.
__global__ void Threshold(float threshold, int width, int height,
                          const float *std, const Depth *depth,
                          Depth *filtered_depth) {
  int col = blockIdx.x * BLOCK_WIDTH + threadIdx.x;
  int row = blockIdx.y * BLOCK_WIDTH + threadIdx.y;
  if (col >= width || row >= height)
    return;
  int idx = row * width + col;
  filtered_depth[idx] = (std[idx] < threshold) ? depth[idx] : 0;
}
// Host wrapper: run the Variance filter over a width x height depth image and
// block until it completes (execution errors surface via CUDA_ERROR_CHECK).
void VarianceKernel(int width, int height, const Depth *depth,
                    float *variance, float *std, float *valid) {
  // Ceil-divide the image into BLOCK_WIDTH x BLOCK_WIDTH tiles so the ragged
  // right/bottom edges are covered; the kernel bounds-checks per thread.
  const dim3 block_dim(BLOCK_WIDTH, BLOCK_WIDTH, 1);
  const dim3 grid_dim((width  + BLOCK_WIDTH - 1) / BLOCK_WIDTH,
                      (height + BLOCK_WIDTH - 1) / BLOCK_WIDTH, 1);
  hipLaunchKernelGGL(( Variance), dim3(grid_dim), dim3(block_dim), 0, 0, width, height, depth, variance, std, valid);
  CUDA_ERROR_CHECK(hipDeviceSynchronize());
}
// Host wrapper: run the Threshold filter over a width x height depth image
// and block until it completes (errors surface via CUDA_ERROR_CHECK).
void ThresholdKernel(float threshold, int width, int height,
                     const float *std, const Depth *depth,
                     Depth *filtered_depth) {
  // Ceil-divide the image into BLOCK_WIDTH x BLOCK_WIDTH tiles; the kernel
  // bounds-checks threads that fall outside the image.
  const dim3 block_dim(BLOCK_WIDTH, BLOCK_WIDTH, 1);
  const dim3 grid_dim((width  + BLOCK_WIDTH - 1) / BLOCK_WIDTH,
                      (height + BLOCK_WIDTH - 1) / BLOCK_WIDTH, 1);
  hipLaunchKernelGGL(( Threshold), dim3(grid_dim), dim3(block_dim), 0, 0, threshold, width, height, std,
                     depth, filtered_depth);
  CUDA_ERROR_CHECK(hipDeviceSynchronize());
}
} // namespace dip
| d34f46e2877a63f6f69a7d7d79bf3e57e0773ad1.cu | /*
Copyright (c) 2013-2015, Gregory P. Meyer
University of Illinois Board of Trustees
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the copyright holder(s) nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <dip/common/error.h>
#include <dip/common/types.h>
#define FILTER_HALF_WIDTH 3
#define BLOCK_WIDTH 16
namespace dip {
// Kernel: windowed variance / standard deviation of a depth image.
// Launch: 2-D grid of BLOCK_WIDTH x BLOCK_WIDTH blocks, one thread per pixel.
// For each pixel with depth > 0, accumulates the sum and squared sum of the
// positive depths in a (2*FILTER_HALF_WIDTH+1)^2 window and writes variance,
// std = sqrt(variance), valid = 1. Pixels with no valid data (or degenerate
// variance) get all three outputs zeroed.
// Fix vs. original: sqrtf() instead of sqrt() -- the float argument was being
// promoted to a double-precision sqrt inside a single-precision kernel.
__global__ void Variance(int width, int height, const Depth *depth,
                         float *variance, float *std, float *valid) {
  // Cooperative tile of this block's depth values (no halo: neighbors that
  // fall outside the tile are fetched from global memory below).
  __shared__ Depth ds[BLOCK_WIDTH][BLOCK_WIDTH];

  int tx = threadIdx.x; int ty = threadIdx.y;
  int col = tx + blockIdx.x * BLOCK_WIDTH;
  int row = ty + blockIdx.y * BLOCK_WIDTH;

  // Every thread stores something so the whole tile is initialized before
  // any thread reads it after the barrier.
  ds[ty][tx] = (col < width && row < height) ? depth[col + row * width] : 0;
  __syncthreads();

  if (col >= width || row >= height)
    return;  // out-of-image threads only helped fill the tile

  float sum = 0.0f, squared_sum = 0.0f;
  int count = 0;  // number of positive-depth samples in the window
  for (int dy = -FILTER_HALF_WIDTH; dy <= FILTER_HALF_WIDTH; dy++) {
    for (int dx = -FILTER_HALF_WIDTH; dx <= FILTER_HALF_WIDTH; dx++) {
      int x = col + dx;
      int y = row + dy;
      if (x < 0 || x >= width || y < 0 || y >= height)
        continue;  // window clipped at the image border
      int i = tx + dx;
      int j = ty + dy;
      // Prefer the shared tile; fall back to global memory for halo pixels.
      float depth_value;
      if (i >= 0 && i < BLOCK_WIDTH && j >= 0 && j < BLOCK_WIDTH)
        depth_value = ds[j][i];
      else
        depth_value = depth[x + y * width];
      if (depth_value > 0) {
        sum += depth_value;
        squared_sum += depth_value * depth_value;
        count++;
      }
    }
  }

  int out = col + row * width;
  if (ds[ty][tx] > 0 && count > 0) {
    float mean = sum / count;
    float squared_mean = squared_sum / count;
    float var = squared_mean - (mean * mean);
    if (var > 0.0f) {
      variance[out] = var;
      std[out] = sqrtf(var);  // single-precision sqrt (was double sqrt())
      valid[out] = 1.0f;
      return;
    }
  }
  // No valid samples, invalid center pixel, or non-positive variance.
  variance[out] = 0.0f;
  std[out] = 0.0f;
  valid[out] = 0.0f;
}
// Kernel: keep depth samples whose windowed std-dev is below `threshold`,
// zero the rest. One thread per pixel; out-of-image threads do nothing.
__global__ void Threshold(float threshold, int width, int height,
                          const float *std, const Depth *depth,
                          Depth *filtered_depth) {
  // Get Block and Thread Id
  int bx = blockIdx.x; int by = blockIdx.y;
  int tx = threadIdx.x; int ty = threadIdx.y;
  // Calculate Row & Column
  int col = tx + bx * BLOCK_WIDTH;
  int row = ty + by * BLOCK_WIDTH;
  // Perform Threshold (bounds check covers the ragged edge tiles)
  if ((col < width) && (row < height)) {
    int i = col + row * width;
    if (std[i] < threshold)
      filtered_depth[i] = depth[i];
    else
      filtered_depth[i] = 0;
  }
}
// Host wrapper: launch the Variance filter over the full image and wait for
// completion (execution errors surface via CUDA_ERROR_CHECK on the sync).
void VarianceKernel(int width, int height, const Depth *depth,
                    float *variance, float *std, float *valid) {
  // Launch Variance Filter Kernel
  // Ceil-divide so partial edge tiles are covered; the kernel bounds-checks.
  int grid_width = (width + (BLOCK_WIDTH - 1)) / BLOCK_WIDTH;
  int grid_height = (height + (BLOCK_WIDTH - 1)) / BLOCK_WIDTH;
  dim3 grid_dim(grid_width, grid_height, 1);
  dim3 block_dim(BLOCK_WIDTH, BLOCK_WIDTH, 1);
  Variance<<<grid_dim, block_dim>>>(width, height, depth, variance, std, valid);
  CUDA_ERROR_CHECK(cudaDeviceSynchronize());
}
// Host wrapper: launch the Threshold filter over the full image and wait for
// completion (execution errors surface via CUDA_ERROR_CHECK on the sync).
void ThresholdKernel(float threshold, int width, int height,
                     const float *std, const Depth *depth,
                     Depth *filtered_depth) {
  // Launch Threshold Kernel
  // Ceil-divide so partial edge tiles are covered; the kernel bounds-checks.
  int grid_width = (width + (BLOCK_WIDTH - 1)) / BLOCK_WIDTH;
  int grid_height = (height + (BLOCK_WIDTH - 1)) / BLOCK_WIDTH;
  dim3 grid_dim(grid_width, grid_height, 1);
  dim3 block_dim(BLOCK_WIDTH, BLOCK_WIDTH, 1);
  Threshold<<<grid_dim, block_dim>>>(threshold, width, height, std,
                                     depth, filtered_depth);
  CUDA_ERROR_CHECK(cudaDeviceSynchronize());
}
} // namespace dip
|
980bb67d23589ea801e82c87e1d26552f02aa42f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.6.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2015
@author Raffaele Solca
@author Mark Gates
@generated from zlaset_band.cu normal z -> d, Fri Jan 30 19:00:09 2015
*/
#include "common_magma.h"
#define NB 64
/* ////////////////////////////////////////////////////////////////////////////
-- GPU kernel for setting the k-1 super-diagonals to OFFDIAG
and the main diagonal to DIAG.
Divides matrix into min( ceil((m+k-1)/nb), ceil(n/nb) ) block-columns,
with k threads in each block.
Each thread iterates across one diagonal.
Thread k-1 does the main diagonal, thread k-2 the first super-diagonal, etc.
block 0 block 1
0 => skip above matrix
1 0 => skip above matrix
2 1 0 => skip above matrix
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 | 0 ]
[ 3 2 | 1 0 ]
[ 3 | 2 1 0 ]
[ | 3 2 1 0 ]
[ | 3 2 1 ]
| 3 2 => skip below matrix
3 => skip below matrix
Thread assignment for m=10, n=12, k=4, nb=8. Each column is done in parallel.
*/
/* GPU kernel: set the k-1 super-diagonals of A to offdiag and the main
   diagonal to diag (see the layout comment above). Launched with k threads
   per block; thread k-1 walks the main diagonal, thread k-2 the first
   super-diagonal, etc. Each block covers an NB-column panel. */
__global__
void dlaset_band_upper(
    int m, int n,
    double offdiag, double diag,
    double *A, int lda)
{
    int k   = blockDim.x;                 // number of diagonals handled
    int ibx = blockIdx.x * NB;            // first column of this block's panel
    int ind = ibx + threadIdx.x - k + 1;  // starting row of this thread's diagonal
    A += ind + ibx*lda;
    double value = offdiag;
    if (threadIdx.x == k-1)               // last thread owns the main diagonal
        value = diag;
    // Walk NB elements down the diagonal, skipping entries above the matrix
    // (ind+j < 0) or beyond its bottom/right edge.
    #pragma unroll
    for (int j=0; j < NB; j++) {
        if (ibx + j < n && ind + j >= 0 && ind + j < m) {
            A[j*(lda+1)] = value;
        }
    }
}
/* ////////////////////////////////////////////////////////////////////////////
-- GPU kernel for setting the k-1 sub-diagonals to OFFDIAG
and the main diagonal to DIAG.
Divides matrix into min( ceil(m/nb), ceil(n/nb) ) block-columns,
with k threads in each block.
Each thread iterates across one diagonal.
Thread 0 does the main diagonal, thread 1 the first sub-diagonal, etc.
block 0 block 1
[ 0 | ]
[ 1 0 | ]
[ 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 | 0 ]
[ 3 2 | 1 0 ]
[ 3 | 2 1 0 ]
[ 3 2 1 0 ]
[ 3 2 1 ]
3 2 => skip below matrix
3 => skip below matrix
Thread assignment for m=13, n=12, k=4, nb=8. Each column is done in parallel.
*/
/* GPU kernel: set the k-1 sub-diagonals of A to offdiag and the main
   diagonal to diag (see the layout comment above). Thread 0 walks the main
   diagonal, thread 1 the first sub-diagonal, etc.; each block covers an
   NB-column panel. */
__global__
void dlaset_band_lower(
    int m, int n,
    double offdiag, double diag,
    double *A, int lda)
{
    //int k = blockDim.x;
    int ibx = blockIdx.x * NB;        // first column of this block's panel
    int ind = ibx + threadIdx.x;      // starting row of this thread's diagonal
    A += ind + ibx*lda;
    double value = offdiag;
    if (threadIdx.x == 0)             // thread 0 owns the main diagonal
        value = diag;
    // Walk NB elements down the diagonal, stopping at the matrix edges.
    #pragma unroll
    for (int j=0; j < NB; j++) {
        if (ibx + j < n && ind + j < m) {
            A[j*(lda+1)] = value;
        }
    }
}
/**
Purpose
-------
DLASET_BAND_STREAM initializes the main diagonal of dA to DIAG,
and the K-1 sub- or super-diagonals to OFFDIAG.
This is the same as DLASET_BAND, but adds queue argument.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies the part of the matrix dA to be set.
- = MagmaUpper: Upper triangular part
- = MagmaLower: Lower triangular part
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix dA. N >= 0.
@param[in]
k INTEGER
The number of diagonals to set, including the main diagonal. K >= 0.
Currently, K <= 1024 due to CUDA restrictions (max. number of threads per block).
@param[in]
offdiag DOUBLE_PRECISION
Off-diagonal elements in the band are set to OFFDIAG.
@param[in]
diag DOUBLE_PRECISION
All the main diagonal elements are set to DIAG.
@param[in]
dA DOUBLE_PRECISION array, dimension (LDDA,N)
The M-by-N matrix dA.
If UPLO = MagmaUpper, only the upper triangle or trapezoid is accessed;
if UPLO = MagmaLower, only the lower triangle or trapezoid is accessed.
On exit, A(i,j) = ALPHA, 1 <= i <= m, 1 <= j <= n where i != j, abs(i-j) < k;
A(i,i) = BETA , 1 <= i <= min(m,n)
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[in]
queue magma_queue_t
Stream to execute DLASET in.
@ingroup magma_daux2
********************************************************************/
extern "C" void
magmablas_dlaset_band_q(
    magma_uplo_t uplo, magma_int_t m, magma_int_t n, magma_int_t k,
    double offdiag, double diag,
    magmaDouble_ptr dA, magma_int_t ldda,
    magma_queue_t queue)
{
    // Validate arguments (meanings documented in the doxygen block above).
    magma_int_t info = 0;
    if ( uplo != MagmaLower && uplo != MagmaUpper )
        info = -1;
    else if ( m < 0 )
        info = -2;
    else if ( n < 0 )
        info = -3;
    else if ( k < 0 || k > 1024 )
        info = -4;   // k is bounded by the max threads per block
    else if ( ldda < max(1,m) )
        info = -6;
    if (info != 0) {
        magma_xerbla( __func__, -(info) );
        return;  //info;
    }
    // One thread per diagonal, one block per NB-column panel; the grid spans
    // every column that can intersect the band.
    if (uplo == MagmaUpper) {
        dim3 threads( min(k,n) );
        dim3 grid( (min(m+k-1,n) - 1)/NB + 1 );
        hipLaunchKernelGGL(( dlaset_band_upper), dim3(grid), dim3(threads), 0, queue , m, n, offdiag, diag, dA, ldda);
    }
    else if (uplo == MagmaLower) {
        dim3 threads( min(k,m) );
        dim3 grid( (min(m,n) - 1)/NB + 1 );
        hipLaunchKernelGGL(( dlaset_band_lower), dim3(grid), dim3(threads), 0, queue , m, n, offdiag, diag, dA, ldda);
    }
}
/**
@see magmablas_dlaset_band_q
@ingroup magma_daux2
********************************************************************/
/* Convenience wrapper: same as magmablas_dlaset_band_q, executed on the
   default MAGMA stream. */
extern "C" void
magmablas_dlaset_band(
    magma_uplo_t uplo, magma_int_t m, magma_int_t n, magma_int_t k,
    double offdiag, double diag,
    magmaDouble_ptr dA, magma_int_t ldda)
{
    magmablas_dlaset_band_q(uplo, m, n, k, offdiag, diag, dA, ldda, magma_stream);
}
| 980bb67d23589ea801e82c87e1d26552f02aa42f.cu | /*
-- MAGMA (version 1.6.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2015
@author Raffaele Solca
@author Mark Gates
@generated from zlaset_band.cu normal z -> d, Fri Jan 30 19:00:09 2015
*/
#include "common_magma.h"
#define NB 64
/* ////////////////////////////////////////////////////////////////////////////
-- GPU kernel for setting the k-1 super-diagonals to OFFDIAG
and the main diagonal to DIAG.
Divides matrix into min( ceil((m+k-1)/nb), ceil(n/nb) ) block-columns,
with k threads in each block.
Each thread iterates across one diagonal.
Thread k-1 does the main diagonal, thread k-2 the first super-diagonal, etc.
block 0 block 1
0 => skip above matrix
1 0 => skip above matrix
2 1 0 => skip above matrix
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 | 0 ]
[ 3 2 | 1 0 ]
[ 3 | 2 1 0 ]
[ | 3 2 1 0 ]
[ | 3 2 1 ]
| 3 2 => skip below matrix
3 => skip below matrix
Thread assignment for m=10, n=12, k=4, nb=8. Each column is done in parallel.
*/
/* GPU kernel: set the k-1 super-diagonals of A to offdiag and the main
   diagonal to diag (see the layout comment above). Launched with k threads
   per block; thread k-1 walks the main diagonal, thread k-2 the first
   super-diagonal, etc. Each block covers an NB-column panel. */
__global__
void dlaset_band_upper(
    int m, int n,
    double offdiag, double diag,
    double *A, int lda)
{
    int k   = blockDim.x;                 // number of diagonals handled
    int ibx = blockIdx.x * NB;            // first column of this block's panel
    int ind = ibx + threadIdx.x - k + 1;  // starting row of this thread's diagonal
    A += ind + ibx*lda;
    double value = offdiag;
    if (threadIdx.x == k-1)               // last thread owns the main diagonal
        value = diag;
    // Walk NB elements down the diagonal, skipping entries above the matrix
    // (ind+j < 0) or beyond its bottom/right edge.
    #pragma unroll
    for (int j=0; j < NB; j++) {
        if (ibx + j < n && ind + j >= 0 && ind + j < m) {
            A[j*(lda+1)] = value;
        }
    }
}
/* ////////////////////////////////////////////////////////////////////////////
-- GPU kernel for setting the k-1 sub-diagonals to OFFDIAG
and the main diagonal to DIAG.
Divides matrix into min( ceil(m/nb), ceil(n/nb) ) block-columns,
with k threads in each block.
Each thread iterates across one diagonal.
Thread 0 does the main diagonal, thread 1 the first sub-diagonal, etc.
block 0 block 1
[ 0 | ]
[ 1 0 | ]
[ 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 | 0 ]
[ 3 2 | 1 0 ]
[ 3 | 2 1 0 ]
[ 3 2 1 0 ]
[ 3 2 1 ]
3 2 => skip below matrix
3 => skip below matrix
Thread assignment for m=13, n=12, k=4, nb=8. Each column is done in parallel.
*/
/* GPU kernel: set the k-1 sub-diagonals of A to offdiag and the main
   diagonal to diag (see the layout comment above). Thread 0 walks the main
   diagonal, thread 1 the first sub-diagonal, etc.; each block covers an
   NB-column panel. */
__global__
void dlaset_band_lower(
    int m, int n,
    double offdiag, double diag,
    double *A, int lda)
{
    //int k = blockDim.x;
    int ibx = blockIdx.x * NB;        // first column of this block's panel
    int ind = ibx + threadIdx.x;      // starting row of this thread's diagonal
    A += ind + ibx*lda;
    double value = offdiag;
    if (threadIdx.x == 0)             // thread 0 owns the main diagonal
        value = diag;
    // Walk NB elements down the diagonal, stopping at the matrix edges.
    #pragma unroll
    for (int j=0; j < NB; j++) {
        if (ibx + j < n && ind + j < m) {
            A[j*(lda+1)] = value;
        }
    }
}
/**
Purpose
-------
DLASET_BAND_STREAM initializes the main diagonal of dA to DIAG,
and the K-1 sub- or super-diagonals to OFFDIAG.
This is the same as DLASET_BAND, but adds queue argument.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies the part of the matrix dA to be set.
- = MagmaUpper: Upper triangular part
- = MagmaLower: Lower triangular part
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix dA. N >= 0.
@param[in]
k INTEGER
The number of diagonals to set, including the main diagonal. K >= 0.
Currently, K <= 1024 due to CUDA restrictions (max. number of threads per block).
@param[in]
offdiag DOUBLE_PRECISION
Off-diagonal elements in the band are set to OFFDIAG.
@param[in]
diag DOUBLE_PRECISION
All the main diagonal elements are set to DIAG.
@param[in]
dA DOUBLE_PRECISION array, dimension (LDDA,N)
The M-by-N matrix dA.
If UPLO = MagmaUpper, only the upper triangle or trapezoid is accessed;
if UPLO = MagmaLower, only the lower triangle or trapezoid is accessed.
On exit, A(i,j) = ALPHA, 1 <= i <= m, 1 <= j <= n where i != j, abs(i-j) < k;
A(i,i) = BETA , 1 <= i <= min(m,n)
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[in]
queue magma_queue_t
Stream to execute DLASET in.
@ingroup magma_daux2
********************************************************************/
extern "C" void
magmablas_dlaset_band_q(
    magma_uplo_t uplo, magma_int_t m, magma_int_t n, magma_int_t k,
    double offdiag, double diag,
    magmaDouble_ptr dA, magma_int_t ldda,
    magma_queue_t queue)
{
    // Validate arguments (meanings documented in the doxygen block above).
    magma_int_t info = 0;
    if ( uplo != MagmaLower && uplo != MagmaUpper )
        info = -1;
    else if ( m < 0 )
        info = -2;
    else if ( n < 0 )
        info = -3;
    else if ( k < 0 || k > 1024 )
        info = -4;   // k is bounded by the max threads per block
    else if ( ldda < max(1,m) )
        info = -6;
    if (info != 0) {
        magma_xerbla( __func__, -(info) );
        return;  //info;
    }
    // One thread per diagonal, one block per NB-column panel; the grid spans
    // every column that can intersect the band.
    if (uplo == MagmaUpper) {
        dim3 threads( min(k,n) );
        dim3 grid( (min(m+k-1,n) - 1)/NB + 1 );
        dlaset_band_upper<<< grid, threads, 0, queue >>> (m, n, offdiag, diag, dA, ldda);
    }
    else if (uplo == MagmaLower) {
        dim3 threads( min(k,m) );
        dim3 grid( (min(m,n) - 1)/NB + 1 );
        dlaset_band_lower<<< grid, threads, 0, queue >>> (m, n, offdiag, diag, dA, ldda);
    }
}
/**
@see magmablas_dlaset_band_q
@ingroup magma_daux2
********************************************************************/
/* Convenience wrapper: same as magmablas_dlaset_band_q, executed on the
   default MAGMA stream. */
extern "C" void
magmablas_dlaset_band(
    magma_uplo_t uplo, magma_int_t m, magma_int_t n, magma_int_t k,
    double offdiag, double diag,
    magmaDouble_ptr dA, magma_int_t ldda)
{
    magmablas_dlaset_band_q(uplo, m, n, k, offdiag, diag, dA, ldda, magma_stream);
}
|
e83663904c6580ce8b36aed88aada30fc826e0c6.hip | // !!! This is a file automatically generated by hipify!!!
#include <vector>
#include <iostream>
#include <dirent.h>
#include "errors.h"
#include "debug.h"
#include "constants.cuh"
#include "template.cuh"
#include "matcher.cuh"
#include "io.cuh"
#include "mcc.cuh"
#include "consolidation.cuh"
using namespace std;
// Load minutiae from `input`, build an MCC template on the GPU, and write it
// to `output`. Returns false if loading or saving fails.
bool buildTemplateFromFile(
    const char *input,
    const char *output) {
  int width, height, dpi, n;
  vector<Minutia> minutiae;
  if (!loadMinutiaeFromFile(input, width, height, dpi, n, minutiae))
    return false;
  vector<char> cylinderValidities, cellValidities, cellValues;
  buildTemplate(minutiae, width, height,
    cylinderValidities, cellValidities, cellValues);
  // Wait for the asynchronous template build before reading its output.
  handleError(hipDeviceSynchronize());
  return saveTemplateToFile(
    output, width, height, dpi, n, minutiae,
    cylinderValidities.size(), cylinderValidities, cellValidities, cellValues);
}
// Load two previously built templates, compute the pairwise similarity
// matrix, consolidate it via LSSR, print the global similarity, and write
// the m1 x m2 matrix to `output`. Returns false if either template fails
// to load.
bool buildSimilarityFromTemplate(
    const char *template1,
    const char *template2,
    const char *output) {
  // Template 1: image metadata, minutiae, and m1 packed cylinders.
  int width1, height1, dpi1, n1;
  vector<Minutia> minutiae1;
  int m1;
  vector<char> cylinderValidities1, cellValidities1, cellValues1;
  if (!loadTemplateFromFile(template1,
      width1, height1, dpi1, n1, minutiae1,
      m1, cylinderValidities1, cellValidities1, cellValues1))
    return false;
  // Template 2, same layout.
  int width2, height2, dpi2, n2;
  vector<Minutia> minutiae2;
  int m2;
  vector<char> cylinderValidities2, cellValidities2, cellValues2;
  if (!loadTemplateFromFile(template2,
      width2, height2, dpi2, n2, minutiae2,
      m2, cylinderValidities2, cellValidities2, cellValues2))
    return false;
  vector<float> matrix;  // m1 x m2 pairwise similarity scores
  matchTemplate(
    minutiae1, cylinderValidities1, cellValidities1, cellValues1,
    minutiae2, cylinderValidities2, cellValidities2, cellValues2,
    matrix);
  // Consolidate the per-pair scores into one global similarity (LSSR).
  auto similarity = LSSR(matrix, m1, m2, minutiae1, minutiae2);
  printf("Similarity: %f\n", similarity);
  return saveSimilarityToFile(output, m1, m2, matrix);
}
// Build an MCC descriptor from `minutiae1`, match it against `minutiae2`,
// print the similarity, and write the n x m score matrix to `output`.
// Returns false if loading/building/matching fails.
bool buildSimilarityFromMinutiae(
    const char *minutiae1,
    const char *minutiae2,
    const char *output) {
  MCC mcc(minutiae1);
  if (!mcc.load() || !mcc.build()) return false;
  float similarity;
  int n, m;
  vector<float> matrix;  // n x m per-pair scores filled by match()
  bool ret = mcc.match(minutiae2, similarity, n, m, matrix);
  if (!ret) return false;
  printf("Similarity: %f\n", similarity);
  return saveSimilarityToFile(output, n, m, matrix);
}
// Match `input` (a minutiae file) against every regular file in `targetDir`.
// Collects the target paths, then delegates batch matching to MCC::matchMany.
// Returns false if the directory cannot be opened.
// NOTE(review): `values` is filled by MCC::matchMany but never returned or
// printed here -- presumably matchMany reports results itself; verify.
// NOTE(review): d_type can be DT_UNKNOWN on some filesystems, which would
// skip valid files; a stat() fallback would be more portable.
bool matchMany(const char *input, const char *targetDir) {
  DIR *dir;
  struct dirent *ent;
  vector<string> targets;
  vector<float> values;
  // Normalize the directory path to end with '/'.
  string stargetDir(targetDir);
  if (stargetDir.back() != '/')
    stargetDir += '/';
  if ((dir = opendir(targetDir)) != NULL) {
    while ((ent = readdir(dir)) != NULL) {
      if (ent->d_type != DT_REG)
        continue;  // skip directories, symlinks, etc.
      targets.push_back(stargetDir + string(ent->d_name));
    }
    closedir(dir);
    values.resize(targets.size());
    MCC mcc(input, false);
    mcc.matchMany(targets, values);
    return true;
  }
  return false;
}
// Print the CLI usage for every sub-command to stderr.
void printUsage(char const *argv[]) {
  cerr << "usage: " << argv[0] << " [mcc|template|match] [options]\n"
       << endl
       << "mcc\t\t: <in:minutia1> <in:minutia2> <out:similarity>\n"
       << "template\t: <in:minutia> <out:template>\n"
       << "match\t\t: <in:template1> <in:template2> <out:similarity>\n"
       << "many\t\t: <in:minutia> <in:dir>\n";
}
// CLI entry point: dispatch on the first argument.
//   mcc      <minutia1> <minutia2> <similarity-out>
//   template <minutia>  <template-out>
//   match    <template1> <template2> <similarity-out>
//   many     <minutia>  <target-dir>
// Returns 0 on success, 1 on bad usage or failure.
// Fix: strcmp (exact match) instead of strncmp (prefix match), which
// accepted e.g. "matchmaker" as "match" and "templateXYZ" as "template".
int main(int argc, char const *argv[]) {
  if (argc > 1) {
    if (strcmp(argv[1], "mcc") == 0 && argc == 5) {
      return !buildSimilarityFromMinutiae(argv[2], argv[3], argv[4]);
    } else if (strcmp(argv[1], "template") == 0 && argc == 4) {
      return !buildTemplateFromFile(argv[2], argv[3]);
    } else if (strcmp(argv[1], "match") == 0 && argc == 5) {
      return !buildSimilarityFromTemplate(argv[2], argv[3], argv[4]);
    } else if (strcmp(argv[1], "many") == 0 && argc == 4) {
      return !matchMany(argv[2], argv[3]);
    }
  }
  printUsage(argv);
  return 1;
}
| e83663904c6580ce8b36aed88aada30fc826e0c6.cu | #include <vector>
#include <iostream>
#include <dirent.h>
#include "errors.h"
#include "debug.h"
#include "constants.cuh"
#include "template.cuh"
#include "matcher.cuh"
#include "io.cuh"
#include "mcc.cuh"
#include "consolidation.cuh"
using namespace std;
// Load minutiae from `input`, build an MCC template on the GPU, and write it
// to `output`. Returns false if loading or saving fails.
bool buildTemplateFromFile(
    const char *input,
    const char *output) {
  int width, height, dpi, n;
  vector<Minutia> minutiae;
  if (!loadMinutiaeFromFile(input, width, height, dpi, n, minutiae))
    return false;
  vector<char> cylinderValidities, cellValidities, cellValues;
  buildTemplate(minutiae, width, height,
    cylinderValidities, cellValidities, cellValues);
  // Wait for the asynchronous template build before reading its output.
  handleError(cudaDeviceSynchronize());
  return saveTemplateToFile(
    output, width, height, dpi, n, minutiae,
    cylinderValidities.size(), cylinderValidities, cellValidities, cellValues);
}
// Load two previously built templates, compute the pairwise similarity
// matrix, consolidate it via LSSR, print the global similarity, and write
// the m1 x m2 matrix to `output`. Returns false if either template fails
// to load.
bool buildSimilarityFromTemplate(
    const char *template1,
    const char *template2,
    const char *output) {
  // Template 1: image metadata, minutiae, and m1 packed cylinders.
  int width1, height1, dpi1, n1;
  vector<Minutia> minutiae1;
  int m1;
  vector<char> cylinderValidities1, cellValidities1, cellValues1;
  if (!loadTemplateFromFile(template1,
      width1, height1, dpi1, n1, minutiae1,
      m1, cylinderValidities1, cellValidities1, cellValues1))
    return false;
  // Template 2, same layout.
  int width2, height2, dpi2, n2;
  vector<Minutia> minutiae2;
  int m2;
  vector<char> cylinderValidities2, cellValidities2, cellValues2;
  if (!loadTemplateFromFile(template2,
      width2, height2, dpi2, n2, minutiae2,
      m2, cylinderValidities2, cellValidities2, cellValues2))
    return false;
  vector<float> matrix;  // m1 x m2 pairwise similarity scores
  matchTemplate(
    minutiae1, cylinderValidities1, cellValidities1, cellValues1,
    minutiae2, cylinderValidities2, cellValidities2, cellValues2,
    matrix);
  // Consolidate the per-pair scores into one global similarity (LSSR).
  auto similarity = LSSR(matrix, m1, m2, minutiae1, minutiae2);
  printf("Similarity: %f\n", similarity);
  return saveSimilarityToFile(output, m1, m2, matrix);
}
// Build an MCC descriptor from `minutiae1`, match it against `minutiae2`,
// print the similarity, and write the n x m score matrix to `output`.
// Returns false if loading/building/matching fails.
bool buildSimilarityFromMinutiae(
    const char *minutiae1,
    const char *minutiae2,
    const char *output) {
  MCC mcc(minutiae1);
  if (!mcc.load() || !mcc.build()) return false;
  float similarity;
  int n, m;
  vector<float> matrix;  // n x m per-pair scores filled by match()
  bool ret = mcc.match(minutiae2, similarity, n, m, matrix);
  if (!ret) return false;
  printf("Similarity: %f\n", similarity);
  return saveSimilarityToFile(output, n, m, matrix);
}
// Match `input` (a minutiae file) against every regular file in `targetDir`.
// Collects the target paths, then delegates batch matching to MCC::matchMany.
// Returns false if the directory cannot be opened.
// NOTE(review): `values` is filled by MCC::matchMany but never returned or
// printed here -- presumably matchMany reports results itself; verify.
// NOTE(review): d_type can be DT_UNKNOWN on some filesystems, which would
// skip valid files; a stat() fallback would be more portable.
bool matchMany(const char *input, const char *targetDir) {
  DIR *dir;
  struct dirent *ent;
  vector<string> targets;
  vector<float> values;
  // Normalize the directory path to end with '/'.
  string stargetDir(targetDir);
  if (stargetDir.back() != '/')
    stargetDir += '/';
  if ((dir = opendir(targetDir)) != NULL) {
    while ((ent = readdir(dir)) != NULL) {
      if (ent->d_type != DT_REG)
        continue;  // skip directories, symlinks, etc.
      targets.push_back(stargetDir + string(ent->d_name));
    }
    closedir(dir);
    values.resize(targets.size());
    MCC mcc(input, false);
    mcc.matchMany(targets, values);
    return true;
  }
  return false;
}
// Print the CLI usage for every sub-command to stderr.
void printUsage(char const *argv[]) {
  cerr << "usage: " << argv[0] << " [mcc|template|match] [options]\n"
       << endl
       << "mcc\t\t: <in:minutia1> <in:minutia2> <out:similarity>\n"
       << "template\t: <in:minutia> <out:template>\n"
       << "match\t\t: <in:template1> <in:template2> <out:similarity>\n"
       << "many\t\t: <in:minutia> <in:dir>\n";
}
// Command-line dispatcher. Exit status 0 on success, 1 on failure or
// unrecognized usage (the sub-commands return bool, negated here).
int main(int argc, char const *argv[]) {
if (argc > 1) {
// BUG FIX: exact command matching. The original strncmp prefix tests
// also accepted e.g. "mccfoo" or "matchers" as valid sub-commands.
if (strcmp(argv[1], "mcc") == 0 && argc == 5) {
return !buildSimilarityFromMinutiae(argv[2], argv[3], argv[4]);
} else if (strcmp(argv[1], "template") == 0 && argc == 4) {
return !buildTemplateFromFile(argv[2], argv[3]);
} else if (strcmp(argv[1], "match") == 0 && argc == 5) {
return !buildSimilarityFromTemplate(argv[2], argv[3], argv[4]);
} else if (strcmp(argv[1], "many") == 0 && argc == 4) {
return !matchMany(argv[2], argv[3]);
}
}
printUsage(argv);
return 1;
}
|
93c87514e320e43d74c2bb29bfb0b0615edad5e8.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include "datadef.h"
#include "LCRNG.cuh"
// Sample an isotropic, monoenergetic point source at the origin for N
// histories, one thread per history. rn_bank holds the per-thread LCRNG
// state and is advanced in place. `active` and RNUM_PER_THREAD are
// currently unused but kept so the interface stays unchanged.
__global__ void sample_fixed_source_kernel(unsigned N, unsigned RNUM_PER_THREAD, unsigned* active, unsigned* rn_bank , float * E, source_point* space){
int tid = threadIdx.x+blockIdx.x*blockDim.x;
if (tid >= N){return;}
//remap to active
//tid=active[tid];
// load in this thread's RNG state
unsigned rn = rn_bank[ tid ];
// BUG FIX: use float literals throughout -- the original 2.0/1.0 double
// literals promoted every intermediate into double-precision math.
const float pi = 3.14159265359f ;
const float mu = ( get_rand(&rn) ) * 2.0f - 1.0f;   // cosine of polar angle in [-1,1)
const float theta = ( get_rand(&rn) ) * 2.0f * pi ; // azimuthal angle in [0,2*pi)
//monoenergetic for now
E[tid]=1.0e-6f;
//point source at the origin for now
space[tid].x = 0.0f;
space[tid].y = 0.0f;
space[tid].z = 0.0f;
//set isotropic direction from (mu, theta)
space[tid].xhat = sqrtf(1.0f-mu*mu) * cosf( theta );
space[tid].yhat = sqrtf(1.0f-mu*mu) * sinf( theta );
space[tid].zhat = mu;
// write back the advanced RNG state
rn_bank[tid] = rn;
}
// Host wrapper: launch one thread per history (N total) to sample the fixed
// source, then block until the kernel has finished.
void sample_fixed_source( unsigned NUM_THREADS, unsigned N, unsigned RNUM_PER_THREAD, unsigned* active, unsigned* rn_bank, float * E, source_point* space){
// ceil(N / NUM_THREADS) blocks so every history is covered
unsigned blks = ( N + NUM_THREADS - 1 ) / NUM_THREADS;
hipLaunchKernelGGL(( sample_fixed_source_kernel) , dim3(blks), dim3(NUM_THREADS) , 0, 0, N, RNUM_PER_THREAD, active, rn_bank, E , space );
hipDeviceSynchronize();
}
| 93c87514e320e43d74c2bb29bfb0b0615edad5e8.cu | #include <cuda.h>
#include <stdio.h>
#include "datadef.h"
#include "LCRNG.cuh"
// Sample an isotropic, monoenergetic point source at the origin for N
// histories, one thread per history. rn_bank holds the per-thread LCRNG
// state and is advanced in place. `active` and RNUM_PER_THREAD are
// currently unused but kept so the interface stays unchanged.
__global__ void sample_fixed_source_kernel(unsigned N, unsigned RNUM_PER_THREAD, unsigned* active, unsigned* rn_bank , float * E, source_point* space){
int tid = threadIdx.x+blockIdx.x*blockDim.x;
if (tid >= N){return;}
//remap to active
//tid=active[tid];
// load in this thread's RNG state
unsigned rn = rn_bank[ tid ];
// BUG FIX: use float literals throughout -- the original 2.0/1.0 double
// literals promoted every intermediate into double-precision math.
const float pi = 3.14159265359f ;
const float mu = ( get_rand(&rn) ) * 2.0f - 1.0f;   // cosine of polar angle in [-1,1)
const float theta = ( get_rand(&rn) ) * 2.0f * pi ; // azimuthal angle in [0,2*pi)
//monoenergetic for now
E[tid]=1.0e-6f;
//point source at the origin for now
space[tid].x = 0.0f;
space[tid].y = 0.0f;
space[tid].z = 0.0f;
//set isotropic direction from (mu, theta)
space[tid].xhat = sqrtf(1.0f-mu*mu) * cosf( theta );
space[tid].yhat = sqrtf(1.0f-mu*mu) * sinf( theta );
space[tid].zhat = mu;
// write back the advanced RNG state
rn_bank[tid] = rn;
}
// Host wrapper: launch one thread per history (N total) to sample the fixed
// source, then block until the kernel has finished.
void sample_fixed_source( unsigned NUM_THREADS, unsigned N, unsigned RNUM_PER_THREAD, unsigned* active, unsigned* rn_bank, float * E, source_point* space){
// ceil(N / NUM_THREADS) blocks so every history is covered
unsigned blks = ( N + NUM_THREADS - 1 ) / NUM_THREADS;
sample_fixed_source_kernel <<< blks, NUM_THREADS >>> ( N, RNUM_PER_THREAD, active, rn_bank, E , space );
// BUG FIX: cudaThreadSynchronize() has been deprecated since CUDA 4.0 and
// removed in CUDA 12; cudaDeviceSynchronize() is the supported equivalent.
cudaDeviceSynchronize();
}
|
b0e265b726f891c9a11da23a1c59424e63ae53ec.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "sobelFilterShared2.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
// Sweep tables for the benchmark below: 20 block shapes and 7 square matrix
// sizes.
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Benchmark driver: for each (matrix size, block shape) pair, launch
// sobelFilterShared2 1000 times and print the elapsed wall time as
// [usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)]. argv[1] = number of matrix sizes
// (rows of matrices_) to sweep.
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
// NOTE(review): both device buffers are allocated every sweep iteration
// and never freed -- this leaks device memory across the 140 iterations.
unsigned char *data = NULL;
hipMalloc(&data, XSIZE*YSIZE);
unsigned char *result = NULL;
hipMalloc(&result, XSIZE*YSIZE);
int width = XSIZE;
int height = YSIZE;
// round launch extents up to multiples of the block shape
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
// warm-up launch (hipFree(0) forces context creation)
hipFree(0);hipLaunchKernelGGL((
sobelFilterShared2), dim3(gridBlock),dim3(threadBlock), 0, 0, data,result,width,height);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
sobelFilterShared2), dim3(gridBlock),dim3(threadBlock), 0, 0, data,result,width,height);
}
// NOTE(review): kernel launches are asynchronous and there is no device
// synchronization between this timed loop and reading `end`, so the
// timer largely measures launch enqueue overhead, not kernel runtime.
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
sobelFilterShared2), dim3(gridBlock),dim3(threadBlock), 0, 0, data,result,width,height);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | b0e265b726f891c9a11da23a1c59424e63ae53ec.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "sobelFilterShared2.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
// Sweep tables for the benchmark below: 20 block shapes and 7 square matrix
// sizes.
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Benchmark driver: for each (matrix size, block shape) pair, launch
// sobelFilterShared2 1000 times and print the elapsed wall time as
// [usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)]. argv[1] = number of matrix sizes
// (rows of matrices_) to sweep.
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
unsigned char *data = NULL;
cudaMalloc(&data, XSIZE*YSIZE);
unsigned char *result = NULL;
cudaMalloc(&result, XSIZE*YSIZE);
int width = XSIZE;
int height = YSIZE;
// round launch extents up to multiples of the block shape
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
// warm-up launch (cudaFree(0) forces context creation)
cudaFree(0);
sobelFilterShared2<<<gridBlock,threadBlock>>>(data,result,width,height);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
sobelFilterShared2<<<gridBlock,threadBlock>>>(data,result,width,height);
}
// BUG FIX: kernel launches are asynchronous; without syncing before
// `start` and before `end`, the timer measured little more than launch
// enqueue overhead instead of kernel execution time.
cudaDeviceSynchronize();
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
sobelFilterShared2<<<gridBlock,threadBlock>>>(data,result,width,height);
}
cudaDeviceSynchronize();
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
// BUG FIX: the original leaked both device buffers on every one of the
// 140 sweep iterations.
cudaFree(data);
cudaFree(result);
}
}}
dfbbed330b57e9b55a23093bae82c59e4b735a33.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THH/generic/THHTensorRandom.hip"
#else
#include <ATen/hip/HIPContext.h>
#include <ATen/CUDAGeneratorImpl.h>
#include <ATen/Utils.h>
#include <utility>
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)
// Build the Walker alias-method tables for multinomial sampling from a 1-D
// probability tensor `_probs`: `_J` receives the alias indices and `_q` the
// acceptance probabilities (normalized by their maximum at the end).
void THCTensor_(multinomialAliasSetup)(THCState *state, THCTensor *_probs, THCudaLongTensor *_J, THCTensor *_q){
THArgCheck(_probs->dim() == 1, 1,
"expected 1-D probability tensor, got %d-D probability tensor instead",
_probs->dim());
THAssert(THCTensor_(isContiguous)(state, _q));
THAssert(THCudaLongTensor_isContiguous(state, _J));
THCTensor *probs = THCTensor_(newContiguous)(state, _probs);
THAssert(THCTensor_(isContiguous)(state, probs));
int64_t inputsize = THCTensor_(nElement)(state, probs);
// scratch tensors partitioning entries into "smaller"/"larger" than average
THCudaLongTensor *smaller = THCudaLongTensor_newWithSize1d(state, inputsize);
THCudaLongTensor *larger = THCudaLongTensor_newWithSize1d(state, inputsize);
THCudaLongTensor *smaller_short = THCudaLongTensor_newWithSize1d(state, inputsize);
THCudaLongTensor *larger_short = THCudaLongTensor_newWithSize1d(state, inputsize);
THCudaLongTensor_resize1d(state, _J, inputsize);
THCTensor_(resize1d)(state, _q, inputsize);
scalar_t one = ScalarConvert<int64_t, scalar_t>::to(1);
// NOTE(review): THCCeilDiv already performs a ceiling division, so adding
// BLOCK_SIZE - 1 again over-allocates blocks; harmless only if the kernels
// bounds-check their indices -- confirm against the kernel sources.
int inputBlockDim = THCCeilDiv((int)inputsize + BLOCK_SIZE - 1, BLOCK_SIZE);
// classify each probability against the mean and initialize _q and _J
hipLaunchKernelGGL(( aliasMultinomialFilter)
, dim3(inputBlockDim), dim3(BLOCK_SIZE), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA() ,
THCTensor_(data)(state, _q),
THCTensor_(data)(state, probs),
THCudaLongTensor_data(state, smaller),
THCudaLongTensor_data(state, larger),
THCudaLongTensor_data(state, _J),
THCudaLongTensor_data(state, smaller_short),
THCudaLongTensor_data(state, larger_short),
one, inputsize
);
// compact the two partitions to the indices of their nonzero entries
THCudaLongTensor_nonzero(state, smaller_short, smaller);
THCudaLongTensor_nonzero(state, larger_short, larger);
int h_large_c = THCudaLongTensor_nElement(state, larger_short);
THCudaLongTensor_resize1d(state, smaller_short, inputsize);
THCudaLongTensor_resize1d(state, larger_short, inputsize);
// sequential pairing pass (single thread): fill alias table _J and
// acceptance table _q from the small/large worklists
hipLaunchKernelGGL(( aliasMultinomialSetup)
, dim3(1), dim3(1), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
THCudaLongTensor_data(state, _J),
THCTensor_(data)(state, _q),
inputsize,
THCudaLongTensor_data(state, smaller_short),
THCudaLongTensor_data(state, larger_short),
inputsize - h_large_c, h_large_c
);
// normalize _q by its maximum for entries selected by condDiv
scalar_t q_max = THCTensor_(maxall)(state, _q);
hipLaunchKernelGGL(( condDiv),
dim3(inputBlockDim), dim3(BLOCK_SIZE), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
THCTensor_(data)(state, _q),
THCudaLongTensor_data(state, _J),
inputsize, q_max
);
THCudaLongTensor_free(state, smaller);
THCudaLongTensor_free(state, larger);
THCudaLongTensor_free(state, smaller_short);
THCudaLongTensor_free(state, larger_short);
THCTensor_free(state, probs);
}
// Draw n_sample indices from the alias tables built by multinomialAliasSetup:
// `_q` holds acceptance probabilities, `_J` the alias indices (both length K).
// Results are written into `self` (resized to n_sample). For each sample a
// uniform in [0,K) picks a column and a uniform in [0,1) accepts it or takes
// its alias.
void THCTensor_(multinomialAliasDraw)(THCState *state, THCudaLongTensor *self, THCTensor *_q, THCudaLongTensor *_J, int n_sample, c10::optional<at::Generator> gen_){
THArgCheck(_q->dim() == 1, 1,
"expected 1-D probability table, got %d-D probability table instead",
_q->dim());
THArgCheck(_J->dim() == 1, 2,
"expected 1-D alias table, got %d-D alias table instead",
_J->dim());
THArgCheck(n_sample > 0, 3, "cannot sample <= 0 samples");
THAssert(THCTensor_(isContiguous)(state, _q));
THAssert(THCudaLongTensor_isContiguous(state, _J));
int64_t K = THCudaLongTensor_nElement(state, _J);
THCudaLongTensor_resize1d(state, self, n_sample);
ptrdiff_t size = THCudaLongTensor_nElement(state, self);
// per-sample random numbers: column selector in [0,K), accept/alias coin in [0,1)
THCTensor *uniform = THCTensor_(newWithSize1d)(state, n_sample);
THCTensor *bernoulli = THCTensor_(newWithSize1d)(state, n_sample);
auto out_uniform = THTensor_wrap(uniform);
auto out_bernoulli = THTensor_wrap(bernoulli);
at::native::uniform_(out_uniform, 0, K, gen_);
at::native::uniform_(out_bernoulli, 0, 1, gen_);
hipLaunchKernelGGL(( multinomialAliasDrawKernel)
, dim3(THCCeilDiv((int)n_sample+BLOCK_SIZE-1, BLOCK_SIZE)), dim3(BLOCK_SIZE), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
size,
THCudaLongTensor_data(state, self),
THCudaLongTensor_data(state, _J),
THCTensor_(data)(state, _q),
K,
THCTensor_(data)(state, uniform),
THCTensor_(data)(state, bernoulli)
);
THCTensor_(free)(state, uniform);
THCTensor_(free)(state, bernoulli);
}
#endif
#endif
| dfbbed330b57e9b55a23093bae82c59e4b735a33.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THC/generic/THCTensorRandom.cu"
#else
#include <ATen/cuda/CUDAContext.h>
#include <ATen/CUDAGeneratorImpl.h>
#include <ATen/Utils.h>
#include <utility>
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)
// Build the Walker alias-method tables for multinomial sampling from a 1-D
// probability tensor `_probs`: `_J` receives the alias indices and `_q` the
// acceptance probabilities (normalized by their maximum at the end).
void THCTensor_(multinomialAliasSetup)(THCState *state, THCTensor *_probs, THCudaLongTensor *_J, THCTensor *_q){
THArgCheck(_probs->dim() == 1, 1,
"expected 1-D probability tensor, got %d-D probability tensor instead",
_probs->dim());
THAssert(THCTensor_(isContiguous)(state, _q));
THAssert(THCudaLongTensor_isContiguous(state, _J));
THCTensor *probs = THCTensor_(newContiguous)(state, _probs);
THAssert(THCTensor_(isContiguous)(state, probs));
int64_t inputsize = THCTensor_(nElement)(state, probs);
// scratch tensors partitioning entries into "smaller"/"larger" than average
THCudaLongTensor *smaller = THCudaLongTensor_newWithSize1d(state, inputsize);
THCudaLongTensor *larger = THCudaLongTensor_newWithSize1d(state, inputsize);
THCudaLongTensor *smaller_short = THCudaLongTensor_newWithSize1d(state, inputsize);
THCudaLongTensor *larger_short = THCudaLongTensor_newWithSize1d(state, inputsize);
THCudaLongTensor_resize1d(state, _J, inputsize);
THCTensor_(resize1d)(state, _q, inputsize);
scalar_t one = ScalarConvert<int64_t, scalar_t>::to(1);
// NOTE(review): THCCeilDiv already performs a ceiling division, so adding
// BLOCK_SIZE - 1 again over-allocates blocks; harmless only if the kernels
// bounds-check their indices -- confirm against the kernel sources.
int inputBlockDim = THCCeilDiv((int)inputsize + BLOCK_SIZE - 1, BLOCK_SIZE);
// classify each probability against the mean and initialize _q and _J
aliasMultinomialFilter
<<<inputBlockDim, BLOCK_SIZE, 0, c10::cuda::getCurrentCUDAStream() >>>(
THCTensor_(data)(state, _q),
THCTensor_(data)(state, probs),
THCudaLongTensor_data(state, smaller),
THCudaLongTensor_data(state, larger),
THCudaLongTensor_data(state, _J),
THCudaLongTensor_data(state, smaller_short),
THCudaLongTensor_data(state, larger_short),
one, inputsize
);
// compact the two partitions to the indices of their nonzero entries
THCudaLongTensor_nonzero(state, smaller_short, smaller);
THCudaLongTensor_nonzero(state, larger_short, larger);
int h_large_c = THCudaLongTensor_nElement(state, larger_short);
THCudaLongTensor_resize1d(state, smaller_short, inputsize);
THCudaLongTensor_resize1d(state, larger_short, inputsize);
// sequential pairing pass (single thread): fill alias table _J and
// acceptance table _q from the small/large worklists
aliasMultinomialSetup
<<<1, 1, 0, c10::cuda::getCurrentCUDAStream()>>>(
THCudaLongTensor_data(state, _J),
THCTensor_(data)(state, _q),
inputsize,
THCudaLongTensor_data(state, smaller_short),
THCudaLongTensor_data(state, larger_short),
inputsize - h_large_c, h_large_c
);
// normalize _q by its maximum for entries selected by condDiv
scalar_t q_max = THCTensor_(maxall)(state, _q);
condDiv<<<
inputBlockDim, BLOCK_SIZE, 0, c10::cuda::getCurrentCUDAStream()>>>(
THCTensor_(data)(state, _q),
THCudaLongTensor_data(state, _J),
inputsize, q_max
);
THCudaLongTensor_free(state, smaller);
THCudaLongTensor_free(state, larger);
THCudaLongTensor_free(state, smaller_short);
THCudaLongTensor_free(state, larger_short);
THCTensor_free(state, probs);
}
// Draw n_sample indices from the alias tables built by multinomialAliasSetup:
// `_q` holds acceptance probabilities, `_J` the alias indices (both length K).
// Results are written into `self` (resized to n_sample). For each sample a
// uniform in [0,K) picks a column and a uniform in [0,1) accepts it or takes
// its alias.
void THCTensor_(multinomialAliasDraw)(THCState *state, THCudaLongTensor *self, THCTensor *_q, THCudaLongTensor *_J, int n_sample, c10::optional<at::Generator> gen_){
THArgCheck(_q->dim() == 1, 1,
"expected 1-D probability table, got %d-D probability table instead",
_q->dim());
THArgCheck(_J->dim() == 1, 2,
"expected 1-D alias table, got %d-D alias table instead",
_J->dim());
THArgCheck(n_sample > 0, 3, "cannot sample <= 0 samples");
THAssert(THCTensor_(isContiguous)(state, _q));
THAssert(THCudaLongTensor_isContiguous(state, _J));
int64_t K = THCudaLongTensor_nElement(state, _J);
THCudaLongTensor_resize1d(state, self, n_sample);
ptrdiff_t size = THCudaLongTensor_nElement(state, self);
// per-sample random numbers: column selector in [0,K), accept/alias coin in [0,1)
THCTensor *uniform = THCTensor_(newWithSize1d)(state, n_sample);
THCTensor *bernoulli = THCTensor_(newWithSize1d)(state, n_sample);
auto out_uniform = THTensor_wrap(uniform);
auto out_bernoulli = THTensor_wrap(bernoulli);
at::native::uniform_(out_uniform, 0, K, gen_);
at::native::uniform_(out_bernoulli, 0, 1, gen_);
multinomialAliasDrawKernel
<<<THCCeilDiv((int)n_sample+BLOCK_SIZE-1, BLOCK_SIZE), BLOCK_SIZE, 0, c10::cuda::getCurrentCUDAStream()>>>(
size,
THCudaLongTensor_data(state, self),
THCudaLongTensor_data(state, _J),
THCTensor_(data)(state, _q),
K,
THCTensor_(data)(state, uniform),
THCTensor_(data)(state, bernoulli)
);
THCTensor_(free)(state, uniform);
THCTensor_(free)(state, bernoulli);
}
#endif
#endif
|
0a74ddc4378b47396166b465e6ce8090d37c2274.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Minimal nested-branch test kernel (exercises divergent control flow).
// Only odd threads i < 4 (i.e. i == 1 and i == 3) ever write: A[i] = A[i+1]
// when i < N, otherwise A[i] = 0.
__global__ void test(float *A, const int N){
int i = threadIdx.x;
float x = 0;
if (i < 8){
if (i < N){
// NOTE(review): reads A[i+1]; for i == N-1 this touches A[N], which is
// out of bounds if A holds exactly N elements -- confirm A's extent.
x = A[i+1];
}
if (i % 2 == 1){
if (i < 4){
A[i] = x;
}
}
}
}
| 0a74ddc4378b47396166b465e6ce8090d37c2274.cu | __global__ void test(float *A, const int N){
// Minimal nested-branch test kernel (exercises divergent control flow).
// Only odd threads i < 4 (i.e. i == 1 and i == 3) ever write: A[i] = A[i+1]
// when i < N, otherwise A[i] = 0.
int i = threadIdx.x;
float x = 0;
if (i < 8){
if (i < N){
// NOTE(review): reads A[i+1]; for i == N-1 this touches A[N], which is
// out of bounds if A holds exactly N elements -- confirm A's extent.
x = A[i+1];
}
if (i % 2 == 1){
if (i < 4){
A[i] = x;
}
}
}
}
|
51c82978a2e6eb6893418ff97e845c70e5b04e43.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "interpolate.h"
// Inverse-distance-weighted interpolation at point (*xi,*yi,*zi) from
// `num_data` scattered samples. Each data[i] row holds `dim` coordinates
// followed by the sample value at index `dim`. `directions[k]` maps query
// axis k to 1=x, 2=y, 3=z. The `num_support_points` nearest samples are
// weighted by distance^-exponent. When `reinitiate` is true the bounding-box
// diagonal is (re)computed and cached in a function-local static, so the
// first call for a data set must pass reinitiate = true; the static state
// also makes this function non-reentrant/not thread-safe.
float cpu_radially_interpolate_unstructured(float **data,
float *xi,
float *yi,
float *zi,
int num_data,
int dim,
int *directions,
float exponent,
bool reinitiate,
int num_support_points,
bool *verbose) {
float interpolated_value;
float x[3], minmaxxy[2][3];
// cached "infinite" distance: 2x the data bounding-box diagonal
static float max_distance = 0.0f;
// validate parameters (fatal: this is a batch tool, not a library call)
if (num_support_points < 2) {
fprintf(stderr, "The number of points must be at least 2.\n");
exit(EXIT_FAILURE);
}
if (exponent < 1) {
fprintf(stderr, "The exponent should be larger or equal to unity.\n");
exit(EXIT_FAILURE);
}
if (verbose != NULL) {
static int count = 0;
if (*verbose) {
count++;
if (dim > 2) {
fprintf(stdout, "......Interpolating point %d at (%f %f %f).\n", count, *xi, *yi, *zi);
} else {
fprintf(stdout, "......Interpolating point %d at (%f %f).\n", count, *xi, *yi);
}
}
}
// gather the query coordinates in the axis order given by `directions`
for (int i = 0; i < dim; i++) {
switch (directions[i]) {
case 1:
x[i] = *xi;
break;
case 2:
x[i] = *yi;
break;
case 3:
x[i] = *zi;
break;
}
}
// sorted (ascending) nearest-neighbor working lists
// (variable-length arrays: a C99 extension when compiled as C++)
float distance_to_point[num_support_points];
int closest_points[num_support_points];
if (reinitiate) {
// The bounding box
for (int i = 0; i < dim; i++) {
minmaxxy[0][i] = data[0][i];
minmaxxy[1][i] = data[0][i];
}
for (int i = 0; i < num_data; i++) {
for (int j = 0; j < dim; j++) {
if (data[i][j] < minmaxxy[0][j]) minmaxxy[0][j] = data[i][j];
if (data[i][j] > minmaxxy[1][j]) minmaxxy[1][j] = data[i][j];
}
}
max_distance = 0.0f;
for (int i = 0; i < dim; i++) {
max_distance = max_distance + powf((minmaxxy[1][i] - minmaxxy[0][i]),2.0f);
}
max_distance = 2.0f * sqrtf(max_distance);
}
// Get the supporting points for the interpolation
for (int i = 0; i < num_support_points; i++) {
distance_to_point[i] = max_distance;
}
// -1 sentinel: every byte 0xFF yields int -1
memset(closest_points,-1,sizeof(closest_points));
for (int i = 0; i < num_data; i++) {
// Get radius to input data point
float radius = 0.0f;
for (int j = 0; j < dim; j++) {
radius = radius + powf((x[j] - data[i][j]),2.0f);
}
if (radius > 1.0e-06) {
radius = sqrtf(radius);
} else {
radius = 0.0f;
}
// Check whether one and if so which one
// of the current supporting points has a longer distance
float actual_difference = distance_to_point[num_support_points-1];
int actual_point = 0;
bool is_smaller_than_any = false;
for (int j = 0; j < num_support_points; j++) {
float difference = distance_to_point[j] - radius;
if (difference > 0.0f) {
is_smaller_than_any = true;
if (difference < actual_difference) {
actual_point = j;
actual_difference = difference;
}
}
}
// If so swap and reorder (insertion into the sorted list)
if (is_smaller_than_any) {
for (int j = num_support_points-2; j >= actual_point; j--) {
distance_to_point[j+1] = distance_to_point[j];
closest_points[j+1] = closest_points[j];
}
distance_to_point[actual_point] = radius;
closest_points[actual_point] = i;
}
}
// Do we have a bull's eye (query coincides with a sample)
if (distance_to_point[0] < 1.0e-12) {
interpolated_value = data[closest_points[0]][dim];
} else {
// Interpolate: inverse-distance weighting over the found neighbors
float weight_sum = 0.0f;
interpolated_value = 0.0f;
int used_supporting_point = 0;
for (int i = 0; i < num_support_points; i++) {
if (closest_points[i] >= 0) {
used_supporting_point = used_supporting_point + 1;
float weight = powf(distance_to_point[i],-exponent);
interpolated_value = interpolated_value + weight * data[closest_points[i]][dim];
weight_sum = weight_sum + weight;
}
}
if (used_supporting_point < num_support_points) {
if (dim > 2) {
fprintf(stdout, "Number of supporting points used for point (%f %f %f) = %d smaller than requested %d.\n",
*xi, *yi, *zi, used_supporting_point, num_support_points);
} else {
fprintf(stdout, "Number of supporting points used for point (%f %f) = %d smaller than requested %d.\n",
*xi, *yi, used_supporting_point, num_support_points);
}
}
if (used_supporting_point == 0) {
if (dim > 2) {
fprintf(stderr, "No supporting point for point (%f %f %f) found.\n", *xi, *yi, *zi);
} else {
fprintf(stderr, "No supporting point for point (%f %f) found.\n", *xi, *yi);
}
exit(EXIT_FAILURE);
}
interpolated_value = interpolated_value / weight_sum;
}
return interpolated_value;
}
#ifdef __NVCC__
// Return 2x the diagonal length of the x/y bounding box of the NY*NX grid
// nodes -- used by the interpolation kernel as an "infinite" initial
// neighbor distance.
// NOTE(review): only minmaxxy[*][0..1] are initialized; if dim == 3 the
// final loop reads the uninitialized z column -- callers appear to use
// dim == 2, confirm before passing 3.
__device__ float bounding_box(velo_grid *grid, const int NY, const int NX, const int dim) {
float minmaxxy[2][3];
float max_distance = 0.0f;
minmaxxy[0][0] = grid->x[0];
minmaxxy[1][0] = grid->x[0];
minmaxxy[0][1] = grid->y[0];
minmaxxy[1][1] = grid->y[0];
// min/max over all grid node coordinates
for (int y = 0; y < NY; y++) {
for (int x = 0; x < NX; x++) {
if (grid->x[(y*NX)+x] < minmaxxy[0][0])
minmaxxy[0][0] = grid->x[(y*NX)+x];
if (grid->y[(y*NX)+x] < minmaxxy[0][1])
minmaxxy[0][1] = grid->y[(y*NX)+x];
if (grid->x[(y*NX)+x] > minmaxxy[1][0])
minmaxxy[1][0] = grid->x[(y*NX)+x];
if (grid->y[(y*NX)+x] > minmaxxy[1][1])
minmaxxy[1][1] = grid->y[(y*NX)+x];
}
}
for (int i = 0; i < dim; i++) {
max_distance = max_distance + powf((minmaxxy[1][i] - minmaxxy[0][i]),2.0f);
}
max_distance = 2.0f * sqrtf(max_distance);
return max_distance;
}
// Maintain the per-thread sorted nearest-neighbor lists for the u- and
// v-grids simultaneously: if the candidate nodes (index u on the u-grid,
// index v on the v-grid) are closer than any current entry, insert their
// distance/value/global-index into the ascending lists.
// step == 1: candidate radii and values come from the shared-memory tile
//            (radius arrays indexed by *i, values from u_val/v_val tiles);
// step == 2: tail path -- radius_u/radius_v are scalars and values are read
//            directly from the global val arrays at level z.
__device__ void check_point(float *distance_to_point_u, float *distance_to_point_v, float *closest_points_u,
float *closest_points_v, int *closest_points_idx_u, int *closest_points_idx_v,
float *radius_u, float *radius_v, float *u_val, float *v_val,
int const NY_STAG, int const NX_STAG, int const NY, const int NX, const int z, const int *i,
const int u, const int v, const int num_support_points, const int step) {
// Check whether one and if so which one
// of the current supporting points has a longer distance
float actual_difference_u = distance_to_point_u[num_support_points-1];
float actual_difference_v = distance_to_point_v[num_support_points-1];
int actual_point_u = 0;
int actual_point_v = 0;
bool is_smaller_than_any_u = false;
bool is_smaller_than_any_v = false;
float r_u, r_v;
for (int j = 0; j < num_support_points; j++) {
if (step == 1) {
r_u = radius_u[*i];
r_v = radius_v[*i];
} else {
r_u = *radius_u;
r_v = *radius_v;
}
float difference_u = distance_to_point_u[j] - r_u;
float difference_v = distance_to_point_v[j] - r_v;
if (difference_u > 0.0f) {
is_smaller_than_any_u = true;
if (difference_u < actual_difference_u) {
actual_point_u = j;
actual_difference_u = difference_u;
}
}
if (difference_v > 0.0f) {
is_smaller_than_any_v = true;
if (difference_v < actual_difference_v) {
actual_point_v = j;
actual_difference_v = difference_v;
}
}
}
// If so shift the tail down and insert at the found slot (u-grid list)
if (is_smaller_than_any_u) {
for (int j = num_support_points-2; j >= actual_point_u; j--) {
distance_to_point_u[j+1] = distance_to_point_u[j];
closest_points_u[j+1] = closest_points_u[j];
}
distance_to_point_u[actual_point_u] = r_u;
if (step == 1) {
closest_points_u[actual_point_u] = u_val[*i];
} else {
closest_points_u[actual_point_u] = u_val[(z*(NY*NX_STAG))+u];
}
closest_points_idx_u[actual_point_u] = (z*(NY*NX_STAG))+u;
}
// Same insertion for the v-grid list
if (is_smaller_than_any_v) {
for (int j = num_support_points-2; j >= actual_point_v; j--) {
distance_to_point_v[j+1] = distance_to_point_v[j];
closest_points_v[j+1] = closest_points_v[j];
}
distance_to_point_v[actual_point_v] = r_v;
if (step == 1) {
closest_points_v[actual_point_v] = v_val[*i];
} else {
closest_points_v[actual_point_v] = v_val[(z*(NY_STAG*NX))+v];
}
closest_points_idx_v[actual_point_v] = (z*(NY_STAG*NX))+v;
}
}
// Interpolate the staggered u/v velocity fields onto the mass grid at
// vertical level z, one thread per mass-grid point (IDX in [0, NY*NX)).
// For each mass point the num_support_points nearest u-grid and v-grid
// nodes (x/y distance only) are found and combined with inverse-distance
// weights distance^-exponent. Grid nodes are streamed through shared-memory
// tiles of DEF_UNROLL_SIZE elements; the non-multiple tail is handled by a
// second, direct-global-memory loop.
// NOTE(review): the early `return` for IDX >= NY*NX happens before a loop
// that contains __syncthreads(); if NY*NX is not a multiple of blockDim.x,
// some threads of the last block skip the barrier while others wait --
// undefined behavior. Guarding the body instead of returning would fix it.
__global__ void gpu_radially_interpolate_unstructured(velo_grid *u_grid, velo_grid *v_grid, mass_grid *m_grid,
const int NY_STAG, const int NX_STAG, const int NY, const int NX, const int z, const int dim,
const int num_support_points, const float exponent) {
float interpolated_value_u;
float interpolated_value_v;
float max_distance_u = 0.0f;
float max_distance_v = 0.0f;
// We assume a maximum of four supporting points
float distance_to_point_u[4];
float distance_to_point_v[4];
float closest_points_u[4];
float closest_points_v[4];
int closest_points_idx_u[4];
int closest_points_idx_v[4];
int IDX = blockIdx.x * blockDim.x + threadIdx.x;
if (IDX >= (NY*NX)) return;
// "infinite" initial neighbor distance from each grid's bounding box
max_distance_u = bounding_box(u_grid, NY, NX_STAG, dim);
max_distance_v = bounding_box(v_grid, NY_STAG, NX, dim);
// Get the supporting points for the interpolation
for (int i = 0; i < num_support_points; i++) {
distance_to_point_u[i] = max_distance_u;
distance_to_point_v[i] = max_distance_v;
}
// (the 0.0f fill value is converted to byte 0, which is what memset needs)
memset(closest_points_u, 0.0f, sizeof(closest_points_u));
memset(closest_points_v, 0.0f, sizeof(closest_points_v));
memset(closest_points_idx_u, -1, sizeof(closest_points_idx_u));
memset(closest_points_idx_v, -1, sizeof(closest_points_idx_v));
// leftover element counts for the tail loops below
int u_left = (NY*NX_STAG) % DEF_UNROLL_SIZE;
int v_left = (NY_STAG*NX) % DEF_UNROLL_SIZE;
float radius_u[DEF_UNROLL_SIZE];
float radius_v[DEF_UNROLL_SIZE];
// shared-memory tiles of grid coordinates and values (one element per thread;
// assumes blockDim.x == DEF_UNROLL_SIZE)
__shared__ float ux[DEF_UNROLL_SIZE];
__shared__ float uy[DEF_UNROLL_SIZE];
__shared__ float vx[DEF_UNROLL_SIZE];
__shared__ float vy[DEF_UNROLL_SIZE];
__shared__ float u_val[DEF_UNROLL_SIZE];
__shared__ float v_val[DEF_UNROLL_SIZE];
for (int u=0, v=0; u<NY*NX_STAG && v<NY_STAG*NX; u+=DEF_UNROLL_SIZE, v+=DEF_UNROLL_SIZE) {
// NOTE(review): near the end of the arrays u+threadIdx.x can exceed
// NY*NX_STAG-1 (the loop condition only checks u), reading past the
// grid arrays; also no __syncthreads() at the end of this outer loop,
// so a fast thread may overwrite the tile while others still read it.
ux[threadIdx.x] = u_grid->x[u+threadIdx.x];
uy[threadIdx.x] = u_grid->y[u+threadIdx.x];
vx[threadIdx.x] = v_grid->x[v+threadIdx.x];
vy[threadIdx.x] = v_grid->y[v+threadIdx.x];
u_val[threadIdx.x] = u_grid->val[(z*(NY*NX_STAG))+(u+threadIdx.x)];
v_val[threadIdx.x] = v_grid->val[(z*(NY_STAG*NX))+(v+threadIdx.x)];
__syncthreads();
for (int i = 0; i < DEF_UNROLL_SIZE; i++) {
int uu = u+i;
int vv = v+i;
// squared x/y distance from this mass point to tile element i
radius_u[i] = 0.0f;
radius_u[i] = radius_u[i] + (m_grid->x[IDX]-ux[i])*(m_grid->x[IDX]-ux[i]);
radius_u[i] = radius_u[i] + (m_grid->y[IDX]-uy[i])*(m_grid->y[IDX]-uy[i]);
radius_v[i] = 0.0f;
radius_v[i] = radius_v[i] + (m_grid->x[IDX]-vx[i])*(m_grid->x[IDX]-vx[i]);
radius_v[i] = radius_v[i] + (m_grid->y[IDX]-vy[i])*(m_grid->y[IDX]-vy[i]);
if (radius_u[i] > 1.0e-06) {
radius_u[i] = sqrtf(radius_u[i]);
} else {
radius_u[i] = 0.0f;
}
if (radius_v[i] > 1.0e-06) {
radius_v[i] = sqrtf(radius_v[i]);
} else {
radius_v[i] = 0.0f;
}
check_point(distance_to_point_u, distance_to_point_v, closest_points_u,
closest_points_v, closest_points_idx_u, closest_points_idx_v,
radius_u, radius_v, u_val, v_val,
NY_STAG, NX_STAG, NY, NX, z, &i,
uu, vv, num_support_points, 1);
} // End of 16 elements block
}
// tail loop: remaining elements processed straight from global memory
for (int u=(NY*NX_STAG)-u_left, v=(NY_STAG*NX)-v_left; u<(NY*NX_STAG) && v<(NY_STAG*NX); u++, v++) {
float radius_u = 0.0f;
float radius_v = 0.0f;
radius_u = radius_u + (m_grid->x[IDX]-u_grid->x[u])*(m_grid->x[IDX]-u_grid->x[u]);
radius_u = radius_u + (m_grid->y[IDX]-u_grid->y[u])*(m_grid->y[IDX]-u_grid->y[u]);
radius_v = radius_v + (m_grid->x[IDX]-v_grid->x[v])*(m_grid->x[IDX]-v_grid->x[v]);
radius_v = radius_v + (m_grid->y[IDX]-v_grid->y[v])*(m_grid->y[IDX]-v_grid->y[v]);
if (radius_u > 1.0e-06) {
radius_u = sqrtf(radius_u);
} else {
radius_u = 0.0f;
}
if (radius_v > 1.0e-06) {
radius_v = sqrtf(radius_v);
} else {
radius_v = 0.0f;
}
check_point(distance_to_point_u, distance_to_point_v, closest_points_u,
closest_points_v, closest_points_idx_u, closest_points_idx_v,
&radius_u, &radius_v, u_grid->val, v_grid->val,
NY_STAG, NX_STAG, NY, NX, z, NULL,
u, v, num_support_points, 2);
}
// Do we have a bull's eye (mass point coincides with a grid node)
if (distance_to_point_u[0] < 1.0e-12 || distance_to_point_v[0] < 1.0e-12) {
if (distance_to_point_u[0] < 1.0e-12) interpolated_value_u = closest_points_u[0];
if (distance_to_point_v[0] < 1.0e-12) interpolated_value_v = closest_points_v[0];
} else {
// Interpolate: inverse-distance weighting over the found neighbors
float weight_sum_u = 0.0f;
float weight_sum_v = 0.0f;
interpolated_value_u = 0.0f;
interpolated_value_v = 0.0f;
int used_supporting_point_u = 0;
int used_supporting_point_v = 0;
for (int i = 0; i < num_support_points; i++) {
if (closest_points_idx_u[i] >= 0) {
used_supporting_point_u = used_supporting_point_u + 1;
float weight = powf(distance_to_point_u[i],-exponent);
interpolated_value_u = interpolated_value_u + weight * closest_points_u[i];
weight_sum_u = weight_sum_u + weight;
}
}
for (int i = 0; i < num_support_points; i++) {
if (closest_points_idx_v[i] >= 0) {
used_supporting_point_v = used_supporting_point_v + 1;
float weight = powf(distance_to_point_v[i],-exponent);
interpolated_value_v = interpolated_value_v + weight * closest_points_v[i];
weight_sum_v = weight_sum_v + weight;
}
}
if (used_supporting_point_u == 0 || used_supporting_point_v == 0) {
printf("Failure in interpolation due to lack of supporting points.\n");
}
interpolated_value_u = interpolated_value_u / weight_sum_u;
interpolated_value_v = interpolated_value_v / weight_sum_v;
}
m_grid->u[IDX] = interpolated_value_u;
m_grid->v[IDX] = interpolated_value_v;
}
// Structured-grid destaggering: one thread per mass point averages the value
// at the same flat index in the first two (NY*NX)-sized planes of the u and
// v grids and stores the results in the mass grid.
__global__ void gpu_radially_interpolate_structured_horiz(velo_grid *u_grid, velo_grid *v_grid, mass_grid *m_grid,
const int NY, const int NX) {
int IDX = blockIdx.x * blockDim.x + threadIdx.x;
if (IDX >= (NY*NX)) return;
m_grid->u[IDX] = 0.5f * (u_grid->val[IDX] + u_grid->val[((NY*NX)*1)+IDX]);
m_grid->v[IDX] = 0.5f * (v_grid->val[IDX] + v_grid->val[((NY*NX)*1)+IDX]);
}
// Structured-grid vertical destaggering: one thread per mass point averages
// the value at the same flat index in the first two (NY*NX)-sized planes of
// the w grid.
__global__ void gpu_radially_interpolate_structured_vert(velo_grid *w_grid, mass_grid *m_grid,
const int NY, const int NX) {
int IDX = blockIdx.x * blockDim.x + threadIdx.x;
if (IDX >= (NY*NX)) return;
m_grid->w[IDX] = 0.5f * (w_grid->val[IDX] + w_grid->val[((NY*NX)*1)+IDX]);
}
// CPU destaggering helper: average the values at flat index `idx` in the
// first two (NY*NX)-sized planes of the velocity grid.
float cpu_radially_interpolate_structured_horiz(velo_grid *velo_grid, int idx, const int NY, const int NX) {
    const int plane_stride = NY * NX;
    const float lower = velo_grid->val[idx];
    const float upper = velo_grid->val[plane_stride + idx];
    return 0.5f * (lower + upper);
}
// CPU vertical destaggering helper: average the values at flat index `idx`
// in the first two (NY*NX)-sized planes of the w grid.
float cpu_radially_interpolate_structured_vert(velo_grid *w_grid, int idx, const int NY, const int NX) {
    const int plane_stride = NY * NX;
    const float lower = w_grid->val[idx];
    const float upper = w_grid->val[plane_stride + idx];
    return 0.5f * (lower + upper);
}
#endif
| 51c82978a2e6eb6893418ff97e845c70e5b04e43.cu | #include "interpolate.h"
// Inverse-distance-weighted interpolation at point (*xi,*yi,*zi) from
// `num_data` scattered samples. Each data[i] row holds `dim` coordinates
// followed by the sample value at index `dim`. `directions[k]` maps query
// axis k to 1=x, 2=y, 3=z. The `num_support_points` nearest samples are
// weighted by distance^-exponent. When `reinitiate` is true the bounding-box
// diagonal is (re)computed and cached in a function-local static, so the
// first call for a data set must pass reinitiate = true; the static state
// also makes this function non-reentrant/not thread-safe.
float cpu_radially_interpolate_unstructured(float **data,
float *xi,
float *yi,
float *zi,
int num_data,
int dim,
int *directions,
float exponent,
bool reinitiate,
int num_support_points,
bool *verbose) {
float interpolated_value;
float x[3], minmaxxy[2][3];
// cached "infinite" distance: 2x the data bounding-box diagonal
static float max_distance = 0.0f;
// validate parameters (fatal: this is a batch tool, not a library call)
if (num_support_points < 2) {
fprintf(stderr, "The number of points must be at least 2.\n");
exit(EXIT_FAILURE);
}
if (exponent < 1) {
fprintf(stderr, "The exponent should be larger or equal to unity.\n");
exit(EXIT_FAILURE);
}
if (verbose != NULL) {
static int count = 0;
if (*verbose) {
count++;
if (dim > 2) {
fprintf(stdout, "......Interpolating point %d at (%f %f %f).\n", count, *xi, *yi, *zi);
} else {
fprintf(stdout, "......Interpolating point %d at (%f %f).\n", count, *xi, *yi);
}
}
}
// gather the query coordinates in the axis order given by `directions`
for (int i = 0; i < dim; i++) {
switch (directions[i]) {
case 1:
x[i] = *xi;
break;
case 2:
x[i] = *yi;
break;
case 3:
x[i] = *zi;
break;
}
}
// sorted (ascending) nearest-neighbor working lists
// (variable-length arrays: a C99 extension when compiled as C++)
float distance_to_point[num_support_points];
int closest_points[num_support_points];
if (reinitiate) {
// The bounding box
for (int i = 0; i < dim; i++) {
minmaxxy[0][i] = data[0][i];
minmaxxy[1][i] = data[0][i];
}
for (int i = 0; i < num_data; i++) {
for (int j = 0; j < dim; j++) {
if (data[i][j] < minmaxxy[0][j]) minmaxxy[0][j] = data[i][j];
if (data[i][j] > minmaxxy[1][j]) minmaxxy[1][j] = data[i][j];
}
}
max_distance = 0.0f;
for (int i = 0; i < dim; i++) {
max_distance = max_distance + powf((minmaxxy[1][i] - minmaxxy[0][i]),2.0f);
}
max_distance = 2.0f * sqrtf(max_distance);
}
// Get the supporting points for the interpolation
for (int i = 0; i < num_support_points; i++) {
distance_to_point[i] = max_distance;
}
// -1 sentinel: every byte 0xFF yields int -1
memset(closest_points,-1,sizeof(closest_points));
for (int i = 0; i < num_data; i++) {
// Get radius to input data point
float radius = 0.0f;
for (int j = 0; j < dim; j++) {
radius = radius + powf((x[j] - data[i][j]),2.0f);
}
if (radius > 1.0e-06) {
radius = sqrtf(radius);
} else {
radius = 0.0f;
}
// Check whether one and if so which one
// of the current supporting points has a longer distance
float actual_difference = distance_to_point[num_support_points-1];
int actual_point = 0;
bool is_smaller_than_any = false;
for (int j = 0; j < num_support_points; j++) {
float difference = distance_to_point[j] - radius;
if (difference > 0.0f) {
is_smaller_than_any = true;
if (difference < actual_difference) {
actual_point = j;
actual_difference = difference;
}
}
}
// If so swap and reorder (insertion into the sorted list)
if (is_smaller_than_any) {
for (int j = num_support_points-2; j >= actual_point; j--) {
distance_to_point[j+1] = distance_to_point[j];
closest_points[j+1] = closest_points[j];
}
distance_to_point[actual_point] = radius;
closest_points[actual_point] = i;
}
}
// Do we have a bull's eye (query coincides with a sample)
if (distance_to_point[0] < 1.0e-12) {
interpolated_value = data[closest_points[0]][dim];
} else {
// Interpolate: inverse-distance weighting over the found neighbors
float weight_sum = 0.0f;
interpolated_value = 0.0f;
int used_supporting_point = 0;
for (int i = 0; i < num_support_points; i++) {
if (closest_points[i] >= 0) {
used_supporting_point = used_supporting_point + 1;
float weight = powf(distance_to_point[i],-exponent);
interpolated_value = interpolated_value + weight * data[closest_points[i]][dim];
weight_sum = weight_sum + weight;
}
}
if (used_supporting_point < num_support_points) {
if (dim > 2) {
fprintf(stdout, "Number of supporting points used for point (%f %f %f) = %d smaller than requested %d.\n",
*xi, *yi, *zi, used_supporting_point, num_support_points);
} else {
fprintf(stdout, "Number of supporting points used for point (%f %f) = %d smaller than requested %d.\n",
*xi, *yi, used_supporting_point, num_support_points);
}
}
if (used_supporting_point == 0) {
if (dim > 2) {
fprintf(stderr, "No supporting point for point (%f %f %f) found.\n", *xi, *yi, *zi);
} else {
fprintf(stderr, "No supporting point for point (%f %f) found.\n", *xi, *yi);
}
exit(EXIT_FAILURE);
}
interpolated_value = interpolated_value / weight_sum;
}
return interpolated_value;
}
#ifdef __NVCC__
// Returns a conservative search radius for the inverse-distance search:
// twice the diagonal of the axis-aligned box enclosing all (x, y) grid
// coordinates.
// NOTE(review): only the x and y rows of the bounds table are filled, so the
// dim loop is only meaningful for dim <= 2 -- confirm callers never pass 3.
__device__ float bounding_box(velo_grid *grid, const int NY, const int NX, const int dim) {
    float bounds[2][3];               // [0] = per-axis minimum, [1] = per-axis maximum
    bounds[0][0] = bounds[1][0] = grid->x[0];
    bounds[0][1] = bounds[1][1] = grid->y[0];
    const int n = NY * NX;
    // Single flat pass over the grid (same visit order as the original y/x loops).
    for (int idx = 0; idx < n; idx++) {
        const float px = grid->x[idx];
        const float py = grid->y[idx];
        if (px < bounds[0][0]) bounds[0][0] = px;
        if (py < bounds[0][1]) bounds[0][1] = py;
        if (px > bounds[1][0]) bounds[1][0] = px;
        if (py > bounds[1][1]) bounds[1][1] = py;
    }
    float diag2 = 0.0f;
    for (int i = 0; i < dim; i++) {
        diag2 = diag2 + powf((bounds[1][i] - bounds[0][i]), 2.0f);
    }
    return 2.0f * sqrtf(diag2);
}
// Inserts a candidate supporting point into the per-thread lists of the
// num_support_points closest points found so far, independently for the
// u field and the v field. distance_to_point_* are kept ordered so that the
// worst (largest) distance sits in the last slot.
//
// step == 1: candidate radius and field value come from the shared-memory
//            tile arrays, indexed by *i (i must be non-NULL in this mode).
// step != 1: radius_u/radius_v point at single scalars and the field value is
//            read from the global val arrays at the level-z slab offset.
// closest_points_*     receive the field value of an accepted candidate;
// closest_points_idx_* receive its flat index into the level-z slab
//                      (an index >= 0 marks a valid entry).
__device__ void check_point(float *distance_to_point_u, float *distance_to_point_v, float *closest_points_u,
                            float *closest_points_v, int *closest_points_idx_u, int *closest_points_idx_v,
                            float *radius_u, float *radius_v, float *u_val, float *v_val,
                            int const NY_STAG, int const NX_STAG, int const NY, const int NX, const int z, const int *i,
                            const int u, const int v, const int num_support_points, const int step) {
    // Check whether one and if so which one
    // of the current supporting points has a longer distance
    float actual_difference_u = distance_to_point_u[num_support_points-1];
    float actual_difference_v = distance_to_point_v[num_support_points-1];
    int actual_point_u = 0;
    int actual_point_v = 0;
    bool is_smaller_than_any_u = false;
    bool is_smaller_than_any_v = false;
    float r_u, r_v;
    for (int j = 0; j < num_support_points; j++) {
        // Candidate radius: per-tile-element in step 1, plain scalar otherwise.
        if (step == 1) {
            r_u = radius_u[*i];
            r_v = radius_v[*i];
        } else {
            r_u = *radius_u;
            r_v = *radius_v;
        }
        float difference_u = distance_to_point_u[j] - r_u;
        float difference_v = distance_to_point_v[j] - r_v;
        // A positive difference means the candidate is closer than entry j;
        // the slot with the smallest positive margin is the insertion point.
        if (difference_u > 0.0f) {
            is_smaller_than_any_u = true;
            if (difference_u < actual_difference_u) {
                actual_point_u = j;
                actual_difference_u = difference_u;
            }
        }
        if (difference_v > 0.0f) {
            is_smaller_than_any_v = true;
            if (difference_v < actual_difference_v) {
                actual_point_v = j;
                actual_difference_v = difference_v;
            }
        }
    }
    // If so swap and reorder: shift the worse entries one slot down, then
    // write the candidate's distance, value and slab index into its slot.
    if (is_smaller_than_any_u) {
        for (int j = num_support_points-2; j >= actual_point_u; j--) {
            distance_to_point_u[j+1] = distance_to_point_u[j];
            closest_points_u[j+1] = closest_points_u[j];
        }
        distance_to_point_u[actual_point_u] = r_u;
        if (step == 1) {
            closest_points_u[actual_point_u] = u_val[*i];
        } else {
            closest_points_u[actual_point_u] = u_val[(z*(NY*NX_STAG))+u];
        }
        closest_points_idx_u[actual_point_u] = (z*(NY*NX_STAG))+u;
    }
    if (is_smaller_than_any_v) {
        for (int j = num_support_points-2; j >= actual_point_v; j--) {
            distance_to_point_v[j+1] = distance_to_point_v[j];
            closest_points_v[j+1] = closest_points_v[j];
        }
        distance_to_point_v[actual_point_v] = r_v;
        if (step == 1) {
            closest_points_v[actual_point_v] = v_val[*i];
        } else {
            closest_points_v[actual_point_v] = v_val[(z*(NY_STAG*NX))+v];
        }
        closest_points_idx_v[actual_point_v] = (z*(NY_STAG*NX))+v;
    }
}
/*
 * Inverse-distance-weighted interpolation of the staggered u/v velocity
 * fields onto the mass grid points, one thread per mass point.
 *
 * Fixes over the previous revision:
 *  - no early return before __syncthreads(): out-of-range threads still take
 *    part in the barriers (an early return there is undefined behaviour);
 *  - the tiled loop covers only full DEF_UNROLL_SIZE tiles, so the staging
 *    loads can no longer read past the end of the velocity arrays and the
 *    tail elements are no longer processed twice;
 *  - a second __syncthreads() keeps a tile from being overwritten while
 *    other threads are still reading it;
 *  - u and v are interpolated independently, so a bull's-eye hit in one
 *    field no longer leaves the other field's output uninitialized;
 *  - the weight sum is not divided by when no supporting point was found.
 *
 * Requires blockDim.x >= DEF_UNROLL_SIZE (each thread stages one tile slot)
 * and num_support_points <= 4 (size of the local candidate lists).
 */
__global__ void gpu_radially_interpolate_unstructured(velo_grid *u_grid, velo_grid *v_grid, mass_grid *m_grid,
                                                      const int NY_STAG, const int NX_STAG, const int NY, const int NX, const int z, const int dim,
                                                      const int num_support_points, const float exponent) {
    float interpolated_value_u = 0.0f;
    float interpolated_value_v = 0.0f;
    // We assume a maximum of four supporting points
    float distance_to_point_u[4];
    float distance_to_point_v[4];
    float closest_points_u[4];
    float closest_points_v[4];
    int closest_points_idx_u[4];
    int closest_points_idx_v[4];
    int IDX = blockIdx.x * blockDim.x + threadIdx.x;
    // Inactive threads only help stage shared memory and hit the barriers.
    const bool active = (IDX < (NY * NX));
    const float mx = active ? m_grid->x[IDX] : 0.0f;
    const float my = active ? m_grid->y[IDX] : 0.0f;
    const float max_distance_u = bounding_box(u_grid, NY, NX_STAG, dim);
    const float max_distance_v = bounding_box(v_grid, NY_STAG, NX, dim);
    // Seed the candidate lists with the "infinitely far away" distance.
    for (int i = 0; i < num_support_points; i++) {
        distance_to_point_u[i] = max_distance_u;
        distance_to_point_v[i] = max_distance_v;
    }
    memset(closest_points_u, 0, sizeof(closest_points_u));
    memset(closest_points_v, 0, sizeof(closest_points_v));
    memset(closest_points_idx_u, -1, sizeof(closest_points_idx_u));
    memset(closest_points_idx_v, -1, sizeof(closest_points_idx_v));
    const int u_left = (NY * NX_STAG) % DEF_UNROLL_SIZE;   // tail not covered by full tiles
    const int v_left = (NY_STAG * NX) % DEF_UNROLL_SIZE;
    float radius_u[DEF_UNROLL_SIZE];
    float radius_v[DEF_UNROLL_SIZE];
    __shared__ float ux[DEF_UNROLL_SIZE];
    __shared__ float uy[DEF_UNROLL_SIZE];
    __shared__ float vx[DEF_UNROLL_SIZE];
    __shared__ float vy[DEF_UNROLL_SIZE];
    __shared__ float u_val[DEF_UNROLL_SIZE];
    __shared__ float v_val[DEF_UNROLL_SIZE];
    // Full tiles of supporting points, staged through shared memory.
    for (int u = 0, v = 0;
         u < (NY * NX_STAG) - u_left && v < (NY_STAG * NX) - v_left;
         u += DEF_UNROLL_SIZE, v += DEF_UNROLL_SIZE) {
        if (threadIdx.x < DEF_UNROLL_SIZE) {   // guard shared stores when blockDim.x > tile size
            ux[threadIdx.x] = u_grid->x[u + threadIdx.x];
            uy[threadIdx.x] = u_grid->y[u + threadIdx.x];
            vx[threadIdx.x] = v_grid->x[v + threadIdx.x];
            vy[threadIdx.x] = v_grid->y[v + threadIdx.x];
            u_val[threadIdx.x] = u_grid->val[(z * (NY * NX_STAG)) + (u + threadIdx.x)];
            v_val[threadIdx.x] = v_grid->val[(z * (NY_STAG * NX)) + (v + threadIdx.x)];
        }
        __syncthreads();   // tile fully staged before any thread reads it
        if (active) {
            for (int i = 0; i < DEF_UNROLL_SIZE; i++) {
                int uu = u + i;
                int vv = v + i;
                radius_u[i] = (mx - ux[i]) * (mx - ux[i]) + (my - uy[i]) * (my - uy[i]);
                radius_v[i] = (mx - vx[i]) * (mx - vx[i]) + (my - vy[i]) * (my - vy[i]);
                // Radii below the noise floor count as exact hits (bull's eye).
                radius_u[i] = (radius_u[i] > 1.0e-06f) ? sqrtf(radius_u[i]) : 0.0f;
                radius_v[i] = (radius_v[i] > 1.0e-06f) ? sqrtf(radius_v[i]) : 0.0f;
                check_point(distance_to_point_u, distance_to_point_v, closest_points_u,
                            closest_points_v, closest_points_idx_u, closest_points_idx_v,
                            radius_u, radius_v, u_val, v_val,
                            NY_STAG, NX_STAG, NY, NX, z, &i,
                            uu, vv, num_support_points, 1);
            }
        }
        __syncthreads();   // tile fully consumed before the next iteration overwrites it
    }
    if (active) {
        // Tail elements that did not fill a whole tile, straight from global memory.
        for (int u = (NY * NX_STAG) - u_left, v = (NY_STAG * NX) - v_left;
             u < (NY * NX_STAG) && v < (NY_STAG * NX); u++, v++) {
            float tail_radius_u = (mx - u_grid->x[u]) * (mx - u_grid->x[u])
                                + (my - u_grid->y[u]) * (my - u_grid->y[u]);
            float tail_radius_v = (mx - v_grid->x[v]) * (mx - v_grid->x[v])
                                + (my - v_grid->y[v]) * (my - v_grid->y[v]);
            tail_radius_u = (tail_radius_u > 1.0e-06f) ? sqrtf(tail_radius_u) : 0.0f;
            tail_radius_v = (tail_radius_v > 1.0e-06f) ? sqrtf(tail_radius_v) : 0.0f;
            check_point(distance_to_point_u, distance_to_point_v, closest_points_u,
                        closest_points_v, closest_points_idx_u, closest_points_idx_v,
                        &tail_radius_u, &tail_radius_v, u_grid->val, v_grid->val,
                        NY_STAG, NX_STAG, NY, NX, z, NULL,
                        u, v, num_support_points, 2);
        }
        // u component: a bull's eye takes the sample value directly, otherwise
        // use inverse-distance weights over the collected supporting points.
        if (distance_to_point_u[0] < 1.0e-12f) {
            interpolated_value_u = closest_points_u[0];
        } else {
            float weight_sum_u = 0.0f;
            int used_supporting_point_u = 0;
            for (int i = 0; i < num_support_points; i++) {
                if (closest_points_idx_u[i] >= 0) {
                    used_supporting_point_u = used_supporting_point_u + 1;
                    float weight = powf(distance_to_point_u[i], -exponent);
                    interpolated_value_u = interpolated_value_u + weight * closest_points_u[i];
                    weight_sum_u = weight_sum_u + weight;
                }
            }
            if (used_supporting_point_u > 0) {
                interpolated_value_u = interpolated_value_u / weight_sum_u;
            } else {
                printf("Failure in interpolation due to lack of supporting points.\n");
            }
        }
        // v component, handled the same way but independently of u.
        if (distance_to_point_v[0] < 1.0e-12f) {
            interpolated_value_v = closest_points_v[0];
        } else {
            float weight_sum_v = 0.0f;
            int used_supporting_point_v = 0;
            for (int i = 0; i < num_support_points; i++) {
                if (closest_points_idx_v[i] >= 0) {
                    used_supporting_point_v = used_supporting_point_v + 1;
                    float weight = powf(distance_to_point_v[i], -exponent);
                    interpolated_value_v = interpolated_value_v + weight * closest_points_v[i];
                    weight_sum_v = weight_sum_v + weight;
                }
            }
            if (used_supporting_point_v > 0) {
                interpolated_value_v = interpolated_value_v / weight_sum_v;
            } else {
                printf("Failure in interpolation due to lack of supporting points.\n");
            }
        }
        m_grid->u[IDX] = interpolated_value_u;
        m_grid->v[IDX] = interpolated_value_v;
    }
}
// Structured-grid shortcut: averages each staggered u/v value with the value
// one plane (NY*NX elements) further on and writes it to the mass grid.
// Launch layout: 1-D grid of 1-D blocks covering NY*NX points.
__global__ void gpu_radially_interpolate_structured_horiz(velo_grid *u_grid, velo_grid *v_grid, mass_grid *m_grid,
                                                          const int NY, const int NX) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < NY * NX) {
        const int plane = NY * NX;   // stride between consecutive planes of val
        m_grid->u[idx] = 0.5f * (u_grid->val[idx] + u_grid->val[plane + idx]);
        m_grid->v[idx] = 0.5f * (v_grid->val[idx] + v_grid->val[plane + idx]);
    }
}
// Structured-grid shortcut for the w field: averages each staggered value
// with the value one plane (NY*NX elements) further on.
__global__ void gpu_radially_interpolate_structured_vert(velo_grid *w_grid, mass_grid *m_grid,
                                                         const int NY, const int NX) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < NY * NX) {
        const int plane = NY * NX;   // stride between consecutive planes of val
        m_grid->w[idx] = 0.5f * (w_grid->val[idx] + w_grid->val[plane + idx]);
    }
}
// Host-side counterpart of the horizontal structured interpolation: averages
// the staggered value at idx with the value one plane (NY*NX) further on.
float cpu_radially_interpolate_structured_horiz(velo_grid *velo_grid, int idx, const int NY, const int NX) {
    const int plane = NY * NX;
    return 0.5f * (velo_grid->val[idx] + velo_grid->val[plane + idx]);
}
// Host-side counterpart of the vertical structured interpolation for w.
float cpu_radially_interpolate_structured_vert(velo_grid *w_grid, int idx, const int NY, const int NX) {
    const int plane = NY * NX;
    return 0.5f * (w_grid->val[idx] + w_grid->val[plane + idx]);
}
#endif
|
b710c555e95689209aac7ce2c1ea34fc6daa8b7b.hip | // !!! This is a file automatically generated by hipify!!!
/*
* This file belongs to the Galois project, a C++ library for exploiting parallelism.
* The code is being released under the terms of the 3-Clause BSD License (a
* copy is located in LICENSE.txt at the top-level directory).
*
* Copyright (C) 2018, The University of Texas at Austin. All rights reserved.
* UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS
* SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF
* PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF
* DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH
* RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
* shall University be liable for incidental, special, indirect, direct or
* consequential damages or loss of profits, interruption of business, or
* related expenses which may arise from use of Software or Documentation,
* including but not limited to those resulting from defects in Software and/or
* Documentation, or loss or inaccuracy of data of any kind.
*/
//#define _GOPT_DEBUG 1
#include <algorithm>
#include <cstring>
#include <fstream>
#include <iostream>
#include <limits>
#include <map>
#include <math.h>
#include <set>
#include <string>
#include <vector>
#include <libgen.h>
#include <hip/hip_runtime.h>
#include "SGDAsyncEdgeCu.h"
/*
 * Driver for the asynchronous edge-based SGD functor. An optional single
 * command-line argument overrides the default input graph; the functor is
 * then invoked with 5 (presumably the round count -- confirm in
 * SGDAsyncEdgeCu.h).
 */
int main(int argc, char ** args) {
    const char * fname = (argc == 2) ? args[1]
                                     : "/net/ohm/export/iss/inputs/GaloisGPU/bgg.gr";
    typedef SGDAsynEdgeCudaFunctor SGDFunctorTy;
    SGDFunctorTy sgd(false, fname);
    sgd(5);
    return 0;
}
| b710c555e95689209aac7ce2c1ea34fc6daa8b7b.cu | /*
* This file belongs to the Galois project, a C++ library for exploiting parallelism.
* The code is being released under the terms of the 3-Clause BSD License (a
* copy is located in LICENSE.txt at the top-level directory).
*
* Copyright (C) 2018, The University of Texas at Austin. All rights reserved.
* UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS
* SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF
* PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF
* DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH
* RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
* shall University be liable for incidental, special, indirect, direct or
* consequential damages or loss of profits, interruption of business, or
* related expenses which may arise from use of Software or Documentation,
* including but not limited to those resulting from defects in Software and/or
* Documentation, or loss or inaccuracy of data of any kind.
*/
//#define _GOPT_DEBUG 1
#include <algorithm>
#include <cstring>
#include <fstream>
#include <iostream>
#include <limits>
#include <map>
#include <math.h>
#include <set>
#include <string>
#include <vector>
#include <libgen.h>
#include <cuda.h>
#include "SGDAsyncEdgeCu.h"
/*
 * Driver for the asynchronous edge-based SGD functor. An optional single
 * command-line argument overrides the default input graph; the functor is
 * then invoked with 5 (presumably the round count -- confirm in
 * SGDAsyncEdgeCu.h).
 */
int main(int argc, char ** args) {
    const char * fname = (argc == 2) ? args[1]
                                     : "/net/ohm/export/iss/inputs/GaloisGPU/bgg.gr";
    typedef SGDAsynEdgeCudaFunctor SGDFunctorTy;
    SGDFunctorTy sgd(false, fname);
    sgd(5);
    return 0;
}
|
7991abd087126beb1021522c75a83b55a1cf4419.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* NVIDIA Corporation is strictly prohibited.
*
* Please refer to the applicable NVIDIA end user license agreement (EULA)
* associated with this source code for terms and conditions that govern
* your use of this NVIDIA software.
*
*/
#include "hip/hip_runtime.h"
#include "../common/book.h"
#include "../common/cpu_anim.h"
#define DIM 2000
#define PI 3.1415926535897932f
// Renders one greyscale ripple pixel per thread into an RGBA byte buffer.
// Launch layout: 2-D grid of 2-D blocks covering a DIM x DIM image
// (DIM is a multiple of 16, so no bounds check is needed).
// ticks animates the wave phase over time.
__global__ void kernel(unsigned char *ptr, int ticks) {
    // map from threadIdx/BlockIdx to pixel position
    int x = threadIdx.x + blockIdx.x * blockDim.x;
    int y = threadIdx.y + blockIdx.y * blockDim.y;
    int offset = x + y * blockDim.x * gridDim.x;
    // now calculate the value at that position
    float fx = x - DIM / 2;
    float fy = y - DIM / 2;
    float d = sqrtf(fx * fx + fy * fy);
    // cosf instead of cos: the double-precision call forced a
    // float->double->float round trip on every pixel
    unsigned char grey = (unsigned char)(128.0f + 127.0f *
                                         cosf(d / 10.0f - ticks / 7.0f) /
                                         (d / 10.0f + 1.0f));
    ptr[offset * 4 + 0] = grey;
    ptr[offset * 4 + 1] = grey;
    ptr[offset * 4 + 2] = grey;
    ptr[offset * 4 + 3] = 255;
}
// Shared state handed to the animation callbacks: the device-side frame
// buffer and the host-side bitmap it is copied into each frame.
struct DataBlock {
    unsigned char *dev_bitmap;  // RGBA frame buffer in GPU global memory
    CPUAnimBitmap *bitmap;      // host bitmap that displays the frame
};
// Renders one animation frame: launches the ripple kernel for the given tick
// and copies the finished frame back into the host bitmap.
void generate_frame(DataBlock *d, int ticks) {
    dim3 blocks(DIM / 16, DIM / 16);
    dim3 threads(16, 16);
    // Use the canonical HIP launch macro (the chevron launch was left behind
    // by an incomplete hipify pass).
    hipLaunchKernelGGL(kernel, blocks, threads, 0, 0, d->dev_bitmap, ticks);
    // A kernel launch itself returns no status; surface launch errors here.
    HANDLE_ERROR(hipGetLastError());
    HANDLE_ERROR(hipMemcpy(d->bitmap->get_ptr(),
                           d->dev_bitmap,
                           d->bitmap->image_size(),
                           hipMemcpyDeviceToHost));
}
// clean up memory allocated on the GPU
void cleanup(DataBlock *d) {
    // Release the device frame buffer; invoked by the animation loop on exit.
    HANDLE_ERROR(hipFree(d->dev_bitmap));
}
// Entry point: allocates the device frame buffer and starts the animation
// loop, which calls generate_frame per frame and cleanup on exit
// (anim_and_exit presumably blocks until the window closes -- confirm in
// cpu_anim.h).
int main(void) {
    DataBlock data;
    CPUAnimBitmap bitmap(DIM, DIM, &data);
    data.bitmap = &bitmap;
    HANDLE_ERROR(hipMalloc((void**)&data.dev_bitmap,
                           bitmap.image_size()));
    bitmap.anim_and_exit((void(*)(void*, int))generate_frame,
                         (void(*)(void*))cleanup);
}
| 7991abd087126beb1021522c75a83b55a1cf4419.cu | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* NVIDIA Corporation is strictly prohibited.
*
* Please refer to the applicable NVIDIA end user license agreement (EULA)
* associated with this source code for terms and conditions that govern
* your use of this NVIDIA software.
*
*/
#include "cuda.h"
#include "../common/book.h"
#include "../common/cpu_anim.h"
#define DIM 2000
#define PI 3.1415926535897932f
// Renders one greyscale ripple pixel per thread into an RGBA byte buffer.
// Launch layout: 2-D grid of 2-D blocks covering a DIM x DIM image
// (DIM is a multiple of 16, so no bounds check is needed).
// ticks animates the wave phase over time.
__global__ void kernel(unsigned char *ptr, int ticks) {
    // map from threadIdx/BlockIdx to pixel position
    int x = threadIdx.x + blockIdx.x * blockDim.x;
    int y = threadIdx.y + blockIdx.y * blockDim.y;
    int offset = x + y * blockDim.x * gridDim.x;
    // now calculate the value at that position
    float fx = x - DIM / 2;
    float fy = y - DIM / 2;
    float d = sqrtf(fx * fx + fy * fy);
    // cosf instead of cos: the double-precision call forced a
    // float->double->float round trip on every pixel
    unsigned char grey = (unsigned char)(128.0f + 127.0f *
                                         cosf(d / 10.0f - ticks / 7.0f) /
                                         (d / 10.0f + 1.0f));
    ptr[offset * 4 + 0] = grey;
    ptr[offset * 4 + 1] = grey;
    ptr[offset * 4 + 2] = grey;
    ptr[offset * 4 + 3] = 255;
}
// Shared state handed to the animation callbacks: the device-side frame
// buffer and the host-side bitmap it is copied into each frame.
struct DataBlock {
    unsigned char *dev_bitmap;  // RGBA frame buffer in GPU global memory
    CPUAnimBitmap *bitmap;      // host bitmap that displays the frame
};
// Renders one animation frame: launches the ripple kernel for the given tick
// and copies the finished frame back into the host bitmap.
void generate_frame(DataBlock *d, int ticks) {
    dim3 blocks(DIM / 16, DIM / 16);
    dim3 threads(16, 16);
    kernel<<<blocks, threads>>>(d->dev_bitmap, ticks);
    // A kernel launch itself returns no status; surface launch errors here.
    HANDLE_ERROR(cudaGetLastError());
    HANDLE_ERROR(cudaMemcpy(d->bitmap->get_ptr(),
                            d->dev_bitmap,
                            d->bitmap->image_size(),
                            cudaMemcpyDeviceToHost));
}
// clean up memory allocated on the GPU
void cleanup(DataBlock *d) {
    // Release the device frame buffer; invoked by the animation loop on exit.
    HANDLE_ERROR(cudaFree(d->dev_bitmap));
}
// Entry point: allocates the device frame buffer and starts the animation
// loop, which calls generate_frame per frame and cleanup on exit
// (anim_and_exit presumably blocks until the window closes -- confirm in
// cpu_anim.h).
int main(void) {
    DataBlock data;
    CPUAnimBitmap bitmap(DIM, DIM, &data);
    data.bitmap = &bitmap;
    HANDLE_ERROR(cudaMalloc((void**)&data.dev_bitmap,
                            bitmap.image_size()));
    bitmap.anim_and_exit((void(*)(void*, int))generate_frame,
                         (void(*)(void*))cleanup);
}
|
1491bbd23c32615a61ed1410219e7000815a0afc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <vector>
#define CSC(call) \
do { \
hipError_t res = call; \
if (res != hipSuccess) { \
fprintf(stderr, "ERROR in %s:%d. Message: %s\n", \
__FILE__, __LINE__, hipGetErrorString(res)); \
exit(0); \
} \
} while(0)
typedef unsigned char uchar;
#define NC_MAX 32 //
double3 avg[NC_MAX]; //
double cov[3 * 3 * NC_MAX]; //
__constant__ double3 AVG[NC_MAX];
__constant__ double COV[3 * 3 * NC_MAX];
// Quadratic form: returns a^T * A * b for an n x n row-major matrix A and
// n-vectors a, b.
__host__ __device__
double dot(const double *A, const double *a, const double *b, int n)
{
    double acc = 0;
    for (int row = 0; row < n; row++)
    {
        for (int col = 0; col < n; col++)
        {
            acc += A[row * n + col] * a[row] * b[col];
        }
    }
    return acc;
}
// General matrix product with scaling: C = alpha * A * B + beta * C.
// A is m x n, B is n x l, C is m x l (all row-major, densely packed).
// When beta == 0, C is written without being read, so it may start
// uninitialized.
__host__ __device__
void mat_mul(const double *A, const double *B, double *C, int m, int n, int l, double alpha=1, double beta=0)
{
    for (int row = 0; row < m; row++)
    {
        for (int col = 0; col < l; col++)
        {
            double acc = 0;
            for (int k = 0; k < n; k++)
            {
                acc += A[row * n + k] * B[k * l + col];
            }
            C[row * l + col] = alpha * acc + (beta == 0 ? 0 : beta * C[row * l + col]);
        }
    }
}
// Scalar scaling: B = c * A for an m x n row-major matrix (A and B may alias).
__host__ __device__
void mat_mul_C(const double *A, double *B, double c, int m, int n)
{
    const int total = m * n;   // matrices are densely packed; scale element-wise
    for (int k = 0; k < total; k++)
    {
        B[k] = c * A[k];
    }
}
// Weighted sum: C = alpha * A + beta * B (all m x n, row-major, packed).
__host__ __device__
void mat_sum(const double *A, const double *B, double *C, int m, int n, double alpha=1, double beta=1)
{
    const int total = m * n;
    for (int k = 0; k < total; k++)
    {
        C[k] = alpha * A[k] + beta * B[k];
    }
}
// Fills the n x n matrix A with alpha on the diagonal and beta elsewhere
// (alpha = 1, beta = 0 yields the identity).
__host__ __device__
void mat_set(double *A, int n, double alpha=0, double beta=0)
{
    for (int row = 0; row < n; row++)
    {
        for (int col = 0; col < n; col++)
        {
            A[row * n + col] = (row == col) ? alpha : beta;
        }
    }
}
// Swaps rows i1 and i2 of the n-column row-major matrix A in place.
void mat_swap_rows(double *A, int n, int i1, int i2)
{
    double *r1 = A + i1 * n;
    double *r2 = A + i2 * n;
    for (int j = 0; j < n; j++)
    {
        const double t = r1[j];
        r1[j] = r2[j];
        r2[j] = t;
    }
}
// Writes the transpose of the n x n matrix A into A_t.
// A_t must not alias A (the output is written while A is still being read).
void mat_transpose(const double *A, double *A_t, int n)
{
    for (int row = 0; row < n; row++)
    {
        for (int col = 0; col < n; col++)
        {
            A_t[row * n + col] = A[col * n + row];
        }
    }
}
// In-place LUP decomposition with partial pivoting (P*A = L*U).
// On return A holds both factors: U on and above the diagonal, and the
// unit-lower-triangular L (without its implicit diagonal of ones) below it.
// pi[k] records which row of the original matrix ended up in row k.
// Returns false if a pivot smaller than eps is met, i.e. the matrix is
// numerically singular.
bool LUP(double *A, int n, int *pi, double eps=1e-10)
{
    int i, j, k, k_;
    int tmp;
    // Start from the identity permutation.
    for (i = 0; i < n; i++)
    {
        pi[i] = i;
    }
    for (k = 0; k < n; k++)
    {
        // Partial pivoting: pick the row with the largest |A[i][k]|.
        k_ = k;
        for (i = k + 1; i < n; i++)
        {
            if (fabs(A[i * n + k]) > fabs(A[k_ * n + k]))
            {
                k_ = i;
            }
        }
        if (fabs(A[k_ * n + k]) < eps)
        {
            return false;  // singular to working precision
        }
        if (k != k_)
        {
            tmp = pi[k];
            pi[k] = pi[k_];
            pi[k_] = tmp;
            mat_swap_rows(A, n, k, k_);
        }
        // Eliminate column k below the pivot, storing the L multipliers in place.
        for (i = k + 1; i < n; i++)
        {
            A[i * n + k] /= A[k * n + k];
            for (j = k + 1; j < n; j++)
            {
                A[i * n + j] -= A[i * n + k] * A[k * n + j];
            }
        }
    }
    return true;
}
// Solves A*x = b given the packed factorization and permutation produced by
// LUP(). work must provide n doubles of scratch space (the intermediate y).
void LUP_solve(const double *LU, const int *pi, const double *b, int n, double *x, double *work)
{
    int i, j;
    double sum;
    double *y = work;
    memset(y, 0, n * sizeof(double));
    // Forward substitution: L*y = P*b (L has an implicit unit diagonal).
    for (i = 0; i < n; i++)
    {
        for (sum = 0, j = 0; j <= i - 1; sum += LU[i * n + j] * y[j], j++);
        y[i] = b[pi[i]] - sum;
    }
    // Back substitution: U*x = y.
    for (i = n - 1; i >= 0; i--)
    {
        for (sum = 0, j = i + 1; j < n; sum += LU[i * n + j] * x[j], j++);
        x[i] = (y[i] - sum) / LU[i * n + i];
    }
}
// Inverts the n x n matrix A into A_inv by LUP-decomposing A and solving
// against the columns of the identity (after Cormen et al.). A is destroyed.
// work must provide n^2 + 2n doubles and iwork n ints of scratch space
// (passed in so the routine allocates nothing itself).
// NOTE(review): the return value of LUP() is ignored, so a singular A goes
// undetected -- confirm that callers guarantee invertibility.
void LUP_inv_mat(double *A, double *A_inv, int n, double *work, int *iwork, double eps=1e-10)
{
    int i;
    double *X, *e, *space;
    int *pi;
    // Carve up the scratch buffer: X collects the solution columns, e is the
    // current unit vector, space is LUP_solve scratch, pi the permutation.
    X = work;
    e = X + n * n;
    space = e + n;
    pi = iwork;
    memset(e, 0, n * sizeof(double));
    e[0] = 1;
    LUP(A, n, pi, eps);
    for (i = 0; i < n - 1; i++)
    {
        LUP_solve(A, pi, e, n, X + i * n, space);
        e[i] = 0;
        e[i + 1] = 1;
    }
    LUP_solve(A, pi, e, n, X + i * n, space);
    // The solves stored the inverse's columns as rows of X; transpose into
    // the row-major result.
    mat_transpose(X, A_inv, n);
}
// Classifies every pixel: for each of the nc classes the negative
// quadratic-form distance -(p - AVG[c])^T * COV[c] * (p - AVG[c]) is
// evaluated from the per-class statistics held in constant memory, and the
// index of the best-scoring class is stored in the pixel's alpha channel.
__global__
void kernel(int nc, uchar4 *im, int w, int h)
{
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    const int stride = blockDim.x * gridDim.x;
    // Grid-stride loop over all pixels of the w x h image.
    for (int p = tid; p < w * h; p += stride)
    {
        int best_class = 0;
        double best_score = -INFINITY;
        for (int c = 0; c < nc; c++)
        {
            double3 diff = make_double3((double)im[p].x - AVG[c].x,
                                        (double)im[p].y - AVG[c].y,
                                        (double)im[p].z - AVG[c].z);
            double score = -dot(COV + c * 3 * 3, (double *)&diff, (double *)&diff, 3);
            if (score > best_score)
            {
                best_class = c;
                best_score = score;
            }
        }
        im[p].w = best_class;   // class index of the maximal score
    }
}
/*
 * Host driver: reads a raw image, learns per-class colour statistics (mean
 * and inverse covariance) from user-supplied sample pixels, uploads them to
 * constant memory and classifies every pixel on the GPU.
 *
 * stdin: source path, destination path, class count nc, then per class the
 * sample count np followed by np (x, y) pixel coordinates.
 * Image format: int width, int height, then w*h uchar4 pixels.
 */
int main()
{
    int nc, np, i, j, x, y, w, h;
    uchar4 *im = NULL, *im_dev = NULL;
    std::vector<double3> v(0);
    double mat[3 * 3],
           work[3 * 3 + 2 * 3];
    int iwork[3];
    FILE *fp;
    char name_src_im[256], name_dst_im[256];
    dim3 blocks(256), threads(256);
    scanf("%s\n%s\n%d", name_src_im, name_dst_im, &nc);
    fp = fopen(name_src_im, "rb");
    if (fp == NULL)
    {
        fprintf(stderr, "Error: can't open %s\n", name_src_im);
        return 0;
    }
    fread(&w, sizeof(int), 1, fp);
    fread(&h, sizeof(int), 1, fp);
    im = (uchar4 *)malloc(w * h * sizeof(uchar4));
    if (im == NULL)
    {
        fprintf(stderr, "Error: not enough memory in CPU\n");
        goto FREE;
    }
    CSC(hipMalloc(&im_dev, w * h * sizeof(uchar4)));
    fread(im, sizeof(uchar4), w * h, fp);  // read the pixel data
    fclose(fp);
    for (j = 0; j < nc; j++)  // per-class statistics
    {
        scanf("%d", &np);  // number of sample pixels for this class
        if(v.size() < np) v.resize(np);
        avg[j] = make_double3(0, 0, 0);
        for (i = 0; i < np; i++)  // accumulate the class-j mean colour
        {
            scanf("%d %d", &x, &y);
            v[i] = make_double3((double)im[y * w + x].x,
                                (double)im[y * w + x].y,
                                (double)im[y * w + x].z);
            avg[j].x += v[i].x;
            avg[j].y += v[i].y;
            avg[j].z += v[i].z;
        }
        avg[j].x /= np;
        avg[j].y /= np;
        avg[j].z /= np;
        if (np > 1)
        {
            mat_set(mat, 3, 0, 0);  // zero the covariance accumulator
            for (i = 0; i < np; i++)  // accumulate the class-j covariance
            {
                v[i].x = v[i].x - avg[j].x;
                v[i].y = v[i].y - avg[j].y;
                v[i].z = v[i].z - avg[j].z;
                // treat double3 as double[3]: outer product 3x1 * 1x3 -> 3x3
                mat_mul((double *)(v.data() + i), (double *)(v.data() + i), mat, 3, 1, 3, 1, 1);
            }
            mat_mul_C(mat, mat, 1. / (np - 1), 3, 3);  // unbiased normalization
            LUP_inv_mat(mat, cov + j * 3 * 3, 3, work, iwork);  // store the inverse covariance of class j (work/iwork: scratch, avoids malloc)
        }
        else
        {
            mat_set(cov + j * 3 * 3, 3, 1, 0);  // degenerate class: identity matrix
        }
    }
    /* Upload the per-class statistics to constant memory */
    CSC(hipMemcpyToSymbol(AVG, avg, nc * sizeof(double3), 0, hipMemcpyHostToDevice));
    CSC(hipMemcpyToSymbol(COV, cov, nc * 3 * 3 * sizeof(double), 0, hipMemcpyHostToDevice));
    /* Upload the image and classify it on the GPU */
    hipMemcpy(im_dev, im, w * h * sizeof(uchar4), hipMemcpyHostToDevice);
    hipLaunchKernelGGL(( kernel), dim3(blocks), dim3(threads), 0, 0, nc, im_dev, w, h);
    CSC(hipGetLastError());
    CSC(hipMemcpy(im, im_dev, w * h * sizeof(uchar4), hipMemcpyDeviceToHost));
    fp = fopen(name_dst_im, "wb");
    if (fp == NULL)
    {
        fprintf(stderr, "Error: can't open %s\n", name_dst_im);
        goto FREE;
    }
    fwrite(&w, sizeof(int), 1, fp);
    fwrite(&h, sizeof(int), 1, fp);
    fwrite(im, sizeof(uchar4), w * h, fp);
    fclose(fp);
FREE:
    free(im);
    hipFree(im_dev);
    return 0;
}
| 1491bbd23c32615a61ed1410219e7000815a0afc.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <vector>
#define CSC(call) \
do { \
cudaError_t res = call; \
if (res != cudaSuccess) { \
fprintf(stderr, "ERROR in %s:%d. Message: %s\n", \
__FILE__, __LINE__, cudaGetErrorString(res)); \
exit(0); \
} \
} while(0)
typedef unsigned char uchar;
#define NC_MAX 32 // максимальное количество классов
double3 avg[NC_MAX]; // средние
double cov[3 * 3 * NC_MAX]; // ковариационные матрицы
__constant__ double3 AVG[NC_MAX];
__constant__ double COV[3 * 3 * NC_MAX];
// Quadratic form: returns a^T * A * b for an n x n row-major matrix A and
// n-vectors a, b.
__host__ __device__
double dot(const double *A, const double *a, const double *b, int n)
{
    double acc = 0;
    for (int row = 0; row < n; row++)
    {
        for (int col = 0; col < n; col++)
        {
            acc += A[row * n + col] * a[row] * b[col];
        }
    }
    return acc;
}
// General matrix product with scaling: C = alpha * A * B + beta * C.
// A is m x n, B is n x l, C is m x l (all row-major, densely packed).
// When beta == 0, C is written without being read, so it may start
// uninitialized.
__host__ __device__
void mat_mul(const double *A, const double *B, double *C, int m, int n, int l, double alpha=1, double beta=0)
{
    for (int row = 0; row < m; row++)
    {
        for (int col = 0; col < l; col++)
        {
            double acc = 0;
            for (int k = 0; k < n; k++)
            {
                acc += A[row * n + k] * B[k * l + col];
            }
            C[row * l + col] = alpha * acc + (beta == 0 ? 0 : beta * C[row * l + col]);
        }
    }
}
// Scalar scaling: B = c * A for an m x n row-major matrix (A and B may alias).
__host__ __device__
void mat_mul_C(const double *A, double *B, double c, int m, int n)
{
    const int total = m * n;   // matrices are densely packed; scale element-wise
    for (int k = 0; k < total; k++)
    {
        B[k] = c * A[k];
    }
}
// Weighted sum: C = alpha * A + beta * B (all m x n, row-major, packed).
__host__ __device__
void mat_sum(const double *A, const double *B, double *C, int m, int n, double alpha=1, double beta=1)
{
    const int total = m * n;
    for (int k = 0; k < total; k++)
    {
        C[k] = alpha * A[k] + beta * B[k];
    }
}
// Fills the n x n matrix A with alpha on the diagonal and beta elsewhere
// (alpha = 1, beta = 0 yields the identity).
__host__ __device__
void mat_set(double *A, int n, double alpha=0, double beta=0)
{
    for (int row = 0; row < n; row++)
    {
        for (int col = 0; col < n; col++)
        {
            A[row * n + col] = (row == col) ? alpha : beta;
        }
    }
}
// Swaps rows i1 and i2 of the n-column row-major matrix A in place.
void mat_swap_rows(double *A, int n, int i1, int i2)
{
    double *r1 = A + i1 * n;
    double *r2 = A + i2 * n;
    for (int j = 0; j < n; j++)
    {
        const double t = r1[j];
        r1[j] = r2[j];
        r2[j] = t;
    }
}
// Writes the transpose of the n x n matrix A into A_t.
// A_t must not alias A (the output is written while A is still being read).
void mat_transpose(const double *A, double *A_t, int n)
{
    for (int row = 0; row < n; row++)
    {
        for (int col = 0; col < n; col++)
        {
            A_t[row * n + col] = A[col * n + row];
        }
    }
}
// In-place LUP decomposition with partial pivoting (P*A = L*U).
// On return A holds both factors: U on and above the diagonal, and the
// unit-lower-triangular L (without its implicit diagonal of ones) below it.
// pi[k] records which row of the original matrix ended up in row k.
// Returns false if a pivot smaller than eps is met, i.e. the matrix is
// numerically singular.
bool LUP(double *A, int n, int *pi, double eps=1e-10)
{
    int i, j, k, k_;
    int tmp;
    // Start from the identity permutation.
    for (i = 0; i < n; i++)
    {
        pi[i] = i;
    }
    for (k = 0; k < n; k++)
    {
        // Partial pivoting: pick the row with the largest |A[i][k]|.
        k_ = k;
        for (i = k + 1; i < n; i++)
        {
            if (fabs(A[i * n + k]) > fabs(A[k_ * n + k]))
            {
                k_ = i;
            }
        }
        if (fabs(A[k_ * n + k]) < eps)
        {
            return false;  // singular to working precision
        }
        if (k != k_)
        {
            tmp = pi[k];
            pi[k] = pi[k_];
            pi[k_] = tmp;
            mat_swap_rows(A, n, k, k_);
        }
        // Eliminate column k below the pivot, storing the L multipliers in place.
        for (i = k + 1; i < n; i++)
        {
            A[i * n + k] /= A[k * n + k];
            for (j = k + 1; j < n; j++)
            {
                A[i * n + j] -= A[i * n + k] * A[k * n + j];
            }
        }
    }
    return true;
}
// Solves A*x = b given the packed factorization and permutation produced by
// LUP(). work must provide n doubles of scratch space (the intermediate y).
void LUP_solve(const double *LU, const int *pi, const double *b, int n, double *x, double *work)
{
    int i, j;
    double sum;
    double *y = work;
    memset(y, 0, n * sizeof(double));
    // Forward substitution: L*y = P*b (L has an implicit unit diagonal).
    for (i = 0; i < n; i++)
    {
        for (sum = 0, j = 0; j <= i - 1; sum += LU[i * n + j] * y[j], j++);
        y[i] = b[pi[i]] - sum;
    }
    // Back substitution: U*x = y.
    for (i = n - 1; i >= 0; i--)
    {
        for (sum = 0, j = i + 1; j < n; sum += LU[i * n + j] * x[j], j++);
        x[i] = (y[i] - sum) / LU[i * n + i];
    }
}
// Inverts the n x n matrix A into A_inv by LUP-decomposing A and solving
// against the columns of the identity (after Cormen et al.). A is destroyed.
// work must provide n^2 + 2n doubles and iwork n ints of scratch space
// (passed in so the routine allocates nothing itself).
// NOTE(review): the return value of LUP() is ignored, so a singular A goes
// undetected -- confirm that callers guarantee invertibility.
void LUP_inv_mat(double *A, double *A_inv, int n, double *work, int *iwork, double eps=1e-10)
{
    int i;
    double *X, *e, *space;
    int *pi;
    // Carve up the scratch buffer: X collects the solution columns, e is the
    // current unit vector, space is LUP_solve scratch, pi the permutation.
    X = work;
    e = X + n * n;
    space = e + n;
    pi = iwork;
    memset(e, 0, n * sizeof(double));
    e[0] = 1;
    LUP(A, n, pi, eps);
    for (i = 0; i < n - 1; i++)
    {
        LUP_solve(A, pi, e, n, X + i * n, space);
        e[i] = 0;
        e[i + 1] = 1;
    }
    LUP_solve(A, pi, e, n, X + i * n, space);
    // The solves stored the inverse's columns as rows of X; transpose into
    // the row-major result.
    mat_transpose(X, A_inv, n);
}
// Classifies every pixel: for each of the nc classes the negative
// quadratic-form distance -(p - AVG[c])^T * COV[c] * (p - AVG[c]) is
// evaluated from the per-class statistics held in constant memory, and the
// index of the best-scoring class is stored in the pixel's alpha channel.
__global__
void kernel(int nc, uchar4 *im, int w, int h)
{
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    const int stride = blockDim.x * gridDim.x;
    // Grid-stride loop over all pixels of the w x h image.
    for (int p = tid; p < w * h; p += stride)
    {
        int best_class = 0;
        double best_score = -INFINITY;
        for (int c = 0; c < nc; c++)
        {
            double3 diff = make_double3((double)im[p].x - AVG[c].x,
                                        (double)im[p].y - AVG[c].y,
                                        (double)im[p].z - AVG[c].z);
            double score = -dot(COV + c * 3 * 3, (double *)&diff, (double *)&diff, 3);
            if (score > best_score)
            {
                best_class = c;
                best_score = score;
            }
        }
        im[p].w = best_class;   // class index of the maximal score
    }
}
/*
 * Host driver: reads a raw image, learns per-class colour statistics (mean
 * and inverse covariance) from user-supplied sample pixels, uploads them to
 * constant memory and classifies every pixel on the GPU.
 *
 * stdin: source path, destination path, class count nc, then per class the
 * sample count np followed by np (x, y) pixel coordinates.
 * Image format: int width, int height, then w*h uchar4 pixels.
 */
int main()
{
    int nc, np, i, j, x, y, w, h;
    uchar4 *im = NULL, *im_dev = NULL;
    std::vector<double3> v(0);
    double mat[3 * 3],
           work[3 * 3 + 2 * 3];
    int iwork[3];
    FILE *fp;
    char name_src_im[256], name_dst_im[256];
    dim3 blocks(256), threads(256);
    scanf("%s\n%s\n%d", name_src_im, name_dst_im, &nc);
    fp = fopen(name_src_im, "rb");
    if (fp == NULL)
    {
        fprintf(stderr, "Error: can't open %s\n", name_src_im);
        return 0;
    }
    fread(&w, sizeof(int), 1, fp);
    fread(&h, sizeof(int), 1, fp);
    im = (uchar4 *)malloc(w * h * sizeof(uchar4));
    if (im == NULL)
    {
        fprintf(stderr, "Error: not enough memory in CPU\n");
        goto FREE;
    }
    CSC(cudaMalloc(&im_dev, w * h * sizeof(uchar4)));
    fread(im, sizeof(uchar4), w * h, fp);  // read the pixel data
    fclose(fp);
    for (j = 0; j < nc; j++)  // per-class statistics
    {
        scanf("%d", &np);  // number of sample pixels for this class
        if(v.size() < np) v.resize(np);
        avg[j] = make_double3(0, 0, 0);
        for (i = 0; i < np; i++)  // accumulate the class-j mean colour
        {
            scanf("%d %d", &x, &y);
            v[i] = make_double3((double)im[y * w + x].x,
                                (double)im[y * w + x].y,
                                (double)im[y * w + x].z);
            avg[j].x += v[i].x;
            avg[j].y += v[i].y;
            avg[j].z += v[i].z;
        }
        avg[j].x /= np;
        avg[j].y /= np;
        avg[j].z /= np;
        if (np > 1)
        {
            mat_set(mat, 3, 0, 0);  // zero the covariance accumulator
            for (i = 0; i < np; i++)  // accumulate the class-j covariance
            {
                v[i].x = v[i].x - avg[j].x;
                v[i].y = v[i].y - avg[j].y;
                v[i].z = v[i].z - avg[j].z;
                // treat double3 as double[3]: outer product 3x1 * 1x3 -> 3x3
                mat_mul((double *)(v.data() + i), (double *)(v.data() + i), mat, 3, 1, 3, 1, 1);
            }
            mat_mul_C(mat, mat, 1. / (np - 1), 3, 3);  // unbiased normalization
            LUP_inv_mat(mat, cov + j * 3 * 3, 3, work, iwork);  // store the inverse covariance of class j (work/iwork: scratch, avoids malloc)
        }
        else
        {
            mat_set(cov + j * 3 * 3, 3, 1, 0);  // degenerate class: identity matrix
        }
    }
    /* Upload the per-class statistics to constant memory */
    CSC(cudaMemcpyToSymbol(AVG, avg, nc * sizeof(double3), 0, cudaMemcpyHostToDevice));
    CSC(cudaMemcpyToSymbol(COV, cov, nc * 3 * 3 * sizeof(double), 0, cudaMemcpyHostToDevice));
    /* Upload the image and classify it on the GPU */
    cudaMemcpy(im_dev, im, w * h * sizeof(uchar4), cudaMemcpyHostToDevice);
    kernel<<<blocks, threads>>>(nc, im_dev, w, h);
    CSC(cudaGetLastError());
    CSC(cudaMemcpy(im, im_dev, w * h * sizeof(uchar4), cudaMemcpyDeviceToHost));
    fp = fopen(name_dst_im, "wb");
    if (fp == NULL)
    {
        fprintf(stderr, "Error: can't open %s\n", name_dst_im);
        goto FREE;
    }
    fwrite(&w, sizeof(int), 1, fp);
    fwrite(&h, sizeof(int), 1, fp);
    fwrite(im, sizeof(uchar4), w * h, fp);
    fclose(fp);
FREE:
    free(im);
    cudaFree(im_dev);
    return 0;
}
|
747115a3789d86b2226504477c2d06ee963c3ea0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <cstdlib>
#include <ctime>
#include <Windows.h>
#define DO_STATS
#define M_SIZE 1024
typedef struct {
int births;
int deaths;
int alive;
int dead;
} MapStats;
template <size_t xSize, size_t ySize, size_t zSize>
void initMap(unsigned char *oldMap, int mapSeed);
template <size_t xSize, size_t ySize, size_t zSize>
void printJSON(unsigned char *map, int iter);
template <size_t xSize, size_t ySize, size_t zSize>
void countStats(unsigned char *oldMap, unsigned char *newMap, MapStats &stats);
//On Device function
template <size_t xSize, size_t ySize, size_t zSize>
__device__ int countNeighbours(unsigned char *map, int x, int y, int z);
//Kernels
template<size_t xSize, size_t ySize, size_t zSize>
__global__ void unopIterate(unsigned char *d_oldMap, unsigned char *d_newMap, int iters, int bLim, int dLim);
//Globals -- bad code
const int deathLimit = 15;
const int birthLimit = 17;
/*
 * 3-D cellular-automaton driver (HIP port).
 * Seeds a xSize*ySize*zSize byte map, runs maxIters generations on the GPU
 * with double-buffered maps, then optionally prints stats and wall time.
 */
int main()
{
    const int mapSeed = 45000;
    const int xSize = M_SIZE;
    const int ySize = M_SIZE;
    const int zSize = M_SIZE;
    const int maxIters = 30;
    const bool TIME = true;
    const int mapSize = xSize*ySize*zSize;      // one byte per cell
    const int blockSize = 8;
    dim3 blockDim(blockSize, blockSize, blockSize);
    dim3 gridDim(xSize/blockSize, ySize/blockSize, zSize/blockSize);
    clock_t start, total = 0;
    unsigned char *oldMap = new unsigned char[xSize*ySize*zSize];
    unsigned char *newMap = new unsigned char[xSize*ySize*zSize];
    unsigned char *temp;
    MapStats stats;
    unsigned char *d_oldMap;
    unsigned char *d_newMap;
    hipMalloc((void **) &d_oldMap, mapSize);
    hipMalloc((void **) &d_newMap, mapSize);
    initMap<xSize,ySize,zSize>(oldMap, mapSeed);
    // Main iteration section
    if (TIME) start = clock();
    hipMemcpy(d_oldMap, oldMap, mapSize, hipMemcpyHostToDevice);
    for (int iter = 0; iter < maxIters; ++iter)
    {
        hipLaunchKernelGGL(( unopIterate<xSize,ySize,zSize>), dim3(gridDim),dim3(blockDim), 0, 0, d_oldMap, d_newMap, 1, birthLimit, deathLimit);
        // Ping-pong the device buffers, except after the final generation so
        // that d_newMap still holds the last state and d_oldMap the one before.
        if (iter != maxIters-1)
        {
            temp = d_oldMap;
            d_oldMap = d_newMap;
            d_newMap = temp;
        }
    }
    hipMemcpy(newMap, d_newMap, mapSize, hipMemcpyDeviceToHost);
    if (TIME) total += clock() - start;
    hipMemcpy(oldMap, d_oldMap, mapSize, hipMemcpyDeviceToHost);
#ifdef DO_STATS
    printf("[%d] ", maxIters);
    countStats<xSize,ySize,zSize>(oldMap, newMap, stats);
    printf("births: %d \tdeaths: %d \talive: %d \tdead: %d \ttotal: %d\n", stats.births, stats.deaths, stats.alive, stats.dead, stats.alive+stats.dead);
#endif
    if (TIME)
    {
        double diff = (double(total))/CLOCKS_PER_SEC;
        // Bug fix: the third dimension used to print ySize twice.
        printf("time: took %f seconds for %dx%dx%d matrix\n", diff, xSize, ySize, zSize);
    }
    // Bug fix: the device buffers were never released.
    hipFree(d_oldMap);
    hipFree(d_newMap);
    delete[] oldMap;
    delete[] newMap;
    return 0;
}
// Seed the map with a reproducible pseudo-random 0/1 pattern
// (linear layout: z-major, then y, then x — matching the kernels).
template <size_t xSize, size_t ySize, size_t zSize>
void initMap(unsigned char *oldMap, int mapSeed)
{
    srand(mapSeed);
    const size_t plane = xSize * ySize;
    for (size_t z = 0; z < zSize; ++z)
        for (size_t y = 0; y < ySize; ++y)
            for (size_t x = 0; x < xSize; ++x)
                oldMap[z * plane + y * xSize + x] = rand() % 2;
}
//Count neighbours that are alive
// Returns how many of the 26 neighbours of cell (x, y, z) are alive (value 1).
// Out-of-range neighbours count as alive when countBounds is true, i.e. the
// volume behaves as if surrounded by a wall of live cells.
template <size_t xSize, size_t ySize, size_t zSize>
__device__ int countNeighbours(unsigned char *map, int x, int y, int z)
{
const bool countBounds = true;
int count = 0;
// 3x3x3 neighbourhood offsets in each axis: -1, 0, +1.
for(int k=-1; k<2; ++k)
{
for(int j=-1; j<2; ++j)
{
for(int i=-1; i<2; ++i)
{
//Count all except middle point
if( i != 0 || j != 0 || k != 0)
{
int xPos = x + i;
int yPos = y + j;
int zPos = z + k;
//Check boundaries
if(xPos < 0 || yPos < 0 || zPos < 0 || xPos >= xSize || yPos >= ySize || zPos >= zSize)
{
//if(x==0 && y==0 && z==0) printf("(%d,%d,%d):bounds\n",xPos,yPos,zPos);
if(countBounds) count++;
}
else
{
//if(x==0 && y==0 && z==0) printf("(%d,%d,%d):not bounds\n",xPos,yPos,zPos);
// Cells are stored as 0/1, so a plain add counts live neighbours.
count += map[zPos*(xSize*ySize)+yPos*xSize+xPos];
}
}
}
}
}
return count;
}
// One automaton generation: each thread updates exactly one cell of the
// 3-D grid from d_oldMap into d_newMap. Expects a 3-D launch covering the
// whole volume. A live cell survives iff its live-neighbour count >= dLim;
// a dead cell is born iff the count > bLim.
// NOTE(review): the `iters` parameter is unused in this body.
template<size_t xSize, size_t ySize, size_t zSize>
__global__ void unopIterate(unsigned char *d_oldMap, unsigned char *d_newMap, int iters, int bLim, int dLim)
{
const int globalx = (blockIdx.x * blockDim.x) + threadIdx.x;
const int globaly = (blockIdx.y * blockDim.y) + threadIdx.y;
const int globalz = (blockIdx.z * blockDim.z) + threadIdx.z;
//Only perform action if thread is inside the bounds of the grid
if( !(globalx >= xSize || globaly >= ySize || globalz >= zSize) )
{
int globalIndex = globalz*(xSize * ySize) + globaly*(xSize)+globalx;
int aliveCnt = countNeighbours<xSize,ySize,zSize>(d_oldMap, globalx, globaly, globalz);
if(d_oldMap[globalIndex] == 1)
{
d_newMap[globalIndex] = (aliveCnt < dLim) ? 0 : 1;
}
else
{
d_newMap[globalIndex] = (aliveCnt > bLim) ? 1 : 0;
}
}
}
// Emit one iteration of the map as a JSON object on stdout:
// { "iteration": N, "map": [ {x, y, z, value}, ... ] }.
// x is the slowest-varying loop here, preserving the original element order.
template <size_t xSize, size_t ySize, size_t zSize>
void printJSON(unsigned char *map, int iter)
{
    printf("\t{\n");
    printf("\t\"iteration\" : %d,\n", iter);
    printf("\t\"map\" : [\n");
    for (int i = 0; i < xSize; ++i)
    {
        for (int j = 0; j < ySize; ++j)
        {
            for (int k = 0; k < zSize; ++k)
            {
                // Comma-before style: every element except the first is
                // preceded by ",\n" so the list never ends with a comma.
                if (!(i == 0 && j == 0 && k == 0)) printf(",\n");
                // Bug fix: a string literal must bind to const char*
                // (binding to writable char* is ill-formed in modern C++).
                const char *val = (map[k*(xSize*ySize)+j*xSize+i]) ? "true" : "false";
                printf("\t\t{\"x\":%d, \"y\":%d, \"z\":%d, \"value\":%s}", i, j, k, val);
            }
        }
    }
    printf("\n\t\t]\n\t}");
}
// Tally the automaton state: alive/dead cells in newMap, plus net births
// and deaths relative to oldMap (clamped at zero, never negative).
template <size_t xSize, size_t ySize, size_t zSize>
void countStats(unsigned char *oldMap, unsigned char *newMap, MapStats &stats)
{
    const size_t total = xSize * ySize * zSize;
    int oldAlive = 0;
    int newAlive = 0;
    // Cells are 0/1 bytes laid out contiguously, so a flat scan suffices.
    for (size_t idx = 0; idx < total; ++idx)
    {
        oldAlive += oldMap[idx];
        newAlive += newMap[idx];
    }
    const int newDead = (int)total - newAlive;
    const int oldDead = (int)total - oldAlive;
    stats.alive  = newAlive;
    stats.dead   = newDead;
    stats.births = (newAlive > oldAlive) ? newAlive - oldAlive : 0;
    stats.deaths = (newDead > oldDead) ? newDead - oldDead : 0;
}
| 747115a3789d86b2226504477c2d06ee963c3ea0.cu | #include <cstdio>
#include <cstdlib>
#include <ctime>
#include <Windows.h>
#define DO_STATS
#define M_SIZE 1024
typedef struct {
int births;
int deaths;
int alive;
int dead;
} MapStats;
template <size_t xSize, size_t ySize, size_t zSize>
void initMap(unsigned char *oldMap, int mapSeed);
template <size_t xSize, size_t ySize, size_t zSize>
void printJSON(unsigned char *map, int iter);
template <size_t xSize, size_t ySize, size_t zSize>
void countStats(unsigned char *oldMap, unsigned char *newMap, MapStats &stats);
//On Device function
template <size_t xSize, size_t ySize, size_t zSize>
__device__ int countNeighbours(unsigned char *map, int x, int y, int z);
//Kernels
template<size_t xSize, size_t ySize, size_t zSize>
__global__ void unopIterate(unsigned char *d_oldMap, unsigned char *d_newMap, int iters, int bLim, int dLim);
//Globals -- bad code
const int deathLimit = 15;
const int birthLimit = 17;
/*
 * 3-D cellular-automaton driver (CUDA).
 * Seeds a xSize*ySize*zSize byte map, runs maxIters generations on the GPU
 * with double-buffered maps, then optionally prints stats and wall time.
 */
int main()
{
    const int mapSeed = 45000;
    const int xSize = M_SIZE;
    const int ySize = M_SIZE;
    const int zSize = M_SIZE;
    const int maxIters = 30;
    const bool TIME = true;
    const int mapSize = xSize*ySize*zSize;      // one byte per cell
    const int blockSize = 8;
    dim3 blockDim(blockSize, blockSize, blockSize);
    dim3 gridDim(xSize/blockSize, ySize/blockSize, zSize/blockSize);
    clock_t start, total = 0;
    unsigned char *oldMap = new unsigned char[xSize*ySize*zSize];
    unsigned char *newMap = new unsigned char[xSize*ySize*zSize];
    unsigned char *temp;
    MapStats stats;
    unsigned char *d_oldMap;
    unsigned char *d_newMap;
    cudaMalloc((void **) &d_oldMap, mapSize);
    cudaMalloc((void **) &d_newMap, mapSize);
    initMap<xSize,ySize,zSize>(oldMap, mapSeed);
    // Main iteration section
    if (TIME) start = clock();
    cudaMemcpy(d_oldMap, oldMap, mapSize, cudaMemcpyHostToDevice);
    for (int iter = 0; iter < maxIters; ++iter)
    {
        unopIterate<xSize,ySize,zSize><<<gridDim,blockDim>>>(d_oldMap, d_newMap, 1, birthLimit, deathLimit);
        // Ping-pong the device buffers, except after the final generation so
        // that d_newMap still holds the last state and d_oldMap the one before.
        if (iter != maxIters-1)
        {
            temp = d_oldMap;
            d_oldMap = d_newMap;
            d_newMap = temp;
        }
    }
    cudaMemcpy(newMap, d_newMap, mapSize, cudaMemcpyDeviceToHost);
    if (TIME) total += clock() - start;
    cudaMemcpy(oldMap, d_oldMap, mapSize, cudaMemcpyDeviceToHost);
#ifdef DO_STATS
    printf("[%d] ", maxIters);
    countStats<xSize,ySize,zSize>(oldMap, newMap, stats);
    printf("births: %d \tdeaths: %d \talive: %d \tdead: %d \ttotal: %d\n", stats.births, stats.deaths, stats.alive, stats.dead, stats.alive+stats.dead);
#endif
    if (TIME)
    {
        double diff = (double(total))/CLOCKS_PER_SEC;
        // Bug fix: the third dimension used to print ySize twice.
        printf("time: took %f seconds for %dx%dx%d matrix\n", diff, xSize, ySize, zSize);
    }
    // Bug fix: the device buffers were never released.
    cudaFree(d_oldMap);
    cudaFree(d_newMap);
    delete[] oldMap;
    delete[] newMap;
    return 0;
}
// Seed the map with a reproducible pseudo-random 0/1 pattern
// (linear layout: z-major, then y, then x — matching the kernels).
template <size_t xSize, size_t ySize, size_t zSize>
void initMap(unsigned char *oldMap, int mapSeed)
{
    srand(mapSeed);
    const size_t plane = xSize * ySize;
    for (size_t z = 0; z < zSize; ++z)
        for (size_t y = 0; y < ySize; ++y)
            for (size_t x = 0; x < xSize; ++x)
                oldMap[z * plane + y * xSize + x] = rand() % 2;
}
//Count neighbours that are alive
// Returns how many of the 26 neighbours of cell (x, y, z) are alive (value 1).
// Out-of-range neighbours count as alive when countBounds is true, i.e. the
// volume behaves as if surrounded by a wall of live cells.
template <size_t xSize, size_t ySize, size_t zSize>
__device__ int countNeighbours(unsigned char *map, int x, int y, int z)
{
const bool countBounds = true;
int count = 0;
// 3x3x3 neighbourhood offsets in each axis: -1, 0, +1.
for(int k=-1; k<2; ++k)
{
for(int j=-1; j<2; ++j)
{
for(int i=-1; i<2; ++i)
{
//Count all except middle point
if( i != 0 || j != 0 || k != 0)
{
int xPos = x + i;
int yPos = y + j;
int zPos = z + k;
//Check boundaries
if(xPos < 0 || yPos < 0 || zPos < 0 || xPos >= xSize || yPos >= ySize || zPos >= zSize)
{
//if(x==0 && y==0 && z==0) printf("(%d,%d,%d):bounds\n",xPos,yPos,zPos);
if(countBounds) count++;
}
else
{
//if(x==0 && y==0 && z==0) printf("(%d,%d,%d):not bounds\n",xPos,yPos,zPos);
// Cells are stored as 0/1, so a plain add counts live neighbours.
count += map[zPos*(xSize*ySize)+yPos*xSize+xPos];
}
}
}
}
}
return count;
}
// One automaton generation: each thread updates exactly one cell of the
// 3-D grid from d_oldMap into d_newMap. Expects a 3-D launch covering the
// whole volume. A live cell survives iff its live-neighbour count >= dLim;
// a dead cell is born iff the count > bLim.
// NOTE(review): the `iters` parameter is unused in this body.
template<size_t xSize, size_t ySize, size_t zSize>
__global__ void unopIterate(unsigned char *d_oldMap, unsigned char *d_newMap, int iters, int bLim, int dLim)
{
const int globalx = (blockIdx.x * blockDim.x) + threadIdx.x;
const int globaly = (blockIdx.y * blockDim.y) + threadIdx.y;
const int globalz = (blockIdx.z * blockDim.z) + threadIdx.z;
//Only perform action if thread is inside the bounds of the grid
if( !(globalx >= xSize || globaly >= ySize || globalz >= zSize) )
{
int globalIndex = globalz*(xSize * ySize) + globaly*(xSize)+globalx;
int aliveCnt = countNeighbours<xSize,ySize,zSize>(d_oldMap, globalx, globaly, globalz);
if(d_oldMap[globalIndex] == 1)
{
d_newMap[globalIndex] = (aliveCnt < dLim) ? 0 : 1;
}
else
{
d_newMap[globalIndex] = (aliveCnt > bLim) ? 1 : 0;
}
}
}
// Emit one iteration of the map as a JSON object on stdout:
// { "iteration": N, "map": [ {x, y, z, value}, ... ] }.
// x is the slowest-varying loop here, preserving the original element order.
template <size_t xSize, size_t ySize, size_t zSize>
void printJSON(unsigned char *map, int iter)
{
    printf("\t{\n");
    printf("\t\"iteration\" : %d,\n", iter);
    printf("\t\"map\" : [\n");
    for (int i = 0; i < xSize; ++i)
    {
        for (int j = 0; j < ySize; ++j)
        {
            for (int k = 0; k < zSize; ++k)
            {
                // Comma-before style: every element except the first is
                // preceded by ",\n" so the list never ends with a comma.
                if (!(i == 0 && j == 0 && k == 0)) printf(",\n");
                // Bug fix: a string literal must bind to const char*
                // (binding to writable char* is ill-formed in modern C++).
                const char *val = (map[k*(xSize*ySize)+j*xSize+i]) ? "true" : "false";
                printf("\t\t{\"x\":%d, \"y\":%d, \"z\":%d, \"value\":%s}", i, j, k, val);
            }
        }
    }
    printf("\n\t\t]\n\t}");
}
// Tally the automaton state: alive/dead cells in newMap, plus net births
// and deaths relative to oldMap (clamped at zero, never negative).
template <size_t xSize, size_t ySize, size_t zSize>
void countStats(unsigned char *oldMap, unsigned char *newMap, MapStats &stats)
{
    const size_t total = xSize * ySize * zSize;
    int oldAlive = 0;
    int newAlive = 0;
    // Cells are 0/1 bytes laid out contiguously, so a flat scan suffices.
    for (size_t idx = 0; idx < total; ++idx)
    {
        oldAlive += oldMap[idx];
        newAlive += newMap[idx];
    }
    const int newDead = (int)total - newAlive;
    const int oldDead = (int)total - oldAlive;
    stats.alive  = newAlive;
    stats.dead   = newDead;
    stats.births = (newAlive > oldAlive) ? newAlive - oldAlive : 0;
    stats.deaths = (newDead > oldDead) ? newDead - oldDead : 0;
}
|
aa469ebf418232b3014d59f9bdcd60c3a2d785f2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#define TOTAL_THREADS 1024
#define THREADS_PER_BLOCK 256
#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))
// Choose a power-of-two thread count for `work_size` elements:
// the largest power of two not exceeding work_size, clamped to
// the range [1, TOTAL_THREADS].
inline int opt_n_threads(int work_size) {
  const double exact = ::log(static_cast<double>(work_size)) / ::log(2.0);
  const int pow_2 = static_cast<int>(exact);  // truncate toward zero
  int threads = 1 << pow_2;
  if (threads > TOTAL_THREADS) threads = TOTAL_THREADS;
  if (threads < 1) threads = 1;
  return threads;
}
// Reduction helper: fold slot idx2 into slot idx1, keeping the larger
// distance together with the point index that produced it.
// NOTE(review): ties (v1 == v2) keep i1, since the comparison is strict.
__device__ void __update(float *__restrict__ dists, int *__restrict__ dists_i,
int idx1, int idx2) {
const float v1 = dists[idx1], v2 = dists[idx2];
const int i1 = dists_i[idx1], i2 = dists_i[idx2];
dists[idx1] = max(v1, v2);
dists_i[idx1] = v2 > v1 ? i2 : i1;
}
// Furthest point sampling with a blended distance metric:
//   d = alpha * sqrt(weighted manhattan-axis sq. distance)
//     + (1 - alpha) * density_weight[k] * euclidean distance.
// One block per batch element; `temp` holds each point's current distance
// to the selected set and must be pre-filled by the caller. The shared-memory
// ladder below reduces the per-thread argmax to thread 0.
// NOTE(review): assumes blockDim.x == block_size — confirm at launch site.
// NOTE(review): manhattan_weights is read as a flat 3-vector ([0..2], no
// batch offset), i.e. shared by all batches, unlike density_weights.
template <unsigned int block_size>
__global__ void density_and_manhattan_weights_meanwhile_furthest_point_sampling_kernel(int b, int n, int m,
float alpha,
const float *__restrict__ dataset,
const float *__restrict__ manhattan_weights,
const float *__restrict__ density_weights,
float *__restrict__ temp,
int *__restrict__ idxs) {
// dataset: (B, N, 3)
// temp: (B, N)
// weights: (B, N)
// output:
// idx: (B, M)
if (m <= 0) return;
__shared__ float dists[block_size];
__shared__ int dists_i[block_size];
int batch_index = blockIdx.x;
dataset += batch_index * n * 3;
temp += batch_index * n;
idxs += batch_index * m;
density_weights += batch_index * n;
int tid = threadIdx.x;
const int stride = block_size;
// get the manhattan weights
float manhattan_weight_x = manhattan_weights[0];
float manhattan_weight_y = manhattan_weights[1];
float manhattan_weight_z = manhattan_weights[2];
// Point 0 is always the first sample.
int old = 0;
if (threadIdx.x == 0) idxs[0] = old;
__syncthreads();
for (int j = 1; j < m; j++) {
int besti = 0;
float best = -1;
float x1 = dataset[old * 3 + 0];
float y1 = dataset[old * 3 + 1];
float z1 = dataset[old * 3 + 2];
// Each thread scans a strided slice, updating min-distance-to-set in temp
// and tracking its local argmax.
for (int k = tid; k < n; k += stride) {
float x2,y2,z2;
x2 = dataset[k * 3 + 0];
y2 = dataset[k * 3 + 1];
z2 = dataset[k * 3 + 2];
//float d = manhattan_weight_x * (x2 - x1) * (x2 - x1) + manhattan_weight_y * (y2 - y1) * (y2 - y1) +
// manhattan_weight_z * (z2 - z1) * (z2 - z1);
float d_manhattan_part = sqrt(manhattan_weight_x * (x2 - x1) * (x2 - x1) + manhattan_weight_y * (y2 - y1) *
(y2 - y1) + manhattan_weight_z * (z2 - z1) * (z2 - z1));
float d_density_part = density_weights[k] * sqrt((x2 - x1) * (x2 - x1) + (y2 - y1) * (y2 - y1) + (z2 - z1) *
(z2 - z1));
float d = alpha * d_manhattan_part + (1 - alpha) * d_density_part;
//d = sqrt(d) * density_weights[k];
float d2 = min(d, temp[k]);
temp[k] = d2;
besti = d2 > best ? k : besti;
best = d2 > best ? d2 : best;
}
dists[tid] = best;
dists_i[tid] = besti;
__syncthreads();
// Tree reduction over shared memory; each stage halves the active range
// and is followed by a full-block barrier.
if (block_size >= 1024) {
if (tid < 512) {
__update(dists, dists_i, tid, tid + 512);
}
__syncthreads();
}
if (block_size >= 512) {
if (tid < 256) {
__update(dists, dists_i, tid, tid + 256);
}
__syncthreads();
}
if (block_size >= 256) {
if (tid < 128) {
__update(dists, dists_i, tid, tid + 128);
}
__syncthreads();
}
if (block_size >= 128) {
if (tid < 64) {
__update(dists, dists_i, tid, tid + 64);
}
__syncthreads();
}
if (block_size >= 64) {
if (tid < 32) {
__update(dists, dists_i, tid, tid + 32);
}
__syncthreads();
}
if (block_size >= 32) {
if (tid < 16) {
__update(dists, dists_i, tid, tid + 16);
}
__syncthreads();
}
if (block_size >= 16) {
if (tid < 8) {
__update(dists, dists_i, tid, tid + 8);
}
__syncthreads();
}
if (block_size >= 8) {
if (tid < 4) {
__update(dists, dists_i, tid, tid + 4);
}
__syncthreads();
}
if (block_size >= 4) {
if (tid < 2) {
__update(dists, dists_i, tid, tid + 2);
}
__syncthreads();
}
if (block_size >= 2) {
if (tid < 1) {
__update(dists, dists_i, tid, tid + 1);
}
__syncthreads();
}
// All threads read the winner written by the reduction; thread 0 records it.
old = dists_i[0];
if (tid == 0) idxs[j] = old;
}
}
// For every point, count (as a float) how many points of the same batch lie
// within radius r, inclusive — each point also counts itself since d == 0.
// Launch layout: blockIdx.y = batch, blockIdx.x * blockDim.x + threadIdx.x = point.
__global__ void num_points_within_r_kernel(int b, int n, float r,
const float *__restrict__ xyz,
float * __restrict__ results) {
// xyz: (B, N, 3)
// results: (B, N)
int bs_idx = blockIdx.y;
int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (bs_idx >= b || pt_idx >= n) return;
const float *__restrict__ new_xyz = xyz;
new_xyz += bs_idx * n * 3 + pt_idx * 3;
xyz += bs_idx * n * 3;
results += bs_idx * n;
float new_x = new_xyz[0];
float new_y = new_xyz[1];
float new_z = new_xyz[2];
float cnt = 0.0;
// Brute-force O(N) scan over the batch; compare squared distances to avoid sqrt.
for (int k = 0; k < n; ++k) {
float x = xyz[k * 3 + 0];
float y = xyz[k * 3 + 1];
float z = xyz[k * 3 + 2];
float d = (new_x - x) * (new_x - x) + (new_y - y) * (new_y - y) + (new_z - z) * (new_z - z);
if (d <= r * r) cnt++;
}
results[pt_idx] = cnt;
}
// Host launcher for num_points_within_r_kernel: one grid row per batch,
// THREADS_PER_BLOCK points per block. Aborts the process on launch failure.
void num_points_within_r_kernel_launcher(int b, int n, float r,
const float *xyz,
float *results, hipStream_t stream) {
hipError_t err;
// batch.
dim3 blocks(DIVUP(n, THREADS_PER_BLOCK), b);
dim3 threads(THREADS_PER_BLOCK);
hipLaunchKernelGGL(( num_points_within_r_kernel), dim3(blocks), dim3(threads), 0, stream, b, n, r, xyz, results);
err = hipGetLastError();
if (hipSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", hipGetErrorString(err));
exit(-1);
}
}
// Host launcher: picks a power-of-two block size via opt_n_threads(n) and
// dispatches the matching template instantiation (the template parameter
// sizes the shared-memory reduction arrays). One block per batch element.
// The default case mirrors the 1024 instantiation.
void density_and_manhattan_weights_meanwhile_furthest_point_sampling_kernel_launcher(int b, int n, int m,
float alpha,
const float *dataset,
const float *manhattan_weights,
const float *density_weights,
float *temp, int *idx, hipStream_t stream) {
hipError_t err;
unsigned int n_threads = opt_n_threads(n);
switch(n_threads) {
case 1024:
hipLaunchKernelGGL(( density_and_manhattan_weights_meanwhile_furthest_point_sampling_kernel<1024>)
, dim3(b), dim3(n_threads), 0, stream, b, n, m, alpha, dataset, manhattan_weights, density_weights, temp, idx);
break;
case 512:
hipLaunchKernelGGL(( density_and_manhattan_weights_meanwhile_furthest_point_sampling_kernel<512>)
, dim3(b), dim3(n_threads), 0, stream, b, n, m, alpha, dataset, manhattan_weights, density_weights, temp, idx);
break;
case 256:
hipLaunchKernelGGL(( density_and_manhattan_weights_meanwhile_furthest_point_sampling_kernel<256>)
, dim3(b), dim3(n_threads), 0, stream, b, n, m, alpha, dataset, manhattan_weights, density_weights, temp, idx);
break;
case 128:
hipLaunchKernelGGL(( density_and_manhattan_weights_meanwhile_furthest_point_sampling_kernel<128>)
, dim3(b), dim3(n_threads), 0, stream, b, n, m, alpha, dataset, manhattan_weights, density_weights, temp, idx);
break;
case 64:
hipLaunchKernelGGL(( density_and_manhattan_weights_meanwhile_furthest_point_sampling_kernel<64>)
, dim3(b), dim3(n_threads), 0, stream, b, n, m, alpha, dataset, manhattan_weights, density_weights, temp, idx);
break;
case 32:
hipLaunchKernelGGL(( density_and_manhattan_weights_meanwhile_furthest_point_sampling_kernel<32>)
, dim3(b), dim3(n_threads), 0, stream, b, n, m, alpha, dataset, manhattan_weights, density_weights, temp, idx);
break;
case 16:
hipLaunchKernelGGL(( density_and_manhattan_weights_meanwhile_furthest_point_sampling_kernel<16>)
, dim3(b), dim3(n_threads), 0, stream, b, n, m, alpha, dataset, manhattan_weights, density_weights, temp, idx);
break;
case 8:
hipLaunchKernelGGL(( density_and_manhattan_weights_meanwhile_furthest_point_sampling_kernel<8>)
, dim3(b), dim3(n_threads), 0, stream, b, n, m, alpha, dataset, manhattan_weights, density_weights, temp, idx);
break;
case 4:
hipLaunchKernelGGL(( density_and_manhattan_weights_meanwhile_furthest_point_sampling_kernel<4>)
, dim3(b), dim3(n_threads), 0, stream, b, n, m, alpha, dataset, manhattan_weights, density_weights, temp, idx);
break;
case 2:
hipLaunchKernelGGL(( density_and_manhattan_weights_meanwhile_furthest_point_sampling_kernel<2>)
, dim3(b), dim3(n_threads), 0, stream, b, n, m, alpha, dataset, manhattan_weights, density_weights, temp, idx);
break;
case 1:
hipLaunchKernelGGL(( density_and_manhattan_weights_meanwhile_furthest_point_sampling_kernel<1>)
, dim3(b), dim3(n_threads), 0, stream, b, n, m, alpha, dataset, manhattan_weights, density_weights, temp, idx);
break;
default:
hipLaunchKernelGGL(( density_and_manhattan_weights_meanwhile_furthest_point_sampling_kernel<1024>)
, dim3(b), dim3(n_threads), 0, stream, b, n, m, alpha, dataset, manhattan_weights, density_weights, temp, idx);
break;
}
err = hipGetLastError();
if (hipSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", hipGetErrorString(err));
exit(-1);
}
}
// Modified from
// https://github.com/qiqihaer/3DSSD-pytorch/blob/master/lib/pointnet2/src/sampling_gpu.cu
// Furthest point sampling over a precomputed (B, N, N) pairwise distance
// matrix instead of raw coordinates: the distance from the last-selected
// point `old` to candidate k is simply dataset[old * n + k]. One block per
// batch; `temp` holds each point's running min distance to the selected set.
// NOTE(review): assumes blockDim.x == block_size — confirm at launch site.
template <unsigned int block_size>
__global__ void furthest_point_sampling_with_dist_kernel(
int b, int n, int m, const float *__restrict__ dataset,
float *__restrict__ temp, int *__restrict__ idxs) {
// dataset: (B, N, N)
// tmp: (B, N)
// output:
// idx: (B, M)
if (m <= 0)
return;
__shared__ float dists[block_size];
__shared__ int dists_i[block_size];
int batch_index = blockIdx.x;
dataset += batch_index * n * n;
temp += batch_index * n;
idxs += batch_index * m;
int tid = threadIdx.x;
const int stride = block_size;
// Point 0 is always the first sample.
int old = 0;
if (threadIdx.x == 0)
idxs[0] = old;
__syncthreads();
for (int j = 1; j < m; j++) {
int besti = 0;
float best = -1;
// float x1 = dataset[old * 3 + 0];
// float y1 = dataset[old * 3 + 1];
// float z1 = dataset[old * 3 + 2];
// Each thread scans a strided slice, updating min-distance-to-set in temp
// and tracking its local argmax.
for (int k = tid; k < n; k += stride) {
// float x2, y2, z2;
// x2 = dataset[k * 3 + 0];
// y2 = dataset[k * 3 + 1];
// z2 = dataset[k * 3 + 2];
// float d = (x2 - x1) * (x2 - x1) + (y2 - y1) * (y2 - y1) + (z2 - z1) *
// (z2 - z1);
float d = dataset[old * n + k];
float d2 = min(d, temp[k]);
temp[k] = d2;
besti = d2 > best ? k : besti;
best = d2 > best ? d2 : best;
}
dists[tid] = best;
dists_i[tid] = besti;
__syncthreads();
// Tree reduction over shared memory; each stage halves the active range
// and is followed by a full-block barrier.
if (block_size >= 1024) {
if (tid < 512) {
__update(dists, dists_i, tid, tid + 512);
}
__syncthreads();
}
if (block_size >= 512) {
if (tid < 256) {
__update(dists, dists_i, tid, tid + 256);
}
__syncthreads();
}
if (block_size >= 256) {
if (tid < 128) {
__update(dists, dists_i, tid, tid + 128);
}
__syncthreads();
}
if (block_size >= 128) {
if (tid < 64) {
__update(dists, dists_i, tid, tid + 64);
}
__syncthreads();
}
if (block_size >= 64) {
if (tid < 32) {
__update(dists, dists_i, tid, tid + 32);
}
__syncthreads();
}
if (block_size >= 32) {
if (tid < 16) {
__update(dists, dists_i, tid, tid + 16);
}
__syncthreads();
}
if (block_size >= 16) {
if (tid < 8) {
__update(dists, dists_i, tid, tid + 8);
}
__syncthreads();
}
if (block_size >= 8) {
if (tid < 4) {
__update(dists, dists_i, tid, tid + 4);
}
__syncthreads();
}
if (block_size >= 4) {
if (tid < 2) {
__update(dists, dists_i, tid, tid + 2);
}
__syncthreads();
}
if (block_size >= 2) {
if (tid < 1) {
__update(dists, dists_i, tid, tid + 1);
}
__syncthreads();
}
// All threads read the winner written by the reduction; thread 0 records it.
old = dists_i[0];
if (tid == 0)
idxs[j] = old;
}
}
// Host launcher for the distance-matrix FPS kernel. Picks a power-of-two
// block size via opt_n_threads(n) and dispatches the matching template
// instantiation; one block per batch element.
// NOTE(review): opt_n_threads clamps to TOTAL_THREADS (1024), so the 2048
// case looks unreachable; if it were ever taken, 2048 threads per block
// would exceed the usual device limit — confirm before relying on it.
void furthest_point_sampling_with_dist_kernel_launcher(int b, int n, int m,
const float *dataset,
float *temp, int *idxs,
hipStream_t stream) {
// dataset: (B, N, N)
// temp: (B, N)
// output:
// idx: (B, M)
hipError_t err;
unsigned int n_threads = opt_n_threads(n);
switch (n_threads) {
case 2048:
hipLaunchKernelGGL(( furthest_point_sampling_with_dist_kernel<2048>), dim3(b), dim3(n_threads), 0, stream,
b, n, m, dataset, temp, idxs);
break;
case 1024:
hipLaunchKernelGGL(( furthest_point_sampling_with_dist_kernel<1024>), dim3(b), dim3(n_threads), 0, stream,
b, n, m, dataset, temp, idxs);
break;
case 512:
hipLaunchKernelGGL(( furthest_point_sampling_with_dist_kernel<512>), dim3(b), dim3(n_threads), 0, stream,
b, n, m, dataset, temp, idxs);
break;
case 256:
hipLaunchKernelGGL(( furthest_point_sampling_with_dist_kernel<256>), dim3(b), dim3(n_threads), 0, stream,
b, n, m, dataset, temp, idxs);
break;
case 128:
hipLaunchKernelGGL(( furthest_point_sampling_with_dist_kernel<128>), dim3(b), dim3(n_threads), 0, stream,
b, n, m, dataset, temp, idxs);
break;
case 64:
hipLaunchKernelGGL(( furthest_point_sampling_with_dist_kernel<64>), dim3(b), dim3(n_threads), 0, stream,
b, n, m, dataset, temp, idxs);
break;
case 32:
hipLaunchKernelGGL(( furthest_point_sampling_with_dist_kernel<32>), dim3(b), dim3(n_threads), 0, stream,
b, n, m, dataset, temp, idxs);
break;
case 16:
hipLaunchKernelGGL(( furthest_point_sampling_with_dist_kernel<16>), dim3(b), dim3(n_threads), 0, stream,
b, n, m, dataset, temp, idxs);
break;
case 8:
hipLaunchKernelGGL(( furthest_point_sampling_with_dist_kernel<8>), dim3(b), dim3(n_threads), 0, stream,
b, n, m, dataset, temp, idxs);
break;
case 4:
hipLaunchKernelGGL(( furthest_point_sampling_with_dist_kernel<4>), dim3(b), dim3(n_threads), 0, stream,
b, n, m, dataset, temp, idxs);
break;
case 2:
hipLaunchKernelGGL(( furthest_point_sampling_with_dist_kernel<2>), dim3(b), dim3(n_threads), 0, stream,
b, n, m, dataset, temp, idxs);
break;
case 1:
hipLaunchKernelGGL(( furthest_point_sampling_with_dist_kernel<1>), dim3(b), dim3(n_threads), 0, stream,
b, n, m, dataset, temp, idxs);
break;
default:
hipLaunchKernelGGL(( furthest_point_sampling_with_dist_kernel<1024>), dim3(b), dim3(n_threads), 0, stream,
b, n, m, dataset, temp, idxs);
}
err = hipGetLastError();
if (hipSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", hipGetErrorString(err));
exit(-1);
}
} | aa469ebf418232b3014d59f9bdcd60c3a2d785f2.cu | #include <stdio.h>
#include <math.h>
#include <stdlib.h>
#define TOTAL_THREADS 1024
#define THREADS_PER_BLOCK 256
#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))
// Choose a power-of-two thread count for `work_size` elements:
// the largest power of two not exceeding work_size, clamped to
// the range [1, TOTAL_THREADS].
inline int opt_n_threads(int work_size) {
  const double exact = std::log(static_cast<double>(work_size)) / std::log(2.0);
  const int pow_2 = static_cast<int>(exact);  // truncate toward zero
  int threads = 1 << pow_2;
  if (threads > TOTAL_THREADS) threads = TOTAL_THREADS;
  if (threads < 1) threads = 1;
  return threads;
}
// Reduction helper: fold slot idx2 into slot idx1, keeping the larger
// distance together with the point index that produced it.
// NOTE(review): ties (v1 == v2) keep i1, since the comparison is strict.
__device__ void __update(float *__restrict__ dists, int *__restrict__ dists_i,
int idx1, int idx2) {
const float v1 = dists[idx1], v2 = dists[idx2];
const int i1 = dists_i[idx1], i2 = dists_i[idx2];
dists[idx1] = max(v1, v2);
dists_i[idx1] = v2 > v1 ? i2 : i1;
}
// Furthest point sampling with a blended distance metric:
//   d = alpha * sqrt(weighted manhattan-axis sq. distance)
//     + (1 - alpha) * density_weight[k] * euclidean distance.
// One block per batch element; `temp` holds each point's current distance
// to the selected set and must be pre-filled by the caller. The shared-memory
// ladder below reduces the per-thread argmax to thread 0.
// NOTE(review): assumes blockDim.x == block_size — confirm at launch site.
// NOTE(review): manhattan_weights is read as a flat 3-vector ([0..2], no
// batch offset), i.e. shared by all batches, unlike density_weights.
template <unsigned int block_size>
__global__ void density_and_manhattan_weights_meanwhile_furthest_point_sampling_kernel(int b, int n, int m,
float alpha,
const float *__restrict__ dataset,
const float *__restrict__ manhattan_weights,
const float *__restrict__ density_weights,
float *__restrict__ temp,
int *__restrict__ idxs) {
// dataset: (B, N, 3)
// temp: (B, N)
// weights: (B, N) — each candidate point carries its own density weight
// output:
// idx: (B, M)
if (m <= 0) return;
__shared__ float dists[block_size];
__shared__ int dists_i[block_size];
int batch_index = blockIdx.x;
dataset += batch_index * n * 3;
temp += batch_index * n;
idxs += batch_index * m;
density_weights += batch_index * n;
int tid = threadIdx.x;
const int stride = block_size;
// get the manhattan weights
float manhattan_weight_x = manhattan_weights[0];
float manhattan_weight_y = manhattan_weights[1];
float manhattan_weight_z = manhattan_weights[2];
// Point 0 is always the first sample.
int old = 0;
if (threadIdx.x == 0) idxs[0] = old;
__syncthreads();
for (int j = 1; j < m; j++) {
int besti = 0;
float best = -1;
float x1 = dataset[old * 3 + 0];
float y1 = dataset[old * 3 + 1];
float z1 = dataset[old * 3 + 2];
// Each thread scans a strided slice, updating min-distance-to-set in temp
// and tracking its local argmax.
for (int k = tid; k < n; k += stride) {
float x2,y2,z2;
x2 = dataset[k * 3 + 0];
y2 = dataset[k * 3 + 1];
z2 = dataset[k * 3 + 2];
//float d = manhattan_weight_x * (x2 - x1) * (x2 - x1) + manhattan_weight_y * (y2 - y1) * (y2 - y1) +
// manhattan_weight_z * (z2 - z1) * (z2 - z1);
float d_manhattan_part = sqrt(manhattan_weight_x * (x2 - x1) * (x2 - x1) + manhattan_weight_y * (y2 - y1) *
(y2 - y1) + manhattan_weight_z * (z2 - z1) * (z2 - z1));
float d_density_part = density_weights[k] * sqrt((x2 - x1) * (x2 - x1) + (y2 - y1) * (y2 - y1) + (z2 - z1) *
(z2 - z1));
float d = alpha * d_manhattan_part + (1 - alpha) * d_density_part;
//d = sqrt(d) * density_weights[k];
float d2 = min(d, temp[k]);
temp[k] = d2;
besti = d2 > best ? k : besti;
best = d2 > best ? d2 : best;
}
dists[tid] = best;
dists_i[tid] = besti;
__syncthreads();
// Tree reduction over shared memory; each stage halves the active range
// and is followed by a full-block barrier.
if (block_size >= 1024) {
if (tid < 512) {
__update(dists, dists_i, tid, tid + 512);
}
__syncthreads();
}
if (block_size >= 512) {
if (tid < 256) {
__update(dists, dists_i, tid, tid + 256);
}
__syncthreads();
}
if (block_size >= 256) {
if (tid < 128) {
__update(dists, dists_i, tid, tid + 128);
}
__syncthreads();
}
if (block_size >= 128) {
if (tid < 64) {
__update(dists, dists_i, tid, tid + 64);
}
__syncthreads();
}
if (block_size >= 64) {
if (tid < 32) {
__update(dists, dists_i, tid, tid + 32);
}
__syncthreads();
}
if (block_size >= 32) {
if (tid < 16) {
__update(dists, dists_i, tid, tid + 16);
}
__syncthreads();
}
if (block_size >= 16) {
if (tid < 8) {
__update(dists, dists_i, tid, tid + 8);
}
__syncthreads();
}
if (block_size >= 8) {
if (tid < 4) {
__update(dists, dists_i, tid, tid + 4);
}
__syncthreads();
}
if (block_size >= 4) {
if (tid < 2) {
__update(dists, dists_i, tid, tid + 2);
}
__syncthreads();
}
if (block_size >= 2) {
if (tid < 1) {
__update(dists, dists_i, tid, tid + 1);
}
__syncthreads();
}
// All threads read the winner written by the reduction; thread 0 records it.
old = dists_i[0];
if (tid == 0) idxs[j] = old;
}
}
// For every point, count (as a float) how many points of the same batch lie
// within radius r, inclusive — each point also counts itself since d == 0.
// Launch layout: blockIdx.y = batch, blockIdx.x * blockDim.x + threadIdx.x = point.
__global__ void num_points_within_r_kernel(int b, int n, float r,
const float *__restrict__ xyz,
float * __restrict__ results) {
// xyz: (B, N, 3)
// results: (B, N)
int bs_idx = blockIdx.y;
int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (bs_idx >= b || pt_idx >= n) return;
const float *__restrict__ new_xyz = xyz;
new_xyz += bs_idx * n * 3 + pt_idx * 3;
xyz += bs_idx * n * 3;
results += bs_idx * n;
float new_x = new_xyz[0];
float new_y = new_xyz[1];
float new_z = new_xyz[2];
float cnt = 0.0;
// Brute-force O(N) scan over the batch; compare squared distances to avoid sqrt.
for (int k = 0; k < n; ++k) {
float x = xyz[k * 3 + 0];
float y = xyz[k * 3 + 1];
float z = xyz[k * 3 + 2];
float d = (new_x - x) * (new_x - x) + (new_y - y) * (new_y - y) + (new_z - z) * (new_z - z);
if (d <= r * r) cnt++;
}
results[pt_idx] = cnt;
}
// Host launcher for num_points_within_r_kernel: one grid row per batch,
// THREADS_PER_BLOCK points per block. Aborts the process on launch failure.
void num_points_within_r_kernel_launcher(int b, int n, float r,
const float *xyz,
float *results, cudaStream_t stream) {
cudaError_t err;
// each grid row (blockIdx.y) handles one batch element.
dim3 blocks(DIVUP(n, THREADS_PER_BLOCK), b);
dim3 threads(THREADS_PER_BLOCK);
num_points_within_r_kernel<<<blocks, threads, 0, stream>>>(b, n, r, xyz, results);
err = cudaGetLastError();
if (cudaSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
exit(-1);
}
}
// Host-side dispatcher for the weighted furthest-point-sampling kernel.
// opt_n_threads(n) picks the block size; the switch maps that runtime value
// onto the matching compile-time template instantiation.
void density_and_manhattan_weights_meanwhile_furthest_point_sampling_kernel_launcher(int b, int n, int m,
                               float alpha,
                               const float *dataset,
                               const float *manhattan_weights,
                               const float *density_weights,
                               float *temp, int *idx, cudaStream_t stream) {
  unsigned int n_threads = opt_n_threads(n);

// Instantiate and launch the kernel for a compile-time block size BS.
#define LAUNCH_DMW_FPS(BS)                                                    \
  density_and_manhattan_weights_meanwhile_furthest_point_sampling_kernel<BS>  \
      <<<b, n_threads, 0, stream>>>(b, n, m, alpha, dataset,                  \
                                    manhattan_weights, density_weights, temp, idx)

  switch (n_threads) {
    case 1024: LAUNCH_DMW_FPS(1024); break;
    case 512:  LAUNCH_DMW_FPS(512);  break;
    case 256:  LAUNCH_DMW_FPS(256);  break;
    case 128:  LAUNCH_DMW_FPS(128);  break;
    case 64:   LAUNCH_DMW_FPS(64);   break;
    case 32:   LAUNCH_DMW_FPS(32);   break;
    case 16:   LAUNCH_DMW_FPS(16);   break;
    case 8:    LAUNCH_DMW_FPS(8);    break;
    case 4:    LAUNCH_DMW_FPS(4);    break;
    case 2:    LAUNCH_DMW_FPS(2);    break;
    case 1:    LAUNCH_DMW_FPS(1);    break;
    default:   LAUNCH_DMW_FPS(1024); break;
  }
#undef LAUNCH_DMW_FPS

  cudaError_t err = cudaGetLastError();
  if (cudaSuccess != err) {
    fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
    exit(-1);
  }
}
// Modified from
// https://github.com/qiqihaer/3DSSD-pytorch/blob/master/lib/pointnet2/src/sampling_gpu.cu
template <unsigned int block_size>
__global__ void furthest_point_sampling_with_dist_kernel(
    int b, int n, int m, const float *__restrict__ dataset,
    float *__restrict__ temp, int *__restrict__ idxs) {
  // Farthest-point sampling driven by a precomputed pairwise-distance matrix
  // instead of raw xyz coordinates.
  // dataset: (B, N, N) pairwise distances
  // temp: (B, N) running min distance of each point to the selected set
  //       (pre-initialised by the caller)
  // output:
  //   idx: (B, M) indices of the selected points
  // Launch: one block per batch; blockDim.x == block_size (power of two <= 1024).
  if (m <= 0)
    return;
  __shared__ float dists[block_size];
  __shared__ int dists_i[block_size];

  int batch_index = blockIdx.x;
  dataset += batch_index * n * n;
  temp += batch_index * n;
  idxs += batch_index * m;

  int tid = threadIdx.x;
  const int stride = block_size;

  // the first sample is always point 0
  int old = 0;
  if (threadIdx.x == 0)
    idxs[0] = old;

  __syncthreads();
  for (int j = 1; j < m; j++) {
    int besti = 0;
    float best = -1;
    // Each thread scans a strided slice of the candidates, folding the newest
    // sample's distances into temp[] and tracking a local arg-max.
    for (int k = tid; k < n; k += stride) {
      float d = dataset[old * n + k];
      float d2 = min(d, temp[k]);
      temp[k] = d2;
      besti = d2 > best ? k : besti;
      best = d2 > best ? d2 : best;
    }
    dists[tid] = best;
    dists_i[tid] = besti;
    __syncthreads();

    // Block-wide arg-max tree reduction. Untaken branches are removed at
    // compile time since block_size is a template constant.
    if (block_size >= 1024) {
      if (tid < 512) {
        __update(dists, dists_i, tid, tid + 512);
      }
      __syncthreads();
    }
    if (block_size >= 512) {
      if (tid < 256) {
        __update(dists, dists_i, tid, tid + 256);
      }
      __syncthreads();
    }
    if (block_size >= 256) {
      if (tid < 128) {
        __update(dists, dists_i, tid, tid + 128);
      }
      __syncthreads();
    }
    if (block_size >= 128) {
      if (tid < 64) {
        __update(dists, dists_i, tid, tid + 64);
      }
      __syncthreads();
    }
    if (block_size >= 64) {
      if (tid < 32) {
        __update(dists, dists_i, tid, tid + 32);
      }
      __syncthreads();
    }
    if (block_size >= 32) {
      if (tid < 16) {
        __update(dists, dists_i, tid, tid + 16);
      }
      __syncthreads();
    }
    if (block_size >= 16) {
      if (tid < 8) {
        __update(dists, dists_i, tid, tid + 8);
      }
      __syncthreads();
    }
    if (block_size >= 8) {
      if (tid < 4) {
        __update(dists, dists_i, tid, tid + 4);
      }
      __syncthreads();
    }
    if (block_size >= 4) {
      if (tid < 2) {
        __update(dists, dists_i, tid, tid + 2);
      }
      __syncthreads();
    }
    if (block_size >= 2) {
      if (tid < 1) {
        __update(dists, dists_i, tid, tid + 1);
      }
      __syncthreads();
    }

    old = dists_i[0];
    if (tid == 0)
      idxs[j] = old;
    // BUGFIX: every thread just read dists_i[0] above. Without this barrier
    // the next iteration's stores to dists/dists_i (in particular thread 0
    // overwriting dists_i[0]) could race with those reads.
    __syncthreads();
  }
}
void furthest_point_sampling_with_dist_kernel_launcher(int b, int n, int m,
                                                       const float *dataset,
                                                       float *temp, int *idxs,
                                                       cudaStream_t stream) {
  // dataset: (B, N, N) pairwise distances
  // temp: (B, N) scratch, pre-initialised by the caller
  // output:
  //   idx: (B, M)
  // One block per batch; opt_n_threads(n) picks a power-of-two block size.
  unsigned int n_threads = opt_n_threads(n);

// Instantiate and launch the kernel for a compile-time block size BS.
#define LAUNCH_FPS_WITH_DIST(BS)                                \
  furthest_point_sampling_with_dist_kernel<BS>                  \
      <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs)

  switch (n_threads) {
    // BUGFIX: the former `case 2048` was removed - CUDA caps a block at 1024
    // threads, so that launch could never succeed (and the kernel's reduction
    // tree only handles block sizes up to 1024).
    case 1024: LAUNCH_FPS_WITH_DIST(1024); break;
    case 512:  LAUNCH_FPS_WITH_DIST(512);  break;
    case 256:  LAUNCH_FPS_WITH_DIST(256);  break;
    case 128:  LAUNCH_FPS_WITH_DIST(128);  break;
    case 64:   LAUNCH_FPS_WITH_DIST(64);   break;
    case 32:   LAUNCH_FPS_WITH_DIST(32);   break;
    case 16:   LAUNCH_FPS_WITH_DIST(16);   break;
    case 8:    LAUNCH_FPS_WITH_DIST(8);    break;
    case 4:    LAUNCH_FPS_WITH_DIST(4);    break;
    case 2:    LAUNCH_FPS_WITH_DIST(2);    break;
    case 1:    LAUNCH_FPS_WITH_DIST(1);    break;
    default:   LAUNCH_FPS_WITH_DIST(1024); break;
  }
#undef LAUNCH_FPS_WITH_DIST

  cudaError_t err = cudaGetLastError();
  if (cudaSuccess != err) {
    fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
    exit(-1);
  }
}
448a5d23b850cce5cb38343cfbe2248aca535dc5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// assumes square matrices (M = K = N)
// Note: A and B are source matrices
// A is M rows by K columns
// B is K rows by N columns
// C is destination
// C is M rows by N columns
// Single-precision GEMM for square matrices (see the file-header comment):
// each thread produces one element of C, with the row taken from the y index
// and the column from the x index.
extern "C" __global__ void sgemm(
    const float* A,
    const float* B,
    float* C,
    int widthA,
    int widthB)
{
    const int col = threadIdx.x + blockIdx.x * blockDim.x;
    const int row = threadIdx.y + blockIdx.y * blockDim.y;
    if (col >= widthA || row >= widthB)
        return;

    float acc = 0.0f;
    for (int k = 0; k < widthA; ++k)
        acc += A[row * widthA + k] * B[k * widthB + col];
    C[row * widthB + col] = acc;
}
// Double-precision variant of sgemm: one thread per element of C.
// NOTE(review): the accumulator below is declared `float` inside a
// double-precision kernel, so every double product is truncated to single
// precision before being summed - it should almost certainly be
// `double sum = 0.0;`. Left unchanged here; see the matching CUDA source.
extern "C" __global__ void dgemm(
const double* A,
const double* B,
double* C,
int widthA,
int widthB)
{
// one output element per thread: x -> column, y -> row
const int col = threadIdx.x + blockIdx.x * blockDim.x;
const int row = threadIdx.y + blockIdx.y * blockDim.y;
if (row < widthB && col < widthA)
{
float sum = 0.0f; // NOTE(review): float accumulator in a double kernel (precision loss)
for (int i = 0; i < widthA; i++)
{
sum +=
A[i + row * widthA] *
B[col + i * widthB];
}
C[col + row * widthB] = sum;
}
} | 448a5d23b850cce5cb38343cfbe2248aca535dc5.cu | // assumes square matrices (M = K = N)
// Note: A and B are source matrices
// A is M rows by K columns
// B is K rows by N columns
// C is destination
// C is M rows by N columns
// Single-precision GEMM for square matrices (see the file-header comment):
// each thread produces one element of C, with the row taken from the y index
// and the column from the x index.
extern "C" __global__ void sgemm(
    const float* A,
    const float* B,
    float* C,
    int widthA,
    int widthB)
{
    const int col = threadIdx.x + blockIdx.x * blockDim.x;
    const int row = threadIdx.y + blockIdx.y * blockDim.y;
    if (col >= widthA || row >= widthB)
        return;

    float acc = 0.0f;
    for (int k = 0; k < widthA; ++k)
        acc += A[row * widthA + k] * B[k * widthB + col];
    C[row * widthB + col] = acc;
}
// Double-precision GEMM for square matrices (see the file-header comment):
// each thread computes one element of C (row from y index, column from x index).
extern "C" __global__ void dgemm(
    const double* A,
    const double* B,
    double* C,
    int widthA,
    int widthB)
{
    const int col = threadIdx.x + blockIdx.x * blockDim.x;
    const int row = threadIdx.y + blockIdx.y * blockDim.y;
    if (row < widthB && col < widthA)
    {
        // BUGFIX: accumulate in double. The original used `float sum = 0.0f;`,
        // silently truncating every double product to single precision.
        double sum = 0.0;
        for (int i = 0; i < widthA; i++)
        {
            sum +=
                A[i + row * widthA] *
                B[col + i * widthB];
        }
        C[col + row * widthB] = sum;
    }
}
402f3791fe6b202b505fca926950a0870af796c1.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "vector_axpby.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
// Block-shape and matrix-size sweeps used by the auto-generated benchmark
// harness below (20 launch configurations x up to 7 problem sizes).
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Auto-generated micro-benchmark driver: for each (matrix size, block shape)
// pair it launches vector_axpby once to warm up, 10 more times, then times
// 1000 launches and prints [usecs, (BLOCKX,BLOCKY), (XSIZE,YSIZE)].
// NOTE(review): argv[1] is read without checking argc - TODO confirm callers
// always pass the matrix count.
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const int n = 1;
const REAL alpha = 1;
const REAL *x = NULL;
// NOTE(review): allocation size is XSIZE*YSIZE bytes, not
// XSIZE*YSIZE*sizeof(REAL) - looks like a missing sizeof; generated
// harness only, so left unchanged.
hipMalloc(&x, XSIZE*YSIZE);
const int offset_x = 1;
const int stride_x = 1;
const REAL beta = 1;
REAL *y = NULL;
hipMalloc(&y, XSIZE*YSIZE);
int offset_y = 1;
int stride_y = 2;
// round the launch domain up to a multiple of the block shape
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
// warm-up launch (hipFree(0) forces lazy context initialisation)
hipFree(0);hipLaunchKernelGGL((
vector_axpby), dim3(gridBlock),dim3(threadBlock), 0, 0, n,alpha,x,offset_x,stride_x,beta,y,offset_y,stride_y);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
vector_axpby), dim3(gridBlock),dim3(threadBlock), 0, 0, n,alpha,x,offset_x,stride_x,beta,y,offset_y,stride_y);
}
// timed section: 1000 back-to-back launches. NOTE(review): no device sync
// before reading 'end', so this measures enqueue time, not kernel runtime.
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
vector_axpby), dim3(gridBlock),dim3(threadBlock), 0, 0, n,alpha,x,offset_x,stride_x,beta,y,offset_y,stride_y);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 402f3791fe6b202b505fca926950a0870af796c1.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "vector_axpby.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
// Block-shape and matrix-size sweeps used by the auto-generated benchmark
// harness below (20 launch configurations x up to 7 problem sizes).
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Auto-generated micro-benchmark driver: for each (matrix size, block shape)
// pair it launches vector_axpby once to warm up, 10 more times, then times
// 1000 launches and prints [usecs, (BLOCKX,BLOCKY), (XSIZE,YSIZE)].
// NOTE(review): argv[1] is read without checking argc - TODO confirm callers
// always pass the matrix count.
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const int n = 1;
const REAL alpha = 1;
const REAL *x = NULL;
// NOTE(review): allocation size is XSIZE*YSIZE bytes, not
// XSIZE*YSIZE*sizeof(REAL) - looks like a missing sizeof; generated
// harness only, so left unchanged.
cudaMalloc(&x, XSIZE*YSIZE);
const int offset_x = 1;
const int stride_x = 1;
const REAL beta = 1;
REAL *y = NULL;
cudaMalloc(&y, XSIZE*YSIZE);
int offset_y = 1;
int stride_y = 2;
// round the launch domain up to a multiple of the block shape
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
// warm-up launch (cudaFree(0) forces lazy context initialisation)
cudaFree(0);
vector_axpby<<<gridBlock,threadBlock>>>(n,alpha,x,offset_x,stride_x,beta,y,offset_y,stride_y);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
vector_axpby<<<gridBlock,threadBlock>>>(n,alpha,x,offset_x,stride_x,beta,y,offset_y,stride_y);
}
// timed section: 1000 back-to-back launches. NOTE(review): no device sync
// before reading 'end', so this measures enqueue time, not kernel runtime.
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
vector_axpby<<<gridBlock,threadBlock>>>(n,alpha,x,offset_x,stride_x,beta,y,offset_y,stride_y);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
55242ff9c3a703ef8dc01f6d4248d5facca94706.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Highly Optimized Object-oriented Many-particle Dynamics -- Blue Edition
(HOOMD-blue) Open Source Software License Copyright 2009-2016 The Regents of
the University of Michigan All rights reserved.
HOOMD-blue may contain modifications ("Contributions") provided, and to which
copyright is held, by various Contributors who have granted The Regents of the
University of Michigan the right to modify and/or distribute such Contributions.
You may redistribute, use, and create derivate works of HOOMD-blue, in source
and binary forms, provided you abide by the following conditions:
* Redistributions of source code must retain the above copyright notice, this
list of conditions, and the following disclaimer both in the code and
prominently in any materials provided with the distribution.
* Redistributions in binary form must reproduce the above copyright notice, this
list of conditions, and the following disclaimer in the documentation and/or
other materials provided with the distribution.
* All publications and presentations based on HOOMD-blue, including any reports
or published results obtained, in whole or in part, with HOOMD-blue, will
acknowledge its use according to the terms posted at the time of submission on:
http://codeblue.umich.edu/hoomd-blue/citations.html
* Any electronic documents citing HOOMD-Blue will link to the HOOMD-Blue website:
http://codeblue.umich.edu/hoomd-blue/
* Apart from the above required attributions, neither the name of the copyright
holder nor the names of HOOMD-blue's contributors may be used to endorse or
promote products derived from this software without specific prior written
permission.
Disclaimer
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS ``AS IS'' AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND/OR ANY
WARRANTIES THAT THIS SOFTWARE IS FREE OF INFRINGEMENT ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// Maintainer: joaander
#include "TwoStepNVEGPU.cuh"
#include "VectorMath.h"
#include <assert.h>
/*! \file TwoStepNVEGPU.cu
\brief Defines GPU kernel code for NVE integration on the GPU. Used by TwoStepNVEGPU.
*/
//! Takes the first half-step forward in the velocity-verlet NVE integration on a group of particles
/*! \param d_pos array of particle positions
\param d_vel array of particle velocities
\param d_accel array of particle accelerations
\param d_image array of particle images
\param d_group_members Device array listing the indicies of the mebers of the group to integrate
\param group_size Number of members in the group
\param box Box dimensions for periodic boundary condition handling
\param deltaT timestep
\param limit If \a limit is true, then the dynamics will be limited so that particles do not move
a distance further than \a limit_val in one step.
\param limit_val Length to limit particle distance movement to
\param zero_force Set to true to always assign an acceleration of 0 to all particles in the group
This kernel must be executed with a 1D grid of any block size such that the number of threads is greater than or
equal to the number of members in the group. The kernel's implementation simply reads one particle in each thread
and updates that particle.
<b>Performance notes:</b>
Particle properties are read via the texture cache to optimize the bandwidth obtained with sparse groups. The writes
in sparse groups will not be coalesced. However, because ParticleGroup sorts the index list the writes will be as
contiguous as possible leading to fewer memory transactions on compute 1.3 hardware and more cache hits on Fermi.
*/
//! First velocity-verlet half step: drift positions and half-kick velocities
/*! One thread per group member. Performs
    r(t+dT) = r(t) + v(t)*dT + a(t)*dT^2/2  (displacement optionally clamped
    to limit_val) and v(t+dT/2) = v(t) + a(t)*dT/2, then wraps the new
    position into the box while updating the image flags.
    Run on a 1D grid with at least group_size threads in total.
*/
extern "C" __global__
void gpu_nve_step_one_kernel(Scalar4 *d_pos,
                             Scalar4 *d_vel,
                             const Scalar3 *d_accel,
                             int3 *d_image,
                             unsigned int *d_group_members,
                             unsigned int group_size,
                             BoxDim box,
                             Scalar deltaT,
                             bool limit,
                             Scalar limit_val,
                             bool zero_force)
    {
    const unsigned int work_idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (work_idx >= group_size)
        return;

    const unsigned int idx = d_group_members[work_idx];

    // load the particle state
    const Scalar4 postype = d_pos[idx];
    const Scalar4 velmass = d_vel[idx];
    Scalar3 pos = make_scalar3(postype.x, postype.y, postype.z);
    Scalar3 vel = make_scalar3(velmass.x, velmass.y, velmass.z);
    Scalar3 accel = zero_force ? make_scalar3(Scalar(0.0), Scalar(0.0), Scalar(0.0))
                               : d_accel[idx];

    // drift: dx = v*dT + a*dT^2/2, clamped to limit_val when requested
    Scalar3 dx = vel * deltaT + (Scalar(1.0)/Scalar(2.0)) * accel * deltaT * deltaT;
    if (limit)
        {
        const Scalar len = sqrtf(dot(dx, dx));
        if (len > limit_val)
            dx = dx / len * limit_val;
        }
    pos += dx;

    // half kick
    vel += (Scalar(1.0)/Scalar(2.0)) * accel * deltaT;

    // wrap back into the primary box, tracking the image flags
    int3 image = d_image[idx];
    box.wrap(pos, image);

    // store the updated state (type and mass stay in the .w slots)
    d_pos[idx] = make_scalar4(pos.x, pos.y, pos.z, postype.w);
    d_vel[idx] = make_scalar4(vel.x, vel.y, vel.z, velmass.w);
    d_image[idx] = image;
    }
/*! \param d_pos array of particle positions
\param d_vel array of particle velocities
\param d_accel array of particle accelerations
\param d_image array of particle images
\param d_group_members Device array listing the indicies of the mebers of the group to integrate
\param group_size Number of members in the group
\param box Box dimensions for periodic boundary condition handling
\param deltaT timestep
\param limit If \a limit is true, then the dynamics will be limited so that particles do not move
a distance further than \a limit_val in one step.
\param limit_val Length to limit particle distance movement to
\param zero_force Set to true to always assign an acceleration of 0 to all particles in the group
See gpu_nve_step_one_kernel() for full documentation, this function is just a driver.
*/
/*! Host driver for gpu_nve_step_one_kernel(); see the kernel for parameter
    documentation. \a block_size is clamped to the kernel's hardware maximum,
    which is queried once and cached.
*/
hipError_t gpu_nve_step_one(Scalar4 *d_pos,
                            Scalar4 *d_vel,
                            const Scalar3 *d_accel,
                            int3 *d_image,
                            unsigned int *d_group_members,
                            unsigned int group_size,
                            const BoxDim& box,
                            Scalar deltaT,
                            bool limit,
                            Scalar limit_val,
                            bool zero_force,
                            unsigned int block_size)
    {
    // query the kernel's maximum block size once and cache it
    static unsigned int max_block_size = UINT_MAX;
    if (max_block_size == UINT_MAX)
        {
        hipFuncAttributes attr;
        hipFuncGetAttributes(&attr, (const void*)gpu_nve_step_one_kernel);
        max_block_size = attr.maxThreadsPerBlock;
        }

    const unsigned int run_block_size = min(block_size, max_block_size);
    dim3 grid(group_size / run_block_size + 1, 1, 1);
    dim3 threads(run_block_size, 1, 1);

    hipLaunchKernelGGL(( gpu_nve_step_one_kernel), dim3(grid), dim3(threads) , 0, 0,
                       d_pos, d_vel, d_accel, d_image, d_group_members,
                       group_size, box, deltaT, limit, limit_val, zero_force);
    return hipSuccess;
    }
//! NO_SQUISH angular part of the first half step
/*! \param d_orientation array of particle orientations
\param d_angmom array of particle conjugate quaternions
\param d_inertia array of moments of inertia
\param d_net_torque array of net torques
\param d_group_members Device array listing the indicies of the mebers of the group to integrate
\param group_size Number of members in the group
\param deltaT timestep
*/
//! Rotational first half step: advances angular momentum and orientation.
/*! One thread per group member. The update is a permutation (Trotter-style)
    splitting of the free-rotor propagation: half rotations about the body
    z and y axes, a full deltaT rotation about x, then y and z half rotations
    again - presumably the NO_SQUISH scheme referenced in the surrounding
    comments (TODO confirm against the CPU implementation).
    Rotations about axes with (near-)zero moment of inertia are skipped.
*/
__global__ void gpu_nve_angular_step_one_kernel(Scalar4 *d_orientation,
Scalar4 *d_angmom,
const Scalar3 *d_inertia,
const Scalar4 *d_net_torque,
unsigned int *d_group_members,
unsigned int group_size,
Scalar deltaT,
Scalar scale)
{
// determine which particle this thread works on (MEM TRANSFER: 4 bytes)
int group_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (group_idx < group_size)
{
unsigned int idx = d_group_members[group_idx];
// read the particle's orientation, conjugate quaternion, moment of inertia and net torque
quat<Scalar> q(d_orientation[idx]);
quat<Scalar> p(d_angmom[idx]);
vec3<Scalar> t(d_net_torque[idx]);
vec3<Scalar> I(d_inertia[idx]);
// rotate torque into principal frame
t = rotate(conj(q),t);
// check for zero moment of inertia
bool x_zero, y_zero, z_zero;
x_zero = (I.x < Scalar(EPSILON)); y_zero = (I.y < Scalar(EPSILON)); z_zero = (I.z < Scalar(EPSILON));
// ignore torque component along an axis for which the moment of inertia zero
if (x_zero) t.x = Scalar(0.0);
if (y_zero) t.y = Scalar(0.0);
if (z_zero) t.z = Scalar(0.0);
// advance p(t)->p(t+deltaT/2), q(t)->q(t+deltaT)
// half-kick of the conjugate momentum, then rescale (thermostat factor)
p += deltaT*q*t;
p = p*scale;
quat<Scalar> p1, p2, p3; // permutated quaternions
quat<Scalar> q1, q2, q3;
// phi* are the body-frame angular velocities about each principal axis
Scalar phi1, cphi1, sphi1;
Scalar phi2, cphi2, sphi2;
Scalar phi3, cphi3, sphi3;
// half rotation (deltaT/2) about the body-frame z axis
if (!z_zero)
{
p3 = quat<Scalar>(-p.v.z,vec3<Scalar>(p.v.y,-p.v.x,p.s));
q3 = quat<Scalar>(-q.v.z,vec3<Scalar>(q.v.y,-q.v.x,q.s));
phi3 = Scalar(1./4.)/I.z*dot(p,q3);
cphi3 = slow::cos(Scalar(1./2.)*deltaT*phi3);
sphi3 = slow::sin(Scalar(1./2.)*deltaT*phi3);
p=cphi3*p+sphi3*p3;
q=cphi3*q+sphi3*q3;
}
// half rotation (deltaT/2) about the body-frame y axis
if (!y_zero)
{
p2 = quat<Scalar>(-p.v.y,vec3<Scalar>(-p.v.z,p.s,p.v.x));
q2 = quat<Scalar>(-q.v.y,vec3<Scalar>(-q.v.z,q.s,q.v.x));
phi2 = Scalar(1./4.)/I.y*dot(p,q2);
cphi2 = slow::cos(Scalar(1./2.)*deltaT*phi2);
sphi2 = slow::sin(Scalar(1./2.)*deltaT*phi2);
p=cphi2*p+sphi2*p2;
q=cphi2*q+sphi2*q2;
}
// full rotation (deltaT) about the body-frame x axis - note the missing
// 1/2 factor relative to the other axes; this is the middle of the splitting
if (!x_zero)
{
p1 = quat<Scalar>(-p.v.x,vec3<Scalar>(p.s,p.v.z,-p.v.y));
q1 = quat<Scalar>(-q.v.x,vec3<Scalar>(q.s,q.v.z,-q.v.y));
phi1 = Scalar(1./4.)/I.x*dot(p,q1);
cphi1 = slow::cos(deltaT*phi1);
sphi1 = slow::sin(deltaT*phi1);
p=cphi1*p+sphi1*p1;
q=cphi1*q+sphi1*q1;
}
// second half rotation about y (mirror of the first)
if (! y_zero)
{
p2 = quat<Scalar>(-p.v.y,vec3<Scalar>(-p.v.z,p.s,p.v.x));
q2 = quat<Scalar>(-q.v.y,vec3<Scalar>(-q.v.z,q.s,q.v.x));
phi2 = Scalar(1./4.)/I.y*dot(p,q2);
cphi2 = slow::cos(Scalar(1./2.)*deltaT*phi2);
sphi2 = slow::sin(Scalar(1./2.)*deltaT*phi2);
p=cphi2*p+sphi2*p2;
q=cphi2*q+sphi2*q2;
}
// second half rotation about z (mirror of the first)
if (! z_zero)
{
p3 = quat<Scalar>(-p.v.z,vec3<Scalar>(p.v.y,-p.v.x,p.s));
q3 = quat<Scalar>(-q.v.z,vec3<Scalar>(q.v.y,-q.v.x,q.s));
phi3 = Scalar(1./4.)/I.z*dot(p,q3);
cphi3 = slow::cos(Scalar(1./2.)*deltaT*phi3);
sphi3 = slow::sin(Scalar(1./2.)*deltaT*phi3);
p=cphi3*p+sphi3*p3;
q=cphi3*q+sphi3*q3;
}
// renormalize (improves stability)
q = q*(Scalar(1.0)/slow::sqrt(norm2(q)));
d_orientation[idx] = quat_to_scalar4(q);
d_angmom[idx] = quat_to_scalar4(p);
}
}
/*! \param d_orientation array of particle orientations
\param d_angmom array of particle conjugate quaternions
\param d_inertia array of moments of inertia
\param d_net_torque array of net torques
\param d_group_members Device array listing the indicies of the mebers of the group to integrate
\param group_size Number of members in the group
\param deltaT timestep
*/
/*! Host driver for gpu_nve_angular_step_one_kernel(); see the kernel for
    parameter documentation. Uses a fixed block size of 256 threads.
*/
hipError_t gpu_nve_angular_step_one(Scalar4 *d_orientation,
                                    Scalar4 *d_angmom,
                                    const Scalar3 *d_inertia,
                                    const Scalar4 *d_net_torque,
                                    unsigned int *d_group_members,
                                    unsigned int group_size,
                                    Scalar deltaT,
                                    Scalar scale)
    {
    const int block_size = 256;
    dim3 grid(group_size / block_size + 1, 1, 1);
    dim3 threads(block_size, 1, 1);

    hipLaunchKernelGGL(( gpu_nve_angular_step_one_kernel), dim3(grid), dim3(threads) , 0, 0,
                       d_orientation, d_angmom, d_inertia, d_net_torque,
                       d_group_members, group_size, deltaT, scale);
    return hipSuccess;
    }
//! Takes the second half-step forward in the velocity-verlet NVE integration on a group of particles
/*! \param d_vel array of particle velocities
\param d_accel array of particle accelerations
\param d_group_members Device array listing the indicies of the mebers of the group to integrate
\param group_size Number of members in the group
\param d_net_force Net force on each particle
\param deltaT Amount of real time to step forward in one time step
\param limit If \a limit is true, then the dynamics will be limited so that particles do not move
a distance further than \a limit_val in one step.
\param limit_val Length to limit particle distance movement to
\param zero_force Set to true to always assign an acceleration of 0 to all particles in the group
This kernel is implemented in a very similar manner to gpu_nve_step_one_kernel(), see it for design details.
*/
//! Second velocity-verlet half step: finish the velocity update
/*! One thread per group member. Computes a(t+dT) = F/m from the net force
    (vel.w carries the mass), applies v(t+dT) = v(t+dT/2) + a*dT/2, optionally
    rescales the speed so no particle moves further than limit_val per step,
    and writes the acceleration back for use in the next first half step.
*/
extern "C" __global__
void gpu_nve_step_two_kernel(
    Scalar4 *d_vel,
    Scalar3 *d_accel,
    unsigned int *d_group_members,
    unsigned int group_size,
    Scalar4 *d_net_force,
    Scalar deltaT,
    bool limit,
    Scalar limit_val,
    bool zero_force)
    {
    const unsigned int work_idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (work_idx >= group_size)
        return;

    const unsigned int idx = d_group_members[work_idx];
    Scalar4 vel = d_vel[idx];

    // a = F/m, or zero when forces are suppressed
    Scalar3 accel = make_scalar3(Scalar(0.0), Scalar(0.0), Scalar(0.0));
    if (!zero_force)
        {
        const Scalar4 net_force = d_net_force[idx];
        accel = make_scalar3(net_force.x, net_force.y, net_force.z);
        const Scalar mass = vel.w;
        accel.x /= mass;
        accel.y /= mass;
        accel.z /= mass;
        }

    // v(t+deltaT) = v(t+deltaT/2) + a(t+deltaT)*deltaT/2
    vel.x += (Scalar(1.0)/Scalar(2.0)) * accel.x * deltaT;
    vel.y += (Scalar(1.0)/Scalar(2.0)) * accel.y * deltaT;
    vel.z += (Scalar(1.0)/Scalar(2.0)) * accel.z * deltaT;

    // optionally cap the speed so the next drift stays within limit_val
    if (limit)
        {
        const Scalar vel_len = sqrtf(vel.x*vel.x + vel.y*vel.y + vel.z*vel.z);
        if ( (vel_len*deltaT) > limit_val)
            {
            vel.x = vel.x / vel_len * limit_val / deltaT;
            vel.y = vel.y / vel_len * limit_val / deltaT;
            vel.z = vel.z / vel_len * limit_val / deltaT;
            }
        }

    d_vel[idx] = vel;
    // the computed acceleration is needed by the next step-one call
    d_accel[idx] = accel;
    }
/*! \param d_vel array of particle velocities
\param d_accel array of particle accelerations
\param d_group_members Device array listing the indicies of the mebers of the group to integrate
\param group_size Number of members in the group
\param d_net_force Net force on each particle
\param deltaT Amount of real time to step forward in one time step
\param limit If \a limit is true, then the dynamics will be limited so that particles do not move
a distance further than \a limit_val in one step.
\param limit_val Length to limit particle distance movement to
\param zero_force Set to true to always assign an acceleration of 0 to all particles in the group
This is just a driver for gpu_nve_step_two_kernel(), see it for details.
*/
/*! Host driver for gpu_nve_step_two_kernel(); see the kernel for parameter
    documentation. \a block_size is clamped to the kernel's hardware maximum,
    which is queried once and cached.
*/
hipError_t gpu_nve_step_two(Scalar4 *d_vel,
                            Scalar3 *d_accel,
                            unsigned int *d_group_members,
                            unsigned int group_size,
                            Scalar4 *d_net_force,
                            Scalar deltaT,
                            bool limit,
                            Scalar limit_val,
                            bool zero_force,
                            unsigned int block_size)
    {
    // query the kernel's maximum block size once and cache it
    static unsigned int max_block_size = UINT_MAX;
    if (max_block_size == UINT_MAX)
        {
        hipFuncAttributes attr;
        hipFuncGetAttributes(&attr, (const void *)gpu_nve_step_two_kernel);
        max_block_size = attr.maxThreadsPerBlock;
        }

    const unsigned int run_block_size = min(block_size, max_block_size);
    dim3 grid(group_size / run_block_size + 1, 1, 1);
    dim3 threads(run_block_size, 1, 1);

    hipLaunchKernelGGL(( gpu_nve_step_two_kernel), dim3(grid), dim3(threads) , 0, 0,
                       d_vel,
                       d_accel,
                       d_group_members,
                       group_size,
                       d_net_force,
                       deltaT,
                       limit,
                       limit_val,
                       zero_force);
    return hipSuccess;
    }
//! NO_SQUISH angular part of the second half step
/*! \param d_orientation array of particle orientations
\param d_angmom array of particle conjugate quaternions
\param d_inertia array of moments of inertia
\param d_net_torque array of net torques
\param d_group_members Device array listing the indicies of the mebers of the group to integrate
\param group_size Number of members in the group
\param deltaT timestep
*/
//! Rotational second half step: final half-kick of the angular momentum
/*! One thread per group member. The net torque is rotated into the body
    (principal) frame, components about zero-inertia axes are dropped, the
    conjugate momentum is rescaled by \a scale and then advanced by a half
    kick: p(t+deltaT/2) -> p(t+deltaT). The orientation is read-only here.
*/
__global__ void gpu_nve_angular_step_two_kernel(const Scalar4 *d_orientation,
                                                Scalar4 *d_angmom,
                                                const Scalar3 *d_inertia,
                                                const Scalar4 *d_net_torque,
                                                unsigned int *d_group_members,
                                                unsigned int group_size,
                                                Scalar deltaT,
                                                Scalar scale)
    {
    const unsigned int work_idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (work_idx >= group_size)
        return;

    const unsigned int idx = d_group_members[work_idx];

    // load orientation, conjugate momentum, torque and moments of inertia
    quat<Scalar> q(d_orientation[idx]);
    quat<Scalar> p(d_angmom[idx]);
    vec3<Scalar> t(d_net_torque[idx]);
    vec3<Scalar> I(d_inertia[idx]);

    // torque in the body frame
    t = rotate(conj(q), t);

    // drop the torque about any axis with (near-)zero moment of inertia
    if (I.x < Scalar(EPSILON)) t.x = Scalar(0.0);
    if (I.y < Scalar(EPSILON)) t.y = Scalar(0.0);
    if (I.z < Scalar(EPSILON)) t.z = Scalar(0.0);

    // rescale, then half-kick the conjugate momentum
    p = p * scale;
    p += deltaT * q * t;

    d_angmom[idx] = quat_to_scalar4(p);
    }
/*! \param d_orientation array of particle orientations
\param d_angmom array of particle conjugate quaternions
\param d_inertia array of moments of inertia
\param d_net_torque array of net torques
\param d_group_members Device array listing the indicies of the mebers of the group to integrate
\param group_size Number of members in the group
\param deltaT timestep
*/
/*! Host driver for gpu_nve_angular_step_two_kernel(); see the kernel for
    parameter documentation. Uses a fixed block size of 256 threads.
*/
hipError_t gpu_nve_angular_step_two(const Scalar4 *d_orientation,
                                    Scalar4 *d_angmom,
                                    const Scalar3 *d_inertia,
                                    const Scalar4 *d_net_torque,
                                    unsigned int *d_group_members,
                                    unsigned int group_size,
                                    Scalar deltaT,
                                    Scalar scale)
    {
    const int block_size = 256;
    dim3 grid(group_size / block_size + 1, 1, 1);
    dim3 threads(block_size, 1, 1);

    hipLaunchKernelGGL(( gpu_nve_angular_step_two_kernel), dim3(grid), dim3(threads) , 0, 0,
                       d_orientation, d_angmom, d_inertia, d_net_torque,
                       d_group_members, group_size, deltaT, scale);
    return hipSuccess;
    }
| 55242ff9c3a703ef8dc01f6d4248d5facca94706.cu | /*
Highly Optimized Object-oriented Many-particle Dynamics -- Blue Edition
(HOOMD-blue) Open Source Software License Copyright 2009-2016 The Regents of
the University of Michigan All rights reserved.
HOOMD-blue may contain modifications ("Contributions") provided, and to which
copyright is held, by various Contributors who have granted The Regents of the
University of Michigan the right to modify and/or distribute such Contributions.
You may redistribute, use, and create derivate works of HOOMD-blue, in source
and binary forms, provided you abide by the following conditions:
* Redistributions of source code must retain the above copyright notice, this
list of conditions, and the following disclaimer both in the code and
prominently in any materials provided with the distribution.
* Redistributions in binary form must reproduce the above copyright notice, this
list of conditions, and the following disclaimer in the documentation and/or
other materials provided with the distribution.
* All publications and presentations based on HOOMD-blue, including any reports
or published results obtained, in whole or in part, with HOOMD-blue, will
acknowledge its use according to the terms posted at the time of submission on:
http://codeblue.umich.edu/hoomd-blue/citations.html
* Any electronic documents citing HOOMD-Blue will link to the HOOMD-Blue website:
http://codeblue.umich.edu/hoomd-blue/
* Apart from the above required attributions, neither the name of the copyright
holder nor the names of HOOMD-blue's contributors may be used to endorse or
promote products derived from this software without specific prior written
permission.
Disclaimer
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS ``AS IS'' AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND/OR ANY
WARRANTIES THAT THIS SOFTWARE IS FREE OF INFRINGEMENT ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// Maintainer: joaander
#include "TwoStepNVEGPU.cuh"
#include "VectorMath.h"
#include <assert.h>
/*! \file TwoStepNVEGPU.cu
\brief Defines GPU kernel code for NVE integration on the GPU. Used by TwoStepNVEGPU.
*/
//! Takes the first half-step forward in the velocity-verlet NVE integration on a group of particles
/*! \param d_pos array of particle positions
\param d_vel array of particle velocities
\param d_accel array of particle accelerations
\param d_image array of particle images
\param d_group_members Device array listing the indicies of the mebers of the group to integrate
\param group_size Number of members in the group
\param box Box dimensions for periodic boundary condition handling
\param deltaT timestep
\param limit If \a limit is true, then the dynamics will be limited so that particles do not move
a distance further than \a limit_val in one step.
\param limit_val Length to limit particle distance movement to
\param zero_force Set to true to always assign an acceleration of 0 to all particles in the group
This kernel must be executed with a 1D grid of any block size such that the number of threads is greater than or
equal to the number of members in the group. The kernel's implementation simply reads one particle in each thread
and updates that particle.
<b>Performance notes:</b>
Particle properties are read via the texture cache to optimize the bandwidth obtained with sparse groups. The writes
in sparse groups will not be coalesced. However, because ParticleGroup sorts the index list the writes will be as
contiguous as possible leading to fewer memory transactions on compute 1.3 hardware and more cache hits on Fermi.
*/
//! First velocity-verlet half-step: r(t)->r(t+deltaT) and v(t)->v(t+deltaT/2).
//! Launched 1D with one thread per group member (bounds-checked).
extern "C" __global__
void gpu_nve_step_one_kernel(Scalar4 *d_pos,
                             Scalar4 *d_vel,
                             const Scalar3 *d_accel,
                             int3 *d_image,
                             unsigned int *d_group_members,
                             unsigned int group_size,
                             BoxDim box,
                             Scalar deltaT,
                             bool limit,
                             Scalar limit_val,
                             bool zero_force)
    {
    // determine which particle this thread works on (MEM TRANSFER: 4 bytes)
    int group_idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (group_idx < group_size)
        {
        unsigned int idx = d_group_members[group_idx];

        // do velocity verlet update
        // r(t+deltaT) = r(t) + v(t)*deltaT + (1/2)a(t)*deltaT^2
        // v(t+deltaT/2) = v(t) + (1/2)a*deltaT

        // read the particle's position (MEM TRANSFER: 16 bytes); w stores the type
        Scalar4 postype = d_pos[idx];
        Scalar3 pos = make_scalar3(postype.x, postype.y, postype.z);

        // read the particle's velocity and acceleration (MEM TRANSFER: 32 bytes); vel.w stores the mass
        Scalar4 velmass = d_vel[idx];
        Scalar3 vel = make_scalar3(velmass.x, velmass.y, velmass.z);

        // zero_force freezes this group: acceleration stays zero
        Scalar3 accel = make_scalar3(Scalar(0.0), Scalar(0.0), Scalar(0.0));
        if (!zero_force)
            accel = d_accel[idx];

        // update the position (FLOPS: 15)
        Scalar3 dx = vel * deltaT + (Scalar(1.0)/Scalar(2.0)) * accel * deltaT * deltaT;

        // limit the movement of the particles: clamp |dx| to limit_val
        if (limit)
            {
            Scalar len = sqrtf(dot(dx, dx));
            if (len > limit_val)
                dx = dx / len * limit_val;
            }

        // FLOPS: 3
        pos += dx;

        // update the velocity (FLOPS: 9)
        vel += (Scalar(1.0)/Scalar(2.0)) * accel * deltaT;

        // read in the particle's image (MEM TRANSFER: 16 bytes)
        int3 image = d_image[idx];

        // fix the periodic boundary conditions (FLOPS: 15)
        box.wrap(pos, image);

        // write out the results (MEM_TRANSFER: 48 bytes); preserve type/mass in .w
        d_pos[idx] = make_scalar4(pos.x, pos.y, pos.z, postype.w);
        d_vel[idx] = make_scalar4(vel.x, vel.y, vel.z, velmass.w);
        d_image[idx] = image;
        }
    }
/*! \param d_pos array of particle positions
\param d_vel array of particle velocities
\param d_accel array of particle accelerations
\param d_image array of particle images
\param d_group_members Device array listing the indicies of the mebers of the group to integrate
\param group_size Number of members in the group
\param box Box dimensions for periodic boundary condition handling
\param deltaT timestep
\param limit If \a limit is true, then the dynamics will be limited so that particles do not move
a distance further than \a limit_val in one step.
\param limit_val Length to limit particle distance movement to
\param zero_force Set to true to always assign an acceleration of 0 to all particles in the group
See gpu_nve_step_one_kernel() for full documentation, this function is just a driver.
*/
/*! Host driver for gpu_nve_step_one_kernel(). Clamps the requested block size
    to the kernel's hardware maximum (queried once and cached) and launches a
    1D grid covering group_size. */
cudaError_t gpu_nve_step_one(Scalar4 *d_pos,
                             Scalar4 *d_vel,
                             const Scalar3 *d_accel,
                             int3 *d_image,
                             unsigned int *d_group_members,
                             unsigned int group_size,
                             const BoxDim& box,
                             Scalar deltaT,
                             bool limit,
                             Scalar limit_val,
                             bool zero_force)
    {
    // one-time query of the kernel's max threads/block, cached across calls
    // NOTE(review): the lazy init is not thread-safe on first call — fine if
    // all launches come from a single host thread; verify against callers.
    static unsigned int max_block_size = UINT_MAX;
    if (max_block_size == UINT_MAX)
        {
        cudaFuncAttributes attr;
        cudaFuncGetAttributes(&attr, (const void*)gpu_nve_step_one_kernel);
        max_block_size = attr.maxThreadsPerBlock;
        }

    unsigned int run_block_size = min(block_size, max_block_size);

    // setup the grid to run the kernel (+1 block covers the remainder;
    // the kernel bounds-checks group_idx)
    dim3 grid( (group_size/run_block_size) + 1, 1, 1);
    dim3 threads(run_block_size, 1, 1);

    // run the kernel
    gpu_nve_step_one_kernel<<< grid, threads >>>(d_pos, d_vel, d_accel, d_image, d_group_members, group_size, box, deltaT, limit, limit_val, zero_force);

    return cudaSuccess;
    }
//! NO_SQUISH angular part of the first half step
/*! \param d_orientation array of particle orientations
\param d_angmom array of particle conjugate quaternions
\param d_inertia array of moments of inertia
\param d_net_torque array of net torques
\param d_group_members Device array listing the indicies of the mebers of the group to integrate
\param group_size Number of members in the group
\param deltaT timestep
*/
//! NO_SQUISH angular first half-step: torque kick on the conjugate momentum,
//! then free-rotor propagation of (q, p) via a symmetric sequence of
//! single-axis rotations (z, y, x, y, z — the x step uses the full deltaT,
//! the outer steps a half deltaT each). Launched 1D, one thread per group member.
__global__ void gpu_nve_angular_step_one_kernel(Scalar4 *d_orientation,
                                                Scalar4 *d_angmom,
                                                const Scalar3 *d_inertia,
                                                const Scalar4 *d_net_torque,
                                                unsigned int *d_group_members,
                                                unsigned int group_size,
                                                Scalar deltaT,
                                                Scalar scale)
    {
    // determine which particle this thread works on (MEM TRANSFER: 4 bytes)
    int group_idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (group_idx < group_size)
        {
        unsigned int idx = d_group_members[group_idx];

        // read the particle's orientation, conjugate quaternion, moment of inertia and net torque
        quat<Scalar> q(d_orientation[idx]);
        quat<Scalar> p(d_angmom[idx]);
        vec3<Scalar> t(d_net_torque[idx]);
        vec3<Scalar> I(d_inertia[idx]);

        // rotate torque into principal frame
        t = rotate(conj(q),t);

        // check for zero moment of inertia
        bool x_zero, y_zero, z_zero;
        x_zero = (I.x < Scalar(EPSILON)); y_zero = (I.y < Scalar(EPSILON)); z_zero = (I.z < Scalar(EPSILON));

        // ignore torque component along an axis for which the moment of inertia zero
        if (x_zero) t.x = Scalar(0.0);
        if (y_zero) t.y = Scalar(0.0);
        if (z_zero) t.z = Scalar(0.0);

        // advance p(t)->p(t+deltaT/2), q(t)->q(t+deltaT)
        p += deltaT*q*t;
        // thermostat-style rescale of the momentum (scale == 1 leaves it unchanged)
        p = p*scale;

        quat<Scalar> p1, p2, p3; // permutated quaternions
        quat<Scalar> q1, q2, q3;
        Scalar phi1, cphi1, sphi1; // per-axis rotation angle rate and its cos/sin
        Scalar phi2, cphi2, sphi2;
        Scalar phi3, cphi3, sphi3;

        // axes with zero inertia are skipped entirely (no rotation about them)

        // half-step rotation about the body z axis
        if (!z_zero)
            {
            p3 = quat<Scalar>(-p.v.z,vec3<Scalar>(p.v.y,-p.v.x,p.s));
            q3 = quat<Scalar>(-q.v.z,vec3<Scalar>(q.v.y,-q.v.x,q.s));
            phi3 = Scalar(1./4.)/I.z*dot(p,q3);
            cphi3 = slow::cos(Scalar(1./2.)*deltaT*phi3);
            sphi3 = slow::sin(Scalar(1./2.)*deltaT*phi3);

            p=cphi3*p+sphi3*p3;
            q=cphi3*q+sphi3*q3;
            }

        // half-step rotation about the body y axis
        if (!y_zero)
            {
            p2 = quat<Scalar>(-p.v.y,vec3<Scalar>(-p.v.z,p.s,p.v.x));
            q2 = quat<Scalar>(-q.v.y,vec3<Scalar>(-q.v.z,q.s,q.v.x));
            phi2 = Scalar(1./4.)/I.y*dot(p,q2);
            cphi2 = slow::cos(Scalar(1./2.)*deltaT*phi2);
            sphi2 = slow::sin(Scalar(1./2.)*deltaT*phi2);

            p=cphi2*p+sphi2*p2;
            q=cphi2*q+sphi2*q2;
            }

        // full-step rotation about the body x axis (center of the symmetric split:
        // note cos/sin take deltaT*phi1, not deltaT/2)
        if (!x_zero)
            {
            p1 = quat<Scalar>(-p.v.x,vec3<Scalar>(p.s,p.v.z,-p.v.y));
            q1 = quat<Scalar>(-q.v.x,vec3<Scalar>(q.s,q.v.z,-q.v.y));
            phi1 = Scalar(1./4.)/I.x*dot(p,q1);
            cphi1 = slow::cos(deltaT*phi1);
            sphi1 = slow::sin(deltaT*phi1);

            p=cphi1*p+sphi1*p1;
            q=cphi1*q+sphi1*q1;
            }

        // mirror half-step about y
        if (! y_zero)
            {
            p2 = quat<Scalar>(-p.v.y,vec3<Scalar>(-p.v.z,p.s,p.v.x));
            q2 = quat<Scalar>(-q.v.y,vec3<Scalar>(-q.v.z,q.s,q.v.x));
            phi2 = Scalar(1./4.)/I.y*dot(p,q2);
            cphi2 = slow::cos(Scalar(1./2.)*deltaT*phi2);
            sphi2 = slow::sin(Scalar(1./2.)*deltaT*phi2);

            p=cphi2*p+sphi2*p2;
            q=cphi2*q+sphi2*q2;
            }

        // mirror half-step about z
        if (! z_zero)
            {
            p3 = quat<Scalar>(-p.v.z,vec3<Scalar>(p.v.y,-p.v.x,p.s));
            q3 = quat<Scalar>(-q.v.z,vec3<Scalar>(q.v.y,-q.v.x,q.s));
            phi3 = Scalar(1./4.)/I.z*dot(p,q3);
            cphi3 = slow::cos(Scalar(1./2.)*deltaT*phi3);
            sphi3 = slow::sin(Scalar(1./2.)*deltaT*phi3);

            p=cphi3*p+sphi3*p3;
            q=cphi3*q+sphi3*q3;
            }

        // renormalize (improves stability)
        q = q*(Scalar(1.0)/slow::sqrt(norm2(q)));

        d_orientation[idx] = quat_to_scalar4(q);
        d_angmom[idx] = quat_to_scalar4(p);
        }
    }
/*! \param d_orientation array of particle orientations
\param d_angmom array of particle conjugate quaternions
\param d_inertia array of moments of inertia
\param d_net_torque array of net torques
\param d_group_members Device array listing the indicies of the mebers of the group to integrate
\param group_size Number of members in the group
\param deltaT timestep
*/
/*! Host driver for gpu_nve_angular_step_one_kernel(); one thread per group
    member, fixed 256-thread blocks (the kernel bounds-checks its index). */
cudaError_t gpu_nve_angular_step_one(Scalar4 *d_orientation,
                                     Scalar4 *d_angmom,
                                     const Scalar3 *d_inertia,
                                     const Scalar4 *d_net_torque,
                                     unsigned int *d_group_members,
                                     unsigned int group_size,
                                     Scalar deltaT,
                                     Scalar scale)
    {
    // launch configuration: 1D grid, +1 block to cover the remainder
    const int threads_per_block = 256;
    dim3 threads(threads_per_block, 1, 1);
    dim3 grid(group_size / threads_per_block + 1, 1, 1);

    gpu_nve_angular_step_one_kernel<<<grid, threads>>>(d_orientation, d_angmom,
                                                       d_inertia, d_net_torque,
                                                       d_group_members, group_size,
                                                       deltaT, scale);
    return cudaSuccess;
    }
//! Takes the second half-step forward in the velocity-verlet NVE integration on a group of particles
/*! \param d_vel array of particle velocities
\param d_accel array of particle accelerations
\param d_group_members Device array listing the indicies of the mebers of the group to integrate
\param group_size Number of members in the group
\param d_net_force Net force on each particle
\param deltaT Amount of real time to step forward in one time step
\param limit If \a limit is true, then the dynamics will be limited so that particles do not move
a distance further than \a limit_val in one step.
\param limit_val Length to limit particle distance movement to
\param zero_force Set to true to always assign an acceleration of 0 to all particles in the group
This kernel is implemented in a very similar manner to gpu_nve_step_one_kernel(), see it for design details.
*/
//! Second velocity-verlet half-step: recomputes a(t+deltaT) from the net force
//! and finishes the velocity update v(t+deltaT/2)->v(t+deltaT).
//! Launched 1D with one thread per group member (bounds-checked).
extern "C" __global__
void gpu_nve_step_two_kernel(
                            Scalar4 *d_vel,
                            Scalar3 *d_accel,
                            unsigned int *d_group_members,
                            unsigned int group_size,
                            Scalar4 *d_net_force,
                            Scalar deltaT,
                            bool limit,
                            Scalar limit_val,
                            bool zero_force)
    {
    // determine which particle this thread works on (MEM TRANSFER: 4 bytes)
    int group_idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (group_idx < group_size)
        {
        unsigned int idx = d_group_members[group_idx];

        // read in the net force and calculate the acceleration MEM TRANSFER: 16 bytes
        Scalar3 accel = make_scalar3(Scalar(0.0), Scalar(0.0), Scalar(0.0));

        // read the current particle velocity (MEM TRANSFER: 16 bytes); vel.w stores the mass
        Scalar4 vel = d_vel[idx];

        if (!zero_force)
            {
            Scalar4 net_force = d_net_force[idx];
            accel = make_scalar3(net_force.x, net_force.y, net_force.z);
            // a = F/m (MEM TRANSFER: 4 bytes FLOPS: 3)
            Scalar mass = vel.w;
            accel.x /= mass;
            accel.y /= mass;
            accel.z /= mass;
            }

        // v(t+deltaT) = v(t+deltaT/2) + 1/2 * a(t+deltaT)*deltaT

        // update the velocity (FLOPS: 6)
        vel.x += (Scalar(1.0)/Scalar(2.0)) * accel.x * deltaT;
        vel.y += (Scalar(1.0)/Scalar(2.0)) * accel.y * deltaT;
        vel.z += (Scalar(1.0)/Scalar(2.0)) * accel.z * deltaT;

        // cap the speed so a particle cannot move more than limit_val per step
        if (limit)
            {
            Scalar vel_len = sqrtf(vel.x*vel.x + vel.y*vel.y + vel.z*vel.z);
            if ( (vel_len*deltaT) > limit_val)
                {
                vel.x = vel.x / vel_len * limit_val / deltaT;
                vel.y = vel.y / vel_len * limit_val / deltaT;
                vel.z = vel.z / vel_len * limit_val / deltaT;
                }
            }

        // write out data (MEM TRANSFER: 32 bytes)
        d_vel[idx] = vel;
        // since we calculate the acceleration, we need to write it for the next step
        d_accel[idx] = accel;
        }
    }
/*! \param d_vel array of particle velocities
\param d_accel array of particle accelerations
\param d_group_members Device array listing the indicies of the mebers of the group to integrate
\param group_size Number of members in the group
\param d_net_force Net force on each particle
\param deltaT Amount of real time to step forward in one time step
\param limit If \a limit is true, then the dynamics will be limited so that particles do not move
a distance further than \a limit_val in one step.
\param limit_val Length to limit particle distance movement to
\param zero_force Set to true to always assign an acceleration of 0 to all particles in the group
This is just a driver for gpu_nve_step_two_kernel(), see it for details.
*/
/*! Host driver for gpu_nve_step_two_kernel(). Clamps the requested block size
    to the kernel's hardware maximum (queried once and cached) and launches a
    1D grid covering group_size. */
cudaError_t gpu_nve_step_two(Scalar4 *d_vel,
                             Scalar3 *d_accel,
                             unsigned int *d_group_members,
                             unsigned int group_size,
                             Scalar4 *d_net_force,
                             Scalar deltaT,
                             bool limit,
                             Scalar limit_val,
                             bool zero_force,
                             unsigned int block_size)
    {
    // one-time query of the kernel's max threads/block, cached across calls
    // NOTE(review): lazy init is not thread-safe on first call — fine if all
    // launches come from a single host thread; verify against callers.
    static unsigned int max_block_size = UINT_MAX;
    if (max_block_size == UINT_MAX)
        {
        cudaFuncAttributes attr;
        cudaFuncGetAttributes(&attr, (const void *)gpu_nve_step_two_kernel);
        max_block_size = attr.maxThreadsPerBlock;
        }

    unsigned int run_block_size = min(block_size, max_block_size);

    // setup the grid to run the kernel (+1 block covers the remainder)
    dim3 grid( (group_size/run_block_size) + 1, 1, 1);
    dim3 threads(run_block_size, 1, 1);

    // run the kernel
    gpu_nve_step_two_kernel<<< grid, threads >>>(d_vel,
                                                 d_accel,
                                                 d_group_members,
                                                 group_size,
                                                 d_net_force,
                                                 deltaT,
                                                 limit,
                                                 limit_val,
                                                 zero_force);

    return cudaSuccess;
    }
//! NO_SQUISH angular part of the second half step
/*! \param d_orientation array of particle orientations
\param d_angmom array of particle conjugate quaternions
\param d_inertia array of moments of inertia
\param d_net_torque array of net torques
\param d_group_members Device array listing the indicies of the mebers of the group to integrate
\param group_size Number of members in the group
\param deltaT timestep
*/
//! NO_SQUISH angular second half-step: rescales the conjugate angular momentum
//! and kicks it by the body-frame torque. Launched 1D, one thread per group
//! member; threads with group_idx >= group_size do nothing.
__global__ void gpu_nve_angular_step_two_kernel(const Scalar4 *d_orientation,
                                                Scalar4 *d_angmom,
                                                const Scalar3 *d_inertia,
                                                const Scalar4 *d_net_torque,
                                                unsigned int *d_group_members,
                                                unsigned int group_size,
                                                Scalar deltaT,
                                                Scalar scale)
    {
    // determine which particle this thread works on (MEM TRANSFER: 4 bytes)
    int group_idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (group_idx < group_size)
        {
        unsigned int idx = d_group_members[group_idx];

        // read the particle's orientation, conjugate quaternion, moment of inertia and net torque
        quat<Scalar> q(d_orientation[idx]);
        quat<Scalar> p(d_angmom[idx]);
        vec3<Scalar> t(d_net_torque[idx]);
        vec3<Scalar> I(d_inertia[idx]);

        // rotate torque into principal frame
        t = rotate(conj(q),t);

        // check for zero moment of inertia
        bool x_zero, y_zero, z_zero;
        x_zero = (I.x < Scalar(EPSILON)); y_zero = (I.y < Scalar(EPSILON)); z_zero = (I.z < Scalar(EPSILON));

        // ignore torque component along an axis for which the moment of inertia zero
        if (x_zero) t.x = Scalar(0.0);
        if (y_zero) t.y = Scalar(0.0);
        if (z_zero) t.z = Scalar(0.0);

        // rescale (applied before the torque kick here; step one applies it after)
        p = p*scale;

        // advance p(t+deltaT/2)->p(t+deltaT) with the torque kick
        p += deltaT*q*t;

        // only the angular momentum changes in the second half-step
        d_angmom[idx] = quat_to_scalar4(p);
        }
    }
/*! \param d_orientation array of particle orientations
\param d_angmom array of particle conjugate quaternions
\param d_inertia array of moments of inertia
\param d_net_torque array of net torques
\param d_group_members Device array listing the indicies of the mebers of the group to integrate
\param group_size Number of members in the group
\param deltaT timestep
*/
/*! Host driver for gpu_nve_angular_step_two_kernel(); one thread per group
    member, fixed 256-thread blocks (the kernel bounds-checks its index). */
cudaError_t gpu_nve_angular_step_two(const Scalar4 *d_orientation,
                                     Scalar4 *d_angmom,
                                     const Scalar3 *d_inertia,
                                     const Scalar4 *d_net_torque,
                                     unsigned int *d_group_members,
                                     unsigned int group_size,
                                     Scalar deltaT,
                                     Scalar scale)
    {
    // launch configuration: 1D grid, +1 block to cover the remainder
    const int threads_per_block = 256;
    dim3 threads(threads_per_block, 1, 1);
    dim3 grid(group_size / threads_per_block + 1, 1, 1);

    gpu_nve_angular_step_two_kernel<<<grid, threads>>>(d_orientation, d_angmom,
                                                       d_inertia, d_net_torque,
                                                       d_group_members, group_size,
                                                       deltaT, scale);
    return cudaSuccess;
    }
|
75f169fa377bbc907cd93732c2caa5cda23a9882.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/scan.h>
#include "common.h"
#include "thrust.h"
namespace StreamCompaction {
namespace Thrust {
using StreamCompaction::Common::PerformanceTimer;
// Returns the shared PerformanceTimer for this translation unit
// (function-local static, constructed on first use).
PerformanceTimer& timer()
{
    static PerformanceTimer timer;
    return timer;
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
    // Copy the input to the device, run thrust's exclusive prefix-sum
    // (only the scan itself is inside the GPU timer), then copy back.
    thrust::device_vector<int> dv_in(idata, idata + n);
    thrust::device_vector<int> dv_out = thrust::device_vector<int>(n, 0);

    timer().startGpuTimer();
    thrust::exclusive_scan(dv_in.begin(), dv_in.end(), dv_out.begin());
    timer().endGpuTimer();

    // write the device result into the caller's host buffer
    thrust::copy(dv_out.begin(), dv_out.end(), odata);
}
}
}
| 75f169fa377bbc907cd93732c2caa5cda23a9882.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/scan.h>
#include "common.h"
#include "thrust.h"
namespace StreamCompaction {
namespace Thrust {
using StreamCompaction::Common::PerformanceTimer;
// Returns the shared PerformanceTimer for this translation unit
// (function-local static, constructed on first use).
PerformanceTimer& timer()
{
    static PerformanceTimer timer;
    return timer;
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
    // Copy the input to the device, run thrust's exclusive prefix-sum
    // (only the scan itself is inside the GPU timer), then copy back.
    thrust::device_vector<int> dv_in(idata, idata + n);
    thrust::device_vector<int> dv_out = thrust::device_vector<int>(n, 0);

    timer().startGpuTimer();
    thrust::exclusive_scan(dv_in.begin(), dv_in.end(), dv_out.begin());
    timer().endGpuTimer();

    // write the device result into the caller's host buffer
    thrust::copy(dv_out.begin(), dv_out.end(), odata);
}
}
}
|
fb6d9dfb72c8ebaf7690e67940fc549475e43761.hip | // !!! This is a file automatically generated by hipify!!!
//This code is a modification of L1 cache benchmark from
//"Dissecting the NVIDIA Volta GPU Architecture via Microbenchmarking": https://arxiv.org/pdf/1804.06826.pdf
//This benchmark measures the latency of GPU memory
//This code have been tested on Volta V100 architecture
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#define THREADS_NUM 4 //HERE, we launch four threads, to ensure that one request is equal to DRAM trascation, 4 thread * 8 bytes = 32 bytes (= min DRAM trascation)
#define WARP_SIZE 32
#define ITERS 32768 //1MB of pointer chasing, ITERS*THREADS_NUM*8 bytes
#define ARRAY_SIZE 917504 //pointer-chasing array size in 64-bit. total array size is 7 MB which larger than L2 cache size (6 MB in Volta) to avoid l2 cache resident from the copy engine
#define BLOCKS 160
#define THREADS_PER_BLOCK 1024
#define TOTAL_THREADS BLOCKS*THREADS_PER_BLOCK
// GPU error check
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Reports a failed HIP call with file/line context and (by default) exits
// with the error code. Intended to be invoked via the gpuErrchk macro.
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true){
    if (code == hipSuccess)
        return;    // nothing to report

    fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
    if (abort)
        exit(code);
}
//! Pointer-chasing DRAM latency kernel. All launched threads cooperate to
//! initialize the chase array; afterwards only the first THREADS_NUM threads
//! perform the timed, serially-dependent loads. Loads use the .cv (cache
//! volatile) modifier so they bypass caching and hit memory every time.
__global__ void mem_lat(uint32_t *startClk, uint32_t *stopClk, uint64_t *posArray, uint64_t *dsink){
    // thread index
    uint32_t tid = threadIdx.x;
    uint32_t uid = blockIdx.x * blockDim.x + tid;

    // initialize pointer-chasing array: entry i holds the address of entry
    // i + THREADS_NUM, so each chaser strides through its own interleaved chain
    for (uint32_t i=uid; i<(ARRAY_SIZE-THREADS_NUM); i+=TOTAL_THREADS)
        posArray[i] = (uint64_t)(posArray + i + THREADS_NUM);

    if(uid < THREADS_NUM){  //only THREADS_NUM has to be active here

        // initialize the tail to reference to the head of the array
        posArray[ARRAY_SIZE-(THREADS_NUM-tid)] = (uint64_t)posArray + tid;

        uint64_t *ptr = posArray + tid;
        uint64_t ptr1, ptr0;

        // initialize the pointers with the start address
        //Here, we use cache volatile modifier to ignore the L2 cache
        asm volatile ("{\t\n"
            "ld.global.cv.u64 %0, [%1];\n\t"
            "}" : "=l"(ptr1) : "l"(ptr) : "memory"
        );

        // synchronize all threads
        asm volatile ("bar.sync 0;");

        uint32_t start = 0;
        uint32_t stop = 0;

        // start timing (per-SM clock register)
        asm volatile ("mov.u32 %0, %%clock;" : "=r"(start) :: "memory");

        // pointer-chasing ITERS times: each load's address depends on the
        // previous load's value, so the loads cannot overlap
        //Here, we use cache volatile modifier to ignore the L2 cache
        for(uint32_t i=tid; i<ITERS-THREADS_NUM; i+=THREADS_NUM) {
            asm volatile ("{\t\n"
                "ld.global.cv.u64 %0, [%1];\n\t"
                "}" : "=l"(ptr0) : "l"((uint64_t*)ptr1) : "memory"
            );
            ptr1 = ptr0;    //swap the register for the next load
        }

        // stop timing
        asm volatile("mov.u32 %0, %%clock;" : "=r"(stop) :: "memory");

        // write time and data back to memory (dsink keeps the chase live
        // so the compiler cannot eliminate the loads)
        startClk[tid] = start;
        stopClk[tid] = stop;
        dsink[tid] = ptr1;
    }
}
// Allocates the timing buffers, runs the latency kernel, reports the average
// cycles per dependent load, and releases all allocations.
int main(){
    // host staging buffers for per-thread clocks and the data sink
    uint32_t *startClk = (uint32_t*) malloc(THREADS_NUM*sizeof(uint32_t));
    uint32_t *stopClk = (uint32_t*) malloc(THREADS_NUM*sizeof(uint32_t));
    uint64_t *dsink = (uint64_t*) malloc(THREADS_NUM*sizeof(uint64_t));

    uint32_t *startClk_g;
    uint32_t *stopClk_g;
    uint64_t *posArray_g;
    uint64_t *dsink_g;

    gpuErrchk( hipMalloc(&startClk_g, THREADS_NUM*sizeof(uint32_t)) );
    gpuErrchk( hipMalloc(&stopClk_g, THREADS_NUM*sizeof(uint32_t)) );
    gpuErrchk( hipMalloc(&posArray_g, ARRAY_SIZE*sizeof(uint64_t)) );
    gpuErrchk( hipMalloc(&dsink_g, THREADS_NUM*sizeof(uint64_t)) );

    hipLaunchKernelGGL(( mem_lat), dim3(BLOCKS),dim3(THREADS_PER_BLOCK), 0, 0, startClk_g, stopClk_g, posArray_g, dsink_g);
    gpuErrchk( hipPeekAtLastError() );

    // the blocking copies below also synchronize with the kernel
    gpuErrchk( hipMemcpy(startClk, startClk_g, THREADS_NUM*sizeof(uint32_t), hipMemcpyDeviceToHost) );
    gpuErrchk( hipMemcpy(stopClk, stopClk_g, THREADS_NUM*sizeof(uint32_t), hipMemcpyDeviceToHost) );
    gpuErrchk( hipMemcpy(dsink, dsink_g, THREADS_NUM*sizeof(uint64_t), hipMemcpyDeviceToHost) );

    // each chaser performs ~ITERS/THREADS_NUM serially-dependent loads
    printf("Mem latency = %12.4f cycles \n", (float)(stopClk[0]-startClk[0])/(float)(ITERS/THREADS_NUM));
    printf("Total Clk number = %u \n", stopClk[0]-startClk[0]);

    // fix: release device and host allocations (the original leaked them)
    gpuErrchk( hipFree(startClk_g) );
    gpuErrchk( hipFree(stopClk_g) );
    gpuErrchk( hipFree(posArray_g) );
    gpuErrchk( hipFree(dsink_g) );
    free(startClk);
    free(stopClk);
    free(dsink);

    return 0;
}
| fb6d9dfb72c8ebaf7690e67940fc549475e43761.cu | //This code is a modification of L1 cache benchmark from
//"Dissecting the NVIDIA Volta GPU Architecture via Microbenchmarking": https://arxiv.org/pdf/1804.06826.pdf
//This benchmark measures the latency of GPU memory
//This code have been tested on Volta V100 architecture
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#define THREADS_NUM 4 //HERE, we launch four threads, to ensure that one request is equal to DRAM trascation, 4 thread * 8 bytes = 32 bytes (= min DRAM trascation)
#define WARP_SIZE 32
#define ITERS 32768 //1MB of pointer chasing, ITERS*THREADS_NUM*8 bytes
#define ARRAY_SIZE 917504 //pointer-chasing array size in 64-bit. total array size is 7 MB which larger than L2 cache size (6 MB in Volta) to avoid l2 cache resident from the copy engine
#define BLOCKS 160
#define THREADS_PER_BLOCK 1024
#define TOTAL_THREADS BLOCKS*THREADS_PER_BLOCK
// GPU error check
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Reports a failed CUDA call with file/line context and (by default) exits
// with the error code. Intended to be invoked via the gpuErrchk macro.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true){
    if (code != cudaSuccess) {
        fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}
//! Pointer-chasing DRAM latency kernel. All launched threads cooperate to
//! initialize the chase array; afterwards only the first THREADS_NUM threads
//! perform the timed, serially-dependent loads. Loads use the .cv (cache
//! volatile) modifier so they bypass caching and hit memory every time.
__global__ void mem_lat(uint32_t *startClk, uint32_t *stopClk, uint64_t *posArray, uint64_t *dsink){
    // thread index
    uint32_t tid = threadIdx.x;
    uint32_t uid = blockIdx.x * blockDim.x + tid;

    // initialize pointer-chasing array: entry i holds the address of entry
    // i + THREADS_NUM, so each chaser strides through its own interleaved chain
    for (uint32_t i=uid; i<(ARRAY_SIZE-THREADS_NUM); i+=TOTAL_THREADS)
        posArray[i] = (uint64_t)(posArray + i + THREADS_NUM);

    if(uid < THREADS_NUM){  //only THREADS_NUM has to be active here

        // initialize the tail to reference to the head of the array
        posArray[ARRAY_SIZE-(THREADS_NUM-tid)] = (uint64_t)posArray + tid;

        uint64_t *ptr = posArray + tid;
        uint64_t ptr1, ptr0;

        // initialize the pointers with the start address
        //Here, we use cache volatile modifier to ignore the L2 cache
        asm volatile ("{\t\n"
            "ld.global.cv.u64 %0, [%1];\n\t"
            "}" : "=l"(ptr1) : "l"(ptr) : "memory"
        );

        // synchronize all threads
        asm volatile ("bar.sync 0;");

        uint32_t start = 0;
        uint32_t stop = 0;

        // start timing (per-SM clock register)
        asm volatile ("mov.u32 %0, %%clock;" : "=r"(start) :: "memory");

        // pointer-chasing ITERS times: each load's address depends on the
        // previous load's value, so the loads cannot overlap
        //Here, we use cache volatile modifier to ignore the L2 cache
        for(uint32_t i=tid; i<ITERS-THREADS_NUM; i+=THREADS_NUM) {
            asm volatile ("{\t\n"
                "ld.global.cv.u64 %0, [%1];\n\t"
                "}" : "=l"(ptr0) : "l"((uint64_t*)ptr1) : "memory"
            );
            ptr1 = ptr0;    //swap the register for the next load
        }

        // stop timing
        asm volatile("mov.u32 %0, %%clock;" : "=r"(stop) :: "memory");

        // write time and data back to memory (dsink keeps the chase live
        // so the compiler cannot eliminate the loads)
        startClk[tid] = start;
        stopClk[tid] = stop;
        dsink[tid] = ptr1;
    }
}
// Allocates the timing buffers, runs the latency kernel, reports the average
// cycles per dependent load, and releases all allocations.
int main(){
    // host staging buffers for per-thread clocks and the data sink
    uint32_t *startClk = (uint32_t*) malloc(THREADS_NUM*sizeof(uint32_t));
    uint32_t *stopClk = (uint32_t*) malloc(THREADS_NUM*sizeof(uint32_t));
    uint64_t *dsink = (uint64_t*) malloc(THREADS_NUM*sizeof(uint64_t));

    uint32_t *startClk_g;
    uint32_t *stopClk_g;
    uint64_t *posArray_g;
    uint64_t *dsink_g;

    gpuErrchk( cudaMalloc(&startClk_g, THREADS_NUM*sizeof(uint32_t)) );
    gpuErrchk( cudaMalloc(&stopClk_g, THREADS_NUM*sizeof(uint32_t)) );
    gpuErrchk( cudaMalloc(&posArray_g, ARRAY_SIZE*sizeof(uint64_t)) );
    gpuErrchk( cudaMalloc(&dsink_g, THREADS_NUM*sizeof(uint64_t)) );

    mem_lat<<<BLOCKS,THREADS_PER_BLOCK>>>(startClk_g, stopClk_g, posArray_g, dsink_g);
    gpuErrchk( cudaPeekAtLastError() );

    // the blocking copies below also synchronize with the kernel
    gpuErrchk( cudaMemcpy(startClk, startClk_g, THREADS_NUM*sizeof(uint32_t), cudaMemcpyDeviceToHost) );
    gpuErrchk( cudaMemcpy(stopClk, stopClk_g, THREADS_NUM*sizeof(uint32_t), cudaMemcpyDeviceToHost) );
    gpuErrchk( cudaMemcpy(dsink, dsink_g, THREADS_NUM*sizeof(uint64_t), cudaMemcpyDeviceToHost) );

    // each chaser performs ~ITERS/THREADS_NUM serially-dependent loads
    printf("Mem latency = %12.4f cycles \n", (float)(stopClk[0]-startClk[0])/(float)(ITERS/THREADS_NUM));
    printf("Total Clk number = %u \n", stopClk[0]-startClk[0]);

    // fix: release device and host allocations (the original leaked them)
    gpuErrchk( cudaFree(startClk_g) );
    gpuErrchk( cudaFree(stopClk_g) );
    gpuErrchk( cudaFree(posArray_g) );
    gpuErrchk( cudaFree(dsink_g) );
    free(startClk);
    free(stopClk);
    free(dsink);

    return 0;
}
|
180ea4e8ee366a5141629cf1beb01a3a1a2e320c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "flip_layer.hpp"
namespace caffe {
// Element-wise flip kernel for an N x C x H x W blob: each thread writes one
// output element, reading from the height- and/or width-mirrored source index.
template <typename Dtype>
__global__ void FlipKernel(const int num, const int channels, const int height, const int width,
    const Dtype* src_data, Dtype* target_data, bool flip_height, bool flip_width) {
  CUDA_KERNEL_LOOP(index, num * channels * height * width) {
    // decompose the flat index into (n, c, h, w)
    int n = index / (channels * height * width);
    int cs = index % (channels * height * width);
    int c = cs / (height * width);
    int s = cs % (height * width);
    int h = s / width;
    int w = s % width;
    // mirror the source coordinate on the requested axes
    target_data[(((n * channels + c) * height + h) * width) + w] =
      src_data[(((n * channels + c) * height + (flip_height ? (height - 1 - h) : h)) * width) + (flip_width ? (width - 1 - w) : w)];
  }
}
// GPU forward pass: copies bottom into top with the configured height/width
// flips applied. Shapes of bottom[0] and top[0] are assumed identical
// (resize presumably happens in Reshape — TODO confirm).
template <typename Dtype>
void FlipLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  int num = bottom[0]->num();
  int channels = bottom[0]->channels();
  int width = bottom[0]->width();
  int height = bottom[0]->height();
  // one thread per element
  // NOTE(review): no post-launch error check here (e.g. CUDA_POST_KERNEL_CHECK)
  // — consider adding one for consistency with other caffe layers.
  FlipKernel<Dtype> << <CAFFE_GET_BLOCKS(num * channels * height * width),
    CAFFE_CUDA_NUM_THREADS >> >(num, channels, height, width,
    bottom_data, top_data, flip_height_, flip_width_);
}
INSTANTIATE_LAYER_GPU_FUNCS(FlipLayer);
} // namespace caffe
| 180ea4e8ee366a5141629cf1beb01a3a1a2e320c.cu | #include <vector>
#include "flip_layer.hpp"
namespace caffe {
// Element-wise flip kernel for an N x C x H x W blob: each thread writes one
// output element, reading from the height- and/or width-mirrored source index.
template <typename Dtype>
__global__ void FlipKernel(const int num, const int channels, const int height, const int width,
    const Dtype* src_data, Dtype* target_data, bool flip_height, bool flip_width) {
  CUDA_KERNEL_LOOP(index, num * channels * height * width) {
    // decompose the flat index into (n, c, h, w)
    const int chw = channels * height * width;
    const int hw = height * width;
    const int n = index / chw;
    const int rem = index % chw;
    const int c = rem / hw;
    const int spatial = rem % hw;
    const int h = spatial / width;
    const int w = spatial % width;
    // mirrored source coordinate on the requested axes
    const int src_h = flip_height ? (height - 1 - h) : h;
    const int src_w = flip_width ? (width - 1 - w) : w;
    target_data[(((n * channels + c) * height + h) * width) + w] =
        src_data[(((n * channels + c) * height + src_h) * width) + src_w];
  }
}
// GPU forward pass: copies bottom into top with the configured height/width
// flips applied. Shapes of bottom[0] and top[0] are assumed identical
// (resize presumably happens in Reshape — TODO confirm).
template <typename Dtype>
void FlipLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  int num = bottom[0]->num();
  int channels = bottom[0]->channels();
  int width = bottom[0]->width();
  int height = bottom[0]->height();
  // one thread per element
  // NOTE(review): no post-launch error check here (e.g. CUDA_POST_KERNEL_CHECK)
  // — consider adding one for consistency with other caffe layers.
  FlipKernel<Dtype> << <CAFFE_GET_BLOCKS(num * channels * height * width),
    CAFFE_CUDA_NUM_THREADS >> >(num, channels, height, width,
    bottom_data, top_data, flip_height_, flip_width_);
}
INSTANTIATE_LAYER_GPU_FUNCS(FlipLayer);
} // namespace caffe
|
686be76f4f3c7559790443ca0e7dae50a31ab002.hip | // !!! This is a file automatically generated by hipify!!!
#include "THHTensorMath.h"
#include "THHGeneral.h"
#include "THHBlas.h"
#include "THHTensorCopy.h"
#include "THHTensorRandom.h"
#include "THHApply.cuh"
#include "THHReduce.cuh"
#include "THHTensorMathReduce.cuh"
#include "THHTensorMathPointwise.cuh"
// Pointwise functor: *out = atan2f(*a, *b), applied via THC_pointwiseApply3.
struct TensorATan2Op {
  __device__ __forceinline__ void operator()(float* out, float* a, float* b) {
    *out = atan2f(*a, *b);
  }
};
// Element-wise atan2: self_[i] = atan2(tx[i], ty[i]). tx and ty must have the
// same number of elements; self_ is resized to match tx.
void THCudaTensor_atan2(THCState *state, THCudaTensor *self_, THCudaTensor *tx, THCudaTensor *ty)
{
  THAssert(THCudaTensor_checkGPU(state, 3, self_, tx, ty));
  THArgCheck(THCudaTensor_nElement(state, tx) ==
             THCudaTensor_nElement(state, ty), 3, "sizes do not match");
  THCudaTensor_resizeAs(state, self_, tx);

  if (!THC_pointwiseApply3(state, self_, tx, ty, TensorATan2Op())) {
    // pointwise apply only fails on unsupported dimensionality
    THArgCheck(false, 2, CUTORCH_DIM_WARNING);
  }

  THCudaCheck(hipGetLastError());
}
// Resizes r_ to `size` and fills it with uniform random values
// (THCudaTensor_uniform with bounds 0 and 1).
void THCudaTensor_rand(THCState *state, THCudaTensor *r_, THLongStorage *size)
{
  THAssert(THCudaTensor_checkGPU(state, 1, r_));
  THCudaTensor_resize(state, r_, size, NULL);
  THCudaTensor_uniform(state, r_, 0, 1);
}
// Resizes r_ to `size` and fills it with normal random values
// (THCudaTensor_normal with parameters 0 and 1).
void THCudaTensor_randn(THCState *state, THCudaTensor *r_, THLongStorage *size)
{
  THAssert(THCudaTensor_checkGPU(state, 1, r_));
  THCudaTensor_resize(state, r_, size, NULL);
  THCudaTensor_normal(state, r_, 0, 1);
}
| 686be76f4f3c7559790443ca0e7dae50a31ab002.cu | #include "THCTensorMath.h"
#include "THCGeneral.h"
#include "THCBlas.h"
#include "THCTensorCopy.h"
#include "THCTensorRandom.h"
#include "THCApply.cuh"
#include "THCReduce.cuh"
#include "THCTensorMathReduce.cuh"
#include "THCTensorMathPointwise.cuh"
// Pointwise functor for THC_pointwiseApply3: *out = atan2f(*a, *b).
struct TensorATan2Op {
  __device__ __forceinline__ void operator()(float* out, float* a, float* b) {
    *out = atan2f(*a, *b);
  }
};
// Element-wise arctangent: self_[i] = atan2f(tx[i], ty[i]).
// self_ is resized to match tx; tx and ty must have equal element counts.
void THCudaTensor_atan2(THCState *state, THCudaTensor *self_, THCudaTensor *tx, THCudaTensor *ty)
{
  THAssert(THCudaTensor_checkGPU(state, 3, self_, tx, ty));
  THArgCheck(THCudaTensor_nElement(state, tx) ==
             THCudaTensor_nElement(state, ty), 3, "sizes do not match");
  THCudaTensor_resizeAs(state, self_, tx);
  // A false return from THC_pointwiseApply3 is reported as a dimension error.
  if (!THC_pointwiseApply3(state, self_, tx, ty, TensorATan2Op())) {
    THArgCheck(false, 2, CUTORCH_DIM_WARNING);
  }
  THCudaCheck(cudaGetLastError());
}
// Resizes r_ to `size` and fills it with uniform random values
// (THCudaTensor_uniform with bounds 0 and 1).
void THCudaTensor_rand(THCState *state, THCudaTensor *r_, THLongStorage *size)
{
  THAssert(THCudaTensor_checkGPU(state, 1, r_));
  THCudaTensor_resize(state, r_, size, NULL);
  THCudaTensor_uniform(state, r_, 0, 1);
}
// Resizes r_ to `size` and fills it with normal random values
// (THCudaTensor_normal with parameters 0 and 1).
void THCudaTensor_randn(THCState *state, THCudaTensor *r_, THLongStorage *size)
{
  THAssert(THCudaTensor_checkGPU(state, 1, r_));
  THCudaTensor_resize(state, r_, size, NULL);
  THCudaTensor_normal(state, r_, 0, 1);
}
|
7a5f8d47adbafdf1e6cba9b1d1937c599f29feb4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <ATen/hip/detail/KernelUtils.h>
#include <ATen/NativeFunctions.h>
#include <ATen/TensorUtils.h>
#include <ATen/Utils.h>
#include <c10/util/Exception.h>
#include <algorithm>
#include <cfloat>
#include <cmath>
namespace at {
namespace native {
using namespace at::cuda::detail;
namespace {
// Maps output cell `index` to the start offset of its fractional pooling
// window along one dimension. `sample` is the per-(batch, plane) random
// offset read from randomSamples -- presumably in [0, 1); TODO confirm.
template <typename scalar_t, typename accscalar_t>
__device__ inline int get_interval(accscalar_t sample,
    int index, int inputSize, int outputSize, int poolSize) {
  // The last window is pinned so it ends exactly at the input edge.
  if (index == outputSize - 1) {
    return inputSize - poolSize;
  }
  // Fractional stride between consecutive window starts.
  const accscalar_t alpha = static_cast<accscalar_t>(inputSize - poolSize) /
      static_cast<accscalar_t>(outputSize - 1);
  // Subtracting the truncated sample term keeps window starts monotone.
  return static_cast<int>((index + sample) * alpha) -
      static_cast<int>(sample * alpha);
}
// Forward kernel. Launch layout (set by the host template below):
// grid.x covers the output (h, w) points of one plane, grid.y = plane,
// grid.z = batch. Each thread scans its poolSizeH x poolSizeW window and
// records the max value plus its flattened (h * inputW + w) position.
template <typename scalar_t>
__global__ void fractional_max_pool2d_out_cuda_frame(
  PackedTensorAccessor<scalar_t, 4> output,
  PackedTensorAccessor<int64_t, 4> indices,
  PackedTensorAccessor<scalar_t, 4> input,
  PackedTensorAccessor<scalar_t, 3> samples,
  int poolSizeH, int poolSizeW) {
  using accscalar_t = at::acc_type<scalar_t, /*is_cuda=*/true>;
  int ourOutputPoint = threadIdx.x + blockIdx.x * blockDim.x;
  int plane = blockIdx.y;
  int batch = blockIdx.z;
  // Each thread generates a specific output point
  if (ourOutputPoint < output.size(2) * output.size(3)) {
    int outputW = ourOutputPoint % output.size(3);
    int outputH = ourOutputPoint / output.size(3);
    // Window start offsets from the per-(batch, plane) random samples
    // (x sample at [0], y sample at [1]).
    int poolW = get_interval<scalar_t, accscalar_t>(
      static_cast<accscalar_t>(samples[batch][plane][0]),
      outputW, input.size(3), output.size(3), poolSizeW);
    int poolH = get_interval<scalar_t, accscalar_t>(
      static_cast<accscalar_t>(samples[batch][plane][1]),
      outputH, input.size(2), output.size(2), poolSizeH);
    scalar_t maxVal = at::numeric_limits<scalar_t>::lowest();
    int maxIndex = -1;
    for (int h = poolH; h < poolH + poolSizeH; ++h) {
      // The two inner loops compute the same thing; presumably the split
      // exists so the fixed-count 2..7 form can be unrolled -- TODO confirm.
      if (poolSizeW < 2 || poolSizeW > 7) {
        for (int w = poolW; w < poolW + poolSizeW; ++w) {
          scalar_t val = input[batch][plane][h][w];
          // for consistency with THNN, favor the first max
          if (val > maxVal) {
            maxIndex = h * input.size(3) + w;
            maxVal = val;
          }
        }
      } else {
        for (int i = 0; i < poolSizeW; ++i) {
          int w = i + poolW;
          scalar_t val = input[batch][plane][h][w];
          // for consistency with THNN, favor the first max
          if (val > maxVal) {
            maxIndex = h * input.size(3) + w;
            maxVal = val;
          }
        }
      }
    }
    assert(maxVal != at::numeric_limits<scalar_t>::lowest());
    assert(maxIndex != -1);
    // Flattened (h * input width + w) position of the max.
    indices[batch][plane][outputH][outputW] = maxIndex;
    output[batch][plane][outputH][outputW] = maxVal;
  }
}
// Backward kernel: routes each gradOutput element to the input position
// recorded by the forward pass in `indices` (flattened h * inputW + w).
template <typename scalar_t>
__global__ void fractional_max_pool2d_backward_out_cuda_frame(
  PackedTensorAccessor<scalar_t, 4> gradInput,
  PackedTensorAccessor<scalar_t, 4> gradOutput,
  PackedTensorAccessor<int64_t, 4> indices) {
  // Output (h, w) point that this thread is responsible for
  int ourOutputPoint = threadIdx.x + blockIdx.x * blockDim.x;
  int plane = blockIdx.y;
  int batch = blockIdx.z;
  // Each thread generates a specific output point
  if (ourOutputPoint < gradOutput.size(2) *
    gradOutput.size(3)) {
    int outputW = ourOutputPoint % gradOutput.size(3);
    int outputH = ourOutputPoint / gradOutput.size(3);
    int index = indices[batch][plane][outputH][outputW];
    assert(index >= 0);
    // Unpack the flattened argmax back into 2-D input coordinates.
    int inputW = index % gradInput.size(3);
    int inputH = index / gradInput.size(3);
    assert(inputH < gradInput.size(2));
    // atomicAdd: distinct output points may select the same input element.
    atomicAdd(
      &gradInput[batch][plane][inputH][inputW],
      gradOutput[batch][plane][outputH][outputW]
    );
  }
}
// Shared forward implementation for 3-D (C, H, W) and 4-D (N, C, H, W)
// input. Validates sizes, resizes `output`/`indices` in place, adds a fake
// batch dimension for 3-D input, and launches one kernel thread per output
// point (grid: [ceil(plane size / 128), planes, batches]).
void fractional_max_pool2d_out_cuda_template(
  Tensor & output,
  Tensor& indices,
  const Tensor& input,
  IntArrayRef pool_size,
  IntArrayRef output_size,
  const Tensor& randomSamples) {
  int planeDim = 0;
  int dimh = 1;
  int dimw = 2;
  int numBatch = 1;
  int ndims = input.ndimension();
  TORCH_CHECK(input.numel() > 0,
    "fractional_max_pool2d(): expected input to have non-empty ",
    "spatial dimensions.");
  TORCH_CHECK((ndims == 3 || ndims == 4),
    "non-empty 3D or 4D (batch mode) tensor expected for input");
  if (ndims == 4) {
    numBatch = input.size(0);
    planeDim++;
    dimh++;
    dimw++;
  }
  /* sizes */
  int numPlanes = input.size(planeDim);
  int inputH = input.size(dimh);
  int inputW = input.size(dimw);
  int outputH = output_size[0];
  int outputW = output_size[1];
  int poolSizeH = pool_size[0];
  int poolSizeW = pool_size[1];
  // Even the last (edge-pinned) pooling window must fit inside the input.
  TORCH_CHECK(outputH + poolSizeH - 1 <= inputH,
    "fractional_max_pool2d(): pool_size height ", poolSizeH,
    " too large relative to input height ", inputH);
  TORCH_CHECK(outputW + poolSizeW - 1 <= inputW,
    "pool_size width ", poolSizeW,
    " too large relative to input width ", inputW);
  if (ndims == 3) {
    /* resize output */
    output.resize_({numPlanes, outputH, outputW});
    /* indices will contain the locations for each output point */
    indices.resize_({numPlanes, outputH, outputW});
  } else {
    output.resize_({numBatch, numPlanes, outputH, outputW});
    indices.resize_({numBatch, numPlanes, outputH, outputW});
  }
  auto output_ = output;
  auto input_ = input;
  auto indices_ = indices;
  if(ndims == 3) {
    // Add a fake batch dimension so the kernel always indexes 4-D tensors.
    output_ = output_.reshape({1, numPlanes, outputH, outputW});
    indices_ = indices_.reshape({1, numPlanes, outputH, outputW});
    input_ = input_.reshape({1, input.size(0), input.size(1), input.size(2)});
  }
  // block is limited to 4 warps
  // grid handles overflow per each plane
  int outputPlaneSize = output_.size(2) *
    output_.size(3);
  dim3 grid((outputPlaneSize + 127) / 128, // ceil(outputPlaneSize / 128)
            input_.size(1),
            input_.size(0));
  dim3 block(outputPlaneSize > 128 ? 128 : outputPlaneSize);
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(),
    "fractional_max_pool2d_out_cuda_frame",
    [&] {
      auto devInput = input_.packed_accessor<scalar_t, 4>();
      auto devOutput = output_.packed_accessor<scalar_t, 4>();
      auto devIndices = indices_.packed_accessor<int64_t, 4>();
      auto devSamples = randomSamples.packed_accessor<scalar_t, 3>();
      hipLaunchKernelGGL(( fractional_max_pool2d_out_cuda_frame<scalar_t>)
        , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
        devOutput, devIndices, devInput, devSamples,
        poolSizeH, poolSizeW);
      }
    );
  // Bug fix: hipGetLastError() both returns and clears the sticky error, so
  // capture it once. The previous code called it a second time inside the
  // TORCH_CHECK message, which would have reported hipSuccess on failure.
  hipError_t err = hipGetLastError();
  TORCH_CHECK(err == hipSuccess,
    "fractional_max_pool2d_out_cuda_frame failed with error code ",
    err);
}
// Shared backward implementation: zeroes gradInput, then scatters gradOutput
// back to the argmax positions recorded in `indices` by the forward pass.
void fractional_max_pool2d_backward_out_cuda_template(
  Tensor& gradInput,
  const Tensor& gradOutput,
  const Tensor& input,
  IntArrayRef pool_size /* unused */,
  IntArrayRef output_size,
  const Tensor& indices)
{
  int dimh = 1;
  int dimw = 2;
  int ndims = input.ndimension();
  if (ndims == 4) {
    dimh++;
    dimw++;
  }
  /* sizes */
  int inputH = input.size(dimh);
  int inputW = input.size(dimw);
  int outputH = output_size[0];
  int outputW = output_size[1];
  TORCH_CHECK(outputH == gradOutput.size(dimh),
    "fractional_max_pool2d(): gradOutput height unexpected");
  TORCH_CHECK(outputW == gradOutput.size(dimw),
    "fractional_max_pool2d(): gradOutput width unexpected");
  /* resize */
  gradInput.resize_as_(input);
  gradInput.zero_();
  auto gradInput_ = gradInput;
  auto gradOutput_ = gradOutput;
  auto indices_ = indices;
  if(ndims == 3) {
    // Add a fake batch dimension so the kernel always indexes 4-D tensors.
    gradInput_ = gradInput_.reshape({1, input.size(0), inputH, inputW});
    gradOutput_ = gradOutput_.reshape({1, gradOutput.size(0), outputH, outputW});
    indices_ = indices_.reshape({1, indices_.size(0), outputH, outputW});
  }
  /* backprop */
  // block is limited to 4 warps
  // grid handles overflow per each plane
  int outputPlaneSize = gradOutput_.size(2) *
    gradOutput_.size(3);
  dim3 grid((outputPlaneSize + 127) / 128, // ceil(outputPlaneSize / 128)
            gradInput_.size(1),
            gradInput_.size(0));
  dim3 block(outputPlaneSize > 128 ? 128 : outputPlaneSize);
  // Bug fix: build the accessor from the reshaped 4-D view `indices_`, not
  // from `indices`; for 3-D input `indices` has rank 3 and
  // packed_accessor<int64_t, 4> would reject it.
  auto devIndices = indices_.packed_accessor<int64_t, 4>();
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(),
    "fractional_max_pool2d_backward_out_cuda_frame",
    [&] {
      auto devGradInput = gradInput_.packed_accessor<scalar_t, 4>();
      auto devGradOutput = gradOutput_.packed_accessor<scalar_t, 4>();
      hipLaunchKernelGGL(( fractional_max_pool2d_backward_out_cuda_frame<scalar_t>)
        , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
        devGradInput, devGradOutput, devIndices);
      }
    );
  // Bug fix: capture the error once; hipGetLastError() clears the sticky
  // error, so re-calling it in the message would report hipSuccess.
  hipError_t err = hipGetLastError();
  TORCH_CHECK(err == hipSuccess,
    "fractional_max_pool2d_backward_out_cuda_frame failed with error code ",
    err);
}
}// namespace
// Out-variant forward entry point: writes into caller-provided tensors,
// which the shared template resizes in place.
std::tuple<Tensor&, Tensor&> fractional_max_pool2d_out_cuda(
  at::Tensor& output,
  at::Tensor& indices,
  const at::Tensor& input,
  IntArrayRef pool_size,
  IntArrayRef output_size,
  const at::Tensor& randomSamples)
{
  fractional_max_pool2d_out_cuda_template(
      output, indices, input, pool_size, output_size, randomSamples);
  return std::tie(output, indices);
}
// Allocating forward entry point: creates empty output/indices tensors
// (indices as int64) and delegates to the shared template.
std::tuple<Tensor, Tensor> fractional_max_pool2d_cuda(
  const at::Tensor& input,
  IntArrayRef pool_size,
  IntArrayRef output_size,
  const at::Tensor& randomSamples)
{
  Tensor output = at::empty({0}, input.options());
  Tensor indices = at::empty({0}, input.options().dtype(kLong));
  fractional_max_pool2d_out_cuda_template(
      output, indices, input, pool_size, output_size, randomSamples);
  return std::make_tuple(output, indices);
}
// Out-variant backward entry point: writes into a caller-provided gradInput.
Tensor& fractional_max_pool2d_backward_out_cuda(
  at::Tensor& gradInput,
  const at::Tensor& gradOutput_,
  const at::Tensor& input,
  IntArrayRef pool_size,
  IntArrayRef output_size,
  const at::Tensor& indices)
{
  fractional_max_pool2d_backward_out_cuda_template(
      gradInput, gradOutput_, input, pool_size, output_size, indices);
  return gradInput;
}
// Allocating backward entry point: creates gradInput and delegates to the
// shared template, which resizes it to match the input.
Tensor fractional_max_pool2d_backward_cuda(
  const at::Tensor& gradOutput_,
  const at::Tensor& input,
  IntArrayRef pool_size,
  IntArrayRef output_size,
  const at::Tensor& indices)
{
  Tensor gradInput = at::empty({0}, input.options());
  fractional_max_pool2d_backward_out_cuda_template(
      gradInput, gradOutput_, input, pool_size, output_size, indices);
  return gradInput;
}
}// at::native
}// at
| 7a5f8d47adbafdf1e6cba9b1d1937c599f29feb4.cu | #include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/cuda/detail/KernelUtils.h>
#include <ATen/NativeFunctions.h>
#include <ATen/TensorUtils.h>
#include <ATen/Utils.h>
#include <c10/util/Exception.h>
#include <algorithm>
#include <cfloat>
#include <cmath>
namespace at {
namespace native {
using namespace at::cuda::detail;
namespace {
// Maps output cell `index` to the start offset of its fractional pooling
// window along one dimension. `sample` is the per-(batch, plane) random
// offset read from randomSamples -- presumably in [0, 1); TODO confirm.
template <typename scalar_t, typename accscalar_t>
__device__ inline int get_interval(accscalar_t sample,
  int index, int inputSize, int outputSize, int poolSize) {
  // Fractional stride between consecutive window starts.
  accscalar_t alpha = static_cast<accscalar_t>(inputSize - poolSize) /
    static_cast<accscalar_t>(outputSize - 1);
  if (index == outputSize - 1) {
    // The last window is pinned so it ends exactly at the input edge.
    return inputSize - poolSize;
  } else {
    // Subtracting the truncated sample term keeps window starts monotone.
    return static_cast<int>((index + sample) * alpha) -
      static_cast<int>(sample * alpha);
  }
}
// Forward kernel. Launch layout (set by the host template below):
// grid.x covers the output (h, w) points of one plane, grid.y = plane,
// grid.z = batch. Each thread scans its poolSizeH x poolSizeW window and
// records the max value plus its flattened (h * inputW + w) position.
template <typename scalar_t>
__global__ void fractional_max_pool2d_out_cuda_frame(
  PackedTensorAccessor<scalar_t, 4> output,
  PackedTensorAccessor<int64_t, 4> indices,
  PackedTensorAccessor<scalar_t, 4> input,
  PackedTensorAccessor<scalar_t, 3> samples,
  int poolSizeH, int poolSizeW) {
  using accscalar_t = at::acc_type<scalar_t, /*is_cuda=*/true>;
  int ourOutputPoint = threadIdx.x + blockIdx.x * blockDim.x;
  int plane = blockIdx.y;
  int batch = blockIdx.z;
  // Each thread generates a specific output point
  if (ourOutputPoint < output.size(2) * output.size(3)) {
    int outputW = ourOutputPoint % output.size(3);
    int outputH = ourOutputPoint / output.size(3);
    // Window start offsets from the per-(batch, plane) random samples
    // (x sample at [0], y sample at [1]).
    int poolW = get_interval<scalar_t, accscalar_t>(
      static_cast<accscalar_t>(samples[batch][plane][0]),
      outputW, input.size(3), output.size(3), poolSizeW);
    int poolH = get_interval<scalar_t, accscalar_t>(
      static_cast<accscalar_t>(samples[batch][plane][1]),
      outputH, input.size(2), output.size(2), poolSizeH);
    scalar_t maxVal = at::numeric_limits<scalar_t>::lowest();
    int maxIndex = -1;
    for (int h = poolH; h < poolH + poolSizeH; ++h) {
      // The two inner loops compute the same thing; presumably the split
      // exists so the fixed-count 2..7 form can be unrolled -- TODO confirm.
      if (poolSizeW < 2 || poolSizeW > 7) {
        for (int w = poolW; w < poolW + poolSizeW; ++w) {
          scalar_t val = input[batch][plane][h][w];
          // for consistency with THNN, favor the first max
          if (val > maxVal) {
            maxIndex = h * input.size(3) + w;
            maxVal = val;
          }
        }
      } else {
        for (int i = 0; i < poolSizeW; ++i) {
          int w = i + poolW;
          scalar_t val = input[batch][plane][h][w];
          // for consistency with THNN, favor the first max
          if (val > maxVal) {
            maxIndex = h * input.size(3) + w;
            maxVal = val;
          }
        }
      }
    }
    assert(maxVal != at::numeric_limits<scalar_t>::lowest());
    assert(maxIndex != -1);
    // Flattened (h * input width + w) position of the max.
    indices[batch][plane][outputH][outputW] = maxIndex;
    output[batch][plane][outputH][outputW] = maxVal;
  }
}
// Backward kernel: routes each gradOutput element to the input position
// recorded by the forward pass in `indices` (flattened h * inputW + w).
template <typename scalar_t>
__global__ void fractional_max_pool2d_backward_out_cuda_frame(
  PackedTensorAccessor<scalar_t, 4> gradInput,
  PackedTensorAccessor<scalar_t, 4> gradOutput,
  PackedTensorAccessor<int64_t, 4> indices) {
  // Output (h, w) point that this thread is responsible for
  int ourOutputPoint = threadIdx.x + blockIdx.x * blockDim.x;
  int plane = blockIdx.y;
  int batch = blockIdx.z;
  // Each thread generates a specific output point
  if (ourOutputPoint < gradOutput.size(2) *
    gradOutput.size(3)) {
    int outputW = ourOutputPoint % gradOutput.size(3);
    int outputH = ourOutputPoint / gradOutput.size(3);
    int index = indices[batch][plane][outputH][outputW];
    assert(index >= 0);
    // Unpack the flattened argmax back into 2-D input coordinates.
    int inputW = index % gradInput.size(3);
    int inputH = index / gradInput.size(3);
    assert(inputH < gradInput.size(2));
    // atomicAdd: distinct output points may select the same input element.
    atomicAdd(
      &gradInput[batch][plane][inputH][inputW],
      gradOutput[batch][plane][outputH][outputW]
    );
  }
}
// Shared forward implementation for 3-D (C, H, W) and 4-D (N, C, H, W)
// input. Validates sizes, resizes `output`/`indices` in place, adds a fake
// batch dimension for 3-D input, and launches one kernel thread per output
// point (grid: [ceil(plane size / 128), planes, batches]).
void fractional_max_pool2d_out_cuda_template(
  Tensor & output,
  Tensor& indices,
  const Tensor& input,
  IntArrayRef pool_size,
  IntArrayRef output_size,
  const Tensor& randomSamples) {
  int planeDim = 0;
  int dimh = 1;
  int dimw = 2;
  int numBatch = 1;
  int ndims = input.ndimension();
  TORCH_CHECK(input.numel() > 0,
    "fractional_max_pool2d(): expected input to have non-empty ",
    "spatial dimensions.");
  TORCH_CHECK((ndims == 3 || ndims == 4),
    "non-empty 3D or 4D (batch mode) tensor expected for input");
  if (ndims == 4) {
    numBatch = input.size(0);
    planeDim++;
    dimh++;
    dimw++;
  }
  /* sizes */
  int numPlanes = input.size(planeDim);
  int inputH = input.size(dimh);
  int inputW = input.size(dimw);
  int outputH = output_size[0];
  int outputW = output_size[1];
  int poolSizeH = pool_size[0];
  int poolSizeW = pool_size[1];
  // Even the last (edge-pinned) pooling window must fit inside the input.
  TORCH_CHECK(outputH + poolSizeH - 1 <= inputH,
    "fractional_max_pool2d(): pool_size height ", poolSizeH,
    " too large relative to input height ", inputH);
  TORCH_CHECK(outputW + poolSizeW - 1 <= inputW,
    "pool_size width ", poolSizeW,
    " too large relative to input width ", inputW);
  if (ndims == 3) {
    /* resize output */
    output.resize_({numPlanes, outputH, outputW});
    /* indices will contain the locations for each output point */
    indices.resize_({numPlanes, outputH, outputW});
  } else {
    output.resize_({numBatch, numPlanes, outputH, outputW});
    indices.resize_({numBatch, numPlanes, outputH, outputW});
  }
  auto output_ = output;
  auto input_ = input;
  auto indices_ = indices;
  if(ndims == 3) {
    // Add a fake batch dimension so the kernel always indexes 4-D tensors.
    output_ = output_.reshape({1, numPlanes, outputH, outputW});
    indices_ = indices_.reshape({1, numPlanes, outputH, outputW});
    input_ = input_.reshape({1, input.size(0), input.size(1), input.size(2)});
  }
  // block is limited to 4 warps
  // grid handles overflow per each plane
  int outputPlaneSize = output_.size(2) *
    output_.size(3);
  dim3 grid((outputPlaneSize + 127) / 128, // ceil(outputPlaneSize / 128)
            input_.size(1),
            input_.size(0));
  dim3 block(outputPlaneSize > 128 ? 128 : outputPlaneSize);
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(),
    "fractional_max_pool2d_out_cuda_frame",
    [&] {
      auto devInput = input_.packed_accessor<scalar_t, 4>();
      auto devOutput = output_.packed_accessor<scalar_t, 4>();
      auto devIndices = indices_.packed_accessor<int64_t, 4>();
      auto devSamples = randomSamples.packed_accessor<scalar_t, 3>();
      fractional_max_pool2d_out_cuda_frame<scalar_t>
        <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(
        devOutput, devIndices, devInput, devSamples,
        poolSizeH, poolSizeW);
      }
    );
  // Bug fix: cudaGetLastError() both returns and clears the sticky error,
  // so capture it once. The previous code called it a second time inside the
  // TORCH_CHECK message, which would have reported cudaSuccess on failure.
  cudaError_t err = cudaGetLastError();
  TORCH_CHECK(err == cudaSuccess,
    "fractional_max_pool2d_out_cuda_frame failed with error code ",
    err);
}
// Shared backward implementation: zeroes gradInput, then scatters gradOutput
// back to the argmax positions recorded in `indices` by the forward pass.
void fractional_max_pool2d_backward_out_cuda_template(
  Tensor& gradInput,
  const Tensor& gradOutput,
  const Tensor& input,
  IntArrayRef pool_size /* unused */,
  IntArrayRef output_size,
  const Tensor& indices)
{
  int dimh = 1;
  int dimw = 2;
  int ndims = input.ndimension();
  if (ndims == 4) {
    dimh++;
    dimw++;
  }
  /* sizes */
  int inputH = input.size(dimh);
  int inputW = input.size(dimw);
  int outputH = output_size[0];
  int outputW = output_size[1];
  TORCH_CHECK(outputH == gradOutput.size(dimh),
    "fractional_max_pool2d(): gradOutput height unexpected");
  TORCH_CHECK(outputW == gradOutput.size(dimw),
    "fractional_max_pool2d(): gradOutput width unexpected");
  /* resize */
  gradInput.resize_as_(input);
  gradInput.zero_();
  auto gradInput_ = gradInput;
  auto gradOutput_ = gradOutput;
  auto indices_ = indices;
  if(ndims == 3) {
    // Add a fake batch dimension so the kernel always indexes 4-D tensors.
    gradInput_ = gradInput_.reshape({1, input.size(0), inputH, inputW});
    gradOutput_ = gradOutput_.reshape({1, gradOutput.size(0), outputH, outputW});
    indices_ = indices_.reshape({1, indices_.size(0), outputH, outputW});
  }
  /* backprop */
  // block is limited to 4 warps
  // grid handles overflow per each plane
  int outputPlaneSize = gradOutput_.size(2) *
    gradOutput_.size(3);
  dim3 grid((outputPlaneSize + 127) / 128, // ceil(outputPlaneSize / 128)
            gradInput_.size(1),
            gradInput_.size(0));
  dim3 block(outputPlaneSize > 128 ? 128 : outputPlaneSize);
  // Bug fix: build the accessor from the reshaped 4-D view `indices_`, not
  // from `indices`; for 3-D input `indices` has rank 3 and
  // packed_accessor<int64_t, 4> would reject it.
  auto devIndices = indices_.packed_accessor<int64_t, 4>();
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(),
    "fractional_max_pool2d_backward_out_cuda_frame",
    [&] {
      auto devGradInput = gradInput_.packed_accessor<scalar_t, 4>();
      auto devGradOutput = gradOutput_.packed_accessor<scalar_t, 4>();
      fractional_max_pool2d_backward_out_cuda_frame<scalar_t>
        <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(
        devGradInput, devGradOutput, devIndices);
      }
    );
  // Bug fix: capture the error once; cudaGetLastError() clears the sticky
  // error, so re-calling it in the message would report cudaSuccess.
  cudaError_t err = cudaGetLastError();
  TORCH_CHECK(err == cudaSuccess,
    "fractional_max_pool2d_backward_out_cuda_frame failed with error code ",
    err);
}
}// namespace
// Out-variant forward entry point: writes into caller-provided tensors,
// which the shared template resizes in place.
std::tuple<Tensor&, Tensor&> fractional_max_pool2d_out_cuda(
  at::Tensor& output,
  at::Tensor& indices,
  const at::Tensor& input,
  IntArrayRef pool_size,
  IntArrayRef output_size,
  const at::Tensor& randomSamples)
{
  fractional_max_pool2d_out_cuda_template(
      output, indices, input, pool_size, output_size, randomSamples);
  return std::tie(output, indices);
}
// Allocating forward entry point: creates empty output/indices tensors
// (indices as int64) and delegates to the shared template.
std::tuple<Tensor, Tensor> fractional_max_pool2d_cuda(
  const at::Tensor& input,
  IntArrayRef pool_size,
  IntArrayRef output_size,
  const at::Tensor& randomSamples)
{
  Tensor output = at::empty({0}, input.options());
  Tensor indices = at::empty({0}, input.options().dtype(kLong));
  fractional_max_pool2d_out_cuda_template(
      output, indices, input, pool_size, output_size, randomSamples);
  return std::make_tuple(output, indices);
}
// Out-variant backward entry point: writes into a caller-provided gradInput.
Tensor& fractional_max_pool2d_backward_out_cuda(
  at::Tensor& gradInput,
  const at::Tensor& gradOutput_,
  const at::Tensor& input,
  IntArrayRef pool_size,
  IntArrayRef output_size,
  const at::Tensor& indices)
{
  fractional_max_pool2d_backward_out_cuda_template(
      gradInput, gradOutput_, input, pool_size, output_size, indices);
  return gradInput;
}
// Allocating backward entry point: creates gradInput and delegates to the
// shared template, which resizes it to match the input.
Tensor fractional_max_pool2d_backward_cuda(
  const at::Tensor& gradOutput_,
  const at::Tensor& input,
  IntArrayRef pool_size,
  IntArrayRef output_size,
  const at::Tensor& indices)
{
  Tensor gradInput = at::empty({0}, input.options());
  fractional_max_pool2d_backward_out_cuda_template(
      gradInput, gradOutput_, input, pool_size, output_size, indices);
  return gradInput;
}
}// at::native
}// at
|
f2f5392828b094c304ce7bd6969f965771b5aa26.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef TABLE_OP_IMPL_CU
#define TABLE_OP_IMPL_CU
#include "GPU_Dll.h"
/*
* the kernels.
*/
//kernel for interface 1:GPUOnly_getRIDList
// Splits relation d_R into two flat columns: d_RIDList receives the .x
// field and d_output the .y field. `delta` is the total launched thread
// count (numThreadPerBlock * numBlock on the host), used as the loop stride.
__global__ void
getRIDList_kernel(Record *d_R, int delta, int rLen, int *d_RIDList, int *d_output)
{
    // Flatten the (potentially 2-D) block and grid shapes into one global id.
    const int tid = threadIdx.x + threadIdx.y * blockDim.x;
    const int bid = blockIdx.x + blockIdx.y * gridDim.x;
    for (int pos = bid * blockDim.x + tid; pos < rLen; pos += delta)
    {
        d_RIDList[pos] = d_R[pos].x;
        d_output[pos] = d_R[pos].y;
    }
}
//kernel for interface 2:GPUOnly_copyRelation
// Strided element-wise copy of d_R[0..rLen) into d_output.
// `delta` is the total launched thread count (loop stride).
__global__ void
copyRelation_kernel(Record *d_R, int delta, int rLen, Record* d_output)
{
    const int tid = threadIdx.x + threadIdx.y * blockDim.x;
    const int bid = blockIdx.x + blockIdx.y * gridDim.x;
    for (int pos = bid * blockDim.x + tid; pos < rLen; pos += delta)
    {
        d_output[pos] = d_R[pos];
    }
}
//kernel for interface 3:GPUOnly_setRIDList
// Rebuilds each record of d_R from two flat columns: .x from d_RIDList and
// .y from d_intput. `delta` is the total launched thread count (loop stride).
__global__ void
setRIDList_kernel(int *d_RIDList, int *d_intput, int delta, int rLen, Record *d_R)
{
    const int tid = threadIdx.x + threadIdx.y * blockDim.x;
    const int bid = blockIdx.x + blockIdx.y * gridDim.x;
    for (int pos = bid * blockDim.x + tid; pos < rLen; pos += delta)
    {
        d_R[pos].x = d_RIDList[pos];
        d_R[pos].y = d_intput[pos];
    }
}
//kernel for interface 3:GPUOnly_setRIDList
// Fills d_R so that record pos holds (.x = pos, .y = d_ValueList[pos]).
// `delta` is the total launched thread count (loop stride).
__global__ void
setValueList_kernel(int *d_ValueList, int delta, int rLen, Record *d_R)
{
    const int tid = threadIdx.x + threadIdx.y * blockDim.x;
    const int bid = blockIdx.x + blockIdx.y * gridDim.x;
    for (int pos = bid * blockDim.x + tid; pos < rLen; pos += delta)
    {
        d_R[pos].x = pos;
        d_R[pos].y = d_ValueList[pos];
    }
}
/*
* the interfaces.
*/
//Interface 1: get all RIDs into an array. You need to allocate d_RIDList.
extern "C"
void GPUOnly_getRIDList(Record* d_Rin, int rLen, int** d_RIDList, int numThreadPerBlock, int numBlock)
{
    int outputSize=sizeof(int)*rLen;
    // Device buffer for the .x column, allocated here and returned to the caller.
    GPUMALLOC((void**)d_RIDList, outputSize);
    // Scratch buffer that absorbs the .y column; freed before returning.
    int* d_tempOutput;
    GPUMALLOC((void**)&d_tempOutput, outputSize);
    // delta passed to the kernel = total number of launched threads.
    int numThread=numThreadPerBlock*numBlock;
    dim3 thread( numThreadPerBlock, 1, 1);
    dim3 grid( numBlock, 1 , 1);
    hipLaunchKernelGGL(( getRIDList_kernel), dim3(grid),dim3(thread), 0, 0, d_Rin, numThread, rLen, *d_RIDList, d_tempOutput);
    GPUFREE(d_tempOutput);
}
//Interface 2: copy a relation to another relation. You need to allocate d_destRin.
extern "C"
void GPUOnly_copyRelation(Record* d_srcRin, int rLen, Record** d_destRin, int numThreadPerBlock, int numBlock)
{
    // Allocates *d_destRin on the device and copies d_srcRin into it.
    int outputSize=sizeof(Record)*rLen;
    GPUMALLOC((void**)d_destRin, outputSize);
    int numThread=numThreadPerBlock*numBlock;
    dim3 thread( numThreadPerBlock, 1, 1);
    dim3 grid( numBlock, 1 , 1);
    hipLaunchKernelGGL(( copyRelation_kernel), dim3(grid),dim3(thread), 0, 0, d_srcRin, numThread, rLen, *d_destRin);
}
//Interface3: set the RID according to the RID list.
extern "C"
void GPUOnly_setRIDList(int* d_RIDList, int rLen, Record* d_destRin, int numThreadPerBlock, int numBlock)
{
    int outputSize=sizeof(int)*rLen;
    // NOTE(review): d_tempOutput is allocated but never written before the
    // kernel copies it into d_destRin[pos].y, so the y field receives
    // indeterminate data. Looks like only the .x (RID) update is meaningful
    // here -- confirm the .y clobber is intended.
    int* d_tempOutput;
    GPUMALLOC((void**)&d_tempOutput, outputSize);
    int numThread=numThreadPerBlock*numBlock;
    dim3 thread( numThreadPerBlock, 1, 1);
    dim3 grid( numBlock, 1 , 1);
    hipLaunchKernelGGL(( setRIDList_kernel), dim3(grid),dim3(thread), 0, 0, d_RIDList, d_tempOutput, numThread, rLen, d_destRin);
    GPUFREE(d_tempOutput);
}
// Rewrites d_destRin in place: .x becomes the element position and .y the
// corresponding entry of d_ValueList.
// NOTE(review): unlike the other interfaces this one is not declared
// extern "C" -- confirm the C++ linkage is intentional.
void GPUOnly_setValueList(int* d_ValueList, int rLen, Record* d_destRin, int numThreadPerBlock , int numBlock)
{
    int numThread=numThreadPerBlock*numBlock;
    dim3 thread( numThreadPerBlock, 1, 1);
    dim3 grid( numBlock, 1 , 1);
    hipLaunchKernelGGL(( setValueList_kernel), dim3(grid),dim3(thread), 0, 0, d_ValueList, numThread, rLen, d_destRin);
}
// Splits relation d_R into two flat columns: d_output receives the .x field
// and d_ValueList the .y field. `delta` is the total launched thread count
// (loop stride).
__global__ void
getValueList_kernel(Record *d_R, int delta, int rLen, int *d_ValueList, int *d_output)
{
    const int tid = threadIdx.x + threadIdx.y * blockDim.x;
    const int bid = blockIdx.x + blockIdx.y * gridDim.x;
    for (int pos = bid * blockDim.x + tid; pos < rLen; pos += delta)
    {
        d_output[pos] = d_R[pos].x;
        d_ValueList[pos] = d_R[pos].y;
    }
}
// Extracts the .y column of d_Rin into a freshly allocated device array
// *d_ValueList; the .x column goes into a scratch buffer and is discarded.
extern "C"
void GPUOnly_getValueList(Record* d_Rin, int rLen, int** d_ValueList, int numThreadPerBlock, int numBlock)
{
    int outputSize=sizeof(int)*rLen;
    GPUMALLOC((void**)d_ValueList, outputSize);
    // Scratch buffer that absorbs the .x column; freed before returning.
    int* d_tempOutput;
    GPUMALLOC((void**)&d_tempOutput, outputSize);
    int numThread=numThreadPerBlock*numBlock;
    dim3 thread( numThreadPerBlock, 1, 1);
    dim3 grid( numBlock, 1 , 1);
    hipLaunchKernelGGL(( getValueList_kernel), dim3(grid),dim3(thread), 0, 0, d_Rin, numThread, rLen, *d_ValueList, d_tempOutput);
    GPUFREE(d_tempOutput);
}
#endif
| f2f5392828b094c304ce7bd6969f965771b5aa26.cu | #ifndef TABLE_OP_IMPL_CU
#define TABLE_OP_IMPL_CU
#include "GPU_Dll.h"
/*
* the kernels.
*/
//kernel for interface 1:GPUOnly_getRIDList
// Splits relation d_R into two flat columns: d_RIDList receives the .x
// field and d_output the .y field. `delta` is the total launched thread
// count (numThreadPerBlock * numBlock on the host), used as the loop stride.
__global__ void
getRIDList_kernel(Record *d_R, int delta, int rLen, int *d_RIDList, int *d_output)
{
    // Flatten the (potentially 2-D) block and grid shapes into one global id.
    const int tid = threadIdx.x + threadIdx.y * blockDim.x;
    const int bid = blockIdx.x + blockIdx.y * gridDim.x;
    for (int pos = bid * blockDim.x + tid; pos < rLen; pos += delta)
    {
        d_RIDList[pos] = d_R[pos].x;
        d_output[pos] = d_R[pos].y;
    }
}
//kernel for interface 2:GPUOnly_copyRelation
// Strided element-wise copy of d_R[0..rLen) into d_output.
// `delta` is the total launched thread count (loop stride).
__global__ void
copyRelation_kernel(Record *d_R, int delta, int rLen, Record* d_output)
{
    const int tid = threadIdx.x + threadIdx.y * blockDim.x;
    const int bid = blockIdx.x + blockIdx.y * gridDim.x;
    for (int pos = bid * blockDim.x + tid; pos < rLen; pos += delta)
    {
        d_output[pos] = d_R[pos];
    }
}
//kernel for interface 3:GPUOnly_setRIDList
// Rebuilds each record of d_R from two flat columns: .x from d_RIDList and
// .y from d_intput. `delta` is the total launched thread count (loop stride).
__global__ void
setRIDList_kernel(int *d_RIDList, int *d_intput, int delta, int rLen, Record *d_R)
{
    const int tid = threadIdx.x + threadIdx.y * blockDim.x;
    const int bid = blockIdx.x + blockIdx.y * gridDim.x;
    for (int pos = bid * blockDim.x + tid; pos < rLen; pos += delta)
    {
        d_R[pos].x = d_RIDList[pos];
        d_R[pos].y = d_intput[pos];
    }
}
//kernel for interface 3:GPUOnly_setRIDList
// Fills d_R so that record pos holds (.x = pos, .y = d_ValueList[pos]).
// `delta` is the total launched thread count (loop stride).
__global__ void
setValueList_kernel(int *d_ValueList, int delta, int rLen, Record *d_R)
{
    const int tid = threadIdx.x + threadIdx.y * blockDim.x;
    const int bid = blockIdx.x + blockIdx.y * gridDim.x;
    for (int pos = bid * blockDim.x + tid; pos < rLen; pos += delta)
    {
        d_R[pos].x = pos;
        d_R[pos].y = d_ValueList[pos];
    }
}
/*
* the interfaces.
*/
//Interface 1: get all RIDs into an array. You need to allocate d_RIDList.
extern "C"
void GPUOnly_getRIDList(Record* d_Rin, int rLen, int** d_RIDList, int numThreadPerBlock, int numBlock)
{
    int outputSize=sizeof(int)*rLen;
    // Device buffer for the .x column, allocated here and returned to the caller.
    GPUMALLOC((void**)d_RIDList, outputSize);
    // Scratch buffer that absorbs the .y column; freed before returning.
    int* d_tempOutput;
    GPUMALLOC((void**)&d_tempOutput, outputSize);
    // delta passed to the kernel = total number of launched threads.
    int numThread=numThreadPerBlock*numBlock;
    dim3 thread( numThreadPerBlock, 1, 1);
    dim3 grid( numBlock, 1 , 1);
    getRIDList_kernel<<<grid,thread>>>(d_Rin, numThread, rLen, *d_RIDList, d_tempOutput);
    GPUFREE(d_tempOutput);
}
//Interface 2: copy a relation to another relation. You need to allocate d_destRin.
extern "C"
void GPUOnly_copyRelation(Record* d_srcRin, int rLen, Record** d_destRin, int numThreadPerBlock, int numBlock)
{
    // Allocates *d_destRin on the device and copies d_srcRin into it.
    int outputSize=sizeof(Record)*rLen;
    GPUMALLOC((void**)d_destRin, outputSize);
    int numThread=numThreadPerBlock*numBlock;
    dim3 thread( numThreadPerBlock, 1, 1);
    dim3 grid( numBlock, 1 , 1);
    copyRelation_kernel<<<grid,thread>>>(d_srcRin, numThread, rLen, *d_destRin);
}
//Interface3: set the RID according to the RID list.
extern "C"
void GPUOnly_setRIDList(int* d_RIDList, int rLen, Record* d_destRin, int numThreadPerBlock, int numBlock)
{
    int outputSize=sizeof(int)*rLen;
    // NOTE(review): d_tempOutput is allocated but never written before the
    // kernel copies it into d_destRin[pos].y, so the y field receives
    // indeterminate data. Looks like only the .x (RID) update is meaningful
    // here -- confirm the .y clobber is intended.
    int* d_tempOutput;
    GPUMALLOC((void**)&d_tempOutput, outputSize);
    int numThread=numThreadPerBlock*numBlock;
    dim3 thread( numThreadPerBlock, 1, 1);
    dim3 grid( numBlock, 1 , 1);
    setRIDList_kernel<<<grid,thread>>>(d_RIDList, d_tempOutput, numThread, rLen, d_destRin);
    GPUFREE(d_tempOutput);
}
// Rewrites d_destRin in place: .x becomes the element position and .y the
// corresponding entry of d_ValueList.
// NOTE(review): unlike the other interfaces this one is not declared
// extern "C" -- confirm the C++ linkage is intentional.
void GPUOnly_setValueList(int* d_ValueList, int rLen, Record* d_destRin, int numThreadPerBlock , int numBlock)
{
    int numThread=numThreadPerBlock*numBlock;
    dim3 thread( numThreadPerBlock, 1, 1);
    dim3 grid( numBlock, 1 , 1);
    setValueList_kernel<<<grid,thread>>>(d_ValueList, numThread, rLen, d_destRin);
}
// Splits relation d_R into two flat columns: d_output receives the .x field
// and d_ValueList the .y field. `delta` is the total launched thread count
// (loop stride).
__global__ void
getValueList_kernel(Record *d_R, int delta, int rLen, int *d_ValueList, int *d_output)
{
    const int tid = threadIdx.x + threadIdx.y * blockDim.x;
    const int bid = blockIdx.x + blockIdx.y * gridDim.x;
    for (int pos = bid * blockDim.x + tid; pos < rLen; pos += delta)
    {
        d_output[pos] = d_R[pos].x;
        d_ValueList[pos] = d_R[pos].y;
    }
}
// Extracts the .y column of d_Rin into a freshly allocated device array
// *d_ValueList; the .x column goes into a scratch buffer and is discarded.
extern "C"
void GPUOnly_getValueList(Record* d_Rin, int rLen, int** d_ValueList, int numThreadPerBlock, int numBlock)
{
    int outputSize=sizeof(int)*rLen;
    GPUMALLOC((void**)d_ValueList, outputSize);
    // Scratch buffer that absorbs the .x column; freed before returning.
    int* d_tempOutput;
    GPUMALLOC((void**)&d_tempOutput, outputSize);
    int numThread=numThreadPerBlock*numBlock;
    dim3 thread( numThreadPerBlock, 1, 1);
    dim3 grid( numBlock, 1 , 1);
    getValueList_kernel<<<grid,thread>>>(d_Rin, numThread, rLen, *d_ValueList, d_tempOutput);
    GPUFREE(d_tempOutput);
}
#endif
|
5fcbd68d316d25899b30f3a30c0579f94b78c8cc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.0.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date February 2016
@precisions normal z -> c d s
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 512
#define PRECISION_z
#define Ablockinfo(i,j) Ablockinfo[(i)*c_blocks + (j)]
#define Bblockinfo(i,j) Bblockinfo[(i)*c_blocks + (j)]
#define A(i,j) ((Ablockinfo(i,j)-1)*size_b*size_b)
#define B(i,j) ((Bblockinfo(i,j)-1)*size_b*size_b)
//============================================================
#define ldb m
#define lda m
#define ldc m
// every multiprocessor handles one BCSR-block
// Applies the LAPACK-style pivot sequence ipiv (1-based) to every BCSR block:
// one thread block per size_b x size_b matrix block, one thread per row-slice.
// The swaps must be applied sequentially in i (LAPACK ipiv semantics), which
// is why the loop is not parallelized over i.
// NOTE(review): each swap exchanges entries at offsets ...*size_b+i and
// ...*size_b+dst — whether that is a row or column interchange depends on the
// blocks' storage order; confirm against the BCSR layout used by the callers.
__global__ void
zbcsrlupivloc_kernel(
	int size_b,
	int kblocks,
	double **A,
	magma_int_t *ipiv)
{
	if( blockIdx.x < kblocks ) {
		if(threadIdx.x < size_b ){
			for( int i=0; i<size_b; i++){
				int dst = ipiv[i]-1;
				if( dst != i ){
					double *A1 = A[blockIdx.x]+threadIdx.x*size_b+i;
					double *A2 = A[blockIdx.x]+threadIdx.x*size_b+dst;
					double tmp = *A2;
					*A2 = *A1;
					*A1 = tmp;
				}
			}
		}
	}
}
/**
Purpose
-------
For a Block-CSR ILU factorization, this routine updates all blocks in
the trailing matrix.
Arguments
---------
@param[in]
size_b magma_int_t
blocksize in BCSR
@param[in]
kblocks magma_int_t
number of blocks
@param[in]
dA magmaDoubleComplex_ptr *
matrix in BCSR
@param[in]
ipiv magmaInt_ptr
array containing pivots
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
// Host entry point: applies the pivots in ipiv to all kblocks BCSR blocks.
// NOTE(review): this translation unit defines PRECISION_z, so the
// `#if defined(PRECISION_d)` guard compiles the launch out and the routine is
// a no-op returning MAGMA_SUCCESS here; presumably the d-precision file
// generated via "@precisions normal z -> c d s" is the active one — verify.
extern "C" magma_int_t
magma_zbcsrlupivloc(
	magma_int_t size_b,
	magma_int_t kblocks,
	magmaDoubleComplex_ptr *dA,
	magmaInt_ptr ipiv,
	magma_queue_t queue )
{
#if defined(PRECISION_d)
	// One 64-thread block per BCSR block, launched on the caller's queue.
	dim3 threads( 64, 1 );
	dim3 grid(kblocks, 1, 1);
	hipLaunchKernelGGL(( zbcsrlupivloc_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
		size_b, kblocks, dA, ipiv );
#endif
	return MAGMA_SUCCESS;
}
| 5fcbd68d316d25899b30f3a30c0579f94b78c8cc.cu | /*
-- MAGMA (version 2.0.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date February 2016
@precisions normal z -> c d s
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 512
#define PRECISION_z
#define Ablockinfo(i,j) Ablockinfo[(i)*c_blocks + (j)]
#define Bblockinfo(i,j) Bblockinfo[(i)*c_blocks + (j)]
#define A(i,j) ((Ablockinfo(i,j)-1)*size_b*size_b)
#define B(i,j) ((Bblockinfo(i,j)-1)*size_b*size_b)
//============================================================
#define ldb m
#define lda m
#define ldc m
// every multiprocessor handles one BCSR-block
// Applies the LAPACK-style pivot sequence ipiv (1-based) to every BCSR block:
// one thread block per size_b x size_b matrix block, one thread per row-slice.
// Swaps are applied sequentially in i, matching LAPACK ipiv semantics.
// NOTE(review): whether the exchanged offsets form a row or a column
// interchange depends on the blocks' storage order — confirm with callers.
__global__ void
zbcsrlupivloc_kernel(
	int size_b,
	int kblocks,
	double **A,
	magma_int_t *ipiv)
{
	if( blockIdx.x < kblocks ) {
		if(threadIdx.x < size_b ){
			for( int i=0; i<size_b; i++){
				int dst = ipiv[i]-1;
				if( dst != i ){
					double *A1 = A[blockIdx.x]+threadIdx.x*size_b+i;
					double *A2 = A[blockIdx.x]+threadIdx.x*size_b+dst;
					double tmp = *A2;
					*A2 = *A1;
					*A1 = tmp;
				}
			}
		}
	}
}
/**
Purpose
-------
For a Block-CSR ILU factorization, this routine updates all blocks in
the trailing matrix.
Arguments
---------
@param[in]
size_b magma_int_t
blocksize in BCSR
@param[in]
kblocks magma_int_t
number of blocks
@param[in]
dA magmaDoubleComplex_ptr *
matrix in BCSR
@param[in]
ipiv magmaInt_ptr
array containing pivots
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
// Host entry point: applies the pivots in ipiv to all kblocks BCSR blocks.
// NOTE(review): this file defines PRECISION_z, so the PRECISION_d guard makes
// the launch dead code here and the routine returns MAGMA_SUCCESS without
// doing work; presumably the auto-generated d-precision variant runs — verify.
extern "C" magma_int_t
magma_zbcsrlupivloc(
	magma_int_t size_b,
	magma_int_t kblocks,
	magmaDoubleComplex_ptr *dA,
	magmaInt_ptr ipiv,
	magma_queue_t queue )
{
#if defined(PRECISION_d)
	// One 64-thread block per BCSR block, launched on the caller's queue.
	dim3 threads( 64, 1 );
	dim3 grid(kblocks, 1, 1);
	zbcsrlupivloc_kernel<<< grid, threads, 0, queue->cuda_stream() >>>(
		size_b, kblocks, dA, ipiv );
#endif
	return MAGMA_SUCCESS;
}
|
6789f8d539918c849b371fc3c06e70432108b153.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA CORPORATION and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA CORPORATION is strictly prohibited.
*/
/** @file render_buffer.cu
 *  @author Thomas Müller & Alex Evans, NVIDIA
*/
#include <nerf-cuda/common_device.cuh>
#include <nerf-cuda/common.h>
#include <nerf-cuda/render_buffer.h>
#include <nerf-cuda/npy.hpp>
#include <tiny-cuda-nn/gpu_memory.h>
#include <filesystem/path.h>
#ifdef NGP_GUI
# ifdef _WIN32
# include <GL/gl3w.h>
# else
# include <GL/glew.h>
# endif
# include <GLFW/glfw3.h>
#endif
#include <cuda_gl_interop.h>
using namespace Eigen;
using namespace tcnn;
namespace fs = filesystem;
NGP_NAMESPACE_BEGIN
std::atomic<size_t> g_total_n_bytes_allocated{0};
// Releases the surface object and its backing CUDA array (safe to call when
// nothing is allocated) and returns the bytes to the global counter.
// NOTE(review): m_size is not reset, so an external free() followed by
// resize() to the same size would early-return with a freed array — confirm
// free() is only ever called from resize()/teardown.
void CudaSurface2D::free() {
	if (m_surface) {
		hipDestroySurfaceObject(m_surface);
	}
	m_surface = 0;
	if (m_array) {
		hipFreeArray(m_array);
		// The array is always float4 per texel (see resize()).
		g_total_n_bytes_allocated -= m_size.prod() * sizeof(float4);
	}
	m_array = nullptr;
}
// (Re)allocates the float4 CUDA array and surface object for the given size.
// No-op when the size is unchanged; otherwise frees the old resources first.
void CudaSurface2D::resize(const Vector2i& size) {
	if (size == m_size) {
		return;
	}
	free();
	m_size = size;
	hipChannelFormatDesc desc = hipCreateChannelDesc<float4>();
	// Surface load/store flag is required to write the array from kernels.
	CUDA_CHECK_THROW(hipMallocArray(&m_array, &desc, size.x(), size.y(), hipArraySurfaceLoadStore));
	g_total_n_bytes_allocated += m_size.prod() * sizeof(float4);
	struct hipResourceDesc resource_desc;
	memset(&resource_desc, 0, sizeof(resource_desc));
	resource_desc.resType = hipResourceTypeArray;
	resource_desc.res.array.array = m_array;
	CUDA_CHECK_THROW(hipCreateSurfaceObject(&m_surface, &resource_desc));
}
#ifdef NGP_GUI
// Destroys the CUDA mapping before deleting the GL texture it refers to —
// the order matters, since the mapping holds the registered GL resource.
GLTexture::~GLTexture() {
	m_cuda_mapping.reset();
	if (m_texture_id) {
		glDeleteTextures(1, &m_texture_id);
	}
}
// Lazily creates the underlying OpenGL texture object on first access.
GLuint GLTexture::texture() {
	if (m_texture_id == 0) {
		glGenTextures(1, &m_texture_id);
	}
	return m_texture_id;
}
// Lazily creates the CUDA<->GL mapping and returns its surface object.
hipSurfaceObject_t GLTexture::surface() {
	if (!m_cuda_mapping) {
		m_cuda_mapping = std::make_unique<CUDAMapping>(texture(), m_size);
	}
	return m_cuda_mapping->surface();
}
// Lazily creates the CUDA<->GL mapping and returns its backing CUDA array.
hipArray_t GLTexture::array() {
	if (!m_cuda_mapping) {
		m_cuda_mapping = std::make_unique<CUDAMapping>(texture(), m_size);
	}
	return m_cuda_mapping->array();
}
// Fallback path when CUDA/GL interop is unavailable (e.g. WSL): round-trips
// the CUDA surface contents through the CPU and uploads them via glTexImage2D.
// No-op when interop is active (the texture is written directly then).
void GLTexture::blit_from_cuda_mapping() {
	if (!m_cuda_mapping || m_cuda_mapping->is_interop()) {
		return;
	}
	// The CPU copy is only maintained in RGBA32F layout.
	if (m_internal_format != GL_RGBA32F || m_format != GL_RGBA || m_is_8bit) {
		throw std::runtime_error{"Can only blit from CUDA mapping if the texture is RGBA float."};
	}
	const float* data_cpu = m_cuda_mapping->data_cpu();
	glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32F, m_size.x(), m_size.y(), 0, GL_RGBA, GL_FLOAT, data_cpu);
}
// Uploads float pixel data from the host into the GL texture, resizing first.
void GLTexture::load(const float* data, Vector2i new_size, int n_channels) {
	resize(new_size, n_channels, false);
	glBindTexture(GL_TEXTURE_2D, m_texture_id);
	glTexImage2D(GL_TEXTURE_2D, 0, m_internal_format, new_size.x(), new_size.y(), 0, m_format, GL_FLOAT, data);
}
// Uploads 8-bit pixel data from the host into the GL texture, resizing first.
void GLTexture::load(const uint8_t* data, Vector2i new_size, int n_channels) {
	resize(new_size, n_channels, true);
	glBindTexture(GL_TEXTURE_2D, m_texture_id);
	glTexImage2D(GL_TEXTURE_2D, 0, m_internal_format, new_size.x(), new_size.y(), 0, m_format, GL_UNSIGNED_BYTE, data);
}
// Recreates the GL texture with the requested size/channel-count/bit-depth.
// No-op when nothing changed; otherwise the CUDA mapping is dropped (it is
// bound to the old texture object) and recreated lazily on next use.
void GLTexture::resize(const Vector2i& new_size, int n_channels, bool is_8bit) {
	if (m_size == new_size && m_n_channels == n_channels && m_is_8bit == is_8bit) {
		return;
	}
	if (m_texture_id) {
		m_cuda_mapping.reset();
		glDeleteTextures(1, &m_texture_id);
		m_texture_id = 0;
	}
	glGenTextures(1, &m_texture_id);
	glBindTexture(GL_TEXTURE_2D, m_texture_id);
	switch (n_channels) {
		case 1: m_internal_format = is_8bit ? GL_R8 : GL_R32F; m_format = GL_RED; break;
		case 2: m_internal_format = is_8bit ? GL_RG8 : GL_RG32F; m_format = GL_RG; break;
		case 3: m_internal_format = is_8bit ? GL_RGB8 : GL_RGB32F; m_format = GL_RGB; break;
		case 4: m_internal_format = is_8bit ? GL_RGBA8 : GL_RGBA32F; m_format = GL_RGBA; break;
		// NOTE(review): on an unsupported channel count this only logs and
		// falls through with stale m_internal_format/m_format — confirm callers
		// never pass n_channels outside 1..4.
		default: tlog::error() << "Unsupported number of channels: " << n_channels;
	}
	m_is_8bit = is_8bit;
	m_size = new_size;
	m_n_channels = n_channels;
	// Allocate storage without initial data; clamped, nearest-filtered.
	glTexImage2D(GL_TEXTURE_2D, 0, m_internal_format, new_size.x(), new_size.y(), 0, m_format, is_8bit ? GL_UNSIGNED_BYTE : GL_FLOAT, nullptr);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
}
// Detects Windows Subsystem for Linux by looking for "microsoft" in the
// kernel release string; WSL lacks CUDA/GL interop support.
static bool is_wsl() {
#ifdef _WIN32
	return false;
#else
	fs::path path = "/proc/sys/kernel/osrelease";
	if (!path.exists()) {
		return false;
	}
	std::ifstream f{path.str()};
	std::string content((std::istreambuf_iterator<char>(f)), (std::istreambuf_iterator<char>()));
	return content.find("microsoft") != std::string::npos;
#endif
}
// Maps a GL texture for CUDA access. Tries true CUDA/GL interop first; if
// registration fails once (e.g. on WSL), interop is disabled process-wide
// (static flag) and a plain CUDA surface plus a CPU staging buffer is used,
// to be blitted into the texture via blit_from_cuda_mapping().
GLTexture::CUDAMapping::CUDAMapping(GLuint texture_id, const Vector2i& size) : m_size{size} {
	static bool s_is_cuda_interop_supported = !is_wsl();
	if (s_is_cuda_interop_supported) {
		hipError_t err = hipGraphicsGLRegisterImage(&m_graphics_resource, texture_id, GL_TEXTURE_2D, hipGraphicsRegisterFlagsSurfaceLoadStore);
		if (err != hipSuccess) {
			s_is_cuda_interop_supported = false;
			hipGetLastError(); // Reset error
		}
	}
	if (!s_is_cuda_interop_supported) {
		// falling back to a regular cuda surface + CPU copy of data
		m_cuda_surface = std::make_unique<CudaSurface2D>();
		m_cuda_surface->resize(size);
		// 4 floats (RGBA) per pixel for the host staging buffer.
		m_data_cpu.resize(m_size.prod() * 4);
		return;
	}
	// Interop path: map the registered resource and wrap level 0 in a surface.
	CUDA_CHECK_THROW(hipGraphicsMapResources(1, &m_graphics_resource));
	CUDA_CHECK_THROW(hipGraphicsSubResourceGetMappedArray(&m_mapped_array, m_graphics_resource, 0, 0));
	struct hipResourceDesc resource_desc;
	memset(&resource_desc, 0, sizeof(resource_desc));
	resource_desc.resType = hipResourceTypeArray;
	resource_desc.res.array.array = m_mapped_array;
	CUDA_CHECK_THROW(hipCreateSurfaceObject(&m_surface, &resource_desc));
}
// Tears down the interop path (surface, then unmap, then unregister — order
// matters). In the fallback path m_surface is never set, so nothing is done
// here; the owned CudaSurface2D cleans itself up.
GLTexture::CUDAMapping::~CUDAMapping() {
	if (m_surface) {
		hipDestroySurfaceObject(m_surface);
		hipGraphicsUnmapResources(1, &m_graphics_resource);
		hipGraphicsUnregisterResource(m_graphics_resource);
	}
}
// Synchronously copies the RGBA32F surface contents into the cached host
// buffer and returns it (valid until the next call or destruction).
const float* GLTexture::CUDAMapping::data_cpu() {
	CUDA_CHECK_THROW(hipMemcpy2DFromArray(m_data_cpu.data(), m_size.x() * sizeof(float) * 4, array(), 0, 0, m_size.x() * sizeof(float) * 4, m_size.y(), hipMemcpyDeviceToHost));
	return m_data_cpu.data();
}
#endif //NGP_GUI
// Folds the current frame into the running average kept in accumulate_buffer:
// new = (old * sample_count + frame) / (sample_count + 1).
// One thread per pixel; VisPosNeg averages the signed value encoded as a
// positive/negative channel pair, SRGB converts the frame to sRGB first.
__global__ void accumulate_kernel(Vector2i resolution, Array4f* frame_buffer, Array4f* accumulate_buffer, float sample_count, EColorSpace color_space) {
	uint32_t x = threadIdx.x + blockDim.x * blockIdx.x;
	uint32_t y = threadIdx.y + blockDim.y * blockIdx.y;
	if (x >= resolution.x() || y >= resolution.y()) {
		return;
	}
	uint32_t idx = x + resolution.x() * y;
	Array4f color = frame_buffer[idx];
	Array4f tmp = accumulate_buffer[idx];
	switch (color_space) {
		case EColorSpace::VisPosNeg:
		{
			// Average the signed scalar, then re-split into pos/neg channels.
			float val = color.x() - color.y();
			float tmp_val = tmp.x() - tmp.y();
			tmp_val = (tmp_val * sample_count + val) / (sample_count+1);
			tmp.x() = fmaxf(tmp_val, 0.0f);
			tmp.y() = fmaxf(-tmp_val, 0.0f);
			break;
		}
		case EColorSpace::SRGB:
			color.head<3>() = linear_to_srgb(color.head<3>());
			// fallthrough is intended!
		case EColorSpace::Linear:
			tmp.head<3>() = (tmp.head<3>() * sample_count + color.head<3>()) / (sample_count+1); break;
	}
	// Alpha is always averaged linearly, independent of the color space.
	tmp.w() = (tmp.w() * sample_count + color.w()) / (sample_count+1);
	accumulate_buffer[idx] = tmp;
}
// Applies the selected tonemapping curve to a non-negative linear RGB color.
// ACES and Hable are both expressed as a rational polynomial
// (k0*x^2 + k1*x + k2) / (k3*x^2 + k4*x + k5); Reinhard and Identity return
// early. Input is clamped to >= 0 first.
__device__ Array3f tonemap(Array3f x, ETonemapCurve curve) {
	if (curve == ETonemapCurve::Identity) {
		return x;
	}
	x = x.cwiseMax(0.f);
	// Rational-polynomial coefficients, filled in per curve below.
	float k0, k1, k2, k3, k4, k5;
	if (curve == ETonemapCurve::ACES) {
		// Source: ACES approximation : https://knarkowicz.wordpress.com/2016/01/06/aces-filmic-tone-mapping-curve/
		// Include pre - exposure cancelation in constants
		k0 = 0.6f * 0.6f * 2.51f;
		k1 = 0.6f * 0.03f;
		k2 = 0.0f;
		k3 = 0.6f * 0.6f * 2.43f;
		k4 = 0.6f * 0.59f;
		k5 = 0.14f;
	} else if (curve == ETonemapCurve::Hable) {
		// Source: https://64.github.io/tonemapping/
		const float A = 0.15f;
		const float B = 0.50f;
		const float C = 0.10f;
		const float D = 0.20f;
		const float E = 0.02f;
		const float F = 0.30f;
		k0 = A * F - A * E;
		k1 = C * B * F - B * E;
		k2 = 0.0f;
		k3 = A * F;
		k4 = B * F;
		k5 = D * F * F;
		// Normalize so that the "linear white point" W maps to 1.
		const float W = 11.2f;
		const float nom = k0 * (W*W) + k1 * W + k2;
		const float denom = k3 * (W*W) + k4 * W + k5;
		const float white_scale = denom / nom;
		// Include white scale and exposure bias in rational polynomial coefficients
		k0 = 4.0f * k0 * white_scale;
		k1 = 2.0f * k1 * white_scale;
		k2 = k2 * white_scale;
		k3 = 4.0f * k3;
		k4 = 2.0f * k4;
	} else { //if (curve == ETonemapCurve::Reinhard)
		// Luminance-based Reinhard: x * 1/(Y+1) with Rec.709 luma weights.
		const Vector3f luminance_coefficients = Vector3f(0.2126f, 0.7152f, 0.0722f);
		float Y = luminance_coefficients.dot(x.matrix());
		return x * (1.f / (Y + 1.0f));
	}
	Array3f color_sq = x * x;
	Array3f nom = color_sq * k0 + k1 * x + k2;
	Array3f denom = k3 * color_sq + k4 * x + k5;
	Array3f tonemapped_color = nom / denom;
	return tonemapped_color;
}
// Full color pipeline: input space -> linear -> exposure -> tonemap curve
// -> output space. Exposure is per-channel, in stops (2^exposure gain).
__device__ Array3f tonemap(Array3f col, const Array3f& exposure, ETonemapCurve tonemap_curve, EColorSpace color_space, EColorSpace output_color_space) {
	// Conversion to output by
	// 1. converting to linear. (VisPosNeg is treated as linear red/green)
	if (color_space == EColorSpace::SRGB) {
		col = srgb_to_linear(col);
	}
	// 2. applying exposure in linear space
	col *= Array3f::Constant(2.0f).pow(exposure);
	// 3. tonemapping in linear space according to the specified curve
	col = tonemap(col, tonemap_curve);
	// 4. converting to output color space.
	if (output_color_space == EColorSpace::SRGB) {
		col = linear_to_srgb(col);
	}
	return col;
}
// Alpha-blends a reference image over the render surface, mapping output
// pixels through the virtual camera (zoom about screen_center, fov_axis
// scaling) so the image aligns with the rendered view. One thread per output
// pixel; pixels falling outside the source image are treated as transparent.
// Fix: removed unused locals `idx` and `srcidx` from the original.
__global__ void overlay_image_kernel(
	Vector2i resolution,          // output surface resolution
	float alpha,                  // blend factor: 1 = image only, 0 = keep surface
	Array3f exposure,             // per-channel exposure (stops), applied in linear space
	Array4f background_color,     // composited under the image; expressed in sRGB
	const void* __restrict__ image,
	EImageDataType image_data_type,
	Vector2i image_resolution,
	ETonemapCurve tonemap_curve,
	EColorSpace color_space,
	EColorSpace output_color_space,
	int fov_axis,
	float zoom,
	Eigen::Vector2f screen_center,
	hipSurfaceObject_t surface
) {
	uint32_t x = threadIdx.x + blockDim.x * blockIdx.x;
	uint32_t y = threadIdx.y + blockDim.y * blockIdx.y;
	if (x >= resolution.x() || y >= resolution.y()) {
		return;
	}
	// Map the output pixel into source-image coordinates (nearest neighbor).
	float scale = image_resolution[fov_axis] / float(resolution[fov_axis]);
	float fx = x+0.5f;
	float fy = y+0.5f;
	fx-=resolution.x()*0.5f; fx/=zoom; fx+=screen_center.x() * resolution.x();
	fy-=resolution.y()*0.5f; fy/=zoom; fy+=screen_center.y() * resolution.y();
	float u = (fx-resolution.x()*0.5f) * scale + image_resolution.x()*0.5f;
	float v = (fy-resolution.y()*0.5f) * scale + image_resolution.y()*0.5f;
	int srcx = floorf(u);
	int srcy = floorf(v);
	Array4f val;
	if (srcx >= image_resolution.x() || srcy >= image_resolution.y() || srcx < 0 || srcy < 0) {
		val = Array4f::Zero();
	} else {
		val = read_rgba(Vector2i{srcx, srcy}, image_resolution, image, image_data_type);
	}
	Array4f color = {val[0], val[1], val[2], val[3]};
	// The background color is represented in SRGB, so convert
	// to linear if that's not the space in which we're rendering.
	if (color_space != EColorSpace::SRGB) {
		background_color.head<3>() = srgb_to_linear(background_color.head<3>());
	} else {
		// Alpha appears to be premultiplied here: un-premultiply, convert,
		// re-premultiply (guarding against division by zero alpha).
		if (color.w() > 0) {
			color.head<3>() = linear_to_srgb(color.head<3>() / color.w()) * color.w();
		} else {
			color.head<3>() = Array3f::Zero();
		}
	}
	// Composite the background under the image color.
	float weight = (1 - color.w()) * background_color.w();
	color.head<3>() += background_color.head<3>() * weight;
	color.w() += weight;
	color.head<3>() = tonemap(color.head<3>(), exposure, tonemap_curve, color_space, output_color_space);
	// Blend with whatever is already on the surface.
	Array4f prev_color;
	surf2Dread((float4*)&prev_color, surface, x * sizeof(float4), y);
	color = color * alpha + prev_color * (1.f-alpha);
	surf2Dwrite(to_float4(color), surface, x * sizeof(float4), y);
}
// Polynomial approximation of the Turbo colormap: maps a scalar in [0,1]
// (input is clamped via __saturatef) to an RGB color using per-channel
// degree-5 polynomials, split into a quartic and a quadratic tail.
__device__ Array3f colormap_turbo(float x) {
	const Vector4f kRedVec4 = Vector4f(0.13572138f, 4.61539260f, -42.66032258f, 132.13108234f);
	const Vector4f kGreenVec4 = Vector4f(0.09140261f, 2.19418839f, 4.84296658f, -14.18503333f);
	const Vector4f kBlueVec4 = Vector4f(0.10667330f, 12.64194608f, -60.58204836f, 110.36276771f);
	const Vector2f kRedVec2 = Vector2f(-152.94239396f, 59.28637943f);
	const Vector2f kGreenVec2 = Vector2f(4.27729857f, 2.82956604f);
	const Vector2f kBlueVec2 = Vector2f(-89.90310912f, 27.34824973f);
	x = __saturatef(x);
	// v4 = (1, x, x^2, x^3); v2 = (x^4, x^5).
	Vector4f v4 = Vector4f{ 1.0f, x, x * x, x * x * x };
	Vector2f v2 = Vector2f{ v4.w() * x, v4.w() * v4.z() };
	return Array3f{
		v4.dot(kRedVec4) + v2.dot(kRedVec2),
		v4.dot(kGreenVec4) + v2.dot(kGreenVec2),
		v4.dot(kBlueVec4) + v2.dot(kBlueVec2)
	};
}
// Alpha-blends a false-color (Turbo) visualization of the depth buffer over
// the render surface, using the same camera mapping as overlay_image_kernel.
// Out-of-image pixels blend in transparent black. One thread per pixel.
// Fix: removed unused local `idx` from the original.
__global__ void overlay_depth_kernel(
	Vector2i resolution,
	float alpha,                  // blend factor: 1 = depth overlay only
	const float* __restrict__ depth,
	float depth_scale,            // multiplier mapping depth into colormap range
	Vector2i image_resolution,
	int fov_axis,
	float zoom, Eigen::Vector2f screen_center,
	hipSurfaceObject_t surface
) {
	uint32_t x = threadIdx.x + blockDim.x * blockIdx.x;
	uint32_t y = threadIdx.y + blockDim.y * blockIdx.y;
	if (x >= resolution.x() || y >= resolution.y()) {
		return;
	}
	// Map the output pixel into depth-buffer coordinates (nearest neighbor).
	float scale = image_resolution[fov_axis] / float(resolution[fov_axis]);
	float fx = x+0.5f;
	float fy = y+0.5f;
	fx-=resolution.x()*0.5f; fx/=zoom; fx+=screen_center.x() * resolution.x();
	fy-=resolution.y()*0.5f; fy/=zoom; fy+=screen_center.y() * resolution.y();
	float u = (fx-resolution.x()*0.5f) * scale + image_resolution.x()*0.5f;
	float v = (fy-resolution.y()*0.5f) * scale + image_resolution.y()*0.5f;
	int srcx = floorf(u);
	int srcy = floorf(v);
	uint32_t srcidx = srcx + image_resolution.x() * srcy;
	Array4f color;
	if (srcx >= image_resolution.x() || srcy >= image_resolution.y() || srcx < 0 || srcy < 0) {
		color = {0.0f, 0.0f, 0.0f, 0.0f};
	} else {
		float depth_value = depth[srcidx] * depth_scale;
		Array3f c = colormap_turbo(depth_value);
		color = {c[0], c[1], c[2], 1.0f};
	}
	Array4f prev_color;
	surf2Dread((float4*)&prev_color, surface, x * sizeof(float4), y);
	color = color * alpha + prev_color * (1.f-alpha);
	surf2Dwrite(to_float4(color), surface, x * sizeof(float4), y);
}
// Polynomial approximation of the Viridis colormap: maps a scalar in [0,1]
// (clamped via __saturatef) to RGB via a degree-6 polynomial in Horner form.
__device__ Array3f colormap_viridis(float x) {
	const Array3f c0 = Array3f{0.2777273272234177f, 0.005407344544966578f, 0.3340998053353061f};
	const Array3f c1 = Array3f{0.1050930431085774f, 1.404613529898575f, 1.384590162594685f};
	const Array3f c2 = Array3f{-0.3308618287255563f, 0.214847559468213f, 0.09509516302823659f};
	const Array3f c3 = Array3f{-4.634230498983486f, -5.799100973351585f, -19.33244095627987f};
	const Array3f c4 = Array3f{6.228269936347081f, 14.17993336680509f, 56.69055260068105f};
	const Array3f c5 = Array3f{4.776384997670288f, -13.74514537774601f, -65.35303263337234f};
	const Array3f c6 = Array3f{-5.435455855934631f, 4.645852612178535f, 26.3124352495832f};
	x = __saturatef(x);
	return (c0+x*(c1+x*(c2+x*(c3+x*(c4+x*(c5+x*c6))))));
}
// Modulates the surface with a false-color visualization of the training
// error map: the surface's luminance is kept and tinted with a Turbo or
// Viridis color derived from the (average-normalized) error.
// Fix: removed unused local `idx` from the original.
// NOTE(review): the `to_srgb` parameter is unused; kept for interface
// compatibility with the host-side wrapper — confirm whether conversion was
// intended here.
__global__ void overlay_false_color_kernel(Vector2i resolution, Vector2i training_resolution, bool to_srgb, int fov_axis, hipSurfaceObject_t surface, const float *error_map, Vector2i error_map_resolution, const float *average, float brightness, bool viridis) {
	uint32_t x = threadIdx.x + blockDim.x * blockIdx.x;
	uint32_t y = threadIdx.y + blockDim.y * blockIdx.y;
	if (x >= resolution.x() || y >= resolution.y()) {
		return;
	}
	// Normalize by the running average error (epsilon guards divide-by-zero).
	float error_map_scale = brightness/(0.0000001f+average[0]); // average maps to 1/16th
	float scale = training_resolution[fov_axis] / float(resolution[fov_axis]);
	float u = (x+0.5f-resolution.x()*0.5f) * scale + training_resolution.x()*0.5f;
	float v = (y+0.5f-resolution.y()*0.5f) * scale + training_resolution.y()*0.5f;
	int srcx = floorf(u * error_map_resolution.x() / float(max(1.f, (float)training_resolution.x())));
	int srcy = floorf(v * error_map_resolution.y() / float(max(1.f, (float)training_resolution.y())));
	uint32_t srcidx = srcx + error_map_resolution.x() * srcy;
	if (srcx >= error_map_resolution.x() || srcy >= error_map_resolution.y() || srcx<0 || srcy<0) {
		return;
	}
	float err = error_map[srcidx] * error_map_scale;
	if (viridis) {
		// Compress to [0,1) so large errors don't saturate the colormap.
		err *= 1.f / (1.f+err);
	}
	Array4f color;
	surf2Dread((float4*)&color, surface, x * sizeof(float4), y);
	// Tint the surface's Rec.709 luminance with the error color.
	Array3f c = viridis ? colormap_viridis(err) : colormap_turbo(err);
	float grey = color.x() * 0.2126f + color.y() * 0.7152f + color.z() * 0.0722f;
	color.x() = grey*__saturatef(c.x());
	color.y() = grey*__saturatef(c.y());
	color.z() = grey*__saturatef(c.z());
	surf2Dwrite(to_float4(color), surface, x * sizeof(float4), y);
}
__global__ void tonemap_kernel(Vector2i resolution, float exposure, Array4f background_color, Array4f* accumulate_buffer, EColorSpace color_space, EColorSpace output_color_space, ETonemapCurve tonemap_curve, bool clamp_output_color, hipSurfaceObject_t surface) {
uint32_t x = threadIdx.x + blockDim.x * blockIdx.x;
uint32_t y = threadIdx.y + blockDim.y * blockIdx.y;
if (x >= resolution.x() || y >= resolution.y()) {
return;
}
uint32_t idx = x + resolution.x() * y;
// The background color is represented in SRGB, so convert
// to linear if that's not the space in which we're rendering.
if (color_space != EColorSpace::SRGB) {
background_color.head<3>() = srgb_to_linear(background_color.head<3>());
}
Array4f color = accumulate_buffer[idx];
float weight = (1 - color.w()) * background_color.w();
color.head<3>() += background_color.head<3>() * weight;
color.w() += weight;
color.head<3>() = tonemap(color.head<3>(), Array3f::Constant(exposure), tonemap_curve, color_space, output_color_space);
if (clamp_output_color) {
color = color.cwiseMax(0.0f).cwiseMin(1.0f);
}
surf2Dwrite(to_float4(color), surface, x * sizeof(float4), y);
}
// Copies the DLSS output surface into the destination surface, one thread
// per texel; threads outside the resolution do nothing.
__global__ void dlss_splat_kernel(
	Vector2i resolution,
	hipSurfaceObject_t dlss_surface,
	hipSurfaceObject_t surface
) {
	const uint32_t px = blockIdx.x * blockDim.x + threadIdx.x;
	const uint32_t py = blockIdx.y * blockDim.y + threadIdx.y;
	if (px < resolution.x() && py < resolution.y()) {
		float4 texel;
		surf2Dread(&texel, dlss_surface, px * sizeof(float4), py);
		surf2Dwrite(texel, surface, px * sizeof(float4), py);
	}
}
// Resizes the internal render-resolution buffers; the output surface gets the
// DLSS output resolution when DLSS is active, otherwise the same size.
// Accumulation restarts only if the output resolution actually changed.
void CudaRenderBuffer::resize(const Vector2i& res) {
	m_in_resolution = res;
	m_frame_buffer.enlarge(res.x() * res.y());
	m_depth_buffer.enlarge(res.x() * res.y());
	m_accumulate_buffer.enlarge(res.x() * res.y());
	Vector2i out_res = m_dlss ? m_dlss->out_resolution() : res;
	auto prev_out_res = out_resolution();
	m_surface_provider->resize(out_res);
	if (out_resolution() != prev_out_res) {
		reset_accumulation();
	}
}
// Asynchronously zeroes the frame and depth buffers on the given stream.
void CudaRenderBuffer::clear_frame(hipStream_t stream) {
	CUDA_CHECK_THROW(hipMemsetAsync(m_frame_buffer.data(), 0, m_frame_buffer.bytes(), stream));
	CUDA_CHECK_THROW(hipMemsetAsync(m_depth_buffer.data(), 0, m_depth_buffer.bytes(), stream));
}
// Folds the current frame into the running average and bumps the sample
// count. With DLSS active, accumulation is effectively disabled: the buffer
// is reset every frame (accum_spp forced to 0).
// NOTE(review): the `exposure` parameter is unused here — exposure is applied
// later in tonemap(); confirm the parameter isn't expected to do anything.
void CudaRenderBuffer::accumulate(float exposure, hipStream_t stream) {
	Vector2i res = in_resolution();
	uint32_t accum_spp = m_dlss ? 0 : m_spp;
	if (accum_spp == 0) {
		CUDA_CHECK_THROW(hipMemsetAsync(m_accumulate_buffer.data(), 0, m_accumulate_buffer.bytes(), stream));
	}
	const dim3 threads = { 16, 8, 1 };
	const dim3 blocks = { div_round_up((uint32_t)res.x(), threads.x), div_round_up((uint32_t)res.y(), threads.y), 1 };
	hipLaunchKernelGGL(( accumulate_kernel), dim3(blocks), dim3(threads), 0, stream,
		res,
		frame_buffer(),
		accumulate_buffer(),
		(float)accum_spp,
		m_color_space
	);
	++m_spp;
}
// Tonemaps the accumulated image into the output surface. With DLSS active,
// the tonemapped (clamped, when sRGB) image goes into the DLSS input frame,
// DLSS upscales it, and the result is splatted to the output surface.
void CudaRenderBuffer::tonemap(float exposure, const Array4f& background_color, EColorSpace output_color_space, hipStream_t stream) {
	assert(m_dlss || out_resolution() == in_resolution());
	auto res = m_dlss ? in_resolution() : out_resolution();
	const dim3 threads = { 16, 8, 1 };
	const dim3 blocks = { div_round_up((uint32_t)res.x(), threads.x), div_round_up((uint32_t)res.y(), threads.y), 1 };
	hipLaunchKernelGGL(( tonemap_kernel), dim3(blocks), dim3(threads), 0, stream,
		res,
		exposure,
		background_color,
		accumulate_buffer(),
		m_color_space,
		output_color_space,
		m_tonemap_curve,
		m_dlss && output_color_space == EColorSpace::SRGB, // DLSS needs LDR input in sRGB mode
		m_dlss ? m_dlss->frame() : surface()
	);
	if (m_dlss) {
		assert(out_resolution() == m_dlss->out_resolution());
		assert(m_spp >= 1);
		// Jitter offset must match the one used when rendering sample m_spp-1.
		uint32_t sample_index = m_spp - 1;
		m_dlss->run(
			res,
			output_color_space == EColorSpace::Linear, /* HDR mode */
			m_dlss_sharpening,
			Vector2f::Constant(0.5f) - ld_random_pixel_offset(sample_index), /* jitter offset in [-0.5, 0.5] */
			sample_index == 0 /* reset history */
		);
		auto out_res = out_resolution();
		const dim3 out_blocks = { div_round_up((uint32_t)out_res.x(), threads.x), div_round_up((uint32_t)out_res.y(), threads.y), 1 };
		hipLaunchKernelGGL(( dlss_splat_kernel), dim3(out_blocks), dim3(threads), 0, stream, out_res, m_dlss->output(), surface());
	}
}
// Host wrapper: launches overlay_image_kernel over the full output surface.
// See the kernel for the blending/camera-mapping semantics.
void CudaRenderBuffer::overlay_image(
	float alpha,
	const Eigen::Array3f& exposure,
	const Array4f& background_color,
	EColorSpace output_color_space,
	const void* __restrict__ image,
	EImageDataType image_data_type,
	const Vector2i& image_resolution,
	int fov_axis,
	float zoom,
	const Eigen::Vector2f& screen_center,
	hipStream_t stream
) {
	auto res = out_resolution();
	const dim3 threads = { 16, 8, 1 };
	const dim3 blocks = { div_round_up((uint32_t)res.x(), threads.x), div_round_up((uint32_t)res.y(), threads.y), 1 };
	hipLaunchKernelGGL(( overlay_image_kernel), dim3(blocks), dim3(threads), 0, stream,
		res,
		alpha,
		exposure,
		background_color,
		image,
		image_data_type,
		image_resolution,
		m_tonemap_curve,
		m_color_space,
		output_color_space,
		fov_axis,
		zoom,
		screen_center,
		surface()
	);
}
// Host wrapper: launches overlay_depth_kernel over the full output surface.
void CudaRenderBuffer::overlay_depth(
	float alpha,
	const float* __restrict__ depth,
	float depth_scale,
	const Vector2i& image_resolution,
	int fov_axis,
	float zoom,
	const Eigen::Vector2f& screen_center,
	hipStream_t stream
) {
	auto res = out_resolution();
	const dim3 threads = { 16, 8, 1 };
	const dim3 blocks = { div_round_up((uint32_t)res.x(), threads.x), div_round_up((uint32_t)res.y(), threads.y), 1 };
	hipLaunchKernelGGL(( overlay_depth_kernel), dim3(blocks), dim3(threads), 0, stream,
		res,
		alpha,
		depth,
		depth_scale,
		image_resolution,
		fov_axis,
		zoom,
		screen_center,
		surface()
	);
}
// Host wrapper: launches overlay_false_color_kernel over the output surface.
// NOTE(review): `to_srgb` is forwarded but unused by the kernel — verify.
void CudaRenderBuffer::overlay_false_color(Vector2i training_resolution, bool to_srgb, int fov_axis, hipStream_t stream, const float* error_map, Vector2i error_map_resolution, const float* average, float brightness, bool viridis) {
	auto res = out_resolution();
	const dim3 threads = { 16, 8, 1 };
	const dim3 blocks = { div_round_up((uint32_t)res.x(), threads.x), div_round_up((uint32_t)res.y(), threads.y), 1 };
	hipLaunchKernelGGL(( overlay_false_color_kernel), dim3(blocks), dim3(threads), 0, stream,
		res,
		training_resolution,
		to_srgb,
		fov_axis,
		surface(),
		error_map,
		error_map_resolution,
		average,
		brightness,
		viridis
	);
}
// Enables DLSS upscaling to the given output resolution (recreating the DLSS
// instance only when the target resolution changed), then re-runs resize()
// so the surface adopts the new output resolution. Throws without Vulkan/NGX.
void CudaRenderBuffer::enable_dlss(const Eigen::Vector2i& out_res) {
#ifdef NGP_VULKAN
	if (!m_dlss || m_dlss->out_resolution() != out_res) {
		m_dlss = dlss_init(out_res);
	}
	resize(in_resolution());
#else
	throw std::runtime_error{"NGP was compiled without Vulkan/NGX/DLSS support."};
#endif
}
// Drops the DLSS instance; subsequent frames render without upscaling.
// (Buffers are resized lazily on the next resize() call.)
void CudaRenderBuffer::disable_dlss() {
	m_dlss = nullptr;
}
NGP_NAMESPACE_END
| 6789f8d539918c849b371fc3c06e70432108b153.cu | /*
* Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA CORPORATION and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA CORPORATION is strictly prohibited.
*/
/** @file render_buffer.cu
* @author Thomas Müller & Alex Evans, NVIDIA
*/
#include <nerf-cuda/common_device.cuh>
#include <nerf-cuda/common.h>
#include <nerf-cuda/render_buffer.h>
#include <nerf-cuda/npy.hpp>
#include <tiny-cuda-nn/gpu_memory.h>
#include <filesystem/path.h>
#ifdef NGP_GUI
# ifdef _WIN32
# include <GL/gl3w.h>
# else
# include <GL/glew.h>
# endif
# include <GLFW/glfw3.h>
#endif
#include <cuda_gl_interop.h>
using namespace Eigen;
using namespace tcnn;
namespace fs = filesystem;
NGP_NAMESPACE_BEGIN
std::atomic<size_t> g_total_n_bytes_allocated{0};
// Releases the surface object and its backing CUDA array (idempotent) and
// returns the bytes to the global counter. NOTE(review): m_size is not reset,
// so free() followed by resize() to the same size would early-return with a
// freed array — confirm free() is only called from resize()/teardown.
void CudaSurface2D::free() {
	if (m_surface) {
		cudaDestroySurfaceObject(m_surface);
	}
	m_surface = 0;
	if (m_array) {
		cudaFreeArray(m_array);
		// The array is always float4 per texel (see resize()).
		g_total_n_bytes_allocated -= m_size.prod() * sizeof(float4);
	}
	m_array = nullptr;
}
// (Re)allocates the float4 CUDA array and surface object for the given size.
// No-op when unchanged; frees old resources first otherwise.
void CudaSurface2D::resize(const Vector2i& size) {
	if (size == m_size) {
		return;
	}
	free();
	m_size = size;
	cudaChannelFormatDesc desc = cudaCreateChannelDesc<float4>();
	// Surface load/store flag is required to write the array from kernels.
	CUDA_CHECK_THROW(cudaMallocArray(&m_array, &desc, size.x(), size.y(), cudaArraySurfaceLoadStore));
	g_total_n_bytes_allocated += m_size.prod() * sizeof(float4);
	struct cudaResourceDesc resource_desc;
	memset(&resource_desc, 0, sizeof(resource_desc));
	resource_desc.resType = cudaResourceTypeArray;
	resource_desc.res.array.array = m_array;
	CUDA_CHECK_THROW(cudaCreateSurfaceObject(&m_surface, &resource_desc));
}
#ifdef NGP_GUI
// Destroys the CUDA mapping before deleting the GL texture it refers to.
GLTexture::~GLTexture() {
	m_cuda_mapping.reset();
	if (m_texture_id) {
		glDeleteTextures(1, &m_texture_id);
	}
}
// Lazily creates the underlying OpenGL texture object on first access.
GLuint GLTexture::texture() {
	if (!m_texture_id) {
		glGenTextures(1, &m_texture_id);
	}
	return m_texture_id;
}
// Lazily creates the CUDA<->GL mapping and returns its surface object.
cudaSurfaceObject_t GLTexture::surface() {
	if (!m_cuda_mapping) {
		m_cuda_mapping = std::make_unique<CUDAMapping>(texture(), m_size);
	}
	return m_cuda_mapping->surface();
}
// Lazily creates the CUDA<->GL mapping and returns its backing CUDA array.
cudaArray_t GLTexture::array() {
	if (!m_cuda_mapping) {
		m_cuda_mapping = std::make_unique<CUDAMapping>(texture(), m_size);
	}
	return m_cuda_mapping->array();
}
// Fallback when CUDA/GL interop is unavailable: round-trips the CUDA surface
// contents through the CPU and uploads them via glTexImage2D. No-op when
// interop is active. Only the RGBA32F layout is supported.
void GLTexture::blit_from_cuda_mapping() {
	if (!m_cuda_mapping || m_cuda_mapping->is_interop()) {
		return;
	}
	if (m_internal_format != GL_RGBA32F || m_format != GL_RGBA || m_is_8bit) {
		throw std::runtime_error{"Can only blit from CUDA mapping if the texture is RGBA float."};
	}
	const float* data_cpu = m_cuda_mapping->data_cpu();
	glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32F, m_size.x(), m_size.y(), 0, GL_RGBA, GL_FLOAT, data_cpu);
}
// Uploads float pixel data from the host into the GL texture, resizing first.
void GLTexture::load(const float* data, Vector2i new_size, int n_channels) {
	resize(new_size, n_channels, false);
	glBindTexture(GL_TEXTURE_2D, m_texture_id);
	glTexImage2D(GL_TEXTURE_2D, 0, m_internal_format, new_size.x(), new_size.y(), 0, m_format, GL_FLOAT, data);
}
// Uploads 8-bit pixel data from the host into the GL texture, resizing first.
void GLTexture::load(const uint8_t* data, Vector2i new_size, int n_channels) {
	resize(new_size, n_channels, true);
	glBindTexture(GL_TEXTURE_2D, m_texture_id);
	glTexImage2D(GL_TEXTURE_2D, 0, m_internal_format, new_size.x(), new_size.y(), 0, m_format, GL_UNSIGNED_BYTE, data);
}
// Recreates the GL texture with the requested size/channel-count/bit-depth.
// No-op when nothing changed; otherwise drops the CUDA mapping (bound to the
// old texture object) and recreates the texture with clamped, nearest-filter
// sampling.
void GLTexture::resize(const Vector2i& new_size, int n_channels, bool is_8bit) {
	if (m_size == new_size && m_n_channels == n_channels && m_is_8bit == is_8bit) {
		return;
	}
	if (m_texture_id) {
		m_cuda_mapping.reset();
		glDeleteTextures(1, &m_texture_id);
		m_texture_id = 0;
	}
	glGenTextures(1, &m_texture_id);
	glBindTexture(GL_TEXTURE_2D, m_texture_id);
	switch (n_channels) {
		case 1: m_internal_format = is_8bit ? GL_R8 : GL_R32F; m_format = GL_RED; break;
		case 2: m_internal_format = is_8bit ? GL_RG8 : GL_RG32F; m_format = GL_RG; break;
		case 3: m_internal_format = is_8bit ? GL_RGB8 : GL_RGB32F; m_format = GL_RGB; break;
		case 4: m_internal_format = is_8bit ? GL_RGBA8 : GL_RGBA32F; m_format = GL_RGBA; break;
		// NOTE(review): unsupported counts only log and leave the formats stale.
		default: tlog::error() << "Unsupported number of channels: " << n_channels;
	}
	m_is_8bit = is_8bit;
	m_size = new_size;
	m_n_channels = n_channels;
	glTexImage2D(GL_TEXTURE_2D, 0, m_internal_format, new_size.x(), new_size.y(), 0, m_format, is_8bit ? GL_UNSIGNED_BYTE : GL_FLOAT, nullptr);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
}
// Detects Windows Subsystem for Linux via the kernel release string;
// WSL lacks CUDA/GL interop support.
static bool is_wsl() {
#ifdef _WIN32
	return false;
#else
	fs::path path = "/proc/sys/kernel/osrelease";
	if (!path.exists()) {
		return false;
	}
	std::ifstream f{path.str()};
	std::string content((std::istreambuf_iterator<char>(f)), (std::istreambuf_iterator<char>()));
	return content.find("microsoft") != std::string::npos;
#endif
}
// Maps a GL texture for CUDA access. Tries true CUDA/GL interop; on first
// failure (e.g. WSL) interop is disabled process-wide and a plain CUDA
// surface plus CPU staging buffer is used instead (see blit_from_cuda_mapping).
GLTexture::CUDAMapping::CUDAMapping(GLuint texture_id, const Vector2i& size) : m_size{size} {
	static bool s_is_cuda_interop_supported = !is_wsl();
	if (s_is_cuda_interop_supported) {
		cudaError_t err = cudaGraphicsGLRegisterImage(&m_graphics_resource, texture_id, GL_TEXTURE_2D, cudaGraphicsRegisterFlagsSurfaceLoadStore);
		if (err != cudaSuccess) {
			s_is_cuda_interop_supported = false;
			cudaGetLastError(); // Reset error
		}
	}
	if (!s_is_cuda_interop_supported) {
		// falling back to a regular cuda surface + CPU copy of data
		m_cuda_surface = std::make_unique<CudaSurface2D>();
		m_cuda_surface->resize(size);
		// 4 floats (RGBA) per pixel for the host staging buffer.
		m_data_cpu.resize(m_size.prod() * 4);
		return;
	}
	// Interop path: map the registered resource and wrap level 0 in a surface.
	CUDA_CHECK_THROW(cudaGraphicsMapResources(1, &m_graphics_resource));
	CUDA_CHECK_THROW(cudaGraphicsSubResourceGetMappedArray(&m_mapped_array, m_graphics_resource, 0, 0));
	struct cudaResourceDesc resource_desc;
	memset(&resource_desc, 0, sizeof(resource_desc));
	resource_desc.resType = cudaResourceTypeArray;
	resource_desc.res.array.array = m_mapped_array;
	CUDA_CHECK_THROW(cudaCreateSurfaceObject(&m_surface, &resource_desc));
}
// Releases the surface object and the graphics-interop registration.
// m_surface is set only by the interop path in the constructor, so the
// fallback path (CudaSurface2D / CPU buffer) is cleaned up by its own
// members' destructors instead.
GLTexture::CUDAMapping::~CUDAMapping() {
	if (m_surface) {
		cudaDestroySurfaceObject(m_surface);
		cudaGraphicsUnmapResources(1, &m_graphics_resource);
		cudaGraphicsUnregisterResource(m_graphics_resource);
	}
}
// Synchronously copies the mapped CUDA array back into the host-side buffer
// and returns a pointer to it. Rows are tightly packed at a pitch of
// m_size.x() * 4 floats (4 channels per pixel).
const float* GLTexture::CUDAMapping::data_cpu() {
	CUDA_CHECK_THROW(cudaMemcpy2DFromArray(m_data_cpu.data(), m_size.x() * sizeof(float) * 4, array(), 0, 0, m_size.x() * sizeof(float) * 4, m_size.y(), cudaMemcpyDeviceToHost));
	return m_data_cpu.data();
}
#endif //NGP_GUI
// Progressive accumulation: folds the current frame into the running average
// in accumulate_buffer, where sample_count samples have been averaged so far
// (new = (old * n + sample) / (n + 1)). One thread per pixel; expects a 2D
// launch covering `resolution`.
__global__ void accumulate_kernel(Vector2i resolution, Array4f* frame_buffer, Array4f* accumulate_buffer, float sample_count, EColorSpace color_space) {
	uint32_t x = threadIdx.x + blockDim.x * blockIdx.x;
	uint32_t y = threadIdx.y + blockDim.y * blockIdx.y;

	if (x >= resolution.x() || y >= resolution.y()) {
		return;
	}

	uint32_t idx = x + resolution.x() * y;

	Array4f color = frame_buffer[idx];
	Array4f tmp = accumulate_buffer[idx];

	switch (color_space) {
		case EColorSpace::VisPosNeg:
			{
				// Signed visualization: channels x/y hold positive/negative
				// magnitudes. Average the signed value, then re-split it.
				float val = color.x() - color.y();
				float tmp_val = tmp.x() - tmp.y();
				tmp_val = (tmp_val * sample_count + val) / (sample_count+1);
				tmp.x() = fmaxf(tmp_val, 0.0f);
				tmp.y() = fmaxf(-tmp_val, 0.0f);
				break;
			}
		case EColorSpace::SRGB:
			// Accumulating in sRGB: convert the incoming sample first,
			// then share the Linear averaging code below.
			color.head<3>() = linear_to_srgb(color.head<3>());
			// fallthrough is intended!
		case EColorSpace::Linear:
			tmp.head<3>() = (tmp.head<3>() * sample_count + color.head<3>()) / (sample_count+1); break;
	}

	// Alpha is averaged the same way regardless of color space.
	tmp.w() = (tmp.w() * sample_count + color.w()) / (sample_count+1);
	accumulate_buffer[idx] = tmp;
}
// Applies a tonemapping curve to an RGB color. ACES and Hable are expressed
// as per-channel rational polynomials (k0*x^2 + k1*x + k2)/(k3*x^2 + k4*x + k5);
// Reinhard divides by 1 + luminance. Identity returns the input unchanged
// (note: without the non-negativity clamp applied to the other curves).
__device__ Array3f tonemap(Array3f x, ETonemapCurve curve) {
	if (curve == ETonemapCurve::Identity) {
		return x;
	}

	// Negative radiance is clamped before curve evaluation.
	x = x.cwiseMax(0.f);

	float k0, k1, k2, k3, k4, k5;
	if (curve == ETonemapCurve::ACES) {
		// Source: ACES approximation : https://knarkowicz.wordpress.com/2016/01/06/aces-filmic-tone-mapping-curve/
		// Include pre - exposure cancelation in constants
		k0 = 0.6f * 0.6f * 2.51f;
		k1 = 0.6f * 0.03f;
		k2 = 0.0f;
		k3 = 0.6f * 0.6f * 2.43f;
		k4 = 0.6f * 0.59f;
		k5 = 0.14f;
	} else if (curve == ETonemapCurve::Hable) {
		// Source: https://64.github.io/tonemapping/
		const float A = 0.15f;
		const float B = 0.50f;
		const float C = 0.10f;
		const float D = 0.20f;
		const float E = 0.02f;
		const float F = 0.30f;

		k0 = A * F - A * E;
		k1 = C * B * F - B * E;
		k2 = 0.0f;
		k3 = A * F;
		k4 = B * F;
		k5 = D * F * F;

		// Rescale so that the linear white point W maps to 1.
		const float W = 11.2f;
		const float nom = k0 * (W*W) + k1 * W + k2;
		const float denom = k3 * (W*W) + k4 * W + k5;
		const float white_scale = denom / nom;

		// Include white scale and exposure bias in rational polynomial coefficients
		k0 = 4.0f * k0 * white_scale;
		k1 = 2.0f * k1 * white_scale;
		k2 = k2 * white_scale;
		k3 = 4.0f * k3;
		k4 = 2.0f * k4;
	} else { //if (curve == ETonemapCurve::Reinhard)
		// Luminance-weighted Reinhard: x / (1 + Y), Rec. 709 luma weights.
		const Vector3f luminance_coefficients = Vector3f(0.2126f, 0.7152f, 0.0722f);
		float Y = luminance_coefficients.dot(x.matrix());

		return x * (1.f / (Y + 1.0f));
	}

	// Rational-polynomial evaluation shared by the ACES and Hable branches.
	Array3f color_sq = x * x;
	Array3f nom = color_sq * k0 + k1 * x + k2;
	Array3f denom = k3 * color_sq + k4 * x + k5;

	Array3f tonemapped_color = nom / denom;
	return tonemapped_color;
}
// Full per-sample color pipeline: bring the working color space to linear,
// apply exposure (in powers of two), run the tonemapping curve, and convert
// to the requested output space. VisPosNeg is treated as linear red/green.
__device__ Array3f tonemap(Array3f col, const Array3f& exposure, ETonemapCurve tonemap_curve, EColorSpace color_space, EColorSpace output_color_space) {
	// 1. Work in linear space.
	if (color_space == EColorSpace::SRGB) {
		col = srgb_to_linear(col);
	}

	// 2. Per-channel exposure: multiply by 2^exposure.
	col *= Array3f::Constant(2.0f).pow(exposure);

	// 3. Tonemap linear radiance with the selected curve.
	col = tonemap(col, tonemap_curve);

	// 4. Convert to the caller's output color space.
	if (output_color_space == EColorSpace::SRGB) {
		return linear_to_srgb(col);
	}

	return col;
}
// Alpha-blends a reference image over the render surface. Expects a 2D
// launch with one thread per output pixel. The output pixel is mapped back
// into image space through the same zoom / screen-center transform the
// renderer uses (nearest-neighbor lookup), composited over the background,
// tonemapped, and blended with weight `alpha` into the surface.
// Fix: removed unused locals `idx` and `srcidx`.
__global__ void overlay_image_kernel(
	Vector2i resolution,
	float alpha,
	Array3f exposure,
	Array4f background_color,
	const void* __restrict__ image,
	EImageDataType image_data_type,
	Vector2i image_resolution,
	ETonemapCurve tonemap_curve,
	EColorSpace color_space,
	EColorSpace output_color_space,
	int fov_axis,
	float zoom,
	Eigen::Vector2f screen_center,
	cudaSurfaceObject_t surface
) {
	uint32_t x = threadIdx.x + blockDim.x * blockIdx.x;
	uint32_t y = threadIdx.y + blockDim.y * blockIdx.y;

	if (x >= resolution.x() || y >= resolution.y()) {
		return;
	}

	// Map the output pixel back into image space, honoring zoom and screen center.
	float scale = image_resolution[fov_axis] / float(resolution[fov_axis]);
	float fx = x+0.5f;
	float fy = y+0.5f;
	fx-=resolution.x()*0.5f; fx/=zoom; fx+=screen_center.x() * resolution.x();
	fy-=resolution.y()*0.5f; fy/=zoom; fy+=screen_center.y() * resolution.y();
	float u = (fx-resolution.x()*0.5f) * scale + image_resolution.x()*0.5f;
	float v = (fy-resolution.y()*0.5f) * scale + image_resolution.y()*0.5f;
	int srcx = floorf(u);
	int srcy = floorf(v);

	// Out-of-bounds source pixels contribute transparent black.
	Array4f val;
	if (srcx >= image_resolution.x() || srcy >= image_resolution.y() || srcx < 0 || srcy < 0) {
		val = Array4f::Zero();
	} else {
		val = read_rgba(Vector2i{srcx, srcy}, image_resolution, image, image_data_type);
	}

	Array4f color = {val[0], val[1], val[2], val[3]};

	// The background color is represented in SRGB, so convert
	// to linear if that's not the space in which we're rendering.
	if (color_space != EColorSpace::SRGB) {
		background_color.head<3>() = srgb_to_linear(background_color.head<3>());
	} else {
		if (color.w() > 0) {
			// Unpremultiply, convert to sRGB, re-premultiply.
			color.head<3>() = linear_to_srgb(color.head<3>() / color.w()) * color.w();
		} else {
			color.head<3>() = Array3f::Zero();
		}
	}

	// Composite over the background (premultiplied alpha).
	float weight = (1 - color.w()) * background_color.w();
	color.head<3>() += background_color.head<3>() * weight;
	color.w() += weight;

	color.head<3>() = tonemap(color.head<3>(), exposure, tonemap_curve, color_space, output_color_space);

	// Blend with whatever is already on the surface.
	Array4f prev_color;
	surf2Dread((float4*)&prev_color, surface, x * sizeof(float4), y);
	color = color * alpha + prev_color * (1.f-alpha);
	surf2Dwrite(to_float4(color), surface, x * sizeof(float4), y);
}
// Polynomial approximation of the "Turbo" colormap. Input is clamped to
// [0, 1]; returns an RGB triple. Each channel is a degree-5 polynomial,
// split into a quartic part (lo) and a quintic tail (hi).
__device__ Array3f colormap_turbo(float x) {
	const Vector4f red_lo   = Vector4f(0.13572138f, 4.61539260f, -42.66032258f, 132.13108234f);
	const Vector4f green_lo = Vector4f(0.09140261f, 2.19418839f, 4.84296658f, -14.18503333f);
	const Vector4f blue_lo  = Vector4f(0.10667330f, 12.64194608f, -60.58204836f, 110.36276771f);
	const Vector2f red_hi   = Vector2f(-152.94239396f, 59.28637943f);
	const Vector2f green_hi = Vector2f(4.27729857f, 2.82956604f);
	const Vector2f blue_hi  = Vector2f(-89.90310912f, 27.34824973f);

	const float t = __saturatef(x);
	const float t2 = t * t;
	const float t3 = t2 * t;

	const Vector4f lo{1.0f, t, t2, t3};     // 1, t, t^2, t^3
	const Vector2f hi{t3 * t, t3 * t2};     // t^4, t^5

	return Array3f{
		lo.dot(red_lo)   + hi.dot(red_hi),
		lo.dot(green_lo) + hi.dot(green_hi),
		lo.dot(blue_lo)  + hi.dot(blue_hi)
	};
}
// Overlays a false-colored (turbo colormap) depth visualization on the
// render surface, blended with weight `alpha`. Expects a 2D launch with one
// thread per output pixel; out-of-bounds source pixels contribute
// transparent black.
// Fix: removed unused local `idx`.
__global__ void overlay_depth_kernel(
	Vector2i resolution,
	float alpha,
	const float* __restrict__ depth,
	float depth_scale,
	Vector2i image_resolution,
	int fov_axis,
	float zoom, Eigen::Vector2f screen_center,
	cudaSurfaceObject_t surface
) {
	uint32_t x = threadIdx.x + blockDim.x * blockIdx.x;
	uint32_t y = threadIdx.y + blockDim.y * blockIdx.y;

	if (x >= resolution.x() || y >= resolution.y()) {
		return;
	}

	// Map the output pixel back into depth-map space (same zoom /
	// screen-center transform as the renderer).
	float scale = image_resolution[fov_axis] / float(resolution[fov_axis]);
	float fx = x+0.5f;
	float fy = y+0.5f;
	fx-=resolution.x()*0.5f; fx/=zoom; fx+=screen_center.x() * resolution.x();
	fy-=resolution.y()*0.5f; fy/=zoom; fy+=screen_center.y() * resolution.y();
	float u = (fx-resolution.x()*0.5f) * scale + image_resolution.x()*0.5f;
	float v = (fy-resolution.y()*0.5f) * scale + image_resolution.y()*0.5f;
	int srcx = floorf(u);
	int srcy = floorf(v);

	uint32_t srcidx = srcx + image_resolution.x() * srcy;

	Array4f color;
	if (srcx >= image_resolution.x() || srcy >= image_resolution.y() || srcx < 0 || srcy < 0) {
		color = {0.0f, 0.0f, 0.0f, 0.0f};
	} else {
		float depth_value = depth[srcidx] * depth_scale;
		Array3f c = colormap_turbo(depth_value);
		color = {c[0], c[1], c[2], 1.0f};
	}

	// Blend with the existing surface contents.
	Array4f prev_color;
	surf2Dread((float4*)&prev_color, surface, x * sizeof(float4), y);
	color = color * alpha + prev_color * (1.f-alpha);
	surf2Dwrite(to_float4(color), surface, x * sizeof(float4), y);
}
// Degree-6 polynomial fit of the "viridis" colormap, evaluated per channel
// via Horner's scheme. Input is clamped to [0, 1].
__device__ Array3f colormap_viridis(float x) {
	const Array3f c0 = Array3f{0.2777273272234177f, 0.005407344544966578f, 0.3340998053353061f};
	const Array3f c1 = Array3f{0.1050930431085774f, 1.404613529898575f, 1.384590162594685f};
	const Array3f c2 = Array3f{-0.3308618287255563f, 0.214847559468213f, 0.09509516302823659f};
	const Array3f c3 = Array3f{-4.634230498983486f, -5.799100973351585f, -19.33244095627987f};
	const Array3f c4 = Array3f{6.228269936347081f, 14.17993336680509f, 56.69055260068105f};
	const Array3f c5 = Array3f{4.776384997670288f, -13.74514537774601f, -65.35303263337234f};
	const Array3f c6 = Array3f{-5.435455855934631f, 4.645852612178535f, 26.3124352495832f};

	const float t = __saturatef(x);

	// Horner evaluation from the innermost coefficient outward.
	Array3f acc = c6;
	acc = c5 + t * acc;
	acc = c4 + t * acc;
	acc = c3 + t * acc;
	acc = c2 + t * acc;
	acc = c1 + t * acc;
	return c0 + t * acc;
}
// Overlays a false-color visualization of the training error map onto the
// render surface: the surface's luminance is modulated by a turbo/viridis
// colormap of the normalized error. One thread per output pixel.
// NOTE(review): the `to_srgb` parameter is not used by this kernel; it is
// kept for interface compatibility with the host-side wrapper.
// Fix: removed unused local `idx`.
__global__ void overlay_false_color_kernel(Vector2i resolution, Vector2i training_resolution, bool to_srgb, int fov_axis, cudaSurfaceObject_t surface, const float *error_map, Vector2i error_map_resolution, const float *average, float brightness, bool viridis) {
	uint32_t x = threadIdx.x + blockDim.x * blockIdx.x;
	uint32_t y = threadIdx.y + blockDim.y * blockIdx.y;

	if (x >= resolution.x() || y >= resolution.y()) {
		return;
	}

	// Normalize errors relative to the running average (epsilon guards
	// against division by zero).
	float error_map_scale = brightness/(0.0000001f+average[0]); // average maps to 1/16th

	// Map the output pixel into error-map coordinates.
	float scale = training_resolution[fov_axis] / float(resolution[fov_axis]);
	float u = (x+0.5f-resolution.x()*0.5f) * scale + training_resolution.x()*0.5f;
	float v = (y+0.5f-resolution.y()*0.5f) * scale + training_resolution.y()*0.5f;
	int srcx = floorf(u * error_map_resolution.x() / float(max(1.f, (float)training_resolution.x())));
	int srcy = floorf(v * error_map_resolution.y() / float(max(1.f, (float)training_resolution.y())));

	uint32_t srcidx = srcx + error_map_resolution.x() * srcy;

	// Pixels outside the error map leave the surface untouched.
	if (srcx >= error_map_resolution.x() || srcy >= error_map_resolution.y() || srcx<0 || srcy<0) {
		return;
	}

	float err = error_map[srcidx] * error_map_scale;
	if (viridis) {
		// Compress to [0, 1) so the viridis polynomial stays in range.
		err *= 1.f / (1.f+err);
	}

	Array4f color;
	surf2Dread((float4*)&color, surface, x * sizeof(float4), y);

	// Modulate the surface's luma by the clamped colormap value.
	Array3f c = viridis ? colormap_viridis(err) : colormap_turbo(err);
	float grey = color.x() * 0.2126f + color.y() * 0.7152f + color.z() * 0.0722f;
	color.x() = grey*__saturatef(c.x());
	color.y() = grey*__saturatef(c.y());
	color.z() = grey*__saturatef(c.z());
	surf2Dwrite(to_float4(color), surface, x * sizeof(float4), y);
}
// Composites the accumulated image over the background color, tonemaps it,
// and writes the result to the output surface. One thread per pixel;
// expects a 2D launch covering `resolution`.
__global__ void tonemap_kernel(Vector2i resolution, float exposure, Array4f background_color, Array4f* accumulate_buffer, EColorSpace color_space, EColorSpace output_color_space, ETonemapCurve tonemap_curve, bool clamp_output_color, cudaSurfaceObject_t surface) {
	const uint32_t x = threadIdx.x + blockDim.x * blockIdx.x;
	const uint32_t y = threadIdx.y + blockDim.y * blockIdx.y;
	if (x >= resolution.x() || y >= resolution.y()) {
		return;
	}

	// The background color is represented in SRGB, so convert
	// to linear if that's not the space in which we're rendering.
	if (color_space != EColorSpace::SRGB) {
		background_color.head<3>() = srgb_to_linear(background_color.head<3>());
	}

	const uint32_t pixel_idx = x + resolution.x() * y;
	Array4f color = accumulate_buffer[pixel_idx];

	// Composite over the background (premultiplied alpha).
	const float weight = (1 - color.w()) * background_color.w();
	color.head<3>() += background_color.head<3>() * weight;
	color.w() += weight;

	color.head<3>() = tonemap(color.head<3>(), Array3f::Constant(exposure), tonemap_curve, color_space, output_color_space);

	// Optionally clamp to [0, 1] (e.g. when the consumer expects LDR input).
	if (clamp_output_color) {
		color = color.cwiseMax(0.0f).cwiseMin(1.0f);
	}

	surf2Dwrite(to_float4(color), surface, x * sizeof(float4), y);
}
// Copies the DLSS output surface onto the destination surface,
// one thread per pixel of `resolution`.
__global__ void dlss_splat_kernel(
	Vector2i resolution,
	cudaSurfaceObject_t dlss_surface,
	cudaSurfaceObject_t surface
) {
	const uint32_t px = threadIdx.x + blockDim.x * blockIdx.x;
	const uint32_t py = threadIdx.y + blockDim.y * blockIdx.y;
	if (px >= resolution.x() || py >= resolution.y()) {
		return;
	}

	float4 pixel;
	surf2Dread(&pixel, dlss_surface, px * sizeof(float4), py);
	surf2Dwrite(pixel, surface, px * sizeof(float4), py);
}
// Resizes the internal render buffers to the given input resolution.
// The output surface follows DLSS's output resolution when DLSS is active;
// accumulation is reset whenever the output resolution actually changes.
void CudaRenderBuffer::resize(const Vector2i& res) {
	m_in_resolution = res;

	const auto n_pixels = res.x() * res.y();
	m_frame_buffer.enlarge(n_pixels);
	m_depth_buffer.enlarge(n_pixels);
	m_accumulate_buffer.enlarge(n_pixels);

	const auto prev_out_res = out_resolution();
	m_surface_provider->resize(m_dlss ? m_dlss->out_resolution() : res);

	if (out_resolution() != prev_out_res) {
		reset_accumulation();
	}
}
// Asynchronously zeroes the frame and depth buffers on the given stream.
// Does not touch the accumulation buffer (that is handled in accumulate()).
void CudaRenderBuffer::clear_frame(cudaStream_t stream) {
	CUDA_CHECK_THROW(cudaMemsetAsync(m_frame_buffer.data(), 0, m_frame_buffer.bytes(), stream));
	CUDA_CHECK_THROW(cudaMemsetAsync(m_depth_buffer.data(), 0, m_depth_buffer.bytes(), stream));
}
// Folds the current frame buffer into the running accumulation buffer and
// increments the sample counter. With DLSS active, accum_spp is forced to 0
// so the buffer is cleared and holds only the current frame.
// NOTE(review): the `exposure` parameter is currently unused here.
void CudaRenderBuffer::accumulate(float exposure, cudaStream_t stream) {
	Vector2i res = in_resolution();

	uint32_t accum_spp = m_dlss ? 0 : m_spp;

	// First accumulated sample: start from a zeroed buffer.
	if (accum_spp == 0) {
		CUDA_CHECK_THROW(cudaMemsetAsync(m_accumulate_buffer.data(), 0, m_accumulate_buffer.bytes(), stream));
	}

	const dim3 threads = { 16, 8, 1 };
	const dim3 blocks = { div_round_up((uint32_t)res.x(), threads.x), div_round_up((uint32_t)res.y(), threads.y), 1 };
	accumulate_kernel<<<blocks, threads, 0, stream>>>(
		res,
		frame_buffer(),
		accumulate_buffer(),
		(float)accum_spp,
		m_color_space
	);

	++m_spp;
}
// Resolves the accumulated image to the output surface: composites over the
// background and tonemaps; when DLSS is active, additionally upscales the
// result through DLSS and splats it onto the output surface.
void CudaRenderBuffer::tonemap(float exposure, const Array4f& background_color, EColorSpace output_color_space, cudaStream_t stream) {
	assert(m_dlss || out_resolution() == in_resolution());
	// With DLSS, tonemapping happens at input resolution into the DLSS
	// input frame; otherwise directly at output resolution into the surface.
	auto res = m_dlss ? in_resolution() : out_resolution();

	const dim3 threads = { 16, 8, 1 };
	const dim3 blocks = { div_round_up((uint32_t)res.x(), threads.x), div_round_up((uint32_t)res.y(), threads.y), 1 };
	tonemap_kernel<<<blocks, threads, 0, stream>>>(
		res,
		exposure,
		background_color,
		accumulate_buffer(),
		m_color_space,
		output_color_space,
		m_tonemap_curve,
		m_dlss && output_color_space == EColorSpace::SRGB,  // clamp to [0,1] when feeding sRGB into DLSS
		m_dlss ? m_dlss->frame() : surface()
	);

	if (m_dlss) {
		assert(out_resolution() == m_dlss->out_resolution());
		assert(m_spp >= 1);

		uint32_t sample_index = m_spp - 1;

		m_dlss->run(
			res,
			output_color_space == EColorSpace::Linear, /* HDR mode */
			m_dlss_sharpening,
			Vector2f::Constant(0.5f) - ld_random_pixel_offset(sample_index), /* jitter offset in [-0.5, 0.5] */
			sample_index == 0 /* reset history */
		);

		// Copy the upscaled DLSS output onto the final surface.
		auto out_res = out_resolution();
		const dim3 out_blocks = { div_round_up((uint32_t)out_res.x(), threads.x), div_round_up((uint32_t)out_res.y(), threads.y), 1 };
		dlss_splat_kernel<<<out_blocks, threads, 0, stream>>>(out_res, m_dlss->output(), surface());
	}
}
// Host-side wrapper: launches overlay_image_kernel over the full output
// surface on the given stream.
void CudaRenderBuffer::overlay_image(
	float alpha,
	const Eigen::Array3f& exposure,
	const Array4f& background_color,
	EColorSpace output_color_space,
	const void* __restrict__ image,
	EImageDataType image_data_type,
	const Vector2i& image_resolution,
	int fov_axis,
	float zoom,
	const Eigen::Vector2f& screen_center,
	cudaStream_t stream
) {
	const auto resolution = out_resolution();

	const dim3 block_dim = { 16, 8, 1 };
	const dim3 grid_dim = {
		div_round_up((uint32_t)resolution.x(), block_dim.x),
		div_round_up((uint32_t)resolution.y(), block_dim.y),
		1
	};

	overlay_image_kernel<<<grid_dim, block_dim, 0, stream>>>(
		resolution,
		alpha,
		exposure,
		background_color,
		image,
		image_data_type,
		image_resolution,
		m_tonemap_curve,
		m_color_space,
		output_color_space,
		fov_axis,
		zoom,
		screen_center,
		surface()
	);
}
// Host-side wrapper: launches overlay_depth_kernel over the full output
// surface on the given stream.
void CudaRenderBuffer::overlay_depth(
	float alpha,
	const float* __restrict__ depth,
	float depth_scale,
	const Vector2i& image_resolution,
	int fov_axis,
	float zoom,
	const Eigen::Vector2f& screen_center,
	cudaStream_t stream
) {
	const auto resolution = out_resolution();

	const dim3 block_dim = { 16, 8, 1 };
	const dim3 grid_dim = {
		div_round_up((uint32_t)resolution.x(), block_dim.x),
		div_round_up((uint32_t)resolution.y(), block_dim.y),
		1
	};

	overlay_depth_kernel<<<grid_dim, block_dim, 0, stream>>>(
		resolution,
		alpha,
		depth,
		depth_scale,
		image_resolution,
		fov_axis,
		zoom,
		screen_center,
		surface()
	);
}
// Host-side wrapper: launches overlay_false_color_kernel over the full
// output surface on the given stream.
void CudaRenderBuffer::overlay_false_color(Vector2i training_resolution, bool to_srgb, int fov_axis, cudaStream_t stream, const float* error_map, Vector2i error_map_resolution, const float* average, float brightness, bool viridis) {
	const auto resolution = out_resolution();

	const dim3 block_dim = { 16, 8, 1 };
	const dim3 grid_dim = {
		div_round_up((uint32_t)resolution.x(), block_dim.x),
		div_round_up((uint32_t)resolution.y(), block_dim.y),
		1
	};

	overlay_false_color_kernel<<<grid_dim, block_dim, 0, stream>>>(
		resolution,
		training_resolution,
		to_srgb,
		fov_axis,
		surface(),
		error_map,
		error_map_resolution,
		average,
		brightness,
		viridis
	);
}
// Enables DLSS upscaling to the given output resolution (Vulkan builds
// only; otherwise throws). Re-initializes the DLSS instance only when the
// target resolution changes, then re-runs resize() so the output surface
// picks up the DLSS output resolution.
void CudaRenderBuffer::enable_dlss(const Eigen::Vector2i& out_res) {
#ifdef NGP_VULKAN
	if (!m_dlss || m_dlss->out_resolution() != out_res) {
		m_dlss = dlss_init(out_res);
	}
	resize(in_resolution());
#else
	throw std::runtime_error{"NGP was compiled without Vulkan/NGX/DLSS support."};
#endif
}
// Tears down DLSS. Subsequent resize() calls size the output surface to
// match the input resolution again.
void CudaRenderBuffer::disable_dlss() {
	m_dlss = nullptr;
}
NGP_NAMESPACE_END
|
8d07a72d59fb9ba038455de585faf8d238c7bb1f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma (iuriish@yahoo.com), created on 20.04.2018
//
#include<ops/declarable/helpers/transforms.h>
#include <array/ResultSet.h>
#include <helpers/ShapeUtils.h>
#include <numeric>
#include <NDArrayFactory.h>
#include <helpers/TAD.h>
#include <exceptions/cuda_exception.h>
#include <PointersManager.h>
#include <ConstantTadHelper.h>
namespace nd4j {
namespace ops {
namespace helpers {
///////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////
// Concatenation kernel: each block is assigned to (a slice of) one input
// array and copies its elements into the corresponding output sub-array.
// pVx/pVz are device arrays of per-array data pointers; pxShapeInfo /
// pzShapeInfo are device arrays of per-array shape-info pointers.
template<typename T>
__global__ static void concatCuda(const int numOfArrs, void* pVx,  void* pxShapeInfo, void* pVz, void* pzShapeInfo) {

	__shared__ int arrIdx, blocksPerArr;
	__shared__ T *x, *z;
	__shared__ Nd4jLong *zShapeInfo, *xShapeInfo, arrLen, arrLenPerBlock, start, end;

	// Thread 0 resolves which input array this block serves and the
	// [start, end) element range it covers; values are shared block-wide.
	if (threadIdx.x == 0) {
		blocksPerArr = (gridDim.x + numOfArrs - 1) / numOfArrs;     // ceil
		arrIdx = blockIdx.x / blocksPerArr;

		x = reinterpret_cast<T*>(reinterpret_cast<void**>(pVx)[arrIdx]);
		z = reinterpret_cast<T*>(reinterpret_cast<void**>(pVz)[arrIdx]);
		xShapeInfo = reinterpret_cast<Nd4jLong**>(pxShapeInfo)[arrIdx];
		zShapeInfo = reinterpret_cast<Nd4jLong**>(pzShapeInfo)[arrIdx];
		arrLen = shape::length(xShapeInfo);

		arrLenPerBlock = (arrLen + blocksPerArr - 1) / blocksPerArr;  // ceil

		start = (blockIdx.x % blocksPerArr) * arrLenPerBlock;
		end = (start + arrLenPerBlock) > arrLen ? arrLen : (start + arrLenPerBlock);
	}
	__syncthreads();

	// Strided element-wise copy; per-element offsets honor arbitrary
	// strides in both source and destination.
	for (Nd4jLong i = start + threadIdx.x; i < end; i += blockDim.x)
		z[shape::getIndexOffset(i, zShapeInfo, arrLen)] = x[shape::getIndexOffset(i, xShapeInfo, arrLen)];
}
///////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////
// Host launcher for concatCuda. Launch configuration is fixed at
// 512 blocks x 256 threads with 1024 bytes of dynamic shared memory.
// NOTE(review): the kernel declares no extern shared memory, so the 1024
// bytes appear unused — confirm before removing.
template<typename T>
__host__ static void concatCudaLauncher(const int numOfArrs, const hipStream_t *stream,  void* pVx, void* pxShapeInfo, void* pVz, void* pzShapeInfo) {

	hipLaunchKernelGGL(( concatCuda<T>), dim3(512), dim3(256), 1024, *stream, numOfArrs, pVx, pxShapeInfo, pVz, pzShapeInfo);
}
///////////////////////////////////////////////////////////////////
// x - input, y - paddings, z - output
///////////////////////////////////////////////////////////////////
// Pad kernel. x - input, y - paddings (per-dimension [left, right] pad
// sizes), z - output. mode: 0 = CONSTANT (fill with *vPadVal),
// 1 = REFLECT, otherwise SYMMETRIC.
// Dynamic shared memory must hold blockDim.x * rank coordinates.
template<typename X, typename Y>
__global__ static void padCuda(const int mode,
                               const void *vx, const Nd4jLong *xShapeInfo,
                               const void *vy, const Nd4jLong *yShapeInfo,
                               void *vz, const Nd4jLong *zShapeInfo,
                               const void *vPadVal) {

	const X padVal = *reinterpret_cast<const X*>(vPadVal);

	const auto x = reinterpret_cast<const X*>(vx);
	const auto y = reinterpret_cast<const Y*>(vy);
	auto z = reinterpret_cast<X*>(vz);

	__shared__ int rank, rankMinusOne;
	__shared__ Nd4jLong zLen, yLen, totalThreads, *coords, *xShape, *zShape, *xStride, *zStride, shift1, shift2, yStride0;

	if (threadIdx.x == 0) {
		extern __shared__ unsigned char shmem[];
		coords = reinterpret_cast<Nd4jLong*>(shmem);
		zLen = shape::length(zShapeInfo);
		xShape = shape::shapeOf(const_cast<Nd4jLong*>(xShapeInfo));
		zShape = shape::shapeOf(const_cast<Nd4jLong*>(zShapeInfo));
		xStride = shape::stride(const_cast<Nd4jLong*>(xShapeInfo));
		zStride = shape::stride(const_cast<Nd4jLong*>(zShapeInfo));
		yStride0 = shape::stride(const_cast<Nd4jLong*>(yShapeInfo))[0];
		rank = shape::rank(xShapeInfo);
		zLen = shape::length(zShapeInfo);  // note: zLen assigned twice (harmless)
		yLen = 2 * rank;
		rankMinusOne = rank - 1;
		totalThreads = gridDim.x * blockDim.x;
		shift1 = mode == 1 ? 0 : 1;         // REFLECT : SYMMETRIC
		shift2 = mode == 1 ? 2 : 1;         // REFLECT : SYMMETRIC
	}
	__syncthreads();

	// Per-thread coordinate scratch; reused for both x and z coordinates.
	auto xzCoord = coords + threadIdx.x * rank;

	const auto tid = blockIdx.x * blockDim.x + threadIdx.x;

	if(mode == 0) { // CONSTANT case
		// Grid-stride over output elements: copy from x when the
		// coordinate falls inside the un-padded region, else write padVal.
		for (Nd4jLong i = tid; i < zLen; i += totalThreads) {
			shape::index2coords(rank, zShape, i, zLen, xzCoord);
			const auto zOffset = shape::getOffset(0, zShape, zStride, xzCoord, rank);

			bool within = true;
			for(int j = rankMinusOne; j >= 0; --j) {
				if(xShape[j] == zShape[j]) continue;
				const auto left = y[shape::getIndexOffset(yStride0 * j, yShapeInfo, yLen)];
				if(xzCoord[j] < left || xzCoord[j] >= left + xShape[j]) {within = false; break;}
				else {xzCoord[j] = xzCoord[j] - left;}
			}

			if(within)
				z[zOffset] = x[shape::getOffset(0, xShape, xStride, xzCoord, rank)];
			else
				z[zOffset] = padVal;
		}
	}
	else {  // REFLECT and SYMMETRIC cases
		// Mirror out-of-range coordinates back into the input range
		// (shift1/shift2 select reflect vs. symmetric behavior).
		for (Nd4jLong i = tid; i < zLen; i += totalThreads) {
			shape::index2coords(rank, zShape, i, zLen, xzCoord);
			const auto zOffset = shape::getOffset(0, zShape, zStride, xzCoord, rank);

			for(int j = rankMinusOne; j >= 0; --j) {
				if(xShape[j] == zShape[j]) continue;
				xzCoord[j] = xzCoord[j] - y[shape::getIndexOffset(yStride0 * j, yShapeInfo, yLen)];    // are ready to fill middle (within input dimension range)
				if(xzCoord[j] < 0) xzCoord[j] = -xzCoord[j] - shift1;                // means fill from left
				else if(xzCoord[j] >= xShape[j]) xzCoord[j] = 2 * xShape[j] - xzCoord[j] - shift2;     // means fill from right
			}

			const auto xOffset = shape::getOffset(0, xShape, xStride, xzCoord, rank);
			z[zOffset] = x[xOffset];
		}
	}
}
///////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////
// Host launcher for padCuda; forwards the launch configuration unchanged.
template<typename X, typename Y>
static void padCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream,
                            const int mode,
                            const void *vx, const Nd4jLong *xShapeInfo,
                            const void *vy, const Nd4jLong *yShapeInfo,
                            void *vz, const Nd4jLong *zShapeInfo,
                            const void* padVal) {

	hipLaunchKernelGGL(( padCuda<X,Y>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, mode, vx, xShapeInfo, vy, yShapeInfo, vz, zShapeInfo, padVal);
}
///////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////
// Host entry point for pad. mode: 0 = CONSTANT, 1 = REFLECT, else
// SYMMETRIC; padValue is only read by the kernel in CONSTANT mode.
void pad(nd4j::LaunchContext * context, const int mode, const NDArray& input, const NDArray& paddings, NDArray& output, const NDArray& padValue) {

	PointersManager manager(context, "pad");

	NDArray::prepareSpecialUse({&output}, {&input, &paddings, &padValue});

	const int threadsPerBlock = MAX_NUM_THREADS / 4;
	const int blocksPerGrid = (output.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
	// Shared memory: 8 bytes per coordinate, one coordinate set per thread, plus slack.
	const int sharedMem = 8 * threadsPerBlock * output.rankOf() + 128;

	const auto xType = input.dataType();
	const auto yType = paddings.dataType();

	BUILD_DOUBLE_SELECTOR(xType, yType, padCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), mode, input.getSpecialBuffer(), input.getSpecialShapeInfo(), paddings.getSpecialBuffer(), paddings.getSpecialShapeInfo(), output.getSpecialBuffer(), output.getSpecialShapeInfo(), padValue.getSpecialBuffer()), LIBND4J_TYPES, INTEGER_TYPES);

	NDArray::registerSpecialUse({&output}, {&input, &paddings, &padValue});
	manager.synchronize();
}
///////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////
// Inverts a permutation: for every position i, writes z[x[i]] = i.
// Grid-stride loop over all elements. Assumes x holds a valid permutation
// of [0, len) — otherwise writes may collide or index out of range.
template<typename T>
__global__ static void invertPermutationCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo) {

	const T* x = reinterpret_cast<const T*>(vx);
	T* z = reinterpret_cast<T*>(vz);

	__shared__ Nd4jLong len, totalThreads;

	if (threadIdx.x == 0) {
		len = shape::length(xShapeInfo);
		totalThreads = gridDim.x * blockDim.x;
	}
	__syncthreads();

	const auto tid = blockIdx.x * blockDim.x + threadIdx.x;

	for (Nd4jLong i = tid; i < len; i += totalThreads) {
		const auto xOffset = shape::getIndexOffset(i, xShapeInfo, len);
		const Nd4jLong index = x[xOffset];
		const auto zOffset = shape::getIndexOffset(index, zShapeInfo, len);
		z[zOffset] = i;
	}
}
///////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////
// Host launcher for invertPermutationCuda (allocates 1024 bytes of dynamic
// shared memory; NOTE(review): the kernel only uses statically declared
// shared variables, so this allocation looks unnecessary — confirm before
// removing).
template<typename T>
__host__ static void invertPermutationCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const hipStream_t *stream,
                                                   const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo) {

	hipLaunchKernelGGL(( invertPermutationCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), 1024, *stream, vx, xShapeInfo, vz, zShapeInfo);
}
BUILD_SINGLE_TEMPLATE(template void invertPermutationCudaLauncher, (const int blocksPerGrid, const int threadsPerBlock, const hipStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo), LIBND4J_TYPES);
////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////
// Host entry point: fills `output` with the inverse of the permutation
// stored in `input` (same length).
void invertPermutation(nd4j::LaunchContext* context, const NDArray& input, NDArray& output) {

	const int threadsPerBlock = MAX_NUM_THREADS;
	const int blocksPerGrid = (input.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;

	PointersManager manager(context, "invertPermutation");

	NDArray::prepareSpecialUse({&output}, {&input});
	BUILD_SINGLE_SELECTOR(input.dataType(), invertPermutationCudaLauncher, (blocksPerGrid, threadsPerBlock, context->getCudaStream(), input.getSpecialBuffer(), input.getSpecialShapeInfo(), output.getSpecialBuffer(), output.getSpecialShapeInfo()), LIBND4J_TYPES);
	NDArray::registerSpecialUse({&output}, {&input});

	manager.synchronize();
}
//////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////
// Per-matrix trace: z[m] = sum over the main diagonal of the m-th inner
// matrix of x (xRank = zRank + 2, diagonal length diagLen). One block per
// output element; the block's threads each accumulate a strided subset of
// the diagonal and then combine via a shared-memory tree reduction.
// Dynamic shared memory layout: blockDim.x partial sums of type T,
// followed by blockDim.x * xRank Nd4jLong coordinates.
// NOTE(review): the halving reduction assumes blockDim.x is a power of two.
template<typename T>
__global__ static void traceCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const uint diagLen) {

	const auto x = reinterpret_cast<const T*>(vx);
	auto z = reinterpret_cast<T*>(vz);

	__shared__ T* sharedMem;
	__shared__ int xRank, zRank;        // xRank = zRank + 2
	__shared__ Nd4jLong xLen, zLen, *coordsMem;

	if (threadIdx.x == 0) {
		extern __shared__ unsigned char shmem[];
		sharedMem = reinterpret_cast<T*>(shmem);
		coordsMem = reinterpret_cast<Nd4jLong*>(shmem + blockDim.x * sizeof(T));

		xRank = shape::rank(xShapeInfo);
		zRank = shape::rank(zShapeInfo);
		xLen = shape::length(xShapeInfo);
		zLen = shape::length(zShapeInfo);   // corresponds to number of matrices
	}
	__syncthreads();

	Nd4jLong* coords = coordsMem + threadIdx.x * xRank;

	for (uint m = blockIdx.x; m < zLen; m += gridDim.x) {   // one block per each element of z, that is per each matrix

		// The output element's coordinates select which matrix to trace.
		shape::index2coords(zRank, shape::shapeOf(const_cast<Nd4jLong*>(zShapeInfo)), m, zLen, coords);
		const auto zOffset = shape::getOffset(0, shape::shapeOf(const_cast<Nd4jLong*>(zShapeInfo)), shape::stride(const_cast<Nd4jLong*>(zShapeInfo)), coords, zRank);

		sharedMem[threadIdx.x] = 0;

		// Each thread sums a strided subset of the diagonal entries.
		for (uint i = threadIdx.x; i < diagLen; i += blockDim.x) {
			coords[zRank] = coords[zRank + 1] = i;   // row == col on the main diagonal
			const auto xOffset = shape::getOffset(0, shape::shapeOf(const_cast<Nd4jLong*>(xShapeInfo)), shape::stride(const_cast<Nd4jLong*>(xShapeInfo)), coords, xRank);
			sharedMem[threadIdx.x] += x[xOffset];
		}

		__syncthreads();

		// aggregate sum
		for (Nd4jLong activeThreads = blockDim.x / 2; activeThreads > 0; activeThreads /= 2) {
			if (threadIdx.x < activeThreads)
				sharedMem[threadIdx.x] += sharedMem[threadIdx.x + activeThreads];
			__syncthreads();
		}

		if (threadIdx.x == 0)
			z[zOffset] = *sharedMem;
	}
}
///////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////
// Host launcher for traceCuda; sharedMem must cover the per-thread partial
// sums plus the per-thread coordinate scratch (see the kernel's layout).
template<typename T>
static void traceCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream,
                              const void *vx, const Nd4jLong *xShapeInfo,
                              void *vz, const Nd4jLong *zShapeInfo,
                              const uint diagLen) {

	hipLaunchKernelGGL(( traceCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, vx, xShapeInfo, vz, zShapeInfo, diagLen);
}
BUILD_SINGLE_TEMPLATE(template void traceCudaLauncher, (const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const uint diagLen), LIBND4J_TYPES);
///////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////
// Host entry point: computes the trace of each inner matrix of `input`
// into `output`; the diagonal length is min of the last two dimensions.
void trace(nd4j::LaunchContext* context, const NDArray& input, NDArray& output) {

	PointersManager manager(context, "trace");

	const uint diagLen = input.sizeAt(-1) < input.sizeAt(-2) ? input.sizeAt(-1) : input.sizeAt(-2);
	const int threadsPerBlock = MAX_NUM_THREADS / 4;
	const int blocksPerGrid = (output.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
	// Shared memory: per-thread coordinates + one partial sum per thread + slack.
	const int sharedMem = threadsPerBlock * (sizeof(Nd4jLong) * input.rankOf() + input.sizeOfT()) + 128;

	NDArray::prepareSpecialUse({&output}, {&input});
	BUILD_SINGLE_SELECTOR(input.dataType(), traceCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), input.getSpecialBuffer(), input.getSpecialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), diagLen), LIBND4J_TYPES);
	NDArray::registerSpecialUse({&output}, {&input});

	manager.synchronize();
}
///////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////
// Backprop for triu: gradI = gradO where the element is on/above the
// diagonal shifted by `diag`, and 0 strictly below it. x (gradO) and
// z (gradI) share the same shape, though strides (and therefore offsets)
// may differ. Dynamic shared memory: blockDim.x * rank coordinates.
template<typename T>
__global__ static void triuBPCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int diag) {

	// x and z have same shapes
	const auto x = reinterpret_cast<const T*>(vx);  // gradO
	auto z = reinterpret_cast<T*>(vz);              // gradI

	__shared__ int rank, areSameOffsets;                // xRank = zRank
	__shared__ Nd4jLong len, totalThreads, *sharedMem;  // xLen = zLen

	if (threadIdx.x == 0) {
		extern __shared__ unsigned char shmem[];
		sharedMem = reinterpret_cast<Nd4jLong*>(shmem);
		areSameOffsets = shape::haveSameShapeAndStrides(xShapeInfo, zShapeInfo);
		rank = shape::rank(xShapeInfo);
		len = shape::length(zShapeInfo);
		totalThreads = gridDim.x * blockDim.x;
	}
	__syncthreads();

	// Per-thread coordinate scratch.
	auto coords = sharedMem + threadIdx.x * rank;

	const auto tid = blockIdx.x * blockDim.x + threadIdx.x;

	// Grid-stride over all elements of gradI.
	for (Nd4jLong i = tid; i < len; i += totalThreads) {
		shape::index2coords(rank, zShapeInfo + 1, i, len, coords);
		const auto zOffset = shape::getOffset(0, zShapeInfo + 1, zShapeInfo + rank + 1, coords, rank);

		if((coords[rank - 2] + diag > coords[rank - 1]))    // row + diag > col, i.e. strictly below the shifted diagonal
			z[zOffset] = 0;
		else
			// When x and z share offsets, reuse zOffset; otherwise recompute for x.
			z[zOffset] = x[areSameOffsets ? zOffset : shape::getOffset(0, xShapeInfo + 1, xShapeInfo + rank + 1, coords, rank)];
	}
}
///////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////
// Host launcher for triuBPCuda; forwards the launch configuration unchanged.
template<typename T>
static void triuBPCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int diag) {

	hipLaunchKernelGGL(( triuBPCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, vx, xShapeInfo, vz, zShapeInfo, diag);
}
BUILD_SINGLE_TEMPLATE(template void triuBPCudaLauncher, (const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int diag), LIBND4J_TYPES);
///////////////////////////////////////////////////////////////////
// Backprop for the triu op: gradI gets gradO with the sub-diagonal region
// (row + diagonal > col) zeroed. Dispatches triuBPCudaLauncher on gradI's type.
void triuBP(nd4j::LaunchContext* context, const NDArray& input, const NDArray& gradO, NDArray& gradI, const int diagonal) {
    PointersManager manager(context, "triuBP");

    // launch geometry: quarter of the max block size, enough blocks to cover gradO,
    // and per-thread coordinate scratch (rank Nd4jLongs each) plus slack in shared memory
    const int numThreads = MAX_NUM_THREADS / 4;
    const int numBlocks  = (gradO.lengthOf() + numThreads - 1) / numThreads;
    const int shmemBytes = numThreads * sizeof(Nd4jLong) * gradO.rankOf() + 128;

    NDArray::prepareSpecialUse({&gradI}, {&gradO});
    BUILD_SINGLE_SELECTOR(gradI.dataType(), triuBPCudaLauncher, (numBlocks, numThreads, shmemBytes, context->getCudaStream(), gradO.getSpecialBuffer(), gradO.getSpecialShapeInfo(), gradI.specialBuffer(), gradI.specialShapeInfo(), diagonal), LIBND4J_TYPES);
    NDArray::registerSpecialUse({&gradI}, {&gradO});

    manager.synchronize();
}
///////////////////////////////////////////////////////////////////
// Backprop kernel for tile: each element of gradI (z) accumulates the sum of all
// gradO (x) elements that the forward tile produced from it. For each output index i
// the contributing x offsets are materialized by shape::outerArrayOffsets into this
// thread's slice of globMem, using a per-thread scratch of 2*xRank Nd4jLongs in
// dynamic shared memory (sized by the host as 2 * gradO.rankOf() per thread).
template<typename T>
__global__ static void tileBPCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, Nd4jLong* globMem) {

    const auto x = reinterpret_cast<const T*>(vx);  // gradO
    auto z = reinterpret_cast<T*>(vz);              // gradI

    __shared__ int xRank, zRank;                                        // xRank >= zRank
    __shared__ Nd4jLong numOfXOffsets, zLen, totalThreads, *sharedMem;  // xLen >= zLen

    if (threadIdx.x == 0) {
        extern __shared__ unsigned char shmem[];
        sharedMem = reinterpret_cast<Nd4jLong*>(shmem);
        // BUG FIX: the rank must come from xShapeInfo (gradO). It was read from
        // zShapeInfo; since the host sizes the shared scratch by gradO's rank,
        // using z's (smaller) rank made neighbouring threads' scratch slices overlap.
        xRank = shape::rank(xShapeInfo);
        zLen = shape::length(zShapeInfo);
        numOfXOffsets = shape::length(xShapeInfo) / zLen;   // x elements mapping onto each z element
        totalThreads = gridDim.x * blockDim.x;
    }
    __syncthreads();

    const auto tid = blockIdx.x * blockDim.x + threadIdx.x;

    auto memBuff = sharedMem + threadIdx.x * 2 * xRank;     // per-thread shared scratch for outerArrayOffsets
    auto xOffsets = globMem + tid * numOfXOffsets;          // per-thread slice of the global scratch buffer

    for (Nd4jLong i = tid; i < zLen; i += totalThreads) {
        const auto zOffset = shape::getIndexOffset(i, zShapeInfo, zLen);
        shape::outerArrayOffsets(xOffsets, i, xShapeInfo, zShapeInfo, memBuff);
        z[zOffset] = x[xOffsets[0]];                    // first offset
        for (Nd4jLong j = 1; j < numOfXOffsets; ++j)    // rest of the offsets
            z[zOffset] += x[xOffsets[j]];
    }
}
///////////////////////////////////////////////////////////////////
// Thin host-side launcher: forwards launch config, buffers and the global scratch
// pointer to tileBPCuda<T>. Instantiated below for FLOAT_TYPES only.
template<typename T>
static void tileBPCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, Nd4jLong* globMem) {
hipLaunchKernelGGL(( tileBPCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, vx, xShapeInfo, vz, zShapeInfo, globMem);
}
BUILD_SINGLE_TEMPLATE(template void tileBPCudaLauncher, (const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, Nd4jLong* globMem), FLOAT_TYPES);
//////////////////////////////////////////////////////////////////////////
// Backprop for tile: reduces gradO back onto gradI by summing every tiled copy.
// NOTE(review): the 'reps' parameter is accepted but never read in this body - the
// mapping is recovered purely from the two shape infos; confirm this is intentional.
void tileBP(nd4j::LaunchContext * context, const NDArray& gradO /*input*/, NDArray& gradI /*output*/, const std::vector<Nd4jLong> reps) {
// device scratch (one INT64 per gradO element) holding per-element x-offset lists for the kernel
NDArray memBuff('c', gradO.getShapeAsVector(), nd4j::DataType::INT64, context); // empty auxiliary array for storing device memory which will be used in kernel calculations
const int threadsPerBlock = MAX_NUM_THREADS / 4;
const int blocksPerGrid = (gradI.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
// per-thread scratch of 2 * gradO-rank Nd4jLongs, plus 128 bytes of slack
const int sharedMem = threadsPerBlock * sizeof(Nd4jLong) * 2 * gradO.rankOf() + 128;
PointersManager manager(context, "tileBP");
NDArray::prepareSpecialUse({&gradI}, {&gradO, &memBuff});
BUILD_SINGLE_SELECTOR(gradI.dataType(), tileBPCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), gradO.getSpecialBuffer(), gradO.getSpecialShapeInfo(), gradI.specialBuffer(), gradI.specialShapeInfo(), reinterpret_cast<Nd4jLong*>(memBuff.specialBuffer())), FLOAT_TYPES);
NDArray::registerSpecialUse({&gradI}, {&gradO, &memBuff});
manager.synchronize();
}
///////////////////////////////////////////////////////////////////
// Applies an elementwise update op (add/sub/mul/div/rsub/rdiv/assign, selected by
// opCode) from each y-TAD onto the x-TAD selected by indexes[e]. Each index is
// "owned" by exactly one block (index mod gridDim.x), so different indexes can be
// processed concurrently while repeated indexes within a block stay ordered.
template<typename T>
__global__ static void scatterUpdateCuda(const int opCode, const int numOfInd,
void* vx, const Nd4jLong *xShapeInfo, const Nd4jLong *xOffsets,
void* vy, const Nd4jLong *yShapeInfo, const Nd4jLong *yOffsets,
const int* indexes) {
__shared__ T *x, *y;
__shared__ Nd4jLong arrLenX, arrLenY;
for (int e = 0; e < numOfInd; e++ ) {
const auto xIndex = indexes[e];
// ownership test depends only on blockIdx.x, so it is uniform across the block
// and skipping the barrier below via 'continue' is safe
const bool isOwner = xIndex < gridDim.x ? blockIdx.x == xIndex : blockIdx.x == xIndex % gridDim.x;
if (!isOwner)
continue;
if (threadIdx.x == 0) {
x = reinterpret_cast<T*>(vx) + xOffsets[xIndex];
y = reinterpret_cast<T*>(vy) + yOffsets[e];
arrLenX = shape::length(xShapeInfo);
arrLenY = shape::length(yShapeInfo);
}
__syncthreads();
// TAD lengths are the same for every e; mismatch means nothing can be updated
if (arrLenX != arrLenY)
return;
for (Nd4jLong i = threadIdx.x; i < arrLenX; i += blockDim.x) {
const auto xOffset = shape::getIndexOffset(i, xShapeInfo, arrLenX);
const auto yOffset = shape::getIndexOffset(i, yShapeInfo, arrLenY);
switch (opCode) {
case 0:
x[xOffset] += y[yOffset];
break;
case 1:
x[xOffset] -= y[yOffset];
break;
case 2:
x[xOffset] *= y[yOffset];
break;
case 3:
x[xOffset] /= y[yOffset];
break;
case 4:
x[xOffset] = y[yOffset] - x[xOffset];
break;
case 5:
x[xOffset] = y[yOffset] / x[xOffset];
break;
case 6:
x[xOffset] = y[yOffset];
break;
default:
continue;
}
}
// barrier before thread 0 repoints x/y for the next owned index
__syncthreads();
}
}
template<typename T>
__host__ static void scatterUpdateCudaLauncher(const hipStream_t* stream, const int opCode, const int numOfInd, void* vx, const Nd4jLong *xShapeInfo, const Nd4jLong *xOffsets, void* vy, const Nd4jLong *yShapeInfo, const Nd4jLong *yOffsets, const int* indexes) {
// fixed 512x256 launch; MAX_NUM_THREADS bytes of dynamic shared memory are requested
// although the kernel only declares static __shared__ variables
hipLaunchKernelGGL(( scatterUpdateCuda<T>), dim3(512), dim3(256), MAX_NUM_THREADS, *stream, opCode, numOfInd, vx, xShapeInfo, xOffsets, vy, yShapeInfo, yOffsets, indexes);
}
//////////////////////////////////////////////////////////////////////////
// Host entry for scatter_update. intArgs layout: [0]=opCode, [1]=numOfDims,
// [2..2+numOfDims)=TAD dimensions, [2+numOfDims]=numOfInd, then the indices.
// Builds TAD packs for input/updates and dispatches the kernel on input's type.
void scatterUpdate(nd4j::LaunchContext* context, NDArray& input, NDArray& updates, const std::vector<int>* intArgs) {
const int opCode = (*intArgs)[0];
const int numOfDims = (*intArgs)[1];
const int numOfInd = (*intArgs)[2 + numOfDims];
std::vector<int> tadDimensions(numOfDims);
for (int e = 2; e < 2 + numOfDims; e++)
tadDimensions[e-2] = (*intArgs)[e];
auto packX = ConstantTadHelper::getInstance()->tadForDimensions(input.getShapeInfo(), tadDimensions);
auto packY = ConstantTadHelper::getInstance()->tadForDimensions(updates.getShapeInfo(), tadDimensions);
// wrap the trailing indices of intArgs as an INT32 NDArray (no copy of the host data)
NDArray indices(const_cast<int*>(intArgs->data()) + numOfDims + 3, 'c', {numOfInd}, nd4j::DataType::INT32, context);
PointersManager manager(context, "scatterUpdate");
NDArray::prepareSpecialUse({&input}, {&input, &updates, &indices});
BUILD_SINGLE_SELECTOR(input.dataType(), scatterUpdateCudaLauncher, (context->getCudaStream(), opCode, numOfInd, input.specialBuffer(), packX.platformShapeInfo(), packX.platformOffsets(), updates.specialBuffer(), packY.platformShapeInfo(), packY.platformOffsets(), reinterpret_cast<int*>(indices.getSpecialBuffer())), LIBND4J_TYPES);
NDArray::registerSpecialUse({&input}, {&input, &updates, &indices});
manager.synchronize();
}
// In-place Fisher-Yates style shuffle over a vector: element i is swapped with a
// random element r in [0, i). Iterates i downward with a grid-stride pattern.
// NOTE(review): distinct threads handle distinct i values concurrently, so the
// swap pairs can interleave; result is a valid permutation only per-draw - confirm
// the relaxed ordering is acceptable for this op.
template <typename T>
static __global__ void swapShuffleKernel(T* input, Nd4jLong* shape, Nd4jLong firstDim, Nd4jLong len, nd4j::graph::RandomGenerator* rng) {
auto tid = blockIdx.x * blockDim.x;
auto step = blockDim.x * gridDim.x;
// each thread starts at a distinct i and walks down by the total thread count
for (int i = firstDim - 1 - tid - threadIdx.x; i > 0; i -= step) {
int r = rng->relativeInt(i) % i;
if (i != r) {
T e0 = input[shape::getIndexOffset(i, shape, len)];
T e1 = input[shape::getIndexOffset(r, shape, len)];
//math::nd4j_swap<T>(input(i), input(r));
input[shape::getIndexOffset(i, shape, len)] = e1;
input[shape::getIndexOffset(r, shape, len)] = e0;
}
}
}
// Out-of-place shuffle: writes input elements (selected through the 'indices'
// permutation table) into output at shuffled positions; indices[i] is updated
// via atomicExch so later draws see the swap.
// NOTE(review): reads of indices[r]/indices[i] are not synchronized with the
// atomicExch from other threads, so interleavings can differ run to run - confirm
// nondeterministic shuffling is acceptable here.
template <typename T>
static __global__ void fillShuffleKernel(T* input, Nd4jLong* inputShape, T* output, Nd4jLong* outputShape, Nd4jLong firstDim, Nd4jLong len, int* indices, nd4j::graph::RandomGenerator* rng) {
// PRAGMA_OMP_PARALLEL_FOR_IF((firstDim-1) > Environment::getInstance()->tadThreshold())
auto tid = blockIdx.x * blockDim.x;
auto step = blockDim.x * gridDim.x;
for(int i = firstDim - 1 - tid - threadIdx.x; i > 0; i -= step) {
int r = rng->relativeInt(i) % i;
output[shape::getIndexOffset(i, outputShape, len)] = input[shape::getIndexOffset(indices[r], inputShape, len)];
if(i != r) {
output[shape::getIndexOffset(r, outputShape, len)] = input[shape::getIndexOffset(indices[i], inputShape, len)];
// output.p(r, input.e<T>(indices[i]));
// math::nd4j_swap<int>(indices[i], indices[r]);
atomicExch(&indices[i], indices[r]);
}
}
}
//////////////////////////////////////////////////////////////////////////
// Randomly shuffles 'input' along its first dimension into 'output' (or in place).
// Three paths: trivial (length-1), vector (device Fisher-Yates kernels), and
// general rank-N (host-side shuffle of sub-array views along dims != 0).
// Raises nothing; device API return codes are not checked on this path.
template <typename T>
void randomShuffle_(nd4j::LaunchContext * context, NDArray& input, NDArray& output, nd4j::graph::RandomGenerator& rng, const bool isInplace) {
// check edge cases first
int temp;
const int firstDim = input.sizeAt(0);
auto stream = context->getCudaStream();
NDArray::prepareSpecialUse({&output}, {&input});
if(input.lengthOf() == 1 || firstDim == 1) {
// nothing to shuffle; just mirror the input when writing out of place
if(!isInplace)
output.assign(input);
}
else if (input.isVector() || shape::isLikeVector(input.getShapeInfo(), temp)) {
// apply Fisher-Yates shuffle
// the RNG must live in device memory since the kernels draw from it directly
nd4j::graph::RandomGenerator* dRandom = nullptr;
hipMalloc(&dRandom, sizeof(nd4j::graph::RandomGenerator));
hipMemcpy(dRandom, &rng, sizeof(nd4j::graph::RandomGenerator), hipMemcpyHostToDevice);
T* inputBuf = reinterpret_cast<T*>(input.specialBuffer());
if(isInplace) {
hipLaunchKernelGGL(( swapShuffleKernel<T>), dim3(128), dim3(256), 1024, *stream, inputBuf, input.specialShapeInfo(), firstDim, input.lengthOf(), dRandom);
}
else {
// identity permutation table consumed (and mutated) by fillShuffleKernel
std::vector<int> indices(firstDim);
std::iota(indices.begin(), indices.end(), 0);
// element 0 is never touched by the kernel loop (i > 0), copy it up front
hipMemcpy(output.specialBuffer(), input.specialBuffer(), sizeof(T), hipMemcpyDeviceToDevice);
//output.p<T>(Nd4jLong(0), input.e<T>(0));
PointersManager pointersManager(context, "helper::randomShuffle_");
int* indicesDev = reinterpret_cast<int*>(pointersManager.replicatePointer(indices.data(), indices.size() * sizeof(int)));
T* outputBuf = reinterpret_cast<T*>(output.specialBuffer());
hipLaunchKernelGGL(( fillShuffleKernel<T>), dim3(128), dim3(256), 1024, *stream, inputBuf, input.specialShapeInfo(), outputBuf, output.specialShapeInfo(), firstDim, input.lengthOf(), indicesDev, dRandom);
pointersManager.synchronize();
}
// rng.rewindH(firstDim - 1);
hipFree(dRandom);
}
else {
// evaluate sub-arrays list of input array through all dimensions excluding first one
std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input.rankOf(), {0});
auto subArrsListIn = input.allTensorsAlongDimension(dimensions);
// apply Fisher-Yates shuffle
if(isInplace) {
PRAGMA_OMP_PARALLEL_FOR_IF((firstDim-1) > Environment::getInstance()->elementwiseThreshold())
for(int i = firstDim - 1; i > 0; --i) {
int r = rng.relativeInt(i) % i;
if(i != r)
subArrsListIn->at(i)->swapUnsafe(*subArrsListIn->at(r));
}
}
else {
// evaluate sub-arrays list of output array through all dimensions excluding first one
auto subArrsListOut = output.allTensorsAlongDimension(dimensions);
std::vector<int> indices(firstDim);
std::iota(indices.begin(), indices.end(), 0);
// tracks whether slot 0 ever received data (the loop below stops at i == 1)
bool isZeroShuffled = false;
PRAGMA_OMP_PARALLEL_FOR_IF((firstDim-1) > Environment::getInstance()->tadThreshold())
for(int i = firstDim - 1; i > 0; --i) {
int r = rng.relativeInt(i) % i;
subArrsListOut->at(i)->assign(subArrsListIn->at(indices[r]));
if(r == 0)
isZeroShuffled = true;
if(i != r) {
subArrsListOut->at(r)->assign(subArrsListIn->at(indices[i]));
math::nd4j_swap<int>(indices[i], indices[r]);
}
}
if(!isZeroShuffled)
subArrsListOut->at(0)->assign(subArrsListIn->at(0));
delete subArrsListOut;
}
rng.rewindH(firstDim-1);
delete subArrsListIn;
}
NDArray::registerSpecialUse({&output}, {&input});
}
// Type dispatcher over LIBND4J_TYPES.
void randomShuffle(nd4j::LaunchContext * context, NDArray& input, NDArray& output, nd4j::graph::RandomGenerator& rng, const bool isInplace) {
BUILD_SINGLE_SELECTOR(input.dataType(), randomShuffle_, (context, input, output, rng, isInplace), LIBND4J_TYPES);
}
BUILD_SINGLE_TEMPLATE(template void randomShuffle_, (nd4j::LaunchContext * context, NDArray& input, NDArray& output, nd4j::graph::RandomGenerator& rng, const bool isInplace), LIBND4J_TYPES);
////////////////////////////////////////////////////////////////////////
// gatherND: NOT IMPLEMENTED on this backend yet - the typed body is an empty stub,
// so calling gatherND currently leaves 'output' untouched.
template<typename T>
static void gatherND_(nd4j::LaunchContext * context, NDArray& input, NDArray& indices, NDArray& output) {
}
void gatherND(nd4j::LaunchContext * context, NDArray& input, NDArray& indices, NDArray& output) {
BUILD_SINGLE_SELECTOR(input.dataType(), gatherND_, (context, input, indices, output), LIBND4J_TYPES);
}
BUILD_SINGLE_TEMPLATE(template void gatherND_, (nd4j::LaunchContext * context, NDArray& input, NDArray& indices, NDArray& output), LIBND4J_TYPES);
//////////////////////////////////////////////////////////////////////////
// Fills 'output' with the identity pattern; delegates entirely to
// NDArray::setIdentity(), which manages the device buffer itself.
void eye(nd4j::LaunchContext * context, NDArray& output) {
output.setIdentity();
}
//////////////////////////////////////////////////////////////////////////
// For every element position e, writes the index (0..numArrays-1) of the input
// array that holds the maximum value at that position.
template <typename T, typename Z>
static __global__ void global_mergeMaxIndex_(void **inArrs, void **inShapes, const int numArrays, void *voutput, Nd4jLong *outputShape, Nd4jLong length) {
    auto output = reinterpret_cast<Z*>(voutput);

    // BUG FIX: the global thread id is blockIdx.x * blockDim.x + threadIdx.x;
    // it was computed with gridDim.x (only coincidentally correct when the launch
    // uses gridDim.x == blockDim.x)
    const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
    const auto step = gridDim.x * blockDim.x;

    for (Nd4jLong e = tid; e < length; e += step) {
        T mVal = -DataTypeUtils::max<T>();
        Z mIdx(0);

        for (int i = 0; i < numArrays; i++) {
            auto x = reinterpret_cast<T*>(inArrs[i]);
            auto xShape = reinterpret_cast<Nd4jLong *>(inShapes[i]);
            auto val = x[shape::getIndexOffset(e, xShape, length)];
            // BUG FIX: track the running maximum (mVal was never updated, so every
            // array "won") and record the winning ARRAY index i, not the element
            // position e
            if (mVal < val) {
                mVal = val;
                mIdx = static_cast<Z>(i);
            }
        }
        // note: no __syncthreads() here - the original barrier sat inside a loop whose
        // trip count differs per thread (undefined behaviour) and threads are independent
        output[shape::getIndexOffset(e, outputShape, length)] = mIdx;
    }
}
// Host side of mergeMaxIndex: gathers the input buffers/shapes into device-side
// pointer arrays (via PointersManager) and launches the kernel with a fixed
// 512x512 configuration.
template <typename T, typename Z>
static void mergeMaxIndex_(nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output) {
std::vector<void *> inBuffers(inArrs.size());
std::vector<void *> inShapes(inArrs.size());
for (int e = 0; e < inArrs.size(); e++) {
inBuffers[e] = inArrs[e]->getSpecialBuffer();
inShapes[e] = inArrs[e]->getSpecialShapeInfo();
}
PointersManager manager(context, "mergeMaxIndex");
// replicate the host pointer tables into device memory for the kernel
auto pInBuffers = reinterpret_cast<void **>(manager.replicatePointer(inBuffers.data(), inBuffers.size() * sizeof(void *)));
auto pInShapes = reinterpret_cast<void **>(manager.replicatePointer(inShapes.data(), inShapes.size() * sizeof(void *)));
auto length = output.lengthOf();
hipLaunchKernelGGL(( global_mergeMaxIndex_<T,Z>), dim3(512), dim3(512), 512, *context->getCudaStream(), pInBuffers, pInShapes, (int) inArrs.size(), output.getSpecialBuffer(), output.getSpecialShapeInfo(), length);
manager.synchronize();
}
// Double dispatch: element type of the inputs x integer type of the index output.
void mergeMaxIndex(nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output) {
BUILD_DOUBLE_SELECTOR(inArrs[0]->dataType(), output.dataType(), mergeMaxIndex_, (context, inArrs, output), LIBND4J_TYPES, INTEGER_TYPES);
}
BUILD_DOUBLE_TEMPLATE(template void mergeMaxIndex_, (nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output), LIBND4J_TYPES, INTEGER_TYPES);
//////////////////////////////////////////////////////////////////////////
// For every element position e, writes the maximum value across all input arrays.
template <typename T>
static __global__ void global_mergeMax_(void **inArrs, void **inShapes, const int numArrays, void *voutput, Nd4jLong *outputShape, Nd4jLong length) {
    auto output = reinterpret_cast<T*>(voutput);

    // BUG FIX: global thread id uses blockDim.x (was gridDim.x - only coincidentally
    // correct when the launch uses gridDim.x == blockDim.x)
    const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
    const auto step = gridDim.x * blockDim.x;

    for (Nd4jLong e = tid; e < length; e += step) {
        T mVal = -DataTypeUtils::max<T>();

        for (int i = 0; i < numArrays; i++) {
            auto x = reinterpret_cast<T*>(inArrs[i]);
            auto xShape = reinterpret_cast<Nd4jLong *>(inShapes[i]);
            auto val = x[shape::getIndexOffset(e, xShape, length)];
            if (mVal < val)
                mVal = val;
        }
        // note: removed the __syncthreads() that sat inside this divergent-trip-count
        // loop (undefined behaviour); threads are fully independent here
        output[shape::getIndexOffset(e, outputShape, length)] = mVal;
    }
}
// Host side of mergeMax: replicates the input buffer/shape pointer tables to the
// device and launches the kernel with a fixed 512x512 configuration.
template<typename T>
static void mergeMax_(nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output) {
std::vector<void *> inBuffers(inArrs.size());
std::vector<void *> inShapes(inArrs.size());
for (int e = 0; e < inArrs.size(); e++) {
inBuffers[e] = inArrs[e]->getSpecialBuffer();
inShapes[e] = inArrs[e]->getSpecialShapeInfo();
}
PointersManager manager(context, "mergeMax");
auto pInBuffers = reinterpret_cast<void **>(manager.replicatePointer(inBuffers.data(), inBuffers.size() * sizeof(void *)));
auto pInShapes = reinterpret_cast<void **>(manager.replicatePointer(inShapes.data(), inShapes.size() * sizeof(void *)));
auto length = output.lengthOf();
hipLaunchKernelGGL(( global_mergeMax_<T>), dim3(512), dim3(512), 512, *context->getCudaStream(), pInBuffers, pInShapes, (int) inArrs.size(), output.getSpecialBuffer(), output.getSpecialShapeInfo(), length);
manager.synchronize();
}
BUILD_SINGLE_TEMPLATE(template void mergeMax_, (nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output), LIBND4J_TYPES);
// Dispatches on the OUTPUT data type (inputs are assumed to match it).
void mergeMax(nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output) {
BUILD_SINGLE_SELECTOR(output.dataType(), mergeMax_, (context, inArrs, output), LIBND4J_TYPES);
}
//////////////////////////////////////////////////////////////////////////
// For every element position e, writes the arithmetic mean of the values across
// all input arrays.
template <typename T>
static __global__ void global_mergeAvg_(void **inArrs, void **inShapes, const int numArrays, void *voutput, Nd4jLong *outputShape, Nd4jLong length) {
    auto output = reinterpret_cast<T*>(voutput);

    // BUG FIX: global thread id uses blockDim.x (was gridDim.x - only coincidentally
    // correct when the launch uses gridDim.x == blockDim.x)
    const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
    const auto step = gridDim.x * blockDim.x;

    for (Nd4jLong e = tid; e < length; e += step) {
        T sum(0.0f);

        for (int i = 0; i < numArrays; i++) {
            auto x = reinterpret_cast<T*>(inArrs[i]);
            auto xShape = reinterpret_cast<Nd4jLong *>(inShapes[i]);
            sum += x[shape::getIndexOffset(e, xShape, length)];
        }
        output[shape::getIndexOffset(e, outputShape, length)] = sum / numArrays;
    }
}
// Host side of mergeAvg: replicates the input buffer/shape pointer tables to the
// device and launches the kernel with a fixed 512x512 configuration.
template<typename T>
static void mergeAvg_(nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output) {
std::vector<void *> inBuffers(inArrs.size());
std::vector<void *> inShapes(inArrs.size());
for (int e = 0; e < inArrs.size(); e++) {
inBuffers[e] = inArrs[e]->getSpecialBuffer();
inShapes[e] = inArrs[e]->getSpecialShapeInfo();
}
PointersManager manager(context, "mergeAvg");
auto pInBuffers = reinterpret_cast<void **>(manager.replicatePointer(inBuffers.data(), inBuffers.size() * sizeof(void *)));
auto pInShapes = reinterpret_cast<void **>(manager.replicatePointer(inShapes.data(), inShapes.size() * sizeof(void *)));
auto length = output.lengthOf();
hipLaunchKernelGGL(( global_mergeAvg_<T>), dim3(512), dim3(512), 512, *context->getCudaStream(), pInBuffers, pInShapes, (int) inArrs.size(), output.getSpecialBuffer(), output.getSpecialShapeInfo(), length);
manager.synchronize();
}
BUILD_SINGLE_TEMPLATE(template void mergeAvg_, (nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output), LIBND4J_TYPES);
// Dispatches on the OUTPUT data type (inputs are assumed to match it).
void mergeAvg(nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output) {
BUILD_SINGLE_SELECTOR(output.dataType(), mergeAvg_, (context, inArrs, output), LIBND4J_TYPES);
}
//////////////////////////////////////////////////////////////////////////
// For every element position e, writes the sum of the values across all input arrays.
template <typename T>
static __global__ void global_mergeAdd_(void **inArrs, void **inShapes, const int numArrays, void *voutput, Nd4jLong *outputShape, Nd4jLong length) {
    auto output = reinterpret_cast<T*>(voutput);

    // BUG FIX: global thread id uses blockDim.x (was gridDim.x - only coincidentally
    // correct when the launch uses gridDim.x == blockDim.x)
    const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
    const auto step = gridDim.x * blockDim.x;

    for (Nd4jLong e = tid; e < length; e += step) {
        T sum(0.0f);

        for (int i = 0; i < numArrays; i++) {
            auto x = reinterpret_cast<T*>(inArrs[i]);
            auto xShape = reinterpret_cast<Nd4jLong *>(inShapes[i]);
            sum += x[shape::getIndexOffset(e, xShape, length)];
        }
        output[shape::getIndexOffset(e, outputShape, length)] = sum;
    }
}
// Host side of mergeAdd: replicates the input buffer/shape pointer tables to the
// device and launches the kernel with a fixed 512x512 configuration.
template<typename T>
static void mergeAdd_(nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output) {
std::vector<void *> inBuffers(inArrs.size());
std::vector<void *> inShapes(inArrs.size());
for (int e = 0; e < inArrs.size(); e++) {
inBuffers[e] = inArrs[e]->getSpecialBuffer();
inShapes[e] = inArrs[e]->getSpecialShapeInfo();
}
PointersManager manager(context, "mergeAdd");
auto pInBuffers = reinterpret_cast<void **>(manager.replicatePointer(inBuffers.data(), inBuffers.size() * sizeof(void *)));
auto pInShapes = reinterpret_cast<void **>(manager.replicatePointer(inShapes.data(), inShapes.size() * sizeof(void *)));
auto length = output.lengthOf();
hipLaunchKernelGGL(( global_mergeAdd_<T>), dim3(512), dim3(512), 512, *context->getCudaStream(), pInBuffers, pInShapes, (int) inArrs.size(), output.getSpecialBuffer(), output.getSpecialShapeInfo(), length);
manager.synchronize();
}
BUILD_SINGLE_TEMPLATE(template void mergeAdd_, (nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output), LIBND4J_TYPES);
// Dispatches on the OUTPUT data type (inputs are assumed to match it).
void mergeAdd(nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output) {
BUILD_SINGLE_SELECTOR(output.dataType(), mergeAdd_, (context, inArrs, output), LIBND4J_TYPES);
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// In-place clip-by-norm over sub-arrays: one block per sub-array (grid-stride over
// 'arr'); scales the sub-array by clipNorm / norm2 only when its L2 norm exceeds
// clipNorm. norm2Buf holds the precomputed per-sub-array norms.
template <typename T>
static __global__ void clipByNormInplaceKernel(Nd4jLong numOfSubArrs, T* inputBuffer, Nd4jLong* shape, Nd4jLong* inputOffsets, T* norm2Buf, Nd4jLong* norm2shape, T clipNorm) {
for (int arr = blockIdx.x; arr < numOfSubArrs; arr += gridDim.x) {
__shared__ T* z;
__shared__ Nd4jLong len;
if (threadIdx.x == 0) {
len = shape::length(shape);
// base of this sub-array inside the flat input buffer
z = inputBuffer + inputOffsets[arr];
}
__syncthreads();
for (int j = threadIdx.x; j < len; j+= blockDim.x) {
auto xIndex = shape::getIndexOffset(j, shape, len);
if(norm2Buf[arr] > clipNorm)
z[xIndex] *= clipNorm / norm2Buf[arr]; // case with ews = 1 and ordering is 'c'
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Out-of-place clip-by-norm over sub-arrays: one block per sub-array; writes x
// scaled by clipNorm / norm2 when the sub-array's L2 norm exceeds clipNorm,
// otherwise copies x through unchanged.
template <typename T>
static __global__ void clipByNormKernel(Nd4jLong numOfSubArrs, T* inputBuffer, Nd4jLong* shape, Nd4jLong* inputOffsets, T* outputBuffer, Nd4jLong* outputShape, Nd4jLong* outputOffsets, T* norm2Buf, Nd4jLong* norm2shape, T clipNorm) {
for (Nd4jLong arr = blockIdx.x; arr < numOfSubArrs; arr += gridDim.x) {
__shared__ T* x, *z;
__shared__ Nd4jLong lenX, lenZ;
__shared__ T norm2;
if (threadIdx.x == 0) {
lenX = shape::length(shape);
x = inputBuffer + inputOffsets[arr];
z = outputBuffer + outputOffsets[arr];
lenZ = shape::length(outputShape);
// per-sub-array precomputed L2 norm
norm2 = norm2Buf[shape::getIndexOffset(arr, norm2shape, numOfSubArrs)];
//printf("%d: %lf (vs %lf) %lld %lld\n", arr, norm2, clipNorm, lenX, lenZ);
}
__syncthreads();
for (Nd4jLong j = threadIdx.x; j < lenZ; j+= blockDim.x) {
auto xIndex = shape::getIndexOffset(j, shape, lenX);
auto zIndex = shape::getIndexOffset(j, outputShape, lenZ);
if(norm2 > clipNorm) {
z[zIndex] = x[xIndex] * clipNorm / norm2; // case with ews = 1 and ordering is 'c'
} else {
z[zIndex] = x[xIndex];
}
//printf("%lld: %lf %lf\n", j, z[zIndex], x[xIndex]);
}
// barrier before thread 0 repoints x/z/norm2 for the next sub-array
__syncthreads();
}
}
//////////////////////////////////////////////////////////////////////////
// clip_by_norm host driver. Computes per-slice L2 norms along 'dimensions' and
// either rescales the whole array on the host path (single norm) or launches the
// per-sub-array kernels (TAD path), in place or into 'output'.
template<typename T>
static void clipByNorm_(nd4j::LaunchContext * context, NDArray& input, NDArray& output, const std::vector<int>& dimensions, NDArray const& clipNormA, const bool isInplace) {
const int rank = input.rankOf();
auto norm2 = input.reduceAlongDims(reduce::Norm2, dimensions);
clipNormA.syncToHost();
//norm2.printBuffer("Norm2");
T const clipNorm = clipNormA.e<T>(0);
//clipNormA.printBuffer("ClipNorm");
auto stream = context->getCudaStream();
if (isInplace) {
if(norm2.lengthOf() == 1) {
// whole-array norm: scale on the host-side NDArray API
norm2.syncToHost();
T norm2Val = norm2.e<T>(0);
if(norm2Val > clipNorm)
input *= clipNorm / norm2Val;
}
else {
std::vector<int> dimsToExclude = ShapeUtils::evalDimsToExclude(rank, dimensions);
const Nd4jLong numOfSubArrs = ShapeUtils::getNumOfSubArrs(input.getShapeInfo(), dimsToExclude);
auto packX = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(input.getShapeInfo(), dimensions);
//auto packZ = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(output.getShapeInfo(), dimsToExclude);
T* inputBuffer = reinterpret_cast<T*>(input.specialBuffer());
T* norm2buf = reinterpret_cast<T*>(norm2.specialBuffer());
hipLaunchKernelGGL(( clipByNormInplaceKernel<T>), dim3(256), dim3(512), 1024, *stream, numOfSubArrs, inputBuffer, packX.specialShapeInfo(), packX.specialOffsets(), norm2buf, norm2.specialShapeInfo(), clipNorm);
}
}
else {
if(norm2.lengthOf() == 1) {
norm2.syncToHost();
T norm2Val = norm2.e<T>(0);
if(norm2Val > clipNorm)
output.assign( input * (clipNorm / norm2Val));
else
output.assign( input );
}
else {
std::vector<int> dimsToExclude = ShapeUtils::evalDimsToExclude(rank, dimensions);
const Nd4jLong numOfSubArrs = ShapeUtils::getNumOfSubArrs(input.getShapeInfo(), dimsToExclude);
auto packX = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(input.getShapeInfo(), dimensions);
auto packZ = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(output.getShapeInfo(), dimensions);
T* inputBuffer = reinterpret_cast<T*>(input.specialBuffer());
T* norm2buf = reinterpret_cast<T*>(norm2.specialBuffer());
T* outputBuffer = reinterpret_cast<T*>(output.specialBuffer());
hipLaunchKernelGGL(( clipByNormKernel<T>), dim3(256), dim3(512), 1024, *stream, numOfSubArrs, inputBuffer, packX.specialShapeInfo(), packX.specialOffsets(), outputBuffer, packZ.specialShapeInfo(), packZ.specialOffsets(), norm2buf, norm2.specialShapeInfo(), clipNorm);
}
}
}
// Dispatches on the OUTPUT data type (FLOAT_TYPES only).
void clipByNorm(nd4j::LaunchContext * context, NDArray& input, NDArray& output, const std::vector<int>& dimensions, const NDArray& clipNorm, const bool isInplace) {
BUILD_SINGLE_SELECTOR(output.dataType(), clipByNorm_, (context, input, output, dimensions, clipNorm, isInplace), FLOAT_TYPES);
}
BUILD_SINGLE_TEMPLATE(template void clipByNorm_, (nd4j::LaunchContext * context, NDArray& input, NDArray& output, const std::vector<int>& dimensions, const NDArray& clipNorm, const bool isInplace), FLOAT_TYPES);
// clipByGlobalNorm: NOT IMPLEMENTED on this backend yet - the typed body is an
// empty stub, so the outputs are left untouched.
template <typename T>
static void clipByGlobalNorm_(nd4j::LaunchContext * context, std::vector<NDArray*> const& inputs, double clipNorm, nd4j::memory::Workspace* workspace, std::vector<NDArray*>& outputs, bool isInplace) {
}
void clipByGlobalNorm(nd4j::LaunchContext * context, std::vector<NDArray*> const& inputs, double clipNorm, nd4j::memory::Workspace* workspace, std::vector<NDArray*>& outputs, bool isInplace) {
BUILD_SINGLE_SELECTOR(outputs[0]->dataType(), clipByGlobalNorm_, (context, inputs, clipNorm, workspace, outputs, isInplace), FLOAT_TYPES);
}
BUILD_SINGLE_TEMPLATE(template void clipByGlobalNorm_, (nd4j::LaunchContext * context, std::vector<NDArray*> const& inputs, double clipNorm, nd4j::memory::Workspace* workspace, std::vector<NDArray*>& outputs, bool isInplace), FLOAT_TYPES);
//////////////////////////////////////////////////////////////////////////
// clipByNormBP: NOT IMPLEMENTED on this backend yet - the typed body is an
// empty stub, so gradI is left untouched.
template<typename T>
static void clipByNormBP_(nd4j::LaunchContext * context, const NDArray& input, const NDArray& gradO, NDArray& gradI /*output*/, const std::vector<int>& dimensions, const NDArray& clipNorm) {
}
void clipByNormBP(nd4j::LaunchContext * context, const NDArray& input, const NDArray& gradO, NDArray& gradI /*output*/, const std::vector<int>& dimensions, const NDArray& clipNorm) {
BUILD_SINGLE_SELECTOR(gradI.dataType(), clipByNormBP_, (context, input, gradO, gradI, dimensions, clipNorm), FLOAT_TYPES);
}
BUILD_SINGLE_TEMPLATE(template void clipByNormBP_, (nd4j::LaunchContext * context, const NDArray& input, const NDArray& gradO, NDArray& gradI /*output*/, const std::vector<int>& dimensions, const NDArray& clipNorm), FLOAT_TYPES);
//////////////////////////////////////////////////////////////////////////
// clip_by_averaged_norm: scales by clipNorm / (norm2 / length) - i.e. compares the
// AVERAGE norm against the clip value. Empty 'dimensions' means a single all-reduce;
// otherwise each TAD along 'dimensions' is scaled independently on the host.
template<typename T>
static void clipByAveraged_(nd4j::LaunchContext * context, NDArray& input, NDArray& output, const std::vector<int>& dimensions, const NDArray& clipNorm, const bool isInplace) {
auto cn = clipNorm.e<T>(0);
if (dimensions.size() == 0) {
// all-reduce
T n2 = input.reduceNumber(reduce::Norm2).e<T>(0) / input.lengthOf();
if (n2 <= cn) {
if (!isInplace)
output.assign(input);
}
else {
const T factor = cn / n2;
//auto lambda = LAMBDA_T(_x, factor) { return _x * factor; };
//input.applyLambda<T>(lambda, &output);
output.assign(input * factor);
}
}
else {
// along dimension
auto norm2 = input.reduceAlongDims(reduce::Norm2, dimensions, false);
if (!isInplace)
output.assign(input);
// both TAD lists view 'output', which already holds the input data at this point
auto tads = output.allTensorsAlongDimension(dimensions);
auto outTads = output.allTensorsAlongDimension(dimensions);
// TODO: make this CUDA-compliant somehow
for (int e = 0; e < tads->size(); e++) {
T n2 = norm2.e<T>(e) / tads->at(e)->lengthOf();
const T factor = cn / n2;
if (n2 > cn) {
//auto lambda = LAMBDA_T(_x, factor) {return _x * factor;};
tads->at(e)->applyScalar(scalar::Multiply, factor, outTads->at(e));//applyLambda<T>(lambda, &output);
}
}
delete tads;
delete outTads;
}
}
void clipByAveraged(nd4j::LaunchContext * context, NDArray& input, NDArray& output, const std::vector<int>& dimensions, const NDArray& clipNorm, const bool isInplace) {
BUILD_SINGLE_SELECTOR(input.dataType(), clipByAveraged_, (context, input, output, dimensions, clipNorm, isInplace), FLOAT_TYPES);
}
BUILD_SINGLE_TEMPLATE(template void clipByAveraged_, (nd4j::LaunchContext * context, NDArray& input, NDArray& output, const std::vector<int>& dimensions, const NDArray& clipNorm, const bool isInplace), FLOAT_TYPES);
/*
if (d1 > params[1])
return params[1];
else if (d1 < params[0])
return params[0];
else return d1;
*/
// Clamps every input element into [leftBound, rightBound] and writes it to output.
// Fast path when both buffers are contiguous (ews == 1); otherwise offsets are
// resolved through the shape infos.
template <typename T>
static void __global__ clipByValueKernel(void* input, Nd4jLong* inputShape, void* output, Nd4jLong* outputShape, double leftBound, double rightBound) {
    __shared__ T* outputBuf;
    __shared__ T* inputBuf;
    __shared__ Nd4jLong length;
    __shared__ bool linearBuffers;
    if (threadIdx.x == 0) {
        outputBuf = reinterpret_cast<T *>(output);
        inputBuf = reinterpret_cast<T *>(input);
        length = shape::length(inputShape);
        linearBuffers = shape::elementWiseStride(inputShape) == shape::elementWiseStride(outputShape) && shape::elementWiseStride(inputShape) == 1;
    }
    __syncthreads();

    // BUG FIX: global thread id is blockIdx.x * blockDim.x + threadIdx.x. The old
    // blockIdx.x * gridDim.x form left whole element ranges unvisited whenever
    // gridDim.x != blockDim.x (as with the 256x512 launch below).
    const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
    const auto step = gridDim.x * blockDim.x;

    for (Nd4jLong e = tid; e < length; e += step) {
        if (linearBuffers) {
            if (inputBuf[e] > rightBound) outputBuf[e] = (T) rightBound;
            else if (inputBuf[e] < leftBound) outputBuf[e] = (T) leftBound;
            else outputBuf[e] = inputBuf[e];
        }
        else {
            auto inputOffset = shape::getIndexOffset(e, inputShape, length);
            auto outputOffset = shape::getIndexOffset(e, outputShape, length);
            if (inputBuf[inputOffset] > rightBound) outputBuf[outputOffset] = (T) rightBound;
            else if (inputBuf[inputOffset] < leftBound) outputBuf[outputOffset] = (T) leftBound;
            // BUG FIX: pass-through must read the INPUT offset (was inputBuf[outputOffset],
            // which reads the wrong element whenever input/output strides differ)
            else outputBuf[outputOffset] = inputBuf[inputOffset];
        }
    }
}
template <typename T>
static void clipByValue_(nd4j::LaunchContext * context, NDArray& input, double leftBound, double rightBound, NDArray& output) {
auto stream = context->getCudaStream();
if (!input.isActualOnDeviceSide())
input.syncToDevice();
NDArray::prepareSpecialUse({&output}, {&input});
hipLaunchKernelGGL(( clipByValueKernel<T>), dim3(256), dim3(512), 8192, *stream, input.specialBuffer(), input.specialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), leftBound, rightBound);
NDArray::registerSpecialUse({&output}, {&input});
}
// Public entry point: dispatches clipByValue_ on the input's (float) data type.
void clipByValue(nd4j::LaunchContext * context, NDArray& input, double leftBound, double rightBound, NDArray& output) {
BUILD_SINGLE_SELECTOR(input.dataType(), clipByValue_, (context, input, leftBound, rightBound, output), FLOAT_TYPES);
}
BUILD_SINGLE_TEMPLATE(template void clipByValue_, (nd4j::LaunchContext * context, NDArray& input, double leftBound, double rightBound, NDArray& output);, FLOAT_TYPES);
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename T>
// Mirror-pad for rank <= 1 inputs. z (length zLen) is filled from x (length xLen):
// left pad reflects from index leftSideCorrected - i, the middle copies x directly,
// and everything else (right pad) reflects from index len - i. leftSideCorrected and
// len are precomputed by the host to encode REFLECT vs SYMMETRIC border handling.
static __global__ void mirrorPadLinearKernel(void const* vx, Nd4jLong* xShape, void* vz, Nd4jLong* zShape, Nd4jLong leftSide, Nd4jLong leftSideCorrected, Nd4jLong xLen, Nd4jLong len, Nd4jLong zLen) {
__shared__ T const* x;
__shared__ T* z;
if (threadIdx.x == 0) {
x = reinterpret_cast<T const*>(vx);
z = reinterpret_cast<T*>(vz);
}
__syncthreads();
auto start = blockIdx.x * blockDim.x + threadIdx.x;
auto step = blockDim.x * gridDim.x;
// grid-stride loop over output elements
for(int i = start; i < zLen; i+= step) {
auto zIndex = shape::getIndexOffset(i, zShape, zLen);
auto xIndex = shape::getIndexOffset(len - i, xShape, xLen); // default: right-side reflection
if (i < leftSide) // left side
xIndex = shape::getIndexOffset(leftSideCorrected - i, xShape, xLen);
else if(i >= leftSide && i < leftSide + xLen) // middle
xIndex = shape::getIndexOffset(i - leftSide, xShape, xLen);
// else // right side
// z[i] = x[len - i];
z[zIndex] = x[xIndex];
}
}
template <typename F, typename I>
// Mirror-pad for rank >= 2 inputs. For every output element, converts its linear
// index to coordinates, then folds each coordinate back into the input's range by
// reflecting off the left pad, passing the middle through, or reflecting off the
// right. paddings has rank 2 (one {left,right} pair per dimension); reflBorder is
// 0 for SYMMETRIC, 1 for REFLECT. Per-thread coordinate scratch lives in dynamic
// shared memory (rank Nd4jLongs per thread).
static __global__ void mirrorPadKernel(void const* vx, Nd4jLong* xShape, void* vz, Nd4jLong* zShape, Nd4jLong outLen, void const* paddings, Nd4jLong* paddingShape, int reflBorder) {
__shared__ F const* x;
__shared__ I const* pads;
__shared__ F* z;
__shared__ Nd4jLong zRank, rank;
__shared__ Nd4jLong* xShapeOf, *xStrideOf, *padsShapeOf, *padsStrideOf;
__shared__ Nd4jLong* zShapeOf, *zStrideOf;
__shared__ Nd4jLong* xIdx;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
xIdx = reinterpret_cast<Nd4jLong*>(shmem);
rank = shape::rank(xShape);
x = reinterpret_cast<F const*>(vx);//
pads = reinterpret_cast<I const*>(paddings);
z = reinterpret_cast<F*>(vz);
xShapeOf = shape::shapeOf(xShape);
xStrideOf = shape::stride(xShape);
zShapeOf = shape::shapeOf(zShape);
zRank = shape::rank(zShape);
zStrideOf = shape::stride(zShape);
padsShapeOf = shape::shapeOf(paddingShape);
padsStrideOf = shape::stride(paddingShape);
}
__syncthreads();
auto start = threadIdx.x + blockIdx.x * blockDim.x;
auto step = blockDim.x * gridDim.x;
for(Nd4jLong i = start; i < outLen; i+= step) {
auto xzCoord = xIdx + threadIdx.x * rank; // this thread's coordinate scratch
//auto zxCoord = xIdx + (threadIdx.x + threadIdx.x % 2 + 1) * rank;
shape::index2coords(rank, zShapeOf, i, xzCoord);
auto outOffset = shape::getOffset(0, zShapeOf, zStrideOf, xzCoord, rank);
// auto intStep = blockDim.y * gridDim.y;
// fold each output coordinate back into the input's index range
for(int j = 0; j < rank; j++) {
const Nd4jLong inLen = shape::sizeAt(xShape, j);
Nd4jLong coords[2] = {j, 0}; // row j, column 0 => left pad of dim j
auto padOffset = shape::getOffset(0, padsShapeOf, padsStrideOf, coords, 2); // padding already has rank 2
const auto leftSide = pads[padOffset];
const auto leftSideCorrected = leftSide - reflBorder;
const Nd4jLong len = 2 * (inLen - 1) + leftSide + reflBorder;
if(xzCoord[j] < leftSide) // left side
xzCoord[j] = leftSideCorrected - xzCoord[j];
else if(xzCoord[j] >= leftSide && xzCoord[j] < leftSide + inLen) // middle
xzCoord[j] = xzCoord[j] - leftSide;
else if (len > xzCoord[j]) // right side
xzCoord[j] = len - xzCoord[j];
else
xzCoord[j] = xzCoord[j] - len;
}
auto inOffset = shape::getOffset(0, xShapeOf, xStrideOf, xzCoord, rank);
z[outOffset] = x[inOffset];
}
}
template<typename F, typename I>
// Host-side mirrorPad dispatcher: rank <= 1 inputs go through the linear kernel
// (border parameters precomputed here from paddings[0]); higher ranks use the
// general coordinate-folding kernel with 8192 bytes of dynamic shared memory.
static void mirrorPad_(nd4j::LaunchContext * context, const NDArray& input, const NDArray& paddings, NDArray& output, const int mode) {
// mode: 0 - REFLECT, else - SYMMETRIC
const int reflBorder = (bool)mode ? 1 : 0;
const int rank = input.rankOf();
const Nd4jLong outLen = output.lengthOf();
auto stream = context->getCudaStream();
NDArray::prepareSpecialUse({&output}, {&input, &paddings});
if(rank <= 1) {
const Nd4jLong inLen = input.lengthOf();
const auto leftSide = paddings.e<Nd4jLong>(0);
const auto leftSideCorrected = leftSide - reflBorder;
const Nd4jLong len = 2*(inLen-1) + leftSide + reflBorder;
hipLaunchKernelGGL(( mirrorPadLinearKernel<F>), dim3(256), dim3(512), 256, *stream, input.getSpecialBuffer(), input.getSpecialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), leftSide, leftSideCorrected, inLen, len, outLen);
nd4j::DebugHelper::checkErrorCode(stream, "helpers::mirrorPadLinearKernel(...) failed");
}
else {
hipLaunchKernelGGL(( mirrorPadKernel<F, I>), dim3(256), dim3(256), 8192, *stream, input.getSpecialBuffer(), input.getSpecialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), outLen, paddings.getSpecialBuffer(), paddings.getSpecialShapeInfo(), reflBorder);
nd4j::DebugHelper::checkErrorCode(stream, "helpers::mirrorPadKernel(...) failed");
}
NDArray::registerSpecialUse({&output}, {&input, &paddings});
}
// Public entry point: dispatches mirrorPad_ on (input type, paddings type).
void mirrorPad(nd4j::LaunchContext * context, const NDArray& input, const NDArray& paddings, NDArray& output, const int mode) {
BUILD_DOUBLE_SELECTOR(input.dataType(), paddings.dataType(), mirrorPad_, (context, input, paddings, output, mode), LIBND4J_TYPES, INTEGER_TYPES);
}
BUILD_DOUBLE_TEMPLATE(template void mirrorPad_, (nd4j::LaunchContext * context, const NDArray& input, const NDArray& paddings, NDArray& output, const int mode), LIBND4J_TYPES, INTEGER_TYPES);
//////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////
// Concatenates inArrs along `axis` into output. Builds one sub-array view of
// output per input (via half-open index intervals along the axis), copies the
// per-array device buffer/shape pointers to global memory, and launches the
// typed concat kernel which copies each input into its matching output view.
void concat(nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output, const int axis) {
const int numOfArrs = inArrs.size();
for(int i = 0; i < numOfArrs; ++i)
if(!inArrs[i]->isActualOnDeviceSide()) inArrs[i]->syncToDevice();
const int rank = inArrs[0]->rankOf();
const int rank2 = 2*rank;
// indices[i] holds rank pairs {start, end} describing output's sub-array for input i
std::vector<std::vector<Nd4jLong>> indices(numOfArrs, std::vector<Nd4jLong>(rank2,0));
// take into account indices for first array
indices[0][2 * axis + 1] = inArrs[0]->sizeAt(axis);
// loop through the rest of input arrays
for(int i = 1; i < numOfArrs; ++i) {
indices[i][2 * axis] = indices[i-1][2 * axis + 1]; // index start from
indices[i][2 * axis + 1] = indices[i-1][2 * axis + 1] + inArrs[i]->sizeAt(axis); // index end with (excluding)
}
std::vector<NDArray*> outSubArrs(numOfArrs);
for(int i = 0; i < numOfArrs; ++i)
outSubArrs[i] = new NDArray(output(indices[i], true));
// prepare arrays of pointers on buffers and shapes
std::vector<void*> hOutBuffers(numOfArrs), hInBuffers(numOfArrs);
std::vector<Nd4jLong*> hOutShapeInfo(numOfArrs), hInShapeInfo(numOfArrs);
for(int i = 0; i < numOfArrs; ++i) {
hOutBuffers[i] = outSubArrs[i]->getSpecialBuffer();
hInBuffers[i] = inArrs[i]->getSpecialBuffer();
hOutShapeInfo[i] = outSubArrs[i]->getSpecialShapeInfo();
hInShapeInfo[i] = inArrs[i]->getSpecialShapeInfo();
}
// allocate and copy all buffers and shapes arrays to global memory
PointersManager manager(context, "helpers::concat");
void* dOutBuffers = manager.replicatePointer(hOutBuffers.data(), hOutBuffers.size() * sizeof(void*));
void* dInBuffers = manager.replicatePointer(hInBuffers.data(), hInBuffers.size() * sizeof(void*));
void* dInShapeInfo = manager.replicatePointer(hInShapeInfo.data(), hInShapeInfo.size() * sizeof(Nd4jLong*));
void* dOutShapeInfo = manager.replicatePointer(hOutShapeInfo.data(), hOutShapeInfo.size() * sizeof(Nd4jLong*));
BUILD_SINGLE_SELECTOR(inArrs[0]->dataType(), concatCudaLauncher, (numOfArrs, context->getCudaStream(), dInBuffers, dInShapeInfo, dOutBuffers, dOutShapeInfo), LIBND4J_TYPES);
manager.synchronize();
for(int i = 0; i < numOfArrs; ++i)
delete outSubArrs[i];
// NOTE(review): the kernel reads the inputs' DEVICE buffers, so tickReadDevice()
// looks like the intended counter here rather than tickReadHost() — confirm
// against NDArray's read/write tracking semantics.
for(int i = 0; i < numOfArrs; ++i)
inArrs[i]->tickReadHost();
output.tickWriteDevice();
}
template <typename X, typename Y>
// Scatter-copy kernel: for each index i in `indices`, writes updates[i] into the
// i-th TAD of x at position indices[i] within that TAD. X is the data type,
// Y the index type. Grid-stride loop over iLength indices; no bounds check is
// performed on idx against xLength — callers must supply valid indices.
static _CUDA_G void scatterSimpleKernel(void *vx, Nd4jLong *xTadShape, Nd4jLong *xTadOffsets, Nd4jLong xLength, Nd4jLong numTads, void *vi, Nd4jLong *iShapeInfo, Nd4jLong iLength, void *vu, Nd4jLong *uShapeInfo, Nd4jLong uLength) {
auto u = reinterpret_cast<X*>(vu);
auto indices = reinterpret_cast<Y*>(vi);
auto tid = threadIdx.x + blockIdx.x * blockDim.x;
for (int i = tid; i < iLength; i += blockDim.x * gridDim.x) {
auto x = reinterpret_cast<X*>(vx) + xTadOffsets[i]; // base of the i-th TAD
auto idx = indices[shape::getIndexOffset(i, iShapeInfo, iLength)];
x[shape::getIndexOffset(idx, xTadShape, xLength)] = u[shape::getIndexOffset(i, uShapeInfo, uLength)];
}
}
template <typename X, typename Y>
// Host launcher for scatterSimpleKernel: builds the TAD pack for the dimensions
// EXCLUDED from `dimensions`, then launches with a fixed 256x256 configuration.
void scatterSimple_(nd4j::LaunchContext * context, const int opId, NDArray& input, const NDArray& updates, const NDArray& indices, const std::vector<int>& dimensions) {
auto dims = ShapeUtils::evalDimsToExclude(input.rankOf(), dimensions);
auto packX = ConstantTadHelper::getInstance()->tadForDimensions(input.getShapeInfo(), dims);
auto xLength = shape::length(packX.primaryShapeInfo());
auto iLength = indices.lengthOf();
auto uLength = updates.lengthOf();
hipLaunchKernelGGL(( scatterSimpleKernel<X,Y>), dim3(256), dim3(256), 1024, *context->getCudaStream(), input.getSpecialBuffer(), packX.platformShapeInfo(), packX.platformOffsets(), xLength, packX.numberOfTads(), indices.getSpecialBuffer(), indices.getSpecialShapeInfo(), iLength, updates.getSpecialBuffer(), updates.getSpecialShapeInfo(), uLength);
}
// Public entry point for the scatter-copy helper. Only opId == 6 (copy) is
// supported; any other op throws. Dispatches on (data type, index type).
void scatterSimple(nd4j::LaunchContext * context, const int opId, NDArray& input, const NDArray& updates, const NDArray& indices, const std::vector<int>& dimensions) {
auto xType = input.dataType();
auto yType = indices.dataType();
if (opId != 6)
throw std::runtime_error("scatterSimple: only copy op is supported");
NDArray::prepareSpecialUse({&input}, {&updates, &indices});
BUILD_DOUBLE_SELECTOR(xType, yType, scatterSimple_, (context, opId, input, updates, indices, dimensions), LIBND4J_TYPES, INTEGER_TYPES);
NDArray::registerSpecialUse({&input}, {&updates, &indices});
}
BUILD_SINGLE_TEMPLATE(template void concatCudaLauncher, (const int numOfArrs, const hipStream_t *stream, void* pVx, void* pxShapeInfo, void* pVz, void* pzShapeInfo), LIBND4J_TYPES);
BUILD_DOUBLE_TEMPLATE(template void padCudaLauncher, (const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream, const int mode, const void *vx, const Nd4jLong *xShapeInfo, const void *vy, const Nd4jLong *yShapeInfo, void *vz, const Nd4jLong *zShapeInfo, const void* vPadVal), LIBND4J_TYPES, INTEGER_TYPES);
}
}
}
| 8d07a72d59fb9ba038455de585faf8d238c7bb1f.cu | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma (iuriish@yahoo.com), created on 20.04.2018
//
#include<ops/declarable/helpers/transforms.h>
#include <array/ResultSet.h>
#include <helpers/ShapeUtils.h>
#include <numeric>
#include <NDArrayFactory.h>
#include <helpers/TAD.h>
#include <exceptions/cuda_exception.h>
#include <PointersManager.h>
#include <ConstantTadHelper.h>
namespace nd4j {
namespace ops {
namespace helpers {
///////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////
template<typename T>
// Concat kernel: blocks are partitioned among the numOfArrs input/output pairs;
// each block copies its [start, end) slice of one input array into the matching
// output sub-array, using shape-info offsets on both sides. pVx/pVz are device
// arrays of per-array buffer pointers, pxShapeInfo/pzShapeInfo of shape infos.
__global__ static void concatCuda(const int numOfArrs, void* pVx, void* pxShapeInfo, void* pVz, void* pzShapeInfo) {
__shared__ int arrIdx, blocksPerArr;
__shared__ T *x, *z;
__shared__ Nd4jLong *zShapeInfo, *xShapeInfo, arrLen, arrLenPerBlock, start, end;
if (threadIdx.x == 0) {
blocksPerArr = (gridDim.x + numOfArrs - 1) / numOfArrs; // ceil
arrIdx = blockIdx.x / blocksPerArr; // which array this block serves
x = reinterpret_cast<T*>(reinterpret_cast<void**>(pVx)[arrIdx]);
z = reinterpret_cast<T*>(reinterpret_cast<void**>(pVz)[arrIdx]);
xShapeInfo = reinterpret_cast<Nd4jLong**>(pxShapeInfo)[arrIdx];
zShapeInfo = reinterpret_cast<Nd4jLong**>(pzShapeInfo)[arrIdx];
arrLen = shape::length(xShapeInfo);
arrLenPerBlock = (arrLen + blocksPerArr - 1) / blocksPerArr; // ceil
start = (blockIdx.x % blocksPerArr) * arrLenPerBlock;
end = (start + arrLenPerBlock) > arrLen ? arrLen : (start + arrLenPerBlock);
}
__syncthreads();
for (Nd4jLong i = start + threadIdx.x; i < end; i += blockDim.x)
z[shape::getIndexOffset(i, zShapeInfo, arrLen)] = x[shape::getIndexOffset(i, xShapeInfo, arrLen)];
}
///////////////////////////////////////////////////////////////////
template<typename T>
__host__ static void concatCudaLauncher(const int numOfArrs, const cudaStream_t *stream, void* pVx, void* pxShapeInfo, void* pVz, void* pzShapeInfo) {
concatCuda<T><<<512, 256, 1024, *stream>>>(numOfArrs, pVx, pxShapeInfo, pVz, pzShapeInfo);
}
///////////////////////////////////////////////////////////////////
// x - input, y - paddings, z - output
// x - input, y - paddings, z - output
template<typename X, typename Y>
// Pad kernel. mode 0 = CONSTANT (elements outside the input window get padVal),
// mode 1 = REFLECT, mode 2 = SYMMETRIC (out-of-range coordinates are mirrored
// back into the input; shift1/shift2 encode the one-element difference between
// the two mirror modes). Per-thread coordinate scratch is in dynamic shared mem.
__global__ static void padCuda(const int mode,
const void *vx, const Nd4jLong *xShapeInfo,
const void *vy, const Nd4jLong *yShapeInfo,
void *vz, const Nd4jLong *zShapeInfo,
const void *vPadVal) {
const X padVal = *reinterpret_cast<const X*>(vPadVal);
const auto x = reinterpret_cast<const X*>(vx);
const auto y = reinterpret_cast<const Y*>(vy);
auto z = reinterpret_cast<X*>(vz);
__shared__ int rank, rankMinusOne;
__shared__ Nd4jLong zLen, yLen, totalThreads, *coords, *xShape, *zShape, *xStride, *zStride, shift1, shift2, yStride0;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
coords = reinterpret_cast<Nd4jLong*>(shmem);
zLen = shape::length(zShapeInfo);
xShape = shape::shapeOf(const_cast<Nd4jLong*>(xShapeInfo));
zShape = shape::shapeOf(const_cast<Nd4jLong*>(zShapeInfo));
xStride = shape::stride(const_cast<Nd4jLong*>(xShapeInfo));
zStride = shape::stride(const_cast<Nd4jLong*>(zShapeInfo));
yStride0 = shape::stride(const_cast<Nd4jLong*>(yShapeInfo))[0];
rank = shape::rank(xShapeInfo);
zLen = shape::length(zShapeInfo);
yLen = 2 * rank; // paddings hold {left, right} per dimension
rankMinusOne = rank - 1;
totalThreads = gridDim.x * blockDim.x;
shift1 = mode == 1 ? 0 : 1; // REFLECT : SYMMETRIC
shift2 = mode == 1 ? 2 : 1; // REFLECT : SYMMETRIC
}
__syncthreads();
auto xzCoord = coords + threadIdx.x * rank; // we use xzCoord storage both for x and z arrays
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
if(mode == 0) { // CONSTANT case
for (Nd4jLong i = tid; i < zLen; i += totalThreads) {
shape::index2coords(rank, zShape, i, zLen, xzCoord);
const auto zOffset = shape::getOffset(0, zShape, zStride, xzCoord, rank);
bool within = true; // does this output element map into the input window?
for(int j = rankMinusOne; j >= 0; --j) {
if(xShape[j] == zShape[j]) continue; // dimension not padded
const auto left = y[shape::getIndexOffset(yStride0 * j, yShapeInfo, yLen)];
if(xzCoord[j] < left || xzCoord[j] >= left + xShape[j]) {within = false; break;}
else {xzCoord[j] = xzCoord[j] - left;}
}
if(within)
z[zOffset] = x[shape::getOffset(0, xShape, xStride, xzCoord, rank)];
else
z[zOffset] = padVal;
}
}
else { // REFLECT and SYMMETRIC cases
for (Nd4jLong i = tid; i < zLen; i += totalThreads) {
shape::index2coords(rank, zShape, i, zLen, xzCoord);
const auto zOffset = shape::getOffset(0, zShape, zStride, xzCoord, rank);
for(int j = rankMinusOne; j >= 0; --j) {
if(xShape[j] == zShape[j]) continue;
xzCoord[j] = xzCoord[j] - y[shape::getIndexOffset(yStride0 * j, yShapeInfo, yLen)]; // are ready to fill middle (within input dimension range)
if(xzCoord[j] < 0) xzCoord[j] = -xzCoord[j] - shift1; // means fill from left
else if(xzCoord[j] >= xShape[j]) xzCoord[j] = 2 * xShape[j] - xzCoord[j] - shift2; // means fill from right
}
const auto xOffset = shape::getOffset(0, xShape, xStride, xzCoord, rank);
z[zOffset] = x[xOffset];
}
}
}
///////////////////////////////////////////////////////////////////
template<typename X, typename Y>
static void padCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream,
const int mode,
const void *vx, const Nd4jLong *xShapeInfo,
const void *vy, const Nd4jLong *yShapeInfo,
void *vz, const Nd4jLong *zShapeInfo,
const void* padVal) {
padCuda<X,Y><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(mode, vx, xShapeInfo, vy, yShapeInfo, vz, zShapeInfo, padVal);
}
///////////////////////////////////////////////////////////////////
void pad(nd4j::LaunchContext * context, const int mode, const NDArray& input, const NDArray& paddings, NDArray& output, const NDArray& padValue) {
PointersManager manager(context, "pad");
NDArray::prepareSpecialUse({&output}, {&input, &paddings, &padValue});
const int threadsPerBlock = MAX_NUM_THREADS / 4;
const int blocksPerGrid = (output.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
const int sharedMem = 8 * threadsPerBlock * output.rankOf() + 128;
const auto xType = input.dataType();
const auto yType = paddings.dataType();
BUILD_DOUBLE_SELECTOR(xType, yType, padCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), mode, input.getSpecialBuffer(), input.getSpecialShapeInfo(), paddings.getSpecialBuffer(), paddings.getSpecialShapeInfo(), output.getSpecialBuffer(), output.getSpecialShapeInfo(), padValue.getSpecialBuffer()), LIBND4J_TYPES, INTEGER_TYPES);
NDArray::registerSpecialUse({&output}, {&input, &paddings, &padValue});
manager.synchronize();
}
///////////////////////////////////////////////////////////////////
template<typename T>
__global__ static void invertPermutationCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo) {
const T* x = reinterpret_cast<const T*>(vx);
T* z = reinterpret_cast<T*>(vz);
__shared__ Nd4jLong len, totalThreads;
if (threadIdx.x == 0) {
len = shape::length(xShapeInfo);
totalThreads = gridDim.x * blockDim.x;
}
__syncthreads();
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
for (Nd4jLong i = tid; i < len; i += totalThreads) {
const auto xOffset = shape::getIndexOffset(i, xShapeInfo, len);
const Nd4jLong index = x[xOffset];
const auto zOffset = shape::getIndexOffset(index, zShapeInfo, len);
z[zOffset] = i;
}
}
///////////////////////////////////////////////////////////////////
template<typename T>
__host__ static void invertPermutationCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const cudaStream_t *stream,
const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo) {
invertPermutationCuda<T><<<blocksPerGrid, threadsPerBlock, 1024, *stream>>>(vx, xShapeInfo, vz, zShapeInfo);
}
BUILD_SINGLE_TEMPLATE(template void invertPermutationCudaLauncher, (const int blocksPerGrid, const int threadsPerBlock, const cudaStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo), LIBND4J_TYPES);
////////////////////////////////////////////////////////////////////////
void invertPermutation(nd4j::LaunchContext* context, const NDArray& input, NDArray& output) {
const int threadsPerBlock = MAX_NUM_THREADS;
const int blocksPerGrid = (input.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
PointersManager manager(context, "invertPermutation");
NDArray::prepareSpecialUse({&output}, {&input});
BUILD_SINGLE_SELECTOR(input.dataType(), invertPermutationCudaLauncher, (blocksPerGrid, threadsPerBlock, context->getCudaStream(), input.getSpecialBuffer(), input.getSpecialShapeInfo(), output.getSpecialBuffer(), output.getSpecialShapeInfo()), LIBND4J_TYPES);
NDArray::registerSpecialUse({&output}, {&input});
manager.synchronize();
}
//////////////////////////////////////////////////////////////////////////
template<typename T>
__global__ static void traceCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const uint diagLen) {
const auto x = reinterpret_cast<const T*>(vx);
auto z = reinterpret_cast<T*>(vz);
__shared__ T* sharedMem;
__shared__ int xRank, zRank; // xRank = zRank + 2
__shared__ Nd4jLong xLen, zLen, *coordsMem;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
sharedMem = reinterpret_cast<T*>(shmem);
coordsMem = reinterpret_cast<Nd4jLong*>(shmem + blockDim.x * sizeof(T));
xRank = shape::rank(xShapeInfo);
zRank = shape::rank(zShapeInfo);
xLen = shape::length(xShapeInfo);
zLen = shape::length(zShapeInfo); // corresponds to number of matrices
}
__syncthreads();
Nd4jLong* coords = coordsMem + threadIdx.x * xRank;
for (uint m = blockIdx.x; m < zLen; m += gridDim.x) { // one block per each element of z, that is per each matrix
shape::index2coords(zRank, shape::shapeOf(const_cast<Nd4jLong*>(zShapeInfo)), m, zLen, coords);
const auto zOffset = shape::getOffset(0, shape::shapeOf(const_cast<Nd4jLong*>(zShapeInfo)), shape::stride(const_cast<Nd4jLong*>(zShapeInfo)), coords, zRank);
sharedMem[threadIdx.x] = 0;
for (uint i = threadIdx.x; i < diagLen; i += blockDim.x) {
coords[zRank] = coords[zRank + 1] = i;
const auto xOffset = shape::getOffset(0, shape::shapeOf(const_cast<Nd4jLong*>(xShapeInfo)), shape::stride(const_cast<Nd4jLong*>(xShapeInfo)), coords, xRank);
sharedMem[threadIdx.x] += x[xOffset];
}
__syncthreads();
// aggregate sum
for (Nd4jLong activeThreads = blockDim.x / 2; activeThreads > 0; activeThreads /= 2) {
if (threadIdx.x < activeThreads)
sharedMem[threadIdx.x] += sharedMem[threadIdx.x + activeThreads];
__syncthreads();
}
if (threadIdx.x == 0)
z[zOffset] = *sharedMem;
}
}
///////////////////////////////////////////////////////////////////
template<typename T>
static void traceCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream,
const void *vx, const Nd4jLong *xShapeInfo,
void *vz, const Nd4jLong *zShapeInfo,
const uint diagLen) {
traceCuda<T><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(vx, xShapeInfo, vz, zShapeInfo, diagLen);
}
BUILD_SINGLE_TEMPLATE(template void traceCudaLauncher, (const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const uint diagLen), LIBND4J_TYPES);
///////////////////////////////////////////////////////////////////
void trace(nd4j::LaunchContext* context, const NDArray& input, NDArray& output) {
PointersManager manager(context, "trace");
const uint diagLen = input.sizeAt(-1) < input.sizeAt(-2) ? input.sizeAt(-1) : input.sizeAt(-2);
const int threadsPerBlock = MAX_NUM_THREADS / 4;
const int blocksPerGrid = (output.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
const int sharedMem = threadsPerBlock * (sizeof(Nd4jLong) * input.rankOf() + input.sizeOfT()) + 128;
NDArray::prepareSpecialUse({&output}, {&input});
BUILD_SINGLE_SELECTOR(input.dataType(), traceCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), input.getSpecialBuffer(), input.getSpecialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), diagLen), LIBND4J_TYPES);
NDArray::registerSpecialUse({&output}, {&input});
manager.synchronize();
}
///////////////////////////////////////////////////////////////////
template<typename T>
__global__ static void triuBPCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int diag) {
// x and z have same shapes
const auto x = reinterpret_cast<const T*>(vx); // gradO
auto z = reinterpret_cast<T*>(vz); // gradI
__shared__ int rank, areSameOffsets; // xRank = zRank
__shared__ Nd4jLong len, totalThreads, *sharedMem; // xLen = zLen
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
sharedMem = reinterpret_cast<Nd4jLong*>(shmem);
areSameOffsets = shape::haveSameShapeAndStrides(xShapeInfo, zShapeInfo);
rank = shape::rank(xShapeInfo);
len = shape::length(zShapeInfo);
totalThreads = gridDim.x * blockDim.x;
}
__syncthreads();
auto coords = sharedMem + threadIdx.x * rank;
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
for (Nd4jLong i = tid; i < len; i += totalThreads) {
shape::index2coords(rank, zShapeInfo + 1, i, len, coords);
const auto zOffset = shape::getOffset(0, zShapeInfo + 1, zShapeInfo + rank + 1, coords, rank);
if((coords[rank - 2] + diag > coords[rank - 1])) // row + diag > col
z[zOffset] = 0;
else
z[zOffset] = x[areSameOffsets ? zOffset : shape::getOffset(0, xShapeInfo + 1, xShapeInfo + rank + 1, coords, rank)];
}
}
///////////////////////////////////////////////////////////////////
template<typename T>
static void triuBPCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int diag) {
triuBPCuda<T><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(vx, xShapeInfo, vz, zShapeInfo, diag);
}
BUILD_SINGLE_TEMPLATE(template void triuBPCudaLauncher, (const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int diag), LIBND4J_TYPES);
///////////////////////////////////////////////////////////////////
void triuBP(nd4j::LaunchContext* context, const NDArray& input, const NDArray& gradO, NDArray& gradI, const int diagonal) {
const int threadsPerBlock = MAX_NUM_THREADS / 4;
const int blocksPerGrid = (gradO.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
const int sharedMem = threadsPerBlock * sizeof(Nd4jLong) * gradO.rankOf() + 128;
PointersManager manager(context, "triuBP");
NDArray::prepareSpecialUse({&gradI}, {&gradO});
BUILD_SINGLE_SELECTOR(gradI.dataType(), triuBPCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), gradO.getSpecialBuffer(), gradO.getSpecialShapeInfo(), gradI.specialBuffer(), gradI.specialShapeInfo(), diagonal), LIBND4J_TYPES);
NDArray::registerSpecialUse({&gradI}, {&gradO});
manager.synchronize();
}
///////////////////////////////////////////////////////////////////
template<typename T>
__global__ static void tileBPCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, Nd4jLong* globMem) {
// x and z have same shapes
const auto x = reinterpret_cast<const T*>(vx); // gradO
auto z = reinterpret_cast<T*>(vz); // gradI
__shared__ int xRank, zRank; // xRank >= zRank
__shared__ Nd4jLong numOfXOffsets, zLen, totalThreads, *sharedMem; // xLen >= zLen
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
sharedMem = reinterpret_cast<Nd4jLong*>(shmem);
xRank = shape::rank(zShapeInfo);
zLen = shape::length(zShapeInfo);
numOfXOffsets = shape::length(xShapeInfo) / zLen;
totalThreads = gridDim.x * blockDim.x;
}
__syncthreads();
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
auto memBuff = sharedMem + threadIdx.x * 2 * xRank;
auto xOffsets = globMem + tid * numOfXOffsets;
for (Nd4jLong i = tid; i < zLen; i += totalThreads) {
const auto zOffset = shape::getIndexOffset(i, zShapeInfo, zLen);
shape::outerArrayOffsets(xOffsets, i, xShapeInfo, zShapeInfo, memBuff);
z[zOffset] = x[xOffsets[0]]; // first offset
for (Nd4jLong j = 1; j < numOfXOffsets; ++j) // rest offsets
z[zOffset] += x[xOffsets[j]];
}
}
///////////////////////////////////////////////////////////////////
template<typename T>
static void tileBPCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, Nd4jLong* globMem) {
tileBPCuda<T><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(vx, xShapeInfo, vz, zShapeInfo, globMem);
}
BUILD_SINGLE_TEMPLATE(template void tileBPCudaLauncher, (const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, Nd4jLong* globMem), FLOAT_TYPES);
//////////////////////////////////////////////////////////////////////////
void tileBP(nd4j::LaunchContext * context, const NDArray& gradO /*input*/, NDArray& gradI /*output*/, const std::vector<Nd4jLong> reps) {
NDArray memBuff('c', gradO.getShapeAsVector(), nd4j::DataType::INT64, context); // empty auxiliary array for storing device memory which will be used in kernel calculations
const int threadsPerBlock = MAX_NUM_THREADS / 4;
const int blocksPerGrid = (gradI.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
const int sharedMem = threadsPerBlock * sizeof(Nd4jLong) * 2 * gradO.rankOf() + 128;
PointersManager manager(context, "tileBP");
NDArray::prepareSpecialUse({&gradI}, {&gradO, &memBuff});
BUILD_SINGLE_SELECTOR(gradI.dataType(), tileBPCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), gradO.getSpecialBuffer(), gradO.getSpecialShapeInfo(), gradI.specialBuffer(), gradI.specialShapeInfo(), reinterpret_cast<Nd4jLong*>(memBuff.specialBuffer())), FLOAT_TYPES);
NDArray::registerSpecialUse({&gradI}, {&gradO, &memBuff});
manager.synchronize();
}
///////////////////////////////////////////////////////////////////
template<typename T>
__global__ static void scatterUpdateCuda(const int opCode, const int numOfInd,
void* vx, const Nd4jLong *xShapeInfo, const Nd4jLong *xOffsets,
void* vy, const Nd4jLong *yShapeInfo, const Nd4jLong *yOffsets,
const int* indexes) {
__shared__ T *x, *y;
__shared__ Nd4jLong arrLenX, arrLenY;
for (int e = 0; e < numOfInd; e++ ) {
const auto xIndex = indexes[e];
const bool isOwner = xIndex < gridDim.x ? blockIdx.x == xIndex : blockIdx.x == xIndex % gridDim.x;
if (!isOwner)
continue;
if (threadIdx.x == 0) {
x = reinterpret_cast<T*>(vx) + xOffsets[xIndex];
y = reinterpret_cast<T*>(vy) + yOffsets[e];
arrLenX = shape::length(xShapeInfo);
arrLenY = shape::length(yShapeInfo);
}
__syncthreads();
if (arrLenX != arrLenY)
return;
for (Nd4jLong i = threadIdx.x; i < arrLenX; i += blockDim.x) {
const auto xOffset = shape::getIndexOffset(i, xShapeInfo, arrLenX);
const auto yOffset = shape::getIndexOffset(i, yShapeInfo, arrLenY);
switch (opCode) {
case 0:
x[xOffset] += y[yOffset];
break;
case 1:
x[xOffset] -= y[yOffset];
break;
case 2:
x[xOffset] *= y[yOffset];
break;
case 3:
x[xOffset] /= y[yOffset];
break;
case 4:
x[xOffset] = y[yOffset] - x[xOffset];
break;
case 5:
x[xOffset] = y[yOffset] / x[xOffset];
break;
case 6:
x[xOffset] = y[yOffset];
break;
default:
continue;
}
}
__syncthreads();
}
}
template<typename T>
// Host launcher for scatterUpdateCuda with a fixed 512x256 configuration.
__host__ static void scatterUpdateCudaLauncher(const cudaStream_t* stream, const int opCode, const int numOfInd, void* vx, const Nd4jLong *xShapeInfo, const Nd4jLong *xOffsets, void* vy, const Nd4jLong *yShapeInfo, const Nd4jLong *yOffsets, const int* indexes) {
scatterUpdateCuda<T><<<512, 256, MAX_NUM_THREADS, *stream>>>(opCode, numOfInd, vx, xShapeInfo, xOffsets, vy, yShapeInfo, yOffsets, indexes);
}
//////////////////////////////////////////////////////////////////////////
void scatterUpdate(nd4j::LaunchContext* context, NDArray& input, NDArray& updates, const std::vector<int>* intArgs) {
const int opCode = (*intArgs)[0];
const int numOfDims = (*intArgs)[1];
const int numOfInd = (*intArgs)[2 + numOfDims];
std::vector<int> tadDimensions(numOfDims);
for (int e = 2; e < 2 + numOfDims; e++)
tadDimensions[e-2] = (*intArgs)[e];
auto packX = ConstantTadHelper::getInstance()->tadForDimensions(input.getShapeInfo(), tadDimensions);
auto packY = ConstantTadHelper::getInstance()->tadForDimensions(updates.getShapeInfo(), tadDimensions);
NDArray indices(const_cast<int*>(intArgs->data()) + numOfDims + 3, 'c', {numOfInd}, nd4j::DataType::INT32, context);
PointersManager manager(context, "scatterUpdate");
NDArray::prepareSpecialUse({&input}, {&input, &updates, &indices});
BUILD_SINGLE_SELECTOR(input.dataType(), scatterUpdateCudaLauncher, (context->getCudaStream(), opCode, numOfInd, input.specialBuffer(), packX.platformShapeInfo(), packX.platformOffsets(), updates.specialBuffer(), packY.platformShapeInfo(), packY.platformOffsets(), reinterpret_cast<int*>(indices.getSpecialBuffer())), LIBND4J_TYPES);
NDArray::registerSpecialUse({&input}, {&input, &updates, &indices});
manager.synchronize();
}
// In-place shuffle of a vector: each thread walks indices i = firstDim-1 .. 1
// (descending, grid-stride) and swaps element i with a random element r in [0, i).
// NOTE(review): swaps from different threads are not synchronized and may race
// when their (i, r) pairs overlap — confirm this approximation is intended.
template <typename T>
static __global__ void swapShuffleKernel(T* input, Nd4jLong* shape, Nd4jLong firstDim, Nd4jLong len, nd4j::graph::RandomGenerator* rng) {
auto tid = blockIdx.x * blockDim.x;
auto step = blockDim.x * gridDim.x;
for (int i = firstDim - 1 - tid - threadIdx.x; i > 0; i -= step) {
// pick a partner index strictly below i
int r = rng->relativeInt(i) % i;
if (i != r) {
T e0 = input[shape::getIndexOffset(i, shape, len)];
T e1 = input[shape::getIndexOffset(r, shape, len)];
//math::nd4j_swap<T>(input(i), input(r));
input[shape::getIndexOffset(i, shape, len)] = e1;
input[shape::getIndexOffset(r, shape, len)] = e0;
}
}
}
// Out-of-place shuffle of a vector: for each i = firstDim-1 .. 1 (descending,
// grid-stride) pick r in [0, i), copy input[indices[r]] to output[i] and
// input[indices[i]] to output[r], then swap indices[i] and indices[r] via
// atomicExch. Position 0 is filled by the caller before launch.
// NOTE(review): concurrent threads share the `indices` array; atomicExch makes
// the store atomic but not the read-then-write sequence — confirm intended.
template <typename T>
static __global__ void fillShuffleKernel(T* input, Nd4jLong* inputShape, T* output, Nd4jLong* outputShape, Nd4jLong firstDim, Nd4jLong len, int* indices, nd4j::graph::RandomGenerator* rng) {
// PRAGMA_OMP_PARALLEL_FOR_IF((firstDim-1) > Environment::getInstance()->tadThreshold())
auto tid = blockIdx.x * blockDim.x;
auto step = blockDim.x * gridDim.x;
for(int i = firstDim - 1 - tid - threadIdx.x; i > 0; i -= step) {
int r = rng->relativeInt(i) % i;
output[shape::getIndexOffset(i, outputShape, len)] = input[shape::getIndexOffset(indices[r], inputShape, len)];
if(i != r) {
output[shape::getIndexOffset(r, outputShape, len)] = input[shape::getIndexOffset(indices[i], inputShape, len)];
// output.p(r, input.e<T>(indices[i]));
// math::nd4j_swap<int>(indices[i], indices[r]);
atomicExch(&indices[i], indices[r]);
}
}
}
//////////////////////////////////////////////////////////////////////////
// Randomly shuffles `input` along its first dimension into `output` (or in
// place when isInplace == true), Fisher-Yates style. Three cases:
//   1) trivial (length 1 or first dim 1): plain copy;
//   2) vector-like input: shuffled on the device by the kernels above;
//   3) general rank-N: shuffled host-side over sub-arrays along dims 1..rank-1.
template <typename T>
void randomShuffle_(nd4j::LaunchContext * context, NDArray& input, NDArray& output, nd4j::graph::RandomGenerator& rng, const bool isInplace) {
// check edge cases first
int temp;
const int firstDim = input.sizeAt(0);
auto stream = context->getCudaStream();
NDArray::prepareSpecialUse({&output}, {&input});
if(input.lengthOf() == 1 || firstDim == 1) {
// nothing to shuffle - just copy through when not in place
if(!isInplace)
output.assign(input);
}
else if (input.isVector() || shape::isLikeVector(input.getShapeInfo(), temp)) {
// apply Fisher-Yates shuffle
// copy the host RNG state to the device so the kernels can draw numbers
// NOTE(review): cudaMalloc/cudaMemcpy return codes are not checked
nd4j::graph::RandomGenerator* dRandom = nullptr;
cudaMalloc(&dRandom, sizeof(nd4j::graph::RandomGenerator));
cudaMemcpy(dRandom, &rng, sizeof(nd4j::graph::RandomGenerator), cudaMemcpyHostToDevice);
T* inputBuf = reinterpret_cast<T*>(input.specialBuffer());
if(isInplace) {
swapShuffleKernel<T><<<128, 256, 1024, *stream>>>(inputBuf, input.specialShapeInfo(), firstDim, input.lengthOf(), dRandom);
}
else {
std::vector<int> indices(firstDim);
std::iota(indices.begin(), indices.end(), 0);
// element 0 is copied up front; the kernel fills positions 1..firstDim-1
cudaMemcpy(output.specialBuffer(), input.specialBuffer(), sizeof(T), cudaMemcpyDeviceToDevice);
//output.p<T>(Nd4jLong(0), input.e<T>(0));
PointersManager pointersManager(context, "helper::randomShuffle_");
int* indicesDev = reinterpret_cast<int*>(pointersManager.replicatePointer(indices.data(), indices.size() * sizeof(int)));
T* outputBuf = reinterpret_cast<T*>(output.specialBuffer());
fillShuffleKernel<T><<<128, 256, 1024, *stream>>>(inputBuf, input.specialShapeInfo(), outputBuf, output.specialShapeInfo(), firstDim, input.lengthOf(), indicesDev, dRandom);
pointersManager.synchronize();
}
// rng.rewindH(firstDim - 1);
cudaFree(dRandom);
}
else {
// evaluate sub-arrays list of input array through all dimensions excluding first one
std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input.rankOf(), {0});
auto subArrsListIn = input.allTensorsAlongDimension(dimensions);
// apply Fisher-Yates shuffle
if(isInplace) {
// NOTE(review): rng.relativeInt is called from an OMP-parallel loop —
// confirm the generator is safe to use concurrently
PRAGMA_OMP_PARALLEL_FOR_IF((firstDim-1) > Environment::getInstance()->elementwiseThreshold())
for(int i = firstDim - 1; i > 0; --i) {
int r = rng.relativeInt(i) % i;
if(i != r)
subArrsListIn->at(i)->swapUnsafe(*subArrsListIn->at(r));
}
}
else {
// evaluate sub-arrays list of output array through all dimensions excluding first one
auto subArrsListOut = output.allTensorsAlongDimension(dimensions);
std::vector<int> indices(firstDim);
std::iota(indices.begin(), indices.end(), 0);
bool isZeroShuffled = false;
PRAGMA_OMP_PARALLEL_FOR_IF((firstDim-1) > Environment::getInstance()->tadThreshold())
for(int i = firstDim - 1; i > 0; --i) {
int r = rng.relativeInt(i) % i;
subArrsListOut->at(i)->assign(subArrsListIn->at(indices[r]));
if(r == 0)
isZeroShuffled = true;
if(i != r) {
subArrsListOut->at(r)->assign(subArrsListIn->at(indices[i]));
math::nd4j_swap<int>(indices[i], indices[r]);
}
}
// position 0 keeps its original sub-array unless a swap already landed there
if(!isZeroShuffled)
subArrsListOut->at(0)->assign(subArrsListIn->at(0));
delete subArrsListOut;
}
rng.rewindH(firstDim-1);
delete subArrsListIn;
}
NDArray::registerSpecialUse({&output}, {&input});
}
// Dispatches randomShuffle_ on the input's data type.
void randomShuffle(nd4j::LaunchContext * context, NDArray& input, NDArray& output, nd4j::graph::RandomGenerator& rng, const bool isInplace) {
BUILD_SINGLE_SELECTOR(input.dataType(), randomShuffle_, (context, input, output, rng, isInplace), LIBND4J_TYPES);
}
BUILD_SINGLE_TEMPLATE(template void randomShuffle_, (nd4j::LaunchContext * context, NDArray& input, NDArray& output, nd4j::graph::RandomGenerator& rng, const bool isInplace), LIBND4J_TYPES);
////////////////////////////////////////////////////////////////////////
// gatherND is NOT implemented for CUDA in this revision: the typed helper
// below has an empty body, so calling gatherND currently leaves `output`
// untouched.
template<typename T>
static void gatherND_(nd4j::LaunchContext * context, NDArray& input, NDArray& indices, NDArray& output) {
}
// Dispatches gatherND_ on the input's data type (currently a no-op, see above).
void gatherND(nd4j::LaunchContext * context, NDArray& input, NDArray& indices, NDArray& output) {
BUILD_SINGLE_SELECTOR(input.dataType(), gatherND_, (context, input, indices, output), LIBND4J_TYPES);
}
BUILD_SINGLE_TEMPLATE(template void gatherND_, (nd4j::LaunchContext * context, NDArray& input, NDArray& indices, NDArray& output), LIBND4J_TYPES);
//////////////////////////////////////////////////////////////////////////
// Fills `output` with the identity pattern (delegates to NDArray::setIdentity).
void eye(nd4j::LaunchContext * context, NDArray& output) {
output.setIdentity();
}
//////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////
// For every element position e, writes to `output` the index (0..numArrays-1)
// of the input array holding the LARGEST value at that position.
// 1D launch, grid-stride loop, no shared memory.
template <typename T, typename Z>
static __global__ void global_mergeMaxIndex_(void **inArrs, void **inShapes, const int numArrays, void *voutput, Nd4jLong *outputShape, Nd4jLong length) {
    auto output = reinterpret_cast<Z*>(voutput);

    // fixed: global thread id previously used gridDim.x instead of blockDim.x
    const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
    const auto step = gridDim.x * blockDim.x;

    for (Nd4jLong e = tid; e < length; e += step) {
        T mVal = -DataTypeUtils::max<T>();   // running maximum
        Z mIdx(0);                           // index of the array holding it

        for (int i = 0; i < numArrays; i++) {
            auto x = reinterpret_cast<T*>(inArrs[i]);
            auto xShape = reinterpret_cast<Nd4jLong *>(inShapes[i]);
            auto val = x[shape::getIndexOffset(e, xShape, length)];
            // fixed: track the running maximum and remember the ARRAY index;
            // original never updated mVal and stored the element index e
            if (mVal < val) {
                mVal = val;
                mIdx = static_cast<Z>(i);
            }
        }
        // no __syncthreads() here: no shared memory is used, and a barrier
        // inside a grid-stride loop with uneven trip counts is unsafe
        output[shape::getIndexOffset(e, outputShape, length)] = mIdx;
    }
}
// Host launcher for mergeMaxIndex: gathers every input's device buffer/shape
// pointer, mirrors the two pointer tables into device memory, then runs the
// kernel over output.lengthOf() elements on the context stream.
template <typename T, typename Z>
static void mergeMaxIndex_(nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output) {
    const auto numArrays = inArrs.size();
    std::vector<void *> hostBuffers(numArrays);
    std::vector<void *> hostShapes(numArrays);

    for (size_t idx = 0; idx < numArrays; ++idx) {
        hostBuffers[idx] = inArrs[idx]->getSpecialBuffer();
        hostShapes[idx]  = inArrs[idx]->getSpecialShapeInfo();
    }

    PointersManager manager(context, "mergeMaxIndex");

    // replicate the host-side pointer tables to device global memory
    auto devBuffers = reinterpret_cast<void **>(manager.replicatePointer(hostBuffers.data(), numArrays * sizeof(void *)));
    auto devShapes  = reinterpret_cast<void **>(manager.replicatePointer(hostShapes.data(), numArrays * sizeof(void *)));

    global_mergeMaxIndex_<T,Z><<<512, 512, 512, *context->getCudaStream()>>>(devBuffers, devShapes, (int) numArrays, output.getSpecialBuffer(), output.getSpecialShapeInfo(), output.lengthOf());

    manager.synchronize();
}
// Dispatches mergeMaxIndex_ on <input value type, output index type>.
void mergeMaxIndex(nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output) {
BUILD_DOUBLE_SELECTOR(inArrs[0]->dataType(), output.dataType(), mergeMaxIndex_, (context, inArrs, output), LIBND4J_TYPES, INTEGER_TYPES);
}
BUILD_DOUBLE_TEMPLATE(template void mergeMaxIndex_, (nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output), LIBND4J_TYPES, INTEGER_TYPES);
//////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////
// For every element position e, writes to `output` the maximum value across
// all input arrays. 1D launch, grid-stride loop, no shared memory.
template <typename T>
static __global__ void global_mergeMax_(void **inArrs, void **inShapes, const int numArrays, void *voutput, Nd4jLong *outputShape, Nd4jLong length) {
    auto output = reinterpret_cast<T*>(voutput);

    // fixed: global thread id previously used gridDim.x instead of blockDim.x
    const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
    const auto step = gridDim.x * blockDim.x;

    for (Nd4jLong e = tid; e < length; e += step) {
        T mVal = -DataTypeUtils::max<T>();   // running maximum

        for (int i = 0; i < numArrays; i++) {
            auto x = reinterpret_cast<T*>(inArrs[i]);
            auto xShape = reinterpret_cast<Nd4jLong *>(inShapes[i]);
            auto val = x[shape::getIndexOffset(e, xShape, length)];
            if (mVal < val)
                mVal = val;
        }
        // no __syncthreads() here: no shared memory is used, and a barrier
        // inside a grid-stride loop with uneven trip counts is unsafe
        output[shape::getIndexOffset(e, outputShape, length)] = mVal;
    }
}
// Host launcher for mergeMax: collects device buffer/shape pointers of all
// inputs, replicates the pointer tables to device memory, and runs the
// element-wise max-merge kernel on the context stream.
template<typename T>
static void mergeMax_(nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output) {
    const auto numArrays = inArrs.size();
    std::vector<void *> hBuffers(numArrays);
    std::vector<void *> hShapes(numArrays);

    for (size_t k = 0; k < numArrays; ++k) {
        hBuffers[k] = inArrs[k]->getSpecialBuffer();
        hShapes[k]  = inArrs[k]->getSpecialShapeInfo();
    }

    PointersManager manager(context, "mergeMax");
    auto dBuffers = reinterpret_cast<void **>(manager.replicatePointer(hBuffers.data(), numArrays * sizeof(void *)));
    auto dShapes  = reinterpret_cast<void **>(manager.replicatePointer(hShapes.data(), numArrays * sizeof(void *)));

    global_mergeMax_<T><<<512, 512, 512, *context->getCudaStream()>>>(dBuffers, dShapes, (int) numArrays, output.getSpecialBuffer(), output.getSpecialShapeInfo(), output.lengthOf());
    manager.synchronize();
}
BUILD_SINGLE_TEMPLATE(template void mergeMax_, (nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output), LIBND4J_TYPES);
// Dispatches mergeMax_ on the output's data type.
void mergeMax(nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output) {
BUILD_SINGLE_SELECTOR(output.dataType(), mergeMax_, (context, inArrs, output), LIBND4J_TYPES);
}
//////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////
// For every element position e, writes to `output` the arithmetic mean of the
// values across all input arrays. 1D launch, grid-stride loop.
template <typename T>
static __global__ void global_mergeAvg_(void **inArrs, void **inShapes, const int numArrays, void *voutput, Nd4jLong *outputShape, Nd4jLong length) {
    auto output = reinterpret_cast<T*>(voutput);

    // fixed: global thread id previously used gridDim.x instead of blockDim.x
    const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
    const auto step = gridDim.x * blockDim.x;

    for (Nd4jLong e = tid; e < length; e += step) {
        T sum(0.0f);

        for (int i = 0; i < numArrays; i++) {
            auto x = reinterpret_cast<T*>(inArrs[i]);
            auto xShape = reinterpret_cast<Nd4jLong *>(inShapes[i]);
            sum += x[shape::getIndexOffset(e, xShape, length)];
        }
        output[shape::getIndexOffset(e, outputShape, length)] = sum / numArrays;
    }
}
// Host launcher for mergeAvg: mirrors each input's device buffer/shape pointer
// into device-resident pointer tables and runs the averaging kernel.
template<typename T>
static void mergeAvg_(nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output) {
    const auto count = inArrs.size();
    std::vector<void *> bufferList(count);
    std::vector<void *> shapeList(count);

    for (size_t pos = 0; pos < count; ++pos) {
        bufferList[pos] = inArrs[pos]->getSpecialBuffer();
        shapeList[pos]  = inArrs[pos]->getSpecialShapeInfo();
    }

    PointersManager manager(context, "mergeAvg");
    auto devBufferList = reinterpret_cast<void **>(manager.replicatePointer(bufferList.data(), count * sizeof(void *)));
    auto devShapeList  = reinterpret_cast<void **>(manager.replicatePointer(shapeList.data(), count * sizeof(void *)));

    global_mergeAvg_<T><<<512, 512, 512, *context->getCudaStream()>>>(devBufferList, devShapeList, (int) count, output.getSpecialBuffer(), output.getSpecialShapeInfo(), output.lengthOf());
    manager.synchronize();
}
BUILD_SINGLE_TEMPLATE(template void mergeAvg_, (nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output), LIBND4J_TYPES);
// Dispatches mergeAvg_ on the output's data type.
void mergeAvg(nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output) {
BUILD_SINGLE_SELECTOR(output.dataType(), mergeAvg_, (context, inArrs, output), LIBND4J_TYPES);
}
//////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////
// For every element position e, writes to `output` the sum of the values
// across all input arrays. 1D launch, grid-stride loop.
template <typename T>
static __global__ void global_mergeAdd_(void **inArrs, void **inShapes, const int numArrays, void *voutput, Nd4jLong *outputShape, Nd4jLong length) {
    auto output = reinterpret_cast<T*>(voutput);

    // fixed: global thread id previously used gridDim.x instead of blockDim.x
    const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
    const auto step = gridDim.x * blockDim.x;

    for (Nd4jLong e = tid; e < length; e += step) {
        T sum(0.0f);

        for (int i = 0; i < numArrays; i++) {
            auto x = reinterpret_cast<T*>(inArrs[i]);
            auto xShape = reinterpret_cast<Nd4jLong *>(inShapes[i]);
            sum += x[shape::getIndexOffset(e, xShape, length)];
        }
        output[shape::getIndexOffset(e, outputShape, length)] = sum;
    }
}
// Host launcher for mergeAdd: builds device-resident tables of the inputs'
// buffer/shape pointers and runs the element-wise sum kernel.
template<typename T>
static void mergeAdd_(nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output) {
    const auto total = inArrs.size();
    std::vector<void *> bufPtrs(total);
    std::vector<void *> shapePtrs(total);

    for (size_t j = 0; j < total; ++j) {
        bufPtrs[j]   = inArrs[j]->getSpecialBuffer();
        shapePtrs[j] = inArrs[j]->getSpecialShapeInfo();
    }

    PointersManager manager(context, "mergeAdd");
    auto dBufPtrs   = reinterpret_cast<void **>(manager.replicatePointer(bufPtrs.data(), total * sizeof(void *)));
    auto dShapePtrs = reinterpret_cast<void **>(manager.replicatePointer(shapePtrs.data(), total * sizeof(void *)));

    global_mergeAdd_<T><<<512, 512, 512, *context->getCudaStream()>>>(dBufPtrs, dShapePtrs, (int) total, output.getSpecialBuffer(), output.getSpecialShapeInfo(), output.lengthOf());
    manager.synchronize();
}
BUILD_SINGLE_TEMPLATE(template void mergeAdd_, (nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output), LIBND4J_TYPES);
// Dispatches mergeAdd_ on the output's data type.
void mergeAdd(nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output) {
BUILD_SINGLE_SELECTOR(output.dataType(), mergeAdd_, (context, inArrs, output), LIBND4J_TYPES);
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// In-place norm clipping: for each sub-array (TAD) whose precomputed L2 norm
// exceeds clipNorm, scales every element of the sub-array by clipNorm / norm.
// One block per sub-array (grid-stride over sub-arrays), threads stride over
// the sub-array's elements.
// NOTE(review): norm2Buf is indexed linearly by `arr` while a shape for it is
// passed but unused — confirm the norm buffer is always contiguous here.
template <typename T>
static __global__ void clipByNormInplaceKernel(Nd4jLong numOfSubArrs, T* inputBuffer, Nd4jLong* shape, Nd4jLong* inputOffsets, T* norm2Buf, Nd4jLong* norm2shape, T clipNorm) {
for (int arr = blockIdx.x; arr < numOfSubArrs; arr += gridDim.x) {
__shared__ T* z;
__shared__ Nd4jLong len;
if (threadIdx.x == 0) {
len = shape::length(shape);
// start of this sub-array inside the flat input buffer
z = inputBuffer + inputOffsets[arr];
}
__syncthreads();
for (int j = threadIdx.x; j < len; j+= blockDim.x) {
auto xIndex = shape::getIndexOffset(j, shape, len);
if(norm2Buf[arr] > clipNorm)
z[xIndex] *= clipNorm / norm2Buf[arr]; // case with ews = 1 and ordering is 'c'
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Out-of-place norm clipping: copies each input sub-array (TAD) to the
// corresponding output sub-array, scaling by clipNorm / norm2 when the
// sub-array's precomputed L2 norm exceeds clipNorm. One block per sub-array.
template <typename T>
static __global__ void clipByNormKernel(Nd4jLong numOfSubArrs, T* inputBuffer, Nd4jLong* shape, Nd4jLong* inputOffsets, T* outputBuffer, Nd4jLong* outputShape, Nd4jLong* outputOffsets, T* norm2Buf, Nd4jLong* norm2shape, T clipNorm) {
for (Nd4jLong arr = blockIdx.x; arr < numOfSubArrs; arr += gridDim.x) {
__shared__ T* x, *z;
__shared__ Nd4jLong lenX, lenZ;
__shared__ T norm2;
if (threadIdx.x == 0) {
lenX = shape::length(shape);
// starts of this sub-array inside the flat input/output buffers
x = inputBuffer + inputOffsets[arr];
z = outputBuffer + outputOffsets[arr];
lenZ = shape::length(outputShape);
norm2 = norm2Buf[shape::getIndexOffset(arr, norm2shape, numOfSubArrs)];
//printf("%d: %lf (vs %lf) %lld %lld\n", arr, norm2, clipNorm, lenX, lenZ);
}
__syncthreads();
for (Nd4jLong j = threadIdx.x; j < lenZ; j+= blockDim.x) {
auto xIndex = shape::getIndexOffset(j, shape, lenX);
auto zIndex = shape::getIndexOffset(j, outputShape, lenZ);
if(norm2 > clipNorm) {
// rescale so the sub-array's norm becomes exactly clipNorm
z[zIndex] = x[xIndex] * clipNorm / norm2; // case with ews = 1 and ordering is 'c'
} else {
// norm within bound - plain copy
z[zIndex] = x[xIndex];
}
//printf("%lld: %lf %lf\n", j, z[zIndex], x[xIndex]);
}
__syncthreads();
}
}
//////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////
// Clips the L2 norm of `input` (whole array, or per-TAD along `dimensions`)
// to at most clipNormA[0], writing into `output` (or in place).
// Whole-array case is handled on host after syncing the scalar norm; the
// per-TAD case launches one of the kernels above.
template<typename T>
static void clipByNorm_(nd4j::LaunchContext * context, NDArray& input, NDArray& output, const std::vector<int>& dimensions, NDArray const& clipNormA, const bool isInplace) {
const int rank = input.rankOf();
// per-TAD (or scalar) L2 norms along the requested dimensions
auto norm2 = input.reduceAlongDims(reduce::Norm2, dimensions);
clipNormA.syncToHost();
//norm2.printBuffer("Norm2");
T const clipNorm = clipNormA.e<T>(0);
//clipNormA.printBuffer("ClipNorm");
auto stream = context->getCudaStream();
if (isInplace) {
if(norm2.lengthOf() == 1) {
// single global norm: scale on host-driven path
norm2.syncToHost();
T norm2Val = norm2.e<T>(0);
if(norm2Val > clipNorm)
input *= clipNorm / norm2Val;
}
else {
std::vector<int> dimsToExclude = ShapeUtils::evalDimsToExclude(rank, dimensions);
const Nd4jLong numOfSubArrs = ShapeUtils::getNumOfSubArrs(input.getShapeInfo(), dimsToExclude);
auto packX = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(input.getShapeInfo(), dimensions);
//auto packZ = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(output.getShapeInfo(), dimsToExclude);
T* inputBuffer = reinterpret_cast<T*>(input.specialBuffer());
T* norm2buf = reinterpret_cast<T*>(norm2.specialBuffer());
clipByNormInplaceKernel<T><<<256, 512, 1024, *stream>>>(numOfSubArrs, inputBuffer, packX.specialShapeInfo(), packX.specialOffsets(), norm2buf, norm2.specialShapeInfo(), clipNorm);
}
}
else {
if(norm2.lengthOf() == 1) {
// single global norm: assign scaled (or unscaled) copy
norm2.syncToHost();
T norm2Val = norm2.e<T>(0);
if(norm2Val > clipNorm)
output.assign( input * (clipNorm / norm2Val));
else
output.assign( input );
}
else {
std::vector<int> dimsToExclude = ShapeUtils::evalDimsToExclude(rank, dimensions);
const Nd4jLong numOfSubArrs = ShapeUtils::getNumOfSubArrs(input.getShapeInfo(), dimsToExclude);
auto packX = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(input.getShapeInfo(), dimensions);
auto packZ = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(output.getShapeInfo(), dimensions);
T* inputBuffer = reinterpret_cast<T*>(input.specialBuffer());
T* norm2buf = reinterpret_cast<T*>(norm2.specialBuffer());
T* outputBuffer = reinterpret_cast<T*>(output.specialBuffer());
clipByNormKernel<T><<<256, 512, 1024, *stream>>>(numOfSubArrs, inputBuffer, packX.specialShapeInfo(), packX.specialOffsets(), outputBuffer, packZ.specialShapeInfo(), packZ.specialOffsets(), norm2buf, norm2.specialShapeInfo(), clipNorm);
}
}
}
// Dispatches clipByNorm_ on the output's data type (float types only).
void clipByNorm(nd4j::LaunchContext * context, NDArray& input, NDArray& output, const std::vector<int>& dimensions, const NDArray& clipNorm, const bool isInplace) {
BUILD_SINGLE_SELECTOR(output.dataType(), clipByNorm_, (context, input, output, dimensions, clipNorm, isInplace), FLOAT_TYPES);
}
BUILD_SINGLE_TEMPLATE(template void clipByNorm_, (nd4j::LaunchContext * context, NDArray& input, NDArray& output, const std::vector<int>& dimensions, const NDArray& clipNorm, const bool isInplace), FLOAT_TYPES);
// clipByGlobalNorm is NOT implemented for CUDA in this revision: the typed
// helper below has an empty body, so the op currently leaves outputs untouched.
template <typename T>
static void clipByGlobalNorm_(nd4j::LaunchContext * context, std::vector<NDArray*> const& inputs, double clipNorm, nd4j::memory::Workspace* workspace, std::vector<NDArray*>& outputs, bool isInplace) {
}
// Dispatches clipByGlobalNorm_ on the first output's data type (no-op, see above).
void clipByGlobalNorm(nd4j::LaunchContext * context, std::vector<NDArray*> const& inputs, double clipNorm, nd4j::memory::Workspace* workspace, std::vector<NDArray*>& outputs, bool isInplace) {
BUILD_SINGLE_SELECTOR(outputs[0]->dataType(), clipByGlobalNorm_, (context, inputs, clipNorm, workspace, outputs, isInplace), FLOAT_TYPES);
}
BUILD_SINGLE_TEMPLATE(template void clipByGlobalNorm_, (nd4j::LaunchContext * context, std::vector<NDArray*> const& inputs, double clipNorm, nd4j::memory::Workspace* workspace, std::vector<NDArray*>& outputs, bool isInplace), FLOAT_TYPES);
//////////////////////////////////////////////////////////////////////////
// clipByNormBP (backprop of clipByNorm) is NOT implemented for CUDA in this
// revision: the typed helper below has an empty body, so gradI is left untouched.
template<typename T>
static void clipByNormBP_(nd4j::LaunchContext * context, const NDArray& input, const NDArray& gradO, NDArray& gradI /*output*/, const std::vector<int>& dimensions, const NDArray& clipNorm) {
}
// Dispatches clipByNormBP_ on gradI's data type (no-op, see above).
void clipByNormBP(nd4j::LaunchContext * context, const NDArray& input, const NDArray& gradO, NDArray& gradI /*output*/, const std::vector<int>& dimensions, const NDArray& clipNorm) {
BUILD_SINGLE_SELECTOR(gradI.dataType(), clipByNormBP_, (context, input, gradO, gradI, dimensions, clipNorm), FLOAT_TYPES);
}
BUILD_SINGLE_TEMPLATE(template void clipByNormBP_, (nd4j::LaunchContext * context, const NDArray& input, const NDArray& gradO, NDArray& gradI /*output*/, const std::vector<int>& dimensions, const NDArray& clipNorm), FLOAT_TYPES);
//////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////
// Clips by the AVERAGED norm (Norm2 divided by element count): whole-array
// when `dimensions` is empty, otherwise per-TAD along `dimensions`.
template<typename T>
static void clipByAveraged_(nd4j::LaunchContext * context, NDArray& input, NDArray& output, const std::vector<int>& dimensions, const NDArray& clipNorm, const bool isInplace) {
auto cn = clipNorm.e<T>(0);
if (dimensions.size() == 0) {
// all-reduce
T n2 = input.reduceNumber(reduce::Norm2).e<T>(0) / input.lengthOf();
if (n2 <= cn) {
if (!isInplace)
output.assign(input);
}
else {
const T factor = cn / n2;
//auto lambda = LAMBDA_T(_x, factor) { return _x * factor; };
//input.applyLambda<T>(lambda, &output);
output.assign(input * factor);
}
}
else {
// along dimension
auto norm2 = input.reduceAlongDims(reduce::Norm2, dimensions, false);
if (!isInplace)
output.assign(input);
// NOTE(review): BOTH TAD lists are built from `output` (input was already
// copied into it above, so the values match) — confirm this aliasing is
// intended rather than tads-from-input.
auto tads = output.allTensorsAlongDimension(dimensions);
auto outTads = output.allTensorsAlongDimension(dimensions);
// TODO: make this CUDA-compliant somehow
for (int e = 0; e < tads->size(); e++) {
T n2 = norm2.e<T>(e) / tads->at(e)->lengthOf();
// factor is computed before the n2 > cn guard; it is only USED when
// n2 > cn, so a zero n2 never reaches the multiply below
const T factor = cn / n2;
if (n2 > cn) {
//auto lambda = LAMBDA_T(_x, factor) {return _x * factor;};
tads->at(e)->applyScalar(scalar::Multiply, factor, outTads->at(e));//applyLambda<T>(lambda, &output);
}
}
delete tads;
delete outTads;
}
}
// Dispatches clipByAveraged_ on the input's data type (float types only).
void clipByAveraged(nd4j::LaunchContext * context, NDArray& input, NDArray& output, const std::vector<int>& dimensions, const NDArray& clipNorm, const bool isInplace) {
BUILD_SINGLE_SELECTOR(input.dataType(), clipByAveraged_, (context, input, output, dimensions, clipNorm, isInplace), FLOAT_TYPES);
}
BUILD_SINGLE_TEMPLATE(template void clipByAveraged_, (nd4j::LaunchContext * context, NDArray& input, NDArray& output, const std::vector<int>& dimensions, const NDArray& clipNorm, const bool isInplace), FLOAT_TYPES);
/*
if (d1 > params[1])
return params[1];
else if (d1 < params[0])
return params[0];
else return d1;
*/
// Element-wise clamp of `input` into [leftBound, rightBound], written to
// `output`. Uses direct indexing when both buffers have elementWiseStride 1,
// otherwise resolves offsets through the shape infos. 1D grid-stride launch.
template <typename T>
static __global__ void clipByValueKernel(void* input, Nd4jLong* inputShape, void* output, Nd4jLong* outputShape, double leftBound, double rightBound) {
    __shared__ T* outputBuf;
    __shared__ T* inputBuf;
    __shared__ Nd4jLong length;
    __shared__ bool linearBuffers;

    if (threadIdx.x == 0) {
        outputBuf = reinterpret_cast<T *>(output);
        inputBuf = reinterpret_cast<T *>(input);
        length = shape::length(inputShape);
        // linear addressing is valid only when both buffers have ews == 1
        linearBuffers = shape::elementWiseStride(inputShape) == shape::elementWiseStride(outputShape) && shape::elementWiseStride(inputShape) == 1;
    }
    __syncthreads();

    // fixed: global thread id previously used gridDim.x instead of blockDim.x,
    // which skipped elements with the 256x512 launch used by clipByValue_
    const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
    const auto step = gridDim.x * blockDim.x;

    for (Nd4jLong e = tid; e < length; e += step) {
        if (linearBuffers) {
            if (inputBuf[e] > rightBound) outputBuf[e] = (T) rightBound;
            else if (inputBuf[e] < leftBound) outputBuf[e] = (T) leftBound;
            else outputBuf[e] = inputBuf[e];
        }
        else {
            auto inputOffset = shape::getIndexOffset(e, inputShape, length);
            auto outputOffset = shape::getIndexOffset(e, outputShape, length);
            if (inputBuf[inputOffset] > rightBound) outputBuf[outputOffset] = (T) rightBound;
            else if (inputBuf[inputOffset] < leftBound) outputBuf[outputOffset] = (T) leftBound;
            // fixed: pass-through read used inputBuf[outputOffset] (wrong
            // buffer's offset) in the original
            else outputBuf[outputOffset] = inputBuf[inputOffset];
        }
    }
}
// Host wrapper for clipByValueKernel: ensures the input is resident on the
// device, then launches the clamp kernel on the context stream.
template <typename T>
static void clipByValue_(nd4j::LaunchContext * context, NDArray& input, double leftBound, double rightBound, NDArray& output) {
    auto cudaStream = context->getCudaStream();

    // make sure the device copy is current before reading it in the kernel
    if (!input.isActualOnDeviceSide()) {
        input.syncToDevice();
    }

    NDArray::prepareSpecialUse({&output}, {&input});
    clipByValueKernel<T><<<256, 512, 8192, *cudaStream>>>(input.specialBuffer(), input.specialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), leftBound, rightBound);
    NDArray::registerSpecialUse({&output}, {&input});
}
// Dispatches clipByValue_ on the input's data type (float types only).
void clipByValue(nd4j::LaunchContext * context, NDArray& input, double leftBound, double rightBound, NDArray& output) {
BUILD_SINGLE_SELECTOR(input.dataType(), clipByValue_, (context, input, leftBound, rightBound, output), FLOAT_TYPES);
}
// NOTE(review): the stray ';' inside the signature argument below is carried in
// the original macro invocation — confirm it expands cleanly on all toolchains.
BUILD_SINGLE_TEMPLATE(template void clipByValue_, (nd4j::LaunchContext * context, NDArray& input, double leftBound, double rightBound, NDArray& output);, FLOAT_TYPES);
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Mirror-pad for rank <= 1 arrays: fills output of length zLen from input of
// length xLen using reflected indices on the left (leftSideCorrected - i),
// pass-through in the middle (i - leftSide), and reflected indices on the
// right (len - i, the default set below). Grid-stride 1D launch.
template <typename T>
static __global__ void mirrorPadLinearKernel(void const* vx, Nd4jLong* xShape, void* vz, Nd4jLong* zShape, Nd4jLong leftSide, Nd4jLong leftSideCorrected, Nd4jLong xLen, Nd4jLong len, Nd4jLong zLen) {
__shared__ T const* x;
__shared__ T* z;
if (threadIdx.x == 0) {
x = reinterpret_cast<T const*>(vx);
z = reinterpret_cast<T*>(vz);
}
__syncthreads();
auto start = blockIdx.x * blockDim.x + threadIdx.x;
auto step = blockDim.x * gridDim.x;
for(int i = start; i < zLen; i+= step) {
auto zIndex = shape::getIndexOffset(i, zShape, zLen);
// default: right-side reflection
auto xIndex = shape::getIndexOffset(len - i, xShape, xLen);
if (i < leftSide) // left side
xIndex = shape::getIndexOffset(leftSideCorrected - i, xShape, xLen);
else if(i >= leftSide && i < leftSide + xLen) // middle
xIndex = shape::getIndexOffset(i - leftSide, xShape, xLen);
// else // right side
// z[i] = x[len - i];
z[zIndex] = x[xIndex];
}
}
// Mirror-pad for rank >= 2 arrays. For each output element, converts the flat
// index to output coordinates, then maps every coordinate back into the input
// through the reflect/symmetric rule (reflBorder: 0 = REFLECT, 1 = SYMMETRIC)
// using the per-dimension left padding from `paddings` (rank-2: [dim][0]).
// Dynamic shared memory must hold one coordinate buffer of `rank` Nd4jLongs
// per thread (xIdx scratch).
template <typename F, typename I>
static __global__ void mirrorPadKernel(void const* vx, Nd4jLong* xShape, void* vz, Nd4jLong* zShape, Nd4jLong outLen, void const* paddings, Nd4jLong* paddingShape, int reflBorder) {
__shared__ F const* x;
__shared__ I const* pads;
__shared__ F* z;
__shared__ Nd4jLong zRank, rank;
__shared__ Nd4jLong* xShapeOf, *xStrideOf, *padsShapeOf, *padsStrideOf;
__shared__ Nd4jLong* zShapeOf, *zStrideOf;
__shared__ Nd4jLong* xIdx;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
xIdx = reinterpret_cast<Nd4jLong*>(shmem);
rank = shape::rank(xShape);
x = reinterpret_cast<F const*>(vx);//
pads = reinterpret_cast<I const*>(paddings);
z = reinterpret_cast<F*>(vz);
xShapeOf = shape::shapeOf(xShape);
xStrideOf = shape::stride(xShape);
zShapeOf = shape::shapeOf(zShape);
zRank = shape::rank(zShape);
zStrideOf = shape::stride(zShape);
padsShapeOf = shape::shapeOf(paddingShape);
padsStrideOf = shape::stride(paddingShape);
}
__syncthreads();
auto start = threadIdx.x + blockIdx.x * blockDim.x;
auto step = blockDim.x * gridDim.x;
for(Nd4jLong i = start; i < outLen; i+= step) {
// per-thread coordinate scratch inside the shared-memory pool
auto xzCoord = xIdx + threadIdx.x * rank;
//auto zxCoord = xIdx + (threadIdx.x + threadIdx.x % 2 + 1) * rank;
shape::index2coords(rank, zShapeOf, i, xzCoord);
auto outOffset = shape::getOffset(0, zShapeOf, zStrideOf, xzCoord, rank);
// auto intStep = blockDim.y * gridDim.y;
// remap each output coordinate into the input's coordinate range
for(int j = 0; j < rank; j++) {
const Nd4jLong inLen = shape::sizeAt(xShape, j);
Nd4jLong coords[2] = {j, 0};
auto padOffset = shape::getOffset(0, padsShapeOf, padsStrideOf, coords, 2); // padding already has rank 2
const auto leftSide = pads[padOffset];
const auto leftSideCorrected = leftSide - reflBorder;
const Nd4jLong len = 2 * (inLen - 1) + leftSide + reflBorder;
if(xzCoord[j] < leftSide) // left side
xzCoord[j] = leftSideCorrected - xzCoord[j];
else if(xzCoord[j] >= leftSide && xzCoord[j] < leftSide + inLen) // middle
xzCoord[j] = xzCoord[j] - leftSide;
else if (len > xzCoord[j]) // right side
xzCoord[j] = len - xzCoord[j];
else
xzCoord[j] = xzCoord[j] - len;
}
auto inOffset = shape::getOffset(0, xShapeOf, xStrideOf, xzCoord, rank);
z[outOffset] = x[inOffset];
}
}
// Host dispatcher for mirror padding. mode: 0 - REFLECT (border element not
// repeated), else - SYMMETRIC (border element repeated). Rank <= 1 uses the
// linear kernel, higher ranks the coordinate-mapping kernel.
template<typename F, typename I>
static void mirrorPad_(nd4j::LaunchContext * context, const NDArray& input, const NDArray& paddings, NDArray& output, const int mode) {
// mode: 0 - REFLECT, else - SYMMETRIC
const int reflBorder = (bool)mode ? 1 : 0;
const int rank = input.rankOf();
const Nd4jLong outLen = output.lengthOf();
auto stream = context->getCudaStream();
NDArray::prepareSpecialUse({&output}, {&input, &paddings});
if(rank <= 1) {
const Nd4jLong inLen = input.lengthOf();
const auto leftSide = paddings.e<Nd4jLong>(0);
const auto leftSideCorrected = leftSide - reflBorder;
// reflection period used to fold right-side indices back into the input
const Nd4jLong len = 2*(inLen-1) + leftSide + reflBorder;
mirrorPadLinearKernel<F><<<256, 512, 256, *stream>>>(input.getSpecialBuffer(), input.getSpecialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), leftSide, leftSideCorrected, inLen, len, outLen);
nd4j::DebugHelper::checkErrorCode(stream, "helpers::mirrorPadLinearKernel(...) failed");
}
else {
mirrorPadKernel<F, I><<<256, 256, 8192, *stream>>>(input.getSpecialBuffer(), input.getSpecialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), outLen, paddings.getSpecialBuffer(), paddings.getSpecialShapeInfo(), reflBorder);
nd4j::DebugHelper::checkErrorCode(stream, "helpers::mirrorPadKernel(...) failed");
}
NDArray::registerSpecialUse({&output}, {&input, &paddings});
}
// Dispatches mirrorPad_ on <input type, paddings integer type>.
void mirrorPad(nd4j::LaunchContext * context, const NDArray& input, const NDArray& paddings, NDArray& output, const int mode) {
BUILD_DOUBLE_SELECTOR(input.dataType(), paddings.dataType(), mirrorPad_, (context, input, paddings, output, mode), LIBND4J_TYPES, INTEGER_TYPES);
}
BUILD_DOUBLE_TEMPLATE(template void mirrorPad_, (nd4j::LaunchContext * context, const NDArray& input, const NDArray& paddings, NDArray& output, const int mode), LIBND4J_TYPES, INTEGER_TYPES);
//////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////
// Concatenates inArrs along `axis` into `output` by viewing the output as a
// sequence of sub-arrays (one interval per input along the axis) and copying
// each input into its sub-array via concatCudaLauncher.
void concat(nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output, const int axis) {
const int numOfArrs = inArrs.size();
// make sure every input is up to date on the device
for(int i = 0; i < numOfArrs; ++i)
if(!inArrs[i]->isActualOnDeviceSide()) inArrs[i]->syncToDevice();
const int rank = inArrs[0]->rankOf();
const int rank2 = 2*rank;
// per-input interval [start, end) for every dimension; only `axis` varies
std::vector<std::vector<Nd4jLong>> indices(numOfArrs, std::vector<Nd4jLong>(rank2,0));
// take into account indices for first array
indices[0][2 * axis + 1] = inArrs[0]->sizeAt(axis);
// loop through the rest of input arrays
for(int i = 1; i < numOfArrs; ++i) {
indices[i][2 * axis] = indices[i-1][2 * axis + 1]; // index start from
indices[i][2 * axis + 1] = indices[i-1][2 * axis + 1] + inArrs[i]->sizeAt(axis); // index end with (excluding)
}
// sub-array views of output, one per input
std::vector<NDArray*> outSubArrs(numOfArrs);
for(int i = 0; i < numOfArrs; ++i)
outSubArrs[i] = new NDArray(output(indices[i], true));
// prepare arrays of pointers on buffers and shapes
std::vector<void*> hOutBuffers(numOfArrs), hInBuffers(numOfArrs);
std::vector<Nd4jLong*> hOutShapeInfo(numOfArrs), hInShapeInfo(numOfArrs);
for(int i = 0; i < numOfArrs; ++i) {
hOutBuffers[i] = outSubArrs[i]->getSpecialBuffer();
hInBuffers[i] = inArrs[i]->getSpecialBuffer();
hOutShapeInfo[i] = outSubArrs[i]->getSpecialShapeInfo();
hInShapeInfo[i] = inArrs[i]->getSpecialShapeInfo();
}
// allocate and copy all buffers and shapes arrays to global memory
PointersManager manager(context, "helpers::concat");
void* dOutBuffers = manager.replicatePointer(hOutBuffers.data(), hOutBuffers.size() * sizeof(void*));
void* dInBuffers = manager.replicatePointer(hInBuffers.data(), hInBuffers.size() * sizeof(void*));
void* dInShapeInfo = manager.replicatePointer(hInShapeInfo.data(), hInShapeInfo.size() * sizeof(Nd4jLong*));
void* dOutShapeInfo = manager.replicatePointer(hOutShapeInfo.data(), hOutShapeInfo.size() * sizeof(Nd4jLong*));
BUILD_SINGLE_SELECTOR(inArrs[0]->dataType(), concatCudaLauncher, (numOfArrs, context->getCudaStream(), dInBuffers, dInShapeInfo, dOutBuffers, dOutShapeInfo), LIBND4J_TYPES);
manager.synchronize();
for(int i = 0; i < numOfArrs; ++i)
delete outSubArrs[i];
// NOTE(review): inputs are marked as read on HOST although they were consumed
// on the device — confirm tickReadDevice() was not intended here.
for(int i = 0; i < numOfArrs; ++i)
inArrs[i]->tickReadHost();
output.tickWriteDevice();
}
// Scatter-with-copy kernel: for each i in [0, iLength), writes updates[i] into
// position indices[i] of the i-th TAD of x. Grid-stride 1D launch, one index
// element per thread iteration.
template <typename X, typename Y>
static _CUDA_G void scatterSimpleKernel(void *vx, Nd4jLong *xTadShape, Nd4jLong *xTadOffsets, Nd4jLong xLength, Nd4jLong numTads, void *vi, Nd4jLong *iShapeInfo, Nd4jLong iLength, void *vu, Nd4jLong *uShapeInfo, Nd4jLong uLength) {
auto u = reinterpret_cast<X*>(vu);
auto indices = reinterpret_cast<Y*>(vi);
auto tid = threadIdx.x + blockIdx.x * blockDim.x;
for (int i = tid; i < iLength; i += blockDim.x * gridDim.x) {
// base of the i-th TAD inside the flat x buffer
auto x = reinterpret_cast<X*>(vx) + xTadOffsets[i];
auto idx = indices[shape::getIndexOffset(i, iShapeInfo, iLength)];
x[shape::getIndexOffset(idx, xTadShape, xLength)] = u[shape::getIndexOffset(i, uShapeInfo, uLength)];
}
}
// Host-side launcher for scatterSimpleKernel<X,Y>.
template <typename X, typename Y>
void scatterSimple_(nd4j::LaunchContext * context, const int opId, NDArray& input, const NDArray& updates, const NDArray& indices, const std::vector<int>& dimensions) {
    // TADs of `input` along the dims complementary to `dimensions`
    // (presumably -- see ShapeUtils::evalDimsToExclude; confirm against callers).
    auto dims = ShapeUtils::evalDimsToExclude(input.rankOf(), dimensions);
    auto packX = ConstantTadHelper::getInstance()->tadForDimensions(input.getShapeInfo(), dims);
    auto xLength = shape::length(packX.primaryShapeInfo());
    auto iLength = indices.lengthOf();
    auto uLength = updates.lengthOf();
    // Fixed launch config: 256 blocks x 256 threads; the kernel grid-strides
    // over iLength. NOTE(review): 1024 bytes of dynamic shared memory are
    // requested but the kernel declares none (looks like a leftover); `opId`
    // is unused here (already validated by the scatterSimple wrapper); the
    // launch result is not checked.
    scatterSimpleKernel<X,Y><<<256, 256, 1024, *context->getCudaStream()>>>(input.getSpecialBuffer(), packX.platformShapeInfo(), packX.platformOffsets(), xLength, packX.numberOfTads(), indices.getSpecialBuffer(), indices.getSpecialShapeInfo(), iLength, updates.getSpecialBuffer(), updates.getSpecialShapeInfo(), uLength);
}
// Public entry point: validates the op id (only op 6, copy, is implemented),
// marks the device buffers in use, and dispatches scatterSimple_ on the
// (input dtype, index dtype) pair.
void scatterSimple(nd4j::LaunchContext * context, const int opId, NDArray& input, const NDArray& updates, const NDArray& indices, const std::vector<int>& dimensions) {
    if (opId != 6)
        throw std::runtime_error("scatterSimple: only copy op is supported");

    const auto inputType = input.dataType();
    const auto indexType = indices.dataType();

    NDArray::prepareSpecialUse({&input}, {&updates, &indices});
    BUILD_DOUBLE_SELECTOR(inputType, indexType, scatterSimple_, (context, opId, input, updates, indices, dimensions), LIBND4J_TYPES, INTEGER_TYPES);
    NDArray::registerSpecialUse({&input}, {&updates, &indices});
}
BUILD_SINGLE_TEMPLATE(template void concatCudaLauncher, (const int numOfArrs, const cudaStream_t *stream, void* pVx, void* pxShapeInfo, void* pVz, void* pzShapeInfo), LIBND4J_TYPES);
BUILD_DOUBLE_TEMPLATE(template void padCudaLauncher, (const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream, const int mode, const void *vx, const Nd4jLong *xShapeInfo, const void *vy, const Nd4jLong *yShapeInfo, void *vz, const Nd4jLong *zShapeInfo, const void* vPadVal), LIBND4J_TYPES, INTEGER_TYPES);
}
}
}
|
beb9105b6ae0879027b5558024e4a2d4ee1476a5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <cstdio>
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <math.h>
#include <iostream>
#include <algorithm>
#define CSC(call) \
do { \
hipError_t res = call; \
if (res != hipSuccess) { \
fprintf(stderr, "ERROR in %s:%d. Message: %s\n", \
__FILE__, __LINE__, hipGetErrorString(res)); \
exit(0); \
} \
} while(0)
// Builds a histogram on the GPU: devCount[v] counts occurrences of value v
// in arr. devCount must be pre-zeroed and large enough for every value that
// can appear in arr. Grid-stride loop, so any launch config covers `size`.
__global__ void Histohram(int* devCount, int* arr, int size)
{
	const int first = blockDim.x * blockIdx.x + threadIdx.x;
	const int step = blockDim.x * gridDim.x;
	for (int i = first; i < size; i += step)
		atomicAdd(&devCount[arr[i]], 1);
}
// Placement pass of counting sort.
// Expects devScan[v] to hold the inclusive prefix count of values <= v,
// i.e. one past the last output slot for value v (as produced by Scan()).
// atomicAdd(.., -1) returns the previous count, so (old - 1) is a unique
// back-to-front output slot for this occurrence of arr[i].
// NOTE: devScan is consumed (decremented) by this kernel.
__global__ void CountSort(int* devScan, int* arr, int* out, int size)
{
	int idx = blockDim.x * blockIdx.x + threadIdx.x;
	int offsetx = blockDim.x * gridDim.x;
	// Grid-stride loop over the input values.
	for (int i = idx; i < size; i += offsetx)
	{
		out[atomicAdd(&devScan[arr[i]], -1) - 1] = arr[i];
	}
}
const int BLOCK_SIZE = 1024;
// Work-efficient per-block scan (Blelloch up-sweep / down-sweep) in shared
// memory. Writes back an INCLUSIVE scan of this block's slice of devArr and
// stores the block total in newDevArr[blockIdx.x] so Scan() can combine
// blocks recursively.
// Preconditions: blockDim.x <= BLOCK_SIZE and (presumably) a power of two --
// the tree traversal relies on it; all call sites in this file use powers of
// two. No bounds check on idx: the launch must cover devArr exactly.
// The barriers inside the while-loops are safe because `d` is uniform across
// the block.
__global__ void KernelBlockScan(int* devArr, int* newDevArr)
{
	int blockSize = blockDim.x;
	__shared__ int arr[BLOCK_SIZE];
	int idx = blockDim.x * blockIdx.x + threadIdx.x;
	// Stage this block's slice into shared memory.
	arr[threadIdx.x] = devArr[idx];
	__syncthreads();
	// Up-sweep: build partial sums in a binary tree; arr[blockSize-1] ends up
	// holding the block total.
	int d = 1;
	while (d < blockSize)
	{
		if (2 * d + threadIdx.x * 2 * d - 1 < blockSize)
			arr[2 * d + threadIdx.x * 2 * d - 1] += arr[d + threadIdx.x * 2 * d - 1];
		d *= 2;
		__syncthreads();
	}
	// Clear the root, remembering the block total for the write-back phase.
	int last = 0;
	if (threadIdx.x == blockSize - 1)
	{
		last = arr[threadIdx.x];
		arr[threadIdx.x] = 0;
	}
	d /= 2;
	__syncthreads();
	// Down-sweep: convert the partial-sum tree into an exclusive scan.
	while (d >= 1)
	{
		if (d * 2 * threadIdx.x + 2 * d - 1 < blockSize)
		{
			auto t = arr[d * 2 * threadIdx.x + d - 1];
			arr[d * 2 * threadIdx.x + d - 1] = arr[d * 2 * threadIdx.x + 2 * d - 1];
			arr[d * 2 * threadIdx.x + 2 * d - 1] += t;
		}
		d /= 2;
		__syncthreads();
	}
	// Write back shifted by one (exclusive -> inclusive). The last thread
	// writes the block total both to its own slot and to newDevArr for the
	// next recursion level.
	if (threadIdx.x == blockSize - 1)
	{
		devArr[idx] = last;
		newDevArr[blockIdx.x] = last;
	}
	else
	{
		devArr[idx] = arr[threadIdx.x + 1];
	}
}
// Adds the scanned per-block totals (newArr) back into each block's slice of
// devArr, completing the multi-block scan. Block 0 needs no offset;
// newArr[i] must already hold the inclusive scan of block totals 0..i.
__global__ void KernelBlockShift(int* devArr, int* newArr)
{
	if (blockIdx.x == 0)
		return;
	const int gid = blockDim.x * blockIdx.x + threadIdx.x;
	devArr[gid] += newArr[blockIdx.x - 1];
}
// Host-side helper: returns the larger of two ints.
int Max(int a, int b)
{
	if (a > b) {
		return a;
	}
	return b;
}
// Host-side helper: returns the smaller of two ints.
int Min(int a, int b)
{
	if (a < b) {
		return a;
	}
	return b;
}
// Recursive inclusive scan of devCount[0..size) across multiple blocks:
//   1) scan each chunk of up to BLOCK_SIZE elements, collecting chunk totals
//      in newDevCount,
//   2) recursively scan those totals,
//   3) add each preceding chunk total back into its chunk (KernelBlockShift).
// Call sites pass size = MAX_NUMBER + 1 = 2^24, so size is a multiple of
// BLOCK_SIZE (or <= BLOCK_SIZE) at every recursion level.
// NOTE(review): hipMalloc and the kernel launches are unchecked here, unlike
// the CSC-wrapped calls in main().
void Scan(int* devCount, int size)
{
	int blockCount = Max(1, size / BLOCK_SIZE);
	int blockSize = Min(size, BLOCK_SIZE);
	// Per-block totals gathered by KernelBlockScan.
	int* newDevCount;
	hipMalloc((void**)&newDevCount, sizeof(int) * blockCount);
	hipLaunchKernelGGL(( KernelBlockScan), dim3(blockCount), dim3(blockSize) , 0, 0, devCount, newDevCount);
	hipDeviceSynchronize();
	if (size > BLOCK_SIZE)
	{
		Scan(newDevCount, size / BLOCK_SIZE);
		hipLaunchKernelGGL(( KernelBlockShift), dim3(size / BLOCK_SIZE), dim3(BLOCK_SIZE) , 0, 0, devCount, newDevCount);
		hipDeviceSynchronize();
	}
	hipFree(newDevCount);
}
using namespace std;
const int MAX_NUMBER = 16777215;
// Debug kernel: every launched thread atomically bumps devInt[0].
// Not referenced anywhere in this file.
__global__ void TestAdd(int* devInt)
{
	atomicAdd(&devInt[0], 1);
}
// Reads a binary stream (int count, then that many ints) from stdin,
// counting-sorts the payload on the GPU, and writes it back to stdout.
int main(int argc, const char** argv)
{
	int size;
	// Validate the header read: on a truncated stream `size` would be garbage.
	if (fread(&size, sizeof(int), 1, stdin) != 1 || size < 0) {
		fprintf(stderr, "failed to read element count\n");
		return 1;
	}
	auto hostArray = new int[size];
	if (fread(hostArray, sizeof(int), size, stdin) != (size_t)size) {
		fprintf(stderr, "failed to read %d elements\n", size);
		delete[] hostArray;
		return 1;
	}
	// Histogram over the full 24-bit value range, zero-initialized.
	int* devCount;
	CSC(hipMalloc((void**)&devCount, sizeof(int) * (MAX_NUMBER + 1)));
	CSC(hipMemset(devCount, 0, sizeof(int) * (MAX_NUMBER + 1)));
	int* devArray;
	CSC(hipMalloc((void**)&devArray, sizeof(int) * size));
	CSC(hipMemcpy(devArray, hostArray, sizeof(int) * size, hipMemcpyHostToDevice));
	hipLaunchKernelGGL(( Histohram), dim3(256), dim3(256), 0, 0, devCount, devArray, size);
	CSC(hipDeviceSynchronize());   // also surfaces kernel launch/execution errors
	// Prefix-sum the histogram: devCount[v] becomes the end offset of value v's run.
	Scan(devCount, MAX_NUMBER + 1);
	int* outDevArray;
	CSC(hipMalloc((void**)&outDevArray, sizeof(int) * size));
	hipLaunchKernelGGL(( CountSort), dim3(256), dim3(256), 0, 0, devCount, devArray, outDevArray, size);
	CSC(hipDeviceSynchronize());
	CSC(hipMemcpy(hostArray, outDevArray, sizeof(int) * size, hipMemcpyDeviceToHost));
	fwrite(hostArray, sizeof(int), size, stdout);
	// Release device and host buffers (previously leaked).
	CSC(hipFree(outDevArray));
	CSC(hipFree(devArray));
	CSC(hipFree(devCount));
	delete[] hostArray;
	return 0;
} | beb9105b6ae0879027b5558024e4a2d4ee1476a5.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cstdio>
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <math.h>
#include <iostream>
#include <algorithm>
#define CSC(call) \
do { \
cudaError_t res = call; \
if (res != cudaSuccess) { \
fprintf(stderr, "ERROR in %s:%d. Message: %s\n", \
__FILE__, __LINE__, cudaGetErrorString(res)); \
exit(0); \
} \
} while(0)
// Builds a histogram on the GPU: devCount[v] counts occurrences of value v
// in arr. devCount must be pre-zeroed (main uses cudaMemset) and sized to
// cover every value that can appear in arr (MAX_NUMBER + 1 slots).
// Grid-stride loop, so any launch configuration covers all `size` elements.
__global__ void Histohram(int* devCount, int* arr, int size)
{
	int idx = blockDim.x * blockIdx.x + threadIdx.x;
	int offsetx = blockDim.x * gridDim.x;
	for (int i = idx; i < size; i+= offsetx)
	{
		atomicAdd(&devCount[arr[i]], 1);
	}
}
// Placement pass of counting sort.
// Expects devScan[v] to hold the inclusive prefix count of values <= v,
// i.e. one past the last output slot for value v (as produced by Scan()).
// atomicAdd(.., -1) returns the previous count, so (old - 1) is a unique
// back-to-front output slot for this occurrence of arr[i].
// NOTE: devScan is consumed (decremented) by this kernel.
__global__ void CountSort(int* devScan, int* arr, int* out, int size)
{
	int idx = blockDim.x * blockIdx.x + threadIdx.x;
	int offsetx = blockDim.x * gridDim.x;
	// Grid-stride loop over the input values.
	for (int i = idx; i < size; i += offsetx)
	{
		out[atomicAdd(&devScan[arr[i]], -1) - 1] = arr[i];
	}
}
const int BLOCK_SIZE = 1024;
// Work-efficient per-block scan (Blelloch up-sweep / down-sweep) in shared
// memory. Writes back an INCLUSIVE scan of this block's slice of devArr and
// stores the block total in newDevArr[blockIdx.x] so Scan() can combine
// blocks recursively.
// Preconditions: blockDim.x <= BLOCK_SIZE and (presumably) a power of two --
// the tree traversal relies on it; all call sites in this file use powers of
// two. No bounds check on idx: the launch must cover devArr exactly.
// The barriers inside the while-loops are safe because `d` is uniform across
// the block.
__global__ void KernelBlockScan(int* devArr, int* newDevArr)
{
	int blockSize = blockDim.x;
	__shared__ int arr[BLOCK_SIZE];
	int idx = blockDim.x * blockIdx.x + threadIdx.x;
	// Stage this block's slice into shared memory.
	arr[threadIdx.x] = devArr[idx];
	__syncthreads();
	// Up-sweep: build partial sums in a binary tree; arr[blockSize-1] ends up
	// holding the block total.
	int d = 1;
	while (d < blockSize)
	{
		if (2 * d + threadIdx.x * 2 * d - 1 < blockSize)
			arr[2 * d + threadIdx.x * 2 * d - 1] += arr[d + threadIdx.x * 2 * d - 1];
		d *= 2;
		__syncthreads();
	}
	// Clear the root, remembering the block total for the write-back phase.
	int last = 0;
	if (threadIdx.x == blockSize - 1)
	{
		last = arr[threadIdx.x];
		arr[threadIdx.x] = 0;
	}
	d /= 2;
	__syncthreads();
	// Down-sweep: convert the partial-sum tree into an exclusive scan.
	while (d >= 1)
	{
		if (d * 2 * threadIdx.x + 2 * d - 1 < blockSize)
		{
			auto t = arr[d * 2 * threadIdx.x + d - 1];
			arr[d * 2 * threadIdx.x + d - 1] = arr[d * 2 * threadIdx.x + 2 * d - 1];
			arr[d * 2 * threadIdx.x + 2 * d - 1] += t;
		}
		d /= 2;
		__syncthreads();
	}
	// Write back shifted by one (exclusive -> inclusive). The last thread
	// writes the block total both to its own slot and to newDevArr for the
	// next recursion level.
	if (threadIdx.x == blockSize - 1)
	{
		devArr[idx] = last;
		newDevArr[blockIdx.x] = last;
	}
	else
	{
		devArr[idx] = arr[threadIdx.x + 1];
	}
}
// Adds the scanned per-block totals (newArr) back into each block's slice of
// devArr, completing the multi-block scan. Block 0 needs no offset;
// newArr[i] must already hold the inclusive scan of block totals 0..i.
__global__ void KernelBlockShift(int* devArr, int* newArr)
{
	int idx = blockDim.x * blockIdx.x + threadIdx.x;
	if (blockIdx.x > 0)
		devArr[idx] += newArr[blockIdx.x - 1];
}
// Host-side helper: returns the larger of two ints.
int Max(int a, int b)
{
	if (a > b) {
		return a;
	}
	return b;
}
// Host-side helper: returns the smaller of two ints.
int Min(int a, int b)
{
	if (a < b) {
		return a;
	}
	return b;
}
// Recursive inclusive scan of devCount[0..size) across multiple blocks:
//   1) scan each chunk of up to BLOCK_SIZE elements, collecting chunk totals
//      in newDevCount,
//   2) recursively scan those totals,
//   3) add each preceding chunk total back into its chunk (KernelBlockShift).
// Call sites pass size = MAX_NUMBER + 1 = 2^24, so size is a multiple of
// BLOCK_SIZE (or <= BLOCK_SIZE) at every recursion level.
// NOTE(review): cudaMalloc and the kernel launches are unchecked here,
// unlike the CSC-wrapped calls in main().
void Scan(int* devCount, int size)
{
	int blockCount = Max(1, size / BLOCK_SIZE);
	int blockSize = Min(size, BLOCK_SIZE);
	// Per-block totals gathered by KernelBlockScan.
	int* newDevCount;
	cudaMalloc((void**)&newDevCount, sizeof(int) * blockCount);
	KernelBlockScan<<< blockCount, blockSize >>>(devCount, newDevCount);
	cudaDeviceSynchronize();
	if (size > BLOCK_SIZE)
	{
		Scan(newDevCount, size / BLOCK_SIZE);
		KernelBlockShift<<<size / BLOCK_SIZE, BLOCK_SIZE >>>(devCount, newDevCount);
		cudaDeviceSynchronize();
	}
	cudaFree(newDevCount);
}
using namespace std;
const int MAX_NUMBER = 16777215;
// Debug kernel: every launched thread atomically bumps devInt[0].
// Not referenced anywhere in this file.
__global__ void TestAdd(int* devInt)
{
	atomicAdd(&devInt[0], 1);
}
// Reads a binary stream (int count, then that many ints) from stdin,
// counting-sorts the payload on the GPU, and writes it back to stdout.
int main(int argc, const char** argv)
{
	int size;
	// Validate the header read: on a truncated stream `size` would be garbage.
	if (fread(&size, sizeof(int), 1, stdin) != 1 || size < 0) {
		fprintf(stderr, "failed to read element count\n");
		return 1;
	}
	auto hostArray = new int[size];
	if (fread(hostArray, sizeof(int), size, stdin) != (size_t)size) {
		fprintf(stderr, "failed to read %d elements\n", size);
		delete[] hostArray;
		return 1;
	}
	// Histogram over the full 24-bit value range, zero-initialized.
	int* devCount;
	CSC(cudaMalloc((void**)&devCount, sizeof(int) * (MAX_NUMBER + 1)));
	CSC(cudaMemset(devCount, 0, sizeof(int) * (MAX_NUMBER + 1)));
	int* devArray;
	CSC(cudaMalloc((void**)&devArray, sizeof(int) * size));
	CSC(cudaMemcpy(devArray, hostArray, sizeof(int) * size, cudaMemcpyHostToDevice));
	Histohram<<<256, 256>>>(devCount, devArray, size);
	CSC(cudaDeviceSynchronize());   // also surfaces kernel launch/execution errors
	// Prefix-sum the histogram: devCount[v] becomes the end offset of value v's run.
	Scan(devCount, MAX_NUMBER + 1);
	int* outDevArray;
	CSC(cudaMalloc((void**)&outDevArray, sizeof(int) * size));
	CountSort<<<256, 256>>>(devCount, devArray, outDevArray, size);
	CSC(cudaDeviceSynchronize());
	CSC(cudaMemcpy(hostArray, outDevArray, sizeof(int) * size, cudaMemcpyDeviceToHost));
	fwrite(hostArray, sizeof(int), size, stdout);
	// Release device and host buffers (previously leaked).
	CSC(cudaFree(outDevArray));
	CSC(cudaFree(devArray));
	CSC(cudaFree(devCount));
	delete[] hostArray;
	return 0;
} |
1baeedcd7c08bd0d43fdc400f8c68adbb01397d4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.7.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2015
@generated from zlaqps2_gpu.cu normal z -> d, Fri Sep 11 18:29:20 2015
*/
#include "common_magma.h"
#include "commonblas_d.h"
#define PRECISION_d
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
/* --------------------------------------------------------------------------- */
/**
Purpose
-------
DLAQPS computes a step of QR factorization with column pivoting
of a real M-by-N matrix A by using Blas-3. It tries to factorize
NB columns from A starting from the row OFFSET+1, and updates all
of the matrix with Blas-3 xGEMM.
In some cases, due to catastrophic cancellations, it cannot
factorize NB columns. Hence, the actual number of factorized
columns is returned in KB.
Block A(1:OFFSET,1:N) is accordingly pivoted, but not factorized.
Arguments
---------
@param[in]
m INTEGER
The number of rows of the matrix A. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix A. N >= 0
@param[in]
offset INTEGER
The number of rows of A that have been factorized in
previous steps.
@param[in]
nb INTEGER
The number of columns to factorize.
@param[out]
kb INTEGER
The number of columns actually factorized.
@param[in,out]
dA DOUBLE PRECISION array, dimension (LDDA,N)
On entry, the M-by-N matrix A.
On exit, block A(OFFSET+1:M,1:KB) is the triangular
factor obtained and block A(1:OFFSET,1:N) has been
accordingly pivoted, but no factorized.
The rest of the matrix, block A(OFFSET+1:M,KB+1:N) has
been updated.
@param[in]
ldda INTEGER
The leading dimension of the array A. LDDA >= max(1,M).
@param[in,out]
jpvt INTEGER array, dimension (N)
JPVT(I) = K <==> Column K of the full matrix A has been
permuted into position I in AP.
@param[out]
dtau DOUBLE PRECISION array, dimension (KB)
The scalar factors of the elementary reflectors.
@param[in,out]
dvn1 DOUBLE PRECISION array, dimension (N)
The vector with the partial column norms.
@param[in,out]
dvn2 DOUBLE PRECISION array, dimension (N)
The vector with the exact column norms.
@param[in,out]
dauxv DOUBLE PRECISION array, dimension (NB)
Auxiliar vector.
@param[in,out]
dF DOUBLE PRECISION array, dimension (LDDF,NB)
Matrix F**H = L * Y**H * A.
@param[in]
lddf INTEGER
The leading dimension of the array F. LDDF >= max(1,N).
@ingroup magma_dgeqp3_aux
********************************************************************/
extern "C" magma_int_t
magma_dlaqps2_gpu(
    magma_int_t m, magma_int_t n, magma_int_t offset,
    magma_int_t nb, magma_int_t *kb,
    magmaDouble_ptr dA, magma_int_t ldda,
    magma_int_t *jpvt,
    magmaDouble_ptr dtau,
    magmaDouble_ptr dvn1, magmaDouble_ptr dvn2,
    magmaDouble_ptr dauxv,
    magmaDouble_ptr dF, magma_int_t lddf)
{
#define dA(i_, j_) (dA + (i_) + (j_)*(ldda))
#define dF(i_, j_) (dF + (i_) + (j_)*(lddf))
    double c_zero = MAGMA_D_MAKE( 0.,0.);
    double c_one = MAGMA_D_MAKE( 1.,0.);
    double c_neg_one = MAGMA_D_MAKE(-1.,0.);
    magma_int_t ione = 1;
    magma_int_t i__1, i__2;
    magma_int_t k, rk;
    double tauk;
    magma_int_t pvt, itemp;
    double tol3z;
    // The first nb entries of dauxv (aliased as dAkk) stash the diagonal
    // values overwritten by dlarfg; they are restored after the main loop.
    magmaDouble_ptr dAkk = dauxv;
    dauxv += nb;
    // Device flags for the partial-norm reliability check; lsticcs[0] is
    // polled into lsticc every iteration. NOTE(review): the allocation
    // result is unchecked.
    double lsticc, *lsticcs;
    magma_dmalloc( &lsticcs, 1+256*(n+255)/256 );
    tol3z = magma_dsqrt( lapackf77_dlamch("Epsilon"));
    lsticc = 0;
    k = 0;
    // Factor up to nb columns; bail out early once a partial column norm
    // becomes unreliable (lsticc != 0), leaving recomputation for the end.
    while( k < nb && lsticc == 0 ) {
        rk = offset + k;
        /* Determine ith pivot column and swap if necessary */
        pvt = k - 1 + magma_idamax( n-k, &dvn1[k], ione );
        if (pvt != k) {
            magmablas_dswap( k+1, dF(pvt,0), lddf, dF(k,0), lddf);
            itemp = jpvt[pvt];
            jpvt[pvt] = jpvt[k];
            jpvt[k] = itemp;
            magma_dswap( 2, &dvn1[pvt], n+offset, &dvn1[k], n+offset );
            magmablas_dswap( m, dA(0,pvt), ione, dA(0, k), ione );
        }
        /* Apply previous Householder reflectors to column K:
           A(RK:M,K) := A(RK:M,K) - A(RK:M,1:K-1)*F(K,1:K-1)'.
           Optimization: multiply with beta=0; wait for vector and subtract */
        if (k > 0) {
            magmablas_dgemv_conjv( m-rk, k,
                                   c_neg_one, dA(rk, 0), ldda,
                                   dF(k, 0), lddf,
                                   c_one, dA(rk, k), ione );
        }
        /* Generate elementary reflector H(k). */
        magma_dlarfg_gpu(m-rk, dA(rk, k), dA(rk + 1, k), &dtau[k], &dvn1[k], &dAkk[k]);
        // Place 1.0 on the diagonal so the reflector can be applied with the
        // full column; the true diagonal value was saved in dAkk[k] above.
        magma_dsetvector( 1, &c_one, 1, dA(rk, k), 1 );
        /* Compute Kth column of F:
           Compute F(K+1:N,K) := tau(K)*A(RK:M,K+1:N)'*A(RK:M,K) on the GPU */
        // tau(k) is needed on the host as the dgemv scale factor below.
        if (k < n-1 || k > 0 ) magma_dgetvector( 1, &dtau[k], 1, &tauk, 1 );
        if (k < n-1) {
            magma_dgemv( MagmaConjTrans, m-rk, n-k-1,
                         tauk, dA( rk, k+1 ), ldda,
                               dA( rk, k ), 1,
                         c_zero, dF( k+1, k ), 1 );
        }
        /* Incremental updating of F:
           F(1:N,K) := F(1:N,K) - tau(K)*F(1:N,1:K-1)*A(RK:M,1:K-1)'*A(RK:M,K).
           F(1:N,K) := tau(K)*A(RK:M,K+1:N)'*A(RK:M,K) - tau(K)*F(1:N,1:K-1)*A(RK:M,1:K-1)'*A(RK:M,K)
                    := tau(K)(A(RK:M,K+1:N)' - F(1:N,1:K-1)*A(RK:M,1:K-1)') A(RK:M,K)
           so, F is (updated A)*V */
        if (k > 0) {
            /*z__1 = MAGMA_D_NEGATE( tauk );
            magma_dgemv( MagmaConjTrans, m-rk, k,
                         z__1, dA(rk, 0), ldda,
                               dA(rk, k), ione,
                         c_zero, dauxv, ione ); */
            hipLaunchKernelGGL(( magma_dgemv_kernel3), dim3(k), dim3(BLOCK_SIZE), 0, magma_stream , m-rk, dA(rk, 0), ldda,
                                                              dA(rk, k), dauxv, dtau+k);
            /* I think we only need stricly lower-triangular part */
            magma_dgemv( MagmaNoTrans, n-k-1, k,
                         c_one, dF(k+1,0), lddf,
                                dauxv, ione,
                         c_one, dF(k+1,k), ione );
        }
        /* Update the current row of A:
           A(RK,K+1:N) := A(RK,K+1:N) - A(RK,1:K)*F(K+1:N,1:K)'. */
        if (k < n-1) {
            i__1 = n - k - 1;
            i__2 = k + 1;
            /* left-looking update of rows,                                  *
             * since F=A**H v with original A, so no right-looking */
            magma_dgemm( MagmaNoTrans, MagmaConjTrans, ione, i__1, i__2,
                         c_neg_one, dA(rk, 0 ), ldda,
                                    dF(k+1,0 ), lddf,
                         c_one,     dA(rk, k+1), ldda );
        }
        /* Update partial column norms. */
        if (rk < min(m, n+offset)-1) {
            magmablas_dnrm2_row_check_adjust(n-k-1, tol3z, &dvn1[k+1],
                                             &dvn2[k+1], dA(rk,k+1), ldda, lsticcs);
            magma_dgetvector( 1, &lsticcs[0], 1, &lsticc, 1 );
        }
        //*dA(rk, k) = Akk;
        //magma_dsetvector( 1, &Akk, 1, dA(rk, k), 1 );
        //magmablas_dlacpy(MagmaUpperLower, 1, 1, dAkk, 1, dA(rk, k), 1);
        ++k;
    }
    // restore the diagonals
    magma_dcopymatrix( 1, k, dAkk, 1, dA(offset, 0), ldda+1 );
    // leave k as the last column done
    --k;
    *kb = k + 1;
    rk = offset + *kb - 1;
    /* Apply the block reflector to the rest of the matrix:
       A(OFFSET+KB+1:M,KB+1:N) := A(OFFSET+KB+1:M,KB+1:N) -
       A(OFFSET+KB+1:M,1:KB)*F(KB+1:N,1:KB)' */
    if (*kb < min(n, m - offset)) {
        i__1 = m - rk - 1;
        i__2 = n - *kb;
        magma_dgemm( MagmaNoTrans, MagmaConjTrans, i__1, i__2, *kb,
                     c_neg_one, dA(rk+1, 0 ), ldda,
                                dF(*kb,  0 ), lddf,
                     c_one,     dA(rk+1, *kb), ldda );
    }
    /* Recomputation of difficult columns. */
    if ( lsticc > 0 ) {
        // printf( " -- recompute dnorms --\n" );
        magmablas_dnrm2_check(m-rk-1, n-*kb, dA(rk+1,*kb), ldda,
                              &dvn1[*kb], lsticcs);
        magma_dcopymatrix( n-*kb, 1, &dvn1[*kb], n, &dvn2[*kb], n );
    }
    magma_free(lsticcs);
    return MAGMA_SUCCESS;
} /* magma_dlaqps */
| 1baeedcd7c08bd0d43fdc400f8c68adbb01397d4.cu | /*
-- MAGMA (version 1.7.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2015
@generated from zlaqps2_gpu.cu normal z -> d, Fri Sep 11 18:29:20 2015
*/
#include "common_magma.h"
#include "commonblas_d.h"
#define PRECISION_d
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
/* --------------------------------------------------------------------------- */
/**
Purpose
-------
DLAQPS computes a step of QR factorization with column pivoting
of a real M-by-N matrix A by using Blas-3. It tries to factorize
NB columns from A starting from the row OFFSET+1, and updates all
of the matrix with Blas-3 xGEMM.
In some cases, due to catastrophic cancellations, it cannot
factorize NB columns. Hence, the actual number of factorized
columns is returned in KB.
Block A(1:OFFSET,1:N) is accordingly pivoted, but not factorized.
Arguments
---------
@param[in]
m INTEGER
The number of rows of the matrix A. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix A. N >= 0
@param[in]
offset INTEGER
The number of rows of A that have been factorized in
previous steps.
@param[in]
nb INTEGER
The number of columns to factorize.
@param[out]
kb INTEGER
The number of columns actually factorized.
@param[in,out]
dA DOUBLE PRECISION array, dimension (LDDA,N)
On entry, the M-by-N matrix A.
On exit, block A(OFFSET+1:M,1:KB) is the triangular
factor obtained and block A(1:OFFSET,1:N) has been
accordingly pivoted, but no factorized.
The rest of the matrix, block A(OFFSET+1:M,KB+1:N) has
been updated.
@param[in]
ldda INTEGER
The leading dimension of the array A. LDDA >= max(1,M).
@param[in,out]
jpvt INTEGER array, dimension (N)
JPVT(I) = K <==> Column K of the full matrix A has been
permuted into position I in AP.
@param[out]
dtau DOUBLE PRECISION array, dimension (KB)
The scalar factors of the elementary reflectors.
@param[in,out]
dvn1 DOUBLE PRECISION array, dimension (N)
The vector with the partial column norms.
@param[in,out]
dvn2 DOUBLE PRECISION array, dimension (N)
The vector with the exact column norms.
@param[in,out]
dauxv DOUBLE PRECISION array, dimension (NB)
Auxiliar vector.
@param[in,out]
dF DOUBLE PRECISION array, dimension (LDDF,NB)
Matrix F**H = L * Y**H * A.
@param[in]
lddf INTEGER
The leading dimension of the array F. LDDF >= max(1,N).
@ingroup magma_dgeqp3_aux
********************************************************************/
extern "C" magma_int_t
magma_dlaqps2_gpu(
    magma_int_t m, magma_int_t n, magma_int_t offset,
    magma_int_t nb, magma_int_t *kb,
    magmaDouble_ptr dA, magma_int_t ldda,
    magma_int_t *jpvt,
    magmaDouble_ptr dtau,
    magmaDouble_ptr dvn1, magmaDouble_ptr dvn2,
    magmaDouble_ptr dauxv,
    magmaDouble_ptr dF, magma_int_t lddf)
{
#define dA(i_, j_) (dA + (i_) + (j_)*(ldda))
#define dF(i_, j_) (dF + (i_) + (j_)*(lddf))
    double c_zero = MAGMA_D_MAKE( 0.,0.);
    double c_one = MAGMA_D_MAKE( 1.,0.);
    double c_neg_one = MAGMA_D_MAKE(-1.,0.);
    magma_int_t ione = 1;
    magma_int_t i__1, i__2;
    magma_int_t k, rk;
    double tauk;
    magma_int_t pvt, itemp;
    double tol3z;
    // The first nb entries of dauxv (aliased as dAkk) stash the diagonal
    // values overwritten by dlarfg; they are restored after the main loop.
    magmaDouble_ptr dAkk = dauxv;
    dauxv += nb;
    // Device flags for the partial-norm reliability check; lsticcs[0] is
    // polled into lsticc every iteration. NOTE(review): the allocation
    // result is unchecked.
    double lsticc, *lsticcs;
    magma_dmalloc( &lsticcs, 1+256*(n+255)/256 );
    tol3z = magma_dsqrt( lapackf77_dlamch("Epsilon"));
    lsticc = 0;
    k = 0;
    // Factor up to nb columns; bail out early once a partial column norm
    // becomes unreliable (lsticc != 0), leaving recomputation for the end.
    while( k < nb && lsticc == 0 ) {
        rk = offset + k;
        /* Determine ith pivot column and swap if necessary */
        pvt = k - 1 + magma_idamax( n-k, &dvn1[k], ione );
        if (pvt != k) {
            magmablas_dswap( k+1, dF(pvt,0), lddf, dF(k,0), lddf);
            itemp = jpvt[pvt];
            jpvt[pvt] = jpvt[k];
            jpvt[k] = itemp;
            magma_dswap( 2, &dvn1[pvt], n+offset, &dvn1[k], n+offset );
            magmablas_dswap( m, dA(0,pvt), ione, dA(0, k), ione );
        }
        /* Apply previous Householder reflectors to column K:
           A(RK:M,K) := A(RK:M,K) - A(RK:M,1:K-1)*F(K,1:K-1)'.
           Optimization: multiply with beta=0; wait for vector and subtract */
        if (k > 0) {
            magmablas_dgemv_conjv( m-rk, k,
                                   c_neg_one, dA(rk, 0), ldda,
                                   dF(k, 0), lddf,
                                   c_one, dA(rk, k), ione );
        }
        /* Generate elementary reflector H(k). */
        magma_dlarfg_gpu(m-rk, dA(rk, k), dA(rk + 1, k), &dtau[k], &dvn1[k], &dAkk[k]);
        // Place 1.0 on the diagonal so the reflector can be applied with the
        // full column; the true diagonal value was saved in dAkk[k] above.
        magma_dsetvector( 1, &c_one, 1, dA(rk, k), 1 );
        /* Compute Kth column of F:
           Compute F(K+1:N,K) := tau(K)*A(RK:M,K+1:N)'*A(RK:M,K) on the GPU */
        // tau(k) is needed on the host as the dgemv scale factor below.
        if (k < n-1 || k > 0 ) magma_dgetvector( 1, &dtau[k], 1, &tauk, 1 );
        if (k < n-1) {
            magma_dgemv( MagmaConjTrans, m-rk, n-k-1,
                         tauk, dA( rk, k+1 ), ldda,
                               dA( rk, k ), 1,
                         c_zero, dF( k+1, k ), 1 );
        }
        /* Incremental updating of F:
           F(1:N,K) := F(1:N,K) - tau(K)*F(1:N,1:K-1)*A(RK:M,1:K-1)'*A(RK:M,K).
           F(1:N,K) := tau(K)*A(RK:M,K+1:N)'*A(RK:M,K) - tau(K)*F(1:N,1:K-1)*A(RK:M,1:K-1)'*A(RK:M,K)
                    := tau(K)(A(RK:M,K+1:N)' - F(1:N,1:K-1)*A(RK:M,1:K-1)') A(RK:M,K)
           so, F is (updated A)*V */
        if (k > 0) {
            /*z__1 = MAGMA_D_NEGATE( tauk );
            magma_dgemv( MagmaConjTrans, m-rk, k,
                         z__1, dA(rk, 0), ldda,
                               dA(rk, k), ione,
                         c_zero, dauxv, ione ); */
            magma_dgemv_kernel3<<< k, BLOCK_SIZE, 0, magma_stream >>>(m-rk, dA(rk, 0), ldda,
                                                                     dA(rk, k), dauxv, dtau+k);
            /* I think we only need stricly lower-triangular part */
            magma_dgemv( MagmaNoTrans, n-k-1, k,
                         c_one, dF(k+1,0), lddf,
                                dauxv, ione,
                         c_one, dF(k+1,k), ione );
        }
        /* Update the current row of A:
           A(RK,K+1:N) := A(RK,K+1:N) - A(RK,1:K)*F(K+1:N,1:K)'. */
        if (k < n-1) {
            i__1 = n - k - 1;
            i__2 = k + 1;
            /* left-looking update of rows,                                  *
             * since F=A**H v with original A, so no right-looking */
            magma_dgemm( MagmaNoTrans, MagmaConjTrans, ione, i__1, i__2,
                         c_neg_one, dA(rk, 0 ), ldda,
                                    dF(k+1,0 ), lddf,
                         c_one,     dA(rk, k+1), ldda );
        }
        /* Update partial column norms. */
        if (rk < min(m, n+offset)-1) {
            magmablas_dnrm2_row_check_adjust(n-k-1, tol3z, &dvn1[k+1],
                                             &dvn2[k+1], dA(rk,k+1), ldda, lsticcs);
            magma_dgetvector( 1, &lsticcs[0], 1, &lsticc, 1 );
        }
        //*dA(rk, k) = Akk;
        //magma_dsetvector( 1, &Akk, 1, dA(rk, k), 1 );
        //magmablas_dlacpy(MagmaUpperLower, 1, 1, dAkk, 1, dA(rk, k), 1);
        ++k;
    }
    // restore the diagonals
    magma_dcopymatrix( 1, k, dAkk, 1, dA(offset, 0), ldda+1 );
    // leave k as the last column done
    --k;
    *kb = k + 1;
    rk = offset + *kb - 1;
    /* Apply the block reflector to the rest of the matrix:
       A(OFFSET+KB+1:M,KB+1:N) := A(OFFSET+KB+1:M,KB+1:N) -
       A(OFFSET+KB+1:M,1:KB)*F(KB+1:N,1:KB)' */
    if (*kb < min(n, m - offset)) {
        i__1 = m - rk - 1;
        i__2 = n - *kb;
        magma_dgemm( MagmaNoTrans, MagmaConjTrans, i__1, i__2, *kb,
                     c_neg_one, dA(rk+1, 0 ), ldda,
                                dF(*kb,  0 ), lddf,
                     c_one,     dA(rk+1, *kb), ldda );
    }
    /* Recomputation of difficult columns. */
    if ( lsticc > 0 ) {
        // printf( " -- recompute dnorms --\n" );
        magmablas_dnrm2_check(m-rk-1, n-*kb, dA(rk+1,*kb), ldda,
                              &dvn1[*kb], lsticcs);
        magma_dcopymatrix( n-*kb, 1, &dvn1[*kb], n, &dvn2[*kb], n );
    }
    magma_free(lsticcs);
    return MAGMA_SUCCESS;
} /* magma_dlaqps */
|
742e07813a90ec87ea57bab292ba14e2ac84e875.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "SortDistances.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Auto-generated benchmark driver for SortDistances: sweeps matrix sizes
// (rows of matrices_) x 20 block shapes (blocks_), timing 1000 kernel
// launches per configuration and printing [usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)].
// argv[1] = number of matrix sizes to sweep.
int main(int argc, char **argv) {
	hipSetDevice(0);
	// Guard against a missing CLI argument (strtol(argv[1], ..) would be UB).
	if (argc < 2) {
		fprintf(stderr, "usage: %s <matrix_len>\n", argv[0]);
		return 1;
	}
	char* p;
	int matrix_len = strtol(argv[1], &p, 10);
	for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
		for (int block_looper = 0; block_looper < 20; block_looper++) {
			int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
			int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
			// BUG FIX: allocation sizes are in bytes -- the original omitted
			// sizeof(), so the kernel indexed far past both allocations.
			float *dist = NULL;
			hipMalloc(&dist, XSIZE * YSIZE * sizeof(float));
			int *idMat = NULL;
			hipMalloc(&idMat, XSIZE * YSIZE * sizeof(int));
			int n = XSIZE * YSIZE;
			int k = 1;
			// Round the launch extents up to multiples of the block shape.
			int iXSIZE = XSIZE;
			int iYSIZE = YSIZE;
			while (iXSIZE % BLOCKX != 0) {
				iXSIZE++;
			}
			while (iYSIZE % BLOCKY != 0) {
				iYSIZE++;
			}
			dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
			dim3 threadBlock(BLOCKX, BLOCKY);
			hipFree(0);  // forces lazy context creation before timing
			hipLaunchKernelGGL(( SortDistances), dim3(gridBlock), dim3(threadBlock), 0, 0, dist, idMat, n, k);
			hipDeviceSynchronize();
			// Warm-up launches (excluded from the timed section).
			for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
				hipLaunchKernelGGL(( SortDistances), dim3(gridBlock), dim3(threadBlock), 0, 0, dist, idMat, n, k);
			}
			auto start = steady_clock::now();
			for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
				hipLaunchKernelGGL(( SortDistances), dim3(gridBlock), dim3(threadBlock), 0, 0, dist, idMat, n, k);
			}
			// NOTE(review): launches are asynchronous and there is no sync before
			// `end`, so this mostly measures enqueue cost -- kept as-is to match
			// the original measurement methodology.
			auto end = steady_clock::now();
			auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
			cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
			// BUG FIX: free per-configuration buffers (previously leaked on
			// every one of the 20 * matrix_len iterations).
			hipFree(dist);
			hipFree(idMat);
		}
	}
	return 0;
}
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "SortDistances.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Auto-generated benchmark driver for SortDistances: sweeps matrix sizes
// (rows of matrices_) x 20 block shapes (blocks_), timing 1000 kernel
// launches per configuration and printing [usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)].
// argv[1] = number of matrix sizes to sweep.
int main(int argc, char **argv) {
	cudaSetDevice(0);
	// Guard against a missing CLI argument (strtol(argv[1], ..) would be UB).
	if (argc < 2) {
		fprintf(stderr, "usage: %s <matrix_len>\n", argv[0]);
		return 1;
	}
	char* p;
	int matrix_len = strtol(argv[1], &p, 10);
	for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
		for (int block_looper = 0; block_looper < 20; block_looper++) {
			int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
			int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
			// BUG FIX: allocation sizes are in bytes -- the original omitted
			// sizeof(), so the kernel indexed far past both allocations.
			float *dist = NULL;
			cudaMalloc(&dist, XSIZE * YSIZE * sizeof(float));
			int *idMat = NULL;
			cudaMalloc(&idMat, XSIZE * YSIZE * sizeof(int));
			int n = XSIZE * YSIZE;
			int k = 1;
			// Round the launch extents up to multiples of the block shape.
			int iXSIZE = XSIZE;
			int iYSIZE = YSIZE;
			while (iXSIZE % BLOCKX != 0) {
				iXSIZE++;
			}
			while (iYSIZE % BLOCKY != 0) {
				iYSIZE++;
			}
			dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
			dim3 threadBlock(BLOCKX, BLOCKY);
			cudaFree(0);  // forces lazy context creation before timing
			SortDistances<<<gridBlock, threadBlock>>>(dist, idMat, n, k);
			cudaDeviceSynchronize();
			// Warm-up launches (excluded from the timed section).
			for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
				SortDistances<<<gridBlock, threadBlock>>>(dist, idMat, n, k);
			}
			auto start = steady_clock::now();
			for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
				SortDistances<<<gridBlock, threadBlock>>>(dist, idMat, n, k);
			}
			// NOTE(review): launches are asynchronous and there is no sync before
			// `end`, so this mostly measures enqueue cost -- kept as-is to match
			// the original measurement methodology.
			auto end = steady_clock::now();
			auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
			cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
			// BUG FIX: free per-configuration buffers (previously leaked on
			// every one of the 20 * matrix_len iterations).
			cudaFree(dist);
			cudaFree(idMat);
		}
	}
	return 0;
}
17df28a92f1e79195f2c962707b85576decb2317.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the root directory of this source tree.
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "cuda_utils.h"
#include "cutil_math.h" // required for float3 vector math
// Ray / point-ball intersection test.
// For each ray j of the m rays in batch element blockIdx.x, scans all n points
// and records up to n_max points whose sphere of radius `radius` the ray hits:
//   idx[j][c]       = point index (slots left at -1 when fewer than n_max hits)
//   min_depth[j][c] = ray depth where the ray enters the sphere
//   max_depth[j][c] = ray depth where the ray exits the sphere
// Launch: gridDim.x = b (one block per batch element); the block's threads
// stride over the m rays.
// NOTE(review): the depth formula treats ray_dir as unit-length -- confirm
// the caller normalizes. Also, the dot product is squared, so spheres centered
// behind the ray origin can still be reported.
__global__ void ball_intersect_point_kernel(
            int b, int n, int m, float radius,
            int n_max,
            const float *__restrict__ ray_start,
            const float *__restrict__ ray_dir,
            const float *__restrict__ points,
            int *__restrict__ idx,
            float *__restrict__ min_depth,
            float *__restrict__ max_depth) {
  // Advance every pointer to this batch element's slice.
  int batch_index = blockIdx.x;
  points += batch_index * n * 3;
  ray_start += batch_index * m * 3;
  ray_dir += batch_index * m * 3;
  idx += batch_index * m * n_max;
  min_depth += batch_index * m * n_max;
  max_depth += batch_index * m * n_max;
  int index = threadIdx.x;
  int stride = blockDim.x;
  float radius2 = radius * radius;
  for (int j = index; j < m; j += stride) {
    // Ray origin and direction for ray j.
    float x0 = ray_start[j * 3 + 0];
    float y0 = ray_start[j * 3 + 1];
    float z0 = ray_start[j * 3 + 2];
    float xw = ray_dir[j * 3 + 0];
    float yw = ray_dir[j * 3 + 1];
    float zw = ray_dir[j * 3 + 2];
    // Mark all hit slots empty up front.
    for (int l = 0; l < n_max; ++l) {
      idx[j * n_max + l] = -1;
    }
    for (int k = 0, cnt = 0; k < n && cnt < n_max; ++k) {
      // Offset from the ray origin to point k.
      float x = points[k * 3 + 0] - x0;
      float y = points[k * 3 + 1] - y0;
      float z = points[k * 3 + 2] - z0;
      // d2: squared distance to the point; d2_proj: squared length of its
      // projection onto the ray; r2: squared perpendicular distance.
      float d2 = x * x + y * y + z * z;
      float d2_proj = pow(x * xw + y * yw + z * zw, 2);
      float r2 = d2 - d2_proj;
      if (r2 < radius2) {
        idx[j * n_max + cnt] = k;
        // Half chord length around the closest approach gives entry/exit depths.
        float depth = sqrt(d2_proj);
        float depth_blur = sqrt(radius2 - r2);
        min_depth[j * n_max + cnt] = depth - depth_blur;
        max_depth[j * n_max + cnt] = depth + depth_blur;
        ++cnt;
      }
    }
  }
}
// Ray / axis-aligned cube intersection via the slab method.
// `center` is the cube center, `half_voxel` its half edge length.
// Returns (t_near, t_far) along `dir` from `ori`, or (-1, -1) on a miss.
// t_near is clamped to >= 0 (f_low starts at 0) and t_far is capped at 100000.
__device__ float2 RayAABBIntersection(
  const float3 &ori,
  const float3 &dir,
  const float3 &center,
  float half_voxel) {
  float f_low = 0;
  float f_high = 100000.;
  float f_dim_low, f_dim_high, temp, inv_ray_dir, start, aabb;
  // Intersect the ray with the two bounding planes of each axis' slab.
  for (int d = 0; d < 3; ++d) {
    switch (d) {
      case 0:
        inv_ray_dir = __fdividef(1.0f, dir.x); start = ori.x; aabb = center.x; break;
      case 1:
        inv_ray_dir = __fdividef(1.0f, dir.y); start = ori.y; aabb = center.y; break;
      case 2:
        inv_ray_dir = __fdividef(1.0f, dir.z); start = ori.z; aabb = center.z; break;
    }
    f_dim_low = (aabb - half_voxel - start) * inv_ray_dir;
    f_dim_high = (aabb + half_voxel - start) * inv_ray_dir;
    // Make sure low is less than high
    if (f_dim_high < f_dim_low) {
      temp = f_dim_low;
      f_dim_low = f_dim_high;
      f_dim_high = temp;
    }
    // If this dimension's high is less than the low we got then we definitely missed.
    if (f_dim_high < f_low) {
      return make_float2(-1.0f, -1.0f);
    }
    // Likewise if the low is less than the high.
    if (f_dim_low > f_high) {
      return make_float2(-1.0f, -1.0f);
    }
    // Add the clip from this dimension to the previous results
    f_low = (f_dim_low > f_low) ? f_dim_low : f_low;
    f_high = (f_dim_high < f_high) ? f_dim_high : f_high;
    if (f_low > f_high) {
      return make_float2(-1.0f, -1.0f);
    }
  }
  return make_float2(f_low, f_high);
}
// Ray / axis-aligned voxel intersection.
// For each ray j of the m rays in batch element blockIdx.x, tests all n voxel
// centers (cube edge `voxelsize`) and records up to n_max hits with their
// entry/exit depths (see RayAABBIntersection). Unused idx slots stay -1.
// Launch: one block per batch element; the block's threads stride over rays.
__global__ void aabb_intersect_point_kernel(
            int b, int n, int m, float voxelsize,
            int n_max,
            const float *__restrict__ ray_start,
            const float *__restrict__ ray_dir,
            const float *__restrict__ points,
            int *__restrict__ idx,
            float *__restrict__ min_depth,
            float *__restrict__ max_depth) {
  // Advance every pointer to this batch element's slice.
  int batch_index = blockIdx.x;
  points += batch_index * n * 3;
  ray_start += batch_index * m * 3;
  ray_dir += batch_index * m * 3;
  idx += batch_index * m * n_max;
  min_depth += batch_index * m * n_max;
  max_depth += batch_index * m * n_max;
  int index = threadIdx.x;
  int stride = blockDim.x;
  float half_voxel = voxelsize * 0.5;
  for (int j = index; j < m; j += stride) {
    // Mark all hit slots empty up front.
    for (int l = 0; l < n_max; ++l) {
      idx[j * n_max + l] = -1;
    }
    for (int k = 0, cnt = 0; k < n && cnt < n_max; ++k) {
      float2 depths = RayAABBIntersection(
        make_float3(ray_start[j * 3 + 0], ray_start[j * 3 + 1], ray_start[j * 3 + 2]),
        make_float3(ray_dir[j * 3 + 0], ray_dir[j * 3 + 1], ray_dir[j * 3 + 2]),
        make_float3(points[k * 3 + 0], points[k * 3 + 1], points[k * 3 + 2]),
        half_voxel);
      // (-1, -1) signals a miss; anything else is an (entry, exit) pair.
      if (depths.x > -1.0f){
        idx[j * n_max + cnt] = k;
        min_depth[j * n_max + cnt] = depths.x;
        max_depth[j * n_max + cnt] = depths.y;
        ++cnt;
      }
    }
  }
}
// Sparse-voxel-octree traversal variant of the AABB intersection.
// children layout: 9 ints per node -- entries [0..7] are child node indices
// (-1 if absent) and entry [8] is the node's edge length in leaf-voxel units;
// a value of 1 marks a leaf. The root is the last node (index n - 1).
// A fixed 256-entry DFS stack bounds the traversal depth (device assert
// guards overflow). Hits are emitted in traversal order (not sorted by
// depth), up to n_max per ray; unused idx slots stay -1.
__global__ void svo_intersect_point_kernel(
            int b, int n, int m, float voxelsize,
            int n_max,
            const float *__restrict__ ray_start,
            const float *__restrict__ ray_dir,
            const float *__restrict__ points,
            const int *__restrict__ children,
            int *__restrict__ idx,
            float *__restrict__ min_depth,
            float *__restrict__ max_depth) {
  /*
  TODO: this is an inefficient implementation of the
        navie Ray -- Sparse Voxel Octree Intersection.
        It can be further improved using:
  
        Revelles, Jorge, Carlos Urena, and Miguel Lastra.
        "An efficient parametric algorithm for octree traversal." (2000).
  */
  // Advance every pointer to this batch element's slice.
  int batch_index = blockIdx.x;
  points += batch_index * n * 3;
  children += batch_index * n * 9;
  ray_start += batch_index * m * 3;
  ray_dir += batch_index * m * 3;
  idx += batch_index * m * n_max;
  min_depth += batch_index * m * n_max;
  max_depth += batch_index * m * n_max;
  int index = threadIdx.x;
  int stride = blockDim.x;
  float half_voxel = voxelsize * 0.5;
  for (int j = index; j < m; j += stride) {
    // Mark all hit slots empty up front.
    for (int l = 0; l < n_max; ++l) {
      idx[j * n_max + l] = -1;
    }
    int stack[256] = {-1};  // DFS, initialize the stack
    int ptr = 0, cnt = 0, k = -1;
    stack[ptr] = n - 1;     // ROOT node is always the last
    while (ptr > -1 && cnt < n_max) {
      assert((ptr < 256));
      // evaluate the current node
      k = stack[ptr];
      // Test the ray against this node's cube; the cube edge is the node
      // size (children[k*9+8]) scaled by the leaf voxel size.
      float2 depths = RayAABBIntersection(
        make_float3(ray_start[j * 3 + 0], ray_start[j * 3 + 1], ray_start[j * 3 + 2]),
        make_float3(ray_dir[j * 3 + 0], ray_dir[j * 3 + 1], ray_dir[j * 3 + 2]),
        make_float3(points[k * 3 + 0], points[k * 3 + 1], points[k * 3 + 2]),
        half_voxel * float(children[k * 9 + 8]));
      stack[ptr] = -1; ptr--;
      if (depths.x > -1.0f) { // ray did not miss the voxel
        // TODO: here it should be able to know which children is ok, further optimize the code
        if (children[k * 9 + 8] == 1) { // this is a terminal node
          idx[j * n_max + cnt] = k;
          min_depth[j * n_max + cnt] = depths.x;
          max_depth[j * n_max + cnt] = depths.y;
          ++cnt; continue;
        }
        for (int u = 0; u < 8; u++) {
          if (children[k * 9 + u] > -1) {
            ptr++; stack[ptr] = children[k * 9 + u]; // push child to the stack
          }
        }
      }
    }
  }
}
// Moller-Trumbore-style ray/triangle intersection with a tolerance `blur`
// that inflates the barycentric bounds. Returns (t, u, v) on a hit and
// (-1, 0, 0) on a miss. NOTE(review): there is no guard for a degenerate
// triangle or a ray parallel to its plane (det == 0); __fdividef then yields
// a non-finite inverse -- confirm callers never hit that case.
__device__ float3 RayTriangleIntersection(
    const float3 &ori,
    const float3 &dir,
    const float3 &v0,
    const float3 &v1,
    const float3 &v2,
    float blur) {
  float3 v0v1 = v1 - v0;
  float3 v0v2 = v2 - v0;
  float3 v0O = ori - v0;
  float3 dir_crs_v0v2 = cross(dir, v0v2);

  float det = dot(v0v1, dir_crs_v0v2);
  det = __fdividef(1.0f, det);  // CUDA intrinsic function; det now holds 1/det

  // First barycentric coordinate, checked against the blurred [0, 1] range.
  float u = dot(v0O, dir_crs_v0v2) * det;
  if ((u < 0.0f - blur) || (u > 1.0f + blur))
    return make_float3(-1.0f, 0.0f, 0.0f);

  float3 v0O_crs_v0v1 = cross(v0O, v0v1);
  float v = dot(dir, v0O_crs_v0v1) * det;
  if ((v < 0.0f - blur) || (v > 1.0f + blur))
    return make_float3(-1.0f, 0.0f, 0.0f);

  // u + v must also stay inside the (blurred) triangle.
  if (((u + v) < 0.0f - blur) || ((u + v) > 1.0f + blur))
    return make_float3(-1.0f, 0.0f, 0.0f);

  float t = dot(v0v2, v0O_crs_v0v1) * det;
  return make_float3(t, u, v);
}
// For each ray, intersect against n triangles (9 floats per triangle in
// face_points: v0, v1, v2) and keep up to n_max hits sorted by increasing
// depth. `depth` stores 3 floats per hit: [t, min_offset, max_offset], where
// the offsets bound the hit interval, clipped to +/-cagesize and to half the
// gap to the neighboring hit. One block per batch element; threads stride
// over the m rays. Unfilled idx slots are left at -1.
__global__ void triangle_intersect_point_kernel(
    int b, int n, int m, float cagesize,
    float blur, int n_max,
    const float *__restrict__ ray_start,
    const float *__restrict__ ray_dir,
    const float *__restrict__ face_points,
    int *__restrict__ idx,
    float *__restrict__ depth,
    float *__restrict__ uv) {
  // Offset all pointers to this batch element's slice.
  int batch_index = blockIdx.x;
  face_points += batch_index * n * 9;
  ray_start += batch_index * m * 3;
  ray_dir += batch_index * m * 3;
  idx += batch_index * m * n_max;
  depth += batch_index * m * n_max * 3;
  uv += batch_index * m * n_max * 2;

  int index = threadIdx.x;
  int stride = blockDim.x;
  for (int j = index; j < m; j += stride) {
    // go over rays
    for (int l = 0; l < n_max; ++l) {
      idx[j * n_max + l] = -1;
    }
    int cnt = 0;
    for (int k = 0; k < n && cnt < n_max; ++k) {
      // go over triangles
      float3 tuv = RayTriangleIntersection(
          make_float3(ray_start[j * 3 + 0], ray_start[j * 3 + 1], ray_start[j * 3 + 2]),
          make_float3(ray_dir[j * 3 + 0], ray_dir[j * 3 + 1], ray_dir[j * 3 + 2]),
          make_float3(face_points[k * 9 + 0], face_points[k * 9 + 1], face_points[k * 9 + 2]),
          make_float3(face_points[k * 9 + 3], face_points[k * 9 + 4], face_points[k * 9 + 5]),
          make_float3(face_points[k * 9 + 6], face_points[k * 9 + 7], face_points[k * 9 + 8]),
          blur);
      if (tuv.x > 0) {  // t > 0: intersection lies in front of the origin
        int ki = k;
        float d = tuv.x, u = tuv.y, v = tuv.z;
        // sort: single insertion pass -- swap the carried hit with every stored
        // hit that is farther, so the arrays stay ordered by depth
        for (int l = 0; l < cnt; l++) {
          if (d < depth[j * n_max * 3 + l * 3]) {
            swap(ki, idx[j * n_max + l]);
            swap(d, depth[j * n_max * 3 + l * 3]);
            swap(u, uv[j * n_max * 2 + l * 2]);
            swap(v, uv[j * n_max * 2 + l * 2 + 1]);
          }
        }
        // append whatever hit was carried to the end of the pass
        idx[j * n_max + cnt] = ki;
        depth[j * n_max * 3 + cnt * 3] = d;
        uv[j * n_max * 2 + cnt * 2] = u;
        uv[j * n_max * 2 + cnt * 2 + 1] = v;
        cnt++;
      }
    }
    for (int l = 0; l < cnt; l++) {
      // compute min_depth: first hit opens at -cagesize; later hits at half the
      // gap to the previous hit, capped at cagesize (stored negated)
      if (l == 0)
        depth[j * n_max * 3 + l * 3 + 1] = -cagesize;
      else
        depth[j * n_max * 3 + l * 3 + 1] = -fminf(cagesize,
            .5 * (depth[j * n_max * 3 + l * 3] - depth[j * n_max * 3 + l * 3 - 3]));
      // compute max_depth: symmetric, using the gap to the next hit
      if (l == cnt - 1)
        depth[j * n_max * 3 + l * 3 + 2] = cagesize;
      else
        depth[j * n_max * 3 + l * 3 + 2] = fminf(cagesize,
            .5 * (depth[j * n_max * 3 + l * 3 + 3] - depth[j * n_max * 3 + l * 3]));
    }
  }
}
// Host launcher: one block per batch element, thread count sized to the m rays,
// running on the current torch-managed HIP stream.
void ball_intersect_point_kernel_wrapper(
    int b, int n, int m, float radius, int n_max,
    const float *ray_start, const float *ray_dir, const float *points,
    int *idx, float *min_depth, float *max_depth) {
  const dim3 grid(b);
  const dim3 block(opt_n_threads(m));
  hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
  hipLaunchKernelGGL(ball_intersect_point_kernel, grid, block, 0, stream,
                     b, n, m, radius, n_max, ray_start, ray_dir, points,
                     idx, min_depth, max_depth);
  CUDA_CHECK_ERRORS();
}
// Host launcher: one block per batch element, thread count sized to the m rays,
// running on the current torch-managed HIP stream.
void aabb_intersect_point_kernel_wrapper(
    int b, int n, int m, float voxelsize, int n_max,
    const float *ray_start, const float *ray_dir, const float *points,
    int *idx, float *min_depth, float *max_depth) {
  const dim3 grid(b);
  const dim3 block(opt_n_threads(m));
  hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
  hipLaunchKernelGGL(aabb_intersect_point_kernel, grid, block, 0, stream,
                     b, n, m, voxelsize, n_max, ray_start, ray_dir, points,
                     idx, min_depth, max_depth);
  CUDA_CHECK_ERRORS();
}
// Host launcher for the octree traversal kernel: one block per batch element,
// thread count sized to the m rays, on the current torch-managed HIP stream.
void svo_intersect_point_kernel_wrapper(
    int b, int n, int m, float voxelsize, int n_max,
    const float *ray_start, const float *ray_dir, const float *points, const int *children,
    int *idx, float *min_depth, float *max_depth) {
  const dim3 grid(b);
  const dim3 block(opt_n_threads(m));
  hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
  hipLaunchKernelGGL(svo_intersect_point_kernel, grid, block, 0, stream,
                     b, n, m, voxelsize, n_max, ray_start, ray_dir, points,
                     children, idx, min_depth, max_depth);
  CUDA_CHECK_ERRORS();
}
// Host launcher for the triangle intersection kernel: one block per batch
// element, thread count sized to the m rays, on the current HIP stream.
void triangle_intersect_point_kernel_wrapper(
    int b, int n, int m, float cagesize, float blur, int n_max,
    const float *ray_start, const float *ray_dir, const float *face_points,
    int *idx, float *depth, float *uv) {
  const dim3 grid(b);
  const dim3 block(opt_n_threads(m));
  hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
  hipLaunchKernelGGL(triangle_intersect_point_kernel, grid, block, 0, stream,
                     b, n, m, cagesize, blur, n_max, ray_start, ray_dir,
                     face_points, idx, depth, uv);
  CUDA_CHECK_ERRORS();
}
| 17df28a92f1e79195f2c962707b85576decb2317.cu | // Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the root directory of this source tree.
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "cuda_utils.h"
#include "cutil_math.h" // required for float3 vector math
// For each ray, collect up to n_max points whose perpendicular distance to the
// ray is < radius, recording the point index and the depths where the ray
// enters and exits each ball. One block per batch element; threads stride over
// the m rays. Unfilled idx slots are left at -1.
// NOTE(review): depth math assumes ray_dir is unit length -- confirm callers.
__global__ void ball_intersect_point_kernel(
    int b, int n, int m, float radius,
    int n_max,
    const float *__restrict__ ray_start,
    const float *__restrict__ ray_dir,
    const float *__restrict__ points,
    int *__restrict__ idx,
    float *__restrict__ min_depth,
    float *__restrict__ max_depth) {
  // Offset all pointers to this batch element's slice.
  int batch_index = blockIdx.x;
  points += batch_index * n * 3;
  ray_start += batch_index * m * 3;
  ray_dir += batch_index * m * 3;
  idx += batch_index * m * n_max;
  min_depth += batch_index * m * n_max;
  max_depth += batch_index * m * n_max;

  int index = threadIdx.x;
  int stride = blockDim.x;
  float radius2 = radius * radius;
  for (int j = index; j < m; j += stride) {
    float x0 = ray_start[j * 3 + 0];
    float y0 = ray_start[j * 3 + 1];
    float z0 = ray_start[j * 3 + 2];
    float xw = ray_dir[j * 3 + 0];
    float yw = ray_dir[j * 3 + 1];
    float zw = ray_dir[j * 3 + 2];

    for (int l = 0; l < n_max; ++l) {
      idx[j * n_max + l] = -1;
    }
    for (int k = 0, cnt = 0; k < n && cnt < n_max; ++k) {
      float x = points[k * 3 + 0] - x0;
      float y = points[k * 3 + 1] - y0;
      float z = points[k * 3 + 2] - z0;
      float d2 = x * x + y * y + z * z;
      // Square the projection with a single-precision multiply instead of the
      // old pow(..., 2), which routed through double-precision pow().
      float proj = x * xw + y * yw + z * zw;
      float d2_proj = proj * proj;
      float r2 = d2 - d2_proj;  // squared perpendicular distance to the ray
      if (r2 < radius2) {
        idx[j * n_max + cnt] = k;
        float depth = sqrtf(d2_proj);            // depth of closest approach
        float depth_blur = sqrtf(radius2 - r2);  // half chord length inside the ball
        min_depth[j * n_max + cnt] = depth - depth_blur;
        max_depth[j * n_max + cnt] = depth + depth_blur;
        ++cnt;
      }
    }
  }
}
// Slab-method intersection of a ray with the axis-aligned cube of half side
// `half_voxel` centered at `center`. Returns (t_near, t_far) on a hit and
// (-1, -1) on a miss. The `center` parameter name was restored here: the
// previous text was encoding-mangled ("&cent;er") and did not compile.
__device__ float2 RayAABBIntersection(
  const float3 &ori,
  const float3 &dir,
  const float3 &center,
  float half_voxel) {

  float f_low = 0;
  float f_high = 100000.;  // effectively +infinity for scene-scale depths
  float f_dim_low, f_dim_high, temp, inv_ray_dir, start, aabb;

  for (int d = 0; d < 3; ++d) {
    // Select this axis' ray and box components.
    switch (d) {
      case 0:
        inv_ray_dir = __fdividef(1.0f, dir.x); start = ori.x; aabb = center.x; break;
      case 1:
        inv_ray_dir = __fdividef(1.0f, dir.y); start = ori.y; aabb = center.y; break;
      case 2:
        inv_ray_dir = __fdividef(1.0f, dir.z); start = ori.z; aabb = center.z; break;
    }

    // Parametric distances to the two slab planes on this axis.
    f_dim_low = (aabb - half_voxel - start) * inv_ray_dir;
    f_dim_high = (aabb + half_voxel - start) * inv_ray_dir;

    // Make sure low is less than high
    if (f_dim_high < f_dim_low) {
      temp = f_dim_low;
      f_dim_low = f_dim_high;
      f_dim_high = temp;
    }
    // If this dimension's high is less than the low we got then we definitely missed.
    if (f_dim_high < f_low) {
      return make_float2(-1.0f, -1.0f);
    }
    // Likewise if this dimension's low is greater than the high so far.
    if (f_dim_low > f_high) {
      return make_float2(-1.0f, -1.0f);
    }
    // Add the clip from this dimension to the previous results
    f_low = (f_dim_low > f_low) ? f_dim_low : f_low;
    f_high = (f_dim_high < f_high) ? f_dim_high : f_high;
    if (f_low > f_high) {
      return make_float2(-1.0f, -1.0f);
    }
  }
  return make_float2(f_low, f_high);
}
// For each ray, collect up to n_max axis-aligned voxels (cubes of side
// `voxelsize` centered on `points`) hit by the ray, recording the voxel index
// and the entry/exit depths. One block per batch element; threads stride over
// the m rays. Unfilled idx slots are left at -1.
__global__ void aabb_intersect_point_kernel(
    int b, int n, int m, float voxelsize,
    int n_max,
    const float *__restrict__ ray_start,
    const float *__restrict__ ray_dir,
    const float *__restrict__ points,
    int *__restrict__ idx,
    float *__restrict__ min_depth,
    float *__restrict__ max_depth) {
  // Offset all pointers to this batch element's slice.
  int batch_index = blockIdx.x;
  points += batch_index * n * 3;
  ray_start += batch_index * m * 3;
  ray_dir += batch_index * m * 3;
  idx += batch_index * m * n_max;
  min_depth += batch_index * m * n_max;
  max_depth += batch_index * m * n_max;

  int index = threadIdx.x;
  int stride = blockDim.x;
  float half_voxel = voxelsize * 0.5;

  for (int j = index; j < m; j += stride) {
    // Mark all candidate slots as empty first.
    for (int l = 0; l < n_max; ++l) {
      idx[j * n_max + l] = -1;
    }
    // Test every voxel until n_max hits have been collected.
    for (int k = 0, cnt = 0; k < n && cnt < n_max; ++k) {
      float2 depths = RayAABBIntersection(
          make_float3(ray_start[j * 3 + 0], ray_start[j * 3 + 1], ray_start[j * 3 + 2]),
          make_float3(ray_dir[j * 3 + 0], ray_dir[j * 3 + 1], ray_dir[j * 3 + 2]),
          make_float3(points[k * 3 + 0], points[k * 3 + 1], points[k * 3 + 2]),
          half_voxel);
      if (depths.x > -1.0f){  // (-1, -1) signals a miss
        idx[j * n_max + cnt] = k;
        min_depth[j * n_max + cnt] = depths.x;
        max_depth[j * n_max + cnt] = depths.y;
        ++cnt;
      }
    }
  }
}
// For each ray, intersect against a sparse voxel octree and collect up to
// n_max terminal (leaf) voxels hit, with entry/exit depths. Node layout:
// children[k*9 + 0..7] hold child node indices (-1 = absent) and
// children[k*9 + 8] holds the node's size in voxels (1 marks a terminal node).
// One block per batch element; threads stride over the m rays.
__global__ void svo_intersect_point_kernel(
    int b, int n, int m, float voxelsize,
    int n_max,
    const float *__restrict__ ray_start,
    const float *__restrict__ ray_dir,
    const float *__restrict__ points,
    const int *__restrict__ children,
    int *__restrict__ idx,
    float *__restrict__ min_depth,
    float *__restrict__ max_depth) {
  /*
  TODO: this is an inefficient implementation of the
  naive Ray -- Sparse Voxel Octree Intersection.
  It can be further improved using:
  Revelles, Jorge, Carlos Urena, and Miguel Lastra.
  "An efficient parametric algorithm for octree traversal." (2000).
  */
  // Offset all pointers to this batch element's slice.
  int batch_index = blockIdx.x;
  points += batch_index * n * 3;
  children += batch_index * n * 9;
  ray_start += batch_index * m * 3;
  ray_dir += batch_index * m * 3;
  idx += batch_index * m * n_max;
  min_depth += batch_index * m * n_max;
  max_depth += batch_index * m * n_max;

  int index = threadIdx.x;
  int stride = blockDim.x;
  float half_voxel = voxelsize * 0.5;

  for (int j = index; j < m; j += stride) {
    for (int l = 0; l < n_max; ++l) {
      idx[j * n_max + l] = -1;
    }
    // Iterative depth-first traversal over node indices (no recursion on device).
    int stack[256] = {-1}; // DFS, initialize the stack
    int ptr = 0, cnt = 0, k = -1;
    stack[ptr] = n - 1; // ROOT node is always the last
    while (ptr > -1 && cnt < n_max) {
      assert((ptr < 256));
      // evaluate the current node; its AABB is the base voxel scaled by the
      // node size stored in children[k*9 + 8]
      k = stack[ptr];
      float2 depths = RayAABBIntersection(
          make_float3(ray_start[j * 3 + 0], ray_start[j * 3 + 1], ray_start[j * 3 + 2]),
          make_float3(ray_dir[j * 3 + 0], ray_dir[j * 3 + 1], ray_dir[j * 3 + 2]),
          make_float3(points[k * 3 + 0], points[k * 3 + 1], points[k * 3 + 2]),
          half_voxel * float(children[k * 9 + 8]));
      stack[ptr] = -1; ptr--;  // pop the current node
      if (depths.x > -1.0f) { // ray did not miss the voxel
        // TODO: here it should be able to know which children is ok, further optimize the code
        if (children[k * 9 + 8] == 1) { // this is a terminal node
          idx[j * n_max + cnt] = k;
          min_depth[j * n_max + cnt] = depths.x;
          max_depth[j * n_max + cnt] = depths.y;
          ++cnt; continue;
        }
        for (int u = 0; u < 8; u++) {
          if (children[k * 9 + u] > -1) {
            ptr++; stack[ptr] = children[k * 9 + u]; // push child to the stack
          }
        }
      }
    }
  }
}
// Moller-Trumbore-style ray/triangle test with a barycentric tolerance `blur`.
// Returns (t, u, v) on a hit and (-1, 0, 0) when (u, v) falls outside the
// blurred triangle. NOTE(review): there is no guard for det == 0 (degenerate
// triangle or ray parallel to the plane); __fdividef then yields a non-finite
// inverse -- confirm callers never hit that case.
__device__ float3 RayTriangleIntersection(
    const float3 &ori,
    const float3 &dir,
    const float3 &v0,
    const float3 &v1,
    const float3 &v2,
    float blur) {
  const float3 edge1 = v1 - v0;
  const float3 edge2 = v2 - v0;
  const float3 tvec = ori - v0;
  const float3 pvec = cross(dir, edge2);
  const float3 miss = make_float3(-1.0f, 0.0f, 0.0f);

  // Single fast reciprocal of the determinant (CUDA intrinsic).
  const float inv_det = __fdividef(1.0f, dot(edge1, pvec));

  const float u = dot(tvec, pvec) * inv_det;
  if ((u < 0.0f - blur) || (u > 1.0f + blur))
    return miss;

  const float3 qvec = cross(tvec, edge1);
  const float v = dot(dir, qvec) * inv_det;
  if ((v < 0.0f - blur) || (v > 1.0f + blur))
    return miss;

  // The third barycentric coordinate must also lie in the blurred range.
  const float uv_sum = u + v;
  if ((uv_sum < 0.0f - blur) || (uv_sum > 1.0f + blur))
    return miss;

  return make_float3(dot(edge2, qvec) * inv_det, u, v);
}
// For each ray, intersect against n triangles (9 floats per triangle in
// face_points: v0, v1, v2) and keep up to n_max hits sorted by increasing
// depth. `depth` stores 3 floats per hit: [t, min_offset, max_offset], where
// the offsets bound the hit interval, clipped to +/-cagesize and to half the
// gap to the neighboring hit. One block per batch element; threads stride
// over the m rays. Unfilled idx slots are left at -1.
__global__ void triangle_intersect_point_kernel(
    int b, int n, int m, float cagesize,
    float blur, int n_max,
    const float *__restrict__ ray_start,
    const float *__restrict__ ray_dir,
    const float *__restrict__ face_points,
    int *__restrict__ idx,
    float *__restrict__ depth,
    float *__restrict__ uv) {
  // Offset all pointers to this batch element's slice.
  int batch_index = blockIdx.x;
  face_points += batch_index * n * 9;
  ray_start += batch_index * m * 3;
  ray_dir += batch_index * m * 3;
  idx += batch_index * m * n_max;
  depth += batch_index * m * n_max * 3;
  uv += batch_index * m * n_max * 2;

  int index = threadIdx.x;
  int stride = blockDim.x;
  for (int j = index; j < m; j += stride) {
    // go over rays
    for (int l = 0; l < n_max; ++l) {
      idx[j * n_max + l] = -1;
    }
    int cnt = 0;
    for (int k = 0; k < n && cnt < n_max; ++k) {
      // go over triangles
      float3 tuv = RayTriangleIntersection(
          make_float3(ray_start[j * 3 + 0], ray_start[j * 3 + 1], ray_start[j * 3 + 2]),
          make_float3(ray_dir[j * 3 + 0], ray_dir[j * 3 + 1], ray_dir[j * 3 + 2]),
          make_float3(face_points[k * 9 + 0], face_points[k * 9 + 1], face_points[k * 9 + 2]),
          make_float3(face_points[k * 9 + 3], face_points[k * 9 + 4], face_points[k * 9 + 5]),
          make_float3(face_points[k * 9 + 6], face_points[k * 9 + 7], face_points[k * 9 + 8]),
          blur);
      if (tuv.x > 0) {  // t > 0: intersection lies in front of the origin
        int ki = k;
        float d = tuv.x, u = tuv.y, v = tuv.z;
        // sort: single insertion pass -- swap the carried hit with every stored
        // hit that is farther, so the arrays stay ordered by depth
        for (int l = 0; l < cnt; l++) {
          if (d < depth[j * n_max * 3 + l * 3]) {
            swap(ki, idx[j * n_max + l]);
            swap(d, depth[j * n_max * 3 + l * 3]);
            swap(u, uv[j * n_max * 2 + l * 2]);
            swap(v, uv[j * n_max * 2 + l * 2 + 1]);
          }
        }
        // append whatever hit was carried to the end of the pass
        idx[j * n_max + cnt] = ki;
        depth[j * n_max * 3 + cnt * 3] = d;
        uv[j * n_max * 2 + cnt * 2] = u;
        uv[j * n_max * 2 + cnt * 2 + 1] = v;
        cnt++;
      }
    }
    for (int l = 0; l < cnt; l++) {
      // compute min_depth: first hit opens at -cagesize; later hits at half the
      // gap to the previous hit, capped at cagesize (stored negated)
      if (l == 0)
        depth[j * n_max * 3 + l * 3 + 1] = -cagesize;
      else
        depth[j * n_max * 3 + l * 3 + 1] = -fminf(cagesize,
            .5 * (depth[j * n_max * 3 + l * 3] - depth[j * n_max * 3 + l * 3 - 3]));
      // compute max_depth: symmetric, using the gap to the next hit
      if (l == cnt - 1)
        depth[j * n_max * 3 + l * 3 + 2] = cagesize;
      else
        depth[j * n_max * 3 + l * 3 + 2] = fminf(cagesize,
            .5 * (depth[j * n_max * 3 + l * 3 + 3] - depth[j * n_max * 3 + l * 3]));
    }
  }
}
// Host launcher: one block per batch element, thread count sized to the m rays,
// running on the current torch-managed CUDA stream.
void ball_intersect_point_kernel_wrapper(
    int b, int n, int m, float radius, int n_max,
    const float *ray_start, const float *ray_dir, const float *points,
    int *idx, float *min_depth, float *max_depth) {
  const auto n_threads = opt_n_threads(m);
  cudaStream_t stream = at::cuda::getCurrentCUDAStream();
  ball_intersect_point_kernel<<<b, n_threads, 0, stream>>>(
      b, n, m, radius, n_max, ray_start, ray_dir, points,
      idx, min_depth, max_depth);
  CUDA_CHECK_ERRORS();
}
// Host launcher: one block per batch element, thread count sized to the m rays,
// running on the current torch-managed CUDA stream.
void aabb_intersect_point_kernel_wrapper(
    int b, int n, int m, float voxelsize, int n_max,
    const float *ray_start, const float *ray_dir, const float *points,
    int *idx, float *min_depth, float *max_depth) {
  const auto n_threads = opt_n_threads(m);
  cudaStream_t stream = at::cuda::getCurrentCUDAStream();
  aabb_intersect_point_kernel<<<b, n_threads, 0, stream>>>(
      b, n, m, voxelsize, n_max, ray_start, ray_dir, points,
      idx, min_depth, max_depth);
  CUDA_CHECK_ERRORS();
}
// Host launcher for the octree traversal kernel: one block per batch element,
// thread count sized to the m rays, on the current torch-managed CUDA stream.
void svo_intersect_point_kernel_wrapper(
    int b, int n, int m, float voxelsize, int n_max,
    const float *ray_start, const float *ray_dir, const float *points, const int *children,
    int *idx, float *min_depth, float *max_depth) {
  const auto n_threads = opt_n_threads(m);
  cudaStream_t stream = at::cuda::getCurrentCUDAStream();
  svo_intersect_point_kernel<<<b, n_threads, 0, stream>>>(
      b, n, m, voxelsize, n_max, ray_start, ray_dir, points, children,
      idx, min_depth, max_depth);
  CUDA_CHECK_ERRORS();
}
// Host launcher for the triangle intersection kernel: one block per batch
// element, thread count sized to the m rays, on the current CUDA stream.
void triangle_intersect_point_kernel_wrapper(
    int b, int n, int m, float cagesize, float blur, int n_max,
    const float *ray_start, const float *ray_dir, const float *face_points,
    int *idx, float *depth, float *uv) {
  const auto n_threads = opt_n_threads(m);
  cudaStream_t stream = at::cuda::getCurrentCUDAStream();
  triangle_intersect_point_kernel<<<b, n_threads, 0, stream>>>(
      b, n, m, cagesize, blur, n_max, ray_start, ray_dir, face_points,
      idx, depth, uv);
  CUDA_CHECK_ERRORS();
}
|
13bf7db0a3aa9adb2de7510265835af159234ac5.hip | // !!! This is a file automatically generated by hipify!!!
#define GLM_FORCE_CUDA
#define TORCH_HIP_VERSION 8000
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <memory>
#include <ctime>
#include "rendering/Scene.h"
#include <cimg/CImg.h>
#include <tbb/tbb.h>
#include <tbb/tick_count.h>
// Writes the rendered float RGB buffer into the 8-bit `image`, parallelized
// over pixels with TBB. Assumes components are in [0, 1] -- values above 1
// overflow the uint8_t cast; TODO confirm the renderer clamps its output.
void load_pixels(cimg_library::CImg<uint8_t>& image, glm::vec3* pixels_buf) {
  if (image.spectrum() != 3) {
    throw std::runtime_error("Can't copy");  // only 3-channel (RGB) images supported
  }
  size_t pixels_size = image.width() * image.height();
  tbb::parallel_for(tbb::blocked_range<size_t>(0, pixels_size),
    [&](const tbb::blocked_range<size_t>& range) {
      for (size_t idx = range.begin(), idx_end = range.end(); idx < idx_end; idx++) {
        // Flat buffer index -> (x, y); the buffer is row-major.
        size_t y = idx / image.width(), x = idx % image.width();
        for (size_t c = 0; c < 3; c++) {
          image(x, y, 0, c) = uint8_t(pixels_buf[idx][c] * 255);
        }
      }
    });
}
// Interactive render loop: renders the scene on the GPU each frame, converts
// it to 8-bit, displays it with CImg, and handles camera keys until the
// window is closed. Prints running-average timings each frame.
int main() {
  // Hack, because first malloc is very slow: warm up the GPU context with a
  // throwaway allocation so the cost is not billed to the first frame.
  int * t;
  hipMalloc(&t, sizeof(int) * 1000);
  hipFree(t);

  Scene scene(1280, 768);
  scene.antialiasing(true);
  // 8-bit RGB canvas matching the scene resolution, initialized to black.
  cimg_library::CImg<uint8_t> image(scene.width(), scene.height(), 1, 3, 0);
  // Limit TBB to a single worker -- presumably for stable timing; confirm.
  tbb::task_scheduler_init init(1);

  // Running averages (seconds) over all frames rendered so far.
  double render_avg_s = 0;
  double overall_avg_s = 0;
  size_t measures = 0;
  tbb::tick_count t_start, t_stop1, t_stop2;
  cimg_library::CImgDisplay main_disp(image, "W, A, S, D to move camera and E, R to rotate camera");
  while (!main_disp.is_closed()) {
    // Render on the GPU, then convert to 8-bit; time each stage separately.
    t_start = tbb::tick_count::now();
    std::unique_ptr<glm::vec3[]> pixels(scene.render());
    t_stop1 = tbb::tick_count::now();
    load_pixels(image, pixels.get());
    t_stop2 = tbb::tick_count::now();
    double duration1 = (t_stop1 - t_start).seconds();
    double duration2 = (t_stop2 - t_start).seconds();
    // Incremental mean update over `measures` frames.
    render_avg_s = (render_avg_s * measures + duration1) / (measures + 1);
    overall_avg_s = (overall_avg_s * measures + duration2) / (measures + 1);
    measures = measures + 1;
    printf("Render = %lf secs\n", render_avg_s);
    printf("Overall = %lf secs\n", overall_avg_s);
    image.display(main_disp);
    // Keyboard controls: WASD moves, E/R rotates, P toggles antialiasing.
    if (main_disp.is_keyW()) {
      scene.camera().move_forward(0.3f);
    }
    if (main_disp.is_keyS()) {
      scene.camera().move_backward(0.3f);
    }
    if (main_disp.is_keyA()) {
      scene.camera().move_left(0.3f);
    }
    if (main_disp.is_keyD()) {
      scene.camera().move_right(0.3f);
    }
    if (main_disp.is_keyR()) {
      scene.camera().rotate(0.1f);
    }
    if (main_disp.is_keyE()) {
      scene.camera().rotate(-0.1f);
    }
    if (main_disp.is_keyP()) {
      scene.antialiasing(!scene.antialiasing());
    }
    main_disp.wait();  // block until the next window event
  }
  return 0;
}
| 13bf7db0a3aa9adb2de7510265835af159234ac5.cu | #define GLM_FORCE_CUDA
#define CUDA_VERSION 8000
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <memory>
#include <ctime>
#include "rendering/Scene.h"
#include <cimg/CImg.h>
#include <tbb/tbb.h>
#include <tbb/tick_count.h>
// Converts the rendered float RGB buffer into the 8-bit `image`, parallelized
// over pixels with TBB. Assumes components are in [0, 1] -- TODO confirm the
// renderer clamps its output before the uint8_t cast.
void load_pixels(cimg_library::CImg<uint8_t>& image, glm::vec3* pixels_buf) {
  if (image.spectrum() != 3) {
    throw std::runtime_error("Can't copy");
  }
  const size_t total_pixels = image.width() * image.height();
  tbb::parallel_for(tbb::blocked_range<size_t>(0, total_pixels),
    [&](const tbb::blocked_range<size_t>& r) {
      for (size_t p = r.begin(); p != r.end(); ++p) {
        // Row-major flat index -> (col, row).
        const size_t row = p / image.width();
        const size_t col = p % image.width();
        for (size_t channel = 0; channel < 3; ++channel) {
          image(col, row, 0, channel) = uint8_t(pixels_buf[p][channel] * 255);
        }
      }
    });
}
// Interactive render loop: renders the scene on the GPU each frame, converts
// it to 8-bit, displays it with CImg, and handles camera keys until the
// window is closed. Prints running-average timings each frame.
int main() {
  // Hack, because first malloc is very slow: warm up the CUDA context with a
  // throwaway allocation so the cost is not billed to the first frame.
  int * t;
  cudaMalloc(&t, sizeof(int) * 1000);
  cudaFree(t);

  Scene scene(1280, 768);
  scene.antialiasing(true);
  // 8-bit RGB canvas matching the scene resolution, initialized to black.
  cimg_library::CImg<uint8_t> image(scene.width(), scene.height(), 1, 3, 0);
  // Limit TBB to a single worker -- presumably for stable timing; confirm.
  tbb::task_scheduler_init init(1);

  // Running averages (seconds) over all frames rendered so far.
  double render_avg_s = 0;
  double overall_avg_s = 0;
  size_t measures = 0;
  tbb::tick_count t_start, t_stop1, t_stop2;
  cimg_library::CImgDisplay main_disp(image, "W, A, S, D to move camera and E, R to rotate camera");
  while (!main_disp.is_closed()) {
    // Render on the GPU, then convert to 8-bit; time each stage separately.
    t_start = tbb::tick_count::now();
    std::unique_ptr<glm::vec3[]> pixels(scene.render());
    t_stop1 = tbb::tick_count::now();
    load_pixels(image, pixels.get());
    t_stop2 = tbb::tick_count::now();
    double duration1 = (t_stop1 - t_start).seconds();
    double duration2 = (t_stop2 - t_start).seconds();
    // Incremental mean update over `measures` frames.
    render_avg_s = (render_avg_s * measures + duration1) / (measures + 1);
    overall_avg_s = (overall_avg_s * measures + duration2) / (measures + 1);
    measures = measures + 1;
    printf("Render = %lf secs\n", render_avg_s);
    printf("Overall = %lf secs\n", overall_avg_s);
    image.display(main_disp);
    // Keyboard controls: WASD moves, E/R rotates, P toggles antialiasing.
    if (main_disp.is_keyW()) {
      scene.camera().move_forward(0.3f);
    }
    if (main_disp.is_keyS()) {
      scene.camera().move_backward(0.3f);
    }
    if (main_disp.is_keyA()) {
      scene.camera().move_left(0.3f);
    }
    if (main_disp.is_keyD()) {
      scene.camera().move_right(0.3f);
    }
    if (main_disp.is_keyR()) {
      scene.camera().rotate(0.1f);
    }
    if (main_disp.is_keyE()) {
      scene.camera().rotate(-0.1f);
    }
    if (main_disp.is_keyP()) {
      scene.antialiasing(!scene.antialiasing());
    }
    main_disp.wait();  // block until the next window event
  }
  return 0;
}
|
a65075159497beabb46b66b92835bb4a2a7e751f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Joseph Zhong
// josephz@cs.washington.edu
// 19 May 2017
// CSE 599I: Final Project
// Instructor Tanner Schmidt
// Exploring Dynamic Parallism in CUDA C with Mandelbrot Sets
//
// cudaNaive.cu
// ---
//
// This is the naive CUDA C implementation without usage of Dynamic
// Parallelism.
//
#include <math.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdbool.h>
#include <time.h>
#include "cudaNaive.h"
#include "common.h"
#include "defaults.h"
#include "metrics.h"
// Computes a Mandelbrot image on the GPU with a single flat kernel launch (no
// dynamic parallelism) and optionally writes the result to `filename`. The
// timed region (endClock) covers the launch and device synchronization only.
__host__ void cudaNaiveMandelbrotSets(int height, int width, int maxIterations, const float radius,
    const complexNum cMin, const complexNum cMax, const char *filename) {
  const int OUTPUT_SIZE = sizeof(int) * height * width;

  // Device output image.
  int *d_output;
  cudaCheck(hipMalloc(&d_output, OUTPUT_SIZE));

  // One thread per pixel. Integer ceil-division so the partial tiles at the
  // right/bottom edges are covered -- the old ceil(width / TILE_WIDTH)
  // truncated first, dropping pixels when the size was not a multiple of
  // TILE_WIDTH. The kernel's bounds guard handles the overhang threads.
  dim3 gridSize((width + TILE_WIDTH - 1) / TILE_WIDTH,
                (height + TILE_WIDTH - 1) / TILE_WIDTH, 1);
  dim3 blockSize(TILE_WIDTH, TILE_WIDTH, 1);

  // Begin timer.
  clock_t start = clock();

  hipLaunchKernelGGL((cudaNaiveMandelbrotSetsKernel), gridSize, blockSize, 0, 0,
      d_output, width, height, maxIterations, radius, cMin, cMax);
  cudaCheck(hipGetLastError());       // surface launch-configuration errors
  cudaCheck(hipDeviceSynchronize());  // wait for (and surface) kernel errors

  // Stop timer.
  endClock(start);

  if (filename != NULL) {
    // The host copy is only needed when an output file was requested.
    int *h_output = (int*) malloc(OUTPUT_SIZE);
    cudaCheck(hipMemcpy(h_output, d_output, OUTPUT_SIZE, hipMemcpyDeviceToHost));
    saveImage(filename, h_output, width, height, maxIterations);
    free(h_output);
  }
  hipFree(d_output);
}
// One thread per pixel: computes the escape value for its (col, row) pixel and
// stores it into the row-major output image.
__global__ void cudaNaiveMandelbrotSetsKernel(int *d_output,
    int width, int height, int maxIterations, const float radius,
    complexNum cMin, complexNum cMax) {
  const int col = blockIdx.x * blockDim.x + threadIdx.x;
  const int row = blockIdx.y * blockDim.y + threadIdx.y;
  if (col >= width || row >= height) {
    return;  // guard the grid tail
  }
  d_output[row * width + col] = calculatePixelValue(width, height, maxIterations,
                                                    cMin, cMax, col, row, radius);
}
| a65075159497beabb46b66b92835bb4a2a7e751f.cu | //
// Joseph Zhong
// josephz@cs.washington.edu
// 19 May 2017
// CSE 599I: Final Project
// Instructor Tanner Schmidt
// Exploring Dynamic Parallism in CUDA C with Mandelbrot Sets
//
// cudaNaive.cu
// ---
//
// This is the naive CUDA C implementation without usage of Dynamic
// Parallelism.
//
#include <math.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdbool.h>
#include <time.h>
#include "cudaNaive.h"
#include "common.h"
#include "defaults.h"
#include "metrics.h"
// Computes a Mandelbrot image on the GPU with a single flat kernel launch (no
// dynamic parallelism) and optionally writes the result to `filename`. The
// timed region (endClock) covers the launch and device synchronization only.
__host__ void cudaNaiveMandelbrotSets(int height, int width, int maxIterations, const float radius,
    const complexNum cMin, const complexNum cMax, const char *filename) {
  const int OUTPUT_SIZE = sizeof(int) * height * width;

  // Device output image.
  int *d_output;
  cudaCheck(cudaMalloc(&d_output, OUTPUT_SIZE));

  // One thread per pixel. Integer ceil-division so the partial tiles at the
  // right/bottom edges are covered -- the old ceil(width / TILE_WIDTH)
  // truncated first, dropping pixels when the size was not a multiple of
  // TILE_WIDTH. The kernel's bounds guard handles the overhang threads.
  dim3 gridSize((width + TILE_WIDTH - 1) / TILE_WIDTH,
                (height + TILE_WIDTH - 1) / TILE_WIDTH, 1);
  dim3 blockSize(TILE_WIDTH, TILE_WIDTH, 1);

  // Begin timer.
  clock_t start = clock();

  cudaNaiveMandelbrotSetsKernel<<<gridSize, blockSize>>>(d_output, width, height,
      maxIterations, radius, cMin, cMax);
  cudaCheck(cudaGetLastError());  // surface launch-configuration errors
  // cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is the
  // supported equivalent and also surfaces asynchronous kernel errors.
  cudaCheck(cudaDeviceSynchronize());

  // Stop timer.
  endClock(start);

  if (filename != NULL) {
    // The host copy is only needed when an output file was requested.
    int *h_output = (int*) malloc(OUTPUT_SIZE);
    cudaCheck(cudaMemcpy(h_output, d_output, OUTPUT_SIZE, cudaMemcpyDeviceToHost));
    saveImage(filename, h_output, width, height, maxIterations);
    free(h_output);
  }
  cudaFree(d_output);
}
// One thread per pixel: stores the escape value for pixel (x, y) into the
// row-major output image. Threads past the image bounds exit immediately.
__global__ void cudaNaiveMandelbrotSetsKernel(int *d_output,
    int width, int height, int maxIterations, const float radius,
    complexNum cMin, complexNum cMax) {
  int x = threadIdx.x + blockIdx.x * blockDim.x;
  int y = threadIdx.y + blockIdx.y * blockDim.y;
  if (x >= width || y >= height) return;  // guard the grid tail
  int value = calculatePixelValue(width, height, maxIterations,
      cMin, cMax, x, y, radius);
  d_output[y * width + x] = value;
}
|
d9bb46512ab2e2fbdd978c7191e8c413af6f454d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Flattens the 3D grid of 3D blocks into a single global thread index
// (block-major; x varies fastest within a block).
__device__ unsigned int getGid3d3d(){
    const int block_linear =
        blockIdx.x + gridDim.x * (blockIdx.y + gridDim.y * blockIdx.z);
    const int threads_per_block = blockDim.x * blockDim.y * blockDim.z;
    const int thread_in_block =
        threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * threadIdx.z);
    return block_linear * threads_per_block + thread_in_block;
}
// Writes 0 into A at this thread's flattened grid index. All coordinate and
// frequency parameters are currently unused -- presumably kept so the
// signature matches sibling A-field kernels; verify against call sites.
__global__ void kconstant_A(double *x, double *y, double *z, double xMax, double yMax, double zMax, double omegaX, double omegaY, double omegaZ, double omega, double fudge, double *A){
    int gid = getGid3d3d();
    A[gid] = 0;
}
__device__ unsigned int getGid3d3d(){
int blockId = blockIdx.x + blockIdx.y * gridDim.x
+ gridDim.x * gridDim.y * blockIdx.z;
int threadId = blockId * (blockDim.x * blockDim.y * blockDim.z)
+ (threadIdx.y * blockDim.x)
+ (threadIdx.z * (blockDim.x * blockDim.y)) + threadIdx.x;
return threadId;
}
// Writes 0 into A at this thread's flattened grid index. All coordinate and
// frequency parameters are currently unused -- presumably kept so the
// signature matches sibling A-field kernels; verify against call sites.
__global__ void kconstant_A(double *x, double *y, double *z, double xMax, double yMax, double zMax, double omegaX, double omegaY, double omegaZ, double omega, double fudge, double *A){
    int gid = getGid3d3d();
    A[gid] = 0;
}
fe5eacfea180fd6eaecd114d1659534d8eb2e08d.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "copy_if_hip.cuh"
#include <cudf/legacy/table.hpp>
#include <thrust/logical.h>
#include <thrust/count.h>
namespace {
using bit_mask_t = bit_mask::bit_mask_t;
// Returns true if the valid mask is true for index i in at least keep_threshold
// columns. A null mask pointer means the column has no null bitmap allocated,
// so every row of that column counts as valid.
struct valid_table_filter
{
  __device__ inline
  bool operator()(cudf::size_type i)
  {
    auto valid = [i](auto mask) {
      return (mask == nullptr) || bit_mask::is_valid(mask, i);
    };
    // Count, across columns, how many masks declare row i valid.
    auto count =
      thrust::count_if(thrust::seq, d_masks, d_masks + num_columns, valid);
    return (count >= keep_threshold);
  }

  // Factory: gathers each column's null-mask pointer on the host, copies the
  // pointer array into device memory, and wraps the filter in a unique_ptr
  // whose deleter releases the device array (via destroy()) on `stream`.
  static auto create(cudf::table const &table,
                     cudf::size_type keep_threshold,
                     hipStream_t stream = 0)
  {
    std::vector<bit_mask_t*> h_masks(table.num_columns());
    std::transform(std::cbegin(table), std::cend(table), std::begin(h_masks),
      [](auto col) { return reinterpret_cast<bit_mask_t*>(col->valid); }
    );

    size_t masks_size = sizeof(bit_mask_t*) * table.num_columns();
    bit_mask_t **device_masks = nullptr;
    RMM_TRY(RMM_ALLOC(&device_masks, masks_size, stream));
    CUDA_TRY(hipMemcpyAsync(device_masks, h_masks.data(), masks_size,
                            hipMemcpyHostToDevice, stream));
    CHECK_CUDA(stream);

    // The deleter captures the stream by value so destruction frees on the
    // same stream the allocation was made on.
    auto deleter = [stream](valid_table_filter* f) { f->destroy(stream); };
    std::unique_ptr<valid_table_filter, decltype(deleter)> p {
      new valid_table_filter(device_masks, table.num_columns(), keep_threshold),
      deleter
    };
    CHECK_CUDA(stream);
    return p;
  }

  // Frees the device-side mask array and self-deletes; called exactly once by
  // the unique_ptr deleter returned from create().
  __host__ void destroy(hipStream_t stream = 0) {
    RMM_FREE(d_masks, stream);
    delete this;
  }

  valid_table_filter() = delete;
  ~valid_table_filter() = default;

 protected:
  // Construction only via create(), which owns the device allocation.
  valid_table_filter(bit_mask_t **masks,
                     cudf::size_type num_columns,
                     cudf::size_type keep_threshold)
  : keep_threshold(keep_threshold),
    num_columns(num_columns),
    d_masks(masks) {}

  cudf::size_type keep_threshold;  // minimum number of valid columns to keep a row
  cudf::size_type num_columns;     // number of entries in d_masks
  bit_mask_t **d_masks;            // device array of per-column null-mask pointers
};
} // namespace
namespace cudf {
/*
 * Filters a table to remove null elements: keeps only the rows that are valid
 * (non-null) in at least `keep_threshold` of the `keys` columns. Returns a
 * copy of `input` unchanged when there are no key columns, no rows, or no
 * nulls in the keys.
 */
table drop_nulls(table const &input,
                 table const &keys,
                 cudf::size_type keep_threshold) {
  if (keys.num_columns() == 0 || keys.num_rows() == 0 ||
      not cudf::has_nulls(keys))
    return cudf::copy(input);

  CUDF_EXPECTS(keys.num_rows() <= input.num_rows(),
               "Column size mismatch");

  auto filter = valid_table_filter::create(keys, keep_threshold);

  return detail::copy_if(input, *filter.get());
}
/*
 * Convenience overload: drops every row that contains a null in any of the
 * key columns (keep_threshold defaults to the full key-column count).
 */
table drop_nulls(table const &input, table const &keys) {
  return drop_nulls(input, keys, keys.num_columns());
}
} // namespace cudf
| fe5eacfea180fd6eaecd114d1659534d8eb2e08d.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "copy_if.cuh"
#include <cudf/legacy/table.hpp>
#include <thrust/logical.h>
#include <thrust/count.h>
namespace {
using bit_mask_t = bit_mask::bit_mask_t;
// Returns true if the valid mask is true for index i in at least keep_threshold
// columns
// Device-side row predicate: keeps row i when at least `keep_threshold`
// of the column validity masks report the element at i as valid.
struct valid_table_filter
{
__device__ inline
bool operator()(cudf::size_type i)
{
// A null mask pointer means the column has no null bitmap, so every
// element of that column counts as valid.
auto valid = [i](auto mask) {
return (mask == nullptr) || bit_mask::is_valid(mask, i);
};
auto count =
thrust::count_if(thrust::seq, d_masks, d_masks + num_columns, valid);
return (count >= keep_threshold);
}
// Factory: gathers per-column mask pointers on the host, copies them to
// device memory, and returns the filter wrapped in a unique_ptr whose
// deleter frees the device allocation on the same stream.
static auto create(cudf::table const &table,
cudf::size_type keep_threshold,
cudaStream_t stream = 0)
{
std::vector<bit_mask_t*> h_masks(table.num_columns());
std::transform(std::cbegin(table), std::cend(table), std::begin(h_masks),
[](auto col) { return reinterpret_cast<bit_mask_t*>(col->valid); }
);
size_t masks_size = sizeof(bit_mask_t*) * table.num_columns();
bit_mask_t **device_masks = nullptr;
RMM_TRY(RMM_ALLOC(&device_masks, masks_size, stream));
CUDA_TRY(cudaMemcpyAsync(device_masks, h_masks.data(), masks_size,
cudaMemcpyHostToDevice, stream));
CHECK_CUDA(stream);
auto deleter = [stream](valid_table_filter* f) { f->destroy(stream); };
std::unique_ptr<valid_table_filter, decltype(deleter)> p {
new valid_table_filter(device_masks, table.num_columns(), keep_threshold),
deleter
};
CHECK_CUDA(stream);
return p;
}
// Releases the device mask array and destroys this (heap-allocated) object.
__host__ void destroy(cudaStream_t stream = 0) {
RMM_FREE(d_masks, stream);
delete this;
}
valid_table_filter() = delete;
~valid_table_filter() = default;
protected:
valid_table_filter(bit_mask_t **masks,
cudf::size_type num_columns,
cudf::size_type keep_threshold)
: keep_threshold(keep_threshold),
num_columns(num_columns),
d_masks(masks) {}
cudf::size_type keep_threshold;  // min. number of valid key columns to keep a row
cudf::size_type num_columns;     // number of entries in d_masks
bit_mask_t **d_masks;            // device array of per-column validity masks
};
} // namespace
namespace cudf {
/*
* Filters a table to remove null elements.
*/
// Removes rows of `input` for which fewer than `keep_threshold` of the
// `keys` columns hold a valid (non-null) value at that row.
table drop_nulls(table const &input,
table const &keys,
cudf::size_type keep_threshold) {
// Fast path: nothing to filter when there are no key columns/rows or no
// nulls at all — return a plain copy of the input.
if (keys.num_columns() == 0 || keys.num_rows() == 0 ||
not cudf::has_nulls(keys))
return cudf::copy(input);
CUDF_EXPECTS(keys.num_rows() <= input.num_rows(),
"Column size mismatch");
auto filter = valid_table_filter::create(keys, keep_threshold);
return detail::copy_if(input, *filter.get());
}
/*
* Filters a table to remove null elements.
*/
// Convenience overload: a row is dropped if ANY key column is null there
// (threshold = number of key columns, i.e. all keys must be valid).
table drop_nulls(table const &input,
table const &keys)
{
return drop_nulls(input, keys, keys.num_columns());
}
} // namespace cudf
|
SceCells.hip | // !!! This is a file automatically generated by hipify!!!
#include "SceCells.h"
#include <cmath>
double epsilon = 1.0e-12;
__constant__ double membrEquLen;
__constant__ double membrStiff;
__constant__ double membrStiff_Mitotic; //Ali June 30
__constant__ double pI;
__constant__ double minLength;
__constant__ double minDivisor;
__constant__ uint maxAllNodePerCell;
__constant__ uint maxMembrPerCell;
__constant__ uint maxIntnlPerCell;
__constant__ double bendCoeff;
__constant__ double bendCoeff_Mitotic;//AAMIRI
__constant__ double sceIB_M[5];
__constant__ double sceIBDiv_M[5];
__constant__ double sceII_M[5];
__constant__ double sceIIDiv_M[5];
__constant__ double grthPrgrCriEnd_M;
__constant__ double F_Ext_Incline_M2 ; //Ali
//Ali & Abu June 30th
__device__
double calMembrForce_Mitotic(double& length, double& progress, double mitoticCri) {
	// Linear spring force on a membrane link. Below the mitotic threshold the
	// baseline stiffness applies; past it, stiffness ramps linearly from
	// membrStiff up to membrStiff_Mitotic as progress goes from mitoticCri to 1.
	if (progress <= mitoticCri) {
		return (length - membrEquLen) * membrStiff;
	}
	double ramp = (progress - mitoticCri) / (1.0 - mitoticCri);
	double stiffness = membrStiff + (membrStiff_Mitotic - membrStiff) * ramp;
	return (length - membrEquLen) * stiffness;
}
//
//Ali
__device__
// External pulling force ramped linearly in time with slope F_Ext_Incline_M2
// (device constant set from host config).
double calExtForce(double& curTime) {
	return curTime * F_Ext_Incline_M2;
}
//Ali
__device__
// Returns a uniformly distributed angle in [0, 2*pi) for the given cell.
// The engine is reseeded per call; discarding `cellRank` draws decorrelates
// cells that share the same seed.
double obtainRandAngle(uint& cellRank, uint& seed) {
	thrust::default_random_engine rng(seed);
	// discard n numbers to avoid correlation
	rng.discard(cellRank);
	thrust::uniform_real_distribution<double> u0Pi(0, 2.0 * pI);
	double randomAngle = u0Pi(rng);
	return randomAngle;
}
// Global node index of the next free internal-node slot for this cell:
// cell base offset + membrane slots + currently active internal count.
__device__ uint obtainNewIntnlNodeIndex(uint& cellRank, uint& curActiveCount) {
	return (cellRank * maxAllNodePerCell + maxMembrPerCell + curActiveCount);
}
//AAMIRI
// NOTE(review): despite the name, this returns the same index as
// obtainNewIntnlNodeIndex (one past the last active internal node).
// If the intent is the last ACTIVE node, `curActiveCount - 1` would be
// expected — confirm against callers before changing.
__device__ uint obtainLastIntnlNodeIndex(uint& cellRank, uint& curActiveCount) {
	return (cellRank * maxAllNodePerCell + maxMembrPerCell + curActiveCount );
}
//AAMIRI
// Global node index of the last active membrane node of this cell.
__device__ uint obtainMembEndNode(uint& cellRank, uint& activeMembrNodeThis) {
	return (cellRank * maxAllNodePerCell + activeMembrNodeThis - 1 );
}
__device__
bool isAllIntnlFilled(uint& currentIntnlCount) {
	// All internal-node slots are taken once the active count reaches the
	// per-cell cap stored in constant memory.
	return currentIntnlCount >= maxIntnlPerCell;
}
//AAMIRI
__device__
// Uniformly picks one of the cell's active membrane nodes and returns its
// global node index; `seed` seeds the per-call engine.
int obtainRemovingMembrNodeID(uint &cellRank, uint& activeMembrNodes, uint& seed) {
	thrust::default_random_engine rng(seed);
	// discard n numbers to avoid correlation
	rng.discard(activeMembrNodes);
	// Fix: uniform_int_distribution requires an integer result type; the
	// original instantiated it with <double>, which violates the
	// distribution's IntType requirement and is undefined/non-portable.
	thrust::uniform_int_distribution<int> dist(0, activeMembrNodes - 1);
	int randomNode = dist(rng);
	return (cellRank * maxAllNodePerCell + randomNode);
}
//AAMIRI
__device__
bool isAllIntnlEmptied(uint& currentIntnlCount) {
	// True exactly when the cell has no active internal nodes left.
	return currentIntnlCount == 0;
}
//AAMIRI
__device__
bool isAllMembrEmptied(uint& currentMembrCount) {
	// True exactly when the cell has no active membrane nodes left.
	return currentMembrCount == 0;
}
__device__
bool longEnough(double& length) {
	// A membrane segment is considered long enough when it strictly exceeds
	// the minimum length stored in constant memory.
	return length > minLength;
}
__device__
double compDist2D(double &xPos, double &yPos, double &xPos2, double &yPos2) {
	// Euclidean distance between (xPos, yPos) and (xPos2, yPos2).
	double dx = xPos - xPos2;
	double dy = yPos - yPos2;
	return sqrt(dx * dx + dy * dy);
}
// Marks every boundary node (all slots before the profile region) active.
void SceCells::distributeBdryIsActiveInfo() {
	thrust::fill(nodes->getInfoVecs().nodeIsActive.begin(),
			nodes->getInfoVecs().nodeIsActive.begin()
					+ allocPara.startPosProfile, true);
}
// Marks the currently active profile nodes (a prefix of the profile region)
// as active; the remainder of the region is left untouched.
void SceCells::distributeProfileIsActiveInfo() {
	thrust::fill(
			nodes->getInfoVecs().nodeIsActive.begin()
					+ allocPara.startPosProfile,
			nodes->getInfoVecs().nodeIsActive.begin()
					+ allocPara.startPosProfile
					+ nodes->getAllocPara().currentActiveProfileNodeCount,
			true);
}
// Marks every node slot belonging to the currently active ECM segments as
// active. (Removed two unused counting iterators the original declared but
// never used.)
void SceCells::distributeECMIsActiveInfo() {
	uint totalNodeCountForActiveECM = allocPara.currentActiveECM
			* allocPara.maxNodePerECM;
	thrust::fill(
			nodes->getInfoVecs().nodeIsActive.begin() + allocPara.startPosECM,
			nodes->getInfoVecs().nodeIsActive.begin()
					+ totalNodeCountForActiveECM + allocPara.startPosECM, true);
}
// Sets nodeIsActive for the cell region: node k of cell c is active iff
// (k mod maxNodeOfOneCell) < activeNodeCountOfThisCell[c].
void SceCells::distributeCellIsActiveInfo() {
	totalNodeCountForActiveCells = allocPara.currentActiveCellCount
			* allocPara.maxNodeOfOneCell;
	thrust::counting_iterator<uint> countingBegin(0);
	thrust::counting_iterator<uint> countingEnd(totalNodeCountForActiveCells);
	thrust::transform(
			// within-cell node rank ...
			thrust::make_transform_iterator(countingBegin,
					ModuloFunctor(allocPara.maxNodeOfOneCell)),
			thrust::make_transform_iterator(countingEnd,
					ModuloFunctor(allocPara.maxNodeOfOneCell)),
			// ... compared against that cell's active-node count
			thrust::make_permutation_iterator(
					cellInfoVecs.activeNodeCountOfThisCell.begin(),
					make_transform_iterator(countingBegin,
							DivideFunctor(allocPara.maxNodeOfOneCell))),
			nodes->getInfoVecs().nodeIsActive.begin() + allocPara.startPosCells,
			thrust::less<uint>());
}
// Broadcasts each cell's growthProgress to every node slot of that cell
// (gather via permutation iterator keyed on node-index / maxNodeOfOneCell).
void SceCells::distributeCellGrowthProgress() {
	totalNodeCountForActiveCells = allocPara.currentActiveCellCount
			* allocPara.maxNodeOfOneCell;
	thrust::counting_iterator<uint> countingBegin(0);
	thrust::counting_iterator<uint> countingEnd(totalNodeCountForActiveCells);
	thrust::copy(
			thrust::make_permutation_iterator(
					cellInfoVecs.growthProgress.begin(),
					make_transform_iterator(countingBegin,
							DivideFunctor(allocPara.maxNodeOfOneCell))),
			thrust::make_permutation_iterator(
					cellInfoVecs.growthProgress.begin(),
					make_transform_iterator(countingEnd,
							DivideFunctor(allocPara.maxNodeOfOneCell))),
			nodes->getInfoVecs().nodeGrowPro.begin() + allocPara.startPosCells);
}
// Loads all membrane-related parameters from the global configuration.
// Growth coefficient/limit keep an *_Ori copy so they can later be reset
// after being adjusted at runtime.
void MembrPara::initFromConfig() {
	membrEquLenCPU = globalConfigVars.getConfigValue("MembrEquLen").toDouble();
	membrStiffCPU = globalConfigVars.getConfigValue("MembrStiff").toDouble();
	membrStiff_Mitotic = globalConfigVars.getConfigValue("MembrStiff_Mitotic").toDouble(); //Ali June30
	membrGrowCoeff_Ori =
			globalConfigVars.getConfigValue("MembrGrowCoeff").toDouble();
	membrGrowLimit_Ori =
			globalConfigVars.getConfigValue("MembrGrowLimit").toDouble();
	membrGrowCoeff = membrGrowCoeff_Ori;
	membrGrowLimit = membrGrowLimit_Ori;
	//Ali
	F_Ext_Incline =
			globalConfigVars.getConfigValue("FExtIncline").toDouble();
	//Ali
	membrBendCoeff =
			globalConfigVars.getConfigValue("MembrBenCoeff").toDouble();
	//AAMIRI
	membrBendCoeff_Mitotic =
			globalConfigVars.getConfigValue("MembrBenCoeff_Mitotic").toDouble();
	adjustLimit =
			globalConfigVars.getConfigValue("MembrAdjustLimit").toDouble();
	adjustCoeff =
			globalConfigVars.getConfigValue("MembrAdjustCoeff").toDouble();
	growthConst_N =
			globalConfigVars.getConfigValue("MembrGrowthConst").toDouble();
	initMembrCt_N =
			globalConfigVars.getConfigValue("InitMembrNodeCount").toInt();
	initIntnlCt_N =
			globalConfigVars.getConfigValue("InitCellNodeCount").toInt();
}
// Default constructor: performs no initialization beyond a trace print.
// NOTE(review): InitTimeStage is printed here but nothing in this
// constructor sets it — confirm it is initialized before this runs.
SceCells::SceCells() {
	//curTime = 0 + 55800.0;//AAMIRI // Ali I comment that our safely on 04/04/2017
	std ::cout << "I am in SceCells constructor with zero element "<<InitTimeStage<<std::endl ;
}
// One growth step for all active cells: pick random growth direction/speed,
// advance progress, and apply stretching/chemotaxis forces, adding a node
// where scheduled.
// NOTE(review): parameter d_t is unused — the member `dt` is used by the
// helpers instead; kept for interface compatibility.
void SceCells::growAtRandom(double d_t) {
	totalNodeCountForActiveCells = allocPara.currentActiveCellCount
			* allocPara.maxNodeOfOneCell;
	// randomly select growth direction and speed.
	randomizeGrowth();
	//std::cout << "after copy grow info" << std::endl;
	updateGrowthProgress();
	//std::cout << "after update growth progress" << std::endl;
	decideIsScheduleToGrow();
	//std::cout << "after decode os schedule to grow" << std::endl;
	computeCellTargetLength();
	//std::cout << "after compute cell target length" << std::endl;
	computeDistToCellCenter();
	//std::cout << "after compute dist to center" << std::endl;
	findMinAndMaxDistToCenter();
	//std::cout << "after find min and max dist" << std::endl;
	computeLenDiffExpCur();
	//std::cout << "after compute diff " << std::endl;
	stretchCellGivenLenDiff();
	//std::cout << "after apply stretch force" << std::endl;
	cellChemotaxis();
	//std::cout << "after apply cell chemotaxis" << std::endl;
	addPointIfScheduledToGrow();
	//std::cout << "after adding node" << std::endl;
}
/**
* Use the growth magnitude and dt to update growthProgress.
*/
// growthProgress += growthSpeed * dt, saturated at 1.0 (see
// SaxpyFunctorWithMaxOfOne), for every active cell.
void SceCells::updateGrowthProgress() {
	thrust::transform(cellInfoVecs.growthSpeed.begin(),
			cellInfoVecs.growthSpeed.begin() + allocPara.currentActiveCellCount,
			cellInfoVecs.growthProgress.begin(),
			cellInfoVecs.growthProgress.begin(), SaxpyFunctorWithMaxOfOne(dt));
}
/**
* Decide if the cells are going to add a node or not.
* Use lastCheckPoint and growthProgress to decide whether add point or not
*/
// Schedules a node addition for each cell whose growthProgress has advanced
// past lastCheckPoint by the growth threshold (decided by PtCondiOp).
void SceCells::decideIsScheduleToGrow() {
	thrust::transform(
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.growthProgress.begin(),
							cellInfoVecs.lastCheckPoint.begin())),
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.growthProgress.begin(),
							cellInfoVecs.lastCheckPoint.begin()))
					+ allocPara.currentActiveCellCount,
			cellInfoVecs.isScheduledToGrow.begin(),
			PtCondiOp(miscPara.growThreshold));
}
/**
* Calculate target length of cell given the cell growth progress.
* length is along the growth direction.
*/
// Per cell: expectedLength = f(growthProgress), interpolating between the
// initial and final cell lengths (see CompuTarLen).
void SceCells::computeCellTargetLength() {
	thrust::transform(cellInfoVecs.growthProgress.begin(),
			cellInfoVecs.growthProgress.begin()
					+ allocPara.currentActiveCellCount,
			cellInfoVecs.expectedLength.begin(),
			CompuTarLen(bioPara.cellInitLength, bioPara.cellFinalLength));
}
/**
* Compute distance of each node to its corresponding cell center.
* The distantce could be either positive or negative, depending on the pre-defined
* growth direction.
*/
// For every cell node, computes its signed distance to the owning cell's
// center, projected onto the cell's growth direction (CompuDist). Each
// per-cell quantity is broadcast to node granularity with a permutation
// iterator keyed on node-index / maxNodeOfOneCell.
void SceCells::computeDistToCellCenter() {
	thrust::transform(
			thrust::make_zip_iterator(
					thrust::make_tuple(
							make_permutation_iterator(
									cellInfoVecs.centerCoordX.begin(),
									make_transform_iterator(countingBegin,
											DivideFunctor(
													allocPara.maxNodeOfOneCell))),
							make_permutation_iterator(
									cellInfoVecs.centerCoordY.begin(),
									make_transform_iterator(countingBegin,
											DivideFunctor(
													allocPara.maxNodeOfOneCell))),
							make_permutation_iterator(
									cellInfoVecs.growthXDir.begin(),
									make_transform_iterator(countingBegin,
											DivideFunctor(
													allocPara.maxNodeOfOneCell))),
							make_permutation_iterator(
									cellInfoVecs.growthYDir.begin(),
									make_transform_iterator(countingBegin,
											DivideFunctor(
													allocPara.maxNodeOfOneCell))),
							nodes->getInfoVecs().nodeLocX.begin()
									+ allocPara.startPosCells,
							nodes->getInfoVecs().nodeLocY.begin()
									+ allocPara.startPosCells,
							nodes->getInfoVecs().nodeIsActive.begin()
									+ allocPara.startPosCells)),
			// end iterator: identical zip advanced by the active node count
			thrust::make_zip_iterator(
					thrust::make_tuple(
							make_permutation_iterator(
									cellInfoVecs.centerCoordX.begin(),
									make_transform_iterator(countingBegin,
											DivideFunctor(
													allocPara.maxNodeOfOneCell))),
							make_permutation_iterator(
									cellInfoVecs.centerCoordY.begin(),
									make_transform_iterator(countingBegin,
											DivideFunctor(
													allocPara.maxNodeOfOneCell))),
							make_permutation_iterator(
									cellInfoVecs.growthXDir.begin(),
									make_transform_iterator(countingBegin,
											DivideFunctor(
													allocPara.maxNodeOfOneCell))),
							make_permutation_iterator(
									cellInfoVecs.growthYDir.begin(),
									make_transform_iterator(countingBegin,
											DivideFunctor(
													allocPara.maxNodeOfOneCell))),
							nodes->getInfoVecs().nodeLocX.begin()
									+ allocPara.startPosCells,
							nodes->getInfoVecs().nodeLocY.begin()
									+ allocPara.startPosCells,
							nodes->getInfoVecs().nodeIsActive.begin()
									+ allocPara.startPosCells))
					+ totalNodeCountForActiveCells,
			cellNodeInfoVecs.distToCenterAlongGrowDir.begin(), CompuDist());
}
/**
* For nodes of each cell, find the maximum and minimum distance to the center.
* We will then calculate the current length of a cell along its growth direction
* using max and min distance to the center.
*/
// Segmented min/max reduction keyed by cell rank: for each cell, the
// smallest and biggest node distance to center along the growth direction.
void SceCells::findMinAndMaxDistToCenter() {
	thrust::reduce_by_key(
			make_transform_iterator(countingBegin,
					DivideFunctor(allocPara.maxNodeOfOneCell)),
			make_transform_iterator(countingBegin,
					DivideFunctor(allocPara.maxNodeOfOneCell))
					+ totalNodeCountForActiveCells,
			cellNodeInfoVecs.distToCenterAlongGrowDir.begin(),
			cellInfoVecs.cellRanksTmpStorage.begin(),
			cellInfoVecs.smallestDistance.begin(), thrust::equal_to<uint>(),
			thrust::minimum<double>());
	// for nodes of each cell, find the maximum distance from the node to the corresponding
	// cell center along the pre-defined growth direction.
	thrust::reduce_by_key(
			make_transform_iterator(countingBegin,
					DivideFunctor(allocPara.maxNodeOfOneCell)),
			make_transform_iterator(countingBegin,
					DivideFunctor(allocPara.maxNodeOfOneCell))
					+ totalNodeCountForActiveCells,
			cellNodeInfoVecs.distToCenterAlongGrowDir.begin(),
			cellInfoVecs.cellRanksTmpStorage.begin(),
			cellInfoVecs.biggestDistance.begin(), thrust::equal_to<uint>(),
			thrust::maximum<double>());
}
/**
* Compute the difference for cells between their expected length and current length.
*/
// Per cell: lengthDifference = expectedLength - (biggest - smallest
// distance to center), i.e. target minus current length (see CompuDiff).
void SceCells::computeLenDiffExpCur() {
	thrust::transform(
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.expectedLength.begin(),
							cellInfoVecs.smallestDistance.begin(),
							cellInfoVecs.biggestDistance.begin())),
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.expectedLength.begin(),
							cellInfoVecs.smallestDistance.begin(),
							cellInfoVecs.biggestDistance.begin()))
					+ allocPara.currentActiveCellCount,
			cellInfoVecs.lengthDifference.begin(), CompuDiff());
}
/**
* Use the difference that just computed and growthXDir&growthYDir
* to apply stretching force (velocity) on nodes of all cells
*/
// Adds an elongation force (as a velocity contribution) to every cell node,
// proportional to the cell's length deficit and directed along the cell's
// growth axis (ApplyStretchForce); velocities are updated in place.
void SceCells::stretchCellGivenLenDiff() {
	thrust::transform(
			thrust::make_zip_iterator(
					thrust::make_tuple(
							cellNodeInfoVecs.distToCenterAlongGrowDir.begin(),
							make_permutation_iterator(
									cellInfoVecs.lengthDifference.begin(),
									make_transform_iterator(countingBegin,
											DivideFunctor(
													allocPara.maxNodeOfOneCell))),
							make_permutation_iterator(
									cellInfoVecs.growthXDir.begin(),
									make_transform_iterator(countingBegin,
											DivideFunctor(
													allocPara.maxNodeOfOneCell))),
							make_permutation_iterator(
									cellInfoVecs.growthYDir.begin(),
									make_transform_iterator(countingBegin,
											DivideFunctor(
													allocPara.maxNodeOfOneCell))),
							nodes->getInfoVecs().nodeVelX.begin()
									+ allocPara.startPosCells,
							nodes->getInfoVecs().nodeVelY.begin()
									+ allocPara.startPosCells)),
			// end iterator: identical zip advanced by the active node count
			thrust::make_zip_iterator(
					thrust::make_tuple(
							cellNodeInfoVecs.distToCenterAlongGrowDir.begin(),
							make_permutation_iterator(
									cellInfoVecs.lengthDifference.begin(),
									make_transform_iterator(countingBegin,
											DivideFunctor(
													allocPara.maxNodeOfOneCell))),
							make_permutation_iterator(
									cellInfoVecs.growthXDir.begin(),
									make_transform_iterator(countingBegin,
											DivideFunctor(
													allocPara.maxNodeOfOneCell))),
							make_permutation_iterator(
									cellInfoVecs.growthYDir.begin(),
									make_transform_iterator(countingBegin,
											DivideFunctor(
													allocPara.maxNodeOfOneCell))),
							nodes->getInfoVecs().nodeVelX.begin()
									+ allocPara.startPosCells,
							nodes->getInfoVecs().nodeVelY.begin()
									+ allocPara.startPosCells))
					+ totalNodeCountForActiveCells,
			thrust::make_zip_iterator(
					thrust::make_tuple(
							nodes->getInfoVecs().nodeVelX.begin()
									+ allocPara.startPosCells,
							nodes->getInfoVecs().nodeVelY.begin()
									+ allocPara.startPosCells)),
			ApplyStretchForce(bioPara.elongationCoefficient));
}
/**
* This is just an attempt. Cells move according to chemicals.
*/
// Adds a chemotaxis velocity contribution to every cell node, proportional
// to the cell's growth speed and directed along the growth axis
// (ApplyChemoVel); velocities are updated in place.
void SceCells::cellChemotaxis() {
	thrust::transform(
			thrust::make_zip_iterator(
					thrust::make_tuple(
							make_permutation_iterator(
									cellInfoVecs.growthSpeed.begin(),
									make_transform_iterator(countingBegin,
											DivideFunctor(
													allocPara.maxNodeOfOneCell))),
							make_permutation_iterator(
									cellInfoVecs.growthXDir.begin(),
									make_transform_iterator(countingBegin,
											DivideFunctor(
													allocPara.maxNodeOfOneCell))),
							make_permutation_iterator(
									cellInfoVecs.growthYDir.begin(),
									make_transform_iterator(countingBegin,
											DivideFunctor(
													allocPara.maxNodeOfOneCell))),
							nodes->getInfoVecs().nodeVelX.begin()
									+ allocPara.startPosCells,
							nodes->getInfoVecs().nodeVelY.begin()
									+ allocPara.startPosCells)),
			// end iterator: identical zip advanced by the active node count
			thrust::make_zip_iterator(
					thrust::make_tuple(
							make_permutation_iterator(
									cellInfoVecs.growthSpeed.begin(),
									make_transform_iterator(countingBegin,
											DivideFunctor(
													allocPara.maxNodeOfOneCell))),
							make_permutation_iterator(
									cellInfoVecs.growthXDir.begin(),
									make_transform_iterator(countingBegin,
											DivideFunctor(
													allocPara.maxNodeOfOneCell))),
							make_permutation_iterator(
									cellInfoVecs.growthYDir.begin(),
									make_transform_iterator(countingBegin,
											DivideFunctor(
													allocPara.maxNodeOfOneCell))),
							nodes->getInfoVecs().nodeVelX.begin()
									+ allocPara.startPosCells,
							nodes->getInfoVecs().nodeVelY.begin()
									+ allocPara.startPosCells))
					+ totalNodeCountForActiveCells,
			thrust::make_zip_iterator(
					thrust::make_tuple(
							nodes->getInfoVecs().nodeVelX.begin()
									+ allocPara.startPosCells,
							nodes->getInfoVecs().nodeVelY.begin()
									+ allocPara.startPosCells)),
			ApplyChemoVel(bioPara.chemoCoefficient));
}
/**
* Adjust the velocities of nodes.
* For example, velocity of boundary nodes must be zero.
*/
// Post-processes node velocities via VelocityModifier (e.g. boundary nodes
// are pinned, profile nodes handled specially); updates velocities in place
// over boundary + profile + ECM + cell regions.
// (Removed the unused local `countingIterEnd` the original declared.)
void SceCells::adjustNodeVel() {
	thrust::counting_iterator<uint> countingIterBegin(0);
	thrust::transform(
			thrust::make_zip_iterator(
					thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(),
							nodes->getInfoVecs().nodeVelY.begin(),
							nodes->getInfoVecs().nodeIsActive.begin(),
							nodes->getInfoVecs().nodeCellType.begin(),
							countingIterBegin)),
			thrust::make_zip_iterator(
					thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(),
							nodes->getInfoVecs().nodeVelY.begin(),
							nodes->getInfoVecs().nodeIsActive.begin(),
							nodes->getInfoVecs().nodeCellType.begin(),
							countingIterBegin)) + totalNodeCountForActiveCells
					+ allocPara.startPosCells,
			thrust::make_zip_iterator(
					thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(),
							nodes->getInfoVecs().nodeVelY.begin())),
			VelocityModifier(allocPara.startPosProfile,
					allocPara.currentActiveProfileNodeCount));
}
/**
* Move nodes according to the velocity we just adjusted.
*/
// Euler step: position += velocity * dt for all nodes up to the end of the
// cell region (SaxpyFunctorDim2).
void SceCells::moveNodes() {
	thrust::transform(
			thrust::make_zip_iterator(
					thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(),
							nodes->getInfoVecs().nodeVelY.begin())),
			thrust::make_zip_iterator(
					thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(),
							nodes->getInfoVecs().nodeVelY.begin()))
					+ totalNodeCountForActiveCells + allocPara.startPosCells,
			thrust::make_zip_iterator(
					thrust::make_tuple(nodes->getInfoVecs().nodeLocX.begin(),
							nodes->getInfoVecs().nodeLocY.begin())),
			thrust::make_zip_iterator(
					thrust::make_tuple(nodes->getInfoVecs().nodeLocX.begin(),
							nodes->getInfoVecs().nodeLocY.begin())),
			SaxpyFunctorDim2(dt));
}
/**
* Add a point to a cell if it is scheduled to grow.
* This step does not guarantee success ; If adding new point failed, it will not change
* isScheduleToGrow and activeNodeCount;
*/
// For each cell scheduled to grow, AddPtOp attempts to insert a new node
// near the cell center (writing through raw device pointers); on success it
// updates activeNodeCountOfThisCell and lastCheckPoint, and clears the
// schedule flag. time(NULL) seeds the per-call RNG.
void SceCells::addPointIfScheduledToGrow() {
	thrust::transform(
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.isScheduledToGrow.begin(),
							cellInfoVecs.activeNodeCountOfThisCell.begin(),
							cellInfoVecs.centerCoordX.begin(),
							cellInfoVecs.centerCoordY.begin(), countingBegin,
							cellInfoVecs.lastCheckPoint.begin())),
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.isScheduledToGrow.begin(),
							cellInfoVecs.activeNodeCountOfThisCell.begin(),
							cellInfoVecs.centerCoordX.begin(),
							cellInfoVecs.centerCoordY.begin(), countingBegin,
							cellInfoVecs.lastCheckPoint.begin()))
					+ allocPara.currentActiveCellCount,
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.isScheduledToGrow.begin(),
							cellInfoVecs.activeNodeCountOfThisCell.begin(),
							cellInfoVecs.lastCheckPoint.begin())),
			AddPtOp(allocPara.maxNodeOfOneCell, miscPara.addNodeDistance,
					miscPara.minDistanceToOtherNode,
					growthAuxData.nodeIsActiveAddress,
					growthAuxData.nodeXPosAddress,
					growthAuxData.nodeYPosAddress, time(NULL),
					miscPara.growThreshold));
}
//Ali commented this constructor in 04/04/2017
// Legacy (non-membrane) constructor: wires up the node container, copies
// per-cell initial active-node counts and cell types, and distributes the
// is-active flags across all node regions.
SceCells::SceCells(SceNodes* nodesInput,
		std::vector<uint>& numOfInitActiveNodesOfCells,
		std::vector<SceNodeType>& cellTypes) :
		countingBegin(0), initIntnlNodeCount(
				nodesInput->getAllocPara().maxNodeOfOneCell / 2), initGrowthProgress(
				0.0) {
	curTime = 0.0 + 55800.0;//AAMIRI
	std ::cout << "I am in SceCells constructor with polymorphism shape "<<InitTimeStage<<std::endl ;
	initialize(nodesInput);
	copyInitActiveNodeCount(numOfInitActiveNodesOfCells);
	// stage cell types through a device vector before assignment
	thrust::device_vector<SceNodeType> cellTypesToPass = cellTypes;
	setCellTypes(cellTypesToPass);
	distributeIsActiveInfo();
}
// Membrane-model constructor: reads membrane/division parameters from the
// global config, initializes the _M data structures, copies GPU constant
// memory, and seeds per-cell membrane/internal counts and growth progress.
SceCells::SceCells(SceNodes* nodesInput,
		std::vector<uint>& initActiveMembrNodeCounts,
		std::vector<uint>& initActiveIntnlNodeCounts,
		std::vector<double> &initGrowProgVec, double InitTimeStage) {
	// curTime = 0.0 + 55800.0;//AAMIRIi
	curTime=InitTimeStage ;
	std ::cout << "I am in SceCells constructor with number of inputs "<<InitTimeStage<<std::endl ;
	lastTimeExchange=0 ;
	firstTimeReadDpp=true ;
	//currentActiveCellCountOld=1 ; // small number
	tmpDebug = false;
	aniDebug = false;
	membrPara.initFromConfig();
	shrinkRatio = globalConfigVars.getConfigValue("ShrinkRatio").toDouble();
	centerShiftRatio =
			globalConfigVars.getConfigValue("CenterShiftRatio").toDouble();
	memNewSpacing = globalConfigVars.getConfigValue("MembrLenDiv").toDouble();
	initialize_M(nodesInput);
	cout<< "size of dpp in constructor is "<< cellInfoVecs.cell_Dpp.size() << endl ;
	copyToGPUConstMem();
	copyInitActiveNodeCount_M(initActiveMembrNodeCounts,
			initActiveIntnlNodeCounts, initGrowProgVec);
}
// Allocates per-cell state vectors (legacy model) sized to maxCellCount.
void SceCells::initCellInfoVecs() {
	cellInfoVecs.growthProgress.resize(allocPara.maxCellCount, 0.0);
	cellInfoVecs.expectedLength.resize(allocPara.maxCellCount,
			bioPara.cellInitLength);
	cellInfoVecs.lengthDifference.resize(allocPara.maxCellCount, 0.0);
	cellInfoVecs.smallestDistance.resize(allocPara.maxCellCount);
	cellInfoVecs.biggestDistance.resize(allocPara.maxCellCount);
	cellInfoVecs.activeNodeCountOfThisCell.resize(allocPara.maxCellCount);
	cellInfoVecs.lastCheckPoint.resize(allocPara.maxCellCount, 0.0);
	cellInfoVecs.isDividing.resize(allocPara.maxCellCount);
	// default all cells to type MX
	cellInfoVecs.cellTypes.resize(allocPara.maxCellCount, MX);
	cellInfoVecs.isScheduledToGrow.resize(allocPara.maxCellCount, false);
	cellInfoVecs.centerCoordX.resize(allocPara.maxCellCount);
	cellInfoVecs.centerCoordY.resize(allocPara.maxCellCount);
	cellInfoVecs.centerCoordZ.resize(allocPara.maxCellCount);
	cellInfoVecs.cellRanksTmpStorage.resize(allocPara.maxCellCount);
	cellInfoVecs.growthSpeed.resize(allocPara.maxCellCount, 0.0);
	cellInfoVecs.growthXDir.resize(allocPara.maxCellCount);
	cellInfoVecs.growthYDir.resize(allocPara.maxCellCount);
	cellInfoVecs.isRandGrowInited.resize(allocPara.maxCellCount, false);
}
// Allocates per-cell state vectors for the membrane model, sized to
// allocPara_m.maxCellCount (includes Dpp signaling, mitosis flags, and
// Hertwig division-axis storage).
void SceCells::initCellInfoVecs_M() {
	//std::cout << "max cell count = " << allocPara_m.maxCellCount << std::endl;
	cellInfoVecs.Cell_Damp.resize(allocPara_m.maxCellCount, 36.0); //Ali
	cellInfoVecs.cell_Dpp.resize(allocPara_m.maxCellCount, 0.0);  //Ali
	cellInfoVecs.cell_DppOld.resize(allocPara_m.maxCellCount, 0.0);  //Ali
	//cout<< "size of dpp in init is "<< cellInfoVecs.cell_Dpp.size() << endl ;
	cellInfoVecs.growthProgress.resize(allocPara_m.maxCellCount, 0.0); //A&A
	cellInfoVecs.growthProgressOld.resize(allocPara_m.maxCellCount, 0.0);//Ali
	cellInfoVecs.Cell_Time.resize(allocPara_m.maxCellCount, 0.0); //Ali
	cellInfoVecs.expectedLength.resize(allocPara_m.maxCellCount,
			bioPara.cellInitLength);
	cellInfoVecs.lengthDifference.resize(allocPara_m.maxCellCount, 0.0);
	cellInfoVecs.smallestDistance.resize(allocPara_m.maxCellCount);
	cellInfoVecs.biggestDistance.resize(allocPara_m.maxCellCount);
	cellInfoVecs.activeMembrNodeCounts.resize(allocPara_m.maxCellCount);
	cellInfoVecs.activeIntnlNodeCounts.resize(allocPara_m.maxCellCount);
	cellInfoVecs.lastCheckPoint.resize(allocPara_m.maxCellCount, 0.0);
	cellInfoVecs.isDividing.resize(allocPara_m.maxCellCount);
	cellInfoVecs.isEnteringMitotic.resize(allocPara_m.maxCellCount, false); //A&A
	//cellInfoVecs.isRemoving.resize(allocPara.maxCellCount);//AAMIRI
	cellInfoVecs.isScheduledToGrow.resize(allocPara_m.maxCellCount, false);
	cellInfoVecs.isScheduledToShrink.resize(allocPara_m.maxCellCount, false);//AAMIRI
	cellInfoVecs.isCellActive.resize(allocPara_m.maxCellCount, false);//AAMIRI
	cellInfoVecs.centerCoordX.resize(allocPara_m.maxCellCount);
	cellInfoVecs.centerCoordY.resize(allocPara_m.maxCellCount);
	cellInfoVecs.centerCoordZ.resize(allocPara_m.maxCellCount);
	cellInfoVecs.HertwigXdir.resize(allocPara_m.maxCellCount,0.0); //A&A
	cellInfoVecs.HertwigYdir.resize(allocPara_m.maxCellCount,0.0); //A&A
	cellInfoVecs.cellRanksTmpStorage.resize(allocPara_m.maxCellCount);
	cellInfoVecs.growthSpeed.resize(allocPara_m.maxCellCount, 0.0);
	cellInfoVecs.growthXDir.resize(allocPara_m.maxCellCount);
	cellInfoVecs.growthYDir.resize(allocPara_m.maxCellCount);
	cellInfoVecs.isRandGrowInited.resize(allocPara_m.maxCellCount, false);
	cellInfoVecs.isMembrAddingNode.resize(allocPara_m.maxCellCount, false);
	cellInfoVecs.maxTenIndxVec.resize(allocPara_m.maxCellCount);
	cellInfoVecs.maxTenRiVec.resize(allocPara_m.maxCellCount);
	cellInfoVecs.maxDistToRiVec.resize(allocPara_m.maxCellCount); //Ali
	cellInfoVecs.maxTenRiMidXVec.resize(allocPara_m.maxCellCount);
	cellInfoVecs.maxTenRiMidYVec.resize(allocPara_m.maxCellCount);
	cellInfoVecs.aveTension.resize(allocPara_m.maxCellCount);
	cellInfoVecs.membrGrowProgress.resize(allocPara_m.maxCellCount, 0.0);
	cellInfoVecs.membrGrowSpeed.resize(allocPara_m.maxCellCount, 0.0);
	cellInfoVecs.cellAreaVec.resize(allocPara_m.maxCellCount, 0.0);
	cellInfoVecs.cellPerimVec.resize(allocPara_m.maxCellCount, 0.0);//AAMIRI
	std::cout << "finished " << std::endl;
}
// Allocates per-node scratch vectors (legacy model) sized to the maximum
// total cell-node count.
void SceCells::initCellNodeInfoVecs() {
	cellNodeInfoVecs.cellRanks.resize(allocPara.maxTotalCellNodeCount);
	cellNodeInfoVecs.activeXPoss.resize(allocPara.maxTotalCellNodeCount);
	cellNodeInfoVecs.activeYPoss.resize(allocPara.maxTotalCellNodeCount);
	cellNodeInfoVecs.activeZPoss.resize(allocPara.maxTotalCellNodeCount);
	cellNodeInfoVecs.distToCenterAlongGrowDir.resize(
			allocPara.maxTotalCellNodeCount);
}
// Allocates per-node scratch vectors (membrane model) sized to the maximum
// total node count.
void SceCells::initCellNodeInfoVecs_M() {
	std::cout << "max total node count = " << allocPara_m.maxTotalNodeCount
			<< std::endl;
	cellNodeInfoVecs.cellRanks.resize(allocPara_m.maxTotalNodeCount);
	cellNodeInfoVecs.activeXPoss.resize(allocPara_m.maxTotalNodeCount);
	cellNodeInfoVecs.activeYPoss.resize(allocPara_m.maxTotalNodeCount);
	cellNodeInfoVecs.activeZPoss.resize(allocPara_m.maxTotalNodeCount);
	cellNodeInfoVecs.distToCenterAlongGrowDir.resize(
			allocPara_m.maxTotalNodeCount);
}
// Caches raw device pointers into the node arrays (offset to the start of
// the cell region) so device functors can write directly, and loads growth
// speed parameters from the config.
void SceCells::initGrowthAuxData() {
	growthAuxData.nodeIsActiveAddress = thrust::raw_pointer_cast(
			&(nodes->getInfoVecs().nodeIsActive[allocPara.startPosCells]));
	growthAuxData.nodeXPosAddress = thrust::raw_pointer_cast(
			&(nodes->getInfoVecs().nodeLocX[allocPara.startPosCells]));
	growthAuxData.nodeYPosAddress = thrust::raw_pointer_cast(
			&(nodes->getInfoVecs().nodeLocY[allocPara.startPosCells]));
	growthAuxData.randomGrowthSpeedMin = globalConfigVars.getConfigValue(
			"RandomGrowthSpeedMin").toDouble();
	growthAuxData.randomGrowthSpeedMax = globalConfigVars.getConfigValue(
			"RandomGrowthSpeedMax").toDouble();
	growthAuxData.randGenAuxPara = globalConfigVars.getConfigValue(
			"RandomGenerationAuxPara").toDouble();
	// single-cell simulations use a fixed rather than random growth speed
	if (controlPara.simuType == SingleCellTest) {
		growthAuxData.fixedGrowthSpeed = globalConfigVars.getConfigValue(
				"FixedGrowthSpeed").toDouble();
	}
}
// Membrane-model variant: caches raw device pointers (offset past the
// boundary nodes) including the adhesion index array, and keeps *_Ori
// copies of the growth-speed bounds so they can be restored later.
void SceCells::initGrowthAuxData_M() {
	growthAuxData.nodeIsActiveAddress = thrust::raw_pointer_cast(
			&(nodes->getInfoVecs().nodeIsActive[allocPara_m.bdryNodeCount]));
	growthAuxData.nodeXPosAddress = thrust::raw_pointer_cast(
			&(nodes->getInfoVecs().nodeLocX[allocPara_m.bdryNodeCount]));
	growthAuxData.nodeYPosAddress = thrust::raw_pointer_cast(
			&(nodes->getInfoVecs().nodeLocY[allocPara_m.bdryNodeCount]));
	growthAuxData.adhIndxAddr = thrust::raw_pointer_cast(
			&(nodes->getInfoVecs().nodeAdhereIndex[allocPara_m.bdryNodeCount]));
	growthAuxData.randomGrowthSpeedMin_Ori = globalConfigVars.getConfigValue(
			"RandomGrowthSpeedMin").toDouble();
	growthAuxData.randomGrowthSpeedMax_Ori = globalConfigVars.getConfigValue(
			"RandomGrowthSpeedMax").toDouble();
	growthAuxData.randomGrowthSpeedMin = growthAuxData.randomGrowthSpeedMin_Ori;
	growthAuxData.randomGrowthSpeedMax = growthAuxData.randomGrowthSpeedMax_Ori;
	growthAuxData.grthPrgrCriVal_M_Ori = globalConfigVars.getConfigValue(
			"GrowthPrgrCriVal").toDouble();
	growthAuxData.grthProgrEndCPU = globalConfigVars.getConfigValue(
			"GrowthPrgrValEnd").toDouble();
}
// Legacy-model initialization: reads parameters, allocates per-cell and
// per-node vectors, and assigns cell ranks to node slots.
void SceCells::initialize(SceNodes* nodesInput) {
	nodes = nodesInput;
	controlPara = nodes->getControlPara();
	readMiscPara();
	readBioPara();
	allocPara = nodesInput->getAllocPara();
	// max internal node count must be even number.
	// NOTE(review): this asserts a field of allocPara_m inside the non-_M
	// initialization path; confirm allocPara_m is valid here, or whether an
	// allocPara field was intended.
	assert(allocPara_m.maxIntnlNodePerCell % 2 == 0);
	initCellInfoVecs();
	initCellNodeInfoVecs();
	initGrowthAuxData();
	distributeIsCellRank();
}
// Membrane-model initialization: reads parameters and allocates the _M
// per-cell and per-node vectors plus growth auxiliary pointers.
void SceCells::initialize_M(SceNodes* nodesInput) {
	std::cout << "Initializing cells ...... " << std::endl;
	//std::cout.flush();
	nodes = nodesInput;
	allocPara_m = nodesInput->getAllocParaM();
	// max internal node count must be even number.
	assert(allocPara_m.maxIntnlNodePerCell % 2 == 0);
	//std::cout << "break point 1 " << std::endl;
	//std::cout.flush();
	controlPara = nodes->getControlPara();
	//std::cout << "break point 2 " << std::endl;
	//std::cout.flush();
	readMiscPara_M();
	//std::cout << "break point 3 " << std::endl;
	//std::cout.flush();
	initCellInfoVecs_M();
	cout<< "size of dpp initilizie is "<< cellInfoVecs.cell_Dpp.size() << endl ;
	//std::cout << "break point 4 " << std::endl;
	//std::cout.flush();
	readBioPara();
	//std::cout << "break point 5 " << std::endl;
	//std::cout.flush();
	//std::cout << "break point 6 " << std::endl;
	//std::cout.flush();
	initCellNodeInfoVecs_M();
	//std::cout << "break point 7 " << std::endl;
	//std::cout.flush();
	initGrowthAuxData_M();
	//std::cout << "break point 8 " << std::endl;
	//std::cout.flush();
}
// Copies the host-side initial active-node counts into the device-side
// per-cell vector.
void SceCells::copyInitActiveNodeCount(
		std::vector<uint>& numOfInitActiveNodesOfCells) {
	thrust::copy(numOfInitActiveNodesOfCells.begin(),
			numOfInitActiveNodesOfCells.end(),
			cellInfoVecs.activeNodeCountOfThisCell.begin());
}
// One motion step: clamp/adjust velocities, then integrate positions.
void SceCells::allComponentsMove() {
	adjustNodeVel();
	moveNodes();
}
/**
* Mark cell node as either activdistributeIsActiveInfo()e or inactive.
* left part of the node array will be active and right part will be inactive.
* the threshold is defined by array activeNodeCountOfThisCell.
* e.g. activeNodeCountOfThisCell = {2,3} and maxNodeOfOneCell = 5
*/
// Refreshes the is-active flags for every node region in order:
// boundary, profile, ECM, then cells.
void SceCells::distributeIsActiveInfo() {
	//std::cout << "before distribute bdry isActive" << std::endl;
	distributeBdryIsActiveInfo();
	//std::cout << "before distribute profile isActive" << std::endl;
	distributeProfileIsActiveInfo();
	//std::cout << "before distribute ecm isActive" << std::endl;
	distributeECMIsActiveInfo();
	//std::cout << "before distribute cells isActive" << std::endl;
	distributeCellIsActiveInfo();
}
/**
 * Writes each cell node's owning cell rank into nodeCellRank.
 * Node slot i (within the cell region) belongs to cell
 * (i / maxNodeOfOneCell); only the cell region of the node array,
 * starting at startPosCells, is touched.
 */
void SceCells::distributeIsCellRank() {
	uint activeCellNodeCount = allocPara.currentActiveCellCount
			* allocPara.maxNodeOfOneCell;
	std::cerr << "totalNodeCount for active cells " << activeCellNodeCount
			<< std::endl;
	// Map flat node index -> cell rank via integer division.
	thrust::counting_iterator<uint> first(0);
	thrust::transform(first, first + activeCellNodeCount,
			nodes->getInfoVecs().nodeCellRank.begin() + allocPara.startPosCells,
			DivideFunctor(allocPara.maxNodeOfOneCell));
	std::cerr << "finished cellRank transformation" << std::endl;
}
/**
* This method computes center of all cells.
 * more efficient than simply iterating over the cells because of parallel reduction.
*/
// Computes each active cell's center as the mean position of its active nodes.
void SceCells::computeCenterPos() {
uint totalNodeCountForActiveCells = allocPara.currentActiveCellCount
* allocPara.maxNodeOfOneCell;
thrust::counting_iterator<uint> countingBegin(0);
thrust::counting_iterator<uint> countingEnd(totalNodeCountForActiveCells);
// Total active nodes across all cells; bounds the reduce_by_key input.
uint totalNumberOfActiveNodes = thrust::reduce(
cellInfoVecs.activeNodeCountOfThisCell.begin(),
cellInfoVecs.activeNodeCountOfThisCell.begin()
+ allocPara.currentActiveCellCount);
// Stage 1: compact (cellRank, x, y, z) tuples of active nodes into
// contiguous arrays; cell rank comes from index / maxNodeOfOneCell,
// stencil is nodeIsActive.
thrust::copy_if(
thrust::make_zip_iterator(
thrust::make_tuple(
make_transform_iterator(countingBegin,
DivideFunctor(allocPara.maxNodeOfOneCell)),
nodes->getInfoVecs().nodeLocX.begin()
+ allocPara.startPosCells,
nodes->getInfoVecs().nodeLocY.begin()
+ allocPara.startPosCells,
nodes->getInfoVecs().nodeLocZ.begin()
+ allocPara.startPosCells)),
thrust::make_zip_iterator(
thrust::make_tuple(
make_transform_iterator(countingBegin,
DivideFunctor(allocPara.maxNodeOfOneCell)),
nodes->getInfoVecs().nodeLocX.begin()
+ allocPara.startPosCells,
nodes->getInfoVecs().nodeLocY.begin()
+ allocPara.startPosCells,
nodes->getInfoVecs().nodeLocZ.begin()
+ allocPara.startPosCells))
+ totalNodeCountForActiveCells,
nodes->getInfoVecs().nodeIsActive.begin() + allocPara.startPosCells,
thrust::make_zip_iterator(
thrust::make_tuple(cellNodeInfoVecs.cellRanks.begin(),
cellNodeInfoVecs.activeXPoss.begin(),
cellNodeInfoVecs.activeYPoss.begin(),
cellNodeInfoVecs.activeZPoss.begin())), isTrue());
// Stage 2: per-cell sums of (x, y, z) via segmented reduction keyed on
// cell rank (ranks are contiguous after the compaction above).
thrust::reduce_by_key(cellNodeInfoVecs.cellRanks.begin(),
cellNodeInfoVecs.cellRanks.begin() + totalNumberOfActiveNodes,
thrust::make_zip_iterator(
thrust::make_tuple(cellNodeInfoVecs.activeXPoss.begin(),
cellNodeInfoVecs.activeYPoss.begin(),
cellNodeInfoVecs.activeZPoss.begin())),
cellInfoVecs.cellRanksTmpStorage.begin(),
thrust::make_zip_iterator(
thrust::make_tuple(cellInfoVecs.centerCoordX.begin(),
cellInfoVecs.centerCoordY.begin(),
cellInfoVecs.centerCoordZ.begin())),
thrust::equal_to<uint>(), CVec3Add());
// Stage 3: divide each sum by the cell's active node count (in place).
thrust::transform(
thrust::make_zip_iterator(
thrust::make_tuple(cellInfoVecs.centerCoordX.begin(),
cellInfoVecs.centerCoordY.begin(),
cellInfoVecs.centerCoordZ.begin())),
thrust::make_zip_iterator(
thrust::make_tuple(cellInfoVecs.centerCoordX.begin(),
cellInfoVecs.centerCoordY.begin(),
cellInfoVecs.centerCoordZ.begin()))
+ allocPara.currentActiveCellCount,
cellInfoVecs.activeNodeCountOfThisCell.begin(),
thrust::make_zip_iterator(
thrust::make_tuple(cellInfoVecs.centerCoordX.begin(),
cellInfoVecs.centerCoordY.begin(),
cellInfoVecs.centerCoordZ.begin())), CVec3Divide());
}
/**
* 2D version of cell division.
* Division process is done by creating two temporary vectors to hold the node information
* that are going to divide.
*
* step 1: based on lengthDifference, expectedLength and growthProgress,
* this process determines whether a certain cell is ready to divide and then assign
* a boolean value to isDivided.
*
* step 2. copy those cells that will divide in to the temp vectors created
*
* step 3. For each cell in the temp vectors, we sort its nodes by its distance to the
* corresponding cell center.
 * This step is not very efficient when the number of cells going to divide is big.
* but this is unlikely to happen because cells will divide according to external chemical signaling
* and each will have different divide progress.
*
* step 4. copy the right part of each cell of the sorted array (temp1) to left part of each cell of
* another array
*
* step 5. transform isActive vector of both temp1 and temp2, making only left part of each cell active.
*
* step 6. insert temp2 to the end of the cell array
*
* step 7. copy temp1 to the previous position of the cell array.
*
* step 8. add activeCellCount of the system.
*
* step 9. mark isDivide of all cells to false.
*/
/**
 * Runs the 2D cell division pipeline (steps 1-9 described above);
 * does nothing when no cell satisfies the division criterion this step.
 */
void SceCells::divide2DSimplified() {
	if (decideIfGoingToDivide()) {
		copyCellsPreDivision();
		sortNodesAccordingToDist();
		copyLeftAndRightToSeperateArrays();
		transformIsActiveArrayOfBothArrays();
		addSecondArrayToCellArray();
		copyFirstArrayToPreviousPos();
		updateActiveCellCount();
		markIsDivideFalse();
	}
}
/**
 * step 1 of division: for every active cell, CompuIsDivide decides from
 * (lengthDifference, expectedLength, growthProgress, activeNodeCount)
 * whether the cell divides this step, writing (isDividing,
 * growthProgress) back. The division flags are then summed into
 * divAuxData.toBeDivideCount.
 *
 * @return true when at least one cell is scheduled to divide.
 */
bool SceCells::decideIfGoingToDivide() {
// step 1
thrust::transform(
thrust::make_zip_iterator(
thrust::make_tuple(cellInfoVecs.lengthDifference.begin(),
cellInfoVecs.expectedLength.begin(),
cellInfoVecs.growthProgress.begin(),
cellInfoVecs.activeNodeCountOfThisCell.begin())),
thrust::make_zip_iterator(
thrust::make_tuple(cellInfoVecs.lengthDifference.begin(),
cellInfoVecs.expectedLength.begin(),
cellInfoVecs.growthProgress.begin(),
cellInfoVecs.activeNodeCountOfThisCell.begin()))
+ allocPara.currentActiveCellCount,
thrust::make_zip_iterator(
thrust::make_tuple(cellInfoVecs.isDividing.begin(),
cellInfoVecs.growthProgress.begin())),
CompuIsDivide(miscPara.isDivideCriticalRatio,
allocPara.maxNodeOfOneCell));
// sum all bool values which indicate whether the cell is going to divide.
// toBeDivideCount is the total number of cells going to divide.
divAuxData.toBeDivideCount = thrust::reduce(cellInfoVecs.isDividing.begin(),
cellInfoVecs.isDividing.begin() + allocPara.currentActiveCellCount,
(uint) (0));
// Return the predicate directly instead of the original if/else chain.
return divAuxData.toBeDivideCount > 0;
}
/**
 * step 2 of division: allocates the temporary holding arrays sized for
 * the dividing cells, then compacts (cellRank, distToCenter, x, y, z,
 * cellType) of every node that belongs to a dividing cell into the
 * "hold1" arrays. The stencil is the per-cell isDividing flag expanded
 * to per-node granularity via the permutation iterator.
 */
void SceCells::copyCellsPreDivision() {
// step 2 : copy all cell rank and distance to its corresponding center with divide flag = 1
totalNodeCountForActiveCells = allocPara.currentActiveCellCount
* allocPara.maxNodeOfOneCell;
divAuxData.nodeStorageCount = divAuxData.toBeDivideCount
* allocPara.maxNodeOfOneCell;
divAuxData.tmpIsActiveHold1 = thrust::device_vector<bool>(
divAuxData.nodeStorageCount, true);
divAuxData.tmpDistToCenter1 = thrust::device_vector<double>(
divAuxData.nodeStorageCount, 0.0);
// fix: fill value for the uint vector is an integer literal (was 0.0).
divAuxData.tmpCellRankHold1 = thrust::device_vector<uint>(
divAuxData.nodeStorageCount, 0);
divAuxData.tmpXValueHold1 = thrust::device_vector<double>(
divAuxData.nodeStorageCount, 0.0);
divAuxData.tmpYValueHold1 = thrust::device_vector<double>(
divAuxData.nodeStorageCount, 0.0);
divAuxData.tmpZValueHold1 = thrust::device_vector<double>(
divAuxData.nodeStorageCount, 0.0);
divAuxData.tmpCellTypes = thrust::device_vector<SceNodeType>(
divAuxData.nodeStorageCount);
divAuxData.tmpIsActiveHold2 = thrust::device_vector<bool>(
divAuxData.nodeStorageCount, false);
divAuxData.tmpDistToCenter2 = thrust::device_vector<double>(
divAuxData.nodeStorageCount, 0.0);
divAuxData.tmpXValueHold2 = thrust::device_vector<double>(
divAuxData.nodeStorageCount, 0.0);
divAuxData.tmpYValueHold2 = thrust::device_vector<double>(
divAuxData.nodeStorageCount, 0.0);
divAuxData.tmpZValueHold2 = thrust::device_vector<double>(
divAuxData.nodeStorageCount, 0.0);
// step 2 , continued
thrust::copy_if(
thrust::make_zip_iterator(
thrust::make_tuple(
make_transform_iterator(countingBegin,
DivideFunctor(allocPara.maxNodeOfOneCell)),
cellNodeInfoVecs.distToCenterAlongGrowDir.begin(),
nodes->getInfoVecs().nodeLocX.begin()
+ allocPara.startPosCells,
nodes->getInfoVecs().nodeLocY.begin()
+ allocPara.startPosCells,
nodes->getInfoVecs().nodeLocZ.begin()
+ allocPara.startPosCells,
nodes->getInfoVecs().nodeCellType.begin()
+ allocPara.startPosCells)),
thrust::make_zip_iterator(
thrust::make_tuple(
make_transform_iterator(countingBegin,
DivideFunctor(allocPara.maxNodeOfOneCell)),
cellNodeInfoVecs.distToCenterAlongGrowDir.begin(),
nodes->getInfoVecs().nodeLocX.begin()
+ allocPara.startPosCells,
nodes->getInfoVecs().nodeLocY.begin()
+ allocPara.startPosCells,
nodes->getInfoVecs().nodeLocZ.begin()
+ allocPara.startPosCells,
nodes->getInfoVecs().nodeCellType.begin()
+ allocPara.startPosCells))
+ totalNodeCountForActiveCells,
thrust::make_permutation_iterator(cellInfoVecs.isDividing.begin(),
make_transform_iterator(countingBegin,
DivideFunctor(allocPara.maxNodeOfOneCell))),
thrust::make_zip_iterator(
thrust::make_tuple(divAuxData.tmpCellRankHold1.begin(),
divAuxData.tmpDistToCenter1.begin(),
divAuxData.tmpXValueHold1.begin(),
divAuxData.tmpYValueHold1.begin(),
divAuxData.tmpZValueHold1.begin(),
divAuxData.tmpCellTypes.begin())), isTrue());
}
/**
* performance wise, this implementation is not the best because I can use only one sort_by_key
 * with a specialized comparison operator. However, this implementation is more robust and won't
* compromise performance too much.
*/
/**
 * step 3: for each dividing cell, sort its node block by distance to
 * the cell center; (x, y, z) positions are permuted with the keys.
 */
void SceCells::sortNodesAccordingToDist() {
	const uint nodesPerCell = allocPara.maxNodeOfOneCell;
	for (uint i = 0; i < divAuxData.toBeDivideCount; i++) {
		// Start of this cell's block inside the temporary arrays.
		uint offset = i * nodesPerCell;
		thrust::sort_by_key(divAuxData.tmpDistToCenter1.begin() + offset,
				divAuxData.tmpDistToCenter1.begin() + offset + nodesPerCell,
				thrust::make_zip_iterator(
						thrust::make_tuple(
								divAuxData.tmpXValueHold1.begin() + offset,
								divAuxData.tmpYValueHold1.begin() + offset,
								divAuxData.tmpZValueHold1.begin() + offset)));
	}
}
/**
* scatter_if() is a thrust function.
* inputIter1 first,
* inputIter1 last,
* inputIter2 map,
* inputIter3 stencil
* randomAccessIter output
*/
/**
 * step 4: scatter the right half of each dividing cell's sorted node
 * block into the left half of the second temporary array.
 * LeftShiftFunctor maps a right-half index to its target left-half
 * position; IsRightSide is the stencil so only right-half slots move.
 */
void SceCells::copyLeftAndRightToSeperateArrays() {
//step 4.
thrust::scatter_if(
thrust::make_zip_iterator(
thrust::make_tuple(divAuxData.tmpXValueHold1.begin(),
divAuxData.tmpYValueHold1.begin(),
divAuxData.tmpZValueHold1.begin())),
thrust::make_zip_iterator(
thrust::make_tuple(divAuxData.tmpXValueHold1.end(),
divAuxData.tmpYValueHold1.end(),
divAuxData.tmpZValueHold1.end())),
make_transform_iterator(countingBegin,
LeftShiftFunctor(allocPara.maxNodeOfOneCell)),
make_transform_iterator(countingBegin,
IsRightSide(allocPara.maxNodeOfOneCell)),
thrust::make_zip_iterator(
thrust::make_tuple(divAuxData.tmpXValueHold2.begin(),
divAuxData.tmpYValueHold2.begin(),
divAuxData.tmpZValueHold2.begin())));
}
/**
 * step 5: rebuild the isActive flags of both temporary node arrays so
 * only the left half of each cell's node block is active.
 * NOTE(review): both arrays use IsLeftSide — after step 4 each
 * daughter's nodes occupy the left half of its block, so this looks
 * intentional; confirm against addNewlyDividedCells.
 */
void SceCells::transformIsActiveArrayOfBothArrays() {
thrust::transform(countingBegin,
countingBegin + divAuxData.nodeStorageCount,
divAuxData.tmpIsActiveHold1.begin(),
IsLeftSide(allocPara.maxNodeOfOneCell));
thrust::transform(countingBegin,
countingBegin + divAuxData.nodeStorageCount,
divAuxData.tmpIsActiveHold2.begin(),
IsLeftSide(allocPara.maxNodeOfOneCell));
// Debug output only when a division actually happens this step.
if (divAuxData.toBeDivideCount != 0) {
std::cout << "before insert, active cell count in nodes:"
<< nodes->getAllocPara().currentActiveCellCount << std::endl;
}
}
/**
 * step 6: hand the second temporary array (the new daughter cells) to
 * the node container, which appends them to the cell node array.
 */
void SceCells::addSecondArrayToCellArray() {
/// step 6. call SceNodes function to add newly divided cells
nodes->addNewlyDividedCells(divAuxData.tmpXValueHold2,
divAuxData.tmpYValueHold2, divAuxData.tmpZValueHold2,
divAuxData.tmpIsActiveHold2, divAuxData.tmpCellTypes);
}
/**
 * step 7: scatter the first temporary array (isActive, x, y, z) back
 * into each dividing cell's original slot in the node array; CompuPos
 * maps (local index, cell rank) to the global node position. Then reset
 * the dividing cells' per-cell state and give the newly appended
 * daughter cells their initial active node count.
 */
void SceCells::copyFirstArrayToPreviousPos() {
thrust::scatter(
thrust::make_zip_iterator(
thrust::make_tuple(divAuxData.tmpIsActiveHold1.begin(),
divAuxData.tmpXValueHold1.begin(),
divAuxData.tmpYValueHold1.begin(),
divAuxData.tmpZValueHold1.begin())),
thrust::make_zip_iterator(
thrust::make_tuple(divAuxData.tmpIsActiveHold1.end(),
divAuxData.tmpXValueHold1.end(),
divAuxData.tmpYValueHold1.end(),
divAuxData.tmpZValueHold1.end())),
thrust::make_transform_iterator(
thrust::make_zip_iterator(
thrust::make_tuple(countingBegin,
divAuxData.tmpCellRankHold1.begin())),
CompuPos(allocPara.maxNodeOfOneCell)),
thrust::make_zip_iterator(
thrust::make_tuple(
nodes->getInfoVecs().nodeIsActive.begin()
+ allocPara.startPosCells,
nodes->getInfoVecs().nodeLocX.begin()
+ allocPara.startPosCells,
nodes->getInfoVecs().nodeLocY.begin()
+ allocPara.startPosCells,
nodes->getInfoVecs().nodeLocZ.begin()
+ allocPara.startPosCells)));
/**
 * after dividing, the cell should resume the initial
 * (1) node count, which defaults to be half size of max node count
 * (2) growth progress, which defaults to 0
 * (3) last check point, which defaults to 0
 */
thrust::scatter_if(
thrust::make_zip_iterator(
thrust::make_tuple(initIntnlNodeCount, initGrowthProgress,
initGrowthProgress)),
thrust::make_zip_iterator(
thrust::make_tuple(initIntnlNodeCount, initGrowthProgress,
initGrowthProgress))
+ allocPara.currentActiveCellCount, countingBegin,
cellInfoVecs.isDividing.begin(),
thrust::make_zip_iterator(
thrust::make_tuple(
cellInfoVecs.activeNodeCountOfThisCell.begin(),
cellInfoVecs.growthProgress.begin(),
cellInfoVecs.lastCheckPoint.begin())), isTrue());
// TODO: combine this one with the previous scatter_if to improve efficiency.
// New daughter cells (appended at the end) start with half the max count.
thrust::fill(
cellInfoVecs.activeNodeCountOfThisCell.begin()
+ allocPara.currentActiveCellCount,
cellInfoVecs.activeNodeCountOfThisCell.begin()
+ allocPara.currentActiveCellCount
+ divAuxData.toBeDivideCount,
allocPara.maxNodeOfOneCell / 2);
}
/**
 * step 8: grow the active cell count by the number of divisions just
 * performed and mirror the new count into the node container.
 */
void SceCells::updateActiveCellCount() {
	allocPara.currentActiveCellCount += divAuxData.toBeDivideCount;
	NodeAllocPara nodePara = nodes->getAllocPara();
	nodePara.currentActiveCellCount = allocPara.currentActiveCellCount;
	nodes->setAllocPara(nodePara);
}
/**
 * step 9: clear every active cell's division flag so the next time
 * step starts from a clean state.
 */
void SceCells::markIsDivideFalse() {
	uint nCells = allocPara.currentActiveCellCount;
	thrust::fill(cellInfoVecs.isDividing.begin(),
			cellInfoVecs.isDividing.begin() + nCells, false);
}
/**
 * Reads miscellaneous simulation parameters from the global config.
 * growThreshold is the growth-progress increment required per added
 * node, with a small epsilon to absorb floating-point accumulation.
 * NOTE: "IsDivideCrticalRatio" is spelled this way as a lookup key —
 * presumably matching the config file; keep the two in sync.
 */
void SceCells::readMiscPara() {
miscPara.addNodeDistance = globalConfigVars.getConfigValue(
"DistanceForAddingNode").toDouble();
miscPara.minDistanceToOtherNode = globalConfigVars.getConfigValue(
"MinDistanceToOtherNode").toDouble();
miscPara.isDivideCriticalRatio = globalConfigVars.getConfigValue(
"IsDivideCrticalRatio").toDouble();
// reason for adding a small term here is to avoid scenario when checkpoint might add many times
// up to 0.99999999 which is theoretically 1.0 but not in computer memory. If we don't include
// this small term we might risk adding one more node.
int maxNodeOfOneCell =
globalConfigVars.getConfigValue("MaxNodePerCell").toInt();
miscPara.growThreshold = 1.0 / (maxNodeOfOneCell - maxNodeOfOneCell / 2)
+ epsilon;
}
/**
 * "_M"-variant of readMiscPara: growThreshold is based on the internal
 * node count per cell, and the proliferation decay coefficient is read
 * additionally.
 * NOTE: "IsDivideCrticalRatio" is spelled this way as a lookup key —
 * presumably matching the config file; keep the two in sync.
 */
void SceCells::readMiscPara_M() {
miscPara.addNodeDistance = globalConfigVars.getConfigValue(
"DistanceForAddingNode").toDouble();
miscPara.minDistanceToOtherNode = globalConfigVars.getConfigValue(
"MinDistanceToOtherNode").toDouble();
miscPara.isDivideCriticalRatio = globalConfigVars.getConfigValue(
"IsDivideCrticalRatio").toDouble();
// reason for adding a small term here is to avoid scenario when checkpoint might add many times
// up to 0.99999999 which is theoretically 1.0 but not in computer memory. If we don't include
// this small term we might risk adding one more node.
int maxIntnlNodePerCell = globalConfigVars.getConfigValue(
"MaxIntnlNodeCountPerCell").toInt();
miscPara.growThreshold = 1.0
/ (maxIntnlNodePerCell - maxIntnlNodePerCell / 2) + epsilon;
miscPara.prolifDecayCoeff = globalConfigVars.getConfigValue(
"ProlifDecayCoeff").toDouble();
}
/**
 * Reads biology parameters from the global config. Cell length and
 * elongation parameters are skipped for the Disc_M simulation type;
 * the chemotaxis coefficient is only read for the Beak type.
 */
void SceCells::readBioPara() {
if (controlPara.simuType != Disc_M) {
bioPara.cellInitLength = globalConfigVars.getConfigValue(
"CellInitLength").toDouble();
std::cout << "break point 1 " << bioPara.cellInitLength << std::endl;
std::cout.flush();
bioPara.cellFinalLength = globalConfigVars.getConfigValue(
"CellFinalLength").toDouble();
std::cout << "break point 2 " << bioPara.cellFinalLength << std::endl;
std::cout.flush();
bioPara.elongationCoefficient = globalConfigVars.getConfigValue(
"ElongateCoefficient").toDouble();
std::cout << "break point 3 " << bioPara.elongationCoefficient
<< std::endl;
std::cout.flush();
}
if (controlPara.simuType == Beak) {
std::cout << "break point 4 " << std::endl;
std::cout.flush();
bioPara.chemoCoefficient = globalConfigVars.getConfigValue(
"ChemoCoefficient").toDouble();
}
//std::cin >> jj;
}
/**
 * Assigns a random growth speed and direction to every active cell
 * whose random growth has not been initialized yet; the
 * AssignRandIfNotInit functor leaves already-initialized cells alone
 * and flips isRandGrowInited for the ones it seeds.
 */
void SceCells::randomizeGrowth() {
thrust::counting_iterator<uint> countingBegin(0);
thrust::transform(
thrust::make_zip_iterator(
thrust::make_tuple(cellInfoVecs.centerCoordX.begin(),
cellInfoVecs.growthXDir.begin(),
cellInfoVecs.growthYDir.begin(),
cellInfoVecs.isRandGrowInited.begin(),
countingBegin)),
thrust::make_zip_iterator(
thrust::make_tuple(cellInfoVecs.centerCoordX.begin(),
cellInfoVecs.growthXDir.begin(),
cellInfoVecs.growthYDir.begin(),
cellInfoVecs.isRandGrowInited.begin(),
countingBegin)) + allocPara.currentActiveCellCount,
thrust::make_zip_iterator(
thrust::make_tuple(cellInfoVecs.growthSpeed.begin(),
cellInfoVecs.growthXDir.begin(),
cellInfoVecs.growthYDir.begin(),
cellInfoVecs.isRandGrowInited.begin())),
AssignRandIfNotInit(growthAuxData.randomGrowthSpeedMin,
growthAuxData.randomGrowthSpeedMax,
allocPara.currentActiveCellCount,
growthAuxData.randGenAuxPara));
}
/**
* To run all the cell level logics.
 * First, we get the center positions of all cells.
* Grow.
*/
/**
 * One time step of the disc-model cell logic: recompute centers, then
 * (unless the run is in stabilization mode) grow, divide, redistribute
 * isActive flags and growth progress, and finally move all nodes.
 *
 * @param dt time step size, cached in this->dt for the sub-steps.
 */
void SceCells::runAllCellLevelLogicsDisc(double dt) {
this->dt = dt;
//std::cerr << "enter run all cell level logics" << std::endl;
computeCenterPos();
//std::cerr << "after compute center position." << std::endl;
// Growth/division are suppressed while the system is stabilizing.
if (nodes->getControlPara().controlSwitchs.stab == OFF) {
growAtRandom(dt);
//grow2DTwoRegions(dt, region1, region2);
//std::cerr << "after grow cells" << std::endl;
//distributeIsActiveInfo();
//std::cerr << "after distribute is active info." << std::endl;
divide2DSimplified();
//std::cerr << "after divide 2D simplified." << std::endl;
distributeIsActiveInfo();
//std::cerr << "after distribute is active info." << std::endl;
distributeCellGrowthProgress();
}
allComponentsMove();
//std::cerr << "after all components move." << std::endl;
}
//Ali void SceCells::runAllCellLogicsDisc_M(double dt) {
/**
 * One time step of the "_M" disc-model cell logic. Caches dt, the
 * damping coefficient and the initial time stage, updates the random
 * growth speed range (proliferation decay is currently disabled — see
 * the inline comment), advances curTime, and then runs the fixed
 * pipeline: forces -> centers -> signaling -> boundary condition ->
 * membrane force -> growth -> division -> bookkeeping -> movement ->
 * membrane growth. The numbered stage prints are progress markers.
 */
void SceCells::runAllCellLogicsDisc_M(double dt, double Damp_Coef, double InitTimeStage) { //Ali
std::cout << " *** 1 ***" << endl;
std::cout.flush();
this->dt = dt;
this->Damp_Coef=Damp_Coef ; //Ali
this->InitTimeStage=InitTimeStage ; //A & A
growthAuxData.prolifDecay =1.0 ; // no decay for right now exp(-curTime * miscPara.prolifDecayCoeff);
cout<< "The important curTime used in simulation is here which is"<<curTime <<endl;
growthAuxData.randomGrowthSpeedMin = growthAuxData.prolifDecay
* growthAuxData.randomGrowthSpeedMin_Ori;
growthAuxData.randomGrowthSpeedMax = growthAuxData.prolifDecay
* growthAuxData.randomGrowthSpeedMax_Ori;
curTime = curTime + dt;
std::cout << " *** 2 ***" << endl;
std::cout.flush();
applySceCellDisc_M();
std::cout << " *** 3 ***" << endl;
std::cout.flush();
//Ali
computeCenterPos_M();
exchSignal();
BC_Imp_M() ;
std::cout << " *** 3.5 ***" << endl;
std::cout.flush();
//Ali
applyMemForce_M();
std::cout << " *** 4 ***" << endl;
std::cout.flush();
//Ali cmment //
// computeCenterPos_M();
std::cout << " *** 5 ***" << endl;
std::cout.flush();
//Ali cmment //
growAtRandom_M(dt);
std::cout << " *** 6 ***" << endl;
std::cout.flush();
//if (curTime<3300.0)
divide2D_M();
std::cout << " *** 7 ***" << endl;
std::cout.flush();
distributeCellGrowthProgress_M();
std::cout << " *** 8 ***" << endl;
std::cout.flush();
findTangentAndNormal_M();//AAMIRI ADDED May29
allComponentsMove_M();
std::cout << " *** 9 ***" << endl;
std::cout.flush();
handleMembrGrowth_M();
std::cout << " *** 10 ***" << endl;
std::cout.flush();
}
/**
 * Exchanges data with the signaling module. On the first call the
 * module is initialized from the node-allocation sizes. Every
 * exchPeriod of accumulated simulation time, the tissue bounding box
 * and effective radius are recomputed, the node/center state is copied
 * to the signaling module, updateSignal is run, and the resulting Dpp
 * levels are copied back into cellInfoVecs.cell_Dpp.
 * NOTE(review): exchPeriod is hard-coded to 360 — presumably in the
 * same time units as dt; confirm and consider making it configurable.
 */
void SceCells::exchSignal(){
if (firstTimeReadDpp) {
uint maxTotalNodes=nodes->getInfoVecs().nodeLocX.size() ;
signal.Initialize(allocPara_m.maxAllNodePerCell,allocPara_m.maxMembrNodePerCell,maxTotalNodes, allocPara_m.maxCellCount) ;
cout << " I passed the initializtion for signaling module" << endl ;
}
lastTimeExchange=lastTimeExchange+dt ;
cout << "last time exchange is " << lastTimeExchange << endl ;
cout << "dt is " << dt << endl ;
double exchPeriod=360 ;
if ( lastTimeExchange>exchPeriod) {
lastTimeExchange=0 ;
//vector<CVector> cellCentersHost ;
//cellCentersHost=getAllCellCenters(); //Ali
cout << "I entered the function to update dpp" << endl ;
// Bounding box of the active cell centers -> tissue extent.
thrust::device_vector<double>::iterator MinX_Itr=thrust::min_element(cellInfoVecs.centerCoordX.begin(),
cellInfoVecs.centerCoordX.begin()+allocPara_m.currentActiveCellCount) ;
thrust::device_vector<double>::iterator MaxX_Itr=thrust::max_element(cellInfoVecs.centerCoordX.begin(),
cellInfoVecs.centerCoordX.begin()+ allocPara_m.currentActiveCellCount) ;
thrust::device_vector<double>::iterator MinY_Itr=thrust::min_element(cellInfoVecs.centerCoordY.begin(),
cellInfoVecs.centerCoordY.begin()+ allocPara_m.currentActiveCellCount) ;
thrust::device_vector<double>::iterator MaxY_Itr=thrust::max_element(cellInfoVecs.centerCoordY.begin(),
cellInfoVecs.centerCoordY.begin()+ allocPara_m.currentActiveCellCount) ;
Tisu_MinX= *MinX_Itr ;
Tisu_MaxX= *MaxX_Itr ;
Tisu_MinY= *MinY_Itr ;
Tisu_MaxY= *MaxY_Itr ;
// Effective tissue radius: mean of the x and y half-extents.
Tisu_R=0.5*(0.5*(Tisu_MaxX-Tisu_MinX)+0.5*(Tisu_MaxY-Tisu_MinY)) ;
totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount
* allocPara_m.maxAllNodePerCell;
thrust:: copy (nodes->getInfoVecs().nodeIsActive.begin(),nodes->getInfoVecs().nodeIsActive.begin()+ totalNodeCountForActiveCells, signal.nodeIsActiveHost.begin());
thrust:: copy (nodes->getInfoVecs().nodeLocX.begin(),nodes->getInfoVecs().nodeLocX.begin()+ totalNodeCountForActiveCells, signal.nodeLocXHost.begin());
thrust:: copy (nodes->getInfoVecs().nodeLocY.begin(),nodes->getInfoVecs().nodeLocY.begin()+ totalNodeCountForActiveCells, signal.nodeLocYHost.begin());
thrust:: copy (cellInfoVecs.centerCoordX.begin(),cellInfoVecs.centerCoordX.begin()+allocPara_m.currentActiveCellCount, signal.cellCenterX.begin());
thrust:: copy (cellInfoVecs.centerCoordY.begin(),cellInfoVecs.centerCoordY.begin()+allocPara_m.currentActiveCellCount, signal.cellCenterY.begin());
signal.updateSignal(Tisu_MinX,Tisu_MaxX,Tisu_MinY,Tisu_MaxY,curTime,totalNodeCountForActiveCells,allocPara_m.currentActiveCellCount) ; //Ali
assert(cellInfoVecs.cell_Dpp.size()==signal.dppLevel.size());
thrust::copy(signal.dppLevel.begin(),signal.dppLevel.end(),cellInfoVecs.cell_Dpp.begin()) ;
//currentActiveCellCountOld=allocPara_m.currentActiveCellCount;
}
// On the very first call also seed cell_DppOld, then clear the flag.
if (firstTimeReadDpp) {
thrust::copy(signal.dppLevel.begin(),signal.dppLevel.end(),cellInfoVecs.cell_DppOld.begin()) ;
firstTimeReadDpp=false ;
}
}
/**
 * One step of a stretch test: recompute cell centers, grow along the
 * x-axis without adding nodes, then move the nodes.
 */
void SceCells::runStretchTest(double dt) {
this->dt = dt;
computeCenterPos();
growAlongX(false, dt);
moveNodes();
}
/**
 * Grows all cells along the x-axis: forces the growth direction to
 * (1, 0), advances growth progress, computes each cell's target length
 * and length error, stretches cells accordingly, and (optionally) adds
 * new nodes to cells scheduled to grow.
 *
 * @param isAddPt when true, nodes are added to growing cells.
 * @param d_t     time step (unused here; sub-steps use this->dt).
 */
void SceCells::growAlongX(bool isAddPt, double d_t) {
totalNodeCountForActiveCells = allocPara.currentActiveCellCount
* allocPara.maxNodeOfOneCell;
setGrowthDirXAxis();
//std::cout << "after copy grow info" << std::endl;
updateGrowthProgress();
//std::cout << "after update growth progress" << std::endl;
decideIsScheduleToGrow();
//std::cout << "after decode os schedule to grow" << std::endl;
computeCellTargetLength();
//std::cout << "after compute cell target length" << std::endl;
computeDistToCellCenter();
//std::cout << "after compute dist to center" << std::endl;
findMinAndMaxDistToCenter();
//std::cout << "after find min and max dist" << std::endl;
computeLenDiffExpCur();
//std::cout << "after compute diff " << std::endl;
stretchCellGivenLenDiff();
if (isAddPt) {
addPointIfScheduledToGrow();
}
}
// Placeholder — stress-driven growth is not implemented; body intentionally empty.
void SceCells::growWithStress(double d_t) {
}
/**
 * Copies the device-side center coordinates of all active cells to the
 * host and repackages them as a vector of CVector.
 */
std::vector<CVector> SceCells::getAllCellCenters() {
	uint nCells = allocPara_m.currentActiveCellCount;
	// One device->host transfer per coordinate axis.
	thrust::host_vector<double> hostX(nCells);
	thrust::host_vector<double> hostY(nCells);
	thrust::host_vector<double> hostZ(nCells);
	thrust::copy(cellInfoVecs.centerCoordX.begin(),
			cellInfoVecs.centerCoordX.begin() + nCells, hostX.begin());
	thrust::copy(cellInfoVecs.centerCoordY.begin(),
			cellInfoVecs.centerCoordY.begin() + nCells, hostY.begin());
	thrust::copy(cellInfoVecs.centerCoordZ.begin(),
			cellInfoVecs.centerCoordZ.begin() + nCells, hostZ.begin());
	std::vector<CVector> centers;
	centers.reserve(nCells);
	for (uint i = 0; i < nCells; i++) {
		centers.push_back(CVector(hostX[i], hostY[i], hostZ[i]));
	}
	return centers;
}
/**
 * Forces every active cell to grow along +X at the fixed speed:
 * direction = (1, 0), speed = growthAuxData.fixedGrowthSpeed.
 */
void SceCells::setGrowthDirXAxis() {
	uint nCells = allocPara.currentActiveCellCount;
	thrust::fill(cellInfoVecs.growthXDir.begin(),
			cellInfoVecs.growthXDir.begin() + nCells, 1.0);
	thrust::fill(cellInfoVecs.growthYDir.begin(),
			cellInfoVecs.growthYDir.begin() + nCells, 0.0);
	thrust::fill(cellInfoVecs.growthSpeed.begin(),
			cellInfoVecs.growthSpeed.begin() + nCells,
			growthAuxData.fixedGrowthSpeed);
}
/**
 * Returns the growth progress of every active cell as a host vector.
 * The device vector is copied to the host once, then the result is
 * range-constructed from the host copy instead of being built with an
 * element-by-element push_back loop.
 */
std::vector<double> SceCells::getGrowthProgressVec() {
	thrust::host_vector<double> growthProVec = cellInfoVecs.growthProgress;
	return std::vector<double>(growthProVec.begin(),
			growthProVec.begin() + allocPara.currentActiveCellCount);
}
/**
 * "_M"-variant of the pre-division copy: allocates the temporary
 * per-node and per-cell holding arrays sized for the dividing cells,
 * then (a) compacts (isActive, x, y) of every node belonging to a
 * dividing cell and (b) compacts each dividing cell's rank, growth
 * direction, Hertwig division axis and center.
 * NOTE(review): tmpHertwigXdir/tmpHertwigYdir are allocated with
 * nodeStorageCount elements but only toBeDivideCount entries are
 * written by the per-cell copy_if below — over-sized, not incorrect.
 */
void SceCells::copyCellsPreDivision_M() {
totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount
* allocPara_m.maxAllNodePerCell;
divAuxData.nodeStorageCount = divAuxData.toBeDivideCount
* allocPara_m.maxAllNodePerCell;
divAuxData.tmpIsActive_M = thrust::device_vector<bool>(
divAuxData.nodeStorageCount, true);
divAuxData.tmpNodePosX_M = thrust::device_vector<double>(
divAuxData.nodeStorageCount, 0.0);
divAuxData.tmpNodePosY_M = thrust::device_vector<double>(
divAuxData.nodeStorageCount, 0.0);
divAuxData.tmpCellRank_M = thrust::device_vector<uint>(
divAuxData.toBeDivideCount, 0);
divAuxData.tmpDivDirX_M = thrust::device_vector<double>(
divAuxData.toBeDivideCount, 0);
divAuxData.tmpDivDirY_M = thrust::device_vector<double>(
divAuxData.toBeDivideCount, 0);
divAuxData.tmpCenterPosX_M = thrust::device_vector<double>(
divAuxData.toBeDivideCount, 0);
divAuxData.tmpCenterPosY_M = thrust::device_vector<double>(
divAuxData.toBeDivideCount, 0);
divAuxData.tmpIsActive1_M = thrust::device_vector<bool>(
divAuxData.nodeStorageCount, false);
divAuxData.tmpXPos1_M = thrust::device_vector<double>(
divAuxData.nodeStorageCount, 0.0);
divAuxData.tmpYPos1_M = thrust::device_vector<double>(
divAuxData.nodeStorageCount, 0.0);
divAuxData.tmpIsActive2_M = thrust::device_vector<bool>(
divAuxData.nodeStorageCount, false);
divAuxData.tmpXPos2_M = thrust::device_vector<double>(
divAuxData.nodeStorageCount, 0.0);
divAuxData.tmpYPos2_M = thrust::device_vector<double>(
divAuxData.nodeStorageCount, 0.0);
//A&A
divAuxData.tmpHertwigXdir = thrust::device_vector<double>(
divAuxData.nodeStorageCount, 0.0);
divAuxData.tmpHertwigYdir = thrust::device_vector<double>(
divAuxData.nodeStorageCount, 0.0);
//A&A
// step 2 , continued // copy node info values ready for division /comment A&A
thrust::counting_iterator<uint> iStart(0);
thrust::copy_if(
thrust::make_zip_iterator(
thrust::make_tuple(
nodes->getInfoVecs().nodeIsActive.begin()
+ allocPara_m.bdryNodeCount,
nodes->getInfoVecs().nodeLocX.begin()
+ allocPara_m.bdryNodeCount,
nodes->getInfoVecs().nodeLocY.begin()
+ allocPara_m.bdryNodeCount)),
thrust::make_zip_iterator(
thrust::make_tuple(
nodes->getInfoVecs().nodeIsActive.begin()
+ allocPara_m.bdryNodeCount,
nodes->getInfoVecs().nodeLocX.begin()
+ allocPara_m.bdryNodeCount,
nodes->getInfoVecs().nodeLocY.begin()
+ allocPara_m.bdryNodeCount))
+ totalNodeCountForActiveCells,
thrust::make_permutation_iterator(cellInfoVecs.isDividing.begin(),
make_transform_iterator(iStart,
DivideFunctor(allocPara_m.maxAllNodePerCell))),
thrust::make_zip_iterator(
thrust::make_tuple(divAuxData.tmpIsActive_M.begin(),
divAuxData.tmpNodePosX_M.begin(),
divAuxData.tmpNodePosY_M.begin())), isTrue());
// step 3 , continued //copy cell info values ready for division /comment A&A
thrust::counting_iterator<uint> iBegin(0);
thrust::copy_if(
thrust::make_zip_iterator(
thrust::make_tuple(iBegin, cellInfoVecs.growthXDir.begin(),
cellInfoVecs.growthYDir.begin(),
cellInfoVecs.HertwigXdir.begin(),
cellInfoVecs.HertwigYdir.begin(),
cellInfoVecs.centerCoordX.begin(),
cellInfoVecs.centerCoordY.begin())),
thrust::make_zip_iterator(
thrust::make_tuple(iBegin, cellInfoVecs.growthXDir.begin(),
cellInfoVecs.growthYDir.begin(),
cellInfoVecs.HertwigXdir.begin(),
cellInfoVecs.HertwigYdir.begin(),
cellInfoVecs.centerCoordX.begin(),
cellInfoVecs.centerCoordY.begin()))
+ allocPara_m.currentActiveCellCount,
cellInfoVecs.isDividing.begin(),
thrust::make_zip_iterator(
thrust::make_tuple(divAuxData.tmpCellRank_M.begin(),
divAuxData.tmpDivDirX_M.begin(),
divAuxData.tmpDivDirY_M.begin(),
divAuxData.tmpHertwigXdir.begin(),
divAuxData.tmpHertwigYdir.begin(),
divAuxData.tmpCenterPosX_M.begin(),
divAuxData.tmpCenterPosY_M.begin())), isTrue());
}
/**
 * Like copyCellsPreDivision_M, but for cells entering mitosis: sizes
 * the temporary holding arrays by toEnterMitoticCount and compacts
 * (a) per-node (isActive, x, y) and (b) per-cell rank, growth
 * direction and center, with isEnteringMitotic as the stencil.
 */
void SceCells::copyCellsEnterMitotic() {
totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount
* allocPara_m.maxAllNodePerCell;
divAuxData.nodeStorageCount = divAuxData.toEnterMitoticCount
* allocPara_m.maxAllNodePerCell;
divAuxData.tmpIsActive_M = thrust::device_vector<bool>(
divAuxData.nodeStorageCount, true);
divAuxData.tmpNodePosX_M = thrust::device_vector<double>(
divAuxData.nodeStorageCount, 0.0);
divAuxData.tmpNodePosY_M = thrust::device_vector<double>(
divAuxData.nodeStorageCount, 0.0);
divAuxData.tmpCellRank_M = thrust::device_vector<uint>(
divAuxData.toEnterMitoticCount, 0);
divAuxData.tmpDivDirX_M = thrust::device_vector<double>(
divAuxData.toEnterMitoticCount, 0);
divAuxData.tmpDivDirY_M = thrust::device_vector<double>(
divAuxData.toEnterMitoticCount, 0);
divAuxData.tmpCenterPosX_M = thrust::device_vector<double>(
divAuxData.toEnterMitoticCount, 0);
divAuxData.tmpCenterPosY_M = thrust::device_vector<double>(
divAuxData.toEnterMitoticCount, 0);
divAuxData.tmpIsActive1_M = thrust::device_vector<bool>(
divAuxData.nodeStorageCount, false);
divAuxData.tmpXPos1_M = thrust::device_vector<double>(
divAuxData.nodeStorageCount, 0.0);
divAuxData.tmpYPos1_M = thrust::device_vector<double>(
divAuxData.nodeStorageCount, 0.0);
divAuxData.tmpIsActive2_M = thrust::device_vector<bool>(
divAuxData.nodeStorageCount, false);
divAuxData.tmpXPos2_M = thrust::device_vector<double>(
divAuxData.nodeStorageCount, 0.0);
divAuxData.tmpYPos2_M = thrust::device_vector<double>(
divAuxData.nodeStorageCount, 0.0);
// step 2 , continued // copy node info values ready for division /comment A&A
thrust::counting_iterator<uint> iStart(0);
thrust::copy_if(
thrust::make_zip_iterator(
thrust::make_tuple(
nodes->getInfoVecs().nodeIsActive.begin()
+ allocPara_m.bdryNodeCount,
nodes->getInfoVecs().nodeLocX.begin()
+ allocPara_m.bdryNodeCount,
nodes->getInfoVecs().nodeLocY.begin()
+ allocPara_m.bdryNodeCount)),
thrust::make_zip_iterator(
thrust::make_tuple(
nodes->getInfoVecs().nodeIsActive.begin()
+ allocPara_m.bdryNodeCount,
nodes->getInfoVecs().nodeLocX.begin()
+ allocPara_m.bdryNodeCount,
nodes->getInfoVecs().nodeLocY.begin()
+ allocPara_m.bdryNodeCount))
+ totalNodeCountForActiveCells,
thrust::make_permutation_iterator(cellInfoVecs.isEnteringMitotic.begin(),
make_transform_iterator(iStart,
DivideFunctor(allocPara_m.maxAllNodePerCell))),
thrust::make_zip_iterator(
thrust::make_tuple(divAuxData.tmpIsActive_M.begin(),
divAuxData.tmpNodePosX_M.begin(),
divAuxData.tmpNodePosY_M.begin())), isTrue());
// step 3 , continued //copy cell info values ready for division /comment A&A
thrust::counting_iterator<uint> iBegin(0);
thrust::copy_if(
thrust::make_zip_iterator(
thrust::make_tuple(iBegin, cellInfoVecs.growthXDir.begin(),
cellInfoVecs.growthYDir.begin(),
cellInfoVecs.centerCoordX.begin(),
cellInfoVecs.centerCoordY.begin())),
thrust::make_zip_iterator(
thrust::make_tuple(iBegin, cellInfoVecs.growthXDir.begin(),
cellInfoVecs.growthYDir.begin(),
cellInfoVecs.centerCoordX.begin(),
cellInfoVecs.centerCoordY.begin()))
+ allocPara_m.currentActiveCellCount,
cellInfoVecs.isEnteringMitotic.begin(),
thrust::make_zip_iterator(
thrust::make_tuple(divAuxData.tmpCellRank_M.begin(),
divAuxData.tmpDivDirX_M.begin(),
divAuxData.tmpDivDirY_M.begin(),
divAuxData.tmpCenterPosX_M.begin(),
divAuxData.tmpCenterPosY_M.begin())), isTrue());
}
// Builds the node data for the two daughter cells of every cell flagged for
// division. For each dividing cell: gathers its membrane/internal nodes,
// takes the precomputed Hertwig direction (tmpHertwigXdir/Ydir, filled by
// findHertwigAxis) as the division axis, computes two daughter centers,
// splits the membrane into two node sets, shifts internal nodes toward each
// daughter's center, and assembles the per-cell arrays later consumed by
// copyFirstCellArr_M / copySecondCellArr_M.
void SceCells::createTwoNewCellArr_M() {
// Reset the per-division scratch counters before refilling them below.
divAuxData.tmp1MemActiveCounts.clear();
divAuxData.tmp1InternalActiveCounts.clear();
divAuxData.tmp2MemActiveCounts.clear();
divAuxData.tmp2InternalActiveCounts.clear();
//divDebug();
for (uint i = 0; i < divAuxData.toBeDivideCount; i++) {
divAuxData.tmp1IntnlVec.clear();
divAuxData.tmp2IntnlVec.clear();
vector<CVector> membrNodes;
vector<CVector> intnlNodes;
obtainMembrAndIntnlNodes(i, membrNodes, intnlNodes);
CVector oldCenter = obtainCenter(i);
//A&A commented
//CVector divDir = calDivDir_MajorAxis(oldCenter, membrNodes,
// lenAlongMajorAxis);
/*CVector divDir = calDivDir_MajorAxis(oldCenter, membrNodes,
lenAlongMajorAxis);*/
// Division direction comes from the stored Hertwig axis instead of being
// recomputed from the cell's major axis (see the commented code above).
CVector divDir;
divDir.x = divAuxData.tmpHertwigXdir[i] ; //A&A
divDir.y = divAuxData.tmpHertwigYdir[i] ; //A&A
double lenAlongHertwigAxis = calLengthAlongHertwigAxis(divDir, oldCenter, membrNodes);//A&A added
//
std::vector<VecVal> tmp1Membr, tmp2Membr;
CVector cell1Center, cell2Center;
obtainTwoNewCenters(oldCenter, divDir, lenAlongHertwigAxis, cell1Center,
cell2Center);
prepareTmpVec(i, divDir, oldCenter, tmp1Membr, tmp2Membr);
processMemVec(tmp1Membr, tmp2Membr);
shiftIntnlNodesByCellCenter(cell1Center, cell2Center);
assembleVecForTwoCells(i);
}
//divDebug();
}
//A&A
// For every cell entering mitosis, computes the division (Hertwig) axis as
// the cell's major axis and stores its unit direction in
// cellInfoVecs.HertwigXdir / HertwigYdir, indexed by the cell's rank.
// Relies on copyCellsEnterMitotic() having filled divAuxData (tmpCellRank_M
// and the tmp node buffers read by obtainMembrAndIntnlNodes/obtainCenter).
void SceCells::findHertwigAxis() {
// NOTE(review): these four tmp*Counts vectors are not otherwise used in this
// function; clearing them here mirrors createTwoNewCellArr_M -- confirm it
// is intentional and not a copy-paste leftover.
divAuxData.tmp1MemActiveCounts.clear();
divAuxData.tmp1InternalActiveCounts.clear();
divAuxData.tmp2MemActiveCounts.clear();
divAuxData.tmp2InternalActiveCounts.clear();
//divDebug();
for (uint i = 0; i < divAuxData.toEnterMitoticCount; i++) {
uint cellRank = divAuxData.tmpCellRank_M[i];
vector<CVector> membrNodes;
vector<CVector> intnlNodes;
obtainMembrAndIntnlNodes(i, membrNodes, intnlNodes);
CVector oldCenter = obtainCenter(i);
double lenAlongMajorAxis;
CVector divDir = calDivDir_MajorAxis(oldCenter, membrNodes,
lenAlongMajorAxis);
cellInfoVecs.HertwigXdir[cellRank]=divDir.x ;
cellInfoVecs.HertwigYdir[cellRank]=divDir.y ;
//std::cout<<cellInfoVecs.HertwigXdir[cellRank]<<"HertwigXdir Thrust" <<std::endl;
//std::cout<<cellInfoVecs.HertwigYdir[cellRank]<<"HertwigYdir Thrust" <<std::endl;
//std::cout<<divDir.x<<"HertwigXdir " <<std::endl;
//std::cout<<divDir.y<<"HertwigYdir " <<std::endl;
}
//divDebug();
}
// Writes daughter cell #1 back into the mother cell's node slots. For each
// divided cell, copies the assembled positions/active flags from the tmp*1_M
// buffers into the global node arrays at the mother's rank, resets adhesion
// bookkeeping (nodeAdhereIndex / membrIntnlIndex to -1 via constant
// iterators), and resets the mother's growth state for a fresh cycle.
void SceCells::copyFirstCellArr_M() {
uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell;
for (uint i = 0; i < divAuxData.toBeDivideCount; i++) {
uint cellRank = divAuxData.tmpCellRank_M[i];
// Destination: the mother cell's node range in the global arrays.
uint nodeStartIndx = cellRank * maxAllNodePerCell
+ allocPara_m.bdryNodeCount;
// Source: the i-th cell-sized slice of the tmp buffers.
uint tmpStartIndx = i * maxAllNodePerCell;
uint tmpEndIndx = (i + 1) * maxAllNodePerCell;
thrust::constant_iterator<int> noAdhesion(-1), noAdhesion2(-1);
thrust::copy(
thrust::make_zip_iterator(
thrust::make_tuple(divAuxData.tmpXPos1_M.begin(),
divAuxData.tmpYPos1_M.begin(),
divAuxData.tmpIsActive1_M.begin(), noAdhesion,
noAdhesion2)) + tmpStartIndx,
thrust::make_zip_iterator(
thrust::make_tuple(divAuxData.tmpXPos1_M.begin(),
divAuxData.tmpYPos1_M.begin(),
divAuxData.tmpIsActive1_M.begin(), noAdhesion,
noAdhesion2)) + tmpEndIndx,
thrust::make_zip_iterator(
thrust::make_tuple(
nodes->getInfoVecs().nodeLocX.begin(),
nodes->getInfoVecs().nodeLocY.begin(),
nodes->getInfoVecs().nodeIsActive.begin(),
nodes->getInfoVecs().nodeAdhereIndex.begin(),
nodes->getInfoVecs().membrIntnlIndex.begin()))
+ nodeStartIndx);
cellInfoVecs.activeIntnlNodeCounts[cellRank] =
divAuxData.tmp1InternalActiveCounts[i];
cellInfoVecs.activeMembrNodeCounts[cellRank] =
divAuxData.tmp1MemActiveCounts[i];
// Fresh cell-cycle state for the (now smaller) mother cell.
cellInfoVecs.growthProgress[cellRank] = 0;
cellInfoVecs.membrGrowProgress[cellRank] = 0.0;
cellInfoVecs.isRandGrowInited[cellRank] = false;
cellInfoVecs.lastCheckPoint[cellRank] = 0;
cellInfoVecs.cell_DppOld[cellRank] = cellInfoVecs.cell_Dpp[cellRank];
}
}
// Writes daughter cell #2 into a brand-new cell slot appended after the
// currently active cells (rank = currentActiveCellCount + i). Mirrors
// copyFirstCellArr_M but additionally inherits the Dpp concentration from
// the mother cell. updateActiveCellCount_M() must run afterwards to make
// the new slots officially active.
void SceCells::copySecondCellArr_M() {
uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell;
for (uint i = 0; i < divAuxData.toBeDivideCount; i++) {
uint cellRankMother = divAuxData.tmpCellRank_M[i];
// New daughter occupies the next free rank beyond the active cells.
uint cellRank = allocPara_m.currentActiveCellCount + i;
uint nodeStartIndx = cellRank * maxAllNodePerCell
+ allocPara_m.bdryNodeCount;
uint tmpStartIndx = i * maxAllNodePerCell;
uint tmpEndIndx = (i + 1) * maxAllNodePerCell;
thrust::constant_iterator<int> noAdhesion(-1), noAdhesion2(-1);
thrust::copy(
thrust::make_zip_iterator(
thrust::make_tuple(divAuxData.tmpXPos2_M.begin(),
divAuxData.tmpYPos2_M.begin(),
divAuxData.tmpIsActive2_M.begin(), noAdhesion,
noAdhesion2)) + tmpStartIndx,
thrust::make_zip_iterator(
thrust::make_tuple(divAuxData.tmpXPos2_M.begin(),
divAuxData.tmpYPos2_M.begin(),
divAuxData.tmpIsActive2_M.begin(), noAdhesion,
noAdhesion2)) + tmpEndIndx,
thrust::make_zip_iterator(
thrust::make_tuple(
nodes->getInfoVecs().nodeLocX.begin(),
nodes->getInfoVecs().nodeLocY.begin(),
nodes->getInfoVecs().nodeIsActive.begin(),
nodes->getInfoVecs().nodeAdhereIndex.begin(),
nodes->getInfoVecs().membrIntnlIndex.begin()))
+ nodeStartIndx);
cellInfoVecs.activeIntnlNodeCounts[cellRank] =
divAuxData.tmp2InternalActiveCounts[i];
cellInfoVecs.activeMembrNodeCounts[cellRank] =
divAuxData.tmp2MemActiveCounts[i];
cellInfoVecs.growthProgress[cellRank] = 0;
cellInfoVecs.membrGrowProgress[cellRank] = 0;
cellInfoVecs.isRandGrowInited[cellRank] = false;
cellInfoVecs.lastCheckPoint[cellRank] = 0;
// Daughter inherits chemical state (Dpp) from the mother cell.
cellInfoVecs.cell_DppOld[cellRank] = cellInfoVecs.cell_Dpp[cellRankMother];
cellInfoVecs.cell_Dpp[cellRank] = cellInfoVecs.cell_Dpp[cellRankMother];
}
}
//AAMIRI
/*
void SceCells::removeCellArr_M() {
uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell;
for (uint i = 0; i < divAuxData.toBeDivideCount; i++) {
uint cellRank = divAuxData.tmpCellRank_M[i];
uint nodeStartIndx = cellRank * maxAllNodePerCell
+ allocPara_m.bdryNodeCount;
uint tmpStartIndx = i * maxAllNodePerCell;
uint tmpEndIndx = (i + 1) * maxAllNodePerCell;
thrust::constant_iterator<int> noAdhesion(-1), noAdhesion2(-1);
thrust::copy(
thrust::make_zip_iterator(
thrust::make_tuple(divAuxData.tmpXPos1_M.begin(),
divAuxData.tmpYPos1_M.begin(),
divAuxData.tmpIsActive1_M.begin(), noAdhesion,
noAdhesion2)) + tmpStartIndx,
thrust::make_zip_iterator(
thrust::make_tuple(divAuxData.tmpXPos1_M.begin(),
divAuxData.tmpYPos1_M.begin(),
divAuxData.tmpIsActive1_M.begin(), noAdhesion,
noAdhesion2)) + tmpEndIndx,
thrust::make_zip_iterator(
thrust::make_tuple(
nodes->getInfoVecs().nodeLocX.begin(),
nodes->getInfoVecs().nodeLocY.begin(),
nodes->getInfoVecs().nodeIsActive.begin(),
nodes->getInfoVecs().nodeAdhereIndex.begin(),
nodes->getInfoVecs().membrIntnlIndex.begin()))
+ nodeStartIndx);
cellInfoVecs.activeIntnlNodeCounts[cellRank] =
divAuxData.tmp1InternalActiveCounts[i];
cellInfoVecs.activeMembrNodeCounts[cellRank] =
divAuxData.tmp1MemActiveCounts[i];
cellInfoVecs.growthProgress[cellRank] = 0;
cellInfoVecs.membrGrowProgress[cellRank] = 0.0;
cellInfoVecs.isRandGrowInited[cellRank] = false;
cellInfoVecs.lastCheckPoint[cellRank] = 0;
}
}
*/
// Grows the active-cell count by the number of divisions performed this step
// (each division appended one daughter cell) and propagates the new count to
// the node container so node-level kernels see the added cells.
void SceCells::updateActiveCellCount_M() {
allocPara_m.currentActiveCellCount += divAuxData.toBeDivideCount;
nodes->setActiveCellCount(allocPara_m.currentActiveCellCount);
}
//AAMIRI
/*
void SceCells::updateActiveCellCountAfterRemoval_M() {
// NOTE(review): this disabled "removal" variant still ADDS toBeDivideCount,
// identical to updateActiveCellCount_M; a removal path should presumably
// subtract a removed-cell count -- confirm before re-enabling.
allocPara_m.currentActiveCellCount = allocPara_m.currentActiveCellCount
+ divAuxData.toBeDivideCount;
nodes->setActiveCellCount(allocPara_m.currentActiveCellCount);
}
*/
// Clears the per-cell "is dividing" flags for all currently active cells,
// so the next step's division detection starts from a clean slate.
void SceCells::markIsDivideFalse_M() {
thrust::fill(cellInfoVecs.isDividing.begin(),
cellInfoVecs.isDividing.begin()
+ allocPara_m.currentActiveCellCount, false);
}
// Applies the ForceZero functor in place over every node velocity
// (boundary nodes plus all active-cell nodes). The functor's semantics are
// defined elsewhere; given its name it presumably zeroes/clamps velocities
// -- TODO confirm against the ForceZero definition.
void SceCells::adjustNodeVel_M() {
thrust::transform(
thrust::make_zip_iterator(
thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(),
nodes->getInfoVecs().nodeVelY.begin())),
thrust::make_zip_iterator(
thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(),
nodes->getInfoVecs().nodeVelY.begin()))
+ allocPara_m.bdryNodeCount + totalNodeCountForActiveCells,
thrust::make_zip_iterator(
thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(),
nodes->getInfoVecs().nodeVelY.begin())),
ForceZero());
}
// Explicit position update: pos += f(vel, dt, Damp_Coef) for all boundary +
// active-cell nodes, using a single global damping coefficient
// (SaxpyFunctorDim2_Damp). Superseded in allComponentsMove_M by
// moveNodes_BC_M, which uses per-cell damping.
void SceCells::moveNodes_M() {
thrust::transform(
thrust::make_zip_iterator(
thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(),
nodes->getInfoVecs().nodeVelY.begin())),
thrust::make_zip_iterator(
thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(),
nodes->getInfoVecs().nodeVelY.begin()))
+ totalNodeCountForActiveCells + allocPara_m.bdryNodeCount,
thrust::make_zip_iterator(
thrust::make_tuple(nodes->getInfoVecs().nodeLocX.begin(),
nodes->getInfoVecs().nodeLocY.begin())),
thrust::make_zip_iterator(
thrust::make_tuple(nodes->getInfoVecs().nodeLocX.begin(),
nodes->getInfoVecs().nodeLocY.begin())),
//Ali SaxpyFunctorDim2(dt));
SaxpyFunctorDim2_Damp(dt,Damp_Coef)); //Ali
}
//Ali // This function is written to assigned different damping coefficients to cells, therefore the boundary cells can have more damping
// Position update with per-cell damping: each node reads its owning cell's
// Cell_Damp (node index / maxAllNodePerCell via permutation iterator) so
// boundary cells can be damped more strongly than interior ones.
// NOTE(review): the Cell_Damp lookup divides the raw node index, which only
// maps nodes to the right cell if allocPara_m.bdryNodeCount is zero (the
// iteration range includes bdryNodeCount) -- confirm.
void SceCells::moveNodes_BC_M() {
thrust::counting_iterator<uint> iBegin2(0);
uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell;
thrust::transform(
thrust::make_zip_iterator(
thrust::make_tuple(
thrust::make_permutation_iterator(
cellInfoVecs.Cell_Damp.begin(),
make_transform_iterator(iBegin2,
DivideFunctor(maxAllNodePerCell))),
nodes->getInfoVecs().nodeVelX.begin(),
nodes->getInfoVecs().nodeVelY.begin())),
thrust::make_zip_iterator(
thrust::make_tuple(
thrust::make_permutation_iterator(
cellInfoVecs.Cell_Damp.begin(),
make_transform_iterator(iBegin2,
DivideFunctor(maxAllNodePerCell))),
nodes->getInfoVecs().nodeVelX.begin(),
nodes->getInfoVecs().nodeVelY.begin()))
+ totalNodeCountForActiveCells + allocPara_m.bdryNodeCount,
thrust::make_zip_iterator(
thrust::make_tuple(nodes->getInfoVecs().nodeLocX.begin(),
nodes->getInfoVecs().nodeLocY.begin())),
thrust::make_zip_iterator(
thrust::make_tuple(nodes->getInfoVecs().nodeLocX.begin(),
nodes->getInfoVecs().nodeLocY.begin())),
SaxpyFunctorDim2_BC_Damp(dt));
}
//Ali
// Computes membrane forces for all active cells in two passes:
//  1) AddMembrForce: per-node tension (accumulated into nodeVelX/Y plus the
//     membrTension*/membrLinkRiMid* diagnostics) and the bending-force
//     components, which are only STORED into membrBendLeft/Right* here;
//  2) AddMembrBend: folds those stored bending components back into
//     nodeVelX/Y (a node's bend force depends on its neighbors', so the two
//     passes must not be fused).
// Also stamps curTime into Cell_Time for every active cell, and passes raw
// device pointers so the functors can read neighboring nodes.
void SceCells::applyMemForce_M() {
totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount
* allocPara_m.maxAllNodePerCell;
uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell;
thrust::counting_iterator<uint> iBegin(0), iBegin1(0);
//Ali
// Broadcast the current simulation time to each active cell.
thrust::fill(cellInfoVecs.Cell_Time.begin(),cellInfoVecs.Cell_Time.begin() +allocPara_m.currentActiveCellCount,curTime);
//Ali
/*
thrust::device_vector<double>::iterator MinX_Itr=thrust::min_element(nodes->getInfoVecs().nodeLocX.begin()+ allocPara_m.bdryNodeCount,
nodes->getInfoVecs().nodeLocX.begin()+ allocPara_m.bdryNodeCount+ totalNodeCountForActiveCells) ;
thrust::device_vector<double>::iterator MaxX_Itr=thrust::max_element(nodes->getInfoVecs().nodeLocX.begin()+ allocPara_m.bdryNodeCount,
nodes->getInfoVecs().nodeLocX.begin()+ allocPara_m.bdryNodeCount+ totalNodeCountForActiveCells) ;
thrust::device_vector<double>::iterator MinY_Itr=thrust::min_element(nodes->getInfoVecs().nodeLocY.begin()+ allocPara_m.bdryNodeCount,
nodes->getInfoVecs().nodeLocY.begin()+ allocPara_m.bdryNodeCount+ totalNodeCountForActiveCells) ;
thrust::device_vector<double>::iterator MaxY_Itr=thrust::max_element(nodes->getInfoVecs().nodeLocY.begin()+ allocPara_m.bdryNodeCount,
nodes->getInfoVecs().nodeLocY.begin()+ allocPara_m.bdryNodeCount+ totalNodeCountForActiveCells) ;
Tisu_MinX= *MinX_Itr ;
Tisu_MaxX= *MaxX_Itr ;
Tisu_MinY= *MinY_Itr ;
Tisu_MaxY= *MaxY_Itr ;
*/
//cout<< "# of boundary nodes"<< allocPara_m.bdryNodeCount<<endl ;
//cout<< "# of total active nodes"<<totalNodeCountForActiveCells <<endl ;
//cout<<"The minimum location in X in applyMemForce_M is="<<Tisu_MinX<< endl;
//cout<<"The maximum location in X in applyMemForce_M is="<<Tisu_MaxX<< endl;
//cout<<"The minimum location in Y in applyMemForce_M is="<<Tisu_MinY<< endl;
//cout<<"The maximum location in Y in applyMemForce_M is="<<Tisu_MaxY<< endl;
//Ali
// Raw device pointers let AddMembrForce access arbitrary neighbor nodes.
double* nodeLocXAddr = thrust::raw_pointer_cast(
&(nodes->getInfoVecs().nodeLocX[0]));
double* nodeLocYAddr = thrust::raw_pointer_cast(
&(nodes->getInfoVecs().nodeLocY[0]));
bool* nodeIsActiveAddr = thrust::raw_pointer_cast(
&(nodes->getInfoVecs().nodeIsActive[0]));
double grthPrgrCriVal_M = growthAuxData.grthPrgrCriVal_M_Ori; // for now constant //growthAuxData.grthProgrEndCPU
// - growthAuxData.prolifDecay
// * (growthAuxData.grthProgrEndCPU
// - growthAuxData.grthPrgrCriVal_M_Ori);
// Pass 1: tension + bending-component computation. Per-cell quantities are
// fanned out to nodes with permutation iterators (node idx / maxAllNodePerCell);
// node arrays are offset by bdryNodeCount.
thrust::transform(
thrust::make_zip_iterator(
thrust::make_tuple(
thrust::make_permutation_iterator(
cellInfoVecs.growthProgress.begin(),
make_transform_iterator(iBegin,
DivideFunctor(maxAllNodePerCell))),
thrust::make_permutation_iterator(
cellInfoVecs.activeMembrNodeCounts.begin(),
make_transform_iterator(iBegin,
DivideFunctor(maxAllNodePerCell))),
thrust::make_permutation_iterator(
cellInfoVecs.centerCoordX.begin(),
make_transform_iterator(iBegin,
DivideFunctor(maxAllNodePerCell))),
thrust::make_permutation_iterator(
cellInfoVecs.Cell_Time.begin(),
make_transform_iterator(iBegin,
DivideFunctor(maxAllNodePerCell))),
make_transform_iterator(iBegin,
DivideFunctor(maxAllNodePerCell)),
make_transform_iterator(iBegin,
ModuloFunctor(maxAllNodePerCell)),
nodes->getInfoVecs().nodeLocX.begin()
+ allocPara_m.bdryNodeCount,
nodes->getInfoVecs().nodeLocY.begin()
+ allocPara_m.bdryNodeCount,
nodes->getInfoVecs().nodeVelX.begin()
+ allocPara_m.bdryNodeCount,
nodes->getInfoVecs().nodeVelY.begin()
+ allocPara_m.bdryNodeCount)),
thrust::make_zip_iterator(
thrust::make_tuple(
thrust::make_permutation_iterator(
cellInfoVecs.growthProgress.begin(),
make_transform_iterator(iBegin,
DivideFunctor(maxAllNodePerCell))),
thrust::make_permutation_iterator(
cellInfoVecs.activeMembrNodeCounts.begin(),
make_transform_iterator(iBegin,
DivideFunctor(maxAllNodePerCell))),
thrust::make_permutation_iterator(
cellInfoVecs.centerCoordX.begin(),
make_transform_iterator(iBegin,
DivideFunctor(maxAllNodePerCell))),
thrust::make_permutation_iterator(
cellInfoVecs.Cell_Time.begin(),
make_transform_iterator(iBegin,
DivideFunctor(maxAllNodePerCell))),
make_transform_iterator(iBegin,
DivideFunctor(maxAllNodePerCell)),
make_transform_iterator(iBegin,
ModuloFunctor(maxAllNodePerCell)),
nodes->getInfoVecs().nodeLocX.begin()
+ allocPara_m.bdryNodeCount,
nodes->getInfoVecs().nodeLocY.begin()
+ allocPara_m.bdryNodeCount,
nodes->getInfoVecs().nodeVelX.begin()
+ allocPara_m.bdryNodeCount,
nodes->getInfoVecs().nodeVelY.begin()
+ allocPara_m.bdryNodeCount))
+ totalNodeCountForActiveCells,
thrust::make_zip_iterator(
thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(),
nodes->getInfoVecs().nodeVelY.begin(),
nodes->getInfoVecs().membrTensionMag.begin(),
nodes->getInfoVecs().membrTenMagRi.begin(),
nodes->getInfoVecs().membrLinkRiMidX.begin(),
nodes->getInfoVecs().membrLinkRiMidY.begin(),
nodes->getInfoVecs().membrBendLeftX.begin(),
nodes->getInfoVecs().membrBendLeftY.begin(),
nodes->getInfoVecs().membrBendRightX.begin(),
nodes->getInfoVecs().membrBendRightY.begin()))
+ allocPara_m.bdryNodeCount,
AddMembrForce(allocPara_m.bdryNodeCount, maxAllNodePerCell,
nodeLocXAddr, nodeLocYAddr, nodeIsActiveAddr, grthPrgrCriVal_M));
/**Ali Comment start
thrust::transform(
thrust::make_zip_iterator(
thrust::make_tuple(
thrust::make_permutation_iterator(
cellInfoVecs.activeMembrNodeCounts.begin(),
make_transform_iterator(iBegin,
DivideFunctor(maxAllNodePerCell))),
make_transform_iterator(iBegin,
DivideFunctor(maxAllNodePerCell)),
make_transform_iterator(iBegin,
ModuloFunctor(maxAllNodePerCell)),
nodes->getInfoVecs().nodeLocX.begin()
+ allocPara_m.bdryNodeCount,
nodes->getInfoVecs().nodeLocY.begin()
+ allocPara_m.bdryNodeCount,
nodes->getInfoVecs().nodeVelX.begin()
+ allocPara_m.bdryNodeCount,
nodes->getInfoVecs().nodeVelY.begin()
+ allocPara_m.bdryNodeCount)),
thrust::make_zip_iterator(
thrust::make_tuple(
thrust::make_permutation_iterator(
cellInfoVecs.activeMembrNodeCounts.begin(),
make_transform_iterator(iBegin,
DivideFunctor(maxAllNodePerCell))),
make_transform_iterator(iBegin,
DivideFunctor(maxAllNodePerCell)),
make_transform_iterator(iBegin,
ModuloFunctor(maxAllNodePerCell)),
nodes->getInfoVecs().nodeLocX.begin()
+ allocPara_m.bdryNodeCount,
nodes->getInfoVecs().nodeLocY.begin()
+ allocPara_m.bdryNodeCount,
nodes->getInfoVecs().nodeVelX.begin()
+ allocPara_m.bdryNodeCount,
nodes->getInfoVecs().nodeVelY.begin()
+ allocPara_m.bdryNodeCount))
+ totalNodeCountForActiveCells,
thrust::make_zip_iterator(
thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(),
nodes->getInfoVecs().nodeVelY.begin(),
nodes->getInfoVecs().membrTensionMag.begin(),
nodes->getInfoVecs().membrTenMagRi.begin(),
nodes->getInfoVecs().membrLinkRiMidX.begin(),
nodes->getInfoVecs().membrLinkRiMidY.begin(),
nodes->getInfoVecs().membrBendLeftX.begin(),
nodes->getInfoVecs().membrBendLeftY.begin(),
nodes->getInfoVecs().membrBendRightX.begin(),
nodes->getInfoVecs().membrBendRightY.begin()))
+ allocPara_m.bdryNodeCount,
AddMembrForce(allocPara_m.bdryNodeCount, maxAllNodePerCell,
nodeLocXAddr, nodeLocYAddr, nodeIsActiveAddr));
**/
// Ali comment end
//Ali
//Ali
// Pass 2: raw pointers into the bend-component arrays filled by pass 1.
double* bendLeftXAddr = thrust::raw_pointer_cast(
&(nodes->getInfoVecs().membrBendLeftX[0]));
double* bendLeftYAddr = thrust::raw_pointer_cast(
&(nodes->getInfoVecs().membrBendLeftY[0]));
double* bendRightXAddr = thrust::raw_pointer_cast(
&(nodes->getInfoVecs().membrBendRightX[0]));
double* bendRightYAddr = thrust::raw_pointer_cast(
&(nodes->getInfoVecs().membrBendRightY[0]));
// NOTE(review): unlike pass 1, this pass does NOT offset the node arrays by
// bdryNodeCount -- presumably valid only when bdryNodeCount is 0; confirm.
thrust::transform(
thrust::make_zip_iterator(
thrust::make_tuple(
thrust::make_permutation_iterator(
cellInfoVecs.activeMembrNodeCounts.begin(),
make_transform_iterator(iBegin1,
DivideFunctor(maxAllNodePerCell))),
make_transform_iterator(iBegin1,
DivideFunctor(maxAllNodePerCell)),
make_transform_iterator(iBegin1,
ModuloFunctor(maxAllNodePerCell)),
nodes->getInfoVecs().nodeVelX.begin(),
nodes->getInfoVecs().nodeVelY.begin())),
thrust::make_zip_iterator(
thrust::make_tuple(
thrust::make_permutation_iterator(
cellInfoVecs.activeMembrNodeCounts.begin(),
make_transform_iterator(iBegin1,
DivideFunctor(maxAllNodePerCell))),
make_transform_iterator(iBegin1,
DivideFunctor(maxAllNodePerCell)),
make_transform_iterator(iBegin1,
ModuloFunctor(maxAllNodePerCell)),
nodes->getInfoVecs().nodeVelX.begin(),
nodes->getInfoVecs().nodeVelY.begin()))
+ totalNodeCountForActiveCells,
thrust::make_zip_iterator(
thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(),
nodes->getInfoVecs().nodeVelY.begin())),
AddMembrBend(maxAllNodePerCell, nodeIsActiveAddr, bendLeftXAddr,
bendLeftYAddr, bendRightXAddr, bendRightYAddr));
}
//AAMIRI
// Decomposes per-node membrane forces (nodeF_MI_M_x/y) and external forces
// into tangential/normal components along the membrane, and computes the
// local membrane curvature, via the CalCurvatures functor. Outputs go to
// nodeF_MI_M_T/N, nodeCurvature, nodeExtForceTangent/Normal, membrDistToRi.
// Note: the local totalNodeCountForActiveCells shadows the class member of
// the same name (recomputed here, member left untouched). iBegin and
// iBegin1 are used interchangeably below; both count from 0, so the mix is
// harmless though untidy.
void SceCells::findTangentAndNormal_M() {
uint totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount
* allocPara_m.maxAllNodePerCell;
uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell;
thrust::counting_iterator<uint> iBegin(0), iBegin1(0);
// Raw pointers let CalCurvatures read neighboring node positions.
double* nodeLocXAddr = thrust::raw_pointer_cast(
&(nodes->getInfoVecs().nodeLocX[0]));
double* nodeLocYAddr = thrust::raw_pointer_cast(
&(nodes->getInfoVecs().nodeLocY[0]));
bool* nodeIsActiveAddr = thrust::raw_pointer_cast(
&(nodes->getInfoVecs().nodeIsActive[0]));
thrust::transform(
thrust::make_zip_iterator(
thrust::make_tuple(
thrust::make_permutation_iterator(
cellInfoVecs.activeMembrNodeCounts.begin(),
make_transform_iterator(iBegin,
DivideFunctor(maxAllNodePerCell))),
make_transform_iterator(iBegin,
DivideFunctor(maxAllNodePerCell)),
make_transform_iterator(iBegin1,
ModuloFunctor(maxAllNodePerCell)),
nodes->getInfoVecs().nodeF_MI_M_x.begin(), //AliE
nodes->getInfoVecs().nodeF_MI_M_y.begin(), //AliE
nodes->getInfoVecs().nodeF_MI_M_T.begin(), //AliE
nodes->getInfoVecs().nodeF_MI_M_N.begin(), //AliE
nodes->getInfoVecs().nodeCurvature.begin(),
nodes->getInfoVecs().nodeExtForceX.begin(),
nodes->getInfoVecs().nodeExtForceY.begin())),
thrust::make_zip_iterator(
thrust::make_tuple(
thrust::make_permutation_iterator(
cellInfoVecs.activeMembrNodeCounts.begin(),
make_transform_iterator(iBegin1,
DivideFunctor(maxAllNodePerCell))),
make_transform_iterator(iBegin1,
DivideFunctor(maxAllNodePerCell)),
make_transform_iterator(iBegin1,
ModuloFunctor(maxAllNodePerCell)),
nodes->getInfoVecs().nodeF_MI_M_x.begin(), //AliE
nodes->getInfoVecs().nodeF_MI_M_y.begin(), //AliE
nodes->getInfoVecs().nodeF_MI_M_T.begin(), //AliE
nodes->getInfoVecs().nodeF_MI_M_N.begin(), //AliE
nodes->getInfoVecs().nodeCurvature.begin(),
nodes->getInfoVecs().nodeExtForceX.begin(),
nodes->getInfoVecs().nodeExtForceY.begin()))
+ totalNodeCountForActiveCells,
thrust::make_zip_iterator(
thrust::make_tuple(nodes->getInfoVecs().nodeF_MI_M_T.begin(),
nodes->getInfoVecs().nodeF_MI_M_N.begin(),
nodes->getInfoVecs().nodeCurvature.begin(),
nodes->getInfoVecs().nodeExtForceTangent.begin(),
nodes->getInfoVecs().nodeExtForceNormal.begin(),
nodes->getInfoVecs().membrDistToRi.begin())),
CalCurvatures(maxAllNodePerCell, nodeIsActiveAddr, nodeLocXAddr, nodeLocYAddr));
}
// Performs laser-ablation: for every cell listed in the event, removes the
// requested node indices from the node container and decreases that cell's
// active-node count accordingly.
void SceCells::runAblationTest(AblationEvent& ablEvent) {
for (uint idx = 0; idx < ablEvent.ablationCells.size(); idx++) {
int rank = ablEvent.ablationCells[idx].cellNum;
std::vector<uint> nodesToRemove = ablEvent.ablationCells[idx].nodeNums;
// Bookkeeping first: this cell now owns fewer active nodes.
cellInfoVecs.activeNodeCountOfThisCell[rank] =
cellInfoVecs.activeNodeCountOfThisCell[rank]
- nodesToRemove.size();
nodes->removeNodes(rank, nodesToRemove);
}
}
// Recomputes every active cell's center as the mean position of its ACTIVE
// INTERNAL nodes (membrane nodes are excluded by the ActiveAndIntnl
// stencil predicate). Three stages: compact (cellRank, x, y) triples with
// copy_if, sum per cell with reduce_by_key (input is already grouped by
// cell, so equal_to on ranks segments correctly), then divide each sum by
// the cell's active internal node count.
void SceCells::computeCenterPos_M() {
totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount
* allocPara_m.maxAllNodePerCell;
thrust::counting_iterator<uint> iBegin(0);
thrust::counting_iterator<uint> countingEnd(totalNodeCountForActiveCells);
//uint totalMembrActiveNodeCount = thrust::reduce(
// cellInfoVecs.activeMembrNodeCounts.begin(),
// cellInfoVecs.activeMembrNodeCounts.begin()
// + allocPara_m.currentActiveCellCount);
uint totalIntnlActiveNodeCount = thrust::reduce(
cellInfoVecs.activeIntnlNodeCounts.begin(),
cellInfoVecs.activeIntnlNodeCounts.begin()
+ allocPara_m.currentActiveCellCount);
// Stage 1: compact (cellRank, x, y) of active internal nodes.
thrust::copy_if(
thrust::make_zip_iterator(
thrust::make_tuple(
make_transform_iterator(iBegin,
DivideFunctor(
allocPara_m.maxAllNodePerCell)),
nodes->getInfoVecs().nodeLocX.begin()
+ allocPara_m.bdryNodeCount,
nodes->getInfoVecs().nodeLocY.begin()
+ allocPara_m.bdryNodeCount)),
thrust::make_zip_iterator(
thrust::make_tuple(
make_transform_iterator(iBegin,
DivideFunctor(
allocPara_m.maxAllNodePerCell)),
nodes->getInfoVecs().nodeLocX.begin()
+ allocPara_m.bdryNodeCount,
nodes->getInfoVecs().nodeLocY.begin()
+ allocPara_m.bdryNodeCount))
+ totalNodeCountForActiveCells,
thrust::make_zip_iterator(
thrust::make_tuple(
nodes->getInfoVecs().nodeIsActive.begin(),
nodes->getInfoVecs().nodeCellType.begin()))
+ allocPara_m.bdryNodeCount,
thrust::make_zip_iterator(
thrust::make_tuple(cellNodeInfoVecs.cellRanks.begin(),
cellNodeInfoVecs.activeXPoss.begin(),
cellNodeInfoVecs.activeYPoss.begin())),
ActiveAndIntnl());
// Stage 2: per-cell (sumX, sumY) via segmented reduction on cell rank.
thrust::reduce_by_key(cellNodeInfoVecs.cellRanks.begin(),
cellNodeInfoVecs.cellRanks.begin() + totalIntnlActiveNodeCount,
thrust::make_zip_iterator(
thrust::make_tuple(cellNodeInfoVecs.activeXPoss.begin(),
cellNodeInfoVecs.activeYPoss.begin())),
cellInfoVecs.cellRanksTmpStorage.begin(),
thrust::make_zip_iterator(
thrust::make_tuple(cellInfoVecs.centerCoordX.begin(),
cellInfoVecs.centerCoordY.begin())),
thrust::equal_to<uint>(), CVec2Add());
// Stage 3: sums -> means.
thrust::transform(
thrust::make_zip_iterator(
thrust::make_tuple(cellInfoVecs.centerCoordX.begin(),
cellInfoVecs.centerCoordY.begin())),
thrust::make_zip_iterator(
thrust::make_tuple(cellInfoVecs.centerCoordX.begin(),
cellInfoVecs.centerCoordY.begin()))
+ allocPara_m.currentActiveCellCount,
cellInfoVecs.activeIntnlNodeCounts.begin(),
thrust::make_zip_iterator(
thrust::make_tuple(cellInfoVecs.centerCoordX.begin(),
cellInfoVecs.centerCoordY.begin())), CVec2Divide());
}
// Applies the tissue boundary condition: runs BC_Tissue_Damp over each
// active cell's (centerX, centerY, Cell_Damp) using the cached tissue
// bounding box (Tisu_Min/Max X/Y), so cells near the tissue edge get a
// different damping coefficient.
// NOTE(review): the output zip writes (centerCoordX, Cell_Damp) while the
// input is (centerCoordX, centerCoordY, Cell_Damp) -- if BC_Tissue_Damp
// returns an unchanged center component this overwrites centerCoordX and
// drops the Y result; centerCoordY may have been intended. Confirm against
// the BC_Tissue_Damp functor before changing.
void SceCells::BC_Imp_M() {
/*
thrust::device_vector<double>::iterator MinX_Itr=thrust::min_element(
cellInfoVecs.centerCoordX.begin(),
cellInfoVecs.centerCoordX.begin()+allocPara_m.currentActiveCellCount ) ;
thrust::device_vector<double>::iterator MaxX_Itr=thrust::max_element(
cellInfoVecs.centerCoordX.begin(),
cellInfoVecs.centerCoordX.begin()+allocPara_m.currentActiveCellCount ) ;
thrust::device_vector<double>::iterator MinY_Itr=thrust::min_element(
cellInfoVecs.centerCoordY.begin(),
cellInfoVecs.centerCoordY.begin()+allocPara_m.currentActiveCellCount ) ;
thrust::device_vector<double>::iterator MaxY_Itr=thrust::max_element(
cellInfoVecs.centerCoordY.begin(),
cellInfoVecs.centerCoordY.begin()+allocPara_m.currentActiveCellCount ) ;
MinX= *MinX_Itr ;
MaxX= *MaxX_Itr ;
MinY= *MinY_Itr ;
MaxY= *MaxY_Itr ;
*/
//cout<<"The minimum location of cell cetners in Y in BC_Imp_M is="<<Tisu_MinX<< endl;
//cout<<"The maximum location of cell centers in Y in BC_Imp_M is="<<Tisu_MaxX<< endl;
//cout<<"The minimum location of cell centers in Y in BC_Imp_M is="<<Tisu_MinY<< endl;
//cout<<"The maximum location of cell centers in Y in BC_Imp_M is="<<Tisu_MaxY<< endl;
/** thrust::transform(
thrust::make_zip_iterator(
thrust::make_tuple(cellInfoVecs.centerCoordX.begin(),
cellInfoVecs.centerCoordY.begin())
),
thrust::make_zip_iterator(
thrust::make_tuple(cellInfoVecs.centerCoordX.begin(),
cellInfoVecs.centerCoordY.begin())) + allocPara_m.currentActiveCellCount,
thrust::make_zip_iterator(
thrust::make_tuple(cellInfoVecs.centerCoordX.begin(),
cellInfoVecs.centerCoordY.begin())),
BC_Tissue_Damp(Damp_Coef)) ;
**/
int NumActCells=allocPara_m.currentActiveCellCount ;
//Ali
thrust::transform(
thrust::make_zip_iterator(
thrust::make_tuple(cellInfoVecs.centerCoordX.begin(),
cellInfoVecs.centerCoordY.begin(),
cellInfoVecs.Cell_Damp.begin())
),
thrust::make_zip_iterator(
thrust::make_tuple(cellInfoVecs.centerCoordX.begin(),
cellInfoVecs.centerCoordY.begin(),
cellInfoVecs.Cell_Damp.begin())) + allocPara_m.currentActiveCellCount,
thrust::make_zip_iterator(
thrust::make_tuple(cellInfoVecs.centerCoordX.begin(),
cellInfoVecs.Cell_Damp.begin())),
BC_Tissue_Damp(Tisu_MinX,Tisu_MaxX,Tisu_MinY,Tisu_MaxY,Damp_Coef,NumActCells)) ;
/**void SceCells::randomizeGrowth() {
thrust::counting_iterator<uint> countingBegin(0);
thrust::transform(
thrust::make_zip_iterator(
thrust::make_tuple(cellInfoVecs.centerCoordX.begin(),
cellInfoVecs.growthXDir.begin(),
cellInfoVecs.growthYDir.begin(),
cellInfoVecs.isRandGrowInited.begin(),
countingBegin)),
thrust::make_zip_iterator(
thrust::make_tuple(cellInfoVecs.centerCoordX.begin(),
cellInfoVecs.growthXDir.begin(),
cellInfoVecs.growthYDir.begin(),
cellInfoVecs.isRandGrowInited.begin(),
countingBegin)) + allocPara.currentActiveCellCount,
thrust::make_zip_iterator(
thrust::make_tuple(cellInfoVecs.growthSpeed.begin(),
cellInfoVecs.growthXDir.begin(),
cellInfoVecs.growthYDir.begin(),
cellInfoVecs.isRandGrowInited.begin())),
AssignRandIfNotInit(growthAuxData.randomGrowthSpeedMin,
growthAuxData.randomGrowthSpeedMax,
allocPara.currentActiveCellCount,
growthAuxData.randGenAuxPara));
}
**/
}
// One growth step for all active cells: refresh the node-count cache,
// randomize growth speed/direction for uninitialized cells, advance growth
// progress, decide which cells are scheduled to grow, add internal nodes to
// those cells, and fix up growth bookkeeping. Several stretch-based steps
// are intentionally disabled (commented out).
// Note: the dt parameter is unused here; the member dt is used by the
// callees -- TODO confirm this shadowing is intended.
void SceCells::growAtRandom_M(double dt) {
totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount
* allocPara_m.maxAllNodePerCell;
randomizeGrowth_M();
updateGrowthProgress_M();
decideIsScheduleToGrow_M();
//computeCellTargetLength_M();
//computeDistToCellCenter_M();
//findMinAndMaxDistToCenter_M();
//computeLenDiffExpCur_M();
//stretchCellGivenLenDiff_M();
addPointIfScheduledToGrow_M();
//decideIsScheduleToShrink_M();// AAMIRI May5
//delPointIfScheduledToGrow_M();//AAMIRI - commented out on June20
adjustGrowthInfo_M();
}
// Top-level division driver. First handles cells ENTERING mitosis (copy
// their data and compute the Hertwig division axis); then, if any cell is
// actually flagged to divide this step, builds the two daughters, writes
// them back (daughter 1 over the mother, daughter 2 into a new slot),
// bumps the active-cell count and clears the division flags.
void SceCells::divide2D_M() {
bool isDivisionPresent = decideIfGoingToDivide_M();
bool isEnteringMitotic = decideIfAnyCellEnteringMitotic() ; //A&A
//A&A
if (isEnteringMitotic){
std::cout<< "I am in EnteringMitotic"<< std::endl;
copyCellsEnterMitotic();
findHertwigAxis();
}
//A&A
if (!isDivisionPresent) {
return;
}
//aniDebug = true;
copyCellsPreDivision_M();
createTwoNewCellArr_M();
copyFirstCellArr_M();
copySecondCellArr_M();
updateActiveCellCount_M();
markIsDivideFalse_M();
//divDebug();
}
// Broadcasts each cell's growthProgress to all of that cell's node slots
// (nodeGrowPro), so per-node kernels can read their owning cell's progress;
// node k maps to cell k / maxAllNodePerCell. On the very first step after
// initialization (curTime <= InitTimeStage + dt) it also seeds
// lastCheckPoint from growthProgress so growth scheduling starts
// consistently.
void SceCells::distributeCellGrowthProgress_M() {
totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount
* allocPara_m.maxAllNodePerCell;
thrust::counting_iterator<uint> countingBegin(0);
thrust::counting_iterator<uint> countingEnd(totalNodeCountForActiveCells);
// Fan out per-cell progress to per-node storage via a permutation iterator.
thrust::copy(
thrust::make_permutation_iterator(
cellInfoVecs.growthProgress.begin(),
make_transform_iterator(countingBegin,
DivideFunctor(allocPara_m.maxAllNodePerCell))),
thrust::make_permutation_iterator(
cellInfoVecs.growthProgress.begin(),
make_transform_iterator(countingEnd,
DivideFunctor(allocPara_m.maxAllNodePerCell))),
nodes->getInfoVecs().nodeGrowPro.begin()
+ allocPara_m.bdryNodeCount);
// Fixed typo in the debug message ("vlaue" -> "value").
std::cout << "the value of init time stage in distributeCellGrowthProgress_M is"
<< InitTimeStage << std::endl;
if (curTime <= InitTimeStage + dt) //AAMIRI /A & A
thrust::copy(
cellInfoVecs.growthProgress.begin(),
cellInfoVecs.growthProgress.end(),
cellInfoVecs.lastCheckPoint.begin()
);
}
// Single integration entry point: currently delegates to the per-cell-damped
// position update (moveNodes_BC_M); the uniform-damping variant moveNodes_M
// is kept but disabled.
void SceCells::allComponentsMove_M() {
//moveNodes_M(); //Ali
moveNodes_BC_M(); //Ali
}
//Ali modified this function to introduce differential proliferation rates
// Assigns randomized growth speed to active cells via RandomizeGrow_M,
// which also receives the tissue center and radius so proliferation can
// vary spatially (differential proliferation). isRandGrowInited marks
// cells that already received a value.
// NOTE(review): seed = time(NULL) changes only once per second, so cells
// re-randomized within the same wall-clock second see the same seed --
// confirm this is acceptable for the intended stochasticity.
void SceCells::randomizeGrowth_M() {
double CntrTisuX=0.5*(Tisu_MaxX-Tisu_MinX) ;
double CntrTisuY=0.5*(Tisu_MaxY-Tisu_MinY) ;
//cout<<"The minimum location of cell cetners in Y in randomizeGrowth_M is="<<Tisu_MinX<< endl;
//cout<<"The maximum location of cell centers in Y in randomizeGrowth_M is="<<Tisu_MaxX<< endl;
//cout<<"The minimum location of cell centers in Y in randomizeGrowth_M is="<<Tisu_MinY<< endl;
//cout<<"The maximum location of cell centers in Y in randomizeGrowth_M is="<<Tisu_MaxY<< endl;
uint seed = time(NULL);
thrust::counting_iterator<uint> countingBegin(0);
thrust::transform(
thrust::make_zip_iterator(
thrust::make_tuple(cellInfoVecs.growthSpeed.begin(),
cellInfoVecs.centerCoordX.begin(),
cellInfoVecs.centerCoordY.begin(),
cellInfoVecs.isRandGrowInited.begin(),
countingBegin)),
thrust::make_zip_iterator(
thrust::make_tuple(cellInfoVecs.growthSpeed.begin(),
cellInfoVecs.centerCoordX.begin(),
cellInfoVecs.centerCoordY.begin(),
cellInfoVecs.isRandGrowInited.begin(),
countingBegin))
+ allocPara_m.currentActiveCellCount,
thrust::make_zip_iterator(
thrust::make_tuple(cellInfoVecs.growthSpeed.begin(),
cellInfoVecs.centerCoordX.begin(),
cellInfoVecs.centerCoordY.begin(),
cellInfoVecs.isRandGrowInited.begin())),
RandomizeGrow_M(CntrTisuX,CntrTisuY,Tisu_R,growthAuxData.randomGrowthSpeedMin,
growthAuxData.randomGrowthSpeedMax, seed));
}
// Advances each active cell's growth progress. Saves the previous progress
// into growthProgressOld, then applies DppGrowRegulator, which couples the
// progress update to the cell's Dpp concentration (cell_Dpp / cell_DppOld),
// its growth speed, the time step and the mitotic checkpoint threshold.
// The plain saxpy-based update is kept commented out for reference.
void SceCells::updateGrowthProgress_M() {
thrust::copy(cellInfoVecs.growthProgress.begin(),
cellInfoVecs.growthProgress.begin()
+ allocPara_m.currentActiveCellCount,
cellInfoVecs.growthProgressOld.begin());
/*
thrust::transform(cellInfoVecs.growthSpeed.begin(),
cellInfoVecs.growthSpeed.begin()
+ allocPara_m.currentActiveCellCount,
cellInfoVecs.growthProgress.begin(),
cellInfoVecs.growthProgress.begin(), SaxpyFunctorWithMaxOfOne(dt));
*/
cout << " I am trying to update growth progress" << endl ;
//double dummy=0 ;
double mitoticCheckPoint=growthAuxData.grthPrgrCriVal_M_Ori ;
thrust::transform(
thrust::make_zip_iterator(
thrust::make_tuple(cellInfoVecs.cell_Dpp.begin(),
cellInfoVecs.cell_DppOld.begin(),
cellInfoVecs.growthProgress.begin(),
cellInfoVecs.growthSpeed.begin())),
thrust::make_zip_iterator(
thrust::make_tuple(cellInfoVecs.cell_Dpp.begin(),
cellInfoVecs.cell_DppOld.begin(),
cellInfoVecs.growthProgress.begin(),
cellInfoVecs.growthSpeed.begin()))
+ allocPara_m.currentActiveCellCount,
cellInfoVecs.growthProgress.begin(),
DppGrowRegulator(dt,mitoticCheckPoint));
}
void SceCells::decideIsScheduleToGrow_M() {
thrust::transform(
thrust::make_zip_iterator(
thrust::make_tuple(cellInfoVecs.growthProgress.begin(),
cellInfoVecs.lastCheckPoint.begin())),
thrust::make_zip_iterator(
thrust::make_tuple(cellInfoVecs.growthProgress.begin(),
cellInfoVecs.lastCheckPoint.begin()))
+ allocPara_m.currentActiveCellCount,
cellInfoVecs.isScheduledToGrow.begin(),
PtCondiOp(miscPara.growThreshold));
}
//AAMIRI May5
// Flags cells for shrinking (laser-ablation experiment): isDelOp examines
// each cell's rank and center position against a hard-coded circular region
// and writes the resulting flag back into isScheduledToShrink. The current
// flag is also fed in as input, so previously scheduled cells can be
// preserved by isDelOp (exact rule is in isDelOp — TODO confirm).
void SceCells::decideIsScheduleToShrink_M() {
	// NOTE(review): ablation region is hard-coded; consider making these
	// configurable parameters.
	double laserCenterY = 25.0;
	double laserCenterX = 25.0;
	double laserRadius = 4.0;
	thrust::counting_iterator<uint> iBegin(0);
	thrust::counting_iterator<uint> iEnd(allocPara_m.currentActiveCellCount);
	thrust::transform(
			thrust::make_zip_iterator(
					thrust::make_tuple(iBegin,
							cellInfoVecs.centerCoordX.begin(),
							cellInfoVecs.centerCoordY.begin(),
							cellInfoVecs.isScheduledToShrink.begin())),
			thrust::make_zip_iterator(
					thrust::make_tuple(iEnd,
							cellInfoVecs.centerCoordX.begin()+allocPara_m.currentActiveCellCount,
							cellInfoVecs.centerCoordY.begin()+allocPara_m.currentActiveCellCount,
							cellInfoVecs.isScheduledToShrink.begin()+allocPara_m.currentActiveCellCount)),
			cellInfoVecs.isScheduledToShrink.begin(),
			isDelOp(laserCenterX, laserCenterY, laserRadius));
}
void SceCells::computeCellTargetLength_M() {
thrust::transform(cellInfoVecs.growthProgress.begin(),
cellInfoVecs.growthProgress.begin()
+ allocPara_m.currentActiveCellCount,
cellInfoVecs.expectedLength.begin(),
CompuTarLen(bioPara.cellInitLength, bioPara.cellFinalLength));
}
// For every active node, computes its distance to the owning cell's center
// measured along that cell's growth direction (CompuDist) and stores the
// per-node result in distToCenterAlongGrowDir.
// Per-cell quantities (center coords, growth direction) are gathered per
// node with permutation iterators: the node counting index is divided by
// maxAllNodePerCell to obtain the owning cell's rank.
// Node arrays are offset by bdryNodeCount so boundary nodes are skipped.
void SceCells::computeDistToCellCenter_M() {
	thrust::counting_iterator<uint> iBegin(0);
	thrust::counting_iterator<uint> iEnd(totalNodeCountForActiveCells);
	// one-past-the-last node index in the global node arrays
	uint endIndx = allocPara_m.bdryNodeCount + totalNodeCountForActiveCells;
	thrust::transform(
			thrust::make_zip_iterator(
					thrust::make_tuple(
							// cell center X, replicated for each node of the cell
							make_permutation_iterator(
									cellInfoVecs.centerCoordX.begin(),
									make_transform_iterator(iBegin,
											DivideFunctor(
													allocPara_m.maxAllNodePerCell))),
							make_permutation_iterator(
									cellInfoVecs.centerCoordY.begin(),
									make_transform_iterator(iBegin,
											DivideFunctor(
													allocPara_m.maxAllNodePerCell))),
							// growth direction of the owning cell
							make_permutation_iterator(
									cellInfoVecs.growthXDir.begin(),
									make_transform_iterator(iBegin,
											DivideFunctor(
													allocPara_m.maxAllNodePerCell))),
							make_permutation_iterator(
									cellInfoVecs.growthYDir.begin(),
									make_transform_iterator(iBegin,
											DivideFunctor(
													allocPara_m.maxAllNodePerCell))),
							nodes->getInfoVecs().nodeLocX.begin()
									+ allocPara_m.bdryNodeCount,
							nodes->getInfoVecs().nodeLocY.begin()
									+ allocPara_m.bdryNodeCount,
							nodes->getInfoVecs().nodeIsActive.begin()
									+ allocPara_m.bdryNodeCount)),
			thrust::make_zip_iterator(
					thrust::make_tuple(
							make_permutation_iterator(
									cellInfoVecs.centerCoordX.begin(),
									make_transform_iterator(iEnd,
											DivideFunctor(
													allocPara_m.maxAllNodePerCell))),
							make_permutation_iterator(
									cellInfoVecs.centerCoordY.begin(),
									make_transform_iterator(iEnd,
											DivideFunctor(
													allocPara_m.maxAllNodePerCell))),
							make_permutation_iterator(
									cellInfoVecs.growthXDir.begin(),
									make_transform_iterator(iEnd,
											DivideFunctor(
													allocPara_m.maxAllNodePerCell))),
							make_permutation_iterator(
									cellInfoVecs.growthYDir.begin(),
									make_transform_iterator(iEnd,
											DivideFunctor(
													allocPara_m.maxAllNodePerCell))),
							nodes->getInfoVecs().nodeLocX.begin() + endIndx,
							nodes->getInfoVecs().nodeLocY.begin() + endIndx,
							nodes->getInfoVecs().nodeIsActive.begin()
									+ endIndx)),
			cellNodeInfoVecs.distToCenterAlongGrowDir.begin(), CompuDist());
}
// Reduces the per-node distances-along-growth-direction to per-cell extrema:
// smallestDistance[c] = min over the cell's nodes, biggestDistance[c] = max.
// Nodes are grouped into cells by dividing the node counting index by
// maxAllNodePerCell (segments are contiguous, as reduce_by_key requires).
// `countingBegin` is presumably a member counting iterator starting at 0 —
// TODO confirm its definition.
void SceCells::findMinAndMaxDistToCenter_M() {
	// per-cell minimum distance to the center along the growth direction
	thrust::reduce_by_key(
			make_transform_iterator(countingBegin,
					DivideFunctor(allocPara_m.maxAllNodePerCell)),
			make_transform_iterator(countingBegin,
					DivideFunctor(allocPara_m.maxAllNodePerCell))
					+ totalNodeCountForActiveCells,
			cellNodeInfoVecs.distToCenterAlongGrowDir.begin(),
			cellInfoVecs.cellRanksTmpStorage.begin(),
			cellInfoVecs.smallestDistance.begin(), thrust::equal_to<uint>(),
			thrust::minimum<double>());
	// for nodes of each cell, find the maximum distance from the node to the corresponding
	// cell center along the pre-defined growth direction.
	thrust::reduce_by_key(
			make_transform_iterator(countingBegin,
					DivideFunctor(allocPara_m.maxAllNodePerCell)),
			make_transform_iterator(countingBegin,
					DivideFunctor(allocPara_m.maxAllNodePerCell))
					+ totalNodeCountForActiveCells,
			cellNodeInfoVecs.distToCenterAlongGrowDir.begin(),
			cellInfoVecs.cellRanksTmpStorage.begin(),
			cellInfoVecs.biggestDistance.begin(), thrust::equal_to<uint>(),
			thrust::maximum<double>());
}
void SceCells::computeLenDiffExpCur_M() {
thrust::transform(
thrust::make_zip_iterator(
thrust::make_tuple(cellInfoVecs.expectedLength.begin(),
cellInfoVecs.smallestDistance.begin(),
cellInfoVecs.biggestDistance.begin())),
thrust::make_zip_iterator(
thrust::make_tuple(cellInfoVecs.expectedLength.begin(),
cellInfoVecs.smallestDistance.begin(),
cellInfoVecs.biggestDistance.begin()))
+ allocPara_m.currentActiveCellCount,
cellInfoVecs.lengthDifference.begin(), CompuDiff());
}
// Applies a stretching force to nodes of cells whose current length differs
// from the expected length. For each node, ApplyStretchForce_M reads the
// node's distance-to-center along the growth direction, the owning cell's
// lengthDifference and growth direction, and the node's current velocity,
// then writes the adjusted velocity back in place (nodeVelX/nodeVelY appear
// both as input and output of the transform). The node's index within its
// cell (ModuloFunctor) lets the functor distinguish membrane vs internal
// nodes via maxMembrNodePerCell.
void SceCells::stretchCellGivenLenDiff_M() {
	uint count = allocPara_m.maxAllNodePerCell;
	uint bdry = allocPara_m.bdryNodeCount;
	uint actCount = totalNodeCountForActiveCells;
	uint all = bdry + actCount;
	thrust::counting_iterator<uint> iBegin(0);
	thrust::counting_iterator<uint> iEnd(actCount);
	thrust::transform(
			thrust::make_zip_iterator(
					thrust::make_tuple(
							cellNodeInfoVecs.distToCenterAlongGrowDir.begin(),
							// per-cell values replicated per node (index / count)
							make_permutation_iterator(
									cellInfoVecs.lengthDifference.begin(),
									make_transform_iterator(iBegin,
											DivideFunctor(count))),
							make_permutation_iterator(
									cellInfoVecs.growthXDir.begin(),
									make_transform_iterator(iBegin,
											DivideFunctor(count))),
							make_permutation_iterator(
									cellInfoVecs.growthYDir.begin(),
									make_transform_iterator(iBegin,
											DivideFunctor(count))),
							nodes->getInfoVecs().nodeVelX.begin() + bdry,
							nodes->getInfoVecs().nodeVelY.begin() + bdry,
							// node rank within its cell (index % count)
							make_transform_iterator(iBegin,
									ModuloFunctor(count)))),
			thrust::make_zip_iterator(
					thrust::make_tuple(
							cellNodeInfoVecs.distToCenterAlongGrowDir.begin()
									+ actCount,
							make_permutation_iterator(
									cellInfoVecs.lengthDifference.begin(),
									make_transform_iterator(iEnd,
											DivideFunctor(count))),
							make_permutation_iterator(
									cellInfoVecs.growthXDir.begin(),
									make_transform_iterator(iEnd,
											DivideFunctor(count))),
							make_permutation_iterator(
									cellInfoVecs.growthYDir.begin(),
									make_transform_iterator(iEnd,
											DivideFunctor(count))),
							nodes->getInfoVecs().nodeVelX.begin() + all,
							nodes->getInfoVecs().nodeVelY.begin() + all,
							make_transform_iterator(iEnd,
									ModuloFunctor(count)))),
			thrust::make_zip_iterator(
					thrust::make_tuple(
							nodes->getInfoVecs().nodeVelX.begin() + bdry,
							nodes->getInfoVecs().nodeVelY.begin() + bdry)),
			ApplyStretchForce_M(bioPara.elongationCoefficient,
					allocPara_m.maxMembrNodePerCell));
}
// Adds an internal node to every cell flagged by decideIsScheduleToGrow_M.
// AddPtOp_M writes the new node's position/activity directly into the raw
// node arrays through the device pointers stored in growthAuxData (side
// effect outside the thrust output iterator), and the transform's zip
// output updates the cell's lastCheckPoint and activeIntnlNodeCounts.
// The time-based seed presumably feeds placement randomization inside
// AddPtOp_M — TODO confirm.
void SceCells::addPointIfScheduledToGrow_M() {
	uint seed = time(NULL);
	uint activeCellCount = allocPara_m.currentActiveCellCount;
	thrust::counting_iterator<uint> iBegin(0);
	thrust::counting_iterator<uint> iEnd(activeCellCount);
	thrust::transform(
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.isScheduledToGrow.begin(),
							cellInfoVecs.activeIntnlNodeCounts.begin(),
							cellInfoVecs.centerCoordX.begin(),
							cellInfoVecs.centerCoordY.begin(), iBegin,
							cellInfoVecs.lastCheckPoint.begin())),
			thrust::make_zip_iterator(
					thrust::make_tuple(
							cellInfoVecs.isScheduledToGrow.begin()
									+ activeCellCount,
							cellInfoVecs.activeIntnlNodeCounts.begin()
									+ activeCellCount,
							cellInfoVecs.centerCoordX.begin() + activeCellCount,
							cellInfoVecs.centerCoordY.begin() + activeCellCount,
							iEnd,
							cellInfoVecs.lastCheckPoint.begin()
									+ activeCellCount)),
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.lastCheckPoint.begin(),
							cellInfoVecs.activeIntnlNodeCounts.begin())),
			AddPtOp_M(seed, miscPara.addNodeDistance, miscPara.growThreshold,
					growthAuxData.nodeXPosAddress,
					growthAuxData.nodeYPosAddress,
					growthAuxData.nodeIsActiveAddress));
}
//AAMIRI
// Removes nodes from cells scheduled to shrink (laser-ablation experiment).
// Hard-coded timeline: decideIsScheduleToShrink_M runs once inside the
// narrow window (70000.0, 70000.1), and the deletion transform runs on
// every step once curTime > 70000.0. DelPtOp_M deactivates nodes directly
// through the raw device pointers in growthAuxData and, via the zip output,
// updates membrane/internal node counts, the cell-active flag and growth
// speed.
void SceCells::delPointIfScheduledToGrow_M() {
	uint seed = time(NULL);
	uint activeCellCount = allocPara_m.currentActiveCellCount;
	thrust::counting_iterator<uint> iBegin(0);
	thrust::counting_iterator<uint> iEnd(activeCellCount);
	int timeStep = curTime/dt;
	// schedule shrinking exactly once when entering the ablation window
	if (curTime>70000.0 && curTime<70000.1){
		decideIsScheduleToShrink_M();// AAMIRI
	}
	// NOTE(review): braceless `if` intentionally governs the entire
	// thrust::transform statement below.
	if (curTime > 70000.0)
	thrust::transform(
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.isScheduledToShrink.begin(),
							cellInfoVecs.activeIntnlNodeCounts.begin(),
							cellInfoVecs.centerCoordX.begin(),
							cellInfoVecs.centerCoordY.begin(), iBegin,
							cellInfoVecs.activeMembrNodeCounts.begin(),
							cellInfoVecs.isCellActive.begin(),
							cellInfoVecs.growthSpeed.begin())),
			thrust::make_zip_iterator(
					thrust::make_tuple(
							cellInfoVecs.isScheduledToShrink.begin()
									+ activeCellCount,
							cellInfoVecs.activeIntnlNodeCounts.begin()
									+ activeCellCount,
							cellInfoVecs.centerCoordX.begin() + activeCellCount,
							cellInfoVecs.centerCoordY.begin() + activeCellCount,
							iEnd,
							cellInfoVecs.activeMembrNodeCounts.begin()
									+ activeCellCount,
							cellInfoVecs.isCellActive.begin()
									+ activeCellCount,
							cellInfoVecs.growthSpeed.begin()
									+ activeCellCount)),
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.activeMembrNodeCounts.begin(),
							cellInfoVecs.activeIntnlNodeCounts.begin(), cellInfoVecs.isCellActive.begin(),
							cellInfoVecs.growthSpeed.begin())),
			DelPtOp_M(seed, timeStep, growthAuxData.adhIndxAddr,
					growthAuxData.nodeXPosAddress,
					growthAuxData.nodeYPosAddress,
					growthAuxData.nodeIsActiveAddress));
}
bool SceCells::decideIfGoingToDivide_M() {
thrust::transform(
thrust::make_zip_iterator(
thrust::make_tuple(cellInfoVecs.growthProgress.begin(),
cellInfoVecs.activeIntnlNodeCounts.begin())),
thrust::make_zip_iterator(
thrust::make_tuple(cellInfoVecs.growthProgress.begin(),
cellInfoVecs.activeIntnlNodeCounts.begin()))
+ allocPara_m.currentActiveCellCount,
cellInfoVecs.isDividing.begin(),
CompuIsDivide_M(allocPara_m.maxIntnlNodePerCell));
// sum all bool values which indicate whether the cell is going to divide.
// toBeDivideCount is the total number of cells going to divide.
divAuxData.toBeDivideCount = thrust::reduce(cellInfoVecs.isDividing.begin(),
cellInfoVecs.isDividing.begin()
+ allocPara_m.currentActiveCellCount, (uint) (0));
if (divAuxData.toBeDivideCount > 0) {
return true;
} else {
return false;
}
}
//A&A
bool SceCells::decideIfAnyCellEnteringMitotic() {
double grthPrgrCriVal_M =growthAuxData.grthPrgrCriVal_M_Ori ; // for now constant growthAuxData.grthProgrEndCPU
// - growthAuxData.prolifDecay
// * (growthAuxData.grthProgrEndCPU
// - growthAuxData.grthPrgrCriVal_M_Ori);
thrust::transform(
thrust::make_zip_iterator(
thrust::make_tuple(cellInfoVecs.growthProgress.begin(),
cellInfoVecs.growthProgressOld.begin())),
thrust::make_zip_iterator(
thrust::make_tuple(cellInfoVecs.growthProgress.begin(),
cellInfoVecs.growthProgressOld.begin()))
+ allocPara_m.currentActiveCellCount,
cellInfoVecs.isEnteringMitotic.begin(),
CompuIsEnteringMitotic_M(grthPrgrCriVal_M));
// sum all bool values which indicate whether the cell is going to divide.
// toBeDivideCount is the total number of cells going to divide.
divAuxData.toEnterMitoticCount = thrust::reduce(cellInfoVecs.isEnteringMitotic.begin(),
cellInfoVecs.isEnteringMitotic.begin()
+ allocPara_m.currentActiveCellCount, (uint) (0));
if (divAuxData.toEnterMitoticCount > 0) {
return true;
} else {
return false;
}
}
//AAMIRI
/*
bool SceCells::decideIfGoingToRemove_M() {
thrust::transform(
thrust::make_zip_iterator(
thrust::make_tuple(cellInfoVecs.growthProgress.begin(),
cellInfoVecs.activeIntnlNodeCounts.begin())),
thrust::make_zip_iterator(
thrust::make_tuple(cellInfoVecs.growthProgress.begin(),
cellInfoVecs.activeIntnlNodeCounts.begin()))
+ allocPara_m.currentActiveCellCount,
cellInfoVecs.isRemoving.begin(),
CompuIsRemoving_M(allocPara_m.maxIntnlNodePerCell));
// sum all bool values which indicate whether the cell is going to divide.
// toBeDivideCount is the total number of cells going to divide.
divAuxData.toBeRemovingCount = thrust::reduce(cellInfoVecs.isRemoving.begin(),
cellInfoVecs.isRemoving.begin()
+ allocPara_m.currentActiveCellCount, (uint) (0));
if (divAuxData.toBeRemovingCount > 0) {
return true;
} else {
return false;
}
}
*/
// Builds animation raw data from the current device state: node positions,
// membrane links, qualifying internal links, and inter-cell adhesion bonds.
// Device node arrays are first copied to host vectors in one zipped copy;
// the per-node animation value is the membrane tension magnitude for
// membrane nodes and -1 for internal nodes. locIndexToAniIndexMap maps a
// global node index to its (deduplicated) index in the animation arrays.
AniRawData SceCells::obtainAniRawData(AnimationCriteria& aniCri) {
	uint activeCellCount = allocPara_m.currentActiveCellCount;
	uint maxNodePerCell = allocPara_m.maxAllNodePerCell;
	uint maxMemNodePerCell = allocPara_m.maxMembrNodePerCell;
	uint beginIndx = allocPara_m.bdryNodeCount;
	AniRawData rawAniData;
	//cout << "size of potential pairs = " << pairs.size() << endl;
	// unordered_map is more efficient than map, but it is a c++ 11 feature
	// and c++ 11 seems to be incompatible with Thrust.
	IndexMap locIndexToAniIndexMap;
	uint maxActiveNode = activeCellCount * maxNodePerCell;
	thrust::host_vector<double> hostTmpVectorLocX(maxActiveNode);
	thrust::host_vector<double> hostTmpVectorLocY(maxActiveNode);
	thrust::host_vector<bool> hostIsActiveVec(maxActiveNode);
	thrust::host_vector<int> hostBondVec(maxActiveNode);
	thrust::host_vector<double> hostTmpVectorTenMag(maxActiveNode);
	// single zipped device-to-host copy of all per-node data used below
	thrust::copy(
			thrust::make_zip_iterator(
					thrust::make_tuple(nodes->getInfoVecs().nodeLocX.begin(),
							nodes->getInfoVecs().nodeLocY.begin(),
							nodes->getInfoVecs().nodeIsActive.begin(),
							nodes->getInfoVecs().nodeAdhereIndex.begin(),
							nodes->getInfoVecs().membrTensionMag.begin())),
			thrust::make_zip_iterator(
					thrust::make_tuple(nodes->getInfoVecs().nodeLocX.begin(),
							nodes->getInfoVecs().nodeLocY.begin(),
							nodes->getInfoVecs().nodeIsActive.begin(),
							nodes->getInfoVecs().nodeAdhereIndex.begin(),
							nodes->getInfoVecs().membrTensionMag.begin()))
					+ maxActiveNode,
			thrust::make_zip_iterator(
					thrust::make_tuple(hostTmpVectorLocX.begin(),
							hostTmpVectorLocY.begin(),
							hostIsActiveVec.begin(),
							hostBondVec.begin(), hostTmpVectorTenMag.begin())));
	thrust::host_vector<uint> curActiveMemNodeCounts =
			cellInfoVecs.activeMembrNodeCounts;
	CVector tmpPos;
	uint index1;
	int index2;
	std::vector<BondInfo> bondInfoVec;
	double node1X, node1Y;
	double node2X, node2Y;
	double aniVal;
	// ---- collect adhesion bonds between membrane nodes of different cells
	for (uint i = 0; i < activeCellCount; i++) {
		for (uint j = 0; j < maxMemNodePerCell; j++) {
			index1 = beginIndx + i * maxNodePerCell + j;
			if (hostIsActiveVec[index1] == true) {
				index2 = hostBondVec[index1];
				// record each bond once (index2 > index1); -1 means no
				// adhesion partner. NOTE: index2 is int and index1 is uint,
				// so for index2 == -1 the first comparison converts -1 to a
				// huge unsigned value and the explicit != -1 test is what
				// actually rejects it.
				if (index2 > index1 && index2 != -1) {
					BondInfo bond;
					bond.cellRank1 = i;
					bond.pos1 = CVector(hostTmpVectorLocX[index1],
							hostTmpVectorLocY[index1], 0);
					bond.cellRank2 = (index2 - beginIndx) / maxNodePerCell;
					bond.pos2 = CVector(hostTmpVectorLocX[index2],
							hostTmpVectorLocY[index2], 0);
					bondInfoVec.push_back(bond);
				}
			}
		}
	}
	rawAniData.bondsArr = bondInfoVec;
	uint curIndex = 0;
	// ---- membrane links: connect consecutive active membrane nodes of each
	// cell (wrapping the last node back to the first)
	for (uint i = 0; i < activeCellCount; i++) {
		for (uint j = 0; j < curActiveMemNodeCounts[i]; j++) {
			index1 = beginIndx + i * maxNodePerCell + j;
			if (j == curActiveMemNodeCounts[i] - 1) {
				index2 = beginIndx + i * maxNodePerCell;
			} else {
				index2 = beginIndx + i * maxNodePerCell + j + 1;
			}
			if (hostIsActiveVec[index1] == true
					&& hostIsActiveVec[index2] == true) {
				node1X = hostTmpVectorLocX[index1];
				node1Y = hostTmpVectorLocY[index1];
				node2X = hostTmpVectorLocX[index2];
				node2Y = hostTmpVectorLocY[index2];
				// register both endpoints in the animation arrays (once each)
				IndexMap::iterator it = locIndexToAniIndexMap.find(index1);
				if (it == locIndexToAniIndexMap.end()) {
					locIndexToAniIndexMap.insert(
							std::pair<uint, uint>(index1, curIndex));
					curIndex++;
					tmpPos = CVector(node1X, node1Y, 0);
					//aniVal = hostTmpVectorNodeType[index1];
					aniVal = hostTmpVectorTenMag[index1];
					rawAniData.aniNodePosArr.push_back(tmpPos);
					rawAniData.aniNodeVal.push_back(aniVal);
				}
				it = locIndexToAniIndexMap.find(index2);
				if (it == locIndexToAniIndexMap.end()) {
					locIndexToAniIndexMap.insert(
							std::pair<uint, uint>(index2, curIndex));
					curIndex++;
					tmpPos = CVector(node2X, node2Y, 0);
					//aniVal = hostTmpVectorNodeType[index2];
					aniVal = hostTmpVectorTenMag[index2];
					rawAniData.aniNodePosArr.push_back(tmpPos);
					rawAniData.aniNodeVal.push_back(aniVal);
				}
				it = locIndexToAniIndexMap.find(index1);
				uint aniIndex1 = it->second;
				it = locIndexToAniIndexMap.find(index2);
				uint aniIndex2 = it->second;
				LinkAniData linkData;
				linkData.node1Index = aniIndex1;
				linkData.node2Index = aniIndex2;
				rawAniData.memLinks.push_back(linkData);
			}
		}
	}
	// ---- internal links: every pair of active internal nodes of a cell
	// that passes the animation distance criterion.
	// NOTE(review): unlike the loops above, these indices omit beginIndx
	// (bdryNodeCount) — confirm this is intentional (it matters only when
	// bdryNodeCount != 0).
	for (uint i = 0; i < activeCellCount; i++) {
		for (uint j = 0; j < allocPara_m.maxIntnlNodePerCell; j++) {
			for (uint k = j + 1; k < allocPara_m.maxIntnlNodePerCell; k++) {
				index1 = i * maxNodePerCell + maxMemNodePerCell + j;
				index2 = i * maxNodePerCell + maxMemNodePerCell + k;
				if (hostIsActiveVec[index1] && hostIsActiveVec[index2]) {
					node1X = hostTmpVectorLocX[index1];
					node1Y = hostTmpVectorLocY[index1];
					node2X = hostTmpVectorLocX[index2];
					node2Y = hostTmpVectorLocY[index2];
					if (aniCri.isPairQualify_M(node1X, node1Y, node2X,
							node2Y)) {
						IndexMap::iterator it = locIndexToAniIndexMap.find(
								index1);
						if (it == locIndexToAniIndexMap.end()) {
							locIndexToAniIndexMap.insert(
									std::pair<uint, uint>(index1, curIndex));
							curIndex++;
							tmpPos = CVector(node1X, node1Y, 0);
							//aniVal = hostTmpVectorNodeType[index1];
							// internal nodes carry a sentinel animation value
							aniVal = -1;
							rawAniData.aniNodePosArr.push_back(tmpPos);
							rawAniData.aniNodeVal.push_back(aniVal);
						}
						it = locIndexToAniIndexMap.find(index2);
						if (it == locIndexToAniIndexMap.end()) {
							locIndexToAniIndexMap.insert(
									std::pair<uint, uint>(index2, curIndex));
							curIndex++;
							tmpPos = CVector(node2X, node2Y, 0);
							//aniVal = hostTmpVectorNodeType[index1];
							aniVal = -1;
							rawAniData.aniNodePosArr.push_back(tmpPos);
							rawAniData.aniNodeVal.push_back(aniVal);
						}
						it = locIndexToAniIndexMap.find(index1);
						uint aniIndex1 = it->second;
						it = locIndexToAniIndexMap.find(index2);
						uint aniIndex2 = it->second;
						LinkAniData linkData;
						linkData.node1Index = aniIndex1;
						linkData.node2Index = aniIndex2;
						rawAniData.internalLinks.push_back(linkData);
					}
				}
			}
		}
	}
	return rawAniData;
}
// Builds animation raw data colored per cell: node positions, curvature,
// membrane forces (F_MI_M), external forces, Dpp level, membrane links and
// qualifying internal links. Like obtainAniRawData, but the per-node
// animation value comes from cellColors, and per-cell integrated normal
// membrane force (normalized by cellsPerimeter) plus cellsDppLevel are
// attached to every emitted node.
// BUGFIX: the per-cell force accumulator was declared as a (non-standard)
// VLA `double tmpF_MI_M_MagN_Int[activeCellCount-1]`, one element too small
// for the loops below which index 0..activeCellCount-1 — an out-of-bounds
// write/read on the last cell. Replaced with a correctly sized std::vector.
AniRawData SceCells::obtainAniRawDataGivenCellColor(vector<double>& cellColors,
		AnimationCriteria& aniCri, vector<double>& cellsPerimeter, vector <double> & cellsDppLevel) { //AliE
	cout << "I am in obtainAniRawDataGivenCellColor start"<<endl;
	uint activeCellCount = allocPara_m.currentActiveCellCount;
	uint maxNodePerCell = allocPara_m.maxAllNodePerCell;
	uint maxMemNodePerCell = allocPara_m.maxMembrNodePerCell;
	uint beginIndx = allocPara_m.bdryNodeCount;

	assert(cellColors.size() >= activeCellCount);
	assert(cellsPerimeter.size() == activeCellCount); //AliE
	AniRawData rawAniData;
	//cout << "size of potential pairs = " << pairs.size() << endl;

	// unordered_map is more efficient than map, but it is a c++ 11 feature
	// and c++ 11 seems to be incompatible with Thrust.
	IndexMap locIndexToAniIndexMap;

	uint maxActiveNode = activeCellCount * maxNodePerCell;

	thrust::host_vector<double> hostTmpVectorLocX(maxActiveNode);
	thrust::host_vector<double> hostTmpVectorLocY(maxActiveNode);
	thrust::host_vector<bool> hostIsActiveVec(maxActiveNode);
	thrust::host_vector<int> hostBondVec(maxActiveNode);
	thrust::host_vector<double> hostTmpVectorTenMag(maxActiveNode);

	thrust::host_vector<double> hostTmpVectorF_MI_M_x(maxActiveNode);//AAMIRI //AliE
	thrust::host_vector<double> hostTmpVectorF_MI_M_y(maxActiveNode);//AAMIRI //AliE
	thrust::host_vector<double> hostTmpVectorF_MI_M_T(maxActiveNode); //AliE
	thrust::host_vector<double> hostTmpVectorF_MI_M_N(maxActiveNode);//AliE
	thrust::host_vector<double> hostTmpVectorNodeCurvature(maxActiveNode);//AAMIRI
	thrust::host_vector<double> hostTmpVectorExtForceTangent(maxActiveNode);//AAMIRI
	thrust::host_vector<double> hostTmpVectorExtForceNormal(maxActiveNode);//AAMIRI

	// zipped device-to-host copy of the per-node data used below
	thrust::copy(
			thrust::make_zip_iterator(
					thrust::make_tuple(nodes->getInfoVecs().nodeLocX.begin(),
							nodes->getInfoVecs().nodeLocY.begin(),
							nodes->getInfoVecs().nodeF_MI_M_x.begin(),//AAMIRI //AliE
							nodes->getInfoVecs().nodeF_MI_M_y.begin(),//AAMIRI //AliE
							nodes->getInfoVecs().nodeCurvature.begin(),//AAMIRI
							nodes->getInfoVecs().nodeIsActive.begin(),
							nodes->getInfoVecs().nodeAdhereIndex.begin(),
							nodes->getInfoVecs().membrTensionMag.begin(),
							nodes->getInfoVecs().nodeExtForceTangent.begin(),//AAMIRI
							nodes->getInfoVecs().nodeExtForceNormal.begin())),//AAMIRI
			thrust::make_zip_iterator(
					thrust::make_tuple(nodes->getInfoVecs().nodeLocX.begin(),
							nodes->getInfoVecs().nodeLocY.begin(),
							nodes->getInfoVecs().nodeF_MI_M_x.begin(),//AAMIRI //AliE
							nodes->getInfoVecs().nodeF_MI_M_y.begin(),//AAMIRI //AliE
							nodes->getInfoVecs().nodeCurvature.begin(),//AAMIRI
							nodes->getInfoVecs().nodeIsActive.begin(),
							nodes->getInfoVecs().nodeAdhereIndex.begin(),
							nodes->getInfoVecs().membrTensionMag.begin(),
							nodes->getInfoVecs().nodeExtForceTangent.begin(),//AAMIRI
							nodes->getInfoVecs().nodeExtForceNormal.begin()))//AAMIRI
					+ maxActiveNode,
			thrust::make_zip_iterator(
					thrust::make_tuple(hostTmpVectorLocX.begin(),
							hostTmpVectorLocY.begin(),
							hostTmpVectorF_MI_M_x.begin(), hostTmpVectorF_MI_M_y.begin(),//AAMIRI
							hostTmpVectorNodeCurvature.begin(), //AAMIRI
							hostIsActiveVec.begin(),
							hostBondVec.begin(), hostTmpVectorTenMag.begin(),
							hostTmpVectorExtForceTangent.begin(), hostTmpVectorExtForceNormal.begin())));//AAMIRI

	//Copy more than 10 elements is not allowed so, I separate it
	thrust::copy(
			thrust::make_zip_iterator(
					thrust::make_tuple(
							nodes->getInfoVecs().nodeF_MI_M_T.begin(), //Ali
							nodes->getInfoVecs().nodeF_MI_M_N.begin() //Ali
							)),
			thrust::make_zip_iterator(
					thrust::make_tuple(
							nodes->getInfoVecs().nodeF_MI_M_T.begin(),//AliE
							nodes->getInfoVecs().nodeF_MI_M_N.begin() //AliE
							))
					+ maxActiveNode,
			thrust::make_zip_iterator(
					thrust::make_tuple(
							hostTmpVectorF_MI_M_T.begin(), hostTmpVectorF_MI_M_N.begin()
							)));

	thrust::host_vector<uint> curActiveMemNodeCounts =
			cellInfoVecs.activeMembrNodeCounts;
	// (removed an unused host copy of activeIntnlNodeCounts that only cost
	// an extra device-to-host transfer)

	CVector tmpPos;
	CVector tmpF_MI_M ;//AAmiri
	CVector tmpExtForce;//AAMIRI
	double tmpCurv;
	uint index1;
	int index2;
	std::vector<BondInfo> bondInfoVec;
	double node1X, node1Y;
	double node2X, node2Y;
	double node1F_MI_M_x, node1F_MI_M_y;//AAMIRI //AliE
	double nodeExtForceT, nodeExtForceN;//AAMIRI
	double aniVal;
	double aniVal2;
	// per-cell accumulated |normal membrane force| (see BUGFIX note above)
	std::vector<double> tmpF_MI_M_MagN_Int(activeCellCount, 0.0); //AliE

	//This is how the VTK file is intended to be written. First the membrane nodes are going to be written and then internal nodes.
	//loop on membrane nodes
	for (uint i = 0; i < activeCellCount; i++) {
		tmpF_MI_M_MagN_Int[i]=0.0 ;
		for (uint j = 0; j < curActiveMemNodeCounts[i]; j++) {
			index1 = beginIndx + i * maxNodePerCell + j;
			if ( hostIsActiveVec[index1]==true) {
				tmpCurv = hostTmpVectorNodeCurvature[index1];//AAMIRI
				rawAniData.aniNodeCurvature.push_back(tmpCurv);//AAMIRI
				node1F_MI_M_x= hostTmpVectorF_MI_M_x[index1]; //AliE
				node1F_MI_M_y= hostTmpVectorF_MI_M_y[index1]; //AliE
				tmpF_MI_M= CVector(node1F_MI_M_x, node1F_MI_M_y, 0.0); //AliE
				rawAniData.aniNodeF_MI_M.push_back(tmpF_MI_M); //AliE
				// accumulate |normal component| of the membrane force per cell
				// tmpF_MI_M_MagN_Int[i]=tmpF_MI_M_MagN_Int[i]+sqrt(pow(hostTmpVectorF_MI_M_x[index1],2)+pow(hostTmpVectorF_MI_M_y[index1],2)) ; //AliE
				tmpF_MI_M_MagN_Int[i]=tmpF_MI_M_MagN_Int[i]+abs(hostTmpVectorF_MI_M_N[index1]) ; //AliE

				nodeExtForceT = hostTmpVectorExtForceTangent[index1];//AAMIRI
				nodeExtForceN = hostTmpVectorExtForceNormal[index1];//AAMIRI
				tmpExtForce = CVector(nodeExtForceT, nodeExtForceN, 0.0);//AAMIRI
				rawAniData.aniNodeExtForceArr.push_back(tmpExtForce);

				rawAniData.aniNodeRank.push_back(i);//AAMIRI
			}
		}
	}

	//loop on internal nodes
	for (uint i=0; i<activeCellCount; i++){
		for (uint j = maxMemNodePerCell; j < maxNodePerCell; j++) {
			index1 = beginIndx + i * maxNodePerCell + j;
			if ( hostIsActiveVec[index1]==true ) {
				tmpCurv = hostTmpVectorNodeCurvature[index1];//AAMIRI
				rawAniData.aniNodeCurvature.push_back(tmpCurv);//AAMIRI
				node1F_MI_M_x= hostTmpVectorF_MI_M_x[index1]; //AliE
				node1F_MI_M_y= hostTmpVectorF_MI_M_y[index1]; //AliE
				tmpF_MI_M= CVector(node1F_MI_M_x, node1F_MI_M_y, 0.0); //AliE
				rawAniData.aniNodeF_MI_M.push_back(tmpF_MI_M);

				nodeExtForceT = hostTmpVectorExtForceTangent[index1];//AAMIRI
				nodeExtForceN = hostTmpVectorExtForceNormal[index1];//AAMIRI
				tmpExtForce = CVector(nodeExtForceT, nodeExtForceN, 0.0);//AAMIRI
				rawAniData.aniNodeExtForceArr.push_back(tmpExtForce);

				rawAniData.aniNodeRank.push_back(i);//AAMIRI
			}
		}
	}

	// collect adhesion bonds between membrane nodes of different cells;
	// each bond is recorded once (index2 > index1), -1 means no partner
	for (uint i = 0; i < activeCellCount; i++) {
		for (uint j = 0; j < maxMemNodePerCell; j++) {
			index1 = beginIndx + i * maxNodePerCell + j;
			if (hostIsActiveVec[index1] == true) {
				index2 = hostBondVec[index1];
				if (index2 > index1 && index2 != -1) {
					BondInfo bond;
					bond.cellRank1 = i;
					bond.pos1 = CVector(hostTmpVectorLocX[index1],
							hostTmpVectorLocY[index1], 0);
					bond.cellRank2 = (index2 - beginIndx) / maxNodePerCell;
					bond.pos2 = CVector(hostTmpVectorLocX[index2],
							hostTmpVectorLocY[index2], 0);
					bondInfoVec.push_back(bond);
				}
			}
		}
	}
	rawAniData.bondsArr = bondInfoVec;

	uint curIndex = 0;

	//loop on membrane nodes: link consecutive active membrane nodes of each
	//cell (wrapping the last node back to the first)
	for (uint i = 0; i < activeCellCount; i++) {
		for (uint j = 0; j < curActiveMemNodeCounts[i]; j++) {
			index1 = beginIndx + i * maxNodePerCell + j;
			if (j == curActiveMemNodeCounts[i] - 1) {
				index2 = beginIndx + i * maxNodePerCell;
			} else {
				index2 = beginIndx + i * maxNodePerCell + j + 1;
			}

			if (hostIsActiveVec[index1] == true
					&& hostIsActiveVec[index2] == true) {
				node1X = hostTmpVectorLocX[index1];
				node1Y = hostTmpVectorLocY[index1];
				node2X = hostTmpVectorLocX[index2];
				node2Y = hostTmpVectorLocY[index2];

				IndexMap::iterator it = locIndexToAniIndexMap.find(index1);
				if (it == locIndexToAniIndexMap.end()) {
					locIndexToAniIndexMap.insert(
							std::pair<uint, uint>(index1, curIndex));
					curIndex++;
					tmpPos = CVector(node1X, node1Y, 0);
					//aniVal = hostTmpVectorNodeType[index1];
					aniVal = cellColors[i];
					rawAniData.aniNodeF_MI_M_MagN_Int.push_back(tmpF_MI_M_MagN_Int[i]/cellsPerimeter[i]) ; //Ali added
					//aniVal2=dppLevels_Cell[i] ;
					aniVal2=cellsDppLevel[i] ;
					rawAniData.dppLevel.push_back(aniVal2) ; //Ali Added
					rawAniData.aniNodePosArr.push_back(tmpPos);
					rawAniData.aniNodeVal.push_back(aniVal);
				}

				it = locIndexToAniIndexMap.find(index2);
				if (it == locIndexToAniIndexMap.end()) {
					locIndexToAniIndexMap.insert(
							std::pair<uint, uint>(index2, curIndex));
					curIndex++;
					tmpPos = CVector(node2X, node2Y, 0);
					//aniVal = hostTmpVectorNodeType[index2];
					aniVal = cellColors[i];
					rawAniData.aniNodeF_MI_M_MagN_Int.push_back(tmpF_MI_M_MagN_Int[i]/cellsPerimeter[i]) ; //Ali Added
					//aniVal2=dppLevels_Cell[i];
					aniVal2=cellsDppLevel[i];
					rawAniData.dppLevel.push_back(aniVal2) ; //Ali Added
					rawAniData.aniNodePosArr.push_back(tmpPos);
					rawAniData.aniNodeVal.push_back(aniVal);
				}

				it = locIndexToAniIndexMap.find(index1);
				uint aniIndex1 = it->second;
				it = locIndexToAniIndexMap.find(index2);
				uint aniIndex2 = it->second;
				LinkAniData linkData;
				linkData.node1Index = aniIndex1;
				linkData.node2Index = aniIndex2;
				rawAniData.memLinks.push_back(linkData);
			}
		}
	}

	//loop on internal nodes: link each internal node to any other node of
	//the same cell that passes the animation distance criterion
	for (uint i = 0; i < activeCellCount; i++) {
//		for (uint j = 0; j < allocPara_m.maxAllNodePerCell; j++) {
		for (uint j = 0; j < allocPara_m.maxIntnlNodePerCell; j++) {
			for (uint k = 0; k < allocPara_m.maxAllNodePerCell; k++) { //Ali
			//for (uint k = j + 1; k < allocPara_m.maxIntnlNodePerCell; k++) { //Ali comment
				index1 = i * maxNodePerCell + maxMemNodePerCell + j;
				index2 = i * maxNodePerCell + k; //Ali
			//	index2 = i * maxNodePerCell + maxMemNodePerCell + k; //Ali comment

			//	if (hostIsActiveVec[index1] && hostIsActiveVec[index2]) {
				if (hostIsActiveVec[index1] && hostIsActiveVec[index2]&& index1 !=index2 ) {
					node1X = hostTmpVectorLocX[index1];
					node1Y = hostTmpVectorLocY[index1];
					node2X = hostTmpVectorLocX[index2];
					node2Y = hostTmpVectorLocY[index2];
					if (aniCri.isPairQualify_M(node1X, node1Y, node2X,
							node2Y)) {
						IndexMap::iterator it = locIndexToAniIndexMap.find(
								index1);
						if (it == locIndexToAniIndexMap.end()) {
							locIndexToAniIndexMap.insert(
									std::pair<uint, uint>(index1, curIndex));
							curIndex++;
							tmpPos = CVector(node1X, node1Y, 0);
							//aniVal = hostTmpVectorNodeType[index1];
							aniVal = cellColors[i];
							rawAniData.aniNodeF_MI_M_MagN_Int.push_back(tmpF_MI_M_MagN_Int[i]/cellsPerimeter[i]) ; //Ali Added
							//aniVal2=dppLevels_Cell[i];
							aniVal2=cellsDppLevel[i];
							rawAniData.dppLevel.push_back(aniVal2) ; //Ali Added
							rawAniData.aniNodePosArr.push_back(tmpPos);
							rawAniData.aniNodeVal.push_back(aniVal);
						}
						it = locIndexToAniIndexMap.find(index2);
						if (it == locIndexToAniIndexMap.end()) {
							locIndexToAniIndexMap.insert(
									std::pair<uint, uint>(index2, curIndex));
							curIndex++;
							tmpPos = CVector(node2X, node2Y, 0);
							//aniVal = hostTmpVectorNodeType[index1];
							aniVal = cellColors[i];
							rawAniData.aniNodeF_MI_M_MagN_Int.push_back(tmpF_MI_M_MagN_Int[i]/cellsPerimeter[i]) ; //Ali Added
							//aniVal2=dppLevels_Cell[i];
							aniVal2=cellsDppLevel[i];
							rawAniData.dppLevel.push_back(aniVal2) ; //Ali Added
							rawAniData.aniNodePosArr.push_back(tmpPos);
							rawAniData.aniNodeVal.push_back(aniVal);
						}
						it = locIndexToAniIndexMap.find(index1);
						uint aniIndex1 = it->second;
						it = locIndexToAniIndexMap.find(index2);
						uint aniIndex2 = it->second;
						LinkAniData linkData;
						linkData.node1Index = aniIndex1;
						linkData.node2Index = aniIndex2;
						rawAniData.internalLinks.push_back(linkData);
					}
				}
			}
		}
	}
	cout << "I am in obtainAniRawDataGivenCellColor end"<<endl;
	return rawAniData;
}
// Initializes per-cell state from host-side init vectors: active membrane
// node counts, active internal node counts, and initial growth progress.
// Also derives totalNodeCountForActiveCells from the number of init cells.
// Robustness fix: the growth-progress vector's size was previously
// unchecked; a mismatched vector would silently initialize the wrong number
// of cells. Now asserted consistent with the other two, matching the
// existing assert style.
void SceCells::copyInitActiveNodeCount_M(
		std::vector<uint>& initMembrActiveNodeCounts,
		std::vector<uint>& initIntnlActiveNodeCounts,
		std::vector<double> &initGrowProgVec) {
	assert(
			initMembrActiveNodeCounts.size()
					== initIntnlActiveNodeCounts.size());
	// all three init vectors must describe the same set of cells
	assert(initGrowProgVec.size() == initMembrActiveNodeCounts.size());
	totalNodeCountForActiveCells = initMembrActiveNodeCounts.size()
			* allocPara_m.maxAllNodePerCell;

	thrust::copy(initMembrActiveNodeCounts.begin(),
			initMembrActiveNodeCounts.end(),
			cellInfoVecs.activeMembrNodeCounts.begin());
	thrust::copy(initIntnlActiveNodeCounts.begin(),
			initIntnlActiveNodeCounts.end(),
			cellInfoVecs.activeIntnlNodeCounts.begin());
	thrust::copy(initGrowProgVec.begin(), initGrowProgVec.end(),
			cellInfoVecs.growthProgress.begin());
}
// Interactive debug dump: prints allocation sizes/thresholds and several
// per-cell vectors (growth progress, schedule flags, node counts, cell
// geometry, growth direction), then blocks on std::cin until the user
// enters an integer. NOTE: each element access on a thrust device vector
// performs a separate device-to-host transfer — debug use only.
void SceCells::myDebugFunction() {
	uint maxActiveNodeCount = allocPara_m.currentActiveCellCount
			* allocPara_m.maxAllNodePerCell;
	uint maxActiveCellCount = allocPara_m.currentActiveCellCount;
	std::cout << "totalNodeCountforActiveCells: "
			<< totalNodeCountForActiveCells << std::endl;
	std::cout << "maxAllNodePerCell: " << allocPara_m.maxAllNodePerCell
			<< std::endl;
	std::cout << "maxActiveCellCount: " << maxActiveCellCount << std::endl;
	std::cout << "bdryNodeCount: " << allocPara_m.bdryNodeCount << std::endl;
	std::cout << "grow threshold: " << miscPara.growThreshold << std::endl;

	std::cout << std::endl;
	// per-cell growth progress
	for (uint i = 0; i < maxActiveCellCount; i++) {
		std::cout << cellInfoVecs.growthProgress[i] << " ";
	}
	std::cout << std::endl;
	// per-cell grow-schedule flags
	for (uint i = 0; i < maxActiveCellCount; i++) {
		std::cout << cellInfoVecs.isScheduledToGrow[i] << " ";
	}
	std::cout << std::endl;
	// per-cell last growth checkpoint
	for (uint i = 0; i < maxActiveCellCount; i++) {
		std::cout << cellInfoVecs.lastCheckPoint[i] << " ";
	}
	std::cout << std::endl;
	// X velocities of active internal nodes only
	for (uint i = 0; i < maxActiveNodeCount; i++) {
		if (nodes->getInfoVecs().nodeIsActive[i]
				&& nodes->getInfoVecs().nodeCellType[i] == CellIntnl) {
			std::cout << nodes->getInfoVecs().nodeVelX[i] << " ";
		}
	}
	std::cout << std::endl;
	// per-cell active internal node counts
	for (uint i = 0; i < maxActiveCellCount; i++) {
		std::cout << cellInfoVecs.activeIntnlNodeCounts[i] << " ";
	}
	std::cout << std::endl;
	// per-cell expected (target) lengths
	for (uint i = 0; i < maxActiveCellCount; i++) {
		std::cout << cellInfoVecs.expectedLength[i] << " ";
	}
	std::cout << std::endl;
	// per-cell min / max node distance along growth direction
	for (uint i = 0; i < maxActiveCellCount; i++) {
		std::cout << cellInfoVecs.smallestDistance[i] << " ";
	}
	std::cout << std::endl;
	for (uint i = 0; i < maxActiveCellCount; i++) {
		std::cout << cellInfoVecs.biggestDistance[i] << " ";
	}
	std::cout << std::endl;
	// per-cell length differences (expected vs current)
	for (uint i = 0; i < maxActiveCellCount; i++) {
		std::cout << cellInfoVecs.lengthDifference[i] << " ";
	}
	std::cout << std::endl;
	// per-cell center coordinates
	for (uint i = 0; i < maxActiveCellCount; i++) {
		std::cout << cellInfoVecs.centerCoordX[i] << " ";
	}
	std::cout << std::endl;
	for (uint i = 0; i < maxActiveCellCount; i++) {
		std::cout << cellInfoVecs.centerCoordY[i] << " ";
	}
	std::cout << std::endl;
	// per-cell growth direction components
	for (uint i = 0; i < maxActiveCellCount; i++) {
		std::cout << cellInfoVecs.growthXDir[i] << " ";
	}
	std::cout << std::endl;
	for (uint i = 0; i < maxActiveCellCount; i++) {
		std::cout << cellInfoVecs.growthYDir[i] << " ";
	}
	std::cout << std::endl;
	// pause so the output can be inspected before the simulation continues
	int jj;
	std::cin >> jj;
}
// Debug-only routine: dumps every division-related scratch buffer in
// divAuxData to stdout, runs a couple of sanity checks on the assembled
// daughter-cell membrane positions, and then blocks on std::cin so a human
// can inspect the state before the simulation continues.
// Do not call in production runs — it stalls waiting for keyboard input.
void SceCells::divDebug() {
    std::cout << "tmpIsActive_M: ";
    for (uint i = 0; i < divAuxData.tmpIsActive_M.size(); i++) {
        std::cout << divAuxData.tmpIsActive_M[i] << " ";
    }
    std::cout << std::endl;
    std::cout << "tmpNodePosX_M: ";
    for (uint i = 0; i < divAuxData.tmpNodePosX_M.size(); i++) {
        std::cout << divAuxData.tmpNodePosX_M[i] << " ";
    }
    std::cout << std::endl;
    std::cout << "tmpNodePosY_M : ";
    for (uint i = 0; i < divAuxData.tmpNodePosY_M.size(); i++) {
        std::cout << divAuxData.tmpNodePosY_M[i] << " ";
    }
    std::cout << std::endl;
    std::cout << "tmpCellRank_M : ";
    for (uint i = 0; i < divAuxData.tmpCellRank_M.size(); i++) {
        std::cout << divAuxData.tmpCellRank_M[i] << " ";
    }
    std::cout << std::endl;
    std::cout << "tmpDivDirX_M : ";
    for (uint i = 0; i < divAuxData.tmpDivDirX_M.size(); i++) {
        std::cout << divAuxData.tmpDivDirX_M[i] << " ";
    }
    std::cout << std::endl;
    std::cout << "tmpDivDirY_M : ";
    for (uint i = 0; i < divAuxData.tmpDivDirY_M.size(); i++) {
        std::cout << divAuxData.tmpDivDirY_M[i] << " ";
    }
    std::cout << std::endl;
    std::cout << "tmpCenterPosX_M : ";
    for (uint i = 0; i < divAuxData.tmpCenterPosX_M.size(); i++) {
        std::cout << divAuxData.tmpCenterPosX_M[i] << " ";
    }
    std::cout << std::endl;
    std::cout << "tmpCenterPosY_M : ";
    for (uint i = 0; i < divAuxData.tmpCenterPosY_M.size(); i++) {
        std::cout << divAuxData.tmpCenterPosY_M[i] << " ";
    }
    std::cout << std::endl;
    std::cout << "tmpIsActive1_M : ";
    for (uint i = 0; i < divAuxData.tmpIsActive1_M.size(); i++) {
        std::cout << divAuxData.tmpIsActive1_M[i] << " ";
    }
    std::cout << std::endl;
    std::cout << "tmpXPos1_M : ";
    for (uint i = 0; i < divAuxData.tmpXPos1_M.size(); i++) {
        std::cout << divAuxData.tmpXPos1_M[i] << " ";
        // Sanity check (first cell slot only, i.e. i < maxMembrNodePerCell):
        // two consecutive active membrane nodes of daughter 1 that are more
        // than 0.1 apart in X suggest a discontinuous membrane ring; pause.
        if (i > 0 && i < allocPara_m.maxMembrNodePerCell
                && divAuxData.tmpIsActive1_M[i]
                && divAuxData.tmpIsActive1_M[i - 1]
                && fabs(divAuxData.tmpXPos1_M[i] - divAuxData.tmpXPos1_M[i - 1])
                        > 0.1) {
            std::cout << "11111111111111111111111, " << i << std::endl;
            int jj;
            cin >> jj;
        }
    }
    std::cout << std::endl;
    // Cross-check: X positions as currently stored on the device, indexed by
    // the recorded cell ranks, for comparison with tmpXPos1_M above.
    std::cout << "XPos1_onDevice : ";
    for (uint i = 0; i < divAuxData.tmpCellRank_M.size(); i++) {
        for (uint j = 0; j < allocPara_m.maxAllNodePerCell; j++) {
            uint index = divAuxData.tmpCellRank_M[i]
                    * allocPara_m.maxAllNodePerCell + j;
            std::cout << nodes->getInfoVecs().nodeLocX[index] << " ";
        }
    }
    std::cout << std::endl;
    std::cout << "tmpYPos1_M : ";
    for (uint i = 0; i < divAuxData.tmpYPos1_M.size(); i++) {
        std::cout << divAuxData.tmpYPos1_M[i] << " ";
    }
    std::cout << std::endl;
    std::cout << "tmpIsActive2_M: ";
    for (uint i = 0; i < divAuxData.tmpIsActive2_M.size(); i++) {
        std::cout << divAuxData.tmpIsActive2_M[i] << " ";
    }
    std::cout << std::endl;
    std::cout << "tmpXPos2_M : ";
    for (uint i = 0; i < divAuxData.tmpXPos2_M.size(); i++) {
        std::cout << divAuxData.tmpXPos2_M[i] << " ";
        // Same discontinuity check as above, for daughter cell 2.
        if (i > 0 && i < allocPara_m.maxMembrNodePerCell
                && divAuxData.tmpIsActive2_M[i]
                && divAuxData.tmpIsActive2_M[i - 1]
                && fabs(divAuxData.tmpXPos2_M[i] - divAuxData.tmpXPos2_M[i - 1])
                        > 0.1) {
            std::cout << "2222222222222222222, " << i << std::endl;
            int jj;
            cin >> jj;
        }
    }
    std::cout << std::endl;
    std::cout << "tmpYPos2_M : ";
    for (uint i = 0; i < divAuxData.tmpYPos2_M.size(); i++) {
        std::cout << divAuxData.tmpYPos2_M[i] << " ";
    }
    std::cout << std::endl;
    std::cout << "tmp1InternalActiveCounts: ";
    for (uint i = 0; i < divAuxData.tmp1InternalActiveCounts.size(); i++) {
        std::cout << divAuxData.tmp1InternalActiveCounts[i] << " ";
    }
    std::cout << std::endl;
    std::cout << "tmp2InternalActiveCounts: ";
    for (uint i = 0; i < divAuxData.tmp2InternalActiveCounts.size(); i++) {
        std::cout << divAuxData.tmp2InternalActiveCounts[i] << " ";
    }
    std::cout << std::endl;
    std::cout << "tmp1MemActiveCounts: ";
    for (uint i = 0; i < divAuxData.tmp1MemActiveCounts.size(); i++) {
        std::cout << divAuxData.tmp1MemActiveCounts[i] << " ";
    }
    std::cout << std::endl;
    std::cout << "tmp2MemActiveCounts: ";
    for (uint i = 0; i < divAuxData.tmp2MemActiveCounts.size(); i++) {
        std::cout << divAuxData.tmp2MemActiveCounts[i] << " ";
    }
    std::cout << std::endl;
    // Pause until the user types something, so the dump can be read.
    int jj;
    std::cin >> jj;
}
// For every currently-active cell whose isScheduledToGrow flag is set, applies
// the AdjustGrowth functor to the tuple (activeIntnlNodeCount, growthProgress,
// lastCheckPoint) and writes the result back to (isScheduledToGrow,
// growthProgress, lastCheckPoint). halfMax is half the per-cell internal-node
// capacity; presumably AdjustGrowth uses it as a node-count threshold —
// confirm against the functor's definition.
void SceCells::adjustGrowthInfo_M() {
    uint halfMax = allocPara_m.maxIntnlNodePerCell / 2;
    thrust::transform_if(
            // input: per-cell (node count, growth progress, last checkpoint)
            thrust::make_zip_iterator(
                    thrust::make_tuple(
                            cellInfoVecs.activeIntnlNodeCounts.begin(),
                            cellInfoVecs.growthProgress.begin(),
                            cellInfoVecs.lastCheckPoint.begin())),
            thrust::make_zip_iterator(
                    thrust::make_tuple(
                            cellInfoVecs.activeIntnlNodeCounts.begin(),
                            cellInfoVecs.growthProgress.begin(),
                            cellInfoVecs.lastCheckPoint.begin()))
                    + allocPara_m.currentActiveCellCount,
            // stencil: only cells scheduled to grow are transformed
            cellInfoVecs.isScheduledToGrow.begin(),
            // output: flag plus (possibly) updated progress/checkpoint
            thrust::make_zip_iterator(
                    thrust::make_tuple(cellInfoVecs.isScheduledToGrow.begin(),
                            cellInfoVecs.growthProgress.begin(),
                            cellInfoVecs.lastCheckPoint.begin())),
            AdjustGrowth(halfMax), thrust::identity<bool>());
}
// Packs the raw animation arrays into a VtkAnimationData record:
// one PointAniData per node, followed by internal links and membrane links
// appended into a single link list. Arrows are disabled in the output.
VtkAnimationData SceCells::outputVtkData(AniRawData& rawAniData,
        AnimationCriteria& aniCri) {
    VtkAnimationData animation;
    uint pointCount = rawAniData.aniNodePosArr.size();
    for (uint idx = 0; idx < pointCount; idx++) {
        PointAniData point;
        point.pos = rawAniData.aniNodePosArr[idx];
        point.F_MI_M_MagN_Int = rawAniData.aniNodeF_MI_M_MagN_Int[idx];
        point.dppLevel1 = rawAniData.dppLevel[idx];
        point.F_MI_M = rawAniData.aniNodeF_MI_M[idx];
        point.colorScale = rawAniData.aniNodeVal[idx];
        point.colorScale2 = rawAniData.aniNodeCurvature[idx];
        point.rankScale = rawAniData.aniNodeRank[idx];
        point.extForce = rawAniData.aniNodeExtForceArr[idx];
        animation.pointsAniData.push_back(point);
    }
    // Internal links first, then membrane links — both go into linksAniData.
    for (uint idx = 0; idx < rawAniData.internalLinks.size(); idx++) {
        animation.linksAniData.push_back(rawAniData.internalLinks[idx]);
    }
    for (uint idx = 0; idx < rawAniData.memLinks.size(); idx++) {
        animation.linksAniData.push_back(rawAniData.memLinks[idx]);
    }
    animation.isArrowIncluded = false;
    return animation;
}
// Copies host-side simulation constants into GPU __constant__ symbols so
// device kernels can read them without passing kernel arguments.
// Scalars come from the global configuration and from the already-initialized
// membrane parameter struct (membrPara); each 5-element array holds a
// potential parameter set {U0, V0, k1, k2, effective range}.
// NOTE(review): hipMemcpyToSymbol return codes are not checked, matching the
// file's existing style.
void SceCells::copyToGPUConstMem() {
    double hostPi = acos(-1.0);
    double hostMinLength =
            globalConfigVars.getConfigValue("MinLength").toDouble();
    hipMemcpyToSymbol(minLength, &hostMinLength, sizeof(double));
    double hostMinDivisor =
            globalConfigVars.getConfigValue("MinDivisor").toDouble();
    hipMemcpyToSymbol(minDivisor, &hostMinDivisor, sizeof(double));
    // Membrane mechanics constants staged in membrPara.
    hipMemcpyToSymbol(membrEquLen, &membrPara.membrEquLenCPU, sizeof(double));
    hipMemcpyToSymbol(membrStiff, &membrPara.membrStiffCPU, sizeof(double));
    hipMemcpyToSymbol(membrStiff_Mitotic, &membrPara.membrStiff_Mitotic,
            sizeof(double));
    hipMemcpyToSymbol(pI, &hostPi, sizeof(double));
    hipMemcpyToSymbol(bendCoeff, &membrPara.membrBendCoeff, sizeof(double));
    hipMemcpyToSymbol(bendCoeff_Mitotic, &membrPara.membrBendCoeff_Mitotic,
            sizeof(double));
    hipMemcpyToSymbol(F_Ext_Incline_M2, &membrPara.F_Ext_Incline,
            sizeof(double));
    // Per-cell node capacity limits.
    uint hostMaxAllNode = globalConfigVars.getConfigValue(
            "MaxAllNodeCountPerCell").toInt();
    uint hostMaxMembrNode = globalConfigVars.getConfigValue(
            "MaxMembrNodeCountPerCell").toInt();
    uint hostMaxIntnlNode = globalConfigVars.getConfigValue(
            "MaxIntnlNodeCountPerCell").toInt();
    hipMemcpyToSymbol(maxAllNodePerCell, &hostMaxAllNode, sizeof(uint));
    hipMemcpyToSymbol(maxMembrPerCell, &hostMaxMembrNode, sizeof(uint));
    hipMemcpyToSymbol(maxIntnlPerCell, &hostMaxIntnlNode, sizeof(uint));
    // Internal-boundary interaction parameters.
    double intnlBHost[5];
    intnlBHost[0] = globalConfigVars.getConfigValue("SceIntnlB_U0").toDouble();
    intnlBHost[1] = globalConfigVars.getConfigValue("SceIntnlB_V0").toDouble();
    intnlBHost[2] = globalConfigVars.getConfigValue("SceIntnlB_k1").toDouble();
    intnlBHost[3] = globalConfigVars.getConfigValue("SceIntnlB_k2").toDouble();
    intnlBHost[4] = globalConfigVars.getConfigValue(
            "IntnlBEffectRange").toDouble();
    // Intra-cell interaction parameters.
    double intraHost[5];
    intraHost[0] = globalConfigVars.getConfigValue("IntraCell_U0").toDouble();
    intraHost[1] = globalConfigVars.getConfigValue("IntraCell_V0").toDouble();
    intraHost[2] = globalConfigVars.getConfigValue("IntraCell_k1").toDouble();
    intraHost[3] = globalConfigVars.getConfigValue("IntraCell_k2").toDouble();
    intraHost[4] = globalConfigVars.getConfigValue(
            "IntraEffectRange").toDouble();
    // Intra-cell interaction parameters used during division.
    double intraDivHost[5];
    intraDivHost[0] = globalConfigVars.getConfigValue(
            "IntraCell_U0_Div").toDouble();
    intraDivHost[1] = globalConfigVars.getConfigValue(
            "IntraCell_V0_Div").toDouble();
    intraDivHost[2] = globalConfigVars.getConfigValue(
            "IntraCell_k1_Div").toDouble();
    intraDivHost[3] = globalConfigVars.getConfigValue(
            "IntraCell_k2_Div").toDouble();
    intraDivHost[4] = globalConfigVars.getConfigValue(
            "IntraDivEffectRange").toDouble();
    hipMemcpyToSymbol(grthPrgrCriEnd_M, &growthAuxData.grthProgrEndCPU,
            sizeof(double));
    //hipMemcpyToSymbol(grthPrgrCriVal_M, &growthPrgrCriVal, sizeof(double));
    hipMemcpyToSymbol(sceIB_M, intnlBHost, 5 * sizeof(double));
    hipMemcpyToSymbol(sceII_M, intraHost, 5 * sizeof(double));
    hipMemcpyToSymbol(sceIIDiv_M, intraDivHost, 5 * sizeof(double));
    // Internal-boundary interaction parameters used during division.
    double intnlBDivHost[5];
    intnlBDivHost[0] = globalConfigVars.getConfigValue(
            "SceIntnlB_U0_Div").toDouble();
    intnlBDivHost[1] = globalConfigVars.getConfigValue(
            "SceIntnlB_V0_Div").toDouble();
    intnlBDivHost[2] = globalConfigVars.getConfigValue(
            "SceIntnlB_k1_Div").toDouble();
    intnlBDivHost[3] = globalConfigVars.getConfigValue(
            "SceIntnlB_k2_Div").toDouble();
    intnlBDivHost[4] = globalConfigVars.getConfigValue(
            "IntnlBDivEffectRange").toDouble();
    hipMemcpyToSymbol(sceIBDiv_M, intnlBDivHost, 5 * sizeof(double));
}
// Runs one membrane-growth step for all active cells:
//   1) compute per-cell membrane growth speed from membrane tension,
//   2) adjust the speed based on membrane/internal node counts,
//   3) decide which cells insert a new membrane node this step,
//   4) perform the (expensive) node insertion.
void SceCells::handleMembrGrowth_M() {
    // figure out membr growth speed
    calMembrGrowSpeed_M();
    // figure out which cells will add new point
    adjustMembrGrowSpeed_M();
    decideIfAddMembrNode_M();
    // add membr nodes
    addMembrNodes_M();
    //membrDebug();
}
// Computes the membrane growth speed of every active cell from membrane
// tension. Growth coefficient/limit decay with growthAuxData.prolifDecay.
// Uses two segmented reductions keyed by cell rank (node index / nodes per
// cell): one to find each cell's maximum-tension edge plus its bookkeeping
// info, one to sum tension magnitudes, which is then averaged over the active
// membrane node count and mapped to a capped-linear growth speed.
void SceCells::calMembrGrowSpeed_M() {
    membrPara.membrGrowCoeff = growthAuxData.prolifDecay
            * membrPara.membrGrowCoeff_Ori;
    membrPara.membrGrowLimit = growthAuxData.prolifDecay
            * membrPara.membrGrowLimit_Ori;
    // reduce_by_key, find value of max tension and their index
    thrust::counting_iterator<uint> iBegin(0);
    uint maxNPerCell = allocPara_m.maxAllNodePerCell;
    // Per-cell max: MaxWInfo keeps the tuple with the largest membrTenMagRi,
    // carrying along its node index, midpoint coordinates, and distance.
    thrust::reduce_by_key(
            make_transform_iterator(iBegin, DivideFunctor(maxNPerCell)),
            make_transform_iterator(iBegin, DivideFunctor(maxNPerCell))
                    + totalNodeCountForActiveCells,
            thrust::make_zip_iterator(
                    thrust::make_tuple(
                            nodes->getInfoVecs().membrTenMagRi.begin(),
                            make_transform_iterator(iBegin,
                                    ModuloFunctor(maxNPerCell)),
                            nodes->getInfoVecs().membrLinkRiMidX.begin(),
                            nodes->getInfoVecs().membrLinkRiMidY.begin(),
                            nodes->getInfoVecs().membrDistToRi.begin())),
            cellInfoVecs.cellRanksTmpStorage.begin(),
            thrust::make_zip_iterator(
                    thrust::make_tuple(cellInfoVecs.maxTenRiVec.begin(),
                            cellInfoVecs.maxTenIndxVec.begin(),
                            cellInfoVecs.maxTenRiMidXVec.begin(),
                            cellInfoVecs.maxTenRiMidYVec.begin(),
                            cellInfoVecs.maxDistToRiVec.begin())),
            thrust::equal_to<uint>(), MaxWInfo());
    // Per-cell sum of tension magnitudes.
    thrust::reduce_by_key(
            make_transform_iterator(iBegin, DivideFunctor(maxNPerCell)),
            make_transform_iterator(iBegin, DivideFunctor(maxNPerCell))
                    + totalNodeCountForActiveCells,
            nodes->getInfoVecs().membrTensionMag.begin(),
            cellInfoVecs.cellRanksTmpStorage.begin(),
            cellInfoVecs.aveTension.begin(), thrust::equal_to<uint>(),
            thrust::plus<double>());
    // Average tension = summed tension / active membrane node count.
    thrust::transform(cellInfoVecs.aveTension.begin(),
            cellInfoVecs.aveTension.begin()
                    + allocPara_m.currentActiveCellCount,
            cellInfoVecs.activeMembrNodeCounts.begin(),
            cellInfoVecs.aveTension.begin(), thrust::divides<double>());
    // linear relationship with highest tension; capped by a given value
    thrust::transform(cellInfoVecs.aveTension.begin(),
            cellInfoVecs.aveTension.begin()
                    + allocPara_m.currentActiveCellCount,
            cellInfoVecs.membrGrowSpeed.begin(),
            MultiWithLimit(membrPara.membrGrowCoeff, membrPara.membrGrowLimit));
}
// Rescales each active cell's membrane growth speed based on its current
// membrane and internal node counts, via the AdjustMembrGrow functor
// parameterized by the growth constant and the initial node counts.
void SceCells::adjustMembrGrowSpeed_M() {
    thrust::transform(
            thrust::make_zip_iterator(
                    thrust::make_tuple(
                            cellInfoVecs.activeMembrNodeCounts.begin(),
                            cellInfoVecs.activeIntnlNodeCounts.begin())),
            thrust::make_zip_iterator(
                    thrust::make_tuple(
                            cellInfoVecs.activeMembrNodeCounts.begin(),
                            cellInfoVecs.activeIntnlNodeCounts.begin()))
                    + allocPara_m.currentActiveCellCount,
            cellInfoVecs.membrGrowSpeed.begin(),
            AdjustMembrGrow(membrPara.growthConst_N, membrPara.initMembrCt_N,
                    membrPara.initIntnlCt_N));
}
// Advances each cell's membrane growth progress by membrGrowSpeed * dt,
// then decides per cell whether a membrane node should be inserted this
// step (MemGrowFunc also resets/updates the progress it writes back).
void SceCells::decideIfAddMembrNode_M() {
    // decide if add membrane node given current active node count and
    // membr growth progress
    uint curActCellCt = allocPara_m.currentActiveCellCount;
    // progress += speed * dt (SaxpyFunctor), in place.
    thrust::transform(cellInfoVecs.membrGrowSpeed.begin(),
            cellInfoVecs.membrGrowSpeed.begin() + curActCellCt,
            cellInfoVecs.membrGrowProgress.begin(),
            cellInfoVecs.membrGrowProgress.begin(), SaxpyFunctor(dt));
    uint maxMembrNode = allocPara_m.maxMembrNodePerCell;
    /**Ali thrust::transform(
     thrust::make_zip_iterator(
     thrust::make_tuple(cellInfoVecs.membrGrowProgress.begin(),
     cellInfoVecs.activeMembrNodeCounts.begin())),
     thrust::make_zip_iterator(
     thrust::make_tuple(cellInfoVecs.membrGrowProgress.begin(),
     cellInfoVecs.activeMembrNodeCounts.begin()))
     + curActCellCt,
     thrust::make_zip_iterator(
     thrust::make_tuple(cellInfoVecs.isMembrAddingNode.begin(),
     cellInfoVecs.membrGrowProgress.begin())),
     MemGrowFunc(maxMembrNode));
     */
    // Current variant additionally feeds maxDistToRiVec (the longest
    // membrane edge found in calMembrGrowSpeed_M) into the decision.
    thrust::transform(
            thrust::make_zip_iterator(
                    thrust::make_tuple(cellInfoVecs.activeMembrNodeCounts.begin(),
                            cellInfoVecs.membrGrowProgress.begin(),cellInfoVecs.maxDistToRiVec.begin())),
            thrust::make_zip_iterator(
                    thrust::make_tuple(cellInfoVecs.activeMembrNodeCounts.begin(),
                            cellInfoVecs.membrGrowProgress.begin(),cellInfoVecs.maxDistToRiVec.begin()))
                    + curActCellCt,
            thrust::make_zip_iterator(
                    thrust::make_tuple(cellInfoVecs.isMembrAddingNode.begin(),
                            cellInfoVecs.membrGrowProgress.begin())),
            MemGrowFunc(maxMembrNode));
}
/**
 * Add new membrane elements to cells.
 * This operation is relatively expensive because of memory rearrangement.
 *
 * For every cell flagged by decideIfAddMembrNode_M (isMembrAddingNode),
 * the AddMemNode functor inserts a node at the stored max-tension edge
 * midpoint, writing directly through raw device pointers
 * (growthAuxData.node*Address / adhIndxAddr) and returning the cell's new
 * active membrane node count.
 */
void SceCells::addMembrNodes_M() {
    thrust::counting_iterator<uint> iBegin(0);
    uint curAcCCount = allocPara_m.currentActiveCellCount;
    uint maxNodePerCell = allocPara_m.maxAllNodePerCell;
    thrust::transform_if(
            // input: (cell rank, insertion index, node count, midpoint X/Y)
            thrust::make_zip_iterator(
                    thrust::make_tuple(iBegin,
                            cellInfoVecs.maxTenIndxVec.begin(),
                            cellInfoVecs.activeMembrNodeCounts.begin(),
                            cellInfoVecs.maxTenRiMidXVec.begin(),
                            cellInfoVecs.maxTenRiMidYVec.begin())),
            thrust::make_zip_iterator(
                    thrust::make_tuple(iBegin,
                            cellInfoVecs.maxTenIndxVec.begin(),
                            cellInfoVecs.activeMembrNodeCounts.begin(),
                            cellInfoVecs.maxTenRiMidXVec.begin(),
                            cellInfoVecs.maxTenRiMidYVec.begin()))
                    + curAcCCount, cellInfoVecs.isMembrAddingNode.begin(),
            cellInfoVecs.activeMembrNodeCounts.begin(),
            AddMemNode(maxNodePerCell, growthAuxData.nodeIsActiveAddress,
                    growthAuxData.nodeXPosAddress,
                    growthAuxData.nodeYPosAddress, growthAuxData.adhIndxAddr),
            thrust::identity<bool>());
}
// Debug-only dump of membrane tension and bending quantities. Tension fields
// are sampled at node slots 0, 199 and 200 of every cell; bending fields are
// printed for all active node slots. Ends with the per-cell max-tension
// bookkeeping, then blocks on std::cin so the output can be inspected.
// Fix: the fourth bending loop previously printed membrBendRightX twice;
// it now prints membrBendRightY so all four components appear.
void SceCells::membrDebug() {
    uint curAcCCount = allocPara_m.currentActiveCellCount;
    uint maxActiveNodeC = curAcCCount * allocPara_m.maxAllNodePerCell;
    uint maxNodePC = allocPara_m.maxAllNodePerCell;
    //uint tmp = 0;
    //for (uint i = 0; i < curAcCCount; i++) {
    //	tmp += cellInfoVecs.isMembrAddingNode[i];
    //}
    //if (tmp != 0) {
    //	tmpDebug = true;
    //}
    //if (!tmpDebug) {
    //	return;
    //}
    for (uint i = 0; i < maxActiveNodeC; i++) {
        if (i % maxNodePC == 0 || i % maxNodePC == 199
                || i % maxNodePC == 200) {
            std::cout << nodes->getInfoVecs().membrTensionMag[i] << " ";
        }
    }
    std::cout << std::endl;
    for (uint i = 0; i < maxActiveNodeC; i++) {
        if (i % maxNodePC == 0 || i % maxNodePC == 199
                || i % maxNodePC == 200) {
            std::cout << nodes->getInfoVecs().membrTenMagRi[i] << " ";
        }
    }
    std::cout << std::endl;
    for (uint i = 0; i < maxActiveNodeC; i++) {
        if (i % maxNodePC == 0 || i % maxNodePC == 199
                || i % maxNodePC == 200) {
            std::cout << nodes->getInfoVecs().membrLinkRiMidX[i] << " ";
        }
    }
    std::cout << std::endl;
    for (uint i = 0; i < maxActiveNodeC; i++) {
        if (i % maxNodePC == 0 || i % maxNodePC == 199
                || i % maxNodePC == 200) {
            std::cout << nodes->getInfoVecs().membrLinkRiMidY[i] << " ";
        }
    }
    std::cout << std::endl;
    for (uint i = 0; i < maxActiveNodeC; i++) {
        std::cout << nodes->getInfoVecs().membrBendLeftX[i] << " ";
    }
    std::cout << std::endl;
    for (uint i = 0; i < maxActiveNodeC; i++) {
        std::cout << nodes->getInfoVecs().membrBendLeftY[i] << " ";
    }
    std::cout << std::endl;
    for (uint i = 0; i < maxActiveNodeC; i++) {
        std::cout << nodes->getInfoVecs().membrBendRightX[i] << " ";
    }
    std::cout << std::endl;
    for (uint i = 0; i < maxActiveNodeC; i++) {
        // was membrBendRightX (duplicate); Y component was never printed
        std::cout << nodes->getInfoVecs().membrBendRightY[i] << " ";
    }
    std::cout << std::endl;
    for (uint i = 0; i < curAcCCount; i++) {
        std::cout << "(" << cellInfoVecs.maxTenIndxVec[i] << ","
                << cellInfoVecs.activeMembrNodeCounts[i] << ","
                << cellInfoVecs.maxTenRiMidXVec[i] << ","
                << cellInfoVecs.maxTenRiMidYVec[i] << ")" << std::endl;
    }
    // Pause until the user types something.
    int jj;
    std::cin >> jj;
}
// Writes the membrane and internal node positions of the two daughter cells
// (staged in divAuxData.tmp{1,2}VecMem / tmp{1,2}IntnlVec) into the flat,
// fixed-stride arrays tmpXPos*/tmpYPos*/tmpIsActive* at cell slot i, and
// records the active node counts for each daughter.
// Layout per cell: slots [0, maxMembrNodePerCell) hold membrane nodes,
// the remaining slots hold internal nodes; unused slots are marked inactive.
void SceCells::assembleVecForTwoCells(uint i) {
    uint membCap = allocPara_m.maxMembrNodePerCell;
    uint cellStride = allocPara_m.maxAllNodePerCell;
    uint base = i * cellStride;
    // Membrane slots for both daughters in a single pass.
    for (uint j = 0; j < membCap; j++) {
        uint idx = base + j;
        bool active1 = (j < divAuxData.tmp1VecMem.size());
        if (active1) {
            divAuxData.tmpXPos1_M[idx] = divAuxData.tmp1VecMem[j].x;
            divAuxData.tmpYPos1_M[idx] = divAuxData.tmp1VecMem[j].y;
        }
        divAuxData.tmpIsActive1_M[idx] = active1;
        bool active2 = (j < divAuxData.tmp2VecMem.size());
        if (active2) {
            divAuxData.tmpXPos2_M[idx] = divAuxData.tmp2VecMem[j].x;
            divAuxData.tmpYPos2_M[idx] = divAuxData.tmp2VecMem[j].y;
        }
        divAuxData.tmpIsActive2_M[idx] = active2;
    }
    divAuxData.tmp1MemActiveCounts.push_back(divAuxData.tmp1VecMem.size());
    divAuxData.tmp2MemActiveCounts.push_back(divAuxData.tmp2VecMem.size());
    // Internal-node slots follow the membrane region.
    for (uint j = membCap; j < cellStride; j++) {
        uint idx = base + j;
        uint k = j - membCap;
        bool active1 = (k < divAuxData.tmp1IntnlVec.size());
        if (active1) {
            divAuxData.tmpXPos1_M[idx] = divAuxData.tmp1IntnlVec[k].x;
            divAuxData.tmpYPos1_M[idx] = divAuxData.tmp1IntnlVec[k].y;
        }
        divAuxData.tmpIsActive1_M[idx] = active1;
        bool active2 = (k < divAuxData.tmp2IntnlVec.size());
        if (active2) {
            divAuxData.tmpXPos2_M[idx] = divAuxData.tmp2IntnlVec[k].x;
            divAuxData.tmpYPos2_M[idx] = divAuxData.tmp2IntnlVec[k].y;
        }
        divAuxData.tmpIsActive2_M[idx] = active2;
    }
    divAuxData.tmp1InternalActiveCounts.push_back(
            divAuxData.tmp1IntnlVec.size());
    divAuxData.tmp2InternalActiveCounts.push_back(
            divAuxData.tmp2IntnlVec.size());
}
// Rigidly translates the internal-node set of each daughter cell so that its
// centroid lands on the prescribed new cell center (cell1Center/cell2Center).
// Fix: guards against an empty node set, which previously divided the
// centroid accumulator by size() == 0.
void SceCells::shiftIntnlNodesByCellCenter(CVector cell1Center,
        CVector cell2Center) {
    if (!divAuxData.tmp1IntnlVec.empty()) {
        // Centroid of daughter 1's internal nodes, then shift to target.
        CVector tmpCell1Center(0, 0, 0);
        for (uint j = 0; j < divAuxData.tmp1IntnlVec.size(); j++) {
            tmpCell1Center = tmpCell1Center + divAuxData.tmp1IntnlVec[j];
        }
        tmpCell1Center = tmpCell1Center / divAuxData.tmp1IntnlVec.size();
        CVector shiftVec1 = cell1Center - tmpCell1Center;
        for (uint j = 0; j < divAuxData.tmp1IntnlVec.size(); j++) {
            divAuxData.tmp1IntnlVec[j] = divAuxData.tmp1IntnlVec[j] + shiftVec1;
        }
    }
    if (!divAuxData.tmp2IntnlVec.empty()) {
        // Same procedure for daughter 2.
        CVector tmpCell2Center(0, 0, 0);
        for (uint j = 0; j < divAuxData.tmp2IntnlVec.size(); j++) {
            tmpCell2Center = tmpCell2Center + divAuxData.tmp2IntnlVec[j];
        }
        tmpCell2Center = tmpCell2Center / divAuxData.tmp2IntnlVec.size();
        CVector shiftVec2 = cell2Center - tmpCell2Center;
        for (uint j = 0; j < divAuxData.tmp2IntnlVec.size(); j++) {
            divAuxData.tmp2IntnlVec[j] = divAuxData.tmp2IntnlVec[j] + shiftVec2;
        }
    }
}
// Builds the ordered membrane-node rings of the two daughter cells.
// The inherited membrane nodes (tmp1/tmp2) are sorted by their VecVal key,
// then each ring is closed by interpolating new nodes (obtainPtsBetween)
// between the last and the first inherited node, spaced by memNewSpacing and
// limited by the remaining per-cell membrane capacity.
void SceCells::processMemVec(std::vector<VecVal>& tmp1,
        std::vector<VecVal>& tmp2) {
    divAuxData.tmp1VecMem.clear();
    divAuxData.tmp2VecMem.clear();
    uint memCap = allocPara_m.maxMembrNodePerCell;
    std::sort(tmp1.begin(), tmp1.end());
    std::sort(tmp2.begin(), tmp2.end());
    uint room1 = allocPara_m.maxMembrNodePerCell - tmp1.size();
    uint room2 = allocPara_m.maxMembrNodePerCell - tmp2.size();
    std::vector<CVector> gapPts1, gapPts2;
    // Empty inputs would make the [size()-1] access below invalid; in that
    // case the situation is already pathological, so simply skip the gap fill.
    if (!tmp1.empty()) {
        gapPts1 = obtainPtsBetween(tmp1[tmp1.size() - 1].vec, tmp1[0].vec,
                memNewSpacing, room1);
    }
    if (!tmp2.empty()) {
        gapPts2 = obtainPtsBetween(tmp2[tmp2.size() - 1].vec, tmp2[0].vec,
                memNewSpacing, room2);
    }
    // Inherited nodes first, then the interpolated gap nodes.
    for (uint j = 0; j < tmp1.size(); j++) {
        divAuxData.tmp1VecMem.push_back(tmp1[j].vec);
    }
    for (uint j = 0; j < tmp2.size(); j++) {
        divAuxData.tmp2VecMem.push_back(tmp2[j].vec);
    }
    for (uint j = 0; j < gapPts1.size(); j++) {
        divAuxData.tmp1VecMem.push_back(gapPts1[j]);
    }
    for (uint j = 0; j < gapPts2.size(); j++) {
        divAuxData.tmp2VecMem.push_back(gapPts2[j]);
    }
    assert(divAuxData.tmp1VecMem.size() <= memCap);
    assert(divAuxData.tmp2VecMem.size() <= memCap);
}
// Collects the active nodes of cell i from the flat temporary arrays,
// splitting them into membrane nodes (slot index below the membrane
// capacity) and internal nodes (remaining slots). Both output vectors are
// cleared before filling; z coordinates are always 0.
void SceCells::obtainMembrAndIntnlNodes(uint i, vector<CVector>& membrNodes,
        vector<CVector>& intnlNodes) {
    membrNodes.clear();
    intnlNodes.clear();
    uint membCap = allocPara_m.maxMembrNodePerCell;
    uint cellStride = allocPara_m.maxAllNodePerCell;
    uint base = i * cellStride;
    for (uint j = 0; j < cellStride; j++) {
        uint idx = base + j;
        if (!divAuxData.tmpIsActive_M[idx]) {
            continue;
        }
        CVector pos(divAuxData.tmpNodePosX_M[idx],
                divAuxData.tmpNodePosY_M[idx], 0);
        if (j < membCap) {
            membrNodes.push_back(pos);
        } else {
            intnlNodes.push_back(pos);
        }
    }
}
// Returns the stored (pre-division) center of cell i as a CVector with z = 0.
CVector SceCells::obtainCenter(uint i) {
    return CVector(divAuxData.tmpCenterPosX_M[i],
            divAuxData.tmpCenterPosY_M[i], 0);
}
// Finds the cell's major axis by brute force (O(n^2), as the original author
// noted: simple rather than optimal). Each membrane node defines a candidate
// unit direction from the cell center; the membrane is projected onto every
// candidate and the direction with the largest projected extent wins.
// The winning extent is returned through lenAlongMajorAxis.
CVector SceCells::calDivDir_MajorAxis(CVector center,
        vector<CVector>& membrNodes, double& lenAlongMajorAxis) {
    double bestExtent = 0;
    CVector bestDir;
    for (uint a = 0; a < membrNodes.size(); a++) {
        CVector axis = (membrNodes[a] - center).getUnitVector();
        double lo = 0, hi = 0;
        for (uint b = 0; b < membrNodes.size(); b++) {
            CVector offset = membrNodes[b] - center;
            double proj = offset * axis;
            if (proj < lo) {
                lo = proj;
            }
            if (proj > hi) {
                hi = proj;
            }
        }
        double extent = hi - lo;
        if (extent > bestExtent) {
            bestExtent = extent;
            bestDir = axis;
        }
    }
    lenAlongMajorAxis = bestExtent;
    return bestDir;
}
//A&A
// Measures the cell's extent along the given division (Hertwig) axis.
// Every membrane node is projected onto the normalized divDir; the extreme
// nodes are selected by the projection of the *unit* offset (its cosine),
// while the returned span uses the full-length projections of those same
// extreme nodes — preserving the original selection rule exactly.
double SceCells::calLengthAlongHertwigAxis(CVector divDir, CVector center,
        vector<CVector>& membrNodes) {
    CVector axis = divDir.getUnitVector();
    double bestNegUnit = 0, bestPosUnit = 0;
    double spanNeg = 0, spanPos = 0;
    for (uint k = 0; k < membrNodes.size(); k++) {
        CVector offset = membrNodes[k] - center;
        double unitProj = axis * offset.getUnitVector();
        double fullProj = axis * offset;
        if (unitProj < bestNegUnit) {
            bestNegUnit = unitProj;
            spanNeg = fullProj;
        }
        if (unitProj > bestPosUnit) {
            bestPosUnit = unitProj;
            spanPos = fullProj;
        }
    }
    return spanPos - spanNeg;
}
// Places the two daughter-cell centers symmetrically about the old center,
// shifted along the division direction by centerShiftRatio times half the
// major-axis length.
void SceCells::obtainTwoNewCenters(CVector& oldCenter, CVector& divDir,
        double len_MajorAxis, CVector& centerNew1, CVector& centerNew2) {
    CVector axis = divDir.getUnitVector();
    double shift = len_MajorAxis / 2.0 * centerShiftRatio;
    centerNew1 = oldCenter + shift * axis;
    centerNew2 = oldCenter - shift * axis;
}
// Partitions the nodes of dividing cell i between the two daughter cells.
// Membrane nodes go to tmp1/tmp2 according to which side of the split plane
// (divDir rotated 90 degrees CCW in the XY plane) they fall on, keyed by the
// projection of their unit offset onto the split direction for later sorting.
// Internal nodes are shrunk toward the old center by shrinkRatio and assigned
// to divAuxData.tmp1IntnlVec / tmp2IntnlVec by the sign of their projection
// onto divDir. Inactive node slots are skipped.
void SceCells::prepareTmpVec(uint i, CVector divDir, CVector oldCenter,
        std::vector<VecVal>& tmp1, std::vector<VecVal>& tmp2) {
    tmp1.clear();
    tmp2.clear();
    uint membCap = allocPara_m.maxMembrNodePerCell;
    uint cellStride = allocPara_m.maxAllNodePerCell;
    CVector splitDir = divDir.rotateNintyDeg_XY_CC();
    for (uint j = 0; j < cellStride; j++) {
        uint idx = i * cellStride + j;
        if (divAuxData.tmpIsActive_M[idx] != true) {
            continue;
        }
        CVector pos(divAuxData.tmpNodePosX_M[idx],
                divAuxData.tmpNodePosY_M[idx], 0);
        CVector fromCenter = pos - oldCenter;
        if (j < membCap) {
            // Membrane node: record sort key + position, pick side by the
            // z component of the cross product with the split direction.
            VecVal entry;
            entry.val = fromCenter.getUnitVector() * splitDir;
            entry.vec = pos;
            CVector crossProduct = Cross(fromCenter, splitDir);
            if (crossProduct.z >= 0) {
                // counter-clockwise side
                tmp1.push_back(entry);
            } else {
                // clockwise side
                tmp2.push_back(entry);
            }
        } else {
            // Internal node: pull toward the old center, then split by the
            // sign of the projection onto the division direction.
            CVector shrunk = fromCenter * shrinkRatio + oldCenter;
            if (fromCenter * divDir > 0) {
                divAuxData.tmp1IntnlVec.push_back(shrunk);
            } else {
                divAuxData.tmp2IntnlVec.push_back(shrunk);
            }
        }
    }
}
// Computes the area of every active cell via a segmented reduction keyed by
// cell rank: each node contributes a signed triangle area (CalTriArea) built
// from the node, a neighbor looked up through raw device pointers, and the
// cell center; summing the contributions per cell yields cellAreaVec.
void SceCells::calCellArea() {
    thrust::counting_iterator<uint> iBegin(0), iBegin2(0);
    totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount
            * allocPara_m.maxAllNodePerCell;
    uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell;
    // Raw device pointers let CalTriArea look up neighboring node data.
    double* nodeLocXAddr = thrust::raw_pointer_cast(
            &(nodes->getInfoVecs().nodeLocX[0]));
    double* nodeLocYAddr = thrust::raw_pointer_cast(
            &(nodes->getInfoVecs().nodeLocY[0]));
    bool* nodeIsActiveAddr = thrust::raw_pointer_cast(
            &(nodes->getInfoVecs().nodeIsActive[0]));
    thrust::reduce_by_key(
            // key: cell rank = global node index / nodes per cell
            make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell)),
            make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))
                    + totalNodeCountForActiveCells,
            // value: per-node triangle area from (membrane node count,
            // cell rank, node rank within cell, cell center X/Y)
            thrust::make_transform_iterator(
                    thrust::make_zip_iterator(
                            thrust::make_tuple(
                                    thrust::make_permutation_iterator(
                                            cellInfoVecs.activeMembrNodeCounts.begin(),
                                            make_transform_iterator(iBegin,
                                                    DivideFunctor(
                                                            maxAllNodePerCell))),
                                    make_transform_iterator(iBegin,
                                            DivideFunctor(maxAllNodePerCell)),
                                    make_transform_iterator(iBegin,
                                            ModuloFunctor(maxAllNodePerCell)),
                                    make_permutation_iterator(
                                            cellInfoVecs.centerCoordX.begin(),
                                            make_transform_iterator(iBegin,
                                                    DivideFunctor(
                                                            maxAllNodePerCell))),
                                    make_permutation_iterator(
                                            cellInfoVecs.centerCoordY.begin(),
                                            make_transform_iterator(iBegin,
                                                    DivideFunctor(
                                                            maxAllNodePerCell))))),
                    CalTriArea(maxAllNodePerCell, nodeIsActiveAddr,
                            nodeLocXAddr, nodeLocYAddr)),
            cellInfoVecs.cellRanksTmpStorage.begin(),
            cellInfoVecs.cellAreaVec.begin(), thrust::equal_to<uint>(),
            thrust::plus<double>());
}
//AAMIRI added to calculate Perimeter of each cell
// Same segmented-reduction pattern as calCellArea, but each node contributes
// a membrane edge length (CalPerim) instead of a triangle area; the per-cell
// sums land in cellPerimVec.
void SceCells::calCellPerim() {
    thrust::counting_iterator<uint> iBegin(0), iBegin2(0);
    totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount
            * allocPara_m.maxAllNodePerCell;
    uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell;
    // Raw device pointers let CalPerim look up neighboring node data.
    double* nodeLocXAddr = thrust::raw_pointer_cast(
            &(nodes->getInfoVecs().nodeLocX[0]));
    double* nodeLocYAddr = thrust::raw_pointer_cast(
            &(nodes->getInfoVecs().nodeLocY[0]));
    bool* nodeIsActiveAddr = thrust::raw_pointer_cast(
            &(nodes->getInfoVecs().nodeIsActive[0]));
    thrust::reduce_by_key(
            // key: cell rank = global node index / nodes per cell
            make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell)),
            make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))
                    + totalNodeCountForActiveCells,
            // value: per-node edge length from (membrane node count,
            // cell rank, node rank within cell, cell center X/Y)
            thrust::make_transform_iterator(
                    thrust::make_zip_iterator(
                            thrust::make_tuple(
                                    thrust::make_permutation_iterator(
                                            cellInfoVecs.activeMembrNodeCounts.begin(),
                                            make_transform_iterator(iBegin,
                                                    DivideFunctor(
                                                            maxAllNodePerCell))),
                                    make_transform_iterator(iBegin,
                                            DivideFunctor(maxAllNodePerCell)),
                                    make_transform_iterator(iBegin,
                                            ModuloFunctor(maxAllNodePerCell)),
                                    make_permutation_iterator(
                                            cellInfoVecs.centerCoordX.begin(),
                                            make_transform_iterator(iBegin,
                                                    DivideFunctor(
                                                            maxAllNodePerCell))),
                                    make_permutation_iterator(
                                            cellInfoVecs.centerCoordY.begin(),
                                            make_transform_iterator(iBegin,
                                                    DivideFunctor(
                                                            maxAllNodePerCell))))),
                    CalPerim(maxAllNodePerCell, nodeIsActiveAddr,
                            nodeLocXAddr, nodeLocYAddr)),
            cellInfoVecs.cellRanksTmpStorage.begin(),
            cellInfoVecs.cellPerimVec.begin(), thrust::equal_to<uint>(),
            thrust::plus<double>());
}
CellsStatsData SceCells::outputPolyCountData() {
cout << " I am at begining of outpolycount"<< std::flush ;
std::cout.flush();
double sumX,sumY,cntr_X_Domain,cntr_Y_Domain ;
int BdryApproach ;
BdryApproach=1 ;
totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount
* allocPara_m.maxAllNodePerCell;
cout << " I am before cells area"<< endl ;
calCellArea();
cout << " I am after cells area" << endl ;
calCellPerim();//AAMIRI
CellsStatsData result;
cout << " I am after result" << endl ;
uint bdryCriteria =
globalConfigVars.getConfigValue("BdryCellCriteria").toInt();
// already on host; no need to call thrust::copy
thrust::host_vector<int> adhIndxHost =
nodes->getInfoVecs().nodeAdhIndxHostCopy;
thrust::host_vector<double> growthProVecHost(
allocPara_m.currentActiveCellCount);
thrust::copy(cellInfoVecs.growthProgress.begin(),
cellInfoVecs.growthProgress.begin()
+ allocPara_m.currentActiveCellCount,
growthProVecHost.begin());
thrust::host_vector<double> growthProMembrVecHost(
allocPara_m.currentActiveCellCount);
thrust::copy(cellInfoVecs.membrGrowProgress.begin(),
cellInfoVecs.membrGrowProgress.begin()
+ allocPara_m.currentActiveCellCount,
growthProMembrVecHost.begin());
thrust::host_vector<uint> activeMembrNodeCountHost(
allocPara_m.currentActiveCellCount);
thrust::copy(cellInfoVecs.activeMembrNodeCounts.begin(),
cellInfoVecs.activeMembrNodeCounts.begin()
+ allocPara_m.currentActiveCellCount,
activeMembrNodeCountHost.begin());
thrust::host_vector<uint> activeIntnlNodeCountHost(
allocPara_m.currentActiveCellCount);
thrust::copy(cellInfoVecs.activeIntnlNodeCounts.begin(),
cellInfoVecs.activeIntnlNodeCounts.begin()
+ allocPara_m.currentActiveCellCount,
activeIntnlNodeCountHost.begin());
thrust::host_vector<double> centerCoordXHost(
allocPara_m.currentActiveCellCount);
thrust::host_vector<double> centerCoordYHost(
allocPara_m.currentActiveCellCount);
thrust::copy(cellInfoVecs.centerCoordX.begin(),
cellInfoVecs.centerCoordX.begin()
+ allocPara_m.currentActiveCellCount,
centerCoordXHost.begin());
thrust::copy(cellInfoVecs.centerCoordY.begin(),
cellInfoVecs.centerCoordY.begin()
+ allocPara_m.currentActiveCellCount,
centerCoordYHost.begin());
thrust::host_vector<double> cellAreaHost(
allocPara_m.currentActiveCellCount);
thrust::host_vector<double> cellPerimHost(
allocPara_m.currentActiveCellCount);//AAMIRI
thrust::host_vector<double> cellDppHost(
allocPara_m.currentActiveCellCount);//Ali
thrust::copy(cellInfoVecs.cellAreaVec.begin(),
cellInfoVecs.cellAreaVec.begin()
+ allocPara_m.currentActiveCellCount, cellAreaHost.begin());
thrust::copy(cellInfoVecs.cellPerimVec.begin(),
cellInfoVecs.cellPerimVec.begin()
+ allocPara_m.currentActiveCellCount, cellPerimHost.begin());//AAMIRI
thrust::copy(cellInfoVecs.cell_Dpp.begin(),
cellInfoVecs.cell_Dpp.begin()
+ allocPara_m.currentActiveCellCount, cellDppHost.begin());//Ali
sumX=0 ;
sumY=0 ;
for (uint i = 0; i < allocPara_m.currentActiveCellCount; i++) {
CellStats cellStatsData;
cellStatsData.cellGrowthProgress = growthProVecHost[i];
cellStatsData.cellRank = i;
bool isBdry = false;
std::set<int> neighbors;
std::vector<int> neighborsV; //Ali
int neighborStrength[10]; //Ali
int continousNoAdh = 0;
map <int, int> cellAndNeighborRank ; //Ali
//std::cout << "printing adhesion indicies ";
//for (int ii=0 ; ii<neighborStrength.size() ; ii++){
for (int ii=0 ; ii< 10; ii++){ //Ali
neighborStrength[ii]=0 ;
}
cellAndNeighborRank.clear(); //Ali
for (uint j = 0; j < activeMembrNodeCountHost[i]; j++) {
uint index = i * allocPara_m.maxAllNodePerCell + j;
//std::cout << adhIndxHost[index] << ",";
if (adhIndxHost[index] != -1) {
uint adhCellRank = adhIndxHost[index]
/ allocPara_m.maxAllNodePerCell;
//std::cout << adhCellRank << " ";
neighbors.insert(adhCellRank);
map <int, int>:: iterator iteratorMap=cellAndNeighborRank.find(adhCellRank); //Ali
if (iteratorMap==cellAndNeighborRank.end()) { //Ali
int NewneighborRank= neighbors.size()-1; //Ali
cellAndNeighborRank[adhCellRank]=NewneighborRank; //Ali
neighborStrength[NewneighborRank]=neighborStrength[NewneighborRank]+1 ; //Ali
neighborsV.push_back(adhCellRank); //Ali
}
else { //Ali
int oldNeighborRank=(*iteratorMap).second ;
neighborStrength[oldNeighborRank]=neighborStrength[oldNeighborRank]+1 ; //Ali
}
continousNoAdh = 0;
} else {
continousNoAdh = continousNoAdh + 1;
if (continousNoAdh > bdryCriteria) {
isBdry = true;
}
}
if (j == activeMembrNodeCountHost[i] - 1
&& adhIndxHost[index] == -1) {
int k = 0;
uint indexNew;
while (k < activeMembrNodeCountHost[i] - 1) {
indexNew = i * allocPara_m.maxAllNodePerCell + k;
if (adhIndxHost[indexNew] == -1) {
continousNoAdh = continousNoAdh + 1;
if (continousNoAdh > bdryCriteria) {
isBdry = true;
}
k++;
} else {
break;
}
}
}
}
cellStatsData.isBdryCell = isBdry;
cellStatsData.numNeighbors = neighbors.size();
cellStatsData.currentActiveMembrNodes = activeMembrNodeCountHost[i];
cellStatsData.currentActiveIntnlNodes = activeIntnlNodeCountHost[i];
cellStatsData.neighborVec = neighbors;
cellStatsData.neighborVecV = neighborsV; //Ali
for (int iiii=0; iiii<10 ; iiii++){
cellStatsData.cellNeighborStrength[iiii] = neighborStrength[iiii];
} //Ali
cellStatsData.membrGrowthProgress = growthProMembrVecHost[i];
cellStatsData.cellCenter = CVector(centerCoordXHost[i],
centerCoordYHost[i], 0);
cellStatsData.cellArea = cellAreaHost[i];
cellStatsData.cellPerim = cellPerimHost[i];//AAMIRI
cellStatsData.cellDpp = cellDppHost[i];//Ali
result.cellsStats.push_back(cellStatsData);
sumX=sumX+cellStatsData.cellCenter.x ;
sumY=sumY+cellStatsData.cellCenter.y ;
}
//Ali
if (BdryApproach==2) {
cout << "sumX=" << sumX << endl ;
cout << "sumY=" << sumY << endl ;
cntr_X_Domain=sumX/result.cellsStats.size() ;
cntr_Y_Domain=sumY/result.cellsStats.size() ;
cout << "cntr_X=" << cntr_X_Domain << endl ;
cout << "cntr_Y=" << cntr_Y_Domain << endl ;
double R_Max ;
double Distance ;
R_Max=0 ;
for (uint i = 0; i < allocPara_m.currentActiveCellCount; i++) {
Distance=sqrt( pow(centerCoordXHost[i]-cntr_X_Domain,2) +pow(centerCoordYHost[i]-cntr_Y_Domain,2) ) ;
if (Distance > R_Max) {
R_Max=Distance ;
}
}
cout << "R_Max=" << R_Max << endl ;
for (uint i = 0; i < allocPara_m.currentActiveCellCount; i++) {
Distance=sqrt( pow(centerCoordXHost[i]-cntr_X_Domain,2) +pow(centerCoordYHost[i]-cntr_Y_Domain,2) ) ;
if (Distance > 0.9* R_Max) {
result.cellsStats[i].isBdryCell = true;
cout << "isBdryCell"<< i<< endl ;
}
else {
result.cellsStats[i].isBdryCell = false;
cout << "isNormalCell"<< i << endl ;
}
}
}
//Ali
cout << "I want to write data" << endl ;
// ofstream Stress_Strain_Single ;
//Stress_Strain_Single.open("Stress_Strain_Single.txt");
//Stress_Strain_Single.close() ;
//Ali
result.MaxDistanceX=abs(centerCoordXHost[1]-centerCoordXHost[0]); //Ali
result.Cells_Extrem_Loc[0]=Tisu_MinX;
result.Cells_Extrem_Loc[1]=Tisu_MaxX;
result.Cells_Extrem_Loc[2]=Tisu_MinY;
result.Cells_Extrem_Loc[3]=Tisu_MaxY ;
result.F_Ext_Out=membrPara.F_Ext_Incline*curTime ;
//if (dt==curTime) {
//result.Init_Displace=MaxX-MinX ;
// }
//Ali
return result;
}
// Guard used before divisions: true when `num` exceeds the device-constant
// minDivisor threshold, i.e. it is safe to use as a denominator.
__device__ bool bigEnough(double& num) {
	return num > minDivisor;
}
// Z component of the 2D cross product A x B. The sign indicates the
// relative orientation (counter-clockwise vs clockwise) of the two vectors.
__device__ double cross_Z(double vecA_X, double vecA_Y, double vecB_X,
		double vecB_Y) {
	double zComponent = vecA_X * vecB_Y;
	zComponent -= vecA_Y * vecB_X;
	return zComponent;
}
/*
__device__ double calBendMulti(double& angle, uint activeMembrCt) {
double equAngle = PI - PI / activeMembrCt;
return bendCoeff * (angle - equAngle);
}
*/
//AAMIRI
// Bending-force multiplier for a membrane node.
// equAngle is the interior angle of a regular polygon with activeMembrCt
// vertices, so the force scales with the deviation (angle - equAngle).
// Below the mitotic criterion the plain bendCoeff applies; past it, the
// coefficient interpolates linearly from bendCoeff (at progress ==
// mitoticCri) up to bendCoeff_Mitotic (at progress == 1.0).
__device__ double calBendMulti_Mitotic(double& angle, uint activeMembrCt, double& progress, double mitoticCri) {
double equAngle = PI - PI / activeMembrCt;
if (progress <= mitoticCri){
return bendCoeff * (angle - equAngle);}
else{
// stiffen bending as the cell advances through mitosis
return (angle - equAngle)*(bendCoeff + (bendCoeff_Mitotic - bendCoeff) * (progress - mitoticCri)/(1.0 - mitoticCri));
}
}
// Applies subcellular-element (SCE) forces between nodes of each cell via a
// single thrust::transform over all node slots of all active cells.
// The zip input carries, per node slot: the owning cell's active membrane
// node count, active internal node count, cell rank (slot / maxAllNodePerCell),
// node rank within the cell (slot % maxAllNodePerCell), the cell's growth
// progress, and the node's current velocity. AddSceCellForce accumulates
// forces into (nodeVelX, nodeVelY) and records membrane-internal forces into
// (nodeF_MI_M_x, nodeF_MI_M_y) for later pressure calculation.
void SceCells::applySceCellDisc_M() {
totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount
* allocPara_m.maxAllNodePerCell;
// local copies shadow the __constant__ names; passed into the functor
uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell;
uint maxMemNodePerCell = allocPara_m.maxMembrNodePerCell;
thrust::counting_iterator<uint> iBegin(0);
// raw device pointers so the functor can random-access any node position
double* nodeLocXAddr = thrust::raw_pointer_cast(
&(nodes->getInfoVecs().nodeLocX[0]));
double* nodeLocYAddr = thrust::raw_pointer_cast(
&(nodes->getInfoVecs().nodeLocY[0]));
bool* nodeIsActiveAddr = thrust::raw_pointer_cast(
&(nodes->getInfoVecs().nodeIsActive[0]));
// growth-progress criterion held constant for now (see commented formula)
double grthPrgrCriVal_M =growthAuxData.grthPrgrCriVal_M_Ori ; // for now constant growthAuxData.grthProgrEndCPU
// - growthAuxData.prolifDecay
// * (growthAuxData.grthProgrEndCPU
// - growthAuxData.grthPrgrCriVal_M_Ori);
thrust::transform(
thrust::make_zip_iterator(
thrust::make_tuple(
// per-slot: owning cell's active membrane node count
thrust::make_permutation_iterator(
cellInfoVecs.activeMembrNodeCounts.begin(),
make_transform_iterator(iBegin,
DivideFunctor(maxAllNodePerCell))),
// per-slot: owning cell's active internal node count
thrust::make_permutation_iterator(
cellInfoVecs.activeIntnlNodeCounts.begin(),
make_transform_iterator(iBegin,
DivideFunctor(maxAllNodePerCell))),
// cell rank and node rank within the cell
make_transform_iterator(iBegin,
DivideFunctor(maxAllNodePerCell)),
make_transform_iterator(iBegin,
ModuloFunctor(maxAllNodePerCell)),
// per-slot: owning cell's growth progress
thrust::make_permutation_iterator(
cellInfoVecs.growthProgress.begin(),
make_transform_iterator(iBegin,
DivideFunctor(maxAllNodePerCell))),
nodes->getInfoVecs().nodeVelX.begin(),
nodes->getInfoVecs().nodeVelY.begin())),
// end iterator: same tuple advanced by the total active node count
thrust::make_zip_iterator(
thrust::make_tuple(
thrust::make_permutation_iterator(
cellInfoVecs.activeMembrNodeCounts.begin(),
make_transform_iterator(iBegin,
DivideFunctor(maxAllNodePerCell))),
thrust::make_permutation_iterator(
cellInfoVecs.activeIntnlNodeCounts.begin(),
make_transform_iterator(iBegin,
DivideFunctor(maxAllNodePerCell))),
make_transform_iterator(iBegin,
DivideFunctor(maxAllNodePerCell)),
make_transform_iterator(iBegin,
ModuloFunctor(maxAllNodePerCell)),
thrust::make_permutation_iterator(
cellInfoVecs.growthProgress.begin(),
make_transform_iterator(iBegin,
DivideFunctor(maxAllNodePerCell))),
nodes->getInfoVecs().nodeVelX.begin(),
nodes->getInfoVecs().nodeVelY.begin()))
+ totalNodeCountForActiveCells,
thrust::make_zip_iterator(
thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(),
nodes->getInfoVecs().nodeVelY.begin(),
nodes->getInfoVecs().nodeF_MI_M_x.begin(), //Ali added for cell pressure calculation
nodes->getInfoVecs().nodeF_MI_M_y.begin())),// ALi added for cell pressure calculation
AddSceCellForce(maxAllNodePerCell, maxMemNodePerCell, nodeLocXAddr,
nodeLocYAddr, nodeIsActiveAddr, grthPrgrCriVal_M));
}
// Accumulates the membrane-internal (I-B) SCE pair force between node
// (xPos, yPos) and node (xPos2, yPos2) into (xRes, yRes).
// Three regimes based on the owning cell's growth progress:
//   - past grthPrgrCriEnd_M: dividing-cell parameters sceIBDiv_M;
//   - between grthPrgrCriVal_M and grthPrgrCriEnd_M: parameters linearly
//     interpolated between sceIB_M and sceIBDiv_M;
//   - otherwise: normal parameters sceIB_M.
// Each parameter set is {amp0, amp1, decay0, decay1, cutoff}; the force is
// a difference of two exponentials, zero beyond the cutoff distance.
__device__
void calAndAddIB_M(double& xPos, double& yPos, double& xPos2, double& yPos2,
double& growPro, double& xRes, double& yRes, double grthPrgrCriVal_M) {
double linkLength = compDist2D(xPos, yPos, xPos2, yPos2);
double forceValue = 0;
if (growPro > grthPrgrCriEnd_M) {
if (linkLength < sceIBDiv_M[4]) {
forceValue = -sceIBDiv_M[0] / sceIBDiv_M[2]
* exp(-linkLength / sceIBDiv_M[2])
+ sceIBDiv_M[1] / sceIBDiv_M[3]
* exp(-linkLength / sceIBDiv_M[3]);
}
} else if (growPro > grthPrgrCriVal_M) {
// blend normal and dividing parameter sets by progress fraction
double percent = (growPro - grthPrgrCriVal_M)
/ (grthPrgrCriEnd_M - grthPrgrCriVal_M);
double lenLimit = percent * (sceIBDiv_M[4])
+ (1.0 - percent) * sceIB_M[4];
if (linkLength < lenLimit) {
double intnlBPara0 = percent * (sceIBDiv_M[0])
+ (1.0 - percent) * sceIB_M[0];
double intnlBPara1 = percent * (sceIBDiv_M[1])
+ (1.0 - percent) * sceIB_M[1];
double intnlBPara2 = percent * (sceIBDiv_M[2])
+ (1.0 - percent) * sceIB_M[2];
double intnlBPara3 = percent * (sceIBDiv_M[3])
+ (1.0 - percent) * sceIB_M[3];
forceValue = -intnlBPara0 / intnlBPara2
* exp(-linkLength / intnlBPara2)
+ intnlBPara1 / intnlBPara3
* exp(-linkLength / intnlBPara3);
}
} else {
if (linkLength < sceIB_M[4]) {
forceValue = -sceIB_M[0] / sceIB_M[2]
* exp(-linkLength / sceIB_M[2])
+ sceIB_M[1] / sceIB_M[3] * exp(-linkLength / sceIB_M[3]);
}
}
// project scalar force onto the unit link direction.
// NOTE(review): linkLength == 0 (coincident nodes) would divide by zero
// here — presumably coincident node pairs cannot occur; confirm.
xRes = xRes + forceValue * (xPos2 - xPos) / linkLength;
yRes = yRes + forceValue * (yPos2 - yPos) / linkLength;
}
//Ali function added for eventually computing pressure for each cells
// Same I-B pair force as calAndAddIB_M, but additionally accumulates the
// raw membrane-internal force components into (F_MI_M_x, F_MI_M_y) so the
// caller can later compute per-cell pressure. See calAndAddIB_M for the
// three growth-progress regimes and the parameter-set layout.
__device__
void calAndAddIB_M2(double& xPos, double& yPos, double& xPos2, double& yPos2,
double& growPro, double& xRes, double& yRes, double & F_MI_M_x, double & F_MI_M_y, double grthPrgrCriVal_M) {
double linkLength = compDist2D(xPos, yPos, xPos2, yPos2);
double forceValue = 0;
if (growPro > grthPrgrCriEnd_M) {
if (linkLength < sceIBDiv_M[4]) {
forceValue = -sceIBDiv_M[0] / sceIBDiv_M[2]
* exp(-linkLength / sceIBDiv_M[2])
+ sceIBDiv_M[1] / sceIBDiv_M[3]
* exp(-linkLength / sceIBDiv_M[3]);
}
} else if (growPro > grthPrgrCriVal_M) {
// blend normal and dividing parameter sets by progress fraction
double percent = (growPro - grthPrgrCriVal_M)
/ (grthPrgrCriEnd_M - grthPrgrCriVal_M);
double lenLimit = percent * (sceIBDiv_M[4])
+ (1.0 - percent) * sceIB_M[4];
if (linkLength < lenLimit) {
double intnlBPara0 = percent * (sceIBDiv_M[0])
+ (1.0 - percent) * sceIB_M[0];
double intnlBPara1 = percent * (sceIBDiv_M[1])
+ (1.0 - percent) * sceIB_M[1];
double intnlBPara2 = percent * (sceIBDiv_M[2])
+ (1.0 - percent) * sceIB_M[2];
double intnlBPara3 = percent * (sceIBDiv_M[3])
+ (1.0 - percent) * sceIB_M[3];
forceValue = -intnlBPara0 / intnlBPara2
* exp(-linkLength / intnlBPara2)
+ intnlBPara1 / intnlBPara3
* exp(-linkLength / intnlBPara3);
}
} else {
if (linkLength < sceIB_M[4]) {
forceValue = -sceIB_M[0] / sceIB_M[2]
* exp(-linkLength / sceIB_M[2])
+ sceIB_M[1] / sceIB_M[3] * exp(-linkLength / sceIB_M[3]);
}
}
// record raw I-B force for pressure calculation, then add to velocity sum
F_MI_M_x=F_MI_M_x+forceValue * (xPos2 - xPos) / linkLength;
F_MI_M_y=F_MI_M_y+forceValue * (yPos2 - yPos) / linkLength;
xRes = xRes + forceValue * (xPos2 - xPos) / linkLength;
yRes = yRes + forceValue * (yPos2 - yPos) / linkLength;
}
// Accumulates the internal-internal (I-I) SCE pair force between two
// internal nodes into (xRes, yRes). Structure mirrors calAndAddIB_M:
// dividing parameters sceIIDiv_M past grthPrgrCriEnd_M, a linear blend of
// sceII_M/sceIIDiv_M between the two criteria, and normal sceII_M below.
__device__
void calAndAddII_M(double& xPos, double& yPos, double& xPos2, double& yPos2,
double& growPro, double& xRes, double& yRes, double grthPrgrCriVal_M) {
double linkLength = compDist2D(xPos, yPos, xPos2, yPos2);
double forceValue = 0;
if (growPro > grthPrgrCriEnd_M) {
if (linkLength < sceIIDiv_M[4]) {
forceValue = -sceIIDiv_M[0] / sceIIDiv_M[2]
* exp(-linkLength / sceIIDiv_M[2])
+ sceIIDiv_M[1] / sceIIDiv_M[3]
* exp(-linkLength / sceIIDiv_M[3]);
}
} else if (growPro > grthPrgrCriVal_M) {
// blend normal and dividing parameter sets by progress fraction
double percent = (growPro - grthPrgrCriVal_M)
/ (grthPrgrCriEnd_M - grthPrgrCriVal_M);
double lenLimit = percent * (sceIIDiv_M[4])
+ (1.0 - percent) * sceII_M[4];
if (linkLength < lenLimit) {
double intraPara0 = percent * (sceIIDiv_M[0])
+ (1.0 - percent) * sceII_M[0];
double intraPara1 = percent * (sceIIDiv_M[1])
+ (1.0 - percent) * sceII_M[1];
double intraPara2 = percent * (sceIIDiv_M[2])
+ (1.0 - percent) * sceII_M[2];
double intraPara3 = percent * (sceIIDiv_M[3])
+ (1.0 - percent) * sceII_M[3];
forceValue = -intraPara0 / intraPara2
* exp(-linkLength / intraPara2)
+ intraPara1 / intraPara3 * exp(-linkLength / intraPara3);
}
} else {
if (linkLength < sceII_M[4]) {
forceValue = -sceII_M[0] / sceII_M[2]
* exp(-linkLength / sceII_M[2])
+ sceII_M[1] / sceII_M[3] * exp(-linkLength / sceII_M[3]);
}
}
// NOTE(review): divides by linkLength — zero-length links would be UB;
// presumably coincident nodes never occur, confirm.
xRes = xRes + forceValue * (xPos2 - xPos) / linkLength;
yRes = yRes + forceValue * (yPos2 - yPos) / linkLength;
}
#include "SceCells.h"
#include <cmath>
double epsilon = 1.0e-12;
__constant__ double membrEquLen;
__constant__ double membrStiff;
__constant__ double membrStiff_Mitotic; //Ali June 30
__constant__ double pI;
__constant__ double minLength;
__constant__ double minDivisor;
__constant__ uint maxAllNodePerCell;
__constant__ uint maxMembrPerCell;
__constant__ uint maxIntnlPerCell;
__constant__ double bendCoeff;
__constant__ double bendCoeff_Mitotic;//AAMIRI
__constant__ double sceIB_M[5];
__constant__ double sceIBDiv_M[5];
__constant__ double sceII_M[5];
__constant__ double sceIIDiv_M[5];
__constant__ double grthPrgrCriEnd_M;
__constant__ double F_Ext_Incline_M2 ; //Ali
//Ali & Abu June 30th
// Hookean membrane spring force for an edge of the given length.
// Stiffness is the plain membrStiff until `progress` passes the mitotic
// criterion; past it, the stiffness interpolates linearly from membrStiff
// (at progress == mitoticCri) to membrStiff_Mitotic (at progress == 1.0).
__device__
double calMembrForce_Mitotic(double& length, double& progress, double mitoticCri) {
if (progress <= mitoticCri) {
return (length - membrEquLen) * membrStiff;
} else {
return (length - membrEquLen) *(membrStiff+ (membrStiff_Mitotic-membrStiff)* (progress-mitoticCri)/(1.0-mitoticCri));
}
}
//
//Ali
// External force that ramps linearly in time: curTime scaled by the
// device-constant incline F_Ext_Incline_M2.
__device__
double calExtForce(double& curTime) {
	return F_Ext_Incline_M2 * curTime;
}
//Ali
// Returns a pseudo-random angle uniformly distributed in [0, 2*pi).
// The engine is re-seeded on every call with `seed` and decorrelated
// across cells by discarding `cellRank` draws.
__device__
double obtainRandAngle(uint& cellRank, uint& seed) {
thrust::default_random_engine rng(seed);
// discard n numbers to avoid correlation
rng.discard(cellRank);
thrust::uniform_real_distribution<double> u0Pi(0, 2.0 * pI);
double randomAngle = u0Pi(rng);
return randomAngle;
}
// Global node index where the cell's next internal node is appended:
// internal nodes live after the cell's maxMembrPerCell membrane slots.
__device__ uint obtainNewIntnlNodeIndex(uint& cellRank, uint& curActiveCount) {
return (cellRank * maxAllNodePerCell + maxMembrPerCell + curActiveCount);
}
//AAMIRI
// Global node index one past the last active internal node of the cell.
// NOTE(review): the expression is identical to obtainNewIntnlNodeIndex;
// callers presumably pass curActiveCount such that this lands on the last
// node — confirm intent at call sites.
__device__ uint obtainLastIntnlNodeIndex(uint& cellRank, uint& curActiveCount) {
return (cellRank * maxAllNodePerCell + maxMembrPerCell + curActiveCount );
}
//AAMIRI
// Global node index of the cell's last active membrane node.
__device__ uint obtainMembEndNode(uint& cellRank, uint& activeMembrNodeThis) {
return (cellRank * maxAllNodePerCell + activeMembrNodeThis - 1 );
}
// True when the cell has no room left for more internal nodes
// (count has reached the per-cell cap maxIntnlPerCell).
__device__
bool isAllIntnlFilled(uint& currentIntnlCount) {
	return currentIntnlCount >= maxIntnlPerCell;
}
//AAMIRI
// Picks a pseudo-random membrane node of cell `cellRank` to remove and
// returns its global node index. The engine is re-seeded each call with
// `seed` and decorrelated by discarding `activeMembrNodes` draws; the node
// rank is drawn uniformly from [0, activeMembrNodes - 1].
__device__
int obtainRemovingMembrNodeID(uint &cellRank, uint& activeMembrNodes, uint& seed) {
	thrust::default_random_engine rng(seed);
	// discard n numbers to avoid correlation
	rng.discard(activeMembrNodes);
	// BUGFIX: uniform_int_distribution must be instantiated with an integer
	// result type; <double> violates the distribution's requirements.
	thrust::uniform_int_distribution<int> dist(0, activeMembrNodes - 1);
	int randomNode = dist(rng);
	return (cellRank * maxAllNodePerCell + randomNode);
}
//AAMIRI
// True when the cell has no active internal nodes remaining.
__device__
bool isAllIntnlEmptied(uint& currentIntnlCount) {
	return currentIntnlCount == 0;
}
//AAMIRI
// True when the cell has no active membrane nodes remaining.
__device__
bool isAllMembrEmptied(uint& currentMembrCount) {
	return currentMembrCount == 0;
}
// True when a link length exceeds the device-constant minLength threshold.
__device__
bool longEnough(double& length) {
	return length > minLength;
}
// Euclidean distance between the 2D points (xPos, yPos) and (xPos2, yPos2).
__device__
double compDist2D(double &xPos, double &yPos, double &xPos2, double &yPos2) {
	double dx = xPos - xPos2;
	double dy = yPos - yPos2;
	return sqrt(dx * dx + dy * dy);
}
// Marks every boundary node as active: fills nodeIsActive with true over
// [0, startPosProfile), the slice that holds boundary nodes.
void SceCells::distributeBdryIsActiveInfo() {
thrust::fill(nodes->getInfoVecs().nodeIsActive.begin(),
nodes->getInfoVecs().nodeIsActive.begin()
+ allocPara.startPosProfile, true);
}
// Marks the currently active profile nodes as active: fills the slice of
// nodeIsActive starting at startPosProfile, of length
// currentActiveProfileNodeCount, with true.
void SceCells::distributeProfileIsActiveInfo() {
thrust::fill(
nodes->getInfoVecs().nodeIsActive.begin()
+ allocPara.startPosProfile,
nodes->getInfoVecs().nodeIsActive.begin()
+ allocPara.startPosProfile
+ nodes->getAllocPara().currentActiveProfileNodeCount,
true);
}
// Marks all nodes of the currently active ECM segments as active: fills the
// slice of nodeIsActive starting at startPosECM, of length
// currentActiveECM * maxNodePerECM, with true.
void SceCells::distributeECMIsActiveInfo() {
	uint totalNodeCountForActiveECM = allocPara.currentActiveECM
			* allocPara.maxNodePerECM;
	// (removed two unused counting iterators that were never referenced)
	thrust::fill(
			nodes->getInfoVecs().nodeIsActive.begin() + allocPara.startPosECM,
			nodes->getInfoVecs().nodeIsActive.begin()
					+ totalNodeCountForActiveECM + allocPara.startPosECM, true);
}
// Recomputes the active flag for every cell-node slot: a node is active
// iff its rank within its cell (slot % maxNodeOfOneCell) is less than that
// cell's activeNodeCountOfThisCell, written via thrust::less<uint>.
void SceCells::distributeCellIsActiveInfo() {
totalNodeCountForActiveCells = allocPara.currentActiveCellCount
* allocPara.maxNodeOfOneCell;
thrust::counting_iterator<uint> countingBegin(0);
thrust::counting_iterator<uint> countingEnd(totalNodeCountForActiveCells);
thrust::transform(
// first input: node rank within its cell
thrust::make_transform_iterator(countingBegin,
ModuloFunctor(allocPara.maxNodeOfOneCell)),
thrust::make_transform_iterator(countingEnd,
ModuloFunctor(allocPara.maxNodeOfOneCell)),
// second input: the owning cell's active node count
thrust::make_permutation_iterator(
cellInfoVecs.activeNodeCountOfThisCell.begin(),
make_transform_iterator(countingBegin,
DivideFunctor(allocPara.maxNodeOfOneCell))),
nodes->getInfoVecs().nodeIsActive.begin() + allocPara.startPosCells,
thrust::less<uint>());
}
// Broadcasts each cell's scalar growthProgress to all of that cell's node
// slots (nodeGrowPro), using a permutation iterator keyed by
// slot / maxNodeOfOneCell.
void SceCells::distributeCellGrowthProgress() {
totalNodeCountForActiveCells = allocPara.currentActiveCellCount
* allocPara.maxNodeOfOneCell;
thrust::counting_iterator<uint> countingBegin(0);
thrust::counting_iterator<uint> countingEnd(totalNodeCountForActiveCells);
thrust::copy(
thrust::make_permutation_iterator(
cellInfoVecs.growthProgress.begin(),
make_transform_iterator(countingBegin,
DivideFunctor(allocPara.maxNodeOfOneCell))),
thrust::make_permutation_iterator(
cellInfoVecs.growthProgress.begin(),
make_transform_iterator(countingEnd,
DivideFunctor(allocPara.maxNodeOfOneCell))),
nodes->getInfoVecs().nodeGrowPro.begin() + allocPara.startPosCells);
}
// Loads all membrane-related parameters from the global configuration
// (globalConfigVars): equilibrium length, stiffness (normal and mitotic),
// growth coefficients/limits, external-force incline, bending coefficients,
// adjustment limits, and initial node counts.
void MembrPara::initFromConfig() {
membrEquLenCPU = globalConfigVars.getConfigValue("MembrEquLen").toDouble();
membrStiffCPU = globalConfigVars.getConfigValue("MembrStiff").toDouble();
membrStiff_Mitotic = globalConfigVars.getConfigValue("MembrStiff_Mitotic").toDouble(); //Ali June30
membrGrowCoeff_Ori =
globalConfigVars.getConfigValue("MembrGrowCoeff").toDouble();
membrGrowLimit_Ori =
globalConfigVars.getConfigValue("MembrGrowLimit").toDouble();
// working values start at the configured originals
membrGrowCoeff = membrGrowCoeff_Ori;
membrGrowLimit = membrGrowLimit_Ori;
//Ali
F_Ext_Incline =
globalConfigVars.getConfigValue("FExtIncline").toDouble();
//Ali
membrBendCoeff =
globalConfigVars.getConfigValue("MembrBenCoeff").toDouble();
//AAMIRI
membrBendCoeff_Mitotic =
globalConfigVars.getConfigValue("MembrBenCoeff_Mitotic").toDouble();
adjustLimit =
globalConfigVars.getConfigValue("MembrAdjustLimit").toDouble();
adjustCoeff =
globalConfigVars.getConfigValue("MembrAdjustCoeff").toDouble();
growthConst_N =
globalConfigVars.getConfigValue("MembrGrowthConst").toDouble();
initMembrCt_N =
globalConfigVars.getConfigValue("InitMembrNodeCount").toInt();
initIntnlCt_N =
globalConfigVars.getConfigValue("InitCellNodeCount").toInt();
}
// Default constructor: performs no initialization besides logging.
// NOTE(review): InitTimeStage is printed here but not set by this
// constructor — presumably a member with its own default; confirm.
SceCells::SceCells() {
//curTime = 0 + 55800.0;//AAMIRI // Ali I comment that our safely on 04/04/2017
std ::cout << "I am in SceCells constructor with zero element "<<InitTimeStage<<std::endl ;
}
// One growth step of the legacy (non-membrane) cell model: randomizes
// growth direction/speed, advances growth progress by d_t, stretches cells
// toward their target length, applies chemotaxis, and adds a new node to
// any cell scheduled to grow.
// NOTE(review): parameter d_t is unused here — updateGrowthProgress() uses
// the member `dt` instead; confirm they are kept in sync by the caller.
void SceCells::growAtRandom(double d_t) {
totalNodeCountForActiveCells = allocPara.currentActiveCellCount
* allocPara.maxNodeOfOneCell;
// randomly select growth direction and speed.
randomizeGrowth();
//std::cout << "after copy grow info" << std::endl;
updateGrowthProgress();
//std::cout << "after update growth progress" << std::endl;
decideIsScheduleToGrow();
//std::cout << "after decode os schedule to grow" << std::endl;
computeCellTargetLength();
//std::cout << "after compute cell target length" << std::endl;
computeDistToCellCenter();
//std::cout << "after compute dist to center" << std::endl;
findMinAndMaxDistToCenter();
//std::cout << "after find min and max dist" << std::endl;
computeLenDiffExpCur();
//std::cout << "after compute diff " << std::endl;
stretchCellGivenLenDiff();
//std::cout << "after apply stretch force" << std::endl;
cellChemotaxis();
//std::cout << "after apply cell chemotaxis" << std::endl;
addPointIfScheduledToGrow();
//std::cout << "after adding node" << std::endl;
}
/**
 * Advances each active cell's growthProgress by growthSpeed * dt, in place,
 * clamped to a maximum of 1.0 by SaxpyFunctorWithMaxOfOne.
 */
void SceCells::updateGrowthProgress() {
thrust::transform(cellInfoVecs.growthSpeed.begin(),
cellInfoVecs.growthSpeed.begin() + allocPara.currentActiveCellCount,
cellInfoVecs.growthProgress.begin(),
cellInfoVecs.growthProgress.begin(), SaxpyFunctorWithMaxOfOne(dt));
}
/**
 * Decides per cell whether to schedule adding a node: PtCondiOp compares
 * each cell's (growthProgress, lastCheckPoint) pair against the growth
 * threshold and writes the boolean into isScheduledToGrow.
 */
void SceCells::decideIsScheduleToGrow() {
thrust::transform(
thrust::make_zip_iterator(
thrust::make_tuple(cellInfoVecs.growthProgress.begin(),
cellInfoVecs.lastCheckPoint.begin())),
thrust::make_zip_iterator(
thrust::make_tuple(cellInfoVecs.growthProgress.begin(),
cellInfoVecs.lastCheckPoint.begin()))
+ allocPara.currentActiveCellCount,
cellInfoVecs.isScheduledToGrow.begin(),
PtCondiOp(miscPara.growThreshold));
}
/**
 * Computes each cell's target length along its growth direction from its
 * growth progress, interpolating between cellInitLength and cellFinalLength
 * (CompuTarLen). Result goes into expectedLength.
 */
void SceCells::computeCellTargetLength() {
thrust::transform(cellInfoVecs.growthProgress.begin(),
cellInfoVecs.growthProgress.begin()
+ allocPara.currentActiveCellCount,
cellInfoVecs.expectedLength.begin(),
CompuTarLen(bioPara.cellInitLength, bioPara.cellFinalLength));
}
/**
 * Computes the signed distance of each cell node to its cell center,
 * projected onto the cell's growth direction (CompuDist). The zip input
 * broadcasts per-cell values (centerCoordX/Y, growthXDir/YDir) to each of
 * the cell's node slots via permutation iterators keyed by
 * slot / maxNodeOfOneCell, alongside the node's position and active flag.
 * Result goes into cellNodeInfoVecs.distToCenterAlongGrowDir; it can be
 * negative for nodes behind the center relative to the growth direction.
 */
void SceCells::computeDistToCellCenter() {
thrust::transform(
thrust::make_zip_iterator(
thrust::make_tuple(
// per-node broadcast of the owning cell's center X
make_permutation_iterator(
cellInfoVecs.centerCoordX.begin(),
make_transform_iterator(countingBegin,
DivideFunctor(
allocPara.maxNodeOfOneCell))),
// per-node broadcast of the owning cell's center Y
make_permutation_iterator(
cellInfoVecs.centerCoordY.begin(),
make_transform_iterator(countingBegin,
DivideFunctor(
allocPara.maxNodeOfOneCell))),
// per-node broadcast of the growth direction (X, Y)
make_permutation_iterator(
cellInfoVecs.growthXDir.begin(),
make_transform_iterator(countingBegin,
DivideFunctor(
allocPara.maxNodeOfOneCell))),
make_permutation_iterator(
cellInfoVecs.growthYDir.begin(),
make_transform_iterator(countingBegin,
DivideFunctor(
allocPara.maxNodeOfOneCell))),
nodes->getInfoVecs().nodeLocX.begin()
+ allocPara.startPosCells,
nodes->getInfoVecs().nodeLocY.begin()
+ allocPara.startPosCells,
nodes->getInfoVecs().nodeIsActive.begin()
+ allocPara.startPosCells)),
// end iterator: identical tuple advanced by the total node count
thrust::make_zip_iterator(
thrust::make_tuple(
make_permutation_iterator(
cellInfoVecs.centerCoordX.begin(),
make_transform_iterator(countingBegin,
DivideFunctor(
allocPara.maxNodeOfOneCell))),
make_permutation_iterator(
cellInfoVecs.centerCoordY.begin(),
make_transform_iterator(countingBegin,
DivideFunctor(
allocPara.maxNodeOfOneCell))),
make_permutation_iterator(
cellInfoVecs.growthXDir.begin(),
make_transform_iterator(countingBegin,
DivideFunctor(
allocPara.maxNodeOfOneCell))),
make_permutation_iterator(
cellInfoVecs.growthYDir.begin(),
make_transform_iterator(countingBegin,
DivideFunctor(
allocPara.maxNodeOfOneCell))),
nodes->getInfoVecs().nodeLocX.begin()
+ allocPara.startPosCells,
nodes->getInfoVecs().nodeLocY.begin()
+ allocPara.startPosCells,
nodes->getInfoVecs().nodeIsActive.begin()
+ allocPara.startPosCells))
+ totalNodeCountForActiveCells,
cellNodeInfoVecs.distToCenterAlongGrowDir.begin(), CompuDist());
}
/**
 * Per-cell reduction over distToCenterAlongGrowDir: two reduce_by_key
 * passes keyed by cell rank (slot / maxNodeOfOneCell) compute each cell's
 * minimum (-> smallestDistance) and maximum (-> biggestDistance) signed
 * distance. Together they give the cell's current extent along its growth
 * direction.
 */
void SceCells::findMinAndMaxDistToCenter() {
thrust::reduce_by_key(
make_transform_iterator(countingBegin,
DivideFunctor(allocPara.maxNodeOfOneCell)),
make_transform_iterator(countingBegin,
DivideFunctor(allocPara.maxNodeOfOneCell))
+ totalNodeCountForActiveCells,
cellNodeInfoVecs.distToCenterAlongGrowDir.begin(),
cellInfoVecs.cellRanksTmpStorage.begin(),
cellInfoVecs.smallestDistance.begin(), thrust::equal_to<uint>(),
thrust::minimum<double>());
// for nodes of each cell, find the maximum distance from the node to the corresponding
// cell center along the pre-defined growth direction.
thrust::reduce_by_key(
make_transform_iterator(countingBegin,
DivideFunctor(allocPara.maxNodeOfOneCell)),
make_transform_iterator(countingBegin,
DivideFunctor(allocPara.maxNodeOfOneCell))
+ totalNodeCountForActiveCells,
cellNodeInfoVecs.distToCenterAlongGrowDir.begin(),
cellInfoVecs.cellRanksTmpStorage.begin(),
cellInfoVecs.biggestDistance.begin(), thrust::equal_to<uint>(),
thrust::maximum<double>());
}
/**
 * Computes per cell the difference between the expected length and the
 * current length (derived from smallest/biggest distance to center) via
 * CompuDiff; result goes into lengthDifference.
 */
void SceCells::computeLenDiffExpCur() {
thrust::transform(
thrust::make_zip_iterator(
thrust::make_tuple(cellInfoVecs.expectedLength.begin(),
cellInfoVecs.smallestDistance.begin(),
cellInfoVecs.biggestDistance.begin())),
thrust::make_zip_iterator(
thrust::make_tuple(cellInfoVecs.expectedLength.begin(),
cellInfoVecs.smallestDistance.begin(),
cellInfoVecs.biggestDistance.begin()))
+ allocPara.currentActiveCellCount,
cellInfoVecs.lengthDifference.begin(), CompuDiff());
}
/**
 * Applies a stretching velocity to every cell node based on the cell's
 * length difference and growth direction (ApplyStretchForce, scaled by
 * elongationCoefficient). Per-cell quantities (lengthDifference,
 * growthXDir/YDir) are broadcast to node slots via permutation iterators;
 * node velocities are read and written in place (nodeVelX/nodeVelY).
 */
void SceCells::stretchCellGivenLenDiff() {
thrust::transform(
thrust::make_zip_iterator(
thrust::make_tuple(
cellNodeInfoVecs.distToCenterAlongGrowDir.begin(),
// broadcast of the owning cell's length difference
make_permutation_iterator(
cellInfoVecs.lengthDifference.begin(),
make_transform_iterator(countingBegin,
DivideFunctor(
allocPara.maxNodeOfOneCell))),
// broadcast of the growth direction (X, Y)
make_permutation_iterator(
cellInfoVecs.growthXDir.begin(),
make_transform_iterator(countingBegin,
DivideFunctor(
allocPara.maxNodeOfOneCell))),
make_permutation_iterator(
cellInfoVecs.growthYDir.begin(),
make_transform_iterator(countingBegin,
DivideFunctor(
allocPara.maxNodeOfOneCell))),
nodes->getInfoVecs().nodeVelX.begin()
+ allocPara.startPosCells,
nodes->getInfoVecs().nodeVelY.begin()
+ allocPara.startPosCells)),
// end iterator: identical tuple advanced by the total node count
thrust::make_zip_iterator(
thrust::make_tuple(
cellNodeInfoVecs.distToCenterAlongGrowDir.begin(),
make_permutation_iterator(
cellInfoVecs.lengthDifference.begin(),
make_transform_iterator(countingBegin,
DivideFunctor(
allocPara.maxNodeOfOneCell))),
make_permutation_iterator(
cellInfoVecs.growthXDir.begin(),
make_transform_iterator(countingBegin,
DivideFunctor(
allocPara.maxNodeOfOneCell))),
make_permutation_iterator(
cellInfoVecs.growthYDir.begin(),
make_transform_iterator(countingBegin,
DivideFunctor(
allocPara.maxNodeOfOneCell))),
nodes->getInfoVecs().nodeVelX.begin()
+ allocPara.startPosCells,
nodes->getInfoVecs().nodeVelY.begin()
+ allocPara.startPosCells))
+ totalNodeCountForActiveCells,
thrust::make_zip_iterator(
thrust::make_tuple(
nodes->getInfoVecs().nodeVelX.begin()
+ allocPara.startPosCells,
nodes->getInfoVecs().nodeVelY.begin()
+ allocPara.startPosCells)),
ApplyStretchForce(bioPara.elongationCoefficient));
}
/**
 * Adds a chemotaxis velocity to every cell node: ApplyChemoVel (scaled by
 * chemoCoefficient) combines the owning cell's growthSpeed and growth
 * direction with the node's current velocity. Per-cell quantities are
 * broadcast to node slots via permutation iterators; velocities are
 * updated in place.
 */
void SceCells::cellChemotaxis() {
thrust::transform(
thrust::make_zip_iterator(
thrust::make_tuple(
// broadcast of the owning cell's growth speed
make_permutation_iterator(
cellInfoVecs.growthSpeed.begin(),
make_transform_iterator(countingBegin,
DivideFunctor(
allocPara.maxNodeOfOneCell))),
// broadcast of the growth direction (X, Y)
make_permutation_iterator(
cellInfoVecs.growthXDir.begin(),
make_transform_iterator(countingBegin,
DivideFunctor(
allocPara.maxNodeOfOneCell))),
make_permutation_iterator(
cellInfoVecs.growthYDir.begin(),
make_transform_iterator(countingBegin,
DivideFunctor(
allocPara.maxNodeOfOneCell))),
nodes->getInfoVecs().nodeVelX.begin()
+ allocPara.startPosCells,
nodes->getInfoVecs().nodeVelY.begin()
+ allocPara.startPosCells)),
// end iterator: identical tuple advanced by the total node count
thrust::make_zip_iterator(
thrust::make_tuple(
make_permutation_iterator(
cellInfoVecs.growthSpeed.begin(),
make_transform_iterator(countingBegin,
DivideFunctor(
allocPara.maxNodeOfOneCell))),
make_permutation_iterator(
cellInfoVecs.growthXDir.begin(),
make_transform_iterator(countingBegin,
DivideFunctor(
allocPara.maxNodeOfOneCell))),
make_permutation_iterator(
cellInfoVecs.growthYDir.begin(),
make_transform_iterator(countingBegin,
DivideFunctor(
allocPara.maxNodeOfOneCell))),
nodes->getInfoVecs().nodeVelX.begin()
+ allocPara.startPosCells,
nodes->getInfoVecs().nodeVelY.begin()
+ allocPara.startPosCells))
+ totalNodeCountForActiveCells,
thrust::make_zip_iterator(
thrust::make_tuple(
nodes->getInfoVecs().nodeVelX.begin()
+ allocPara.startPosCells,
nodes->getInfoVecs().nodeVelY.begin()
+ allocPara.startPosCells)),
ApplyChemoVel(bioPara.chemoCoefficient));
}
/**
 * Adjusts node velocities after force accumulation. VelocityModifier reads
 * each node's (velX, velY, isActive, cellType, global index) and rewrites
 * (velX, velY) — e.g. boundary nodes must not move. The range covers all
 * nodes up to startPosCells + totalNodeCountForActiveCells.
 */
void SceCells::adjustNodeVel() {
	thrust::counting_iterator<uint> countingIterBegin(0);
	// (removed unused local countingIterEnd — the end of the range is
	// expressed by advancing the begin iterator below)
	thrust::transform(
			thrust::make_zip_iterator(
					thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(),
							nodes->getInfoVecs().nodeVelY.begin(),
							nodes->getInfoVecs().nodeIsActive.begin(),
							nodes->getInfoVecs().nodeCellType.begin(),
							countingIterBegin)),
			thrust::make_zip_iterator(
					thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(),
							nodes->getInfoVecs().nodeVelY.begin(),
							nodes->getInfoVecs().nodeIsActive.begin(),
							nodes->getInfoVecs().nodeCellType.begin(),
							countingIterBegin)) + totalNodeCountForActiveCells
					+ allocPara.startPosCells,
			thrust::make_zip_iterator(
					thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(),
							nodes->getInfoVecs().nodeVelY.begin())),
			VelocityModifier(allocPara.startPosProfile,
					allocPara.currentActiveProfileNodeCount));
}
/**
 * Explicit Euler position update: adds velocity * dt to every node
 * position (SaxpyFunctorDim2), over all nodes up to
 * startPosCells + totalNodeCountForActiveCells.
 */
void SceCells::moveNodes() {
thrust::transform(
thrust::make_zip_iterator(
thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(),
nodes->getInfoVecs().nodeVelY.begin())),
thrust::make_zip_iterator(
thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(),
nodes->getInfoVecs().nodeVelY.begin()))
+ totalNodeCountForActiveCells + allocPara.startPosCells,
thrust::make_zip_iterator(
thrust::make_tuple(nodes->getInfoVecs().nodeLocX.begin(),
nodes->getInfoVecs().nodeLocY.begin())),
thrust::make_zip_iterator(
thrust::make_tuple(nodes->getInfoVecs().nodeLocX.begin(),
nodes->getInfoVecs().nodeLocY.begin())),
SaxpyFunctorDim2(dt));
}
/**
 * Adds a new node to each cell that is scheduled to grow.
 * Adding may fail (e.g. no valid position found by AddPtOp), in which case
 * isScheduledToGrow and activeNodeCountOfThisCell are left unchanged.
 * AddPtOp works through raw device pointers to the node arrays and is
 * seeded with time(NULL).
 */
void SceCells::addPointIfScheduledToGrow() {
thrust::transform(
thrust::make_zip_iterator(
thrust::make_tuple(cellInfoVecs.isScheduledToGrow.begin(),
cellInfoVecs.activeNodeCountOfThisCell.begin(),
cellInfoVecs.centerCoordX.begin(),
cellInfoVecs.centerCoordY.begin(), countingBegin,
cellInfoVecs.lastCheckPoint.begin())),
thrust::make_zip_iterator(
thrust::make_tuple(cellInfoVecs.isScheduledToGrow.begin(),
cellInfoVecs.activeNodeCountOfThisCell.begin(),
cellInfoVecs.centerCoordX.begin(),
cellInfoVecs.centerCoordY.begin(), countingBegin,
cellInfoVecs.lastCheckPoint.begin()))
+ allocPara.currentActiveCellCount,
// outputs are updated in place for cells where adding succeeded
thrust::make_zip_iterator(
thrust::make_tuple(cellInfoVecs.isScheduledToGrow.begin(),
cellInfoVecs.activeNodeCountOfThisCell.begin(),
cellInfoVecs.lastCheckPoint.begin())),
AddPtOp(allocPara.maxNodeOfOneCell, miscPara.addNodeDistance,
miscPara.minDistanceToOtherNode,
growthAuxData.nodeIsActiveAddress,
growthAuxData.nodeXPosAddress,
growthAuxData.nodeYPosAddress, time(NULL),
miscPara.growThreshold));
}
//Ali commented this constructor in 04/04/2017
// Legacy constructor: initializes from a SceNodes instance plus per-cell
// initial active node counts and cell types; distributes active-node flags.
// Internal node count starts at half the per-cell node capacity.
SceCells::SceCells(SceNodes* nodesInput,
std::vector<uint>& numOfInitActiveNodesOfCells,
std::vector<SceNodeType>& cellTypes) :
countingBegin(0), initIntnlNodeCount(
nodesInput->getAllocPara().maxNodeOfOneCell / 2), initGrowthProgress(
0.0) {
curTime = 0.0 + 55800.0;//AAMIRI
std ::cout << "I am in SceCells constructor with polymorphism shape "<<InitTimeStage<<std::endl ;
initialize(nodesInput);
copyInitActiveNodeCount(numOfInitActiveNodesOfCells);
// copy cell types to the device before handing them to setCellTypes
thrust::device_vector<SceNodeType> cellTypesToPass = cellTypes;
setCellTypes(cellTypesToPass);
distributeIsActiveInfo();
}
// Membrane-model (_M) constructor: seeds curTime from InitTimeStage, loads
// membrane parameters from the global config, initializes the _M data
// structures, uploads the __constant__ parameters to the device, and copies
// the initial membrane/internal node counts and growth progress.
SceCells::SceCells(SceNodes* nodesInput,
std::vector<uint>& initActiveMembrNodeCounts,
std::vector<uint>& initActiveIntnlNodeCounts,
std::vector<double> &initGrowProgVec, double InitTimeStage) {
// curTime = 0.0 + 55800.0;//AAMIRIi
curTime=InitTimeStage ;
std ::cout << "I am in SceCells constructor with number of inputs "<<InitTimeStage<<std::endl ;
lastTimeExchange=0 ;
firstTimeReadDpp=true ;
//currentActiveCellCountOld=1 ; // small number
tmpDebug = false;
aniDebug = false;
membrPara.initFromConfig();
shrinkRatio = globalConfigVars.getConfigValue("ShrinkRatio").toDouble();
centerShiftRatio =
globalConfigVars.getConfigValue("CenterShiftRatio").toDouble();
memNewSpacing = globalConfigVars.getConfigValue("MembrLenDiv").toDouble();
initialize_M(nodesInput);
cout<< "size of dpp in constructor is "<< cellInfoVecs.cell_Dpp.size() << endl ;
// push membrane/SCE parameters into device __constant__ memory
copyToGPUConstMem();
copyInitActiveNodeCount_M(initActiveMembrNodeCounts,
initActiveIntnlNodeCounts, initGrowProgVec);
}
/**
 * Allocates every per-cell vector of the disc model at full capacity
 * (allocPara.maxCellCount) so that cell division never needs a reallocation,
 * seeding the vectors that have meaningful defaults.
 */
void SceCells::initCellInfoVecs() {
	const uint cellCap = allocPara.maxCellCount;
	cellInfoVecs.growthProgress.resize(cellCap, 0.0);
	cellInfoVecs.expectedLength.resize(cellCap, bioPara.cellInitLength);
	cellInfoVecs.lengthDifference.resize(cellCap, 0.0);
	cellInfoVecs.smallestDistance.resize(cellCap);
	cellInfoVecs.biggestDistance.resize(cellCap);
	cellInfoVecs.activeNodeCountOfThisCell.resize(cellCap);
	cellInfoVecs.lastCheckPoint.resize(cellCap, 0.0);
	cellInfoVecs.isDividing.resize(cellCap);
	cellInfoVecs.cellTypes.resize(cellCap, MX);
	cellInfoVecs.isScheduledToGrow.resize(cellCap, false);
	cellInfoVecs.centerCoordX.resize(cellCap);
	cellInfoVecs.centerCoordY.resize(cellCap);
	cellInfoVecs.centerCoordZ.resize(cellCap);
	cellInfoVecs.cellRanksTmpStorage.resize(cellCap);
	cellInfoVecs.growthSpeed.resize(cellCap, 0.0);
	cellInfoVecs.growthXDir.resize(cellCap);
	cellInfoVecs.growthYDir.resize(cellCap);
	cellInfoVecs.isRandGrowInited.resize(cellCap, false);
}
/**
 * Allocates every per-cell vector of the membrane (_M) model at full
 * capacity (allocPara_m.maxCellCount), seeding defaults where they matter
 * (e.g. damping 36.0, Dpp levels 0.0, all boolean flags false).
 */
void SceCells::initCellInfoVecs_M() {
	const uint cellCap = allocPara_m.maxCellCount;
	cellInfoVecs.Cell_Damp.resize(cellCap, 36.0); //Ali
	cellInfoVecs.cell_Dpp.resize(cellCap, 0.0); //Ali
	cellInfoVecs.cell_DppOld.resize(cellCap, 0.0); //Ali
	cellInfoVecs.growthProgress.resize(cellCap, 0.0); //A&A
	cellInfoVecs.growthProgressOld.resize(cellCap, 0.0);//Ali
	cellInfoVecs.Cell_Time.resize(cellCap, 0.0); //Ali
	cellInfoVecs.expectedLength.resize(cellCap, bioPara.cellInitLength);
	cellInfoVecs.lengthDifference.resize(cellCap, 0.0);
	cellInfoVecs.smallestDistance.resize(cellCap);
	cellInfoVecs.biggestDistance.resize(cellCap);
	cellInfoVecs.activeMembrNodeCounts.resize(cellCap);
	cellInfoVecs.activeIntnlNodeCounts.resize(cellCap);
	cellInfoVecs.lastCheckPoint.resize(cellCap, 0.0);
	cellInfoVecs.isDividing.resize(cellCap);
	cellInfoVecs.isEnteringMitotic.resize(cellCap, false); //A&A
	cellInfoVecs.isScheduledToGrow.resize(cellCap, false);
	cellInfoVecs.isScheduledToShrink.resize(cellCap, false);//AAMIRI
	cellInfoVecs.isCellActive.resize(cellCap, false);//AAMIRI
	cellInfoVecs.centerCoordX.resize(cellCap);
	cellInfoVecs.centerCoordY.resize(cellCap);
	cellInfoVecs.centerCoordZ.resize(cellCap);
	cellInfoVecs.HertwigXdir.resize(cellCap, 0.0); //A&A
	cellInfoVecs.HertwigYdir.resize(cellCap, 0.0); //A&A
	cellInfoVecs.cellRanksTmpStorage.resize(cellCap);
	cellInfoVecs.growthSpeed.resize(cellCap, 0.0);
	cellInfoVecs.growthXDir.resize(cellCap);
	cellInfoVecs.growthYDir.resize(cellCap);
	cellInfoVecs.isRandGrowInited.resize(cellCap, false);
	cellInfoVecs.isMembrAddingNode.resize(cellCap, false);
	cellInfoVecs.maxTenIndxVec.resize(cellCap);
	cellInfoVecs.maxTenRiVec.resize(cellCap);
	cellInfoVecs.maxDistToRiVec.resize(cellCap); //Ali
	cellInfoVecs.maxTenRiMidXVec.resize(cellCap);
	cellInfoVecs.maxTenRiMidYVec.resize(cellCap);
	cellInfoVecs.aveTension.resize(cellCap);
	cellInfoVecs.membrGrowProgress.resize(cellCap, 0.0);
	cellInfoVecs.membrGrowSpeed.resize(cellCap, 0.0);
	cellInfoVecs.cellAreaVec.resize(cellCap, 0.0);
	cellInfoVecs.cellPerimVec.resize(cellCap, 0.0);//AAMIRI
	std::cout << "finished " << std::endl;
}
/**
 * Allocates the per-node scratch vectors of the disc model at full node
 * capacity (allocPara.maxTotalCellNodeCount).
 */
void SceCells::initCellNodeInfoVecs() {
	const uint nodeCap = allocPara.maxTotalCellNodeCount;
	cellNodeInfoVecs.cellRanks.resize(nodeCap);
	cellNodeInfoVecs.activeXPoss.resize(nodeCap);
	cellNodeInfoVecs.activeYPoss.resize(nodeCap);
	cellNodeInfoVecs.activeZPoss.resize(nodeCap);
	cellNodeInfoVecs.distToCenterAlongGrowDir.resize(nodeCap);
}
/**
 * Allocates the per-node scratch vectors of the membrane (_M) model at full
 * node capacity (allocPara_m.maxTotalNodeCount).
 */
void SceCells::initCellNodeInfoVecs_M() {
	const uint nodeCap = allocPara_m.maxTotalNodeCount;
	std::cout << "max total node count = " << allocPara_m.maxTotalNodeCount
			<< std::endl;
	cellNodeInfoVecs.cellRanks.resize(nodeCap);
	cellNodeInfoVecs.activeXPoss.resize(nodeCap);
	cellNodeInfoVecs.activeYPoss.resize(nodeCap);
	cellNodeInfoVecs.activeZPoss.resize(nodeCap);
	cellNodeInfoVecs.distToCenterAlongGrowDir.resize(nodeCap);
}
/**
 * Caches raw device pointers to the cell section of the shared node arrays
 * (offset allocPara.startPosCells) and reads growth-speed parameters from
 * the global configuration.  The raw pointers are later handed to device
 * functors such as AddPtOp.
 */
void SceCells::initGrowthAuxData() {
	const uint cellsOffset = allocPara.startPosCells;
	growthAuxData.nodeIsActiveAddress = thrust::raw_pointer_cast(
			&(nodes->getInfoVecs().nodeIsActive[cellsOffset]));
	growthAuxData.nodeXPosAddress = thrust::raw_pointer_cast(
			&(nodes->getInfoVecs().nodeLocX[cellsOffset]));
	growthAuxData.nodeYPosAddress = thrust::raw_pointer_cast(
			&(nodes->getInfoVecs().nodeLocY[cellsOffset]));
	growthAuxData.randomGrowthSpeedMin = globalConfigVars.getConfigValue(
			"RandomGrowthSpeedMin").toDouble();
	growthAuxData.randomGrowthSpeedMax = globalConfigVars.getConfigValue(
			"RandomGrowthSpeedMax").toDouble();
	growthAuxData.randGenAuxPara = globalConfigVars.getConfigValue(
			"RandomGenerationAuxPara").toDouble();
	// single-cell test runs use a fixed, configured growth speed instead
	if (controlPara.simuType == SingleCellTest) {
		growthAuxData.fixedGrowthSpeed = globalConfigVars.getConfigValue(
				"FixedGrowthSpeed").toDouble();
	}
}
/**
 * Membrane-model variant of initGrowthAuxData: caches raw device pointers
 * past the boundary nodes (offset allocPara_m.bdryNodeCount), including the
 * adhesion index array, and reads growth-speed limits from configuration.
 * The *_Ori values preserve the configured speeds so they can later be
 * rescaled (e.g. by proliferation decay) without losing the originals.
 */
void SceCells::initGrowthAuxData_M() {
	const uint bdryOffset = allocPara_m.bdryNodeCount;
	growthAuxData.nodeIsActiveAddress = thrust::raw_pointer_cast(
			&(nodes->getInfoVecs().nodeIsActive[bdryOffset]));
	growthAuxData.nodeXPosAddress = thrust::raw_pointer_cast(
			&(nodes->getInfoVecs().nodeLocX[bdryOffset]));
	growthAuxData.nodeYPosAddress = thrust::raw_pointer_cast(
			&(nodes->getInfoVecs().nodeLocY[bdryOffset]));
	growthAuxData.adhIndxAddr = thrust::raw_pointer_cast(
			&(nodes->getInfoVecs().nodeAdhereIndex[bdryOffset]));
	growthAuxData.randomGrowthSpeedMin_Ori = globalConfigVars.getConfigValue(
			"RandomGrowthSpeedMin").toDouble();
	growthAuxData.randomGrowthSpeedMax_Ori = globalConfigVars.getConfigValue(
			"RandomGrowthSpeedMax").toDouble();
	growthAuxData.randomGrowthSpeedMin = growthAuxData.randomGrowthSpeedMin_Ori;
	growthAuxData.randomGrowthSpeedMax = growthAuxData.randomGrowthSpeedMax_Ori;
	growthAuxData.grthPrgrCriVal_M_Ori = globalConfigVars.getConfigValue(
			"GrowthPrgrCriVal").toDouble();
	growthAuxData.grthProgrEndCPU = globalConfigVars.getConfigValue(
			"GrowthPrgrValEnd").toDouble();
}
/**
 * Initialization for the non-membrane (disc) model: wires this object to the
 * SceNodes container, reads parameters, allocates per-cell and per-node
 * vectors, caches raw device pointers, and assigns cell ranks to nodes.
 */
void SceCells::initialize(SceNodes* nodesInput) {
	nodes = nodesInput;
	controlPara = nodes->getControlPara();
	readMiscPara();
	readBioPara();
	allocPara = nodesInput->getAllocPara();
	// Max node count per cell must be even: a freshly divided cell resumes
	// with maxNodeOfOneCell / 2 active nodes (see copyFirstArrayToPreviousPos).
	// BUGFIX: this previously asserted on allocPara_m.maxIntnlNodePerCell,
	// but allocPara_m is only populated by initialize_M and is never set on
	// this code path; check the field this path actually uses.
	assert(allocPara.maxNodeOfOneCell % 2 == 0);
	initCellInfoVecs();
	initCellNodeInfoVecs();
	initGrowthAuxData();
	distributeIsCellRank();
}
/**
 * Initialization for the membrane (_M) model: wires this object to the
 * SceNodes container, reads parameters, and allocates every _M vector.
 */
void SceCells::initialize_M(SceNodes* nodesInput) {
	std::cout << "Initializing cells ...... " << std::endl;
	nodes = nodesInput;
	allocPara_m = nodesInput->getAllocParaM();
	// a freshly divided cell starts with half of the maximum internal node
	// count, so that maximum must be even
	assert(allocPara_m.maxIntnlNodePerCell % 2 == 0);
	controlPara = nodes->getControlPara();
	readMiscPara_M();
	initCellInfoVecs_M();
	cout<< "size of dpp initilizie is "<< cellInfoVecs.cell_Dpp.size() << endl ;
	readBioPara();
	initCellNodeInfoVecs_M();
	initGrowthAuxData_M();
}
/**
 * Uploads the per-cell initial active node counts from the host vector into
 * the device-side cellInfoVecs.activeNodeCountOfThisCell.
 */
void SceCells::copyInitActiveNodeCount(
		std::vector<uint>& numOfInitActiveNodesOfCells) {
	thrust::copy_n(numOfInitActiveNodesOfCells.begin(),
			numOfInitActiveNodesOfCells.size(),
			cellInfoVecs.activeNodeCountOfThisCell.begin());
}
/**
 * One movement step for all components: adjust node velocities, then
 * integrate node positions.
 */
void SceCells::allComponentsMove() {
	adjustNodeVel();
	moveNodes();
}
/**
 * Mark each cell node as either active or inactive.
 * left part of the node array will be active and right part will be inactive.
 * the threshold is defined by array activeNodeCountOfThisCell.
 * e.g. activeNodeCountOfThisCell = {2,3} and maxNodeOfOneCell = 5
 */
/**
 * Refreshes the active/inactive flag of every node, one category at a time:
 * boundary, profile, ECM, and finally cell nodes.
 */
void SceCells::distributeIsActiveInfo() {
	distributeBdryIsActiveInfo();
	distributeProfileIsActiveInfo();
	distributeECMIsActiveInfo();
	distributeCellIsActiveInfo();
}
/**
 * Writes the owning cell's rank into nodeCellRank for every node slot of the
 * active cells (node i belongs to cell i / maxNodeOfOneCell).  Slots beyond
 * the active cells are left untouched.
 */
void SceCells::distributeIsCellRank() {
	const uint activeCellNodeCount = allocPara.currentActiveCellCount
			* allocPara.maxNodeOfOneCell;
	thrust::counting_iterator<uint> nodeIdxBegin(0);
	std::cerr << "totalNodeCount for active cells "
			<< activeCellNodeCount << std::endl;
	// only computes the cell ranks of cells; the rest remain unchanged
	thrust::transform(nodeIdxBegin, nodeIdxBegin + activeCellNodeCount,
			nodes->getInfoVecs().nodeCellRank.begin() + allocPara.startPosCells,
			DivideFunctor(allocPara.maxNodeOfOneCell));
	std::cerr << "finished cellRank transformation" << std::endl;
}
/**
 * This method computes the center of each cell.
 * More efficient than simply iterating over the cells because of parallel reduction.
 */
/**
 * Computes each active cell's center as the arithmetic mean of its active
 * nodes' coordinates, in three device passes:
 *   1. copy_if compacts (cellRank, x, y, z) of every *active* node into
 *      contiguous scratch arrays (cellRanks are nondecreasing because the
 *      source indices are consecutive);
 *   2. reduce_by_key sums the coordinates per cell rank;
 *   3. transform divides each sum by that cell's active node count.
 */
void SceCells::computeCenterPos() {
	// total node slots (active + inactive) owned by the active cells
	uint totalNodeCountForActiveCells = allocPara.currentActiveCellCount
			* allocPara.maxNodeOfOneCell;
	thrust::counting_iterator<uint> countingBegin(0);
	thrust::counting_iterator<uint> countingEnd(totalNodeCountForActiveCells);
	// number of entries copy_if below will actually produce
	uint totalNumberOfActiveNodes = thrust::reduce(
			cellInfoVecs.activeNodeCountOfThisCell.begin(),
			cellInfoVecs.activeNodeCountOfThisCell.begin()
					+ allocPara.currentActiveCellCount);
	// pass 1: compact active-node data, using nodeIsActive as the stencil
	thrust::copy_if(
			thrust::make_zip_iterator(
					thrust::make_tuple(
							make_transform_iterator(countingBegin,
									DivideFunctor(allocPara.maxNodeOfOneCell)),
							nodes->getInfoVecs().nodeLocX.begin()
									+ allocPara.startPosCells,
							nodes->getInfoVecs().nodeLocY.begin()
									+ allocPara.startPosCells,
							nodes->getInfoVecs().nodeLocZ.begin()
									+ allocPara.startPosCells)),
			thrust::make_zip_iterator(
					thrust::make_tuple(
							make_transform_iterator(countingBegin,
									DivideFunctor(allocPara.maxNodeOfOneCell)),
							nodes->getInfoVecs().nodeLocX.begin()
									+ allocPara.startPosCells,
							nodes->getInfoVecs().nodeLocY.begin()
									+ allocPara.startPosCells,
							nodes->getInfoVecs().nodeLocZ.begin()
									+ allocPara.startPosCells))
					+ totalNodeCountForActiveCells,
			nodes->getInfoVecs().nodeIsActive.begin() + allocPara.startPosCells,
			thrust::make_zip_iterator(
					thrust::make_tuple(cellNodeInfoVecs.cellRanks.begin(),
							cellNodeInfoVecs.activeXPoss.begin(),
							cellNodeInfoVecs.activeYPoss.begin(),
							cellNodeInfoVecs.activeZPoss.begin())), isTrue());
	// pass 2: per-cell coordinate sums keyed by cell rank
	thrust::reduce_by_key(cellNodeInfoVecs.cellRanks.begin(),
			cellNodeInfoVecs.cellRanks.begin() + totalNumberOfActiveNodes,
			thrust::make_zip_iterator(
					thrust::make_tuple(cellNodeInfoVecs.activeXPoss.begin(),
							cellNodeInfoVecs.activeYPoss.begin(),
							cellNodeInfoVecs.activeZPoss.begin())),
			cellInfoVecs.cellRanksTmpStorage.begin(),
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.centerCoordX.begin(),
							cellInfoVecs.centerCoordY.begin(),
							cellInfoVecs.centerCoordZ.begin())),
			thrust::equal_to<uint>(), CVec3Add());
	// pass 3: sum / count -> mean (in place in centerCoordX/Y/Z)
	thrust::transform(
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.centerCoordX.begin(),
							cellInfoVecs.centerCoordY.begin(),
							cellInfoVecs.centerCoordZ.begin())),
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.centerCoordX.begin(),
							cellInfoVecs.centerCoordY.begin(),
							cellInfoVecs.centerCoordZ.begin()))
					+ allocPara.currentActiveCellCount,
			cellInfoVecs.activeNodeCountOfThisCell.begin(),
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.centerCoordX.begin(),
							cellInfoVecs.centerCoordY.begin(),
							cellInfoVecs.centerCoordZ.begin())), CVec3Divide());
}
/**
* 2D version of cell division.
* Division process is done by creating two temporary vectors to hold the node information
* that are going to divide.
*
* step 1: based on lengthDifference, expectedLength and growthProgress,
* this process determines whether a certain cell is ready to divide and then assign
* a boolean value to isDivided.
*
* step 2. copy those cells that will divide in to the temp vectors created
*
 * step 3. For each cell in the temp vectors, we sort its nodes by its distance to the
 * corresponding cell center.
 * This step is not very efficient when the number of cells going to divide is big,
 * but this is unlikely to happen because cells will divide according to external chemical signaling
 * and each will have different divide progress.
*
* step 4. copy the right part of each cell of the sorted array (temp1) to left part of each cell of
* another array
*
* step 5. transform isActive vector of both temp1 and temp2, making only left part of each cell active.
*
* step 6. insert temp2 to the end of the cell array
*
* step 7. copy temp1 to the previous position of the cell array.
*
* step 8. add activeCellCount of the system.
*
* step 9. mark isDivide of all cells to false.
*/
/**
 * Drives the 2D cell division pipeline (steps 1-9 described in the comment
 * above).  Bails out early when no cell meets the division criteria.
 */
void SceCells::divide2DSimplified() {
	// step 1: flag dividing cells; skip everything if none
	if (!decideIfGoingToDivide()) {
		return;
	}
	copyCellsPreDivision();               // step 2
	sortNodesAccordingToDist();           // step 3
	copyLeftAndRightToSeperateArrays();   // step 4
	transformIsActiveArrayOfBothArrays(); // step 5
	addSecondArrayToCellArray();          // step 6
	copyFirstArrayToPreviousPos();        // step 7
	updateActiveCellCount();              // step 8
	markIsDivideFalse();                  // step 9
}
/**
 * Step 1 of division: for each active cell, CompuIsDivide inspects
 * (lengthDifference, expectedLength, growthProgress, activeNodeCount) and
 * writes (isDividing, growthProgress).  The isDividing flags are then summed
 * to obtain divAuxData.toBeDivideCount.
 * @return true when at least one cell is flagged to divide.
 */
bool SceCells::decideIfGoingToDivide() {
	// step 1
	thrust::transform(
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.lengthDifference.begin(),
							cellInfoVecs.expectedLength.begin(),
							cellInfoVecs.growthProgress.begin(),
							cellInfoVecs.activeNodeCountOfThisCell.begin())),
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.lengthDifference.begin(),
							cellInfoVecs.expectedLength.begin(),
							cellInfoVecs.growthProgress.begin(),
							cellInfoVecs.activeNodeCountOfThisCell.begin()))
					+ allocPara.currentActiveCellCount,
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.isDividing.begin(),
							cellInfoVecs.growthProgress.begin())),
			CompuIsDivide(miscPara.isDivideCriticalRatio,
					allocPara.maxNodeOfOneCell));
	// sum all bool values which indicate whether the cell is going to divide.
	// toBeDivideCount is the total number of cells going to divide.
	divAuxData.toBeDivideCount = thrust::reduce(cellInfoVecs.isDividing.begin(),
			cellInfoVecs.isDividing.begin() + allocPara.currentActiveCellCount,
			(uint) (0));
	if (divAuxData.toBeDivideCount > 0) {
		return true;
	} else {
		return false;
	}
}
/**
 * Step 2 of division: sizes the temporary hold arrays to
 * toBeDivideCount * maxNodeOfOneCell and compacts the node data
 * (cellRank, distToCenterAlongGrowDir, x, y, z, cellType) of every node that
 * belongs to a dividing cell into the *Hold1 arrays.  The stencil is the
 * per-cell isDividing flag, broadcast to node granularity via a
 * permutation iterator over cellRank.
 */
void SceCells::copyCellsPreDivision() {
	// step 2 : copy all cell rank and distance to its corresponding center with divide flag = 1
	totalNodeCountForActiveCells = allocPara.currentActiveCellCount
			* allocPara.maxNodeOfOneCell;
	divAuxData.nodeStorageCount = divAuxData.toBeDivideCount
			* allocPara.maxNodeOfOneCell;
	// hold1: the dividing cells' current data; hold2: scratch for the new
	// daughter cells (filled later), initially inactive
	divAuxData.tmpIsActiveHold1 = thrust::device_vector<bool>(
			divAuxData.nodeStorageCount, true);
	divAuxData.tmpDistToCenter1 = thrust::device_vector<double>(
			divAuxData.nodeStorageCount, 0.0);
	divAuxData.tmpCellRankHold1 = thrust::device_vector<uint>(
			divAuxData.nodeStorageCount, 0.0);
	divAuxData.tmpXValueHold1 = thrust::device_vector<double>(
			divAuxData.nodeStorageCount, 0.0);
	divAuxData.tmpYValueHold1 = thrust::device_vector<double>(
			divAuxData.nodeStorageCount, 0.0);
	divAuxData.tmpZValueHold1 = thrust::device_vector<double>(
			divAuxData.nodeStorageCount, 0.0);
	divAuxData.tmpCellTypes = thrust::device_vector<SceNodeType>(
			divAuxData.nodeStorageCount);
	divAuxData.tmpIsActiveHold2 = thrust::device_vector<bool>(
			divAuxData.nodeStorageCount, false);
	divAuxData.tmpDistToCenter2 = thrust::device_vector<double>(
			divAuxData.nodeStorageCount, 0.0);
	divAuxData.tmpXValueHold2 = thrust::device_vector<double>(
			divAuxData.nodeStorageCount, 0.0);
	divAuxData.tmpYValueHold2 = thrust::device_vector<double>(
			divAuxData.nodeStorageCount, 0.0);
	divAuxData.tmpZValueHold2 = thrust::device_vector<double>(
			divAuxData.nodeStorageCount, 0.0);
	// step 2 , continued: compact the dividing cells' node data into hold1
	thrust::copy_if(
			thrust::make_zip_iterator(
					thrust::make_tuple(
							make_transform_iterator(countingBegin,
									DivideFunctor(allocPara.maxNodeOfOneCell)),
							cellNodeInfoVecs.distToCenterAlongGrowDir.begin(),
							nodes->getInfoVecs().nodeLocX.begin()
									+ allocPara.startPosCells,
							nodes->getInfoVecs().nodeLocY.begin()
									+ allocPara.startPosCells,
							nodes->getInfoVecs().nodeLocZ.begin()
									+ allocPara.startPosCells,
							nodes->getInfoVecs().nodeCellType.begin()
									+ allocPara.startPosCells)),
			thrust::make_zip_iterator(
					thrust::make_tuple(
							make_transform_iterator(countingBegin,
									DivideFunctor(allocPara.maxNodeOfOneCell)),
							cellNodeInfoVecs.distToCenterAlongGrowDir.begin(),
							nodes->getInfoVecs().nodeLocX.begin()
									+ allocPara.startPosCells,
							nodes->getInfoVecs().nodeLocY.begin()
									+ allocPara.startPosCells,
							nodes->getInfoVecs().nodeLocZ.begin()
									+ allocPara.startPosCells,
							nodes->getInfoVecs().nodeCellType.begin()
									+ allocPara.startPosCells))
					+ totalNodeCountForActiveCells,
			thrust::make_permutation_iterator(cellInfoVecs.isDividing.begin(),
					make_transform_iterator(countingBegin,
							DivideFunctor(allocPara.maxNodeOfOneCell))),
			thrust::make_zip_iterator(
					thrust::make_tuple(divAuxData.tmpCellRankHold1.begin(),
							divAuxData.tmpDistToCenter1.begin(),
							divAuxData.tmpXValueHold1.begin(),
							divAuxData.tmpYValueHold1.begin(),
							divAuxData.tmpZValueHold1.begin(),
							divAuxData.tmpCellTypes.begin())), isTrue());
}
/**
 * Performance-wise, this implementation is not the best because one could use a single
 * sort_by_key with a specialized comparison operator. However, this implementation is
 * more robust and won't compromise performance too much.
 */
/**
 * Step 3 of division: independently sort each dividing cell's block of
 * maxNodeOfOneCell nodes by distance to the cell center, permuting the
 * x/y/z coordinate arrays along with the keys.
 */
void SceCells::sortNodesAccordingToDist() {
	//step 3
	const uint span = allocPara.maxNodeOfOneCell;
	for (uint i = 0; i < divAuxData.toBeDivideCount; i++) {
		const uint first = i * span;
		thrust::sort_by_key(divAuxData.tmpDistToCenter1.begin() + first,
				divAuxData.tmpDistToCenter1.begin() + first + span,
				thrust::make_zip_iterator(
						thrust::make_tuple(
								divAuxData.tmpXValueHold1.begin() + first,
								divAuxData.tmpYValueHold1.begin() + first,
								divAuxData.tmpZValueHold1.begin() + first)));
	}
}
/**
* scatter_if() is a thrust function.
* inputIter1 first,
* inputIter1 last,
* inputIter2 map,
* inputIter3 stencil
* randomAccessIter output
*/
/**
 * Step 4 of division: scatter the right half of every dividing cell's sorted
 * node block (stencil IsRightSide) into the left half of the corresponding
 * block in the hold2 arrays (map LeftShiftFunctor), so the daughter cell's
 * nodes start at the beginning of its block.
 */
void SceCells::copyLeftAndRightToSeperateArrays() {
	//step 4.
	thrust::scatter_if(
			thrust::make_zip_iterator(
					thrust::make_tuple(divAuxData.tmpXValueHold1.begin(),
							divAuxData.tmpYValueHold1.begin(),
							divAuxData.tmpZValueHold1.begin())),
			thrust::make_zip_iterator(
					thrust::make_tuple(divAuxData.tmpXValueHold1.end(),
							divAuxData.tmpYValueHold1.end(),
							divAuxData.tmpZValueHold1.end())),
			make_transform_iterator(countingBegin,
					LeftShiftFunctor(allocPara.maxNodeOfOneCell)),
			make_transform_iterator(countingBegin,
					IsRightSide(allocPara.maxNodeOfOneCell)),
			thrust::make_zip_iterator(
					thrust::make_tuple(divAuxData.tmpXValueHold2.begin(),
							divAuxData.tmpYValueHold2.begin(),
							divAuxData.tmpZValueHold2.begin())));
}
/**
 * Step 5 of division: mark only the left half of each cell's node block as
 * active, in both hold arrays (both mother and daughter keep half of the
 * original node count).
 */
void SceCells::transformIsActiveArrayOfBothArrays() {
	thrust::transform(countingBegin,
			countingBegin + divAuxData.nodeStorageCount,
			divAuxData.tmpIsActiveHold1.begin(),
			IsLeftSide(allocPara.maxNodeOfOneCell));
	thrust::transform(countingBegin,
			countingBegin + divAuxData.nodeStorageCount,
			divAuxData.tmpIsActiveHold2.begin(),
			IsLeftSide(allocPara.maxNodeOfOneCell));
	// debug trace: only report when a division is actually happening
	if (divAuxData.toBeDivideCount != 0) {
		std::cout << "before insert, active cell count in nodes:"
				<< nodes->getAllocPara().currentActiveCellCount << std::endl;
	}
}
/**
 * Step 6 of division: hand the daughter cells (hold2 arrays) to the SceNodes
 * container, which appends them to the global cell arrays.
 */
void SceCells::addSecondArrayToCellArray() {
	/// step 6. call SceNodes function to add newly divided cells
	nodes->addNewlyDividedCells(divAuxData.tmpXValueHold2,
			divAuxData.tmpYValueHold2, divAuxData.tmpZValueHold2,
			divAuxData.tmpIsActiveHold2, divAuxData.tmpCellTypes);
}
/**
 * Step 7 of division: write the mother cells' (hold1) node data back to
 * their original slots in the global node arrays (destination index derived
 * from (local index, cellRank) by CompuPos), then reset each divided mother
 * cell's bookkeeping and initialize the appended daughter cells' node counts.
 */
void SceCells::copyFirstArrayToPreviousPos() {
	// scatter hold1 back into the global arrays at the mother cells' slots
	thrust::scatter(
			thrust::make_zip_iterator(
					thrust::make_tuple(divAuxData.tmpIsActiveHold1.begin(),
							divAuxData.tmpXValueHold1.begin(),
							divAuxData.tmpYValueHold1.begin(),
							divAuxData.tmpZValueHold1.begin())),
			thrust::make_zip_iterator(
					thrust::make_tuple(divAuxData.tmpIsActiveHold1.end(),
							divAuxData.tmpXValueHold1.end(),
							divAuxData.tmpYValueHold1.end(),
							divAuxData.tmpZValueHold1.end())),
			thrust::make_transform_iterator(
					thrust::make_zip_iterator(
							thrust::make_tuple(countingBegin,
									divAuxData.tmpCellRankHold1.begin())),
					CompuPos(allocPara.maxNodeOfOneCell)),
			thrust::make_zip_iterator(
					thrust::make_tuple(
							nodes->getInfoVecs().nodeIsActive.begin()
									+ allocPara.startPosCells,
							nodes->getInfoVecs().nodeLocX.begin()
									+ allocPara.startPosCells,
							nodes->getInfoVecs().nodeLocY.begin()
									+ allocPara.startPosCells,
							nodes->getInfoVecs().nodeLocZ.begin()
									+ allocPara.startPosCells)));
	/**
	 * after dividing, the cell should resume the initial
	 * (1) node count, which defaults to be half size of max node count
	 * (2) growth progress, which defaults to 0
	 * (3) last check point, which defaults to 0
	 */
	thrust::scatter_if(
			thrust::make_zip_iterator(
					thrust::make_tuple(initIntnlNodeCount, initGrowthProgress,
							initGrowthProgress)),
			thrust::make_zip_iterator(
					thrust::make_tuple(initIntnlNodeCount, initGrowthProgress,
							initGrowthProgress))
					+ allocPara.currentActiveCellCount, countingBegin,
			cellInfoVecs.isDividing.begin(),
			thrust::make_zip_iterator(
					thrust::make_tuple(
							cellInfoVecs.activeNodeCountOfThisCell.begin(),
							cellInfoVecs.growthProgress.begin(),
							cellInfoVecs.lastCheckPoint.begin())), isTrue());
	// TODO: combine this one with the previous scatter_if to improve efficiency.
	// the newly appended daughter cells also start with half the max nodes
	thrust::fill(
			cellInfoVecs.activeNodeCountOfThisCell.begin()
					+ allocPara.currentActiveCellCount,
			cellInfoVecs.activeNodeCountOfThisCell.begin()
					+ allocPara.currentActiveCellCount
					+ divAuxData.toBeDivideCount,
			allocPara.maxNodeOfOneCell / 2);
}
/**
 * Step 8 of division: grow the active cell count by the number of cells that
 * just divided and propagate the new count to the SceNodes container so both
 * objects stay in sync.
 */
void SceCells::updateActiveCellCount() {
	allocPara.currentActiveCellCount += divAuxData.toBeDivideCount;
	NodeAllocPara nodePara = nodes->getAllocPara();
	nodePara.currentActiveCellCount = allocPara.currentActiveCellCount;
	nodes->setAllocPara(nodePara);
}
/**
 * Step 9 of division: clear the division flag of every active cell.
 */
void SceCells::markIsDivideFalse() {
	thrust::fill_n(cellInfoVecs.isDividing.begin(),
			allocPara.currentActiveCellCount, false);
}
/**
 * Loads the miscellaneous simulation parameters of the disc model from the
 * global configuration and derives the growth threshold.
 */
void SceCells::readMiscPara() {
	miscPara.addNodeDistance = globalConfigVars.getConfigValue(
			"DistanceForAddingNode").toDouble();
	miscPara.minDistanceToOtherNode = globalConfigVars.getConfigValue(
			"MinDistanceToOtherNode").toDouble();
	miscPara.isDivideCriticalRatio = globalConfigVars.getConfigValue(
			"IsDivideCrticalRatio").toDouble();
	// epsilon guards against floating point accumulation: repeated checkpoint
	// additions can reach 0.99999999 (mathematically 1.0) and, without the
	// slack, we would risk adding one extra node.
	const int maxNodeOfOneCell =
			globalConfigVars.getConfigValue("MaxNodePerCell").toInt();
	miscPara.growThreshold = 1.0 / (maxNodeOfOneCell - maxNodeOfOneCell / 2)
			+ epsilon;
}
/**
 * Loads the miscellaneous simulation parameters of the membrane (_M) model,
 * deriving the growth threshold from the internal node cap and additionally
 * reading the proliferation decay coefficient.
 */
void SceCells::readMiscPara_M() {
	miscPara.addNodeDistance = globalConfigVars.getConfigValue(
			"DistanceForAddingNode").toDouble();
	miscPara.minDistanceToOtherNode = globalConfigVars.getConfigValue(
			"MinDistanceToOtherNode").toDouble();
	miscPara.isDivideCriticalRatio = globalConfigVars.getConfigValue(
			"IsDivideCrticalRatio").toDouble();
	// epsilon guards against floating point accumulation: repeated checkpoint
	// additions can reach 0.99999999 (mathematically 1.0) and, without the
	// slack, we would risk adding one extra node.
	const int maxIntnlNodePerCell = globalConfigVars.getConfigValue(
			"MaxIntnlNodeCountPerCell").toInt();
	miscPara.growThreshold = 1.0
			/ (maxIntnlNodePerCell - maxIntnlNodePerCell / 2) + epsilon;
	miscPara.prolifDecayCoeff = globalConfigVars.getConfigValue(
			"ProlifDecayCoeff").toDouble();
}
/**
 * Loads the biological parameters from the global configuration.
 * Cell length/elongation values are only needed outside the membrane model
 * (Disc_M); the chemotaxis coefficient only for the Beak simulation type.
 */
void SceCells::readBioPara() {
	if (controlPara.simuType != Disc_M) {
		bioPara.cellInitLength = globalConfigVars.getConfigValue(
				"CellInitLength").toDouble();
		std::cout << "break point 1 " << bioPara.cellInitLength << std::endl;
		std::cout.flush();
		bioPara.cellFinalLength = globalConfigVars.getConfigValue(
				"CellFinalLength").toDouble();
		std::cout << "break point 2 " << bioPara.cellFinalLength << std::endl;
		std::cout.flush();
		bioPara.elongationCoefficient = globalConfigVars.getConfigValue(
				"ElongateCoefficient").toDouble();
		std::cout << "break point 3 " << bioPara.elongationCoefficient
				<< std::endl;
		std::cout.flush();
	}
	if (controlPara.simuType == Beak) {
		std::cout << "break point 4 " << std::endl;
		std::cout.flush();
		bioPara.chemoCoefficient = globalConfigVars.getConfigValue(
				"ChemoCoefficient").toDouble();
	}
	//std::cin >> jj;
}
/**
 * Assigns a random growth speed and direction to each active cell that has
 * not been initialized yet.  AssignRandIfNotInit reads (centerX, growthXDir,
 * growthYDir, isRandGrowInited, cellIndex) and writes (growthSpeed,
 * growthXDir, growthYDir, isRandGrowInited); cells already initialized are
 * presumably left unchanged by the functor — confirm in AssignRandIfNotInit.
 */
void SceCells::randomizeGrowth() {
	thrust::counting_iterator<uint> countingBegin(0);
	thrust::transform(
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.centerCoordX.begin(),
							cellInfoVecs.growthXDir.begin(),
							cellInfoVecs.growthYDir.begin(),
							cellInfoVecs.isRandGrowInited.begin(),
							countingBegin)),
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.centerCoordX.begin(),
							cellInfoVecs.growthXDir.begin(),
							cellInfoVecs.growthYDir.begin(),
							cellInfoVecs.isRandGrowInited.begin(),
							countingBegin)) + allocPara.currentActiveCellCount,
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.growthSpeed.begin(),
							cellInfoVecs.growthXDir.begin(),
							cellInfoVecs.growthYDir.begin(),
							cellInfoVecs.isRandGrowInited.begin())),
			AssignRandIfNotInit(growthAuxData.randomGrowthSpeedMin,
					growthAuxData.randomGrowthSpeedMax,
					allocPara.currentActiveCellCount,
					growthAuxData.randGenAuxPara));
}
/**
* To run all the cell level logics.
* First step we got center positions of cells.
* Grow.
*/
/**
 * One time step of cell-level logic for the disc model: compute centers,
 * then (unless the run is in stabilization mode) grow, divide, redistribute
 * activity flags and growth progress, and finally move all components.
 * @param dt time step size, also stored on the object for sub-steps.
 */
void SceCells::runAllCellLevelLogicsDisc(double dt) {
	this->dt = dt;
	// centers feed both the growth and the division decisions below
	computeCenterPos();
	// during stabilization only mechanics run; skip growth and division
	if (nodes->getControlPara().controlSwitchs.stab == OFF) {
		growAtRandom(dt);
		//grow2DTwoRegions(dt, region1, region2);
		divide2DSimplified();
		distributeIsActiveInfo();
		distributeCellGrowthProgress();
	}
	allComponentsMove();
}
//Ali void SceCells::runAllCellLogicsDisc_M(double dt) {
//Ali void SceCells::runAllCellLogicsDisc_M(double dt) {
/**
 * One time step of cell-level logic for the membrane (_M) model.
 * Advances the clock, applies subcellular-element and membrane forces,
 * exchanges chemical signal (Dpp), grows, divides, moves, and handles
 * membrane node addition.  The " *** n ***" prints are progress markers for
 * locating crashes within the step.
 * @param dt            time step size
 * @param Damp_Coef     damping coefficient, stored for downstream kernels
 * @param InitTimeStage initial-stage time, stored for downstream logic
 */
void SceCells::runAllCellLogicsDisc_M(double dt, double Damp_Coef, double InitTimeStage) { //Ali
	std::cout << " *** 1 ***" << endl;
	std::cout.flush();
	this->dt = dt;
	this->Damp_Coef=Damp_Coef ; //Ali
	this->InitTimeStage=InitTimeStage ; //A & A
	// proliferation decay currently disabled (fixed at 1.0); the exponential
	// form is kept in the comment for reference
	growthAuxData.prolifDecay =1.0 ; // no decay for right now exp(-curTime * miscPara.prolifDecayCoeff);
	cout<< "The important curTime used in simulation is here which is"<<curTime <<endl;
	growthAuxData.randomGrowthSpeedMin = growthAuxData.prolifDecay
			* growthAuxData.randomGrowthSpeedMin_Ori;
	growthAuxData.randomGrowthSpeedMax = growthAuxData.prolifDecay
			* growthAuxData.randomGrowthSpeedMax_Ori;
	curTime = curTime + dt;
	std::cout << " *** 2 ***" << endl;
	std::cout.flush();
	applySceCellDisc_M();
	std::cout << " *** 3 ***" << endl;
	std::cout.flush();
	//Ali
	// centers must be up to date before the signal exchange uses them
	computeCenterPos_M();
	exchSignal();
	BC_Imp_M() ;
	std::cout << " *** 3.5 ***" << endl;
	std::cout.flush();
	//Ali
	applyMemForce_M();
	std::cout << " *** 4 ***" << endl;
	std::cout.flush();
	//Ali cmment //
	// computeCenterPos_M();
	std::cout << " *** 5 ***" << endl;
	std::cout.flush();
	//Ali cmment //
	growAtRandom_M(dt);
	std::cout << " *** 6 ***" << endl;
	std::cout.flush();
	//if (curTime<3300.0)
	divide2D_M();
	std::cout << " *** 7 ***" << endl;
	std::cout.flush();
	distributeCellGrowthProgress_M();
	std::cout << " *** 8 ***" << endl;
	std::cout.flush();
	findTangentAndNormal_M();//AAMIRI ADDED May29
	allComponentsMove_M();
	std::cout << " *** 9 ***" << endl;
	std::cout.flush();
	handleMembrGrowth_M();
	std::cout << " *** 10 ***" << endl;
	std::cout.flush();
}
/**
 * Exchanges chemical signal (Dpp) with the signaling module.
 * On the very first call, initializes the signal module from the node
 * capacities.  Every exchPeriod (360 time units, accumulated in
 * lastTimeExchange), it computes the tissue bounding box and effective
 * radius, copies node/center data to the signal module's host buffers,
 * runs updateSignal, and pulls the resulting per-cell Dpp levels back.
 * NOTE(review): on the first call, cell_DppOld is copied from
 * signal.dppLevel even when the exchange branch did not run this call —
 * confirm signal.dppLevel is meaningfully initialized at that point.
 */
void SceCells::exchSignal(){
	if (firstTimeReadDpp) {
		// one-time sizing of the signaling module's buffers
		uint maxTotalNodes=nodes->getInfoVecs().nodeLocX.size() ;
		signal.Initialize(allocPara_m.maxAllNodePerCell,allocPara_m.maxMembrNodePerCell,maxTotalNodes, allocPara_m.maxCellCount) ;
		cout << " I passed the initializtion for signaling module" << endl ;
	}
	lastTimeExchange=lastTimeExchange+dt ;
	cout << "last time exchange is " << lastTimeExchange << endl ;
	cout << "dt is " << dt << endl ;
	double exchPeriod=360 ;
	if ( lastTimeExchange>exchPeriod) {
		lastTimeExchange=0 ;
		//vector<CVector> cellCentersHost ;
		//cellCentersHost=getAllCellCenters(); //Ali
		cout << "I entered the function to update dpp" << endl ;
		// tissue bounding box over the active cell centers
		thrust::device_vector<double>::iterator MinX_Itr=thrust::min_element(cellInfoVecs.centerCoordX.begin(),
				cellInfoVecs.centerCoordX.begin()+allocPara_m.currentActiveCellCount) ;
		thrust::device_vector<double>::iterator MaxX_Itr=thrust::max_element(cellInfoVecs.centerCoordX.begin(),
				cellInfoVecs.centerCoordX.begin()+ allocPara_m.currentActiveCellCount) ;
		thrust::device_vector<double>::iterator MinY_Itr=thrust::min_element(cellInfoVecs.centerCoordY.begin(),
				cellInfoVecs.centerCoordY.begin()+ allocPara_m.currentActiveCellCount) ;
		thrust::device_vector<double>::iterator MaxY_Itr=thrust::max_element(cellInfoVecs.centerCoordY.begin(),
				cellInfoVecs.centerCoordY.begin()+ allocPara_m.currentActiveCellCount) ;
		Tisu_MinX= *MinX_Itr ;
		Tisu_MaxX= *MaxX_Itr ;
		Tisu_MinY= *MinY_Itr ;
		Tisu_MaxY= *MaxY_Itr ;
		// effective tissue radius: average of the x and y half-extents
		Tisu_R=0.5*(0.5*(Tisu_MaxX-Tisu_MinX)+0.5*(Tisu_MaxY-Tisu_MinY)) ;
		totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount
				* allocPara_m.maxAllNodePerCell;
		// device -> host copies feeding the signaling module
		thrust:: copy (nodes->getInfoVecs().nodeIsActive.begin(),nodes->getInfoVecs().nodeIsActive.begin()+ totalNodeCountForActiveCells, signal.nodeIsActiveHost.begin());
		thrust:: copy (nodes->getInfoVecs().nodeLocX.begin(),nodes->getInfoVecs().nodeLocX.begin()+ totalNodeCountForActiveCells, signal.nodeLocXHost.begin());
		thrust:: copy (nodes->getInfoVecs().nodeLocY.begin(),nodes->getInfoVecs().nodeLocY.begin()+ totalNodeCountForActiveCells, signal.nodeLocYHost.begin());
		thrust:: copy (cellInfoVecs.centerCoordX.begin(),cellInfoVecs.centerCoordX.begin()+allocPara_m.currentActiveCellCount, signal.cellCenterX.begin());
		thrust:: copy (cellInfoVecs.centerCoordY.begin(),cellInfoVecs.centerCoordY.begin()+allocPara_m.currentActiveCellCount, signal.cellCenterY.begin());
		signal.updateSignal(Tisu_MinX,Tisu_MaxX,Tisu_MinY,Tisu_MaxY,curTime,totalNodeCountForActiveCells,allocPara_m.currentActiveCellCount) ; //Ali
		assert(cellInfoVecs.cell_Dpp.size()==signal.dppLevel.size());
		thrust::copy(signal.dppLevel.begin(),signal.dppLevel.end(),cellInfoVecs.cell_Dpp.begin()) ;
		//currentActiveCellCountOld=allocPara_m.currentActiveCellCount;
	}
	if (firstTimeReadDpp) {
		// seed the "old" Dpp levels once so first-step deltas are zero
		thrust::copy(signal.dppLevel.begin(),signal.dppLevel.end(),cellInfoVecs.cell_DppOld.begin()) ;
		firstTimeReadDpp=false ;
	}
}
// Drives one step of the stretch test: refresh cell centers, grow the cells
// along the x-axis without inserting new points, then advance the nodes.
// dt : time-step size, cached on the object for the helpers called below.
void SceCells::runStretchTest(double dt) {
this->dt = dt;                 // cache step size for helpers that read this->dt
computeCenterPos();            // recompute centerCoordX/Y before growing
growAlongX(/*isAddPt=*/false, dt);  // stretch only; never add nodes in this test
moveNodes();                   // integrate node positions with the new velocities
}
// Grows every active cell along the global x-axis.
// isAddPt : when true, cells scheduled to grow also receive a new point.
// d_t     : time step (currently unused directly; helpers read this->dt).
void SceCells::growAlongX(bool isAddPt, double d_t) {
totalNodeCountForActiveCells = allocPara.currentActiveCellCount
* allocPara.maxNodeOfOneCell;
// Aim every cell's growth direction at (1, 0) with the fixed speed.
setGrowthDirXAxis();
// Advance growth progress, then mark which cells are scheduled to grow.
updateGrowthProgress();
decideIsScheduleToGrow();
// Derive per-cell target lengths and current extents along the growth axis.
computeCellTargetLength();
computeDistToCellCenter();
findMinAndMaxDistToCenter();
computeLenDiffExpCur();
// Stretch each cell by its expected-vs-current length difference.
stretchCellGivenLenDiff();
if (isAddPt) {
addPointIfScheduledToGrow();
}
}
// Placeholder for stress-driven growth: intentionally a no-op.
void SceCells::growWithStress(double d_t) {
}
std::vector<CVector> SceCells::getAllCellCenters() {
//void SceCells::getAllCellCenters() {
//thrust::host_vector<double> centerX = cellInfoVecs.centerCoordX;
//thrust::host_vector<double> centerY = cellInfoVecs.centerCoordY;
//thrust::host_vector<double> centerZ = cellInfoVecs.centerCoordZ;
thrust::host_vector<double> centerX(
allocPara_m.currentActiveCellCount);
thrust::copy(cellInfoVecs.centerCoordX.begin(),
cellInfoVecs.centerCoordX.begin()
+ allocPara_m.currentActiveCellCount,
centerX.begin());
thrust::host_vector<double> centerY(
allocPara_m.currentActiveCellCount);
thrust::copy(cellInfoVecs.centerCoordY.begin(),
cellInfoVecs.centerCoordY.begin()
+ allocPara_m.currentActiveCellCount,
centerY.begin());
thrust::host_vector<double> centerZ(
allocPara_m.currentActiveCellCount);
thrust::copy(cellInfoVecs.centerCoordZ.begin(),
cellInfoVecs.centerCoordZ.begin()
+ allocPara_m.currentActiveCellCount,
centerZ.begin());
//infoForSignal.sCenterX=centerX[4] ;
//infoForSignal.sCenterY=centerY[4] ;
//infoForSignal.sCenterZ=centerZ[4] ;
std::vector<CVector> result;
for (uint i = 0; i < allocPara_m.currentActiveCellCount; i++) {
CVector pos = CVector(centerX[i], centerY[i], centerZ[i]);
//infoForSignal.sCenterX=centerX[i] ;
//infoForSignal.sCenterY=centerY[i] ;
//infoForSignal.sCenterZ=centerZ[i] ;
result.push_back(pos);
}
return result;
}
void SceCells::setGrowthDirXAxis() {
thrust::fill(cellInfoVecs.growthXDir.begin(),
cellInfoVecs.growthXDir.begin() + allocPara.currentActiveCellCount,
1.0);
thrust::fill(cellInfoVecs.growthYDir.begin(),
cellInfoVecs.growthYDir.begin() + allocPara.currentActiveCellCount,
0.0);
thrust::fill(cellInfoVecs.growthSpeed.begin(),
cellInfoVecs.growthSpeed.begin() + allocPara.currentActiveCellCount,
growthAuxData.fixedGrowthSpeed);
}
std::vector<double> SceCells::getGrowthProgressVec() {
thrust::host_vector<double> growthProVec = cellInfoVecs.growthProgress;
std::vector<double> result;
for (uint i = 0; i < allocPara.currentActiveCellCount; i++) {
result.push_back(growthProVec[i]);
}
return result;
}
// Stages all data needed to divide cells this step. For every cell flagged in
// cellInfoVecs.isDividing, compacts its node slab (activity, x, y) and its
// cell-level info (rank, growth dir, Hertwig axis, center) into divAuxData
// scratch buffers, and pre-allocates the output slabs for the two daughters.
// NOTE(review): assumes divAuxData.toBeDivideCount was computed before this
// call -- confirm against the caller.
void SceCells::copyCellsPreDivision_M() {
totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount
* allocPara_m.maxAllNodePerCell;
// One fixed-size node slab (maxAllNodePerCell) per dividing cell.
divAuxData.nodeStorageCount = divAuxData.toBeDivideCount
* allocPara_m.maxAllNodePerCell;
// Scratch copies of the mother cells' per-node data.
divAuxData.tmpIsActive_M = thrust::device_vector<bool>(
divAuxData.nodeStorageCount, true);
divAuxData.tmpNodePosX_M = thrust::device_vector<double>(
divAuxData.nodeStorageCount, 0.0);
divAuxData.tmpNodePosY_M = thrust::device_vector<double>(
divAuxData.nodeStorageCount, 0.0);
// Per-cell (not per-node) properties of the dividing cells.
divAuxData.tmpCellRank_M = thrust::device_vector<uint>(
divAuxData.toBeDivideCount, 0);
divAuxData.tmpDivDirX_M = thrust::device_vector<double>(
divAuxData.toBeDivideCount, 0);
divAuxData.tmpDivDirY_M = thrust::device_vector<double>(
divAuxData.toBeDivideCount, 0);
divAuxData.tmpCenterPosX_M = thrust::device_vector<double>(
divAuxData.toBeDivideCount, 0);
divAuxData.tmpCenterPosY_M = thrust::device_vector<double>(
divAuxData.toBeDivideCount, 0);
// Output slabs for daughter cell 1 and daughter cell 2, filled later.
divAuxData.tmpIsActive1_M = thrust::device_vector<bool>(
divAuxData.nodeStorageCount, false);
divAuxData.tmpXPos1_M = thrust::device_vector<double>(
divAuxData.nodeStorageCount, 0.0);
divAuxData.tmpYPos1_M = thrust::device_vector<double>(
divAuxData.nodeStorageCount, 0.0);
divAuxData.tmpIsActive2_M = thrust::device_vector<bool>(
divAuxData.nodeStorageCount, false);
divAuxData.tmpXPos2_M = thrust::device_vector<double>(
divAuxData.nodeStorageCount, 0.0);
divAuxData.tmpYPos2_M = thrust::device_vector<double>(
divAuxData.nodeStorageCount, 0.0);
//A&A
// NOTE(review): these two are consumed one entry per dividing cell in
// createTwoNewCellArr_M, yet are sized by nodeStorageCount -- apparently
// larger than necessary; verify before shrinking.
divAuxData.tmpHertwigXdir = thrust::device_vector<double>(
divAuxData.nodeStorageCount, 0.0);
divAuxData.tmpHertwigYdir = thrust::device_vector<double>(
divAuxData.nodeStorageCount, 0.0);
//A&A
// step 2 , continued // copy node info values ready for division /comment A&A
// Compact the node data of dividing cells into the tmp buffers. The stencil
// expands each cell's isDividing flag to all of its nodes (DivideFunctor
// presumably maps node index -> cell rank by integer division -- confirm).
thrust::counting_iterator<uint> iStart(0);
thrust::copy_if(
thrust::make_zip_iterator(
thrust::make_tuple(
nodes->getInfoVecs().nodeIsActive.begin()
+ allocPara_m.bdryNodeCount,
nodes->getInfoVecs().nodeLocX.begin()
+ allocPara_m.bdryNodeCount,
nodes->getInfoVecs().nodeLocY.begin()
+ allocPara_m.bdryNodeCount)),
thrust::make_zip_iterator(
thrust::make_tuple(
nodes->getInfoVecs().nodeIsActive.begin()
+ allocPara_m.bdryNodeCount,
nodes->getInfoVecs().nodeLocX.begin()
+ allocPara_m.bdryNodeCount,
nodes->getInfoVecs().nodeLocY.begin()
+ allocPara_m.bdryNodeCount))
+ totalNodeCountForActiveCells,
thrust::make_permutation_iterator(cellInfoVecs.isDividing.begin(),
make_transform_iterator(iStart,
DivideFunctor(allocPara_m.maxAllNodePerCell))),
thrust::make_zip_iterator(
thrust::make_tuple(divAuxData.tmpIsActive_M.begin(),
divAuxData.tmpNodePosX_M.begin(),
divAuxData.tmpNodePosY_M.begin())), isTrue());
// step 3 , continued //copy cell info values ready for division /comment A&A
// Compact the per-cell info of dividing cells, keyed directly by isDividing.
thrust::counting_iterator<uint> iBegin(0);
thrust::copy_if(
thrust::make_zip_iterator(
thrust::make_tuple(iBegin, cellInfoVecs.growthXDir.begin(),
cellInfoVecs.growthYDir.begin(),
cellInfoVecs.HertwigXdir.begin(),
cellInfoVecs.HertwigYdir.begin(),
cellInfoVecs.centerCoordX.begin(),
cellInfoVecs.centerCoordY.begin())),
thrust::make_zip_iterator(
thrust::make_tuple(iBegin, cellInfoVecs.growthXDir.begin(),
cellInfoVecs.growthYDir.begin(),
cellInfoVecs.HertwigXdir.begin(),
cellInfoVecs.HertwigYdir.begin(),
cellInfoVecs.centerCoordX.begin(),
cellInfoVecs.centerCoordY.begin()))
+ allocPara_m.currentActiveCellCount,
cellInfoVecs.isDividing.begin(),
thrust::make_zip_iterator(
thrust::make_tuple(divAuxData.tmpCellRank_M.begin(),
divAuxData.tmpDivDirX_M.begin(),
divAuxData.tmpDivDirY_M.begin(),
divAuxData.tmpHertwigXdir.begin(),
divAuxData.tmpHertwigYdir.begin(),
divAuxData.tmpCenterPosX_M.begin(),
divAuxData.tmpCenterPosY_M.begin())), isTrue());
}
// Stages data for cells flagged in cellInfoVecs.isEnteringMitotic: compacts
// their node slabs and per-cell info into divAuxData scratch buffers, sized
// by toEnterMitoticCount. Mirrors copyCellsPreDivision_M but keys on the
// entering-mitotic flag and does not copy the Hertwig axis (that axis is
// computed later by findHertwigAxis from this staged data).
void SceCells::copyCellsEnterMitotic() {
totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount
* allocPara_m.maxAllNodePerCell;
// One fixed-size node slab per cell that enters mitosis.
divAuxData.nodeStorageCount = divAuxData.toEnterMitoticCount
* allocPara_m.maxAllNodePerCell;
// Scratch copies of the selected cells' per-node data.
divAuxData.tmpIsActive_M = thrust::device_vector<bool>(
divAuxData.nodeStorageCount, true);
divAuxData.tmpNodePosX_M = thrust::device_vector<double>(
divAuxData.nodeStorageCount, 0.0);
divAuxData.tmpNodePosY_M = thrust::device_vector<double>(
divAuxData.nodeStorageCount, 0.0);
// Per-cell properties of the selected cells.
divAuxData.tmpCellRank_M = thrust::device_vector<uint>(
divAuxData.toEnterMitoticCount, 0);
divAuxData.tmpDivDirX_M = thrust::device_vector<double>(
divAuxData.toEnterMitoticCount, 0);
divAuxData.tmpDivDirY_M = thrust::device_vector<double>(
divAuxData.toEnterMitoticCount, 0);
divAuxData.tmpCenterPosX_M = thrust::device_vector<double>(
divAuxData.toEnterMitoticCount, 0);
divAuxData.tmpCenterPosY_M = thrust::device_vector<double>(
divAuxData.toEnterMitoticCount, 0);
// Daughter-cell output slabs (unused until an actual division happens).
divAuxData.tmpIsActive1_M = thrust::device_vector<bool>(
divAuxData.nodeStorageCount, false);
divAuxData.tmpXPos1_M = thrust::device_vector<double>(
divAuxData.nodeStorageCount, 0.0);
divAuxData.tmpYPos1_M = thrust::device_vector<double>(
divAuxData.nodeStorageCount, 0.0);
divAuxData.tmpIsActive2_M = thrust::device_vector<bool>(
divAuxData.nodeStorageCount, false);
divAuxData.tmpXPos2_M = thrust::device_vector<double>(
divAuxData.nodeStorageCount, 0.0);
divAuxData.tmpYPos2_M = thrust::device_vector<double>(
divAuxData.nodeStorageCount, 0.0);
// step 2 , continued // copy node info values ready for division /comment A&A
// Compact node data; the stencil expands each cell's isEnteringMitotic flag
// to all of its nodes via the DivideFunctor-driven permutation iterator.
thrust::counting_iterator<uint> iStart(0);
thrust::copy_if(
thrust::make_zip_iterator(
thrust::make_tuple(
nodes->getInfoVecs().nodeIsActive.begin()
+ allocPara_m.bdryNodeCount,
nodes->getInfoVecs().nodeLocX.begin()
+ allocPara_m.bdryNodeCount,
nodes->getInfoVecs().nodeLocY.begin()
+ allocPara_m.bdryNodeCount)),
thrust::make_zip_iterator(
thrust::make_tuple(
nodes->getInfoVecs().nodeIsActive.begin()
+ allocPara_m.bdryNodeCount,
nodes->getInfoVecs().nodeLocX.begin()
+ allocPara_m.bdryNodeCount,
nodes->getInfoVecs().nodeLocY.begin()
+ allocPara_m.bdryNodeCount))
+ totalNodeCountForActiveCells,
thrust::make_permutation_iterator(cellInfoVecs.isEnteringMitotic.begin(),
make_transform_iterator(iStart,
DivideFunctor(allocPara_m.maxAllNodePerCell))),
thrust::make_zip_iterator(
thrust::make_tuple(divAuxData.tmpIsActive_M.begin(),
divAuxData.tmpNodePosX_M.begin(),
divAuxData.tmpNodePosY_M.begin())), isTrue());
// step 3 , continued //copy cell info values ready for division /comment A&A
// Compact per-cell info, keyed directly by isEnteringMitotic.
thrust::counting_iterator<uint> iBegin(0);
thrust::copy_if(
thrust::make_zip_iterator(
thrust::make_tuple(iBegin, cellInfoVecs.growthXDir.begin(),
cellInfoVecs.growthYDir.begin(),
cellInfoVecs.centerCoordX.begin(),
cellInfoVecs.centerCoordY.begin())),
thrust::make_zip_iterator(
thrust::make_tuple(iBegin, cellInfoVecs.growthXDir.begin(),
cellInfoVecs.growthYDir.begin(),
cellInfoVecs.centerCoordX.begin(),
cellInfoVecs.centerCoordY.begin()))
+ allocPara_m.currentActiveCellCount,
cellInfoVecs.isEnteringMitotic.begin(),
thrust::make_zip_iterator(
thrust::make_tuple(divAuxData.tmpCellRank_M.begin(),
divAuxData.tmpDivDirX_M.begin(),
divAuxData.tmpDivDirY_M.begin(),
divAuxData.tmpCenterPosX_M.begin(),
divAuxData.tmpCenterPosY_M.begin())), isTrue());
}
// Builds the node arrays for the two daughter cells of every dividing cell.
// For each dividing cell i: splits its membrane along the precomputed
// Hertwig axis (tmpHertwigXdir/Ydir, filled by findHertwigAxis and staged by
// copyCellsPreDivision_M), derives the two daughter centers, partitions the
// membrane and internal nodes, and assembles the per-daughter slabs and
// active-node counts consumed later by copyFirstCellArr_M/copySecondCellArr_M.
void SceCells::createTwoNewCellArr_M() {
// Reset the per-daughter active-count accumulators for this batch.
divAuxData.tmp1MemActiveCounts.clear();
divAuxData.tmp1InternalActiveCounts.clear();
divAuxData.tmp2MemActiveCounts.clear();
divAuxData.tmp2InternalActiveCounts.clear();
//divDebug();
for (uint i = 0; i < divAuxData.toBeDivideCount; i++) {
// Per-cell scratch for the internal-node split.
divAuxData.tmp1IntnlVec.clear();
divAuxData.tmp2IntnlVec.clear();
vector<CVector> membrNodes;
vector<CVector> intnlNodes;
obtainMembrAndIntnlNodes(i, membrNodes, intnlNodes);
CVector oldCenter = obtainCenter(i);
//A&A commented
//CVector divDir = calDivDir_MajorAxis(oldCenter, membrNodes,
//		lenAlongMajorAxis);
/*CVector divDir = calDivDir_MajorAxis(oldCenter, membrNodes,
lenAlongMajorAxis);*/
// Division direction comes from the stored Hertwig axis, not recomputed.
CVector divDir;
divDir.x = divAuxData.tmpHertwigXdir[i] ; //A&A
divDir.y = divAuxData.tmpHertwigYdir[i] ; //A&A
double lenAlongHertwigAxis = calLengthAlongHertwigAxis(divDir, oldCenter, membrNodes);//A&A added
//
std::vector<VecVal> tmp1Membr, tmp2Membr;
CVector cell1Center, cell2Center;
// Place the two daughter centers along the division axis, then split the
// membrane and shift each daughter's internal nodes to its new center.
obtainTwoNewCenters(oldCenter, divDir, lenAlongHertwigAxis, cell1Center,
cell2Center);
prepareTmpVec(i, divDir, oldCenter, tmp1Membr, tmp2Membr);
processMemVec(tmp1Membr, tmp2Membr);
shiftIntnlNodesByCellCenter(cell1Center, cell2Center);
assembleVecForTwoCells(i);
}
//divDebug();
}
//A&A
// Computes the Hertwig (division) axis for every cell entering mitosis: the
// major axis of the cell's membrane outline about its center, stored into
// cellInfoVecs.HertwigXdir/HertwigYdir at the cell's rank for later use by
// createTwoNewCellArr_M. Operates on the data staged by copyCellsEnterMitotic.
void SceCells::findHertwigAxis() {
// NOTE(review): these clears mirror createTwoNewCellArr_M; nothing in this
// function appends to them, so they may be redundant here -- confirm.
divAuxData.tmp1MemActiveCounts.clear();
divAuxData.tmp1InternalActiveCounts.clear();
divAuxData.tmp2MemActiveCounts.clear();
divAuxData.tmp2InternalActiveCounts.clear();
//divDebug();
for (uint i = 0; i < divAuxData.toEnterMitoticCount; i++) {
uint cellRank = divAuxData.tmpCellRank_M[i];
vector<CVector> membrNodes;
vector<CVector> intnlNodes;
obtainMembrAndIntnlNodes(i, membrNodes, intnlNodes);
CVector oldCenter = obtainCenter(i);
// Out-parameter required by the helper; its value is not used here.
double lenAlongMajorAxis;
CVector divDir = calDivDir_MajorAxis(oldCenter, membrNodes,
lenAlongMajorAxis);
// Persist the axis per cell rank (element-wise device writes).
cellInfoVecs.HertwigXdir[cellRank]=divDir.x ;
cellInfoVecs.HertwigYdir[cellRank]=divDir.y ;
//std::cout<<cellInfoVecs.HertwigXdir[cellRank]<<"HertwigXdir Thrust" <<std::endl;
//std::cout<<cellInfoVecs.HertwigYdir[cellRank]<<"HertwigYdir Thrust" <<std::endl;
//std::cout<<divDir.x<<"HertwigXdir " <<std::endl;
//std::cout<<divDir.y<<"HertwigYdir " <<std::endl;
}
//divDebug();
}
// Writes daughter cell 1 of each division back into the mother cell's node
// slab: copies the daughter's positions/activity over the mother's nodes,
// resets adhesion bookkeeping to -1 (no adhesion), and reinitializes the
// cell's growth state. Dpp is carried over from the mother cell.
void SceCells::copyFirstCellArr_M() {
uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell;
for (uint i = 0; i < divAuxData.toBeDivideCount; i++) {
uint cellRank = divAuxData.tmpCellRank_M[i];
// Destination: the mother cell's slab in the global node arrays.
uint nodeStartIndx = cellRank * maxAllNodePerCell
+ allocPara_m.bdryNodeCount;
// Source: daughter 1's slab in the tmp buffers.
uint tmpStartIndx = i * maxAllNodePerCell;
uint tmpEndIndx = (i + 1) * maxAllNodePerCell;
// Constant -1 streams clear adhesion and membrane-internal indices.
thrust::constant_iterator<int> noAdhesion(-1), noAdhesion2(-1);
thrust::copy(
thrust::make_zip_iterator(
thrust::make_tuple(divAuxData.tmpXPos1_M.begin(),
divAuxData.tmpYPos1_M.begin(),
divAuxData.tmpIsActive1_M.begin(), noAdhesion,
noAdhesion2)) + tmpStartIndx,
thrust::make_zip_iterator(
thrust::make_tuple(divAuxData.tmpXPos1_M.begin(),
divAuxData.tmpYPos1_M.begin(),
divAuxData.tmpIsActive1_M.begin(), noAdhesion,
noAdhesion2)) + tmpEndIndx,
thrust::make_zip_iterator(
thrust::make_tuple(
nodes->getInfoVecs().nodeLocX.begin(),
nodes->getInfoVecs().nodeLocY.begin(),
nodes->getInfoVecs().nodeIsActive.begin(),
nodes->getInfoVecs().nodeAdhereIndex.begin(),
nodes->getInfoVecs().membrIntnlIndex.begin()))
+ nodeStartIndx);
// Refresh per-cell counts and reset the growth cycle for the daughter.
cellInfoVecs.activeIntnlNodeCounts[cellRank] =
divAuxData.tmp1InternalActiveCounts[i];
cellInfoVecs.activeMembrNodeCounts[cellRank] =
divAuxData.tmp1MemActiveCounts[i];
cellInfoVecs.growthProgress[cellRank] = 0;
cellInfoVecs.membrGrowProgress[cellRank] = 0.0;
cellInfoVecs.isRandGrowInited[cellRank] = false;
cellInfoVecs.lastCheckPoint[cellRank] = 0;
// Snapshot the cell's current Dpp as the new "old" reference value.
cellInfoVecs.cell_DppOld[cellRank] = cellInfoVecs.cell_Dpp[cellRank];
}
}
// Writes daughter cell 2 of each division into a brand-new cell slot appended
// after the currently active cells (rank = currentActiveCellCount + i).
// Adhesion bookkeeping is reset to -1, growth state is reinitialized, and the
// daughter inherits the mother's Dpp value. The active-cell count itself is
// bumped separately by updateActiveCellCount_M.
void SceCells::copySecondCellArr_M() {
uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell;
for (uint i = 0; i < divAuxData.toBeDivideCount; i++) {
uint cellRankMother = divAuxData.tmpCellRank_M[i];
// New cell slot appended after the existing active cells.
uint cellRank = allocPara_m.currentActiveCellCount + i;
uint nodeStartIndx = cellRank * maxAllNodePerCell
+ allocPara_m.bdryNodeCount;
// Source: daughter 2's slab in the tmp buffers.
uint tmpStartIndx = i * maxAllNodePerCell;
uint tmpEndIndx = (i + 1) * maxAllNodePerCell;
// Constant -1 streams clear adhesion and membrane-internal indices.
thrust::constant_iterator<int> noAdhesion(-1), noAdhesion2(-1);
thrust::copy(
thrust::make_zip_iterator(
thrust::make_tuple(divAuxData.tmpXPos2_M.begin(),
divAuxData.tmpYPos2_M.begin(),
divAuxData.tmpIsActive2_M.begin(), noAdhesion,
noAdhesion2)) + tmpStartIndx,
thrust::make_zip_iterator(
thrust::make_tuple(divAuxData.tmpXPos2_M.begin(),
divAuxData.tmpYPos2_M.begin(),
divAuxData.tmpIsActive2_M.begin(), noAdhesion,
noAdhesion2)) + tmpEndIndx,
thrust::make_zip_iterator(
thrust::make_tuple(
nodes->getInfoVecs().nodeLocX.begin(),
nodes->getInfoVecs().nodeLocY.begin(),
nodes->getInfoVecs().nodeIsActive.begin(),
nodes->getInfoVecs().nodeAdhereIndex.begin(),
nodes->getInfoVecs().membrIntnlIndex.begin()))
+ nodeStartIndx);
// Initialize per-cell counts and reset the growth cycle for the daughter.
cellInfoVecs.activeIntnlNodeCounts[cellRank] =
divAuxData.tmp2InternalActiveCounts[i];
cellInfoVecs.activeMembrNodeCounts[cellRank] =
divAuxData.tmp2MemActiveCounts[i];
cellInfoVecs.growthProgress[cellRank] = 0;
cellInfoVecs.membrGrowProgress[cellRank] = 0;
cellInfoVecs.isRandGrowInited[cellRank] = false;
cellInfoVecs.lastCheckPoint[cellRank] = 0;
// The new cell inherits the mother's Dpp as both current and old values.
cellInfoVecs.cell_DppOld[cellRank] = cellInfoVecs.cell_Dpp[cellRankMother];
cellInfoVecs.cell_Dpp[cellRank] = cellInfoVecs.cell_Dpp[cellRankMother];
}
}
//AAMIRI
/*
void SceCells::removeCellArr_M() {
uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell;
for (uint i = 0; i < divAuxData.toBeDivideCount; i++) {
uint cellRank = divAuxData.tmpCellRank_M[i];
uint nodeStartIndx = cellRank * maxAllNodePerCell
+ allocPara_m.bdryNodeCount;
uint tmpStartIndx = i * maxAllNodePerCell;
uint tmpEndIndx = (i + 1) * maxAllNodePerCell;
thrust::constant_iterator<int> noAdhesion(-1), noAdhesion2(-1);
thrust::copy(
thrust::make_zip_iterator(
thrust::make_tuple(divAuxData.tmpXPos1_M.begin(),
divAuxData.tmpYPos1_M.begin(),
divAuxData.tmpIsActive1_M.begin(), noAdhesion,
noAdhesion2)) + tmpStartIndx,
thrust::make_zip_iterator(
thrust::make_tuple(divAuxData.tmpXPos1_M.begin(),
divAuxData.tmpYPos1_M.begin(),
divAuxData.tmpIsActive1_M.begin(), noAdhesion,
noAdhesion2)) + tmpEndIndx,
thrust::make_zip_iterator(
thrust::make_tuple(
nodes->getInfoVecs().nodeLocX.begin(),
nodes->getInfoVecs().nodeLocY.begin(),
nodes->getInfoVecs().nodeIsActive.begin(),
nodes->getInfoVecs().nodeAdhereIndex.begin(),
nodes->getInfoVecs().membrIntnlIndex.begin()))
+ nodeStartIndx);
cellInfoVecs.activeIntnlNodeCounts[cellRank] =
divAuxData.tmp1InternalActiveCounts[i];
cellInfoVecs.activeMembrNodeCounts[cellRank] =
divAuxData.tmp1MemActiveCounts[i];
cellInfoVecs.growthProgress[cellRank] = 0;
cellInfoVecs.membrGrowProgress[cellRank] = 0.0;
cellInfoVecs.isRandGrowInited[cellRank] = false;
cellInfoVecs.lastCheckPoint[cellRank] = 0;
}
}
*/
// Commits this step's divisions: each dividing cell produced one extra
// daughter, so the active-cell count grows by toBeDivideCount. The node
// container is informed so it tracks the appended cells as well.
void SceCells::updateActiveCellCount_M() {
allocPara_m.currentActiveCellCount += divAuxData.toBeDivideCount;
nodes->setActiveCellCount(allocPara_m.currentActiveCellCount);
}
//AAMIRI
/*
void SceCells::updateActiveCellCountAfterRemoval_M() {
allocPara_m.currentActiveCellCount = allocPara_m.currentActiveCellCount
+ divAuxData.toBeDivideCount;
nodes->setActiveCellCount(allocPara_m.currentActiveCellCount);
}
*/
// Clears the per-cell division flag for all currently active cells.
void SceCells::markIsDivideFalse_M() {
thrust::fill_n(cellInfoVecs.isDividing.begin(),
allocPara_m.currentActiveCellCount, false);
}
// Transforms the (velX, velY) pairs of the boundary nodes plus all nodes of
// active cells through the ForceZero functor, writing back in place.
// NOTE(review): the functor's exact effect (presumably zeroing or clamping
// velocities, per its name) is defined elsewhere -- confirm before relying
// on it.
void SceCells::adjustNodeVel_M() {
thrust::transform(
thrust::make_zip_iterator(
thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(),
nodes->getInfoVecs().nodeVelY.begin())),
thrust::make_zip_iterator(
thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(),
nodes->getInfoVecs().nodeVelY.begin()))
// Range covers boundary nodes followed by the active-cell node slabs.
+ allocPara_m.bdryNodeCount + totalNodeCountForActiveCells,
thrust::make_zip_iterator(
thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(),
nodes->getInfoVecs().nodeVelY.begin())),
ForceZero());
}
// Integrates node positions from node velocities for the boundary nodes and
// all nodes of active cells, using a damped saxpy-style update with a single
// global damping coefficient (Damp_Coef). Positions are updated in place.
// NOTE(review): the exact update formula lives in SaxpyFunctorDim2_Damp --
// presumably pos += f(dt, Damp_Coef) * vel; confirm the functor definition.
void SceCells::moveNodes_M() {
thrust::transform(
thrust::make_zip_iterator(
thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(),
nodes->getInfoVecs().nodeVelY.begin())),
thrust::make_zip_iterator(
thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(),
nodes->getInfoVecs().nodeVelY.begin()))
+ totalNodeCountForActiveCells + allocPara_m.bdryNodeCount,
// Second input and output are both the position arrays (in-place update).
thrust::make_zip_iterator(
thrust::make_tuple(nodes->getInfoVecs().nodeLocX.begin(),
nodes->getInfoVecs().nodeLocY.begin())),
thrust::make_zip_iterator(
thrust::make_tuple(nodes->getInfoVecs().nodeLocX.begin(),
nodes->getInfoVecs().nodeLocY.begin())),
//Ali 	SaxpyFunctorDim2(dt));
SaxpyFunctorDim2_Damp(dt,Damp_Coef)); //Ali
}
//Ali // This function is written to assigned different damping coefficients to cells, therefore the boundary cells can have more damping
// Variant of moveNodes_M with a per-cell damping coefficient: each node reads
// its own cell's Cell_Damp value (index mapped to cell rank via the
// DivideFunctor-driven permutation iterator) so boundary cells can be damped
// more strongly than interior cells. Positions are updated in place.
void SceCells::moveNodes_BC_M() {
thrust::counting_iterator<uint> iBegin2(0);
uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell;
thrust::transform(
thrust::make_zip_iterator(
thrust::make_tuple(
// Per-node damping: the owning cell's Cell_Damp entry.
thrust::make_permutation_iterator(
cellInfoVecs.Cell_Damp.begin(),
make_transform_iterator(iBegin2,
DivideFunctor(maxAllNodePerCell))),
nodes->getInfoVecs().nodeVelX.begin(),
nodes->getInfoVecs().nodeVelY.begin())),
thrust::make_zip_iterator(
thrust::make_tuple(
thrust::make_permutation_iterator(
cellInfoVecs.Cell_Damp.begin(),
make_transform_iterator(iBegin2,
DivideFunctor(maxAllNodePerCell))),
nodes->getInfoVecs().nodeVelX.begin(),
nodes->getInfoVecs().nodeVelY.begin()))
+ totalNodeCountForActiveCells + allocPara_m.bdryNodeCount,
// Second input and output are both the position arrays (in-place update).
thrust::make_zip_iterator(
thrust::make_tuple(nodes->getInfoVecs().nodeLocX.begin(),
nodes->getInfoVecs().nodeLocY.begin())),
thrust::make_zip_iterator(
thrust::make_tuple(nodes->getInfoVecs().nodeLocX.begin(),
nodes->getInfoVecs().nodeLocY.begin())),
SaxpyFunctorDim2_BC_Damp(dt));
}
//Ali
// Computes membrane forces for all active cells and accumulates them into the
// node velocity arrays, in two device-wide passes:
//   1) AddMembrForce: tension along the membrane, which also records
//      diagnostics (membrTensionMag, membrTenMagRi, link midpoints) and the
//      left/right bending components per node;
//   2) AddMembrBend: applies the bending force using the components recorded
//      by pass 1 (hence the raw device pointers captured in between).
void SceCells::applyMemForce_M() {
totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount
* allocPara_m.maxAllNodePerCell;
uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell;
thrust::counting_iterator<uint> iBegin(0), iBegin1(0);
//Ali
// Stamp the current simulation time on every active cell.
thrust::fill(cellInfoVecs.Cell_Time.begin(),cellInfoVecs.Cell_Time.begin() +allocPara_m.currentActiveCellCount,curTime);
//Ali
/*
thrust::device_vector<double>::iterator MinX_Itr=thrust::min_element(nodes->getInfoVecs().nodeLocX.begin()+ allocPara_m.bdryNodeCount,
nodes->getInfoVecs().nodeLocX.begin()+ allocPara_m.bdryNodeCount+ totalNodeCountForActiveCells) ;
thrust::device_vector<double>::iterator MaxX_Itr=thrust::max_element(nodes->getInfoVecs().nodeLocX.begin()+ allocPara_m.bdryNodeCount,
nodes->getInfoVecs().nodeLocX.begin()+ allocPara_m.bdryNodeCount+ totalNodeCountForActiveCells) ;
thrust::device_vector<double>::iterator MinY_Itr=thrust::min_element(nodes->getInfoVecs().nodeLocY.begin()+ allocPara_m.bdryNodeCount,
nodes->getInfoVecs().nodeLocY.begin()+ allocPara_m.bdryNodeCount+ totalNodeCountForActiveCells) ;
thrust::device_vector<double>::iterator MaxY_Itr=thrust::max_element(nodes->getInfoVecs().nodeLocY.begin()+ allocPara_m.bdryNodeCount,
nodes->getInfoVecs().nodeLocY.begin()+ allocPara_m.bdryNodeCount+ totalNodeCountForActiveCells) ;
Tisu_MinX= *MinX_Itr ;
Tisu_MaxX= *MaxX_Itr ;
Tisu_MinY= *MinY_Itr ;
Tisu_MaxY= *MaxY_Itr ;
*/
//cout<< "# of boundary nodes"<< allocPara_m.bdryNodeCount<<endl ;
//cout<< "# of total active nodes"<<totalNodeCountForActiveCells <<endl ;
//cout<<"The minimum location in X in applyMemForce_M is="<<Tisu_MinX<< endl;
//cout<<"The maximum location in X in applyMemForce_M is="<<Tisu_MaxX<< endl;
//cout<<"The minimum location in Y in applyMemForce_M is="<<Tisu_MinY<< endl;
//cout<<"The maximum location in Y in applyMemForce_M is="<<Tisu_MaxY<< endl;
//Ali
// Raw device pointers let the functors look up arbitrary neighbor nodes.
double* nodeLocXAddr = thrust::raw_pointer_cast(
&(nodes->getInfoVecs().nodeLocX[0]));
double* nodeLocYAddr = thrust::raw_pointer_cast(
&(nodes->getInfoVecs().nodeLocY[0]));
bool* nodeIsActiveAddr = thrust::raw_pointer_cast(
&(nodes->getInfoVecs().nodeIsActive[0]));
double grthPrgrCriVal_M = growthAuxData.grthPrgrCriVal_M_Ori; // for now constant //growthAuxData.grthProgrEndCPU
// - growthAuxData.prolifDecay
// * (growthAuxData.grthProgrEndCPU
// - growthAuxData.grthPrgrCriVal_M_Ori);
// Pass 1: membrane tension. Each node's tuple carries its cell's growth
// progress, active-membrane count, center x, cell time, its cell rank and
// in-cell index (Divide/ModuloFunctor on the flat node index), and its
// position and current velocity.
thrust::transform(
thrust::make_zip_iterator(
thrust::make_tuple(
thrust::make_permutation_iterator(
cellInfoVecs.growthProgress.begin(),
make_transform_iterator(iBegin,
DivideFunctor(maxAllNodePerCell))),
thrust::make_permutation_iterator(
cellInfoVecs.activeMembrNodeCounts.begin(),
make_transform_iterator(iBegin,
DivideFunctor(maxAllNodePerCell))),
thrust::make_permutation_iterator(
cellInfoVecs.centerCoordX.begin(),
make_transform_iterator(iBegin,
DivideFunctor(maxAllNodePerCell))),
thrust::make_permutation_iterator(
cellInfoVecs.Cell_Time.begin(),
make_transform_iterator(iBegin,
DivideFunctor(maxAllNodePerCell))),
make_transform_iterator(iBegin,
DivideFunctor(maxAllNodePerCell)),
make_transform_iterator(iBegin,
ModuloFunctor(maxAllNodePerCell)),
nodes->getInfoVecs().nodeLocX.begin()
+ allocPara_m.bdryNodeCount,
nodes->getInfoVecs().nodeLocY.begin()
+ allocPara_m.bdryNodeCount,
nodes->getInfoVecs().nodeVelX.begin()
+ allocPara_m.bdryNodeCount,
nodes->getInfoVecs().nodeVelY.begin()
+ allocPara_m.bdryNodeCount)),
thrust::make_zip_iterator(
thrust::make_tuple(
thrust::make_permutation_iterator(
cellInfoVecs.growthProgress.begin(),
make_transform_iterator(iBegin,
DivideFunctor(maxAllNodePerCell))),
thrust::make_permutation_iterator(
cellInfoVecs.activeMembrNodeCounts.begin(),
make_transform_iterator(iBegin,
DivideFunctor(maxAllNodePerCell))),
thrust::make_permutation_iterator(
cellInfoVecs.centerCoordX.begin(),
make_transform_iterator(iBegin,
DivideFunctor(maxAllNodePerCell))),
thrust::make_permutation_iterator(
cellInfoVecs.Cell_Time.begin(),
make_transform_iterator(iBegin,
DivideFunctor(maxAllNodePerCell))),
make_transform_iterator(iBegin,
DivideFunctor(maxAllNodePerCell)),
make_transform_iterator(iBegin,
ModuloFunctor(maxAllNodePerCell)),
nodes->getInfoVecs().nodeLocX.begin()
+ allocPara_m.bdryNodeCount,
nodes->getInfoVecs().nodeLocY.begin()
+ allocPara_m.bdryNodeCount,
nodes->getInfoVecs().nodeVelX.begin()
+ allocPara_m.bdryNodeCount,
nodes->getInfoVecs().nodeVelY.begin()
+ allocPara_m.bdryNodeCount))
+ totalNodeCountForActiveCells,
// Outputs: updated velocities plus tension diagnostics and the bend
// components consumed by pass 2 below.
thrust::make_zip_iterator(
thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(),
nodes->getInfoVecs().nodeVelY.begin(),
nodes->getInfoVecs().membrTensionMag.begin(),
nodes->getInfoVecs().membrTenMagRi.begin(),
nodes->getInfoVecs().membrLinkRiMidX.begin(),
nodes->getInfoVecs().membrLinkRiMidY.begin(),
nodes->getInfoVecs().membrBendLeftX.begin(),
nodes->getInfoVecs().membrBendLeftY.begin(),
nodes->getInfoVecs().membrBendRightX.begin(),
nodes->getInfoVecs().membrBendRightY.begin()))
+ allocPara_m.bdryNodeCount,
AddMembrForce(allocPara_m.bdryNodeCount, maxAllNodePerCell,
nodeLocXAddr, nodeLocYAddr, nodeIsActiveAddr, grthPrgrCriVal_M));
/**Ali Comment start
thrust::transform(
thrust::make_zip_iterator(
thrust::make_tuple(
thrust::make_permutation_iterator(
cellInfoVecs.activeMembrNodeCounts.begin(),
make_transform_iterator(iBegin,
DivideFunctor(maxAllNodePerCell))),
make_transform_iterator(iBegin,
DivideFunctor(maxAllNodePerCell)),
make_transform_iterator(iBegin,
ModuloFunctor(maxAllNodePerCell)),
nodes->getInfoVecs().nodeLocX.begin()
+ allocPara_m.bdryNodeCount,
nodes->getInfoVecs().nodeLocY.begin()
+ allocPara_m.bdryNodeCount,
nodes->getInfoVecs().nodeVelX.begin()
+ allocPara_m.bdryNodeCount,
nodes->getInfoVecs().nodeVelY.begin()
+ allocPara_m.bdryNodeCount)),
thrust::make_zip_iterator(
thrust::make_tuple(
thrust::make_permutation_iterator(
cellInfoVecs.activeMembrNodeCounts.begin(),
make_transform_iterator(iBegin,
DivideFunctor(maxAllNodePerCell))),
make_transform_iterator(iBegin,
DivideFunctor(maxAllNodePerCell)),
make_transform_iterator(iBegin,
ModuloFunctor(maxAllNodePerCell)),
nodes->getInfoVecs().nodeLocX.begin()
+ allocPara_m.bdryNodeCount,
nodes->getInfoVecs().nodeLocY.begin()
+ allocPara_m.bdryNodeCount,
nodes->getInfoVecs().nodeVelX.begin()
+ allocPara_m.bdryNodeCount,
nodes->getInfoVecs().nodeVelY.begin()
+ allocPara_m.bdryNodeCount))
+ totalNodeCountForActiveCells,
thrust::make_zip_iterator(
thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(),
nodes->getInfoVecs().nodeVelY.begin(),
nodes->getInfoVecs().membrTensionMag.begin(),
nodes->getInfoVecs().membrTenMagRi.begin(),
nodes->getInfoVecs().membrLinkRiMidX.begin(),
nodes->getInfoVecs().membrLinkRiMidY.begin(),
nodes->getInfoVecs().membrBendLeftX.begin(),
nodes->getInfoVecs().membrBendLeftY.begin(),
nodes->getInfoVecs().membrBendRightX.begin(),
nodes->getInfoVecs().membrBendRightY.begin()))
+ allocPara_m.bdryNodeCount,
AddMembrForce(allocPara_m.bdryNodeCount, maxAllNodePerCell,
nodeLocXAddr, nodeLocYAddr, nodeIsActiveAddr));
**/
// Ali comment end
//Ali
//Ali
// Raw pointers to the bend components written by pass 1.
double* bendLeftXAddr = thrust::raw_pointer_cast(
&(nodes->getInfoVecs().membrBendLeftX[0]));
double* bendLeftYAddr = thrust::raw_pointer_cast(
&(nodes->getInfoVecs().membrBendLeftY[0]));
double* bendRightXAddr = thrust::raw_pointer_cast(
&(nodes->getInfoVecs().membrBendRightX[0]));
double* bendRightYAddr = thrust::raw_pointer_cast(
&(nodes->getInfoVecs().membrBendRightY[0]));
// Pass 2: add the bending forces into the velocities.
// NOTE(review): unlike pass 1, this range does not offset by bdryNodeCount
// -- confirm whether boundary nodes are intentionally included here.
thrust::transform(
thrust::make_zip_iterator(
thrust::make_tuple(
thrust::make_permutation_iterator(
cellInfoVecs.activeMembrNodeCounts.begin(),
make_transform_iterator(iBegin1,
DivideFunctor(maxAllNodePerCell))),
make_transform_iterator(iBegin1,
DivideFunctor(maxAllNodePerCell)),
make_transform_iterator(iBegin1,
ModuloFunctor(maxAllNodePerCell)),
nodes->getInfoVecs().nodeVelX.begin(),
nodes->getInfoVecs().nodeVelY.begin())),
thrust::make_zip_iterator(
thrust::make_tuple(
thrust::make_permutation_iterator(
cellInfoVecs.activeMembrNodeCounts.begin(),
make_transform_iterator(iBegin1,
DivideFunctor(maxAllNodePerCell))),
make_transform_iterator(iBegin1,
DivideFunctor(maxAllNodePerCell)),
make_transform_iterator(iBegin1,
ModuloFunctor(maxAllNodePerCell)),
nodes->getInfoVecs().nodeVelX.begin(),
nodes->getInfoVecs().nodeVelY.begin()))
+ totalNodeCountForActiveCells,
thrust::make_zip_iterator(
thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(),
nodes->getInfoVecs().nodeVelY.begin())),
AddMembrBend(maxAllNodePerCell, nodeIsActiveAddr, bendLeftXAddr,
bendLeftYAddr, bendRightXAddr, bendRightYAddr));
}
//AAMIRI
// For every node of every active cell, decomposes the membrane-interaction
// force (nodeF_MI_M_x/y) and the external force into tangential and normal
// components along the membrane, and computes the local curvature and the
// distance to the next membrane node (outputs written by CalCurvatures).
void SceCells::findTangentAndNormal_M() {
uint totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount
* allocPara_m.maxAllNodePerCell;
uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell;
thrust::counting_iterator<uint> iBegin(0), iBegin1(0);
// Raw pointers let CalCurvatures read neighbor node positions directly.
double* nodeLocXAddr = thrust::raw_pointer_cast(
&(nodes->getInfoVecs().nodeLocX[0]));
double* nodeLocYAddr = thrust::raw_pointer_cast(
&(nodes->getInfoVecs().nodeLocY[0]));
bool* nodeIsActiveAddr = thrust::raw_pointer_cast(
&(nodes->getInfoVecs().nodeIsActive[0]));
// NOTE(review): the first zip mixes iBegin and iBegin1 while the second
// uses iBegin1 throughout; both start at 0, so behavior is identical --
// consider unifying for clarity.
thrust::transform(
thrust::make_zip_iterator(
thrust::make_tuple(
thrust::make_permutation_iterator(
cellInfoVecs.activeMembrNodeCounts.begin(),
make_transform_iterator(iBegin,
DivideFunctor(maxAllNodePerCell))),
make_transform_iterator(iBegin,
DivideFunctor(maxAllNodePerCell)),
make_transform_iterator(iBegin1,
ModuloFunctor(maxAllNodePerCell)),
nodes->getInfoVecs().nodeF_MI_M_x.begin(), //AliE
nodes->getInfoVecs().nodeF_MI_M_y.begin(), //AliE
nodes->getInfoVecs().nodeF_MI_M_T.begin(), //AliE
nodes->getInfoVecs().nodeF_MI_M_N.begin(), //AliE
nodes->getInfoVecs().nodeCurvature.begin(),
nodes->getInfoVecs().nodeExtForceX.begin(),
nodes->getInfoVecs().nodeExtForceY.begin())),
thrust::make_zip_iterator(
thrust::make_tuple(
thrust::make_permutation_iterator(
cellInfoVecs.activeMembrNodeCounts.begin(),
make_transform_iterator(iBegin1,
DivideFunctor(maxAllNodePerCell))),
make_transform_iterator(iBegin1,
DivideFunctor(maxAllNodePerCell)),
make_transform_iterator(iBegin1,
ModuloFunctor(maxAllNodePerCell)),
nodes->getInfoVecs().nodeF_MI_M_x.begin(), //AliE
nodes->getInfoVecs().nodeF_MI_M_y.begin(), //AliE
nodes->getInfoVecs().nodeF_MI_M_T.begin(), //AliE
nodes->getInfoVecs().nodeF_MI_M_N.begin(), //AliE
nodes->getInfoVecs().nodeCurvature.begin(),
nodes->getInfoVecs().nodeExtForceX.begin(),
nodes->getInfoVecs().nodeExtForceY.begin()))
+ totalNodeCountForActiveCells,
// Outputs: tangential/normal force components, curvature, decomposed
// external force, and distance to the next membrane node.
thrust::make_zip_iterator(
thrust::make_tuple(nodes->getInfoVecs().nodeF_MI_M_T.begin(),
nodes->getInfoVecs().nodeF_MI_M_N.begin(),
nodes->getInfoVecs().nodeCurvature.begin(),
nodes->getInfoVecs().nodeExtForceTangent.begin(),
nodes->getInfoVecs().nodeExtForceNormal.begin(),
nodes->getInfoVecs().membrDistToRi.begin())),
CalCurvatures(maxAllNodePerCell, nodeIsActiveAddr, nodeLocXAddr, nodeLocYAddr));
}
// Applies an ablation event: for each affected cell, decrements its
// active-node count by the number of ablated nodes and removes those
// nodes from the node container.
void SceCells::runAblationTest(AblationEvent& ablEvent) {
for (uint j = 0; j < ablEvent.ablationCells.size(); j++) {
int rank = ablEvent.ablationCells[j].cellNum;
std::vector<uint> nodesToRemove = ablEvent.ablationCells[j].nodeNums;
cellInfoVecs.activeNodeCountOfThisCell[rank] =
cellInfoVecs.activeNodeCountOfThisCell[rank]
- nodesToRemove.size();
nodes->removeNodes(rank, nodesToRemove);
}
}
// Recomputes every active cell's center (centerCoordX/Y) as the mean position
// of its active *internal* nodes; membrane nodes are filtered out by the
// ActiveAndIntnl predicate. Three phases: gather (copy_if), per-cell sum
// (reduce_by_key), then divide by the per-cell internal node count.
void SceCells::computeCenterPos_M() {
	totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount
			* allocPara_m.maxAllNodePerCell;
	thrust::counting_iterator<uint> iBegin(0);
	thrust::counting_iterator<uint> countingEnd(totalNodeCountForActiveCells);
	//uint totalMembrActiveNodeCount = thrust::reduce(
	//		cellInfoVecs.activeMembrNodeCounts.begin(),
	//		cellInfoVecs.activeMembrNodeCounts.begin()
	//				+ allocPara_m.currentActiveCellCount);
	// Total active internal nodes across all cells: this is exactly how many
	// (rank, x, y) entries the copy_if below produces, and bounds the
	// reduce_by_key input range.
	uint totalIntnlActiveNodeCount = thrust::reduce(
			cellInfoVecs.activeIntnlNodeCounts.begin(),
			cellInfoVecs.activeIntnlNodeCounts.begin()
					+ allocPara_m.currentActiveCellCount);
	// Gather (cellRank, x, y) for every node that passes the stencil test;
	// nodeIndex / maxAllNodePerCell yields the owning cell's rank.
	thrust::copy_if(
			thrust::make_zip_iterator(
					thrust::make_tuple(
							make_transform_iterator(iBegin,
									DivideFunctor(
											allocPara_m.maxAllNodePerCell)),
							nodes->getInfoVecs().nodeLocX.begin()
									+ allocPara_m.bdryNodeCount,
							nodes->getInfoVecs().nodeLocY.begin()
									+ allocPara_m.bdryNodeCount)),
			thrust::make_zip_iterator(
					thrust::make_tuple(
							make_transform_iterator(iBegin,
									DivideFunctor(
											allocPara_m.maxAllNodePerCell)),
							nodes->getInfoVecs().nodeLocX.begin()
									+ allocPara_m.bdryNodeCount,
							nodes->getInfoVecs().nodeLocY.begin()
									+ allocPara_m.bdryNodeCount))
					+ totalNodeCountForActiveCells,
			// Stencil: (isActive, cellType) per node, tested by ActiveAndIntnl.
			thrust::make_zip_iterator(
					thrust::make_tuple(
							nodes->getInfoVecs().nodeIsActive.begin(),
							nodes->getInfoVecs().nodeCellType.begin()))
					+ allocPara_m.bdryNodeCount,
			thrust::make_zip_iterator(
					thrust::make_tuple(cellNodeInfoVecs.cellRanks.begin(),
							cellNodeInfoVecs.activeXPoss.begin(),
							cellNodeInfoVecs.activeYPoss.begin())),
			ActiveAndIntnl());
	// Sum (x, y) per cell rank; ranks are contiguous after the gather, so
	// reduce_by_key produces one (sumX, sumY) pair per active cell.
	thrust::reduce_by_key(cellNodeInfoVecs.cellRanks.begin(),
			cellNodeInfoVecs.cellRanks.begin() + totalIntnlActiveNodeCount,
			thrust::make_zip_iterator(
					thrust::make_tuple(cellNodeInfoVecs.activeXPoss.begin(),
							cellNodeInfoVecs.activeYPoss.begin())),
			cellInfoVecs.cellRanksTmpStorage.begin(),
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.centerCoordX.begin(),
							cellInfoVecs.centerCoordY.begin())),
			thrust::equal_to<uint>(), CVec2Add());
	// Divide the per-cell coordinate sums by the internal node count to get
	// the mean position (the cell center), written back in place.
	thrust::transform(
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.centerCoordX.begin(),
							cellInfoVecs.centerCoordY.begin())),
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.centerCoordX.begin(),
							cellInfoVecs.centerCoordY.begin()))
					+ allocPara_m.currentActiveCellCount,
			cellInfoVecs.activeIntnlNodeCounts.begin(),
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.centerCoordX.begin(),
							cellInfoVecs.centerCoordY.begin())), CVec2Divide());
}
// Applies tissue-boundary damping: maps each cell's center position (and its
// current damping value) through BC_Tissue_Damp, which is parameterized by
// the tissue bounding box (Tisu_Min/Max X/Y), the base damping coefficient,
// and the active cell count. Earlier variants (min/max recomputation, the
// two-field transform) are kept below as commented-out history.
void SceCells::BC_Imp_M() {
	/*
	 thrust::device_vector<double>::iterator  MinX_Itr=thrust::min_element(
	 cellInfoVecs.centerCoordX.begin(),
	 cellInfoVecs.centerCoordX.begin()+allocPara_m.currentActiveCellCount ) ;
	 thrust::device_vector<double>::iterator  MaxX_Itr=thrust::max_element(
	 cellInfoVecs.centerCoordX.begin(),
	 cellInfoVecs.centerCoordX.begin()+allocPara_m.currentActiveCellCount ) ;
	 thrust::device_vector<double>::iterator  MinY_Itr=thrust::min_element(
	 cellInfoVecs.centerCoordY.begin(),
	 cellInfoVecs.centerCoordY.begin()+allocPara_m.currentActiveCellCount ) ;
	 thrust::device_vector<double>::iterator  MaxY_Itr=thrust::max_element(
	 cellInfoVecs.centerCoordY.begin(),
	 cellInfoVecs.centerCoordY.begin()+allocPara_m.currentActiveCellCount ) ;
	 MinX= *MinX_Itr ;
	 MaxX= *MaxX_Itr ;
	 MinY= *MinY_Itr ;
	 MaxY= *MaxY_Itr ;
	 */
	//cout<<"The minimum location of cell cetners in Y in BC_Imp_M is="<<Tisu_MinX<< endl;
	//cout<<"The maximum location of cell centers in Y in BC_Imp_M is="<<Tisu_MaxX<< endl;
	//cout<<"The minimum location of cell centers in Y in BC_Imp_M is="<<Tisu_MinY<< endl;
	//cout<<"The maximum location of cell centers in Y in BC_Imp_M is="<<Tisu_MaxY<< endl;
	/**	 thrust::transform(
	 thrust::make_zip_iterator(
	 thrust::make_tuple(cellInfoVecs.centerCoordX.begin(),
	 cellInfoVecs.centerCoordY.begin())
	 ),
	 thrust::make_zip_iterator(
	 thrust::make_tuple(cellInfoVecs.centerCoordX.begin(),
	 cellInfoVecs.centerCoordY.begin())) + allocPara_m.currentActiveCellCount,
	 thrust::make_zip_iterator(
	 thrust::make_tuple(cellInfoVecs.centerCoordX.begin(),
	 cellInfoVecs.centerCoordY.begin())),
	 BC_Tissue_Damp(Damp_Coef)) ;
	 **/
	int NumActCells=allocPara_m.currentActiveCellCount ;
	//Ali
	// Input per cell: (centerX, centerY, Cell_Damp); output writes back
	// (centerX, Cell_Damp).
	// NOTE(review): the output tuple omits centerCoordY, so only centerCoordX
	// and Cell_Damp can be modified by BC_Tissue_Damp — confirm this
	// asymmetry is intentional (the functor's definition is not visible here).
	thrust::transform(
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.centerCoordX.begin(),
							cellInfoVecs.centerCoordY.begin(),
							cellInfoVecs.Cell_Damp.begin())
			),
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.centerCoordX.begin(),
							cellInfoVecs.centerCoordY.begin(),
							cellInfoVecs.Cell_Damp.begin())) + allocPara_m.currentActiveCellCount,
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.centerCoordX.begin(),
							cellInfoVecs.Cell_Damp.begin())),
			BC_Tissue_Damp(Tisu_MinX,Tisu_MaxX,Tisu_MinY,Tisu_MaxY,Damp_Coef,NumActCells)) ;
	/**void SceCells::randomizeGrowth() {
	 thrust::counting_iterator<uint> countingBegin(0);
	 thrust::transform(
	 thrust::make_zip_iterator(
	 thrust::make_tuple(cellInfoVecs.centerCoordX.begin(),
	 cellInfoVecs.growthXDir.begin(),
	 cellInfoVecs.growthYDir.begin(),
	 cellInfoVecs.isRandGrowInited.begin(),
	 countingBegin)),
	 thrust::make_zip_iterator(
	 thrust::make_tuple(cellInfoVecs.centerCoordX.begin(),
	 cellInfoVecs.growthXDir.begin(),
	 cellInfoVecs.growthYDir.begin(),
	 cellInfoVecs.isRandGrowInited.begin(),
	 countingBegin)) + allocPara.currentActiveCellCount,
	 thrust::make_zip_iterator(
	 thrust::make_tuple(cellInfoVecs.growthSpeed.begin(),
	 cellInfoVecs.growthXDir.begin(),
	 cellInfoVecs.growthYDir.begin(),
	 cellInfoVecs.isRandGrowInited.begin())),
	 AssignRandIfNotInit(growthAuxData.randomGrowthSpeedMin,
	 growthAuxData.randomGrowthSpeedMax,
	 allocPara.currentActiveCellCount,
	 growthAuxData.randGenAuxPara));
	 }
	 **/
}
// One growth step for the membrane/internal-node model: refresh per-cell
// growth speeds, advance growth progress, flag cells that are due to grow,
// add internal nodes to those cells, then reconcile the growth bookkeeping.
// The cell-stretching sub-steps are currently disabled (commented out).
// NOTE(review): the dt parameter is not used in this body; the sub-steps
// appear to read a member dt instead — confirm the parameter is vestigial.
void SceCells::growAtRandom_M(double dt) {
	totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount
			* allocPara_m.maxAllNodePerCell;
	randomizeGrowth_M();
	updateGrowthProgress_M();
	decideIsScheduleToGrow_M();
	//computeCellTargetLength_M();
	//computeDistToCellCenter_M();
	//findMinAndMaxDistToCenter_M();
	//computeLenDiffExpCur_M();
	//stretchCellGivenLenDiff_M();
	addPointIfScheduledToGrow_M();
	//decideIsScheduleToShrink_M();// AAMIRI May5
	//delPointIfScheduledToGrow_M();//AAMIRI - commented out on June20
	adjustGrowthInfo_M();
}
void SceCells::divide2D_M() {
	// Evaluate both conditions first: each call also fills the corresponding
	// per-cell flag vectors (isDividing / isEnteringMitotic) as a side effect.
	const bool anyCellDividing = decideIfGoingToDivide_M();
	const bool anyCellEnteringMitotic = decideIfAnyCellEnteringMitotic(); //A&A

	// Cells entering mitosis are snapshotted and get a Hertwig division axis. //A&A
	if (anyCellEnteringMitotic) {
		std::cout << "I am in EnteringMitotic" << std::endl;
		copyCellsEnterMitotic();
		findHertwigAxis();
	}

	if (!anyCellDividing) {
		return;
	}

	//aniDebug = true;
	// Run the actual 2D division pipeline for the flagged cells.
	copyCellsPreDivision_M();
	createTwoNewCellArr_M();
	copyFirstCellArr_M();
	copySecondCellArr_M();
	updateActiveCellCount_M();
	markIsDivideFalse_M();
	//divDebug();
}
// Broadcasts each cell's growthProgress value to all of that cell's node
// slots (nodeGrowPro), so per-node kernels can read the owning cell's
// progress directly. On the first step after initialization it also seeds
// lastCheckPoint with the current progress.
void SceCells::distributeCellGrowthProgress_M() {
	totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount
			* allocPara_m.maxAllNodePerCell;
	thrust::counting_iterator<uint> countingBegin(0);
	thrust::counting_iterator<uint> countingEnd(totalNodeCountForActiveCells);
	// nodeIndex / maxAllNodePerCell == owning cell rank, so the permutation
	// iterator gathers growthProgress[cellRank] for every node slot.
	thrust::copy(
			thrust::make_permutation_iterator(
					cellInfoVecs.growthProgress.begin(),
					make_transform_iterator(countingBegin,
							DivideFunctor(allocPara_m.maxAllNodePerCell))),
			thrust::make_permutation_iterator(
					cellInfoVecs.growthProgress.begin(),
					make_transform_iterator(countingEnd,
							DivideFunctor(allocPara_m.maxAllNodePerCell))),
			nodes->getInfoVecs().nodeGrowPro.begin()
					+ allocPara_m.bdryNodeCount);
	// Fixed the garbled debug message ("the vlaue of ... is" with no
	// trailing separator before the number).
	std::cout << "the value of init time stage in distributeCellGrowthProgress_M is "
			<< InitTimeStage << std::endl;
	// Only within the first dt after InitTimeStage: initialize the growth
	// checkpoints from the current progress.
	if (curTime <= InitTimeStage + dt) //AAMIRI /A & A
		thrust::copy(
				cellInfoVecs.growthProgress.begin(),
				cellInfoVecs.growthProgress.end(),
				cellInfoVecs.lastCheckPoint.begin()
				);
}
// Advances all node positions for the current time step. The plain
// moveNodes_M variant is kept for reference; the _BC_ variant used here
// additionally applies the boundary-condition treatment.
void SceCells::allComponentsMove_M() {
	//moveNodes_M(); //Ali
	moveNodes_BC_M();  //Ali
}
//Ali modified this function to introduce differential proliferation rates
// Assigns randomized growth speeds to cells that have not yet been
// initialized (isRandGrowInited), using a time-based seed. RandomizeGrow_M is
// parameterized by a tissue reference point, the tissue radius, and the
// min/max random speed bounds, which allows position-dependent proliferation.
void SceCells::randomizeGrowth_M() {
	// NOTE(review): 0.5*(Max - Min) is the half-extent of the tissue, not its
	// midpoint; given the names CntrTisuX/Y, 0.5*(Max + Min) may have been
	// intended — confirm against RandomizeGrow_M's definition before changing.
	double CntrTisuX=0.5*(Tisu_MaxX-Tisu_MinX) ;
	double CntrTisuY=0.5*(Tisu_MaxY-Tisu_MinY) ;
	//cout<<"The minimum location of cell cetners in Y in randomizeGrowth_M is="<<Tisu_MinX<< endl;
	//cout<<"The maximum location of cell centers in Y in randomizeGrowth_M is="<<Tisu_MaxX<< endl;
	//cout<<"The minimum location of cell centers in Y in randomizeGrowth_M is="<<Tisu_MinY<< endl;
	//cout<<"The maximum location of cell centers in Y in randomizeGrowth_M is="<<Tisu_MaxY<< endl;
	// Wall-clock seed: runs are therefore not reproducible across launches.
	uint seed = time(NULL);
	thrust::counting_iterator<uint> countingBegin(0);
	// In-place transform: growthSpeed / isRandGrowInited are both read and
	// written; the counting iterator gives each cell a distinct RNG stream.
	thrust::transform(
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.growthSpeed.begin(),
							cellInfoVecs.centerCoordX.begin(),
							cellInfoVecs.centerCoordY.begin(),
							cellInfoVecs.isRandGrowInited.begin(),
							countingBegin)),
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.growthSpeed.begin(),
							cellInfoVecs.centerCoordX.begin(),
							cellInfoVecs.centerCoordY.begin(),
							cellInfoVecs.isRandGrowInited.begin(),
							countingBegin))
					+ allocPara_m.currentActiveCellCount,
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.growthSpeed.begin(),
							cellInfoVecs.centerCoordX.begin(),
							cellInfoVecs.centerCoordY.begin(),
							cellInfoVecs.isRandGrowInited.begin())),
			RandomizeGrow_M(CntrTisuX,CntrTisuY,Tisu_R,growthAuxData.randomGrowthSpeedMin,
					growthAuxData.randomGrowthSpeedMax, seed));
}
// Advances each cell's growth progress by one time step. The previous
// progress is first snapshotted into growthProgressOld (used elsewhere to
// detect checkpoint crossings); the update itself is Dpp-regulated via
// DppGrowRegulator rather than the older fixed-rate saxpy (kept commented).
void SceCells::updateGrowthProgress_M() {
	// Snapshot current progress before modifying it.
	thrust::copy(cellInfoVecs.growthProgress.begin(),
			cellInfoVecs.growthProgress.begin()
					+ allocPara_m.currentActiveCellCount,
			cellInfoVecs.growthProgressOld.begin());
	/*
	 thrust::transform(cellInfoVecs.growthSpeed.begin(),
	 cellInfoVecs.growthSpeed.begin()
	 + allocPara_m.currentActiveCellCount,
	 cellInfoVecs.growthProgress.begin(),
	 cellInfoVecs.growthProgress.begin(), SaxpyFunctorWithMaxOfOne(dt));
	 */
	cout << " I am trying to update growth progress" << endl ;
	//double dummy=0 ;
	// Mitotic checkpoint threshold passed into the regulator functor.
	double mitoticCheckPoint=growthAuxData.grthPrgrCriVal_M_Ori ;
	// Per cell: (Dpp, DppOld, progress, speed) -> new progress.
	thrust::transform(
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.cell_Dpp.begin(),
							cellInfoVecs.cell_DppOld.begin(),
							cellInfoVecs.growthProgress.begin(),
							cellInfoVecs.growthSpeed.begin())),
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.cell_Dpp.begin(),
							cellInfoVecs.cell_DppOld.begin(),
							cellInfoVecs.growthProgress.begin(),
							cellInfoVecs.growthSpeed.begin()))
					+ allocPara_m.currentActiveCellCount,
			cellInfoVecs.growthProgress.begin(),
			DppGrowRegulator(dt,mitoticCheckPoint));
}
// Flags each active cell as scheduled-to-grow when its (growthProgress,
// lastCheckPoint) pair satisfies PtCondiOp's threshold test.
void SceCells::decideIsScheduleToGrow_M() {
	thrust::transform(
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.growthProgress.begin(),
							cellInfoVecs.lastCheckPoint.begin())),
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.growthProgress.begin(),
							cellInfoVecs.lastCheckPoint.begin()))
					+ allocPara_m.currentActiveCellCount,
			cellInfoVecs.isScheduledToGrow.begin(),
			PtCondiOp(miscPara.growThreshold));
}
//AAMIRI May5
// Marks cells for shrinking when their center lies inside a hard-coded
// circular "laser ablation" region (center (25, 25), radius 4) via isDelOp.
// NOTE(review): the laser parameters are fixed constants here — presumably
// experiment-specific; consider sourcing them from configuration.
void SceCells::decideIsScheduleToShrink_M() {
	double laserCenterY = 25.0;
	double laserCenterX = 25.0;
	double laserRadius = 4.0;
	thrust::counting_iterator<uint> iBegin(0);
	thrust::counting_iterator<uint> iEnd(allocPara_m.currentActiveCellCount);
	// Per cell: (rank, centerX, centerY, currentFlag) -> new shrink flag.
	thrust::transform(
			thrust::make_zip_iterator(
					thrust::make_tuple(iBegin,
							cellInfoVecs.centerCoordX.begin(),
							cellInfoVecs.centerCoordY.begin(),
							cellInfoVecs.isScheduledToShrink.begin())),
			thrust::make_zip_iterator(
					thrust::make_tuple(iEnd,
							cellInfoVecs.centerCoordX.begin()+allocPara_m.currentActiveCellCount,
							cellInfoVecs.centerCoordY.begin()+allocPara_m.currentActiveCellCount,
							cellInfoVecs.isScheduledToShrink.begin()+allocPara_m.currentActiveCellCount)),
			cellInfoVecs.isScheduledToShrink.begin(),
			isDelOp(laserCenterX, laserCenterY, laserRadius));
}
void SceCells::computeCellTargetLength_M() {
	// Map each active cell's growth progress to its expected (target) length,
	// parameterized by the initial and final cell lengths.
	uint activeCount = allocPara_m.currentActiveCellCount;
	CompuTarLen lengthOp(bioPara.cellInitLength, bioPara.cellFinalLength);
	thrust::transform(cellInfoVecs.growthProgress.begin(),
			cellInfoVecs.growthProgress.begin() + activeCount,
			cellInfoVecs.expectedLength.begin(), lengthOp);
}
// For every node of every active cell, computes the node's distance to its
// cell center projected along the cell's growth direction (CompuDist),
// writing the result into distToCenterAlongGrowDir. Per-cell quantities
// (center, growth direction) are gathered per node via permutation iterators
// keyed on nodeIndex / maxAllNodePerCell.
void SceCells::computeDistToCellCenter_M() {
	thrust::counting_iterator<uint> iBegin(0);
	thrust::counting_iterator<uint> iEnd(totalNodeCountForActiveCells);
	uint endIndx = allocPara_m.bdryNodeCount + totalNodeCountForActiveCells;
	thrust::transform(
			thrust::make_zip_iterator(
					thrust::make_tuple(
							make_permutation_iterator(
									cellInfoVecs.centerCoordX.begin(),
									make_transform_iterator(iBegin,
											DivideFunctor(
													allocPara_m.maxAllNodePerCell))),
							make_permutation_iterator(
									cellInfoVecs.centerCoordY.begin(),
									make_transform_iterator(iBegin,
											DivideFunctor(
													allocPara_m.maxAllNodePerCell))),
							make_permutation_iterator(
									cellInfoVecs.growthXDir.begin(),
									make_transform_iterator(iBegin,
											DivideFunctor(
													allocPara_m.maxAllNodePerCell))),
							make_permutation_iterator(
									cellInfoVecs.growthYDir.begin(),
									make_transform_iterator(iBegin,
											DivideFunctor(
													allocPara_m.maxAllNodePerCell))),
							nodes->getInfoVecs().nodeLocX.begin()
									+ allocPara_m.bdryNodeCount,
							nodes->getInfoVecs().nodeLocY.begin()
									+ allocPara_m.bdryNodeCount,
							nodes->getInfoVecs().nodeIsActive.begin()
									+ allocPara_m.bdryNodeCount)),
			thrust::make_zip_iterator(
					thrust::make_tuple(
							make_permutation_iterator(
									cellInfoVecs.centerCoordX.begin(),
									make_transform_iterator(iEnd,
											DivideFunctor(
													allocPara_m.maxAllNodePerCell))),
							make_permutation_iterator(
									cellInfoVecs.centerCoordY.begin(),
									make_transform_iterator(iEnd,
											DivideFunctor(
													allocPara_m.maxAllNodePerCell))),
							make_permutation_iterator(
									cellInfoVecs.growthXDir.begin(),
									make_transform_iterator(iEnd,
											DivideFunctor(
													allocPara_m.maxAllNodePerCell))),
							make_permutation_iterator(
									cellInfoVecs.growthYDir.begin(),
									make_transform_iterator(iEnd,
											DivideFunctor(
													allocPara_m.maxAllNodePerCell))),
							nodes->getInfoVecs().nodeLocX.begin() + endIndx,
							nodes->getInfoVecs().nodeLocY.begin() + endIndx,
							nodes->getInfoVecs().nodeIsActive.begin()
									+ endIndx)),
			cellNodeInfoVecs.distToCenterAlongGrowDir.begin(), CompuDist());
}
// Per cell, finds the minimum (smallestDistance) and maximum
// (biggestDistance) of the node distances-to-center computed along the
// growth direction, via two segmented reductions keyed on cell rank.
// NOTE(review): uses countingBegin, presumably a member iterator starting at
// 0 — confirm it is reset before this call.
void SceCells::findMinAndMaxDistToCenter_M() {
	// Minimum distance per cell.
	thrust::reduce_by_key(
			make_transform_iterator(countingBegin,
					DivideFunctor(allocPara_m.maxAllNodePerCell)),
			make_transform_iterator(countingBegin,
					DivideFunctor(allocPara_m.maxAllNodePerCell))
					+ totalNodeCountForActiveCells,
			cellNodeInfoVecs.distToCenterAlongGrowDir.begin(),
			cellInfoVecs.cellRanksTmpStorage.begin(),
			cellInfoVecs.smallestDistance.begin(), thrust::equal_to<uint>(),
			thrust::minimum<double>());
	// for nodes of each cell, find the maximum distance from the node to the corresponding
	// cell center along the pre-defined growth direction.
	thrust::reduce_by_key(
			make_transform_iterator(countingBegin,
					DivideFunctor(allocPara_m.maxAllNodePerCell)),
			make_transform_iterator(countingBegin,
					DivideFunctor(allocPara_m.maxAllNodePerCell))
					+ totalNodeCountForActiveCells,
			cellNodeInfoVecs.distToCenterAlongGrowDir.begin(),
			cellInfoVecs.cellRanksTmpStorage.begin(),
			cellInfoVecs.biggestDistance.begin(), thrust::equal_to<uint>(),
			thrust::maximum<double>());
}
// Computes, per active cell, the difference between the expected length and
// the current length (derived from the smallest/biggest node distances) via
// CompuDiff, writing into lengthDifference.
void SceCells::computeLenDiffExpCur_M() {
	thrust::transform(
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.expectedLength.begin(),
							cellInfoVecs.smallestDistance.begin(),
							cellInfoVecs.biggestDistance.begin())),
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.expectedLength.begin(),
							cellInfoVecs.smallestDistance.begin(),
							cellInfoVecs.biggestDistance.begin()))
					+ allocPara_m.currentActiveCellCount,
			cellInfoVecs.lengthDifference.begin(), CompuDiff());
}
// Adds a stretching force contribution to node velocities: each node is
// pushed along its cell's growth direction proportionally to the cell's
// length difference (ApplyStretchForce_M, scaled by elongationCoefficient).
// Per-cell values are gathered per node with permutation iterators; the
// modulo term gives the node's index within its cell so the functor can
// distinguish membrane nodes (< maxMembrNodePerCell) from internal ones.
void SceCells::stretchCellGivenLenDiff_M() {
	uint count = allocPara_m.maxAllNodePerCell;
	uint bdry = allocPara_m.bdryNodeCount;
	uint actCount = totalNodeCountForActiveCells;
	uint all = bdry + actCount;
	thrust::counting_iterator<uint> iBegin(0);
	thrust::counting_iterator<uint> iEnd(actCount);
	// In-place update of nodeVelX/Y for the cell-node range after the
	// boundary nodes.
	thrust::transform(
			thrust::make_zip_iterator(
					thrust::make_tuple(
							cellNodeInfoVecs.distToCenterAlongGrowDir.begin(),
							make_permutation_iterator(
									cellInfoVecs.lengthDifference.begin(),
									make_transform_iterator(iBegin,
											DivideFunctor(count))),
							make_permutation_iterator(
									cellInfoVecs.growthXDir.begin(),
									make_transform_iterator(iBegin,
											DivideFunctor(count))),
							make_permutation_iterator(
									cellInfoVecs.growthYDir.begin(),
									make_transform_iterator(iBegin,
											DivideFunctor(count))),
							nodes->getInfoVecs().nodeVelX.begin() + bdry,
							nodes->getInfoVecs().nodeVelY.begin() + bdry,
							make_transform_iterator(iBegin,
									ModuloFunctor(count)))),
			thrust::make_zip_iterator(
					thrust::make_tuple(
							cellNodeInfoVecs.distToCenterAlongGrowDir.begin()
									+ actCount,
							make_permutation_iterator(
									cellInfoVecs.lengthDifference.begin(),
									make_transform_iterator(iEnd,
											DivideFunctor(count))),
							make_permutation_iterator(
									cellInfoVecs.growthXDir.begin(),
									make_transform_iterator(iEnd,
											DivideFunctor(count))),
							make_permutation_iterator(
									cellInfoVecs.growthYDir.begin(),
									make_transform_iterator(iEnd,
											DivideFunctor(count))),
							nodes->getInfoVecs().nodeVelX.begin() + all,
							nodes->getInfoVecs().nodeVelY.begin() + all,
							make_transform_iterator(iEnd,
									ModuloFunctor(count)))),
			thrust::make_zip_iterator(
					thrust::make_tuple(
							nodes->getInfoVecs().nodeVelX.begin() + bdry,
							nodes->getInfoVecs().nodeVelY.begin() + bdry)),
			ApplyStretchForce_M(bioPara.elongationCoefficient,
					allocPara_m.maxMembrNodePerCell));
}
// For every cell flagged isScheduledToGrow, AddPtOp_M inserts a new internal
// node near the cell center (writing directly into the raw node arrays via
// the device pointers in growthAuxData) and updates lastCheckPoint and the
// cell's internal node count.
void SceCells::addPointIfScheduledToGrow_M() {
	// Wall-clock seed for the functor's RNG; not reproducible across runs.
	uint seed = time(NULL);
	uint activeCellCount = allocPara_m.currentActiveCellCount;
	thrust::counting_iterator<uint> iBegin(0);
	thrust::counting_iterator<uint> iEnd(activeCellCount);
	// Per cell: (flag, intnlCount, centerX, centerY, rank, lastCheckPoint)
	// -> (new lastCheckPoint, new intnlCount); node insertion is a side
	// effect through the raw pointers.
	thrust::transform(
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.isScheduledToGrow.begin(),
							cellInfoVecs.activeIntnlNodeCounts.begin(),
							cellInfoVecs.centerCoordX.begin(),
							cellInfoVecs.centerCoordY.begin(), iBegin,
							cellInfoVecs.lastCheckPoint.begin())),
			thrust::make_zip_iterator(
					thrust::make_tuple(
							cellInfoVecs.isScheduledToGrow.begin()
									+ activeCellCount,
							cellInfoVecs.activeIntnlNodeCounts.begin()
									+ activeCellCount,
							cellInfoVecs.centerCoordX.begin() + activeCellCount,
							cellInfoVecs.centerCoordY.begin() + activeCellCount,
							iEnd,
							cellInfoVecs.lastCheckPoint.begin()
									+ activeCellCount)),
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.lastCheckPoint.begin(),
							cellInfoVecs.activeIntnlNodeCounts.begin())),
			AddPtOp_M(seed, miscPara.addNodeDistance, miscPara.growThreshold,
					growthAuxData.nodeXPosAddress,
					growthAuxData.nodeYPosAddress,
					growthAuxData.nodeIsActiveAddress));
}
//AAMIRI
// Removes nodes from cells flagged isScheduledToShrink (laser-ablation
// experiment): the shrink flags are set once when curTime first passes
// 70000.0, and DelPtOp_M then deletes nodes through the raw device pointers,
// updating membrane/internal counts, isCellActive and growthSpeed.
// NOTE(review): 70000.0/70000.1 are hard-coded experiment times — the 0.1
// window presumably guarantees decideIsScheduleToShrink_M runs exactly once;
// this assumes dt < 0.1. Confirm and consider configuration parameters.
void SceCells::delPointIfScheduledToGrow_M() {
	// Wall-clock seed for the functor's RNG; not reproducible across runs.
	uint seed = time(NULL);
	uint activeCellCount = allocPara_m.currentActiveCellCount;
	thrust::counting_iterator<uint> iBegin(0);
	thrust::counting_iterator<uint> iEnd(activeCellCount);
	// Integer step number (truncating division) passed to DelPtOp_M.
	int timeStep = curTime/dt;
	if (curTime>70000.0 && curTime<70000.1){
		decideIsScheduleToShrink_M();// AAMIRI
	}
	if (curTime > 70000.0)
		// Per cell: (shrinkFlag, intnlCount, centerX, centerY, rank,
		// membrCount, isActive, growthSpeed) -> (membrCount, intnlCount,
		// isActive, growthSpeed); node removal is a side effect through the
		// raw pointers.
		thrust::transform(
				thrust::make_zip_iterator(
						thrust::make_tuple(cellInfoVecs.isScheduledToShrink.begin(),
								cellInfoVecs.activeIntnlNodeCounts.begin(),
								cellInfoVecs.centerCoordX.begin(),
								cellInfoVecs.centerCoordY.begin(), iBegin,
								cellInfoVecs.activeMembrNodeCounts.begin(),
								cellInfoVecs.isCellActive.begin(),
								cellInfoVecs.growthSpeed.begin())),
				thrust::make_zip_iterator(
						thrust::make_tuple(
								cellInfoVecs.isScheduledToShrink.begin()
										+ activeCellCount,
								cellInfoVecs.activeIntnlNodeCounts.begin()
										+ activeCellCount,
								cellInfoVecs.centerCoordX.begin() + activeCellCount,
								cellInfoVecs.centerCoordY.begin() + activeCellCount,
								iEnd,
								cellInfoVecs.activeMembrNodeCounts.begin()
										+ activeCellCount,
								cellInfoVecs.isCellActive.begin()
										+ activeCellCount,
								cellInfoVecs.growthSpeed.begin()
										+ activeCellCount)),
				thrust::make_zip_iterator(
						thrust::make_tuple(cellInfoVecs.activeMembrNodeCounts.begin(),
								cellInfoVecs.activeIntnlNodeCounts.begin(), cellInfoVecs.isCellActive.begin(),
								cellInfoVecs.growthSpeed.begin())),
				DelPtOp_M(seed, timeStep, growthAuxData.adhIndxAddr,
						growthAuxData.nodeXPosAddress,
						growthAuxData.nodeYPosAddress,
						growthAuxData.nodeIsActiveAddress));
}
// Returns true when at least one active cell satisfies the division
// criterion; also fills cellInfoVecs.isDividing and
// divAuxData.toBeDivideCount as side effects.
bool SceCells::decideIfGoingToDivide_M() {
	uint activeCount = allocPara_m.currentActiveCellCount;
	// Flag each cell whose (growthProgress, internal node count) passes the
	// CompuIsDivide_M test.
	thrust::transform(
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.growthProgress.begin(),
							cellInfoVecs.activeIntnlNodeCounts.begin())),
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.growthProgress.begin(),
							cellInfoVecs.activeIntnlNodeCounts.begin()))
					+ activeCount,
			cellInfoVecs.isDividing.begin(),
			CompuIsDivide_M(allocPara_m.maxIntnlNodePerCell));
	// Count the flagged cells; any positive total means a division happens
	// this step.
	divAuxData.toBeDivideCount = thrust::reduce(cellInfoVecs.isDividing.begin(),
			cellInfoVecs.isDividing.begin() + activeCount, (uint) (0));
	return divAuxData.toBeDivideCount > 0;
}
//A&A
// Returns true when at least one cell crossed the mitotic-entry threshold
// this step; also fills cellInfoVecs.isEnteringMitotic and
// divAuxData.toEnterMitoticCount as side effects. //A&A
bool SceCells::decideIfAnyCellEnteringMitotic() {
	// Threshold is currently the original constant criterion value; the
	// decayed form is kept below for reference.
	double grthPrgrCriVal_M = growthAuxData.grthPrgrCriVal_M_Ori; // for now constant  growthAuxData.grthProgrEndCPU
	//			- growthAuxData.prolifDecay
	//					* (growthAuxData.grthProgrEndCPU
	//							- growthAuxData.grthPrgrCriVal_M_Ori);
	uint activeCount = allocPara_m.currentActiveCellCount;
	// A cell "enters mitosis" when its (new, old) progress pair straddles the
	// criterion value, per CompuIsEnteringMitotic_M.
	thrust::transform(
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.growthProgress.begin(),
							cellInfoVecs.growthProgressOld.begin())),
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.growthProgress.begin(),
							cellInfoVecs.growthProgressOld.begin()))
					+ activeCount,
			cellInfoVecs.isEnteringMitotic.begin(),
			CompuIsEnteringMitotic_M(grthPrgrCriVal_M));
	// Count the flagged cells; any positive total triggers the mitotic path.
	divAuxData.toEnterMitoticCount = thrust::reduce(
			cellInfoVecs.isEnteringMitotic.begin(),
			cellInfoVecs.isEnteringMitotic.begin() + activeCount, (uint) (0));
	return divAuxData.toEnterMitoticCount > 0;
}
//AAMIRI
/*
bool SceCells::decideIfGoingToRemove_M() {
thrust::transform(
thrust::make_zip_iterator(
thrust::make_tuple(cellInfoVecs.growthProgress.begin(),
cellInfoVecs.activeIntnlNodeCounts.begin())),
thrust::make_zip_iterator(
thrust::make_tuple(cellInfoVecs.growthProgress.begin(),
cellInfoVecs.activeIntnlNodeCounts.begin()))
+ allocPara_m.currentActiveCellCount,
cellInfoVecs.isRemoving.begin(),
CompuIsRemoving_M(allocPara_m.maxIntnlNodePerCell));
// sum all bool values which indicate whether the cell is going to divide.
// toBeDivideCount is the total number of cells going to divide.
divAuxData.toBeRemovingCount = thrust::reduce(cellInfoVecs.isRemoving.begin(),
cellInfoVecs.isRemoving.begin()
+ allocPara_m.currentActiveCellCount, (uint) (0));
if (divAuxData.toBeRemovingCount > 0) {
return true;
} else {
return false;
}
}
*/
// Builds the animation snapshot for the current state: copies node data to
// host, records adhesion bonds, membrane links (with membrane tension as the
// per-node animation value), and qualifying internal-node links. Nodes are
// deduplicated into rawAniData.aniNodePosArr via locIndexToAniIndexMap.
AniRawData SceCells::obtainAniRawData(AnimationCriteria& aniCri) {
	uint activeCellCount = allocPara_m.currentActiveCellCount;
	uint maxNodePerCell = allocPara_m.maxAllNodePerCell;
	uint maxMemNodePerCell = allocPara_m.maxMembrNodePerCell;
	uint beginIndx = allocPara_m.bdryNodeCount;

	AniRawData rawAniData;
	//cout << "size of potential pairs = " << pairs.size() << endl;

	// unordered_map is more efficient than map, but it is a c++ 11 feature
	// and c++ 11 seems to be incompatible with Thrust.
	IndexMap locIndexToAniIndexMap;

	uint maxActiveNode = activeCellCount * maxNodePerCell;

	// Host staging buffers for one bulk device-to-host copy.
	thrust::host_vector<double> hostTmpVectorLocX(maxActiveNode);
	thrust::host_vector<double> hostTmpVectorLocY(maxActiveNode);
	thrust::host_vector<bool> hostIsActiveVec(maxActiveNode);
	thrust::host_vector<int> hostBondVec(maxActiveNode);
	thrust::host_vector<double> hostTmpVectorTenMag(maxActiveNode);

	// Single zipped copy of (x, y, isActive, adhereIndex, tensionMag).
	thrust::copy(
			thrust::make_zip_iterator(
					thrust::make_tuple(nodes->getInfoVecs().nodeLocX.begin(),
							nodes->getInfoVecs().nodeLocY.begin(),
							nodes->getInfoVecs().nodeIsActive.begin(),
							nodes->getInfoVecs().nodeAdhereIndex.begin(),
							nodes->getInfoVecs().membrTensionMag.begin())),
			thrust::make_zip_iterator(
					thrust::make_tuple(nodes->getInfoVecs().nodeLocX.begin(),
							nodes->getInfoVecs().nodeLocY.begin(),
							nodes->getInfoVecs().nodeIsActive.begin(),
							nodes->getInfoVecs().nodeAdhereIndex.begin(),
							nodes->getInfoVecs().membrTensionMag.begin()))
					+ maxActiveNode,
			thrust::make_zip_iterator(
					thrust::make_tuple(hostTmpVectorLocX.begin(),
							hostTmpVectorLocY.begin(),
							hostIsActiveVec.begin(),
							hostBondVec.begin(), hostTmpVectorTenMag.begin())));

	thrust::host_vector<uint> curActiveMemNodeCounts =
			cellInfoVecs.activeMembrNodeCounts;

	CVector tmpPos;
	uint index1;
	int index2;
	std::vector<BondInfo> bondInfoVec;
	double node1X, node1Y;
	double node2X, node2Y;
	double aniVal;

	// Pass 1: adhesion bonds. Each active membrane node with a valid
	// adhesion partner at a higher index records one bond (the index2 >
	// index1 check avoids emitting each bond twice).
	for (uint i = 0; i < activeCellCount; i++) {
		for (uint j = 0; j < maxMemNodePerCell; j++) {
			index1 = beginIndx + i * maxNodePerCell + j;
			if (hostIsActiveVec[index1] == true) {
				index2 = hostBondVec[index1];
				if (index2 > index1 && index2 != -1) {
					BondInfo bond;
					bond.cellRank1 = i;
					bond.pos1 = CVector(hostTmpVectorLocX[index1],
							hostTmpVectorLocY[index1], 0);
					bond.cellRank2 = (index2 - beginIndx) / maxNodePerCell;
					bond.pos2 = CVector(hostTmpVectorLocX[index2],
							hostTmpVectorLocY[index2], 0);
					bondInfoVec.push_back(bond);
				}
			}
		}
	}

	rawAniData.bondsArr = bondInfoVec;

	uint curIndex = 0;
	// Pass 2: membrane links. Consecutive active membrane nodes of each cell
	// are linked (the last active node wraps around to the first), with the
	// membrane tension magnitude as the animation value.
	for (uint i = 0; i < activeCellCount; i++) {
		for (uint j = 0; j < curActiveMemNodeCounts[i]; j++) {
			index1 = beginIndx + i * maxNodePerCell + j;
			if (j == curActiveMemNodeCounts[i] - 1) {
				index2 = beginIndx + i * maxNodePerCell;
			} else {
				index2 = beginIndx + i * maxNodePerCell + j + 1;
			}

			if (hostIsActiveVec[index1] == true
					&& hostIsActiveVec[index2] == true) {

				node1X = hostTmpVectorLocX[index1];
				node1Y = hostTmpVectorLocY[index1];
				node2X = hostTmpVectorLocX[index2];
				node2Y = hostTmpVectorLocY[index2];

				// Register each endpoint in the animation node array exactly
				// once; the map translates node index -> animation index.
				IndexMap::iterator it = locIndexToAniIndexMap.find(index1);
				if (it == locIndexToAniIndexMap.end()) {
					locIndexToAniIndexMap.insert(
							std::pair<uint, uint>(index1, curIndex));
					curIndex++;
					tmpPos = CVector(node1X, node1Y, 0);
					//aniVal = hostTmpVectorNodeType[index1];
					aniVal = hostTmpVectorTenMag[index1];
					rawAniData.aniNodePosArr.push_back(tmpPos);
					rawAniData.aniNodeVal.push_back(aniVal);
				}
				it = locIndexToAniIndexMap.find(index2);
				if (it == locIndexToAniIndexMap.end()) {
					locIndexToAniIndexMap.insert(
							std::pair<uint, uint>(index2, curIndex));
					curIndex++;
					tmpPos = CVector(node2X, node2Y, 0);
					//aniVal = hostTmpVectorNodeType[index2];
					aniVal = hostTmpVectorTenMag[index2];
					rawAniData.aniNodePosArr.push_back(tmpPos);
					rawAniData.aniNodeVal.push_back(aniVal);
				}

				it = locIndexToAniIndexMap.find(index1);
				uint aniIndex1 = it->second;
				it = locIndexToAniIndexMap.find(index2);
				uint aniIndex2 = it->second;

				LinkAniData linkData;
				linkData.node1Index = aniIndex1;
				linkData.node2Index = aniIndex2;
				rawAniData.memLinks.push_back(linkData);
			}
		}
	}

	// Pass 3: internal links. Every active internal-node pair within a cell
	// that passes the animation criterion gets a link; these nodes use the
	// sentinel animation value -1.
	// NOTE(review): unlike passes 1-2, these indices do not add beginIndx —
	// presumably bdryNodeCount is 0 in this configuration; confirm.
	for (uint i = 0; i < activeCellCount; i++) {
		for (uint j = 0; j < allocPara_m.maxIntnlNodePerCell; j++) {
			for (uint k = j + 1; k < allocPara_m.maxIntnlNodePerCell; k++) {
				index1 = i * maxNodePerCell + maxMemNodePerCell + j;
				index2 = i * maxNodePerCell + maxMemNodePerCell + k;

				if (hostIsActiveVec[index1] && hostIsActiveVec[index2]) {
					node1X = hostTmpVectorLocX[index1];
					node1Y = hostTmpVectorLocY[index1];
					node2X = hostTmpVectorLocX[index2];
					node2Y = hostTmpVectorLocY[index2];

					if (aniCri.isPairQualify_M(node1X, node1Y, node2X,
							node2Y)) {
						IndexMap::iterator it = locIndexToAniIndexMap.find(
								index1);
						if (it == locIndexToAniIndexMap.end()) {
							locIndexToAniIndexMap.insert(
									std::pair<uint, uint>(index1, curIndex));
							curIndex++;
							tmpPos = CVector(node1X, node1Y, 0);
							//aniVal = hostTmpVectorNodeType[index1];
							aniVal = -1;
							rawAniData.aniNodePosArr.push_back(tmpPos);
							rawAniData.aniNodeVal.push_back(aniVal);
						}
						it = locIndexToAniIndexMap.find(index2);
						if (it == locIndexToAniIndexMap.end()) {
							locIndexToAniIndexMap.insert(
									std::pair<uint, uint>(index2, curIndex));
							curIndex++;
							tmpPos = CVector(node2X, node2Y, 0);
							//aniVal = hostTmpVectorNodeType[index1];
							aniVal = -1;
							rawAniData.aniNodePosArr.push_back(tmpPos);
							rawAniData.aniNodeVal.push_back(aniVal);
						}

						it = locIndexToAniIndexMap.find(index1);
						uint aniIndex1 = it->second;
						it = locIndexToAniIndexMap.find(index2);
						uint aniIndex2 = it->second;

						LinkAniData linkData;
						linkData.node1Index = aniIndex1;
						linkData.node2Index = aniIndex2;
						rawAniData.internalLinks.push_back(linkData);
					}
				}
			}
		}
	}
	return rawAniData;
}
AniRawData SceCells::obtainAniRawDataGivenCellColor(vector<double>& cellColors,
AnimationCriteria& aniCri, vector<double>& cellsPerimeter, vector <double> & cellsDppLevel) { //AliE
cout << "I am in obtainAniRawDataGivenCellColor start"<<endl;
uint activeCellCount = allocPara_m.currentActiveCellCount;
uint maxNodePerCell = allocPara_m.maxAllNodePerCell;
uint maxMemNodePerCell = allocPara_m.maxMembrNodePerCell;
uint beginIndx = allocPara_m.bdryNodeCount;
assert(cellColors.size() >= activeCellCount);
assert(cellsPerimeter.size() == activeCellCount); //AliE
AniRawData rawAniData;
//cout << "size of potential pairs = " << pairs.size() << endl;
// unordered_map is more efficient than map, but it is a c++ 11 feature
// and c++ 11 seems to be incompatible with Thrust.
IndexMap locIndexToAniIndexMap;
uint maxActiveNode = activeCellCount * maxNodePerCell;
thrust::host_vector<double> hostTmpVectorLocX(maxActiveNode);
thrust::host_vector<double> hostTmpVectorLocY(maxActiveNode);
thrust::host_vector<bool> hostIsActiveVec(maxActiveNode);
thrust::host_vector<int> hostBondVec(maxActiveNode);
thrust::host_vector<double> hostTmpVectorTenMag(maxActiveNode);
thrust::host_vector<double> hostTmpVectorF_MI_M_x(maxActiveNode);//AAMIRI //AliE
thrust::host_vector<double> hostTmpVectorF_MI_M_y(maxActiveNode);//AAMIRI //AliE
thrust::host_vector<double> hostTmpVectorF_MI_M_T(maxActiveNode); //AliE
thrust::host_vector<double> hostTmpVectorF_MI_M_N(maxActiveNode);//AliE
thrust::host_vector<double> hostTmpVectorNodeCurvature(maxActiveNode);//AAMIRI
thrust::host_vector<double> hostTmpVectorExtForceTangent(maxActiveNode);//AAMIRI
thrust::host_vector<double> hostTmpVectorExtForceNormal(maxActiveNode);//AAMIRI
thrust::copy(
thrust::make_zip_iterator(
thrust::make_tuple(nodes->getInfoVecs().nodeLocX.begin(),
nodes->getInfoVecs().nodeLocY.begin(),
nodes->getInfoVecs().nodeF_MI_M_x.begin(),//AAMIRI //AliE
nodes->getInfoVecs().nodeF_MI_M_y.begin(),//AAMIRI //AliE
nodes->getInfoVecs().nodeCurvature.begin(),//AAMIRI
nodes->getInfoVecs().nodeIsActive.begin(),
nodes->getInfoVecs().nodeAdhereIndex.begin(),
nodes->getInfoVecs().membrTensionMag.begin(),
nodes->getInfoVecs().nodeExtForceTangent.begin(),//AAMIRI
nodes->getInfoVecs().nodeExtForceNormal.begin())),//AAMIRI
thrust::make_zip_iterator(
thrust::make_tuple(nodes->getInfoVecs().nodeLocX.begin(),
nodes->getInfoVecs().nodeLocY.begin(),
nodes->getInfoVecs().nodeF_MI_M_x.begin(),//AAMIRI //AliE
nodes->getInfoVecs().nodeF_MI_M_y.begin(),//AAMIRI //AliE
nodes->getInfoVecs().nodeCurvature.begin(),//AAMIRI
nodes->getInfoVecs().nodeIsActive.begin(),
nodes->getInfoVecs().nodeAdhereIndex.begin(),
nodes->getInfoVecs().membrTensionMag.begin(),
nodes->getInfoVecs().nodeExtForceTangent.begin(),//AAMIRI
nodes->getInfoVecs().nodeExtForceNormal.begin()))//AAMIRI
+ maxActiveNode,
thrust::make_zip_iterator(
thrust::make_tuple(hostTmpVectorLocX.begin(),
hostTmpVectorLocY.begin(),
hostTmpVectorF_MI_M_x.begin(), hostTmpVectorF_MI_M_y.begin(),//AAMIRI
hostTmpVectorNodeCurvature.begin(), //AAMIRI
hostIsActiveVec.begin(),
hostBondVec.begin(), hostTmpVectorTenMag.begin(),
hostTmpVectorExtForceTangent.begin(), hostTmpVectorExtForceNormal.begin())));//AAMIRI
//Copy more than 10 elements is not allowed so, I separate it
thrust::copy(
thrust::make_zip_iterator(
thrust::make_tuple(
nodes->getInfoVecs().nodeF_MI_M_T.begin(), //Ali
nodes->getInfoVecs().nodeF_MI_M_N.begin() //Ali
)),
thrust::make_zip_iterator(
thrust::make_tuple(
nodes->getInfoVecs().nodeF_MI_M_T.begin(),//AliE
nodes->getInfoVecs().nodeF_MI_M_N.begin() //AliE
))
+ maxActiveNode,
thrust::make_zip_iterator(
thrust::make_tuple(
hostTmpVectorF_MI_M_T.begin(), hostTmpVectorF_MI_M_N.begin()
)));
thrust::host_vector<uint> curActiveMemNodeCounts =
cellInfoVecs.activeMembrNodeCounts;
thrust::host_vector<uint> curActiveIntnlNodeCounts =
cellInfoVecs.activeIntnlNodeCounts;
CVector tmpPos;
CVector tmpF_MI_M ;//AAmiri
CVector tmpExtForce;//AAMIRI
double tmpCurv;
uint index1;
int index2;
std::vector<BondInfo> bondInfoVec;
double node1X, node1Y;
double node2X, node2Y;
double node1F_MI_M_x, node1F_MI_M_y;//AAMIRI //AliE
double nodeExtForceT, nodeExtForceN;//AAMIRI
double aniVal;
double aniVal2;
double tmpF_MI_M_MagN_Int[activeCellCount-1] ; //AliE
//This is how the VTK file is intended to be written. First the memmbraen nodes are going to be written and then internal nodes.
//loop on membrane nodes
for (uint i = 0; i < activeCellCount; i++) {
tmpF_MI_M_MagN_Int[i]=0.0 ;
for (uint j = 0; j < curActiveMemNodeCounts[i]; j++) {
index1 = beginIndx + i * maxNodePerCell + j;
if ( hostIsActiveVec[index1]==true) {
tmpCurv = hostTmpVectorNodeCurvature[index1];//AAMIRI
rawAniData.aniNodeCurvature.push_back(tmpCurv);//AAMIRI
node1F_MI_M_x= hostTmpVectorF_MI_M_x[index1]; //AliE
node1F_MI_M_y= hostTmpVectorF_MI_M_y[index1]; //AliE
tmpF_MI_M= CVector(node1F_MI_M_x, node1F_MI_M_y, 0.0); //AliE
rawAniData.aniNodeF_MI_M.push_back(tmpF_MI_M); //AliE
// tmpF_MI_M_MagN_Int[i]=tmpF_MI_M_MagN_Int[i]+sqrt(pow(hostTmpVectorF_MI_M_x[index1],2)+pow(hostTmpVectorF_MI_M_y[index1],2)) ; //AliE
tmpF_MI_M_MagN_Int[i]=tmpF_MI_M_MagN_Int[i]+abs(hostTmpVectorF_MI_M_N[index1]) ; //AliE
nodeExtForceT = hostTmpVectorExtForceTangent[index1];//AAMIRI
nodeExtForceN = hostTmpVectorExtForceNormal[index1];//AAMIRI
tmpExtForce = CVector(nodeExtForceT, nodeExtForceN, 0.0);//AAMIRI
rawAniData.aniNodeExtForceArr.push_back(tmpExtForce);
rawAniData.aniNodeRank.push_back(i);//AAMIRI
}
}
}
//loop on internal nodes
for (uint i=0; i<activeCellCount; i++){
for (uint j = maxMemNodePerCell; j < maxNodePerCell; j++) {
index1 = beginIndx + i * maxNodePerCell + j;
if ( hostIsActiveVec[index1]==true ) {
tmpCurv = hostTmpVectorNodeCurvature[index1];//AAMIRI
rawAniData.aniNodeCurvature.push_back(tmpCurv);//AAMIRI
node1F_MI_M_x= hostTmpVectorF_MI_M_x[index1]; //AliE
node1F_MI_M_y= hostTmpVectorF_MI_M_y[index1]; //AliE
tmpF_MI_M= CVector(node1F_MI_M_x, node1F_MI_M_y, 0.0); //AliE
rawAniData.aniNodeF_MI_M.push_back(tmpF_MI_M);
nodeExtForceT = hostTmpVectorExtForceTangent[index1];//AAMIRI
nodeExtForceN = hostTmpVectorExtForceNormal[index1];//AAMIRI
tmpExtForce = CVector(nodeExtForceT, nodeExtForceN, 0.0);//AAMIRI
rawAniData.aniNodeExtForceArr.push_back(tmpExtForce);
rawAniData.aniNodeRank.push_back(i);//AAMIRI
}
}
}
for (uint i = 0; i < activeCellCount; i++) {
for (uint j = 0; j < maxMemNodePerCell; j++) {
index1 = beginIndx + i * maxNodePerCell + j;
if (hostIsActiveVec[index1] == true) {
index2 = hostBondVec[index1];
if (index2 > index1 && index2 != -1) {
BondInfo bond;
bond.cellRank1 = i;
bond.pos1 = CVector(hostTmpVectorLocX[index1],
hostTmpVectorLocY[index1], 0);
bond.cellRank2 = (index2 - beginIndx) / maxNodePerCell;
bond.pos2 = CVector(hostTmpVectorLocX[index2],
hostTmpVectorLocY[index2], 0);
bondInfoVec.push_back(bond);
}
}
}
}
rawAniData.bondsArr = bondInfoVec;
uint curIndex = 0;
//loop on membrane nodes
for (uint i = 0; i < activeCellCount; i++) {
for (uint j = 0; j < curActiveMemNodeCounts[i]; j++) {
index1 = beginIndx + i * maxNodePerCell + j;
if (j == curActiveMemNodeCounts[i] - 1) {
index2 = beginIndx + i * maxNodePerCell;
} else {
index2 = beginIndx + i * maxNodePerCell + j + 1;
}
if (hostIsActiveVec[index1] == true
&& hostIsActiveVec[index2] == true) {
node1X = hostTmpVectorLocX[index1];
node1Y = hostTmpVectorLocY[index1];
node2X = hostTmpVectorLocX[index2];
node2Y = hostTmpVectorLocY[index2];
IndexMap::iterator it = locIndexToAniIndexMap.find(index1);
if (it == locIndexToAniIndexMap.end()) {
locIndexToAniIndexMap.insert(
std::pair<uint, uint>(index1, curIndex));
curIndex++;
tmpPos = CVector(node1X, node1Y, 0);
//aniVal = hostTmpVectorNodeType[index1];
aniVal = cellColors[i];
rawAniData.aniNodeF_MI_M_MagN_Int.push_back(tmpF_MI_M_MagN_Int[i]/cellsPerimeter[i]) ; //Ali added
//aniVal2=dppLevels_Cell[i] ;
aniVal2=cellsDppLevel[i] ;
rawAniData.dppLevel.push_back(aniVal2) ; //Ali Added
rawAniData.aniNodePosArr.push_back(tmpPos);
rawAniData.aniNodeVal.push_back(aniVal);
}
it = locIndexToAniIndexMap.find(index2);
if (it == locIndexToAniIndexMap.end()) {
locIndexToAniIndexMap.insert(
std::pair<uint, uint>(index2, curIndex));
curIndex++;
tmpPos = CVector(node2X, node2Y, 0);
//aniVal = hostTmpVectorNodeType[index2];
aniVal = cellColors[i];
rawAniData.aniNodeF_MI_M_MagN_Int.push_back(tmpF_MI_M_MagN_Int[i]/cellsPerimeter[i]) ; //Ali Added
//aniVal2=dppLevels_Cell[i];
aniVal2=cellsDppLevel[i];
rawAniData.dppLevel.push_back(aniVal2) ; //Ali Added
rawAniData.aniNodePosArr.push_back(tmpPos);
rawAniData.aniNodeVal.push_back(aniVal);
}
it = locIndexToAniIndexMap.find(index1);
uint aniIndex1 = it->second;
it = locIndexToAniIndexMap.find(index2);
uint aniIndex2 = it->second;
LinkAniData linkData;
linkData.node1Index = aniIndex1;
linkData.node2Index = aniIndex2;
rawAniData.memLinks.push_back(linkData);
}
}
}
//loop on internal nodes
for (uint i = 0; i < activeCellCount; i++) {
// for (uint j = 0; j < allocPara_m.maxAllNodePerCell; j++) {
for (uint j = 0; j < allocPara_m.maxIntnlNodePerCell; j++) {
for (uint k = 0; k < allocPara_m.maxAllNodePerCell; k++) { //Ali
//for (uint k = j + 1; k < allocPara_m.maxIntnlNodePerCell; k++) { //Ali comment
index1 = i * maxNodePerCell + maxMemNodePerCell + j;
index2 = i * maxNodePerCell + k; //Ali
// index2 = i * maxNodePerCell + maxMemNodePerCell + k; //Ali comment
// if (hostIsActiveVec[index1] && hostIsActiveVec[index2]) {
if (hostIsActiveVec[index1] && hostIsActiveVec[index2]&& index1 !=index2 ) {
node1X = hostTmpVectorLocX[index1];
node1Y = hostTmpVectorLocY[index1];
node2X = hostTmpVectorLocX[index2];
node2Y = hostTmpVectorLocY[index2];
if (aniCri.isPairQualify_M(node1X, node1Y, node2X,
node2Y)) {
IndexMap::iterator it = locIndexToAniIndexMap.find(
index1);
if (it == locIndexToAniIndexMap.end()) {
locIndexToAniIndexMap.insert(
std::pair<uint, uint>(index1, curIndex));
curIndex++;
tmpPos = CVector(node1X, node1Y, 0);
//aniVal = hostTmpVectorNodeType[index1];
aniVal = cellColors[i];
rawAniData.aniNodeF_MI_M_MagN_Int.push_back(tmpF_MI_M_MagN_Int[i]/cellsPerimeter[i]) ; //Ali Added
//aniVal2=dppLevels_Cell[i];
aniVal2=cellsDppLevel[i];
rawAniData.dppLevel.push_back(aniVal2) ; //Ali Added
rawAniData.aniNodePosArr.push_back(tmpPos);
rawAniData.aniNodeVal.push_back(aniVal);
}
it = locIndexToAniIndexMap.find(index2);
if (it == locIndexToAniIndexMap.end()) {
locIndexToAniIndexMap.insert(
std::pair<uint, uint>(index2, curIndex));
curIndex++;
tmpPos = CVector(node2X, node2Y, 0);
//aniVal = hostTmpVectorNodeType[index1];
aniVal = cellColors[i];
rawAniData.aniNodeF_MI_M_MagN_Int.push_back(tmpF_MI_M_MagN_Int[i]/cellsPerimeter[i]) ; //Ali Added
//aniVal2=dppLevels_Cell[i];
aniVal2=cellsDppLevel[i];
rawAniData.dppLevel.push_back(aniVal2) ; //Ali Added
rawAniData.aniNodePosArr.push_back(tmpPos);
rawAniData.aniNodeVal.push_back(aniVal);
}
it = locIndexToAniIndexMap.find(index1);
uint aniIndex1 = it->second;
it = locIndexToAniIndexMap.find(index2);
uint aniIndex2 = it->second;
LinkAniData linkData;
linkData.node1Index = aniIndex1;
linkData.node2Index = aniIndex2;
rawAniData.internalLinks.push_back(linkData);
}
}
}
}
}
cout << "I am in obtainAniRawDataGivenCellColor end"<<endl;
return rawAniData;
}
void SceCells::copyInitActiveNodeCount_M(
		std::vector<uint>& initMembrActiveNodeCounts,
		std::vector<uint>& initIntnlActiveNodeCounts,
		std::vector<double> &initGrowProgVec) {
	// Seeds the per-cell device vectors (active membrane node count, active
	// internal node count, growth progress) from host-side initialization
	// data, and derives totalNodeCountForActiveCells from the cell count.
	//
	// All three input vectors describe one entry per cell, so they must all
	// be the same length; otherwise the thrust::copy calls below would read
	// past the end of the shorter ones. The original code only validated the
	// first two vectors — the growth-progress vector is now checked as well.
	assert(
			initMembrActiveNodeCounts.size()
					== initIntnlActiveNodeCounts.size());
	assert(initMembrActiveNodeCounts.size() == initGrowProgVec.size());
	totalNodeCountForActiveCells = initMembrActiveNodeCounts.size()
			* allocPara_m.maxAllNodePerCell;
	// Host -> device copies into the persistent per-cell info vectors.
	thrust::copy(initMembrActiveNodeCounts.begin(),
			initMembrActiveNodeCounts.end(),
			cellInfoVecs.activeMembrNodeCounts.begin());
	thrust::copy(initIntnlActiveNodeCounts.begin(),
			initIntnlActiveNodeCounts.end(),
			cellInfoVecs.activeIntnlNodeCounts.begin());
	thrust::copy(initGrowProgVec.begin(), initGrowProgVec.end(),
			cellInfoVecs.growthProgress.begin());
}
void SceCells::myDebugFunction() {
	// Interactive debugging aid: dumps allocation parameters, a set of
	// per-cell state vectors, and the X-velocity of active internal nodes
	// to stdout, then blocks until a value is typed on stdin.
	uint activeNodeCap = allocPara_m.currentActiveCellCount
			* allocPara_m.maxAllNodePerCell;
	uint activeCellCap = allocPara_m.currentActiveCellCount;
	std::cout << "totalNodeCountforActiveCells: "
			<< totalNodeCountForActiveCells << std::endl;
	std::cout << "maxAllNodePerCell: " << allocPara_m.maxAllNodePerCell
			<< std::endl;
	std::cout << "maxActiveCellCount: " << activeCellCap << std::endl;
	std::cout << "bdryNodeCount: " << allocPara_m.bdryNodeCount << std::endl;
	std::cout << "grow threshold: " << miscPara.growThreshold << std::endl;
	std::cout << std::endl;
	// Per-cell scalar fields, one space-separated line each.
	for (uint c = 0; c < activeCellCap; c++) {
		std::cout << cellInfoVecs.growthProgress[c] << " ";
	}
	std::cout << std::endl;
	for (uint c = 0; c < activeCellCap; c++) {
		std::cout << cellInfoVecs.isScheduledToGrow[c] << " ";
	}
	std::cout << std::endl;
	for (uint c = 0; c < activeCellCap; c++) {
		std::cout << cellInfoVecs.lastCheckPoint[c] << " ";
	}
	std::cout << std::endl;
	// X velocity, restricted to nodes that are both active and internal.
	for (uint n = 0; n < activeNodeCap; n++) {
		if (nodes->getInfoVecs().nodeIsActive[n]
				&& nodes->getInfoVecs().nodeCellType[n] == CellIntnl) {
			std::cout << nodes->getInfoVecs().nodeVelX[n] << " ";
		}
	}
	std::cout << std::endl;
	for (uint c = 0; c < activeCellCap; c++) {
		std::cout << cellInfoVecs.activeIntnlNodeCounts[c] << " ";
	}
	std::cout << std::endl;
	for (uint c = 0; c < activeCellCap; c++) {
		std::cout << cellInfoVecs.expectedLength[c] << " ";
	}
	std::cout << std::endl;
	for (uint c = 0; c < activeCellCap; c++) {
		std::cout << cellInfoVecs.smallestDistance[c] << " ";
	}
	std::cout << std::endl;
	for (uint c = 0; c < activeCellCap; c++) {
		std::cout << cellInfoVecs.biggestDistance[c] << " ";
	}
	std::cout << std::endl;
	for (uint c = 0; c < activeCellCap; c++) {
		std::cout << cellInfoVecs.lengthDifference[c] << " ";
	}
	std::cout << std::endl;
	for (uint c = 0; c < activeCellCap; c++) {
		std::cout << cellInfoVecs.centerCoordX[c] << " ";
	}
	std::cout << std::endl;
	for (uint c = 0; c < activeCellCap; c++) {
		std::cout << cellInfoVecs.centerCoordY[c] << " ";
	}
	std::cout << std::endl;
	for (uint c = 0; c < activeCellCap; c++) {
		std::cout << cellInfoVecs.growthXDir[c] << " ";
	}
	std::cout << std::endl;
	for (uint c = 0; c < activeCellCap; c++) {
		std::cout << cellInfoVecs.growthYDir[c] << " ";
	}
	std::cout << std::endl;
	// Pause so the dump can be inspected before the simulation continues.
	int pauseKey;
	std::cin >> pauseKey;
}
// Interactive debug dump of divAuxData, the scratch buffers used during
// cell division. Prints every temporary vector to stdout and pauses on
// stdin at the end; additionally pauses mid-dump if two consecutive active
// membrane nodes of a daughter cell are more than 0.1 apart in X.
// Debug-only; code left untouched because the print/pause order matters.
void SceCells::divDebug() {
std::cout << "tmpIsActive_M: ";
for (uint i = 0; i < divAuxData.tmpIsActive_M.size(); i++) {
std::cout << divAuxData.tmpIsActive_M[i] << " ";
}
std::cout << std::endl;
std::cout << "tmpNodePosX_M: ";
for (uint i = 0; i < divAuxData.tmpNodePosX_M.size(); i++) {
std::cout << divAuxData.tmpNodePosX_M[i] << " ";
}
std::cout << std::endl;
std::cout << "tmpNodePosY_M : ";
for (uint i = 0; i < divAuxData.tmpNodePosY_M.size(); i++) {
std::cout << divAuxData.tmpNodePosY_M[i] << " ";
}
std::cout << std::endl;
std::cout << "tmpCellRank_M : ";
for (uint i = 0; i < divAuxData.tmpCellRank_M.size(); i++) {
std::cout << divAuxData.tmpCellRank_M[i] << " ";
}
std::cout << std::endl;
std::cout << "tmpDivDirX_M : ";
for (uint i = 0; i < divAuxData.tmpDivDirX_M.size(); i++) {
std::cout << divAuxData.tmpDivDirX_M[i] << " ";
}
std::cout << std::endl;
std::cout << "tmpDivDirY_M : ";
for (uint i = 0; i < divAuxData.tmpDivDirY_M.size(); i++) {
std::cout << divAuxData.tmpDivDirY_M[i] << " ";
}
std::cout << std::endl;
std::cout << "tmpCenterPosX_M : ";
for (uint i = 0; i < divAuxData.tmpCenterPosX_M.size(); i++) {
std::cout << divAuxData.tmpCenterPosX_M[i] << " ";
}
std::cout << std::endl;
std::cout << "tmpCenterPosY_M : ";
for (uint i = 0; i < divAuxData.tmpCenterPosY_M.size(); i++) {
std::cout << divAuxData.tmpCenterPosY_M[i] << " ";
}
std::cout << std::endl;
std::cout << "tmpIsActive1_M : ";
for (uint i = 0; i < divAuxData.tmpIsActive1_M.size(); i++) {
std::cout << divAuxData.tmpIsActive1_M[i] << " ";
}
std::cout << std::endl;
std::cout << "tmpXPos1_M : ";
for (uint i = 0; i < divAuxData.tmpXPos1_M.size(); i++) {
std::cout << divAuxData.tmpXPos1_M[i] << " ";
// Sanity check: adjacent active membrane nodes of daughter cell 1 should
// be close together; an X jump > 0.1 flags a problem and pauses here.
if (i > 0 && i < allocPara_m.maxMembrNodePerCell
&& divAuxData.tmpIsActive1_M[i]
&& divAuxData.tmpIsActive1_M[i - 1]
&& fabs(divAuxData.tmpXPos1_M[i] - divAuxData.tmpXPos1_M[i - 1])
> 0.1) {
std::cout << "11111111111111111111111, " << i << std::endl;
int jj;
cin >> jj;
}
}
std::cout << std::endl;
std::cout << "XPos1_onDevice : ";
// Cross-check against the live node positions for each dividing cell rank.
for (uint i = 0; i < divAuxData.tmpCellRank_M.size(); i++) {
for (uint j = 0; j < allocPara_m.maxAllNodePerCell; j++) {
uint index = divAuxData.tmpCellRank_M[i]
* allocPara_m.maxAllNodePerCell + j;
std::cout << nodes->getInfoVecs().nodeLocX[index] << " ";
}
}
std::cout << std::endl;
std::cout << "tmpYPos1_M : ";
for (uint i = 0; i < divAuxData.tmpYPos1_M.size(); i++) {
std::cout << divAuxData.tmpYPos1_M[i] << " ";
}
std::cout << std::endl;
std::cout << "tmpIsActive2_M: ";
for (uint i = 0; i < divAuxData.tmpIsActive2_M.size(); i++) {
std::cout << divAuxData.tmpIsActive2_M[i] << " ";
}
std::cout << std::endl;
std::cout << "tmpXPos2_M : ";
for (uint i = 0; i < divAuxData.tmpXPos2_M.size(); i++) {
std::cout << divAuxData.tmpXPos2_M[i] << " ";
// Same adjacency sanity check for daughter cell 2.
if (i > 0 && i < allocPara_m.maxMembrNodePerCell
&& divAuxData.tmpIsActive2_M[i]
&& divAuxData.tmpIsActive2_M[i - 1]
&& fabs(divAuxData.tmpXPos2_M[i] - divAuxData.tmpXPos2_M[i - 1])
> 0.1) {
std::cout << "2222222222222222222, " << i << std::endl;
int jj;
cin >> jj;
}
}
std::cout << std::endl;
std::cout << "tmpYPos2_M : ";
for (uint i = 0; i < divAuxData.tmpYPos2_M.size(); i++) {
std::cout << divAuxData.tmpYPos2_M[i] << " ";
}
std::cout << std::endl;
std::cout << "tmp1InternalActiveCounts: ";
for (uint i = 0; i < divAuxData.tmp1InternalActiveCounts.size(); i++) {
std::cout << divAuxData.tmp1InternalActiveCounts[i] << " ";
}
std::cout << std::endl;
std::cout << "tmp2InternalActiveCounts: ";
for (uint i = 0; i < divAuxData.tmp2InternalActiveCounts.size(); i++) {
std::cout << divAuxData.tmp2InternalActiveCounts[i] << " ";
}
std::cout << std::endl;
std::cout << "tmp1MemActiveCounts: ";
for (uint i = 0; i < divAuxData.tmp1MemActiveCounts.size(); i++) {
std::cout << divAuxData.tmp1MemActiveCounts[i] << " ";
}
std::cout << std::endl;
std::cout << "tmp2MemActiveCounts: ";
for (uint i = 0; i < divAuxData.tmp2MemActiveCounts.size(); i++) {
std::cout << divAuxData.tmp2MemActiveCounts[i] << " ";
}
std::cout << std::endl;
// Block until the user types something, so the dump can be inspected.
int jj;
std::cin >> jj;
}
// Rewrites the growth bookkeeping of scheduled-to-grow cells via the
// AdjustGrowth functor, parameterized by half the internal node capacity.
// transform_if layout:
//   input : zip of (activeIntnlNodeCounts, growthProgress, lastCheckPoint)
//           over the first currentActiveCellCount cells
//   stencil: isScheduledToGrow — only flagged cells are transformed
//   output: zip of (isScheduledToGrow, growthProgress, lastCheckPoint),
//           i.e. AdjustGrowth may clear the schedule flag and reset the
//           progress/checkpoint values in place.
void SceCells::adjustGrowthInfo_M() {
uint halfMax = allocPara_m.maxIntnlNodePerCell / 2;
thrust::transform_if(
thrust::make_zip_iterator(
thrust::make_tuple(
cellInfoVecs.activeIntnlNodeCounts.begin(),
cellInfoVecs.growthProgress.begin(),
cellInfoVecs.lastCheckPoint.begin())),
thrust::make_zip_iterator(
thrust::make_tuple(
cellInfoVecs.activeIntnlNodeCounts.begin(),
cellInfoVecs.growthProgress.begin(),
cellInfoVecs.lastCheckPoint.begin()))
+ allocPara_m.currentActiveCellCount,
cellInfoVecs.isScheduledToGrow.begin(),
thrust::make_zip_iterator(
thrust::make_tuple(cellInfoVecs.isScheduledToGrow.begin(),
cellInfoVecs.growthProgress.begin(),
cellInfoVecs.lastCheckPoint.begin())),
AdjustGrowth(halfMax), thrust::identity<bool>());
}
VtkAnimationData SceCells::outputVtkData(AniRawData& rawAniData,
		AnimationCriteria& aniCri) {
	// Packages raw animation data into a VtkAnimationData record.
	// Point entries are copied first; the internal links and then the
	// membrane links are appended to the same linksAniData vector.
	VtkAnimationData vtkData;
	uint pointCount = rawAniData.aniNodePosArr.size();
	for (uint idx = 0; idx < pointCount; idx++) {
		PointAniData ptData;
		ptData.pos = rawAniData.aniNodePosArr[idx];
		ptData.F_MI_M_MagN_Int = rawAniData.aniNodeF_MI_M_MagN_Int[idx]; //AliE
		ptData.dppLevel1 = rawAniData.dppLevel[idx]; //AliE
		ptData.F_MI_M = rawAniData.aniNodeF_MI_M[idx]; //AAMIRI
		ptData.colorScale = rawAniData.aniNodeVal[idx];
		ptData.colorScale2 = rawAniData.aniNodeCurvature[idx]; //AAMIRI
		ptData.rankScale = rawAniData.aniNodeRank[idx]; //AAMIRI
		ptData.extForce = rawAniData.aniNodeExtForceArr[idx]; //AAMIRI
		vtkData.pointsAniData.push_back(ptData);
	}
	for (uint idx = 0; idx < rawAniData.internalLinks.size(); idx++) {
		vtkData.linksAniData.push_back(rawAniData.internalLinks[idx]);
	}
	for (uint idx = 0; idx < rawAniData.memLinks.size(); idx++) {
		vtkData.linksAniData.push_back(rawAniData.memLinks[idx]);
	}
	vtkData.isArrowIncluded = false;
	return vtkData;
}
// One-time upload of simulation parameters into device __constant__ memory.
// Scalar values come from the global configuration and from membrPara /
// growthAuxData; each cudaMemcpyToSymbol targets a __constant__ symbol
// declared elsewhere in this translation unit. The three 5-element arrays
// are packed as {U0, V0, k1, k2, effectiveRange} parameter sets for the
// pairwise interaction potentials (internal-boundary, intra-cell, and
// their division-time variants).
// NOTE(review): the cudaMemcpyToSymbol return codes are not checked — a
// failed upload would go unnoticed until kernels read stale constants.
void SceCells::copyToGPUConstMem() {
double pI_CPU = acos(-1.0);
double minLengthCPU =
globalConfigVars.getConfigValue("MinLength").toDouble();
cudaMemcpyToSymbol(minLength, &minLengthCPU, sizeof(double));
double minDivisorCPU =
globalConfigVars.getConfigValue("MinDivisor").toDouble();
cudaMemcpyToSymbol(minDivisor, &minDivisorCPU, sizeof(double));
cudaMemcpyToSymbol(membrEquLen, &membrPara.membrEquLenCPU, sizeof(double));
cudaMemcpyToSymbol(membrStiff, &membrPara.membrStiffCPU, sizeof(double));
cudaMemcpyToSymbol(membrStiff_Mitotic, &membrPara.membrStiff_Mitotic, sizeof(double)); // Ali June 30
cudaMemcpyToSymbol(pI, &pI_CPU, sizeof(double));
cudaMemcpyToSymbol(bendCoeff, &membrPara.membrBendCoeff, sizeof(double));
cudaMemcpyToSymbol(bendCoeff_Mitotic, &membrPara.membrBendCoeff_Mitotic, sizeof(double));//AAMIRI
cudaMemcpyToSymbol(F_Ext_Incline_M2, &membrPara.F_Ext_Incline, sizeof(double)); //Ali
// Per-cell node capacity limits, read from configuration.
uint maxAllNodePerCellCPU = globalConfigVars.getConfigValue(
"MaxAllNodeCountPerCell").toInt();
uint maxMembrNodePerCellCPU = globalConfigVars.getConfigValue(
"MaxMembrNodeCountPerCell").toInt();
uint maxIntnlNodePerCellCPU = globalConfigVars.getConfigValue(
"MaxIntnlNodeCountPerCell").toInt();
cudaMemcpyToSymbol(maxAllNodePerCell, &maxAllNodePerCellCPU, sizeof(uint));
cudaMemcpyToSymbol(maxMembrPerCell, &maxMembrNodePerCellCPU, sizeof(uint));
cudaMemcpyToSymbol(maxIntnlPerCell, &maxIntnlNodePerCellCPU, sizeof(uint));
double sceIntnlBParaCPU_M[5];
double sceIntraParaCPU_M[5];
double sceIntraParaDivCPU_M[5];
// Internal-boundary interaction parameters {U0, V0, k1, k2, range}.
double U0_IntnlB =
globalConfigVars.getConfigValue("SceIntnlB_U0").toDouble();
double V0_IntnlB =
globalConfigVars.getConfigValue("SceIntnlB_V0").toDouble();
double k1_IntnlB =
globalConfigVars.getConfigValue("SceIntnlB_k1").toDouble();
double k2_IntnlB =
globalConfigVars.getConfigValue("SceIntnlB_k2").toDouble();
double intnlBEffectiveRange = globalConfigVars.getConfigValue(
"IntnlBEffectRange").toDouble();
sceIntnlBParaCPU_M[0] = U0_IntnlB;
sceIntnlBParaCPU_M[1] = V0_IntnlB;
sceIntnlBParaCPU_M[2] = k1_IntnlB;
sceIntnlBParaCPU_M[3] = k2_IntnlB;
sceIntnlBParaCPU_M[4] = intnlBEffectiveRange;
//////////////////////
//// Block 3 /////////
//////////////////////
// Intra-cell interaction parameters {U0, V0, k1, k2, range}.
double U0_Intra =
globalConfigVars.getConfigValue("IntraCell_U0").toDouble();
double V0_Intra =
globalConfigVars.getConfigValue("IntraCell_V0").toDouble();
double k1_Intra =
globalConfigVars.getConfigValue("IntraCell_k1").toDouble();
double k2_Intra =
globalConfigVars.getConfigValue("IntraCell_k2").toDouble();
double intraLinkEffectiveRange = globalConfigVars.getConfigValue(
"IntraEffectRange").toDouble();
sceIntraParaCPU_M[0] = U0_Intra;
sceIntraParaCPU_M[1] = V0_Intra;
sceIntraParaCPU_M[2] = k1_Intra;
sceIntraParaCPU_M[3] = k2_Intra;
sceIntraParaCPU_M[4] = intraLinkEffectiveRange;
//////////////////////
//// Block 4 /////////
//////////////////////
// Intra-cell interaction parameters used while a cell is dividing.
double U0_Intra_Div =
globalConfigVars.getConfigValue("IntraCell_U0_Div").toDouble();
double V0_Intra_Div =
globalConfigVars.getConfigValue("IntraCell_V0_Div").toDouble();
double k1_Intra_Div =
globalConfigVars.getConfigValue("IntraCell_k1_Div").toDouble();
double k2_Intra_Div =
globalConfigVars.getConfigValue("IntraCell_k2_Div").toDouble();
double intraDivEffectiveRange = globalConfigVars.getConfigValue(
"IntraDivEffectRange").toDouble();
sceIntraParaDivCPU_M[0] = U0_Intra_Div;
sceIntraParaDivCPU_M[1] = V0_Intra_Div;
sceIntraParaDivCPU_M[2] = k1_Intra_Div;
sceIntraParaDivCPU_M[3] = k2_Intra_Div;
sceIntraParaDivCPU_M[4] = intraDivEffectiveRange;
cudaMemcpyToSymbol(grthPrgrCriEnd_M, &growthAuxData.grthProgrEndCPU,
sizeof(double));
//cudaMemcpyToSymbol(grthPrgrCriVal_M, &growthPrgrCriVal, sizeof(double));
cudaMemcpyToSymbol(sceIB_M, sceIntnlBParaCPU_M, 5 * sizeof(double));
cudaMemcpyToSymbol(sceII_M, sceIntraParaCPU_M, 5 * sizeof(double));
cudaMemcpyToSymbol(sceIIDiv_M, sceIntraParaDivCPU_M, 5 * sizeof(double));
// Internal-boundary interaction parameters used while a cell is dividing.
double IBDivHost[5];
IBDivHost[0] =
globalConfigVars.getConfigValue("SceIntnlB_U0_Div").toDouble();
IBDivHost[1] =
globalConfigVars.getConfigValue("SceIntnlB_V0_Div").toDouble();
IBDivHost[2] =
globalConfigVars.getConfigValue("SceIntnlB_k1_Div").toDouble();
IBDivHost[3] =
globalConfigVars.getConfigValue("SceIntnlB_k2_Div").toDouble();
IBDivHost[4] =
globalConfigVars.getConfigValue("IntnlBDivEffectRange").toDouble();
cudaMemcpyToSymbol(sceIBDiv_M, IBDivHost, 5 * sizeof(double));
}
// Membrane growth pipeline for one time step. The four stages are
// order-dependent: growth speed must be computed before it is adjusted,
// the add/no-add decision consumes the adjusted speed, and node insertion
// consumes the decision flags.
void SceCells::handleMembrGrowth_M() {
// figure out membr growth speed
calMembrGrowSpeed_M();
// figure out which cells will add new point
adjustMembrGrowSpeed_M();
decideIfAddMembrNode_M();
// add membr nodes
addMembrNodes_M();
//membrDebug();
}
// Computes the per-cell membrane growth speed from membrane tension.
// Step 1: first reduce_by_key — keyed by cell rank (node index divided by
//         maxNPerCell), reduce zipped per-node tension info with MaxWInfo
//         to find, per cell, the max-tension element along with its node
//         index, link midpoint (X, Y), and distance to the right neighbor.
// Step 2: second reduce_by_key — sum membrTensionMag per cell, then divide
//         by the active membrane node count to get the average tension.
// Step 3: map average tension to a growth speed via MultiWithLimit
//         (linear in tension, capped by membrGrowLimit).
// The growth coefficient and limit decay over time via prolifDecay.
void SceCells::calMembrGrowSpeed_M() {
membrPara.membrGrowCoeff = growthAuxData.prolifDecay
* membrPara.membrGrowCoeff_Ori;
membrPara.membrGrowLimit = growthAuxData.prolifDecay
* membrPara.membrGrowLimit_Ori;
// reduce_by_key, find value of max tension and their index
thrust::counting_iterator<uint> iBegin(0);
uint maxNPerCell = allocPara_m.maxAllNodePerCell;
thrust::reduce_by_key(
make_transform_iterator(iBegin, DivideFunctor(maxNPerCell)),
make_transform_iterator(iBegin, DivideFunctor(maxNPerCell))
+ totalNodeCountForActiveCells,
thrust::make_zip_iterator(
thrust::make_tuple(
nodes->getInfoVecs().membrTenMagRi.begin(),
make_transform_iterator(iBegin,
ModuloFunctor(maxNPerCell)),
nodes->getInfoVecs().membrLinkRiMidX.begin(),
nodes->getInfoVecs().membrLinkRiMidY.begin(),
nodes->getInfoVecs().membrDistToRi.begin())),
cellInfoVecs.cellRanksTmpStorage.begin(),
thrust::make_zip_iterator(
thrust::make_tuple(cellInfoVecs.maxTenRiVec.begin(),
cellInfoVecs.maxTenIndxVec.begin(),
cellInfoVecs.maxTenRiMidXVec.begin(),
cellInfoVecs.maxTenRiMidYVec.begin(),
cellInfoVecs.maxDistToRiVec.begin())),
thrust::equal_to<uint>(), MaxWInfo());
thrust::reduce_by_key(
make_transform_iterator(iBegin, DivideFunctor(maxNPerCell)),
make_transform_iterator(iBegin, DivideFunctor(maxNPerCell))
+ totalNodeCountForActiveCells,
nodes->getInfoVecs().membrTensionMag.begin(),
cellInfoVecs.cellRanksTmpStorage.begin(),
cellInfoVecs.aveTension.begin(), thrust::equal_to<uint>(),
thrust::plus<double>());
// Convert per-cell tension sums into averages.
thrust::transform(cellInfoVecs.aveTension.begin(),
cellInfoVecs.aveTension.begin()
+ allocPara_m.currentActiveCellCount,
cellInfoVecs.activeMembrNodeCounts.begin(),
cellInfoVecs.aveTension.begin(), thrust::divides<double>());
// linear relationship with highest tension; capped by a given value
thrust::transform(cellInfoVecs.aveTension.begin(),
cellInfoVecs.aveTension.begin()
+ allocPara_m.currentActiveCellCount,
cellInfoVecs.membrGrowSpeed.begin(),
MultiWithLimit(membrPara.membrGrowCoeff, membrPara.membrGrowLimit));
}
// Recomputes membrGrowSpeed for each active cell from its current active
// membrane and internal node counts. The AdjustMembrGrow functor is
// parameterized by the growth constant and the initial membrane/internal
// node counts from membrPara.
void SceCells::adjustMembrGrowSpeed_M() {
thrust::transform(
thrust::make_zip_iterator(
thrust::make_tuple(
cellInfoVecs.activeMembrNodeCounts.begin(),
cellInfoVecs.activeIntnlNodeCounts.begin())),
thrust::make_zip_iterator(
thrust::make_tuple(
cellInfoVecs.activeMembrNodeCounts.begin(),
cellInfoVecs.activeIntnlNodeCounts.begin()))
+ allocPara_m.currentActiveCellCount,
cellInfoVecs.membrGrowSpeed.begin(),
AdjustMembrGrow(membrPara.growthConst_N, membrPara.initMembrCt_N,
membrPara.initIntnlCt_N));
}
// Advances each cell's membrane growth progress and decides whether it
// should add a membrane node this step.
// First transform: membrGrowProgress += membrGrowSpeed * dt (SaxpyFunctor).
// Second transform: MemGrowFunc maps (activeMembrNodeCounts,
// membrGrowProgress, maxDistToRiVec) to (isMembrAddingNode,
// updated membrGrowProgress), bounded by the membrane node capacity.
// The commented-out variant below is the pre-Ali version that did not
// take maxDistToRiVec into account; kept for reference.
void SceCells::decideIfAddMembrNode_M() {
// decide if add membrane node given current active node count and
// membr growth progress
uint curActCellCt = allocPara_m.currentActiveCellCount;
thrust::transform(cellInfoVecs.membrGrowSpeed.begin(),
cellInfoVecs.membrGrowSpeed.begin() + curActCellCt,
cellInfoVecs.membrGrowProgress.begin(),
cellInfoVecs.membrGrowProgress.begin(), SaxpyFunctor(dt));
uint maxMembrNode = allocPara_m.maxMembrNodePerCell;
/**Ali thrust::transform(
thrust::make_zip_iterator(
thrust::make_tuple(cellInfoVecs.membrGrowProgress.begin(),
cellInfoVecs.activeMembrNodeCounts.begin())),
thrust::make_zip_iterator(
thrust::make_tuple(cellInfoVecs.membrGrowProgress.begin(),
cellInfoVecs.activeMembrNodeCounts.begin()))
+ curActCellCt,
thrust::make_zip_iterator(
thrust::make_tuple(cellInfoVecs.isMembrAddingNode.begin(),
cellInfoVecs.membrGrowProgress.begin())),
MemGrowFunc(maxMembrNode));
*/
thrust::transform(
thrust::make_zip_iterator(
thrust::make_tuple(cellInfoVecs.activeMembrNodeCounts.begin(),
cellInfoVecs.membrGrowProgress.begin(),cellInfoVecs.maxDistToRiVec.begin())),
thrust::make_zip_iterator(
thrust::make_tuple(cellInfoVecs.activeMembrNodeCounts.begin(),
cellInfoVecs.membrGrowProgress.begin(),cellInfoVecs.maxDistToRiVec.begin()))
+ curActCellCt,
thrust::make_zip_iterator(
thrust::make_tuple(cellInfoVecs.isMembrAddingNode.begin(),
cellInfoVecs.membrGrowProgress.begin())),
MemGrowFunc(maxMembrNode));
}
/**
* Add new membrane elements to cells.
* This operation is relatively expensive because of memory rearrangement.
*/
// For every cell whose isMembrAddingNode stencil flag is set, AddMemNode
// inserts one membrane node at the stored max-tension link midpoint
// (maxTenRiMidXVec/maxTenRiMidYVec, at slot maxTenIndxVec) by writing
// through raw device pointers (nodeIsActiveAddress, nodeXPosAddress,
// nodeYPosAddress, adhIndxAddr), and outputs the updated active membrane
// node count for that cell.
void SceCells::addMembrNodes_M() {
thrust::counting_iterator<uint> iBegin(0);
uint curAcCCount = allocPara_m.currentActiveCellCount;
uint maxNodePerCell = allocPara_m.maxAllNodePerCell;
thrust::transform_if(
thrust::make_zip_iterator(
thrust::make_tuple(iBegin,
cellInfoVecs.maxTenIndxVec.begin(),
cellInfoVecs.activeMembrNodeCounts.begin(),
cellInfoVecs.maxTenRiMidXVec.begin(),
cellInfoVecs.maxTenRiMidYVec.begin())),
thrust::make_zip_iterator(
thrust::make_tuple(iBegin,
cellInfoVecs.maxTenIndxVec.begin(),
cellInfoVecs.activeMembrNodeCounts.begin(),
cellInfoVecs.maxTenRiMidXVec.begin(),
cellInfoVecs.maxTenRiMidYVec.begin()))
+ curAcCCount, cellInfoVecs.isMembrAddingNode.begin(),
cellInfoVecs.activeMembrNodeCounts.begin(),
AddMemNode(maxNodePerCell, growthAuxData.nodeIsActiveAddress,
growthAuxData.nodeXPosAddress,
growthAuxData.nodeYPosAddress, growthAuxData.adhIndxAddr),
thrust::identity<bool>());
}
void SceCells::membrDebug() {
	// Debug dump of membrane-related node data. For the tension and
	// link-midpoint vectors only the nodes at per-cell offsets 0, 199 and
	// 200 are printed; the four bending vectors are printed in full.
	// Blocks on stdin at the end so the dump can be inspected.
	uint curAcCCount = allocPara_m.currentActiveCellCount;
	uint maxActiveNodeC = curAcCCount * allocPara_m.maxAllNodePerCell;
	uint maxNodePC = allocPara_m.maxAllNodePerCell;
	//uint tmp = 0;
	//for (uint i = 0; i < curAcCCount; i++) {
	//	tmp += cellInfoVecs.isMembrAddingNode[i];
	//}
	//if (tmp != 0) {
	//	tmpDebug = true;
	//}
	//if (!tmpDebug) {
	//	return;
	//}
	for (uint i = 0; i < maxActiveNodeC; i++) {
		if (i % maxNodePC == 0 || i % maxNodePC == 199
				|| i % maxNodePC == 200) {
			std::cout << nodes->getInfoVecs().membrTensionMag[i] << " ";
		}
	}
	std::cout << std::endl;
	for (uint i = 0; i < maxActiveNodeC; i++) {
		if (i % maxNodePC == 0 || i % maxNodePC == 199
				|| i % maxNodePC == 200) {
			std::cout << nodes->getInfoVecs().membrTenMagRi[i] << " ";
		}
	}
	std::cout << std::endl;
	for (uint i = 0; i < maxActiveNodeC; i++) {
		if (i % maxNodePC == 0 || i % maxNodePC == 199
				|| i % maxNodePC == 200) {
			std::cout << nodes->getInfoVecs().membrLinkRiMidX[i] << " ";
		}
	}
	std::cout << std::endl;
	for (uint i = 0; i < maxActiveNodeC; i++) {
		if (i % maxNodePC == 0 || i % maxNodePC == 199
				|| i % maxNodePC == 200) {
			std::cout << nodes->getInfoVecs().membrLinkRiMidY[i] << " ";
		}
	}
	std::cout << std::endl;
	for (uint i = 0; i < maxActiveNodeC; i++) {
		std::cout << nodes->getInfoVecs().membrBendLeftX[i] << " ";
	}
	std::cout << std::endl;
	for (uint i = 0; i < maxActiveNodeC; i++) {
		std::cout << nodes->getInfoVecs().membrBendLeftY[i] << " ";
	}
	std::cout << std::endl;
	for (uint i = 0; i < maxActiveNodeC; i++) {
		std::cout << nodes->getInfoVecs().membrBendRightX[i] << " ";
	}
	std::cout << std::endl;
	for (uint i = 0; i < maxActiveNodeC; i++) {
		// BUG FIX: this loop previously re-printed membrBendRightX, so the
		// Y component of the right bending vector was never shown. Print
		// membrBendRightY here, mirroring the LeftX/LeftY pair above.
		std::cout << nodes->getInfoVecs().membrBendRightY[i] << " ";
	}
	std::cout << std::endl;
	// Per-cell summary: (max-tension index, active membrane node count,
	// max-tension link midpoint X, midpoint Y).
	for (uint i = 0; i < curAcCCount; i++) {
		std::cout << "(" << cellInfoVecs.maxTenIndxVec[i] << ","
				<< cellInfoVecs.activeMembrNodeCounts[i] << ","
				<< cellInfoVecs.maxTenRiMidXVec[i] << ","
				<< cellInfoVecs.maxTenRiMidYVec[i] << ")" << std::endl;
	}
	int jj;
	std::cin >> jj;
}
void SceCells::assembleVecForTwoCells(uint i) {
	// Writes the staged membrane and internal node positions of the two
	// daughter cells (divAuxData.tmp1*/tmp2* vectors) into the flat
	// per-cell scratch arrays at cell slot i, and records the resulting
	// active node counts.
	uint memCap = allocPara_m.maxMembrNodePerCell;
	uint cellCap = allocPara_m.maxAllNodePerCell;
	uint base = i * cellCap;
	// Membrane slots [0, memCap): copy as many nodes as each daughter has
	// and deactivate the remaining slots. The two daughters write disjoint
	// arrays, so both are handled in a single pass.
	for (uint slot = 0; slot < memCap; slot++) {
		uint idx = base + slot;
		if (slot < divAuxData.tmp1VecMem.size()) {
			divAuxData.tmpXPos1_M[idx] = divAuxData.tmp1VecMem[slot].x;
			divAuxData.tmpYPos1_M[idx] = divAuxData.tmp1VecMem[slot].y;
			divAuxData.tmpIsActive1_M[idx] = true;
		} else {
			divAuxData.tmpIsActive1_M[idx] = false;
		}
		if (slot < divAuxData.tmp2VecMem.size()) {
			divAuxData.tmpXPos2_M[idx] = divAuxData.tmp2VecMem[slot].x;
			divAuxData.tmpYPos2_M[idx] = divAuxData.tmp2VecMem[slot].y;
			divAuxData.tmpIsActive2_M[idx] = true;
		} else {
			divAuxData.tmpIsActive2_M[idx] = false;
		}
	}
	divAuxData.tmp1MemActiveCounts.push_back(divAuxData.tmp1VecMem.size());
	divAuxData.tmp2MemActiveCounts.push_back(divAuxData.tmp2VecMem.size());
	// Internal slots [memCap, cellCap): same scheme for the internal nodes.
	for (uint slot = memCap; slot < cellCap; slot++) {
		uint idx = base + slot;
		uint intnlIdx = slot - memCap;
		if (intnlIdx < divAuxData.tmp1IntnlVec.size()) {
			divAuxData.tmpXPos1_M[idx] = divAuxData.tmp1IntnlVec[intnlIdx].x;
			divAuxData.tmpYPos1_M[idx] = divAuxData.tmp1IntnlVec[intnlIdx].y;
			divAuxData.tmpIsActive1_M[idx] = true;
		} else {
			divAuxData.tmpIsActive1_M[idx] = false;
		}
		if (intnlIdx < divAuxData.tmp2IntnlVec.size()) {
			divAuxData.tmpXPos2_M[idx] = divAuxData.tmp2IntnlVec[intnlIdx].x;
			divAuxData.tmpYPos2_M[idx] = divAuxData.tmp2IntnlVec[intnlIdx].y;
			divAuxData.tmpIsActive2_M[idx] = true;
		} else {
			divAuxData.tmpIsActive2_M[idx] = false;
		}
	}
	divAuxData.tmp1InternalActiveCounts.push_back(
			divAuxData.tmp1IntnlVec.size());
	divAuxData.tmp2InternalActiveCounts.push_back(
			divAuxData.tmp2IntnlVec.size());
}
void SceCells::shiftIntnlNodesByCellCenter(CVector cell1Center,
		CVector cell2Center) {
	// Rigidly translates the internal nodes of each daughter cell so that
	// their centroid coincides with the supplied target center.
	// BUG FIX: the original divided by tmp*IntnlVec.size() unconditionally,
	// which is a division by zero when a daughter cell has no internal
	// nodes; each half is now skipped when its vector is empty (the shift
	// loops were no-ops in that case anyway).
	if (!divAuxData.tmp1IntnlVec.empty()) {
		CVector tmpCell1Center(0, 0, 0);
		for (uint j = 0; j < divAuxData.tmp1IntnlVec.size(); j++) {
			tmpCell1Center = tmpCell1Center + divAuxData.tmp1IntnlVec[j];
		}
		tmpCell1Center = tmpCell1Center / divAuxData.tmp1IntnlVec.size();
		// Offset that moves the current centroid onto the target center.
		CVector shiftVec1 = cell1Center - tmpCell1Center;
		for (uint j = 0; j < divAuxData.tmp1IntnlVec.size(); j++) {
			divAuxData.tmp1IntnlVec[j] = divAuxData.tmp1IntnlVec[j]
					+ shiftVec1;
		}
	}
	if (!divAuxData.tmp2IntnlVec.empty()) {
		CVector tmpCell2Center(0, 0, 0);
		for (uint j = 0; j < divAuxData.tmp2IntnlVec.size(); j++) {
			tmpCell2Center = tmpCell2Center + divAuxData.tmp2IntnlVec[j];
		}
		tmpCell2Center = tmpCell2Center / divAuxData.tmp2IntnlVec.size();
		CVector shiftVec2 = cell2Center - tmpCell2Center;
		for (uint j = 0; j < divAuxData.tmp2IntnlVec.size(); j++) {
			divAuxData.tmp2IntnlVec[j] = divAuxData.tmp2IntnlVec[j]
					+ shiftVec2;
		}
	}
}
// Rebuilds the membrane node rings of the two daughter cells after
// division. tmp1/tmp2 hold the mother's membrane nodes assigned to each
// daughter; after sorting (VecVal ordering), the gap between the last and
// the first node of each ring is filled with new points spaced by
// memNewSpacing (obtainPtsBetween), capped by the remaining node budget.
// Results go to divAuxData.tmp1VecMem / tmp2VecMem.
void SceCells::processMemVec(std::vector<VecVal>& tmp1,
std::vector<VecVal>& tmp2) {
divAuxData.tmp1VecMem.clear();
divAuxData.tmp2VecMem.clear();
uint membThreshold = allocPara_m.maxMembrNodePerCell;
std::sort(tmp1.begin(), tmp1.end());
std::sort(tmp2.begin(), tmp2.end());
//assert(tmp1.size() < allocPara_m.maxMembrNodePerCell);
//assert(tmp2.size() < allocPara_m.maxMembrNodePerCell);
// NOTE(review): if tmp1/tmp2 ever exceed maxMembrNodePerCell, these uint
// subtractions wrap around to huge counts (the asserts above are
// disabled); the trailing size asserts would then fire instead — confirm
// that upstream code guarantees the sizes stay within the cap.
uint maxDivMembrNodeCount1 = allocPara_m.maxMembrNodePerCell - tmp1.size();
uint maxDivMembrNodeCount2 = allocPara_m.maxMembrNodePerCell - tmp2.size();
std::vector<CVector> ptsBetween1, ptsBetween2;
// if size is less than 1, the situation would have already been very bad.
// Just keep this statement so no seg fault would happen.
if (tmp1.size() >= 1) {
ptsBetween1 = obtainPtsBetween(tmp1[tmp1.size() - 1].vec, tmp1[0].vec,
memNewSpacing, maxDivMembrNodeCount1);
}
// if size is less than 1, the situation would have already been very bad.
// Just keep this statement so no seg fault would happen.
if (tmp2.size() >= 1) {
ptsBetween2 = obtainPtsBetween(tmp2[tmp2.size() - 1].vec, tmp2[0].vec,
memNewSpacing, maxDivMembrNodeCount2);
}
// Assemble each ring: inherited nodes first, then the gap-filling points.
for (uint j = 0; j < tmp1.size(); j++) {
divAuxData.tmp1VecMem.push_back(tmp1[j].vec);
}
for (uint j = 0; j < tmp2.size(); j++) {
divAuxData.tmp2VecMem.push_back(tmp2[j].vec);
}
for (uint j = 0; j < ptsBetween1.size(); j++) {
divAuxData.tmp1VecMem.push_back(ptsBetween1[j]);
}
for (uint j = 0; j < ptsBetween2.size(); j++) {
divAuxData.tmp2VecMem.push_back(ptsBetween2[j]);
}
assert(divAuxData.tmp1VecMem.size() <= membThreshold);
assert(divAuxData.tmp2VecMem.size() <= membThreshold);
}
void SceCells::obtainMembrAndIntnlNodes(uint i, vector<CVector>& membrNodes,
		vector<CVector>& intnlNodes) {
	// Collects the active nodes of cell i from the division scratch
	// buffers, splitting them into membrane nodes (slot < membrane
	// capacity) and internal nodes (remaining slots).
	membrNodes.clear();
	intnlNodes.clear();
	uint memSlotCount = allocPara_m.maxMembrNodePerCell;
	uint slotsPerCell = allocPara_m.maxAllNodePerCell;
	uint cellBase = i * slotsPerCell;
	for (uint slot = 0; slot < slotsPerCell; slot++) {
		uint idx = cellBase + slot;
		// Inactive slots carry stale positions; skip them.
		if (divAuxData.tmpIsActive_M[idx] != true) {
			continue;
		}
		CVector pos(divAuxData.tmpNodePosX_M[idx],
				divAuxData.tmpNodePosY_M[idx], 0);
		if (slot < memSlotCount) {
			// Slots below the membrane capacity hold membrane nodes.
			membrNodes.push_back(pos);
		} else {
			intnlNodes.push_back(pos);
		}
	}
}
CVector SceCells::obtainCenter(uint i) {
	// Returns the stored center position of cell i as a CVector; the z
	// component is fixed to 0.
	return CVector(divAuxData.tmpCenterPosX_M[i],
			divAuxData.tmpCenterPosY_M[i], 0);
}
CVector SceCells::calDivDir_MajorAxis(CVector center,
		vector<CVector>& membrNodes, double& lenAlongMajorAxis) {
	// Brute-force O(n^2) search for the direction of widest extent (the
	// "major axis"). Each direction from the center to a membrane node is
	// a candidate; all membrane nodes are projected onto it and the
	// candidate with the largest projected spread wins. The spread is
	// returned through lenAlongMajorAxis.
	// (Original author's note: not the optimal algorithm but easy to code.)
	double bestSpread = 0;
	CVector bestDir;
	for (uint a = 0; a < membrNodes.size(); a++) {
		CVector candOffset = membrNodes[a] - center;
		CVector candDir = candOffset.getUnitVector();
		// Extremes of the signed projections; start at 0 so the center
		// itself is always inside the measured interval.
		double lo = 0, hi = 0;
		for (uint b = 0; b < membrNodes.size(); b++) {
			CVector offset = membrNodes[b] - center;
			double proj = offset * candDir;
			if (proj < lo) {
				lo = proj;
			}
			if (proj > hi) {
				hi = proj;
			}
		}
		double spread = hi - lo;
		if (spread > bestSpread) {
			bestSpread = spread;
			bestDir = candDir;
		}
	}
	lenAlongMajorAxis = bestSpread;
	return bestDir;
}
//A&A
// Measures the extent of the cell membrane along the Hertwig (division)
// axis: every membrane node's offset from the center is projected onto the
// unit division direction, and max - min of those raw projections is
// returned. Note the extrema are *selected* by the projections of the
// normalized offsets (as in the original algorithm), while the returned
// length uses the corresponding raw projections.
double SceCells::calLengthAlongHertwigAxis(CVector divDir, CVector center,
		vector<CVector>& membrNodes) {
	CVector axis = divDir.getUnitVector();
	double loUnit = 0, hiUnit = 0; // extrema over normalized projections
	double loRaw = 0, hiRaw = 0;   // raw projections at those extrema
	for (uint k = 0; k < membrNodes.size(); k++) {
		CVector offset = membrNodes[k] - center;
		double unitProj = axis * offset.getUnitVector();
		double rawProj = axis * offset;
		if (unitProj < loUnit) {
			loUnit = unitProj;
			loRaw = rawProj;
		}
		if (unitProj > hiUnit) {
			hiUnit = unitProj;
			hiRaw = rawProj;
		}
	}
	return hiRaw - loRaw;
}
// Places the two daughter-cell centers symmetrically about the old center,
// offset along the division direction by half the major-axis length scaled
// by centerShiftRatio.
void SceCells::obtainTwoNewCenters(CVector& oldCenter, CVector& divDir,
		double len_MajorAxis, CVector& centerNew1, CVector& centerNew2) {
	CVector axis = divDir.getUnitVector();
	double shift = len_MajorAxis / 2.0 * centerShiftRatio;
	centerNew1 = oldCenter + shift * axis;
	centerNew2 = oldCenter - shift * axis;
}
// Partitions the active nodes of cell i in preparation for division.
// Membrane nodes are assigned to tmp1 or tmp2 depending on which side of
// the split line (divDir rotated 90 degrees CCW) they lie, determined by
// the sign of the z component of cross(centerToPos, splitDir); each entry
// also records the dot product of the normalized offset with the split
// direction (used for ordering later). Internal nodes are shrunk toward
// the old center by shrinkRatio and pushed to divAuxData.tmp1IntnlVec or
// tmp2IntnlVec according to the sign of their projection onto divDir.
void SceCells::prepareTmpVec(uint i, CVector divDir, CVector oldCenter,
		std::vector<VecVal>& tmp1, std::vector<VecVal>& tmp2) {
	tmp1.clear();
	tmp2.clear();
	const uint membThreshold = allocPara_m.maxMembrNodePerCell;
	const uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell;
	CVector splitDir = divDir.rotateNintyDeg_XY_CC();
	for (uint j = 0; j < maxAllNodePerCell; j++) {
		const uint index = i * maxAllNodePerCell + j;
		if (divAuxData.tmpIsActive_M[index] != true) {
			continue; // inactive slots contribute nothing
		}
		CVector pos(divAuxData.tmpNodePosX_M[index],
				divAuxData.tmpNodePosY_M[index], 0);
		CVector centerToPosDir = pos - oldCenter;
		if (j < membThreshold) {
			// membrane node: classify by side of the split line
			VecVal entry;
			entry.val = centerToPosDir.getUnitVector() * splitDir;
			entry.vec = pos;
			if (Cross(centerToPosDir, splitDir).z >= 0) {
				// counter-clockwise side
				tmp1.push_back(entry);
			} else {
				// clockwise side
				tmp2.push_back(entry);
			}
		} else {
			// internal node: pull toward the old center, then classify by
			// side of the division direction
			CVector shrinkedPos = centerToPosDir * shrinkRatio + oldCenter;
			if (centerToPosDir * divDir > 0) {
				divAuxData.tmp1IntnlVec.push_back(shrinkedPos);
			} else {
				divAuxData.tmp2IntnlVec.push_back(shrinkedPos);
			}
		}
	}
}
// Computes the area of every active cell into cellInfoVecs.cellAreaVec
// (one entry per cell; the matching cell ranks land in
// cellInfoVecs.cellRanksTmpStorage).
// Implementation: a single thrust::reduce_by_key over every node slot of
// every active cell. Key = cell rank (flat node index / maxAllNodePerCell).
// Value = per-node contribution produced by the CalTriArea functor from
// (active membrane node count, cell rank, node rank within the cell, cell
// center x, cell center y); contributions are summed per cell with
// thrust::plus<double>.
// NOTE(review): CalTriArea is defined elsewhere — presumably a signed
// triangle (shoelace-style) area term per membrane segment; confirm
// against its definition.
void SceCells::calCellArea() {
	thrust::counting_iterator<uint> iBegin(0), iBegin2(0);
	totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount
			* allocPara_m.maxAllNodePerCell;
	uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell;
	// Raw device pointers let the CalTriArea functor read node positions
	// and activity flags directly inside the transform.
	double* nodeLocXAddr = thrust::raw_pointer_cast(
			&(nodes->getInfoVecs().nodeLocX[0]));
	double* nodeLocYAddr = thrust::raw_pointer_cast(
			&(nodes->getInfoVecs().nodeLocY[0]));
	bool* nodeIsActiveAddr = thrust::raw_pointer_cast(
			&(nodes->getInfoVecs().nodeIsActive[0]));
	thrust::reduce_by_key(
			make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell)),
			make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))
					+ totalNodeCountForActiveCells,
			thrust::make_transform_iterator(
					thrust::make_zip_iterator(
							thrust::make_tuple(
									thrust::make_permutation_iterator(
											cellInfoVecs.activeMembrNodeCounts.begin(),
											make_transform_iterator(iBegin,
													DivideFunctor(
															maxAllNodePerCell))),
									make_transform_iterator(iBegin,
											DivideFunctor(maxAllNodePerCell)),
									make_transform_iterator(iBegin,
											ModuloFunctor(maxAllNodePerCell)),
									make_permutation_iterator(
											cellInfoVecs.centerCoordX.begin(),
											make_transform_iterator(iBegin,
													DivideFunctor(
															maxAllNodePerCell))),
									make_permutation_iterator(
											cellInfoVecs.centerCoordY.begin(),
											make_transform_iterator(iBegin,
													DivideFunctor(
															maxAllNodePerCell))))),
					CalTriArea(maxAllNodePerCell, nodeIsActiveAddr,
							nodeLocXAddr, nodeLocYAddr)),
			cellInfoVecs.cellRanksTmpStorage.begin(),
			cellInfoVecs.cellAreaVec.begin(), thrust::equal_to<uint>(),
			thrust::plus<double>());
}
//AAMIRI added to calculate Perimeter of each cell
// Computes the perimeter of every active cell into
// cellInfoVecs.cellPerimVec (one entry per cell; matching cell ranks go to
// cellInfoVecs.cellRanksTmpStorage).
// Structure mirrors calCellArea exactly: one thrust::reduce_by_key keyed
// by cell rank, summing per-node contributions from the CalPerim functor
// (which receives membrane node count, cell rank, node rank, and cell
// center x/y, plus raw pointers to node positions/activity).
void SceCells::calCellPerim() {
	thrust::counting_iterator<uint> iBegin(0), iBegin2(0);
	totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount
			* allocPara_m.maxAllNodePerCell;
	uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell;
	// Raw device pointers for direct reads inside the CalPerim functor.
	double* nodeLocXAddr = thrust::raw_pointer_cast(
			&(nodes->getInfoVecs().nodeLocX[0]));
	double* nodeLocYAddr = thrust::raw_pointer_cast(
			&(nodes->getInfoVecs().nodeLocY[0]));
	bool* nodeIsActiveAddr = thrust::raw_pointer_cast(
			&(nodes->getInfoVecs().nodeIsActive[0]));
	thrust::reduce_by_key(
			make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell)),
			make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))
					+ totalNodeCountForActiveCells,
			thrust::make_transform_iterator(
					thrust::make_zip_iterator(
							thrust::make_tuple(
									thrust::make_permutation_iterator(
											cellInfoVecs.activeMembrNodeCounts.begin(),
											make_transform_iterator(iBegin,
													DivideFunctor(
															maxAllNodePerCell))),
									make_transform_iterator(iBegin,
											DivideFunctor(maxAllNodePerCell)),
									make_transform_iterator(iBegin,
											ModuloFunctor(maxAllNodePerCell)),
									make_permutation_iterator(
											cellInfoVecs.centerCoordX.begin(),
											make_transform_iterator(iBegin,
													DivideFunctor(
															maxAllNodePerCell))),
									make_permutation_iterator(
											cellInfoVecs.centerCoordY.begin(),
											make_transform_iterator(iBegin,
													DivideFunctor(
															maxAllNodePerCell))))),
					CalPerim(maxAllNodePerCell, nodeIsActiveAddr,
							nodeLocXAddr, nodeLocYAddr)),
			cellInfoVecs.cellRanksTmpStorage.begin(),
			cellInfoVecs.cellPerimVec.begin(), thrust::equal_to<uint>(),
			thrust::plus<double>());
}
// Assembles per-cell statistics for output: growth progress, membrane and
// internal node counts, the neighbor set (derived from membrane adhesion
// indices), per-neighbor adhesion strength, boundary flag, center, area,
// perimeter and Dpp level. Also fills tissue-level extrema and the external
// force value in the returned CellsStatsData.
// Boundary detection: a cell is flagged as boundary when it has a run of
// more than bdryCriteria consecutive membrane nodes without adhesion; the
// run is allowed to wrap past the end of the membrane node list.
// NOTE(review): BdryApproach is hard-coded to 1, so the distance-based
// reclassification under (BdryApproach == 2) is currently dead code.
CellsStatsData SceCells::outputPolyCountData() {
	cout << " I am at begining of outpolycount"<< std::flush ;
	std::cout.flush();
	double sumX,sumY,cntr_X_Domain,cntr_Y_Domain ;
	int BdryApproach ;
	BdryApproach=1 ;
	totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount
			* allocPara_m.maxAllNodePerCell;
	cout << " I am before cells area"<< endl ;
	calCellArea();
	cout << " I am after cells area" << endl ;
	calCellPerim();//AAMIRI
	CellsStatsData result;
	cout << " I am after result" << endl ;
	uint bdryCriteria =
			globalConfigVars.getConfigValue("BdryCellCriteria").toInt();
	// already on host; no need to call thrust::copy
	thrust::host_vector<int> adhIndxHost =
			nodes->getInfoVecs().nodeAdhIndxHostCopy;
	// Stage the per-cell device vectors needed below on the host.
	thrust::host_vector<double> growthProVecHost(
			allocPara_m.currentActiveCellCount);
	thrust::copy(cellInfoVecs.growthProgress.begin(),
			cellInfoVecs.growthProgress.begin()
					+ allocPara_m.currentActiveCellCount,
			growthProVecHost.begin());
	thrust::host_vector<double> growthProMembrVecHost(
			allocPara_m.currentActiveCellCount);
	thrust::copy(cellInfoVecs.membrGrowProgress.begin(),
			cellInfoVecs.membrGrowProgress.begin()
					+ allocPara_m.currentActiveCellCount,
			growthProMembrVecHost.begin());
	thrust::host_vector<uint> activeMembrNodeCountHost(
			allocPara_m.currentActiveCellCount);
	thrust::copy(cellInfoVecs.activeMembrNodeCounts.begin(),
			cellInfoVecs.activeMembrNodeCounts.begin()
					+ allocPara_m.currentActiveCellCount,
			activeMembrNodeCountHost.begin());
	thrust::host_vector<uint> activeIntnlNodeCountHost(
			allocPara_m.currentActiveCellCount);
	thrust::copy(cellInfoVecs.activeIntnlNodeCounts.begin(),
			cellInfoVecs.activeIntnlNodeCounts.begin()
					+ allocPara_m.currentActiveCellCount,
			activeIntnlNodeCountHost.begin());
	thrust::host_vector<double> centerCoordXHost(
			allocPara_m.currentActiveCellCount);
	thrust::host_vector<double> centerCoordYHost(
			allocPara_m.currentActiveCellCount);
	thrust::copy(cellInfoVecs.centerCoordX.begin(),
			cellInfoVecs.centerCoordX.begin()
					+ allocPara_m.currentActiveCellCount,
			centerCoordXHost.begin());
	thrust::copy(cellInfoVecs.centerCoordY.begin(),
			cellInfoVecs.centerCoordY.begin()
					+ allocPara_m.currentActiveCellCount,
			centerCoordYHost.begin());
	thrust::host_vector<double> cellAreaHost(
			allocPara_m.currentActiveCellCount);
	thrust::host_vector<double> cellPerimHost(
			allocPara_m.currentActiveCellCount);//AAMIRI
	thrust::host_vector<double> cellDppHost(
			allocPara_m.currentActiveCellCount);//Ali
	thrust::copy(cellInfoVecs.cellAreaVec.begin(),
			cellInfoVecs.cellAreaVec.begin()
					+ allocPara_m.currentActiveCellCount, cellAreaHost.begin());
	thrust::copy(cellInfoVecs.cellPerimVec.begin(),
			cellInfoVecs.cellPerimVec.begin()
					+ allocPara_m.currentActiveCellCount, cellPerimHost.begin());//AAMIRI
	thrust::copy(cellInfoVecs.cell_Dpp.begin(),
			cellInfoVecs.cell_Dpp.begin()
					+ allocPara_m.currentActiveCellCount, cellDppHost.begin());//Ali
	// Adhesion strength is tracked for at most this many neighbors per
	// cell, matching the fixed size of CellStats::cellNeighborStrength.
	const int maxTrackedNeighbors = 10;
	sumX=0 ;
	sumY=0 ;
	for (uint i = 0; i < allocPara_m.currentActiveCellCount; i++) {
		CellStats cellStatsData;
		cellStatsData.cellGrowthProgress = growthProVecHost[i];
		cellStatsData.cellRank = i;
		bool isBdry = false;
		std::set<int> neighbors;
		std::vector<int> neighborsV; //Ali
		int neighborStrength[10]; //Ali
		int continousNoAdh = 0;
		map <int, int> cellAndNeighborRank ; //Ali
		for (int ii = 0; ii < maxTrackedNeighbors; ii++) { //Ali
			neighborStrength[ii] = 0;
		}
		cellAndNeighborRank.clear(); //Ali
		for (uint j = 0; j < activeMembrNodeCountHost[i]; j++) {
			uint index = i * allocPara_m.maxAllNodePerCell + j;
			if (adhIndxHost[index] != -1) {
				// Node j adheres to a node of another cell; recover that
				// cell's rank from the flat adhesion node index.
				uint adhCellRank = adhIndxHost[index]
						/ allocPara_m.maxAllNodePerCell;
				neighbors.insert(adhCellRank);
				map <int, int>:: iterator iteratorMap=cellAndNeighborRank.find(adhCellRank); //Ali
				if (iteratorMap==cellAndNeighborRank.end()) { //Ali
					// first adhesion seen for this neighbor cell
					int NewneighborRank= neighbors.size()-1; //Ali
					cellAndNeighborRank[adhCellRank]=NewneighborRank; //Ali
					// BUGFIX: bound the index — a cell with more than
					// maxTrackedNeighbors neighbors previously overran the
					// fixed-size neighborStrength stack array.
					if (NewneighborRank < maxTrackedNeighbors) {
						neighborStrength[NewneighborRank] = neighborStrength[NewneighborRank] + 1; //Ali
					}
					neighborsV.push_back(adhCellRank); //Ali
				}
				else { //Ali
					int oldNeighborRank=(*iteratorMap).second ;
					if (oldNeighborRank < maxTrackedNeighbors) {
						neighborStrength[oldNeighborRank] = neighborStrength[oldNeighborRank] + 1; //Ali
					}
				}
				continousNoAdh = 0;
			} else {
				continousNoAdh = continousNoAdh + 1;
				if (continousNoAdh > bdryCriteria) {
					isBdry = true;
				}
			}
			// If the last membrane node has no adhesion, continue the
			// no-adhesion run from the start of the node list (wrap-around).
			if (j == activeMembrNodeCountHost[i] - 1
					&& adhIndxHost[index] == -1) {
				int k = 0;
				uint indexNew;
				while (k < activeMembrNodeCountHost[i] - 1) {
					indexNew = i * allocPara_m.maxAllNodePerCell + k;
					if (adhIndxHost[indexNew] == -1) {
						continousNoAdh = continousNoAdh + 1;
						if (continousNoAdh > bdryCriteria) {
							isBdry = true;
						}
						k++;
					} else {
						break;
					}
				}
			}
		}
		cellStatsData.isBdryCell = isBdry;
		cellStatsData.numNeighbors = neighbors.size();
		cellStatsData.currentActiveMembrNodes = activeMembrNodeCountHost[i];
		cellStatsData.currentActiveIntnlNodes = activeIntnlNodeCountHost[i];
		cellStatsData.neighborVec = neighbors;
		cellStatsData.neighborVecV = neighborsV; //Ali
		for (int iiii = 0; iiii < maxTrackedNeighbors; iiii++) {
			cellStatsData.cellNeighborStrength[iiii] = neighborStrength[iiii];
		} //Ali
		cellStatsData.membrGrowthProgress = growthProMembrVecHost[i];
		cellStatsData.cellCenter = CVector(centerCoordXHost[i],
				centerCoordYHost[i], 0);
		cellStatsData.cellArea = cellAreaHost[i];
		cellStatsData.cellPerim = cellPerimHost[i];//AAMIRI
		cellStatsData.cellDpp = cellDppHost[i];//Ali
		result.cellsStats.push_back(cellStatsData);
		sumX=sumX+cellStatsData.cellCenter.x ;
		sumY=sumY+cellStatsData.cellCenter.y ;
	}
	//Ali
	// Alternative boundary classification by distance from the tissue
	// center of mass (currently unreachable — BdryApproach is always 1).
	if (BdryApproach==2) {
		cout << "sumX=" << sumX << endl ;
		cout << "sumY=" << sumY << endl ;
		cntr_X_Domain=sumX/result.cellsStats.size() ;
		cntr_Y_Domain=sumY/result.cellsStats.size() ;
		cout << "cntr_X=" << cntr_X_Domain << endl ;
		cout << "cntr_Y=" << cntr_Y_Domain << endl ;
		double R_Max ;
		double Distance ;
		R_Max=0 ;
		for (uint i = 0; i < allocPara_m.currentActiveCellCount; i++) {
			Distance=sqrt( pow(centerCoordXHost[i]-cntr_X_Domain,2) +pow(centerCoordYHost[i]-cntr_Y_Domain,2) ) ;
			if (Distance > R_Max) {
				R_Max=Distance ;
			}
		}
		cout << "R_Max=" << R_Max << endl ;
		for (uint i = 0; i < allocPara_m.currentActiveCellCount; i++) {
			Distance=sqrt( pow(centerCoordXHost[i]-cntr_X_Domain,2) +pow(centerCoordYHost[i]-cntr_Y_Domain,2) ) ;
			if (Distance > 0.9* R_Max) {
				result.cellsStats[i].isBdryCell = true;
				cout << "isBdryCell"<< i<< endl ;
			}
			else {
				result.cellsStats[i].isBdryCell = false;
				cout << "isNormalCell"<< i << endl ;
			}
		}
	}
	//Ali
	cout << "I want to write data" << endl ;
	//Ali
	// BUGFIX: use fabs for doubles (plain abs can resolve to the integer
	// overload and truncate, depending on included headers) and guard
	// against reading centerCoordXHost[1] with fewer than two active cells.
	if (allocPara_m.currentActiveCellCount >= 2) {
		result.MaxDistanceX = fabs(centerCoordXHost[1] - centerCoordXHost[0]); //Ali
	} else {
		result.MaxDistanceX = 0;
	}
	result.Cells_Extrem_Loc[0]=Tisu_MinX;
	result.Cells_Extrem_Loc[1]=Tisu_MaxX;
	result.Cells_Extrem_Loc[2]=Tisu_MinY;
	result.Cells_Extrem_Loc[3]=Tisu_MaxY ;
	result.F_Ext_Out=membrPara.F_Ext_Incline*curTime ;
	return result;
}
// True when num exceeds the global minimum-divisor threshold; used to
// guard divisions against vanishing denominators.
__device__ bool bigEnough(double& num) {
	return num > minDivisor;
}
// Z component of the cross product of the 2D vectors A and B
// (positive when B lies counter-clockwise of A).
__device__ double cross_Z(double vecA_X, double vecA_Y, double vecB_X,
		double vecB_Y) {
	double crossZ = vecA_X * vecB_Y - vecA_Y * vecB_X;
	return crossZ;
}
/*
__device__ double calBendMulti(double& angle, uint activeMembrCt) {
double equAngle = PI - PI / activeMembrCt;
return bendCoeff * (angle - equAngle);
}
*/
//AAMIRI
// Bending multiplier for a membrane node: proportional to the deviation of
// the node angle from the equilibrium angle of a regular polygon with
// activeMembrCt vertices. Before the mitotic threshold (progress <=
// mitoticCri) the plain bendCoeff is used; past it, the coefficient ramps
// linearly from bendCoeff toward bendCoeff_Mitotic as progress approaches 1.
__device__ double calBendMulti_Mitotic(double& angle, uint activeMembrCt, double& progress, double mitoticCri) {
	double equAngle = PI - PI / activeMembrCt;
	double deviation = angle - equAngle;
	if (progress <= mitoticCri) {
		return bendCoeff * deviation;
	}
	return deviation * (bendCoeff + (bendCoeff_Mitotic - bendCoeff) * (progress - mitoticCri) / (1.0 - mitoticCri));
}
// Applies the intra-cell SCE pair forces for every node of every active
// cell via a single thrust::transform. Per node slot, the input zip
// supplies (active membrane count, active internal count, cell rank, node
// rank within cell, growth progress, current velX, velY); the
// AddSceCellForce functor writes updated velocities plus the
// membrane-internal force components nodeF_MI_M_x/y, which are kept for
// the cell pressure calculation (per the inline notes below).
void SceCells::applySceCellDisc_M() {
	totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount
			* allocPara_m.maxAllNodePerCell;
	uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell;
	uint maxMemNodePerCell = allocPara_m.maxMembrNodePerCell;
	thrust::counting_iterator<uint> iBegin(0);
	// Raw device pointers for direct neighbor reads inside the functor.
	double* nodeLocXAddr = thrust::raw_pointer_cast(
			&(nodes->getInfoVecs().nodeLocX[0]));
	double* nodeLocYAddr = thrust::raw_pointer_cast(
			&(nodes->getInfoVecs().nodeLocY[0]));
	bool* nodeIsActiveAddr = thrust::raw_pointer_cast(
			&(nodes->getInfoVecs().nodeIsActive[0]));
	double grthPrgrCriVal_M =growthAuxData.grthPrgrCriVal_M_Ori ; // for now constant growthAuxData.grthProgrEndCPU
	// - growthAuxData.prolifDecay
	//		* (growthAuxData.grthProgrEndCPU
	//			- growthAuxData.grthPrgrCriVal_M_Ori);
	thrust::transform(
			thrust::make_zip_iterator(
					thrust::make_tuple(
							thrust::make_permutation_iterator(
									cellInfoVecs.activeMembrNodeCounts.begin(),
									make_transform_iterator(iBegin,
											DivideFunctor(maxAllNodePerCell))),
							thrust::make_permutation_iterator(
									cellInfoVecs.activeIntnlNodeCounts.begin(),
									make_transform_iterator(iBegin,
											DivideFunctor(maxAllNodePerCell))),
							make_transform_iterator(iBegin,
									DivideFunctor(maxAllNodePerCell)),
							make_transform_iterator(iBegin,
									ModuloFunctor(maxAllNodePerCell)),
							thrust::make_permutation_iterator(
									cellInfoVecs.growthProgress.begin(),
									make_transform_iterator(iBegin,
											DivideFunctor(maxAllNodePerCell))),
							nodes->getInfoVecs().nodeVelX.begin(),
							nodes->getInfoVecs().nodeVelY.begin())),
			thrust::make_zip_iterator(
					thrust::make_tuple(
							thrust::make_permutation_iterator(
									cellInfoVecs.activeMembrNodeCounts.begin(),
									make_transform_iterator(iBegin,
											DivideFunctor(maxAllNodePerCell))),
							thrust::make_permutation_iterator(
									cellInfoVecs.activeIntnlNodeCounts.begin(),
									make_transform_iterator(iBegin,
											DivideFunctor(maxAllNodePerCell))),
							make_transform_iterator(iBegin,
									DivideFunctor(maxAllNodePerCell)),
							make_transform_iterator(iBegin,
									ModuloFunctor(maxAllNodePerCell)),
							thrust::make_permutation_iterator(
									cellInfoVecs.growthProgress.begin(),
									make_transform_iterator(iBegin,
											DivideFunctor(maxAllNodePerCell))),
							nodes->getInfoVecs().nodeVelX.begin(),
							nodes->getInfoVecs().nodeVelY.begin()))
					+ totalNodeCountForActiveCells,
			thrust::make_zip_iterator(
					thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(),
							nodes->getInfoVecs().nodeVelY.begin(),
							nodes->getInfoVecs().nodeF_MI_M_x.begin(), //Ali added for cell pressure calculation
							nodes->getInfoVecs().nodeF_MI_M_y.begin())),// ALi added for cell pressure calculation
			AddSceCellForce(maxAllNodePerCell, maxMemNodePerCell, nodeLocXAddr,
					nodeLocYAddr, nodeIsActiveAddr, grthPrgrCriVal_M));
}
// Accumulates the internal-node/membrane-node (I-B) pair force exerted on
// node (xPos, yPos) by node (xPos2, yPos2) into (xRes, yRes).
// forceValue follows a two-exponential law,
//   -p[0]/p[2] * exp(-L/p[2]) + p[1]/p[3] * exp(-L/p[3]),
// and is zero beyond the cutoff distance p[4]. The parameter set p depends
// on the cell's growth progress growPro:
//   growPro > grthPrgrCriEnd_M : division parameters sceIBDiv_M
//   growPro > grthPrgrCriVal_M : linear blend of sceIB_M and sceIBDiv_M
//   otherwise                  : normal parameters sceIB_M
// NOTE(review): linkLength is used as a divisor at the end; coincident
// nodes (linkLength == 0) would yield NaN/Inf — presumably prevented
// upstream, confirm.
__device__
void calAndAddIB_M(double& xPos, double& yPos, double& xPos2, double& yPos2,
		double& growPro, double& xRes, double& yRes, double grthPrgrCriVal_M) {
	double linkLength = compDist2D(xPos, yPos, xPos2, yPos2);
	double forceValue = 0;
	if (growPro > grthPrgrCriEnd_M) {
		// fully in the division phase: division parameters only
		if (linkLength < sceIBDiv_M[4]) {
			forceValue = -sceIBDiv_M[0] / sceIBDiv_M[2]
					* exp(-linkLength / sceIBDiv_M[2])
					+ sceIBDiv_M[1] / sceIBDiv_M[3]
							* exp(-linkLength / sceIBDiv_M[3]);
		}
	} else if (growPro > grthPrgrCriVal_M) {
		// transition phase: interpolate each parameter between the normal
		// and division sets by how far growPro has progressed
		double percent = (growPro - grthPrgrCriVal_M)
				/ (grthPrgrCriEnd_M - grthPrgrCriVal_M);
		double lenLimit = percent * (sceIBDiv_M[4])
				+ (1.0 - percent) * sceIB_M[4];
		if (linkLength < lenLimit) {
			double intnlBPara0 = percent * (sceIBDiv_M[0])
					+ (1.0 - percent) * sceIB_M[0];
			double intnlBPara1 = percent * (sceIBDiv_M[1])
					+ (1.0 - percent) * sceIB_M[1];
			double intnlBPara2 = percent * (sceIBDiv_M[2])
					+ (1.0 - percent) * sceIB_M[2];
			double intnlBPara3 = percent * (sceIBDiv_M[3])
					+ (1.0 - percent) * sceIB_M[3];
			forceValue = -intnlBPara0 / intnlBPara2
					* exp(-linkLength / intnlBPara2)
					+ intnlBPara1 / intnlBPara3
							* exp(-linkLength / intnlBPara3);
		}
	} else {
		// normal (interphase) parameters
		if (linkLength < sceIB_M[4]) {
			forceValue = -sceIB_M[0] / sceIB_M[2]
					* exp(-linkLength / sceIB_M[2])
					+ sceIB_M[1] / sceIB_M[3] * exp(-linkLength / sceIB_M[3]);
		}
	}
	// project the scalar force onto the unit vector from node 1 to node 2
	xRes = xRes + forceValue * (xPos2 - xPos) / linkLength;
	yRes = yRes + forceValue * (yPos2 - yPos) / linkLength;
}
//Ali function added for eventually computing pressure for each cells
// Identical force law to calAndAddIB_M (see its header comment for the
// three growth-phase regimes and the parameter layout), but additionally
// accumulates the force components into F_MI_M_x/F_MI_M_y so the
// membrane-internal force can later be used for per-cell pressure
// computation.
// NOTE(review): same linkLength == 0 division hazard as calAndAddIB_M.
__device__
void calAndAddIB_M2(double& xPos, double& yPos, double& xPos2, double& yPos2,
		double& growPro, double& xRes, double& yRes, double & F_MI_M_x, double & F_MI_M_y, double grthPrgrCriVal_M) {
	double linkLength = compDist2D(xPos, yPos, xPos2, yPos2);
	double forceValue = 0;
	if (growPro > grthPrgrCriEnd_M) {
		// fully in the division phase: division parameters only
		if (linkLength < sceIBDiv_M[4]) {
			forceValue = -sceIBDiv_M[0] / sceIBDiv_M[2]
					* exp(-linkLength / sceIBDiv_M[2])
					+ sceIBDiv_M[1] / sceIBDiv_M[3]
							* exp(-linkLength / sceIBDiv_M[3]);
		}
	} else if (growPro > grthPrgrCriVal_M) {
		// transition phase: interpolate parameters between normal and
		// division sets
		double percent = (growPro - grthPrgrCriVal_M)
				/ (grthPrgrCriEnd_M - grthPrgrCriVal_M);
		double lenLimit = percent * (sceIBDiv_M[4])
				+ (1.0 - percent) * sceIB_M[4];
		if (linkLength < lenLimit) {
			double intnlBPara0 = percent * (sceIBDiv_M[0])
					+ (1.0 - percent) * sceIB_M[0];
			double intnlBPara1 = percent * (sceIBDiv_M[1])
					+ (1.0 - percent) * sceIB_M[1];
			double intnlBPara2 = percent * (sceIBDiv_M[2])
					+ (1.0 - percent) * sceIB_M[2];
			double intnlBPara3 = percent * (sceIBDiv_M[3])
					+ (1.0 - percent) * sceIB_M[3];
			forceValue = -intnlBPara0 / intnlBPara2
					* exp(-linkLength / intnlBPara2)
					+ intnlBPara1 / intnlBPara3
							* exp(-linkLength / intnlBPara3);
		}
	} else {
		// normal (interphase) parameters
		if (linkLength < sceIB_M[4]) {
			forceValue = -sceIB_M[0] / sceIB_M[2]
					* exp(-linkLength / sceIB_M[2])
					+ sceIB_M[1] / sceIB_M[3] * exp(-linkLength / sceIB_M[3]);
		}
	}
	// record the membrane-internal force components for pressure output,
	// then add the same contribution to the node's force accumulators
	F_MI_M_x=F_MI_M_x+forceValue * (xPos2 - xPos) / linkLength;
	F_MI_M_y=F_MI_M_y+forceValue * (yPos2 - yPos) / linkLength;
	xRes = xRes + forceValue * (xPos2 - xPos) / linkLength;
	yRes = yRes + forceValue * (yPos2 - yPos) / linkLength;
}
// Accumulates the internal-node/internal-node (I-I) pair force exerted on
// node (xPos, yPos) by node (xPos2, yPos2) into (xRes, yRes). Same
// two-exponential law and three growth-phase regimes as calAndAddIB_M,
// but using the intra-cell parameter sets sceII_M / sceIIDiv_M.
// NOTE(review): same linkLength == 0 division hazard as calAndAddIB_M.
__device__
void calAndAddII_M(double& xPos, double& yPos, double& xPos2, double& yPos2,
		double& growPro, double& xRes, double& yRes, double grthPrgrCriVal_M) {
	double linkLength = compDist2D(xPos, yPos, xPos2, yPos2);
	double forceValue = 0;
	if (growPro > grthPrgrCriEnd_M) {
		// fully in the division phase: division parameters only
		if (linkLength < sceIIDiv_M[4]) {
			forceValue = -sceIIDiv_M[0] / sceIIDiv_M[2]
					* exp(-linkLength / sceIIDiv_M[2])
					+ sceIIDiv_M[1] / sceIIDiv_M[3]
							* exp(-linkLength / sceIIDiv_M[3]);
		}
	} else if (growPro > grthPrgrCriVal_M) {
		// transition phase: interpolate parameters between normal and
		// division sets
		double percent = (growPro - grthPrgrCriVal_M)
				/ (grthPrgrCriEnd_M - grthPrgrCriVal_M);
		double lenLimit = percent * (sceIIDiv_M[4])
				+ (1.0 - percent) * sceII_M[4];
		if (linkLength < lenLimit) {
			double intraPara0 = percent * (sceIIDiv_M[0])
					+ (1.0 - percent) * sceII_M[0];
			double intraPara1 = percent * (sceIIDiv_M[1])
					+ (1.0 - percent) * sceII_M[1];
			double intraPara2 = percent * (sceIIDiv_M[2])
					+ (1.0 - percent) * sceII_M[2];
			double intraPara3 = percent * (sceIIDiv_M[3])
					+ (1.0 - percent) * sceII_M[3];
			forceValue = -intraPara0 / intraPara2
					* exp(-linkLength / intraPara2)
					+ intraPara1 / intraPara3 * exp(-linkLength / intraPara3);
		}
	} else {
		// normal (interphase) parameters
		if (linkLength < sceII_M[4]) {
			forceValue = -sceII_M[0] / sceII_M[2]
					* exp(-linkLength / sceII_M[2])
					+ sceII_M[1] / sceII_M[3] * exp(-linkLength / sceII_M[3]);
		}
	}
	// project the scalar force onto the unit vector from node 1 to node 2
	xRes = xRes + forceValue * (xPos2 - xPos) / linkLength;
	yRes = yRes + forceValue * (yPos2 - yPos) / linkLength;
}
|
fa6f04b68184602b72c1fa84eca5e0e3a13af888.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include "all_structure_undir.cuh"
#include "gpuFunctions_undir.cuh"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include<vector>
#include <chrono>
#include <algorithm>
#include "cuCompactor_hip.cuh"
#include "supportingFunctions.cu"
#define THREADS_PER_BLOCK 1024 //we can change it
using namespace std;
using namespace std::chrono;
/*
1st arg: original graph file name
2nd arg: no. of nodes
3rd arg: no. of edges
4th arg: input SSSP file name
5th arg: change edges file name
6th arg: drone start vertex
7th arg: destination vertex
8th arg: payload (0 or 7)
9th arg: output file name
****main commands to run****
nvcc -o op_main CudaSSSPmain.cu
./op_main original_graph_file_name number_of_nodes number_of_edges input_SSSP_file_name change_edge_file_name drone_start_vertex destination_vertex payload output_file_name
*/
int main(int argc, char* argv[]) {
int nodes, edges, deviceId, numberOfSMs;
int no_of_movement = 0;
hipError_t cudaStatus;
char* graphFile = argv[1];
nodes = atoi(argv[2]);
edges = atoi(argv[3]);
char* inputSSSPfile = argv[4];
char* changeEdgesFile = argv[5];
char* outFile = argv[9]; //output file
//Drone related
int currentLoc = 0; //drone's current location. considering single drone single depot.
int nextLoc; //drone's current location
int droneStartLoc = atoi(argv[6]); //drone start vertex
int destination = atoi(argv[7]); //destination vertex
int payload = atoi(argv[8]); //payload
int cost = 0; //total cost for travel
int* traversed;
traversed = (int*)calloc(nodes, sizeof(int));
int ws[4] = {0,5,10,15}; //keep the choices here//change if taking different dataset
int wd[5] = {180,135,90,45,0}; //keep the choices here//change if taking different dataset
int oldRand = 0, oldRand2 = 0;
while (currentLoc != destination && no_of_movement < 20) {
if (no_of_movement > 0)
{
graphFile = "nextGraph.txt";
inputSSSPfile = "nextSSSP.txt";
changeEdgesFile = "nextEffectiveChangeEdges.txt"; //have to vary this randomly
}
int totalInsertion = 0;
bool zeroDelFlag = false, zeroInsFlag = false;
vector<ColWtList> AdjList; //stores input graph in 2D adjacency list
vector<ColWt> AdjListFull; //Row-major implementation of adjacency list (1D)
ColWt* AdjListFull_device; //1D array in GPU to store Row-major implementation of adjacency list
int* AdjListTracker_device; //1D array to track offset for each node's adjacency list
vector<changeEdge> allChange_Ins, allChange_Del;
changeEdge* allChange_Ins_device; //stores all change edges marked for insertion in GPU
changeEdge* allChange_Del_device; //stores all change edges marked for deletion in GPU
int* counter_del;
int* affectedNodeList_del;
int* updatedAffectedNodeList_del;
int* updated_counter_del;
vector<ColList> SSSPTreeAdjList;
int* SSSPTreeAdjListTracker;
vector<int> SSSPTreeAdjListFull;
RT_Vertex* SSSP;
int* SSSPTreeAdjListFull_device;
int* SSSPTreeAdjListTracker_device;
vector<int> hop;
int* d_hop;
//Get gpu device id and number of SMs
hipGetDevice(&deviceId);
hipDeviceGetAttribute(&numberOfSMs, hipDeviceAttributeMultiprocessorCount, deviceId);
size_t numberOfBlocks = 32 * numberOfSMs;
//Read Original input graph
AdjList.resize(nodes);
int* AdjListTracker = (int*)malloc((nodes + 1) * sizeof(int));//we take nodes +1 to store the start ptr of the first row
read_graphEdges(AdjList, graphFile, &nodes);
//Read change edges input
readin_changes(changeEdgesFile, allChange_Ins, allChange_Del, AdjList, totalInsertion);
int totalChangeEdges_Ins = allChange_Ins.size();
if (totalChangeEdges_Ins == 0) {
zeroInsFlag = true;
}
int totalChangeEdges_Del = allChange_Del.size();
if (totalChangeEdges_Del == 0) {
zeroDelFlag = true;
}
//Transfer input graph, changed edges to GPU and set memory advices
transfer_data_to_GPU(AdjList, AdjListTracker, AdjListFull, AdjListFull_device,
nodes, edges, totalInsertion, AdjListTracker_device, zeroInsFlag,
allChange_Ins, allChange_Ins_device, totalChangeEdges_Ins,
deviceId, totalChangeEdges_Del, zeroDelFlag, allChange_Del_device,
counter_del, affectedNodeList_del, updatedAffectedNodeList_del, updated_counter_del, allChange_Del, numberOfBlocks);
//Read input SSSP Tree and storing on unified memory
read_and_transfer_input_SSSPtree_to_GPU(inputSSSPfile, SSSPTreeAdjList, SSSPTreeAdjListTracker, SSSPTreeAdjListFull,
SSSP, nodes, edges, SSSPTreeAdjListFull_device, SSSPTreeAdjListTracker_device, hop, deviceId, d_hop);
//Initialize supporting variables
int* change = 0;
cudaStatus = hipMallocManaged(&change, sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed at change structure");
}
int* affectedNodeList;
cudaStatus = hipMallocManaged(&affectedNodeList, nodes * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed at affectedNodeList structure");
}
int* counter = 0;
cudaStatus = hipMallocManaged(&counter, sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed at counter structure");
}
int* updatedAffectedNodeList_all;
hipMallocManaged(&updatedAffectedNodeList_all, nodes * sizeof(int));
int* updated_counter_all = 0;
hipMallocManaged(&updated_counter_all, sizeof(int));
//**process change edges**
auto startTimeDelEdge = high_resolution_clock::now(); //Time calculation start
//Process del edges
if (zeroDelFlag != true) {
deleteEdge << < numberOfBlocks, THREADS_PER_BLOCK >> > (allChange_Del_device, SSSP, totalChangeEdges_Del, AdjListFull_device, AdjListTracker_device);
hipDeviceSynchronize();
}
auto stopTimeDelEdge = high_resolution_clock::now();//Time calculation ends
auto durationDelEdge = duration_cast<microseconds>(stopTimeDelEdge - startTimeDelEdge);// duration calculation
//cout << "**Time taken for processing deleted edges: "<< float(durationDelEdge.count()) / 1000 << " milliseconds**" << endl;
//Process ins edges
auto startTimeinsertEdge = high_resolution_clock::now();
if (zeroInsFlag != true) {
insertEdge << < numberOfBlocks, THREADS_PER_BLOCK >> > (allChange_Ins_device, SSSP, totalChangeEdges_Ins, AdjListFull_device, AdjListTracker_device);
hipDeviceSynchronize();
}
auto stopTimeinsertEdge = high_resolution_clock::now();//Time calculation ends
auto durationinsertEdge = duration_cast<microseconds>(stopTimeinsertEdge - startTimeinsertEdge);// duration calculation
//cout << "**Time taken for processing inserted Edges: "<< float(durationinsertEdge.count()) / 1000 << " milliseconds**" << endl;
//new code
//int* Q_array = (int*)malloc((nodes) * sizeof(int));
//vector<int> Del_Affected_array;
//auto startTime1 = high_resolution_clock::now();
//*counter_del = cuCompactor::compact<RT_Vertex, int>(SSSP, affectedNodeList_del, nodes, predicate(), THREADS_PER_BLOCK);
///*for (int i = 0; i < *counter_del; i++) {
// printf("%d::", affectedNodeList_del[i]);
//}*/
////hipDeviceSynchronize();
//for (int i = 0; i < *counter_del; i++) {
// int x = affectedNodeList_del[i];
// Del_Affected_array.push_back(x);
//}
////printf("test 2");
//int j = 0;
//while (j < Del_Affected_array.size())
//{
// int x = Del_Affected_array[j];
// if (SSSPTreeAdjList.at(x).size() > 0) {
// Del_Affected_array.insert(std::end(Del_Affected_array), std::begin(SSSPTreeAdjList.at(x)), std::end(SSSPTreeAdjList.at(x)));
// }
//
// j++;
//}
//auto stopTime1 = high_resolution_clock::now();//Time calculation ends
//auto durationin1 = duration_cast<microseconds>(stopTime1 - startTime1);// duration calculation
//cout << "**Time taken for creating Del_Affected_array: "
// << float(durationin1.count()) / 1000 << " milliseconds**" << endl;
//cout << "size of Del_Affected_array:" << j << endl;
//for (int i = 0; i < Del_Affected_array.size(); i++) {
// printf("%d::", Del_Affected_array[i]);
//}
//new code
//**make the subtree under deletion affected vertices disconnected (make wt = inf)
auto startTimeupdateNeighbors_del = high_resolution_clock::now();
if (zeroDelFlag != true) {
*counter_del = cuCompactor::compact<RT_Vertex, int>(SSSP, affectedNodeList_del, nodes, predicate(), THREADS_PER_BLOCK);
*change = 1;
while (*change > 0) {
*change = 0;
updateNeighbors_del << <numberOfBlocks, THREADS_PER_BLOCK >> >
(SSSP, updated_counter_del, updatedAffectedNodeList_del, affectedNodeList_del, counter_del, SSSPTreeAdjListFull_device, SSSPTreeAdjListTracker_device, change);
*counter_del = cuCompactor::compact<RT_Vertex, int>(SSSP, affectedNodeList_del, nodes, predicate(), THREADS_PER_BLOCK);
//printf("number of elements in the compacted list: %d\n", *counter_del);
//hipDeviceSynchronize();//not required as hipMalloc/hipFree perform heavy-weight synchronizations. cuCompactor::compact uses both in it.
}
}
hipFree(SSSPTreeAdjListFull_device); //we can free memory at the end if we have enough GPU memory. That will decrease some time
hipFree(SSSPTreeAdjListTracker);
auto stopTimeupdateNeighbors_del = high_resolution_clock::now();//Time calculation ends
auto durationupdateNeighbors_del = duration_cast<microseconds>(stopTimeupdateNeighbors_del - startTimeupdateNeighbors_del);// duration calculation
//cout << "**Time taken for updateNeighbors_del: "<< float(durationupdateNeighbors_del.count()) / 1000 << " milliseconds**" << endl;
//**Update neighbors and connect disconnected vertices with main SSSP tree**
auto startTimeupdateNeighbors = high_resolution_clock::now();
//collect all vertices where update value > 0
*counter = cuCompactor::compact<RT_Vertex, int>(SSSP, affectedNodeList, nodes, predicate2(), THREADS_PER_BLOCK);
*change = 1;
while (*change == 1) {
*change = 0;
updateNeighbors << <(*counter / THREADS_PER_BLOCK) + 1, THREADS_PER_BLOCK >> > (SSSP, counter, affectedNodeList, AdjListFull_device, AdjListTracker_device, change);
*counter = cuCompactor::compact<RT_Vertex, int>(SSSP, affectedNodeList, nodes, predicate2(), THREADS_PER_BLOCK);
//hipDeviceSynchronize(); //not required as hipMalloc/hipFree perform heavy-weight synchronizations. cuCompactor::compact uses both in it.
}
auto stopTimeupdateNeighbors = high_resolution_clock::now();//Time calculation ends
auto durationupdateNeighbors = duration_cast<microseconds>(stopTimeupdateNeighbors - startTimeupdateNeighbors);// duration calculation
//cout << "**Time taken for updateNeighbors: "<< float(durationupdateNeighbors.count()) / 1000 << " milliseconds**" << endl;
cout << "****Total Time taken for SSSP update: "
<< (float(durationDelEdge.count()) + float(durationupdateNeighbors_del.count()) + float(durationinsertEdge.count()) + float(durationupdateNeighbors.count())) / 1000 << " milliseconds****" << endl;
//cout << "Total affected nodes by Delete edge only: " << totalAffectedNodes_del << endl;
//print node parent distance
//cout << "from GPU: \n[";
//printSSSP << <1, 1 >> > (SSSP, nodes);
//hipDeviceSynchronize();
//int x;
//if (nodes < 40) {
// x = nodes;
//}
//else {
// x = 40;
//}
////cout << "from CPU: \n[";
//for (int i = 0; i < x; i++) {
// cout << i << " " << SSSP[i].Parent << " " << SSSP[i].Dist << endl;
// //cout << i << ":" << SSSP[i].Dist << " ";
//}
//****Print next move****
traversed[currentLoc] = 1;
int parent = -1;
int y = destination;
cout << "print path" << endl;
cout << y;
while (y != 0) {
parent = SSSP[y].Parent;
cout << "<- " << parent << "(" << SSSP[y].Dist - SSSP[parent].Dist << ")"; //test
if (parent == currentLoc)
{
cost = cost + SSSP[y].Dist - SSSP[parent].Dist;
cout << "\nNext move: " << parent << "to" << y << " cost upto this: " << cost << endl;
nextLoc = y;
}
y = parent;
//cout << i << ":" << SSSP[i].Dist << " ";
}
//****print sssp tree in file. format: vertex parent distance****
ofstream myfile("nextSSSP.txt");
if (myfile.is_open())
{
for (int i = 0; i < nodes; i++) {
string line = to_string(i) + " " + to_string(SSSP[i].Parent) + " " + to_string(SSSP[i].Dist) + "\n";
myfile << line;
}
myfile.close();
}
else cout << "Unable to open nextSSSP.txt file";
//****print current graph in file. format: vertex1 vertex2 weight****
ofstream myfile2("nextGraph.txt");
int nextEdges = 0;
if (myfile2.is_open())
{
for (int i = 0; i < nodes; i++) {
for (int j = AdjListTracker[i]; j < AdjListTracker[i + 1]; j++) {
int myn = AdjListFull_device[j].col;
int mywt = AdjListFull_device[j].wt;
if (mywt < 0) { continue; } //if mywt = -1, that means edge was deleted
if (myn < i) { continue; } //avoid printing "b a w1" if "a b w1" is already printed
string line = to_string(i) + " " + to_string(myn) + " " + to_string(mywt) + "\n";
myfile2 << line;
nextEdges++;
}
}
myfile2.close();
}
else cout << "Unable to open nextGraph.txt file";
//****choose and print next effective change edges****
srand(time(NULL));
int random1 = rand() % 4; //choice for ws
int random2 = rand() % 5; //choice for wd
if(oldRand == random1)
{
random1 = (random1 + 1)%4;
}
if(oldRand2 == random2)
{
random2 = (random2 + 1)%5; //adding 1 just to create another number
}
oldRand = random1;
oldRand2 = random2;
string ceFileName = "TATA_p" + to_string(payload) + "_ws" + to_string(ws[random1]) +"_wd"+to_string(wd[random2]) + ".txt";
//int filename_length = ceFileName.length();
cout<<"next changeEdgeFile::"<<ceFileName<<endl;
const char *char_fileName = ceFileName.c_str();
ofstream myfile3("nextEffectiveChangeEdges.txt");
if (myfile3.is_open())
{
//insert new edges
FILE* delE_file;
char line[128];
//delE_file = fopen("nextChangeEdges.txt", "r"); //select the next Del E***implement a random choice fn***
delE_file = fopen(char_fileName, "r"); //select the next Del E
while (fgets(line, 128, delE_file) != NULL)
{
int n1, n2, wt;
changeEdge cE;
sscanf(line, "%d %d %d", &n1, &n2, &wt);
//Add change edge in effective change edge only when none of the endpoint is traversed
if (traversed[n1] == 0 && traversed[n2] == 0)
{
int flag1 = 0;
//****delete edge (u,n,wt) when drone moves from u to v****
for (int j = AdjListTracker[n1]; j < AdjListTracker[n1 + 1]; j++) {
int myn = AdjListFull_device[j].col;
int mywt = AdjListFull_device[j].wt;
if (mywt < 0) { continue; } //if mywt = -1, that means edge was deleted
if (myn == n2)
{
if (mywt != wt) {
string line1 = to_string(n1) + " " + to_string(myn) + " " + to_string(mywt) + " " + to_string(0) + "\n"; //delete previous edge
myfile3 << line1;
//cout << line1 << endl;
string line2 = to_string(n1) + " " + to_string(n2) + " " + to_string(wt) + " " + to_string(1) + "\n"; //insert new edge
myfile3 << line2;
//cout << line2 << endl;
}
flag1 = 1;
break;
}
}
if (flag1 == 0)
{
string line2 = to_string(n1) + " " + to_string(n2) + " " + to_string(wt) + " " + to_string(1) + "\n"; //insert new edge
myfile3 << line2;
//cout << line2 << endl;
}
}
}
fclose(delE_file);
//****delete edge (u,n,wt) when drone moves from u to v****
for (int j = AdjListTracker[currentLoc]; j < AdjListTracker[currentLoc + 1]; j++) {
int myn = AdjListFull_device[j].col;
int mywt = AdjListFull_device[j].wt;
if (mywt < 0) { continue; } //if mywt = -1, that means edge was deleted
if (myn == nextLoc) { continue; } //skip as nextLoc is v and (u,v) should be 0
if (traversed[myn] == 1) { continue; }
string line4 = to_string(currentLoc) + " " + to_string(myn) + " " + to_string(mywt) + " " + to_string(0) + "\n";
myfile3 << line4;
}
//insert edge (u,v,0) when drone moves from u to v
string line1 = to_string(currentLoc) + " " + to_string(nextLoc) + " " + to_string(0) + " " + to_string(1) + "\n";
myfile3 << line1;
myfile3.close();
}
else cout << "Unable to open nextEffectiveChangeEdges.txt file";
//prepare for next iteration
currentLoc = nextLoc; //prepare current location for next iteration
edges = nextEdges;
no_of_movement++;
if (zeroDelFlag != true) {
hipFree(affectedNodeList_del);
hipFree(updatedAffectedNodeList_del);
hipFree(counter_del);
hipFree(updated_counter_del);
hipFree(allChange_Del_device);
}
if (zeroInsFlag != true) {
hipFree(allChange_Ins_device);
}
hipFree(change);
hipFree(affectedNodeList);
hipFree(counter);
hipFree(AdjListFull_device);
hipFree(AdjListTracker_device);
hipFree(SSSP);
hipFree(d_hop); //try to free this at some earlier place
}
std::ofstream ofs;
ofs.open (outFile, std::ofstream::out | std::ofstream::app);
ofs << droneStartLoc << " " << destination << " " << cost <<"\n";
ofs.close();
return 0;
} | fa6f04b68184602b72c1fa84eca5e0e3a13af888.cu | #include <stdio.h>
#include "all_structure_undir.cuh"
#include "gpuFunctions_undir.cuh"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include<vector>
#include <chrono>
#include <algorithm>
#include "cuCompactor.cuh"
#include "supportingFunctions.cu"
#define THREADS_PER_BLOCK 1024 //we can change it
using namespace std;
using namespace std::chrono;
/*
1st arg: original graph file name
2nd arg: no. of nodes
3rd arg: no. of edges
4th arg: input SSSP file name
5th arg: change edges file name
6th arg: drone start vertex
7th arg: destination vertex
8th arg: payload (0 or 7)
9th arg: output file name
****main commands to run****
nvcc -o op_main CudaSSSPmain.cu
./op_main original_graph_file_name number_of_nodes number_of_edges input_SSSP_file_name change_edge_file_name
*/
/*
 * Drone routing driver built on incremental GPU SSSP updates.
 *
 * Each loop iteration: (re)loads the graph, SSSP tree and change edges,
 * applies the edge insertions/deletions to the SSSP tree on the GPU,
 * walks the updated tree to pick the drone's next hop toward `destination`,
 * and writes out the graph/SSSP/change-edge files consumed by the next
 * iteration.  argv layout is documented in the comment block above main.
 *
 * Fixes vs. the previous revision:
 *  - guard against fopen() returning NULL before calling fgets();
 *  - free AdjListTracker / updatedAffectedNodeList_all / updated_counter_all,
 *    which previously leaked on every loop iteration;
 *  - error-check the two previously unchecked cudaMallocManaged calls.
 */
int main(int argc, char* argv[]) {
    int nodes, edges, deviceId, numberOfSMs;
    int no_of_movement = 0;              // moves made so far; hard-capped at 20 below
    cudaError_t cudaStatus;
    char* graphFile = argv[1];
    nodes = atoi(argv[2]);
    edges = atoi(argv[3]);
    char* inputSSSPfile = argv[4];
    char* changeEdgesFile = argv[5];
    char* outFile = argv[9]; //output file
    //Drone related
    int currentLoc = 0; //drone's current location. considering single drone single depot.
    int nextLoc; //drone's next location (set while walking the SSSP path below)
    int droneStartLoc = atoi(argv[6]); //drone start vertex
    int destination = atoi(argv[7]); //destination vertex
    int payload = atoi(argv[8]); //payload
    int cost = 0; //total cost for travel
    int* traversed;
    traversed = (int*)calloc(nodes, sizeof(int)); // traversed[v] == 1 => drone already visited v
    int ws[4] = {0,5,10,15}; //keep the choices here//change if taking different dataset
    int wd[5] = {180,135,90,45,0}; //keep the choices here//change if taking different dataset
    int oldRand = 0, oldRand2 = 0; // previous random picks, to avoid re-picking the same file

    while (currentLoc != destination && no_of_movement < 20) {
        // After the first move, inputs come from the files written by the previous iteration.
        if (no_of_movement > 0)
        {
            graphFile = "nextGraph.txt";
            inputSSSPfile = "nextSSSP.txt";
            changeEdgesFile = "nextEffectiveChangeEdges.txt"; //have to vary this randomly
        }
        int totalInsertion = 0;
        bool zeroDelFlag = false, zeroInsFlag = false;
        vector<ColWtList> AdjList;      //stores input graph in 2D adjacency list
        vector<ColWt> AdjListFull;      //Row-major implementation of adjacency list (1D)
        ColWt* AdjListFull_device;      //1D array in GPU to store row-major adjacency list
        int* AdjListTracker_device;     //1D array to track offset for each node's adjacency list
        vector<changeEdge> allChange_Ins, allChange_Del;
        changeEdge* allChange_Ins_device; //change edges marked for insertion, in GPU
        changeEdge* allChange_Del_device; //change edges marked for deletion, in GPU
        int* counter_del;
        int* affectedNodeList_del;
        int* updatedAffectedNodeList_del;
        int* updated_counter_del;
        vector<ColList> SSSPTreeAdjList;
        int* SSSPTreeAdjListTracker;
        vector<int> SSSPTreeAdjListFull;
        RT_Vertex* SSSP;
        int* SSSPTreeAdjListFull_device;
        int* SSSPTreeAdjListTracker_device;
        vector<int> hop;
        int* d_hop;
        //Get gpu device id and number of SMs
        cudaGetDevice(&deviceId);
        cudaDeviceGetAttribute(&numberOfSMs, cudaDevAttrMultiProcessorCount, deviceId);
        size_t numberOfBlocks = 32 * numberOfSMs;
        //Read original input graph
        AdjList.resize(nodes);
        int* AdjListTracker = (int*)malloc((nodes + 1) * sizeof(int)); //nodes + 1 to store the start ptr of the first row
        read_graphEdges(AdjList, graphFile, &nodes);
        //Read change edges input
        readin_changes(changeEdgesFile, allChange_Ins, allChange_Del, AdjList, totalInsertion);
        int totalChangeEdges_Ins = allChange_Ins.size();
        if (totalChangeEdges_Ins == 0) {
            zeroInsFlag = true;
        }
        int totalChangeEdges_Del = allChange_Del.size();
        if (totalChangeEdges_Del == 0) {
            zeroDelFlag = true;
        }
        //Transfer input graph and changed edges to GPU and set memory advices
        transfer_data_to_GPU(AdjList, AdjListTracker, AdjListFull, AdjListFull_device,
            nodes, edges, totalInsertion, AdjListTracker_device, zeroInsFlag,
            allChange_Ins, allChange_Ins_device, totalChangeEdges_Ins,
            deviceId, totalChangeEdges_Del, zeroDelFlag, allChange_Del_device,
            counter_del, affectedNodeList_del, updatedAffectedNodeList_del, updated_counter_del, allChange_Del, numberOfBlocks);
        //Read input SSSP tree and store it in unified memory
        read_and_transfer_input_SSSPtree_to_GPU(inputSSSPfile, SSSPTreeAdjList, SSSPTreeAdjListTracker, SSSPTreeAdjListFull,
            SSSP, nodes, edges, SSSPTreeAdjListFull_device, SSSPTreeAdjListTracker_device, hop, deviceId, d_hop);
        //Initialize supporting variables (managed memory: read/written by both host and device)
        int* change = 0;
        cudaStatus = cudaMallocManaged(&change, sizeof(int));
        if (cudaStatus != cudaSuccess) {
            fprintf(stderr, "cudaMalloc failed at change structure");
        }
        int* affectedNodeList;
        cudaStatus = cudaMallocManaged(&affectedNodeList, nodes * sizeof(int));
        if (cudaStatus != cudaSuccess) {
            fprintf(stderr, "cudaMalloc failed at affectedNodeList structure");
        }
        int* counter = 0;
        cudaStatus = cudaMallocManaged(&counter, sizeof(int));
        if (cudaStatus != cudaSuccess) {
            fprintf(stderr, "cudaMalloc failed at counter structure");
        }
        int* updatedAffectedNodeList_all;
        cudaStatus = cudaMallocManaged(&updatedAffectedNodeList_all, nodes * sizeof(int));
        if (cudaStatus != cudaSuccess) { // FIX: this allocation was previously unchecked
            fprintf(stderr, "cudaMalloc failed at updatedAffectedNodeList_all structure");
        }
        int* updated_counter_all = 0;
        cudaStatus = cudaMallocManaged(&updated_counter_all, sizeof(int));
        if (cudaStatus != cudaSuccess) { // FIX: this allocation was previously unchecked
            fprintf(stderr, "cudaMalloc failed at updated_counter_all structure");
        }
        //**process change edges**
        auto startTimeDelEdge = high_resolution_clock::now(); //Time calculation start
        //Process del edges
        if (zeroDelFlag != true) {
            deleteEdge << < numberOfBlocks, THREADS_PER_BLOCK >> > (allChange_Del_device, SSSP, totalChangeEdges_Del, AdjListFull_device, AdjListTracker_device);
            cudaDeviceSynchronize();
        }
        auto stopTimeDelEdge = high_resolution_clock::now(); //Time calculation ends
        auto durationDelEdge = duration_cast<microseconds>(stopTimeDelEdge - startTimeDelEdge); // duration calculation
        //Process ins edges
        auto startTimeinsertEdge = high_resolution_clock::now();
        if (zeroInsFlag != true) {
            insertEdge << < numberOfBlocks, THREADS_PER_BLOCK >> > (allChange_Ins_device, SSSP, totalChangeEdges_Ins, AdjListFull_device, AdjListTracker_device);
            cudaDeviceSynchronize();
        }
        auto stopTimeinsertEdge = high_resolution_clock::now(); //Time calculation ends
        auto durationinsertEdge = duration_cast<microseconds>(stopTimeinsertEdge - startTimeinsertEdge); // duration calculation
        //(removed: large commented-out CPU-side Del_Affected_array experiment)
        //**make the subtrees under deletion-affected vertices disconnected (make wt = inf)**
        auto startTimeupdateNeighbors_del = high_resolution_clock::now();
        if (zeroDelFlag != true) {
            *counter_del = cuCompactor::compact<RT_Vertex, int>(SSSP, affectedNodeList_del, nodes, predicate(), THREADS_PER_BLOCK);
            *change = 1;
            // Fixed-point iteration: repeat until a full pass changes nothing.
            while (*change > 0) {
                *change = 0;
                updateNeighbors_del << <numberOfBlocks, THREADS_PER_BLOCK >> >
                    (SSSP, updated_counter_del, updatedAffectedNodeList_del, affectedNodeList_del, counter_del, SSSPTreeAdjListFull_device, SSSPTreeAdjListTracker_device, change);
                *counter_del = cuCompactor::compact<RT_Vertex, int>(SSSP, affectedNodeList_del, nodes, predicate(), THREADS_PER_BLOCK);
                //cudaDeviceSynchronize() not required: cuCompactor::compact performs
                //cudaMalloc/cudaFree internally, which are heavy-weight synchronizations.
            }
        }
        cudaFree(SSSPTreeAdjListFull_device); //we can free at the end if we have enough GPU memory; freeing here saves memory
        cudaFree(SSSPTreeAdjListTracker); // NOTE(review): this frees the host-side tracker pointer, not
                                          // SSSPTreeAdjListTracker_device — confirm this is intended.
        auto stopTimeupdateNeighbors_del = high_resolution_clock::now(); //Time calculation ends
        auto durationupdateNeighbors_del = duration_cast<microseconds>(stopTimeupdateNeighbors_del - startTimeupdateNeighbors_del); // duration calculation
        //**Update neighbors and connect disconnected vertices with main SSSP tree**
        auto startTimeupdateNeighbors = high_resolution_clock::now();
        //collect all vertices where update value > 0
        *counter = cuCompactor::compact<RT_Vertex, int>(SSSP, affectedNodeList, nodes, predicate2(), THREADS_PER_BLOCK);
        *change = 1;
        while (*change == 1) {
            *change = 0;
            updateNeighbors << <(*counter / THREADS_PER_BLOCK) + 1, THREADS_PER_BLOCK >> > (SSSP, counter, affectedNodeList, AdjListFull_device, AdjListTracker_device, change);
            *counter = cuCompactor::compact<RT_Vertex, int>(SSSP, affectedNodeList, nodes, predicate2(), THREADS_PER_BLOCK);
        }
        auto stopTimeupdateNeighbors = high_resolution_clock::now(); //Time calculation ends
        auto durationupdateNeighbors = duration_cast<microseconds>(stopTimeupdateNeighbors - startTimeupdateNeighbors); // duration calculation
        cout << "****Total Time taken for SSSP update: "
            << (float(durationDelEdge.count()) + float(durationupdateNeighbors_del.count()) + float(durationinsertEdge.count()) + float(durationupdateNeighbors.count())) / 1000 << " milliseconds****" << endl;
        //****Print next move****
        traversed[currentLoc] = 1;
        int parent = -1;
        int y = destination;
        cout << "print path" << endl;
        cout << y;
        // Walk parent pointers from the destination back toward vertex 0
        // (the loop terminates at y == 0); the edge whose parent equals the
        // drone's current location is the next hop.
        while (y != 0) {
            parent = SSSP[y].Parent;
            cout << "<- " << parent << "(" << SSSP[y].Dist - SSSP[parent].Dist << ")"; //test
            if (parent == currentLoc)
            {
                cost = cost + SSSP[y].Dist - SSSP[parent].Dist;
                cout << "\nNext move: " << parent << "to" << y << " cost upto this: " << cost << endl;
                nextLoc = y;
            }
            y = parent;
        }
        //****print sssp tree in file. format: vertex parent distance****
        ofstream myfile("nextSSSP.txt");
        if (myfile.is_open())
        {
            for (int i = 0; i < nodes; i++) {
                string line = to_string(i) + " " + to_string(SSSP[i].Parent) + " " + to_string(SSSP[i].Dist) + "\n";
                myfile << line;
            }
            myfile.close();
        }
        else cout << "Unable to open nextSSSP.txt file";
        //****print current graph in file. format: vertex1 vertex2 weight****
        ofstream myfile2("nextGraph.txt");
        int nextEdges = 0;
        if (myfile2.is_open())
        {
            for (int i = 0; i < nodes; i++) {
                for (int j = AdjListTracker[i]; j < AdjListTracker[i + 1]; j++) {
                    int myn = AdjListFull_device[j].col;
                    int mywt = AdjListFull_device[j].wt;
                    if (mywt < 0) { continue; } //if mywt = -1, that means edge was deleted
                    if (myn < i) { continue; } //avoid printing "b a w1" if "a b w1" is already printed
                    string line = to_string(i) + " " + to_string(myn) + " " + to_string(mywt) + "\n";
                    myfile2 << line;
                    nextEdges++;
                }
            }
            myfile2.close();
        }
        else cout << "Unable to open nextGraph.txt file";
        //****choose and print next effective change edges****
        srand(time(NULL));
        int random1 = rand() % 4; //choice for ws
        int random2 = rand() % 5; //choice for wd
        // Avoid re-picking the previous iteration's (ws, wd) combination.
        if (oldRand == random1)
        {
            random1 = (random1 + 1) % 4;
        }
        if (oldRand2 == random2)
        {
            random2 = (random2 + 1) % 5; //adding 1 just to create another number
        }
        oldRand = random1;
        oldRand2 = random2;
        string ceFileName = "TATA_p" + to_string(payload) + "_ws" + to_string(ws[random1]) + "_wd" + to_string(wd[random2]) + ".txt";
        cout << "next changeEdgeFile::" << ceFileName << endl;
        const char* char_fileName = ceFileName.c_str();
        ofstream myfile3("nextEffectiveChangeEdges.txt");
        if (myfile3.is_open())
        {
            //insert new edges
            FILE* delE_file;
            char line[128];
            delE_file = fopen(char_fileName, "r"); //select the next change-edge file
            if (delE_file != NULL) { // FIX: previously fgets() was called on a NULL stream when the file was missing
                while (fgets(line, 128, delE_file) != NULL)
                {
                    int n1, n2, wt;
                    sscanf(line, "%d %d %d", &n1, &n2, &wt);
                    //Add change edge in effective change edge only when none of the endpoints is traversed
                    if (traversed[n1] == 0 && traversed[n2] == 0)
                    {
                        int flag1 = 0;
                        // If (n1,n2) already exists with a different weight, emit a delete of
                        // the old weight followed by an insert of the new one.
                        for (int j = AdjListTracker[n1]; j < AdjListTracker[n1 + 1]; j++) {
                            int myn = AdjListFull_device[j].col;
                            int mywt = AdjListFull_device[j].wt;
                            if (mywt < 0) { continue; } //if mywt = -1, that means edge was deleted
                            if (myn == n2)
                            {
                                if (mywt != wt) {
                                    string line1 = to_string(n1) + " " + to_string(myn) + " " + to_string(mywt) + " " + to_string(0) + "\n"; //delete previous edge
                                    myfile3 << line1;
                                    string line2 = to_string(n1) + " " + to_string(n2) + " " + to_string(wt) + " " + to_string(1) + "\n"; //insert new edge
                                    myfile3 << line2;
                                }
                                flag1 = 1;
                                break;
                            }
                        }
                        if (flag1 == 0)
                        {
                            string line2 = to_string(n1) + " " + to_string(n2) + " " + to_string(wt) + " " + to_string(1) + "\n"; //insert new edge
                            myfile3 << line2;
                        }
                    }
                }
                fclose(delE_file);
            }
            else cout << "Unable to open change edge file " << ceFileName << endl;
            //****delete edge (u,n,wt) when drone moves from u to v****
            for (int j = AdjListTracker[currentLoc]; j < AdjListTracker[currentLoc + 1]; j++) {
                int myn = AdjListFull_device[j].col;
                int mywt = AdjListFull_device[j].wt;
                if (mywt < 0) { continue; } //if mywt = -1, that means edge was deleted
                if (myn == nextLoc) { continue; } //skip as nextLoc is v and (u,v) should be 0
                if (traversed[myn] == 1) { continue; }
                string line4 = to_string(currentLoc) + " " + to_string(myn) + " " + to_string(mywt) + " " + to_string(0) + "\n";
                myfile3 << line4;
            }
            //insert edge (u,v,0) when drone moves from u to v
            string line1 = to_string(currentLoc) + " " + to_string(nextLoc) + " " + to_string(0) + " " + to_string(1) + "\n";
            myfile3 << line1;
            myfile3.close();
        }
        else cout << "Unable to open nextEffectiveChangeEdges.txt file";
        //prepare for next iteration
        currentLoc = nextLoc; //prepare current location for next iteration
        edges = nextEdges;
        no_of_movement++;
        // Release per-iteration allocations.
        if (zeroDelFlag != true) {
            cudaFree(affectedNodeList_del);
            cudaFree(updatedAffectedNodeList_del);
            cudaFree(counter_del);
            cudaFree(updated_counter_del);
            cudaFree(allChange_Del_device);
        }
        if (zeroInsFlag != true) {
            cudaFree(allChange_Ins_device);
        }
        cudaFree(change);
        cudaFree(affectedNodeList);
        cudaFree(counter);
        cudaFree(updatedAffectedNodeList_all); // FIX: previously leaked every iteration
        cudaFree(updated_counter_all);         // FIX: previously leaked every iteration
        cudaFree(AdjListFull_device);
        cudaFree(AdjListTracker_device);
        cudaFree(SSSP);
        cudaFree(d_hop); //try to free this at some earlier place
        free(AdjListTracker);                  // FIX: host tracker array previously leaked every iteration
    }
    // Append the overall result (start vertex, destination, accumulated cost).
    std::ofstream ofs;
    ofs.open(outFile, std::ofstream::out | std::ofstream::app);
    ofs << droneStartLoc << " " << destination << " " << cost << "\n";
    ofs.close();
    free(traversed);
    return 0;
}
f4a34ddaebf65e93c8f4deafe22449883f82c61e.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <faiss/gpu/test/TestUtils.h>
#include <faiss/gpu/utils/BlockSelectKernel.cuh>
#include <faiss/gpu/utils/DeviceDefs.cuh>
#include <faiss/gpu/utils/DeviceTensor.cuh>
#include <faiss/gpu/utils/DeviceUtils.h>
#include <faiss/gpu/utils/HostTensor.cuh>
#include <faiss/gpu/utils/WarpSelectKernel.cuh>
#include <algorithm>
#include <gtest/gtest.h>
#include <sstream>
#include <unordered_map>
#include <vector>
// Exercises GPU top-k selection on a (rows x cols) random matrix against a
// CPU reference built with std::sort, validating selected values and (where
// unambiguous) indices. `dir` selects largest-first ordering; `warp` picks
// the warp-select kernel instead of block-select.
void testForSize(int rows, int cols, int k, bool dir, bool warp) {
  std::vector<float> rawVals = faiss::gpu::randVecs(rows, cols);

  // Stage the random values into a host tensor.
  faiss::gpu::HostTensor<float, 2, true> hostVal({rows, cols});
  for (int r = 0; r < rows; ++r) {
    for (int c = 0; c < cols; ++c) {
      hostVal[r][c] = rawVals[r * cols + c];
    }
  }

  // CPU reference: row -> full list of (index, value) pairs sorted in the
  // requested direction.
  std::unordered_map<int, std::vector<std::pair<int, float>>> hostOutValAndInd;
  for (int r = 0; r < rows; ++r) {
    std::vector<std::pair<int, float>> rowPairs;
    for (int c = 0; c < cols; ++c) {
      rowPairs.emplace_back(c, (float) hostVal[r][c]);
    }
    std::sort(rowPairs.begin(), rowPairs.end(),
              [dir](const std::pair<int, float>& a,
                    const std::pair<int, float>& b) {
                return dir ? a.second > b.second : a.second < b.second;
              });
    hostOutValAndInd.emplace(r, std::move(rowPairs));
  }

  // Run the top-k selection on the GPU.
  faiss::gpu::DeviceTensor<float, 2, true> gpuVal(hostVal, 0);
  faiss::gpu::DeviceTensor<float, 2, true> gpuOutVal({rows, k});
  faiss::gpu::DeviceTensor<int, 2, true> gpuOutInd({rows, k});
  if (warp) {
    faiss::gpu::runWarpSelect(gpuVal, gpuOutVal, gpuOutInd, dir, k, 0);
  } else {
    faiss::gpu::runBlockSelect(gpuVal, gpuOutVal, gpuOutInd, dir, k, 0);
  }

  // Copy results back to the host and compare against the reference.
  faiss::gpu::HostTensor<float, 2, true> outVal(gpuOutVal, 0);
  faiss::gpu::HostTensor<int, 2, true> outInd(gpuOutInd, 0);

  for (int r = 0; r < rows; ++r) {
    // Maps each reported index to the slot where it was first seen, so that
    // duplicate indices within a row can be detected.
    std::unordered_map<int, int> seenIndices;

    for (int i = 0; i < k; ++i) {
      float gpuV = outVal[r][i];
      float cpuV = hostOutValAndInd[r][i].second;
      // The i-th selected value must match the reference exactly.
      EXPECT_EQ(gpuV, cpuV) <<
        "rows " << rows << " cols " << cols << " k " << k << " dir " << dir
        << " row " << r << " ind " << i;

      // When equal values straddle the top-k boundary, the GPU may legally
      // report a different index than the CPU: tie order among equal values
      // is unspecified (it depends on the k-selection algorithm used), so
      // indices are only checked indirectly below.
      int gpuInd = outInd[r][i];
      int cpuInd = hostOutValAndInd[r][i].first;

      // An index must never be reported twice for the same row.
      auto itSeenIndex = seenIndices.find(gpuInd);
      EXPECT_EQ(itSeenIndex, seenIndices.end()) <<
        "Row " << r << " user index " << gpuInd << " was seen at both " <<
        itSeenIndex->second << " and " << i;
      seenIndices[gpuInd] = i;

      if (gpuInd != cpuInd) {
        // Differing indices are acceptable only when they refer to equal
        // values in the source data.
        float gpuGatherV = hostVal[r][gpuInd];
        float cpuGatherV = hostVal[r][cpuInd];
        EXPECT_EQ(gpuGatherV, cpuGatherV) <<
          "rows " << rows << " cols " << cols << " k " << k << " dir " << dir
          << " row " << r << " ind " << i << " source ind "
          << gpuInd << " " << cpuInd;
      }
    }
  }
}
// General test: random shapes and random k for the block-select path.
TEST(TestGpuSelect, test) {
  for (int trial = 0; trial < 10; ++trial) {
    const int numRows = faiss::gpu::randVal(10, 100);
    const int numCols = faiss::gpu::randVal(1, 30000);
    const int k = ::min(numCols, faiss::gpu::randVal(1, GPU_MAX_SELECTION_K));
    const bool largestFirst = faiss::gpu::randBool();
    testForSize(numRows, numCols, k, largestFirst, /*warp=*/false);
  }
}
// Degenerate selection size: k = 1 (block select).
TEST(TestGpuSelect, test1) {
  for (int trial = 0; trial < 5; ++trial) {
    const int numRows = faiss::gpu::randVal(10, 100);
    const int numCols = faiss::gpu::randVal(1, 30000);
    const bool largestFirst = faiss::gpu::randBool();
    testForSize(numRows, numCols, 1, largestFirst, /*warp=*/false);
  }
}
// k == #cols: every value must be returned, fully sorted (block select).
TEST(TestGpuSelect, testExact) {
  for (int trial = 0; trial < 5; ++trial) {
    const int numRows = faiss::gpu::randVal(10, 100);
    const int numCols = faiss::gpu::randVal(1, GPU_MAX_SELECTION_K);
    const bool largestFirst = faiss::gpu::randBool();
    testForSize(numRows, numCols, numCols, largestFirst, /*warp=*/false);
  }
}
// General test: random shapes and random k for the warp-select path.
TEST(TestGpuSelect, testWarp) {
  for (int trial = 0; trial < 10; ++trial) {
    const int numRows = faiss::gpu::randVal(10, 100);
    const int numCols = faiss::gpu::randVal(1, 30000);
    const int k = ::min(numCols, faiss::gpu::randVal(1, GPU_MAX_SELECTION_K));
    const bool largestFirst = faiss::gpu::randBool();
    testForSize(numRows, numCols, k, largestFirst, /*warp=*/true);
  }
}
// Degenerate selection size: k = 1 (warp select).
TEST(TestGpuSelect, test1Warp) {
  for (int trial = 0; trial < 5; ++trial) {
    const int numRows = faiss::gpu::randVal(10, 100);
    const int numCols = faiss::gpu::randVal(1, 30000);
    const bool largestFirst = faiss::gpu::randBool();
    testForSize(numRows, numCols, 1, largestFirst, /*warp=*/true);
  }
}
// k == #cols: every value must be returned, fully sorted (warp select).
TEST(TestGpuSelect, testExactWarp) {
  for (int trial = 0; trial < 5; ++trial) {
    const int numRows = faiss::gpu::randVal(10, 100);
    const int numCols = faiss::gpu::randVal(1, GPU_MAX_SELECTION_K);
    const bool largestFirst = faiss::gpu::randBool();
    testForSize(numRows, numCols, numCols, largestFirst, /*warp=*/true);
  }
}
// Test-suite entry point: initialize GoogleTest (consumes gtest flags from
// argv), pin the faiss test RNG seed so the random shapes above are
// reproducible, then run all registered tests.
int main(int argc, char** argv) {
  testing::InitGoogleTest(&argc, argv);
  // just run with a fixed test seed
  faiss::gpu::setTestSeed(100);
  return RUN_ALL_TESTS();
}
| f4a34ddaebf65e93c8f4deafe22449883f82c61e.cu | /**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <faiss/gpu/test/TestUtils.h>
#include <faiss/gpu/utils/BlockSelectKernel.cuh>
#include <faiss/gpu/utils/DeviceDefs.cuh>
#include <faiss/gpu/utils/DeviceTensor.cuh>
#include <faiss/gpu/utils/DeviceUtils.h>
#include <faiss/gpu/utils/HostTensor.cuh>
#include <faiss/gpu/utils/WarpSelectKernel.cuh>
#include <algorithm>
#include <gtest/gtest.h>
#include <sstream>
#include <unordered_map>
#include <vector>
// Exercises GPU top-k selection on a (rows x cols) random matrix against a
// CPU reference built with std::sort, validating selected values and (where
// unambiguous) indices. `dir` selects largest-first ordering; `warp` picks
// the warp-select kernel instead of block-select.
void testForSize(int rows, int cols, int k, bool dir, bool warp) {
  std::vector<float> rawVals = faiss::gpu::randVecs(rows, cols);

  // Stage the random values into a host tensor.
  faiss::gpu::HostTensor<float, 2, true> hostVal({rows, cols});
  for (int r = 0; r < rows; ++r) {
    for (int c = 0; c < cols; ++c) {
      hostVal[r][c] = rawVals[r * cols + c];
    }
  }

  // CPU reference: row -> full list of (index, value) pairs sorted in the
  // requested direction.
  std::unordered_map<int, std::vector<std::pair<int, float>>> hostOutValAndInd;
  for (int r = 0; r < rows; ++r) {
    std::vector<std::pair<int, float>> rowPairs;
    for (int c = 0; c < cols; ++c) {
      rowPairs.emplace_back(c, (float) hostVal[r][c]);
    }
    std::sort(rowPairs.begin(), rowPairs.end(),
              [dir](const std::pair<int, float>& a,
                    const std::pair<int, float>& b) {
                return dir ? a.second > b.second : a.second < b.second;
              });
    hostOutValAndInd.emplace(r, std::move(rowPairs));
  }

  // Run the top-k selection on the GPU.
  faiss::gpu::DeviceTensor<float, 2, true> gpuVal(hostVal, 0);
  faiss::gpu::DeviceTensor<float, 2, true> gpuOutVal({rows, k});
  faiss::gpu::DeviceTensor<int, 2, true> gpuOutInd({rows, k});
  if (warp) {
    faiss::gpu::runWarpSelect(gpuVal, gpuOutVal, gpuOutInd, dir, k, 0);
  } else {
    faiss::gpu::runBlockSelect(gpuVal, gpuOutVal, gpuOutInd, dir, k, 0);
  }

  // Copy results back to the host and compare against the reference.
  faiss::gpu::HostTensor<float, 2, true> outVal(gpuOutVal, 0);
  faiss::gpu::HostTensor<int, 2, true> outInd(gpuOutInd, 0);

  for (int r = 0; r < rows; ++r) {
    // Maps each reported index to the slot where it was first seen, so that
    // duplicate indices within a row can be detected.
    std::unordered_map<int, int> seenIndices;

    for (int i = 0; i < k; ++i) {
      float gpuV = outVal[r][i];
      float cpuV = hostOutValAndInd[r][i].second;
      // The i-th selected value must match the reference exactly.
      EXPECT_EQ(gpuV, cpuV) <<
        "rows " << rows << " cols " << cols << " k " << k << " dir " << dir
        << " row " << r << " ind " << i;

      // When equal values straddle the top-k boundary, the GPU may legally
      // report a different index than the CPU: tie order among equal values
      // is unspecified (it depends on the k-selection algorithm used), so
      // indices are only checked indirectly below.
      int gpuInd = outInd[r][i];
      int cpuInd = hostOutValAndInd[r][i].first;

      // An index must never be reported twice for the same row.
      auto itSeenIndex = seenIndices.find(gpuInd);
      EXPECT_EQ(itSeenIndex, seenIndices.end()) <<
        "Row " << r << " user index " << gpuInd << " was seen at both " <<
        itSeenIndex->second << " and " << i;
      seenIndices[gpuInd] = i;

      if (gpuInd != cpuInd) {
        // Differing indices are acceptable only when they refer to equal
        // values in the source data.
        float gpuGatherV = hostVal[r][gpuInd];
        float cpuGatherV = hostVal[r][cpuInd];
        EXPECT_EQ(gpuGatherV, cpuGatherV) <<
          "rows " << rows << " cols " << cols << " k " << k << " dir " << dir
          << " row " << r << " ind " << i << " source ind "
          << gpuInd << " " << cpuInd;
      }
    }
  }
}
// General test: random shapes and random k for the block-select path.
TEST(TestGpuSelect, test) {
  for (int trial = 0; trial < 10; ++trial) {
    const int numRows = faiss::gpu::randVal(10, 100);
    const int numCols = faiss::gpu::randVal(1, 30000);
    const int k = std::min(numCols, faiss::gpu::randVal(1, GPU_MAX_SELECTION_K));
    const bool largestFirst = faiss::gpu::randBool();
    testForSize(numRows, numCols, k, largestFirst, /*warp=*/false);
  }
}
// Degenerate selection size: k = 1 (block select).
TEST(TestGpuSelect, test1) {
  for (int trial = 0; trial < 5; ++trial) {
    const int numRows = faiss::gpu::randVal(10, 100);
    const int numCols = faiss::gpu::randVal(1, 30000);
    const bool largestFirst = faiss::gpu::randBool();
    testForSize(numRows, numCols, 1, largestFirst, /*warp=*/false);
  }
}
// k == #cols: every value must be returned, fully sorted (block select).
TEST(TestGpuSelect, testExact) {
  for (int trial = 0; trial < 5; ++trial) {
    const int numRows = faiss::gpu::randVal(10, 100);
    const int numCols = faiss::gpu::randVal(1, GPU_MAX_SELECTION_K);
    const bool largestFirst = faiss::gpu::randBool();
    testForSize(numRows, numCols, numCols, largestFirst, /*warp=*/false);
  }
}
// General test: random shapes and random k for the warp-select path.
TEST(TestGpuSelect, testWarp) {
  for (int trial = 0; trial < 10; ++trial) {
    const int numRows = faiss::gpu::randVal(10, 100);
    const int numCols = faiss::gpu::randVal(1, 30000);
    const int k = std::min(numCols, faiss::gpu::randVal(1, GPU_MAX_SELECTION_K));
    const bool largestFirst = faiss::gpu::randBool();
    testForSize(numRows, numCols, k, largestFirst, /*warp=*/true);
  }
}
// Degenerate selection size: k = 1 (warp select).
TEST(TestGpuSelect, test1Warp) {
  for (int trial = 0; trial < 5; ++trial) {
    const int numRows = faiss::gpu::randVal(10, 100);
    const int numCols = faiss::gpu::randVal(1, 30000);
    const bool largestFirst = faiss::gpu::randBool();
    testForSize(numRows, numCols, 1, largestFirst, /*warp=*/true);
  }
}
// k == #cols: every value must be returned, fully sorted (warp select).
TEST(TestGpuSelect, testExactWarp) {
  for (int trial = 0; trial < 5; ++trial) {
    const int numRows = faiss::gpu::randVal(10, 100);
    const int numCols = faiss::gpu::randVal(1, GPU_MAX_SELECTION_K);
    const bool largestFirst = faiss::gpu::randBool();
    testForSize(numRows, numCols, numCols, largestFirst, /*warp=*/true);
  }
}
// Test-suite entry point: initialize GoogleTest (consumes gtest flags from
// argv), pin the faiss test RNG seed so the random shapes above are
// reproducible, then run all registered tests.
int main(int argc, char** argv) {
  testing::InitGoogleTest(&argc, argv);
  // just run with a fixed test seed
  faiss::gpu::setTestSeed(100);
  return RUN_ALL_TESTS();
}
|
f6d8eac35f6b3ba931c0819dab9ddb05aadcc84d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "internal_shared.hpp"
#include "opencv2/gpu/device/limits.hpp"
namespace cv { namespace gpu { namespace device
{
namespace disp_bilateral_filter
{
__constant__ float* ctable_color;
__constant__ float* ctable_space;
__constant__ size_t ctable_space_step;
__constant__ int cndisp;
__constant__ int cradius;
__constant__ short cedge_disc;
__constant__ short cmax_disc;
// Uploads the filter parameters into __constant__ device memory so every
// subsequent disp_bilateral_filter kernel launch can read them without extra
// kernel arguments.
// @param table_color  device pointer to the precomputed color-weight LUT
// @param table_space  device 2D table of spatial weights (pointer + step)
// @param ndisp        number of disparities (stored in cndisp)
// @param radius       filter window radius (stored in cradius)
// @param edge_disc    disparity-jump threshold that triggers filtering
// @param max_disc     cap applied to per-candidate disparity cost
void disp_load_constants(float* table_color, PtrStepSzf table_space, int ndisp, int radius, short edge_disc, short max_disc)
{
    // Note: for the pointer symbols we copy the pointer VALUE, not the data.
    cudaSafeCall( hipMemcpyToSymbol(ctable_color, &table_color, sizeof(table_color)) );
    cudaSafeCall( hipMemcpyToSymbol(ctable_space, &table_space.data, sizeof(table_space.data)) );
    // Convert the row pitch from bytes to float elements before uploading.
    size_t table_space_step = table_space.step / sizeof(float);
    cudaSafeCall( hipMemcpyToSymbol(ctable_space_step, &table_space_step, sizeof(size_t)) );
    cudaSafeCall( hipMemcpyToSymbol(cndisp, &ndisp, sizeof(int)) );
    cudaSafeCall( hipMemcpyToSymbol(cradius, &radius, sizeof(int)) );
    cudaSafeCall( hipMemcpyToSymbol(cedge_disc, &edge_disc, sizeof(short)) );
    cudaSafeCall( hipMemcpyToSymbol(cmax_disc, &max_disc, sizeof(short)) );
}
// Chebyshev (max-of-channels) distance between two pixels. The generic
// template reads three channels; the <1> specialization below handles
// single-channel images.
template <int channels>
struct DistRgbMax
{
    static __device__ __forceinline__ uchar calc(const uchar* a, const uchar* b)
    {
        // Per-channel absolute differences.
        uchar d0 = ::abs(a[0] - b[0]);
        uchar d1 = ::abs(a[1] - b[1]);
        uchar d2 = ::abs(a[2] - b[2]);
        // Distance is the largest channel difference.
        uchar m = ::max(d0, d1);
        return ::max(m, d2);
    }
};
// Single-channel specialization: the pixel distance is just the absolute
// intensity difference.
template <>
struct DistRgbMax<1>
{
    static __device__ __forceinline__ uchar calc(const uchar* a, const uchar* b)
    {
        return ::abs(a[0] - b[0]);
    }
};
// One red/black pass of the joint bilateral disparity filter.
// Each thread handles one pixel of one checkerboard color: `t` (0 or 1)
// selects the parity via ((y + t) & 1), so the host launches the kernel twice
// per iteration to cover all pixels. Reads the color/space weight tables and
// thresholds from __constant__ memory (see disp_load_constants).
// @param disp       disparity map (updated in place)
// @param disp_step  disparity row pitch in elements
// @param img        guidance image, `channels` interleaved uchar per pixel
// @param img_step   image row pitch in bytes per row of uchar
// @param h, w       disparity map height/width
template <int channels, typename T>
__global__ void disp_bilateral_filter(int t, T* disp, size_t disp_step, const uchar* img, size_t img_step, int h, int w)
{
    const int y = blockIdx.y * blockDim.y + threadIdx.y;
    // Each thread block covers 2*blockDim.x columns; the parity bit picks
    // which of the two interleaved columns this thread owns.
    const int x = ((blockIdx.x * blockDim.x + threadIdx.x) << 1) + ((y + t) & 1);
    // Disparity candidates: center, up, left, down, right.
    T dp[5];
    // Skip the one-pixel border (the 4-neighborhood would fall outside).
    if (y > 0 && y < h - 1 && x > 0 && x < w - 1)
    {
        dp[0] = *(disp + (y  ) * disp_step + x + 0);
        dp[1] = *(disp + (y-1) * disp_step + x + 0);
        dp[2] = *(disp + (y  ) * disp_step + x - 1);
        dp[3] = *(disp + (y+1) * disp_step + x + 0);
        dp[4] = *(disp + (y  ) * disp_step + x + 1);
        // Only filter pixels sitting on a disparity discontinuity: at least
        // one neighbor differs from the center by cedge_disc or more.
        if(::abs(dp[1] - dp[0]) >= cedge_disc || ::abs(dp[2] - dp[0]) >= cedge_disc || ::abs(dp[3] - dp[0]) >= cedge_disc || ::abs(dp[4] - dp[0]) >= cedge_disc)
        {
            // Clamp the filter window to the image bounds.
            const int ymin = ::max(0, y - cradius);
            const int xmin = ::max(0, x - cradius);
            const int ymax = ::min(h - 1, y + cradius);
            const int xmax = ::min(w - 1, x + cradius);
            // Accumulated bilateral cost for each of the 5 candidates.
            float cost[] = {0.0f, 0.0f, 0.0f, 0.0f, 0.0f};
            // Center pixel in the guidance image.
            const uchar* ic = img + y * img_step + channels * x;
            for(int yi = ymin; yi <= ymax; yi++)
            {
                const T* disp_y = disp + yi * disp_step;
                for(int xi = xmin; xi <= xmax; xi++)
                {
                    const uchar* in = img + yi * img_step + channels * xi;
                    uchar dist_rgb = DistRgbMax<channels>::calc(in, ic);
                    // Bilateral weight = color term (LUT by color distance)
                    // times spatial term (LUT by |dy|, |dx|).
                    const float weight = ctable_color[dist_rgb] * (ctable_space + ::abs(y-yi)* ctable_space_step)[::abs(x-xi)];
                    const T disp_reg = disp_y[xi];
                    // Truncated L1 data term, capped at cmax_disc.
                    cost[0] += ::min(cmax_disc, ::abs(disp_reg - dp[0])) * weight;
                    cost[1] += ::min(cmax_disc, ::abs(disp_reg - dp[1])) * weight;
                    cost[2] += ::min(cmax_disc, ::abs(disp_reg - dp[2])) * weight;
                    cost[3] += ::min(cmax_disc, ::abs(disp_reg - dp[3])) * weight;
                    cost[4] += ::min(cmax_disc, ::abs(disp_reg - dp[4])) * weight;
                }
            }
            // Pick the candidate disparity with the minimal aggregated cost.
            float minimum = numeric_limits<float>::max();
            int id = 0;
            if (cost[0] < minimum)
            {
                minimum = cost[0];
                id = 0;
            }
            if (cost[1] < minimum)
            {
                minimum = cost[1];
                id = 1;
            }
            if (cost[2] < minimum)
            {
                minimum = cost[2];
                id = 2;
            }
            if (cost[3] < minimum)
            {
                minimum = cost[3];
                id = 3;
            }
            if (cost[4] < minimum)
            {
                minimum = cost[4];
                id = 4;
            }
            // Write the winner back in place.
            *(disp + y * disp_step + x) = dp[id];
        }
    }
}
// Host launcher: runs `iters` red/black iterations of the disparity
// bilateral filter kernel for 1- or 3-channel guidance images.
// Each iteration launches the kernel twice (t = 0 then t = 1) so both
// checkerboard parities are updated. Synchronizes when running on the
// default (NULL) stream.
template <typename T>
void disp_bilateral_filter(PtrStepSz<T> disp, PtrStepSzb img, int channels, int iters, hipStream_t stream)
{
    dim3 threads(32, 8, 1);
    dim3 grid(1, 1, 1);
    // Each thread covers two columns (checkerboard), hence the << 1.
    grid.x = divUp(disp.cols, threads.x << 1);
    grid.y = divUp(disp.rows, threads.y);
    switch (channels)
    {
    case 1:
        for (int i = 0; i < iters; ++i)
        {
            hipLaunchKernelGGL(( disp_bilateral_filter<1>), dim3(grid), dim3(threads), 0, stream, 0, disp.data, disp.step/sizeof(T), img.data, img.step, disp.rows, disp.cols);
            cudaSafeCall( hipGetLastError() );
            hipLaunchKernelGGL(( disp_bilateral_filter<1>), dim3(grid), dim3(threads), 0, stream, 1, disp.data, disp.step/sizeof(T), img.data, img.step, disp.rows, disp.cols);
            cudaSafeCall( hipGetLastError() );
        }
        break;
    case 3:
        for (int i = 0; i < iters; ++i)
        {
            hipLaunchKernelGGL(( disp_bilateral_filter<3>), dim3(grid), dim3(threads), 0, stream, 0, disp.data, disp.step/sizeof(T), img.data, img.step, disp.rows, disp.cols);
            cudaSafeCall( hipGetLastError() );
            hipLaunchKernelGGL(( disp_bilateral_filter<3>), dim3(grid), dim3(threads), 0, stream, 1, disp.data, disp.step/sizeof(T), img.data, img.step, disp.rows, disp.cols);
            cudaSafeCall( hipGetLastError() );
        }
        break;
    default:
        // Only grayscale and BGR guidance images are supported.
        cv::gpu::error("Unsupported channels count", __FILE__, __LINE__, "disp_bilateral_filter");
    }
    // Blocking semantics for the legacy default stream.
    if (stream == 0)
        cudaSafeCall( hipDeviceSynchronize() );
}
// Explicit instantiations for the two disparity element types used by OpenCV.
template void disp_bilateral_filter<uchar>(PtrStepSz<uchar> disp, PtrStepSzb img, int channels, int iters, hipStream_t stream);
template void disp_bilateral_filter<short>(PtrStepSz<short> disp, PtrStepSzb img, int channels, int iters, hipStream_t stream);
} // namespace bilateral_filter
}}} // namespace cv { namespace gpu { namespace device
#endif /* CUDA_DISABLER */ | f6d8eac35f6b3ba931c0819dab9ddb05aadcc84d.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "internal_shared.hpp"
#include "opencv2/gpu/device/limits.hpp"
namespace cv { namespace gpu { namespace device
{
namespace disp_bilateral_filter
{
__constant__ float* ctable_color;
__constant__ float* ctable_space;
__constant__ size_t ctable_space_step;
__constant__ int cndisp;
__constant__ int cradius;
__constant__ short cedge_disc;
__constant__ short cmax_disc;
// Uploads the filter parameters into __constant__ device memory for the
// disp_bilateral_filter kernels (pointer symbols receive the pointer VALUE,
// not the pointed-to data).
void disp_load_constants(float* table_color, PtrStepSzf table_space, int ndisp, int radius, short edge_disc, short max_disc)
{
    cudaSafeCall( cudaMemcpyToSymbol(ctable_color, &table_color, sizeof(table_color)) );
    cudaSafeCall( cudaMemcpyToSymbol(ctable_space, &table_space.data, sizeof(table_space.data)) );
    // Row pitch is converted from bytes to float elements before the upload.
    size_t table_space_step = table_space.step / sizeof(float);
    cudaSafeCall( cudaMemcpyToSymbol(ctable_space_step, &table_space_step, sizeof(size_t)) );
    cudaSafeCall( cudaMemcpyToSymbol(cndisp, &ndisp, sizeof(int)) );
    cudaSafeCall( cudaMemcpyToSymbol(cradius, &radius, sizeof(int)) );
    cudaSafeCall( cudaMemcpyToSymbol(cedge_disc, &edge_disc, sizeof(short)) );
    cudaSafeCall( cudaMemcpyToSymbol(cmax_disc, &max_disc, sizeof(short)) );
}
// Chebyshev (max-of-channels) pixel distance; generic version reads three
// channels, the <1> specialization below handles grayscale.
template <int channels>
struct DistRgbMax
{
    static __device__ __forceinline__ uchar calc(const uchar* a, const uchar* b)
    {
        // Per-channel absolute differences; the distance is their maximum.
        uchar x = ::abs(a[0] - b[0]);
        uchar y = ::abs(a[1] - b[1]);
        uchar z = ::abs(a[2] - b[2]);
        return (::max(::max(x, y), z));
    }
};
// Single-channel specialization: absolute intensity difference.
template <>
struct DistRgbMax<1>
{
    static __device__ __forceinline__ uchar calc(const uchar* a, const uchar* b)
    {
        return ::abs(a[0] - b[0]);
    }
};
// One red/black pass of the joint bilateral disparity filter.
// `t` (0 or 1) selects the checkerboard parity via ((y + t) & 1); the host
// launches the kernel twice per iteration so both parities get updated.
// Weight tables and thresholds come from __constant__ memory (see
// disp_load_constants above).
template <int channels, typename T>
__global__ void disp_bilateral_filter(int t, T* disp, size_t disp_step, const uchar* img, size_t img_step, int h, int w)
{
    const int y = blockIdx.y * blockDim.y + threadIdx.y;
    // A block covers 2*blockDim.x columns; the parity bit picks one of the
    // two interleaved columns for this thread.
    const int x = ((blockIdx.x * blockDim.x + threadIdx.x) << 1) + ((y + t) & 1);
    // Disparity candidates: center, up, left, down, right.
    T dp[5];
    // Skip the one-pixel border so the 4-neighborhood stays in bounds.
    if (y > 0 && y < h - 1 && x > 0 && x < w - 1)
    {
        dp[0] = *(disp + (y  ) * disp_step + x + 0);
        dp[1] = *(disp + (y-1) * disp_step + x + 0);
        dp[2] = *(disp + (y  ) * disp_step + x - 1);
        dp[3] = *(disp + (y+1) * disp_step + x + 0);
        dp[4] = *(disp + (y  ) * disp_step + x + 1);
        // Only filter pixels that sit on a disparity discontinuity.
        if(::abs(dp[1] - dp[0]) >= cedge_disc || ::abs(dp[2] - dp[0]) >= cedge_disc || ::abs(dp[3] - dp[0]) >= cedge_disc || ::abs(dp[4] - dp[0]) >= cedge_disc)
        {
            // Filter window clamped to the image bounds.
            const int ymin = ::max(0, y - cradius);
            const int xmin = ::max(0, x - cradius);
            const int ymax = ::min(h - 1, y + cradius);
            const int xmax = ::min(w - 1, x + cradius);
            // Aggregated bilateral cost for each candidate.
            float cost[] = {0.0f, 0.0f, 0.0f, 0.0f, 0.0f};
            // Center pixel in the guidance image.
            const uchar* ic = img + y * img_step + channels * x;
            for(int yi = ymin; yi <= ymax; yi++)
            {
                const T* disp_y = disp + yi * disp_step;
                for(int xi = xmin; xi <= xmax; xi++)
                {
                    const uchar* in = img + yi * img_step + channels * xi;
                    uchar dist_rgb = DistRgbMax<channels>::calc(in, ic);
                    // Bilateral weight = color LUT term * spatial LUT term.
                    const float weight = ctable_color[dist_rgb] * (ctable_space + ::abs(y-yi)* ctable_space_step)[::abs(x-xi)];
                    const T disp_reg = disp_y[xi];
                    // Truncated L1 data term, capped at cmax_disc.
                    cost[0] += ::min(cmax_disc, ::abs(disp_reg - dp[0])) * weight;
                    cost[1] += ::min(cmax_disc, ::abs(disp_reg - dp[1])) * weight;
                    cost[2] += ::min(cmax_disc, ::abs(disp_reg - dp[2])) * weight;
                    cost[3] += ::min(cmax_disc, ::abs(disp_reg - dp[3])) * weight;
                    cost[4] += ::min(cmax_disc, ::abs(disp_reg - dp[4])) * weight;
                }
            }
            // Select the candidate with minimal aggregated cost.
            float minimum = numeric_limits<float>::max();
            int id = 0;
            if (cost[0] < minimum)
            {
                minimum = cost[0];
                id = 0;
            }
            if (cost[1] < minimum)
            {
                minimum = cost[1];
                id = 1;
            }
            if (cost[2] < minimum)
            {
                minimum = cost[2];
                id = 2;
            }
            if (cost[3] < minimum)
            {
                minimum = cost[3];
                id = 3;
            }
            if (cost[4] < minimum)
            {
                minimum = cost[4];
                id = 4;
            }
            // Write the winning disparity back in place.
            *(disp + y * disp_step + x) = dp[id];
        }
    }
}
// Host launcher: `iters` red/black iterations (two launches per iteration,
// t = 0 then t = 1) for 1- or 3-channel guidance images; synchronizes when
// running on the default (NULL) stream.
template <typename T>
void disp_bilateral_filter(PtrStepSz<T> disp, PtrStepSzb img, int channels, int iters, cudaStream_t stream)
{
    dim3 threads(32, 8, 1);
    dim3 grid(1, 1, 1);
    // Each thread covers two columns (checkerboard), hence the << 1.
    grid.x = divUp(disp.cols, threads.x << 1);
    grid.y = divUp(disp.rows, threads.y);
    switch (channels)
    {
    case 1:
        for (int i = 0; i < iters; ++i)
        {
            disp_bilateral_filter<1><<<grid, threads, 0, stream>>>(0, disp.data, disp.step/sizeof(T), img.data, img.step, disp.rows, disp.cols);
            cudaSafeCall( cudaGetLastError() );
            disp_bilateral_filter<1><<<grid, threads, 0, stream>>>(1, disp.data, disp.step/sizeof(T), img.data, img.step, disp.rows, disp.cols);
            cudaSafeCall( cudaGetLastError() );
        }
        break;
    case 3:
        for (int i = 0; i < iters; ++i)
        {
            disp_bilateral_filter<3><<<grid, threads, 0, stream>>>(0, disp.data, disp.step/sizeof(T), img.data, img.step, disp.rows, disp.cols);
            cudaSafeCall( cudaGetLastError() );
            disp_bilateral_filter<3><<<grid, threads, 0, stream>>>(1, disp.data, disp.step/sizeof(T), img.data, img.step, disp.rows, disp.cols);
            cudaSafeCall( cudaGetLastError() );
        }
        break;
    default:
        // Only grayscale and BGR guidance images are supported.
        cv::gpu::error("Unsupported channels count", __FILE__, __LINE__, "disp_bilateral_filter");
    }
    // Blocking semantics for the legacy default stream.
    if (stream == 0)
        cudaSafeCall( cudaDeviceSynchronize() );
}
// Explicit instantiations for the two disparity element types used by OpenCV.
template void disp_bilateral_filter<uchar>(PtrStepSz<uchar> disp, PtrStepSzb img, int channels, int iters, cudaStream_t stream);
template void disp_bilateral_filter<short>(PtrStepSz<short> disp, PtrStepSzb img, int channels, int iters, cudaStream_t stream);
} // namespace bilateral_filter
}}} // namespace cv { namespace gpu { namespace device
#endif /* CUDA_DISABLER */ |
1a44a13f5af753e1d76417f7b3ad01ee19e2c54b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Fills every INCX-th element of X (N elements total) with the scalar ALPHA.
// Supports a 2D grid: the grid is flattened into a linear thread index first.
__global__ void _bcnn_cuda_fill_f32_kernel(int N, float ALPHA, float *X, int INCX)
{
    // Flatten the (possibly 2D) grid into one linear thread index.
    const int block = blockIdx.y * gridDim.x + blockIdx.x;
    const int idx = block * blockDim.x + threadIdx.x;
    if (idx >= N) {
        return;  // grid may overshoot N; extra threads do nothing
    }
    X[idx * INCX] = ALPHA;
}
// Fills every INCX-th element of X (N elements total) with the scalar ALPHA.
// Supports a 2D grid: the grid is flattened into a linear thread index first.
__global__ void _bcnn_cuda_fill_f32_kernel(int N, float ALPHA, float *X, int INCX)
{
    // Flatten the (possibly 2D) grid into one linear thread index.
    const int block = blockIdx.y * gridDim.x + blockIdx.x;
    const int idx = block * blockDim.x + threadIdx.x;
    if (idx >= N) {
        return;  // grid may overshoot N; extra threads do nothing
    }
    X[idx * INCX] = ALPHA;
}
d77d91ff2c973b4f6d066a55eb96e14c0b4abc55.hip | // !!! This is a file automatically generated by hipify!!!
#ifndef __BASIC_INTEROP_H__
#define __BASIC_INTEROP_H__
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <iostream>
#ifdef _WIN32
# define WINDOWS_LEAN_AND_MEAN
# define NOMINMAX
# include <windows.h>
#endif
// OpenGL Graphics includes
#include <GL/glew.h>
#if defined (__APPLE__) || defined(MACOSX)
#include <GLUT/glut.h>
#ifndef glutCloseFunc
#define glutCloseFunc glutWMCloseFunc
#endif
#else
#include <GL/freeglut.h>
#endif
// includes, cuda
#include <hip/hip_runtime.h>
#include <cuda_gl_interop.h>
// Utilities and timing functions
#include <helper_functions.h> // includes cuda.h and hip/hip_runtime_api.h
#include <timer.h> // timing functions
// CUDA helper functions
#include <helper_cuda.h> // helper functions for CUDA error check
#include <helper_cuda_gl.h> // helper functions for CUDA/GL interop
// One animated particle as stored in the shared OpenGL vertex buffer.
typedef struct
{
    float4 pos;        // position; x/y bounce inside [-1, 1], z oscillates
    float4 color;      // RGBA color (written by the GL side, not the kernel)
    float4 dir_speed;  // x/y: direction signs (+/-1), z: per-particle speed
} Vertex;
///////////////////////////////////////////////////////////////////////////////
//! Simple kernel to modify vertex positions in sine wave pattern
//! @param data data in global memory
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
//! Animates one particle per thread: bounces pos.x/pos.y inside [-1, 1] by
//! flipping dir_speed, and oscillates pos.z over time.
//! Fix: adds a bounds guard so threads outside the width x height vertex
//! array exit instead of writing out of bounds when the grid overshoots.
//! @param v      vertex array of width*height elements (in mapped GL buffer)
//! @param width  vertices per row
//! @param height vertex rows
//! @param time   animation time driving the z oscillation
///////////////////////////////////////////////////////////////////////////////
__global__ void kernel(Vertex* v, unsigned int width, unsigned int height, float time)
{
    unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
    unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
    // Guard against out-of-range threads (original indexed unconditionally).
    if (x >= width || y >= height)
        return;
    // write output vertex
    int index = y * width + x;
    // Bounce off the left/right walls by flipping the horizontal direction.
    if( v[index].pos.x < -0.99f )
    {
        v[index].dir_speed.x = 1.0f;
    }
    else if( v[index].pos.x > 0.99f )
    {
        v[index].dir_speed.x = -1.0f;
    }
    v[index].pos.x += (0.001f*v[index].dir_speed.x*v[index].dir_speed.z);
    // Bounce off the top/bottom walls by flipping the vertical direction.
    if( v[index].pos.y < -0.99f )
    {
        v[index].dir_speed.y = 1.0f;
    }
    else if( v[index].pos.y > 0.99f )
    {
        v[index].dir_speed.y = -1.0f;
    }
    v[index].pos.y += (0.001f*v[index].dir_speed.y*v[index].dir_speed.z);
    // To see the effect in 3D: oscillate depth with a per-particle frequency.
    v[index].pos.z = sin(time*v[index].dir_speed.z);
}
// Maps the shared GL buffer, runs the animation kernel over a dim x dim
// vertex grid on the default stream, then unmaps the buffer (the unmap also
// synchronizes CUDA work with subsequent GL rendering).
// NOTE(review): devPtr is passed BY VALUE and is immediately overwritten by
// the mapped pointer below, so the caller's argument is never used — likely
// the parameter was meant to be local or passed by reference; confirm with
// callers. Assumes dim is a multiple of 16 (no remainder blocks) — TODO
// confirm at the call site.
extern "C" void runCuda(cudaGraphicsResource** resource, Vertex* devPtr, int dim, float dt)
{
    //Getting an actual address in device memory that can be passed to our kernel.
    //We achieve this by instructing the CUDA runtime to map the
    //shared resource and then by requesting a pointer to the mapped resource.
    checkCudaErrors( hipGraphicsMapResources( 1, resource, NULL ) );
    // devPtr is our device memory
    size_t size;
    checkCudaErrors( hipGraphicsResourceGetMappedPointer( (void**)&devPtr, &size, *resource) );
    //launchKernel (devPtr, DIM, dt);
    dim3 numBlocks(dim/16,dim/16);
    dim3 numThreads(16,16);
    hipLaunchKernelGGL(( kernel), dim3(numBlocks),dim3(numThreads), 0, 0, devPtr, dim, dim, dt );
    //unmapping our shared resource. This call is important to make prior to performing rendering tasks because it
    //provides synchronization between the CUDA and graphics portions of the application. Specifically,
    //it implies that all CUDA operations performed prior to the call
    //to hipGraphicsUnmapResources() will complete before ensuing graphics
    //calls begin.
    checkCudaErrors( hipGraphicsUnmapResources( 1, resource, NULL ) );
}
// NOTE(review): despite the name, this UNMAPS the resource rather than
// unregistering it (hipGraphicsUnregisterResource) — confirm the intended
// cleanup semantics with the caller before renaming or changing it.
extern "C" void unregRes(cudaGraphicsResource** res)
{
    checkCudaErrors( hipGraphicsUnmapResources( 1, res, NULL ) );
}
// Selects a CUDA device capable of OpenGL interop, honoring command-line
// options (delegates to the CUDA samples helper gpuGLDeviceInit).
extern "C" void chooseDev(int ARGC, const char **ARGV)
{
    gpuGLDeviceInit(ARGC, ARGV);
}
// Registers the GL buffer object `vbo` with the CUDA runtime so it can later
// be mapped for kernel access; WriteDiscard tells the driver CUDA will only
// write, never read, the previous contents.
extern "C" void regBuffer(cudaGraphicsResource** res, unsigned int& vbo)
{
    // setting up graphics interoperability by notifying the CUDA runtime
    //that we intend to share the OpenGL buffer named vbo with CUDA.
    checkCudaErrors( hipGraphicsGLRegisterBuffer( res, vbo, hipGraphicsMapFlagsWriteDiscard ) );
}
#endif
| d77d91ff2c973b4f6d066a55eb96e14c0b4abc55.cu | #ifndef __BASIC_INTEROP_H__
#define __BASIC_INTEROP_H__
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <iostream>
#ifdef _WIN32
# define WINDOWS_LEAN_AND_MEAN
# define NOMINMAX
# include <windows.h>
#endif
// OpenGL Graphics includes
#include <GL/glew.h>
#if defined (__APPLE__) || defined(MACOSX)
#include <GLUT/glut.h>
#ifndef glutCloseFunc
#define glutCloseFunc glutWMCloseFunc
#endif
#else
#include <GL/freeglut.h>
#endif
// includes, cuda
#include <cuda_runtime.h>
#include <cuda_gl_interop.h>
// Utilities and timing functions
#include <helper_functions.h> // includes cuda.h and cuda_runtime_api.h
#include <timer.h> // timing functions
// CUDA helper functions
#include <helper_cuda.h> // helper functions for CUDA error check
#include <helper_cuda_gl.h> // helper functions for CUDA/GL interop
// One animated particle as stored in the shared OpenGL vertex buffer.
typedef struct
{
    float4 pos;        // position; x/y bounce inside [-1, 1], z oscillates
    float4 color;      // RGBA color (written by the GL side, not the kernel)
    float4 dir_speed;  // x/y: direction signs (+/-1), z: per-particle speed
} Vertex;
///////////////////////////////////////////////////////////////////////////////
//! Simple kernel to modify vertex positions in sine wave pattern
//! @param data data in global memory
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
//! Animates one particle per thread: bounces pos.x/pos.y inside [-1, 1] by
//! flipping dir_speed, and oscillates pos.z over time.
//! Fix: adds a bounds guard so threads outside the width x height vertex
//! array exit instead of writing out of bounds when the grid overshoots.
//! @param v      vertex array of width*height elements (in mapped GL buffer)
//! @param width  vertices per row
//! @param height vertex rows
//! @param time   animation time driving the z oscillation
///////////////////////////////////////////////////////////////////////////////
__global__ void kernel(Vertex* v, unsigned int width, unsigned int height, float time)
{
    unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
    unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
    // Guard against out-of-range threads (original indexed unconditionally).
    if (x >= width || y >= height)
        return;
    // write output vertex
    int index = y * width + x;
    // Bounce off the left/right walls by flipping the horizontal direction.
    if( v[index].pos.x < -0.99f )
    {
        v[index].dir_speed.x = 1.0f;
    }
    else if( v[index].pos.x > 0.99f )
    {
        v[index].dir_speed.x = -1.0f;
    }
    v[index].pos.x += (0.001f*v[index].dir_speed.x*v[index].dir_speed.z);
    // Bounce off the top/bottom walls by flipping the vertical direction.
    if( v[index].pos.y < -0.99f )
    {
        v[index].dir_speed.y = 1.0f;
    }
    else if( v[index].pos.y > 0.99f )
    {
        v[index].dir_speed.y = -1.0f;
    }
    v[index].pos.y += (0.001f*v[index].dir_speed.y*v[index].dir_speed.z);
    // To see the effect in 3D: oscillate depth with a per-particle frequency.
    v[index].pos.z = sin(time*v[index].dir_speed.z);
}
// Maps the shared GL buffer, runs the animation kernel over a dim x dim
// vertex grid on the default stream, then unmaps the buffer (the unmap also
// synchronizes CUDA work with subsequent GL rendering).
// NOTE(review): devPtr is passed BY VALUE and is immediately overwritten by
// the mapped pointer below, so the caller's argument is never used — likely
// meant to be local or by-reference; confirm with callers. Assumes dim is a
// multiple of 16 — TODO confirm at the call site.
extern "C" void runCuda(cudaGraphicsResource** resource, Vertex* devPtr, int dim, float dt)
{
    //Getting an actual address in device memory that can be passed to our kernel.
    //We achieve this by instructing the CUDA runtime to map the
    //shared resource and then by requesting a pointer to the mapped resource.
    checkCudaErrors( cudaGraphicsMapResources( 1, resource, NULL ) );
    // devPtr is our device memory
    size_t size;
    checkCudaErrors( cudaGraphicsResourceGetMappedPointer( (void**)&devPtr, &size, *resource) );
    //launchKernel (devPtr, DIM, dt);
    dim3 numBlocks(dim/16,dim/16);
    dim3 numThreads(16,16);
    kernel<<<numBlocks,numThreads>>>( devPtr, dim, dim, dt );
    //unmapping our shared resource. This call is important to make prior to performing rendering tasks because it
    //provides synchronization between the CUDA and graphics portions of the application. Specifically,
    //it implies that all CUDA operations performed prior to the call
    //to cudaGraphicsUnmapResources() will complete before ensuing graphics
    //calls begin.
    checkCudaErrors( cudaGraphicsUnmapResources( 1, resource, NULL ) );
}
// NOTE(review): despite the name, this UNMAPS the resource rather than
// unregistering it (cudaGraphicsUnregisterResource) — confirm the intended
// cleanup semantics with the caller before renaming or changing it.
extern "C" void unregRes(cudaGraphicsResource** res)
{
    checkCudaErrors( cudaGraphicsUnmapResources( 1, res, NULL ) );
}
// Selects a CUDA device capable of OpenGL interop, honoring command-line
// options (delegates to the CUDA samples helper gpuGLDeviceInit).
extern "C" void chooseDev(int ARGC, const char **ARGV)
{
    gpuGLDeviceInit(ARGC, ARGV);
}
// Registers the GL buffer object `vbo` with the CUDA runtime so it can later
// be mapped for kernel access; WriteDiscard tells the driver CUDA will only
// write, never read, the previous contents.
extern "C" void regBuffer(cudaGraphicsResource** res, unsigned int& vbo)
{
    // setting up graphics interoperability by notifying the CUDA runtime
    //that we intend to share the OpenGL buffer named vbo with CUDA.
    checkCudaErrors( cudaGraphicsGLRegisterBuffer( res, vbo, cudaGraphicsMapFlagsWriteDiscard ) );
}
#endif
|
58783caf396164770b68e8aa955d3596938135dc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright 2019 Yan Yan
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <ATen/ATen.h>
#include <chrono>
#include <limits>
#include <spconv/pillar_scatter_functor.h>
#include <tensorview/cuda_utils.h>
#include <tensorview/kernel_utils.h>
#include <tensorview/mp_helper.h>
#include <tensorview/tensorview.h>
#include <type_traits>
#include <utility/timer.h>
namespace spconv {
// Scatters per-point pillar features onto a dense canvas.
// From the write below, canvas is indexed as (batch, feature, y, x) and
// coors holds per-point coordinates in rows 0 (batch), 2 (y), 3 (x).
// NOTE(review): coors is stored as T (e.g. float) and cast to int here —
// presumably the values are integral grid coordinates; confirm with callers.
// tv::KernelLoopX/Y iterate over the index range — exact striding semantics
// are defined in tensorview's kernel_utils (not visible here).
template <typename T, typename Index>
__global__ void pointPillarsScatterKernel(tv::TensorView<T> canvas,
                                          tv::TensorView<const T> features,
                                          tv::TensorView<const T> coors) {
    auto numFeatures = features.dim(0);  // features is (numFeatures, numPoints)
    auto numPoints = features.dim(1);
    for (int i : tv::KernelLoopX<int>(numPoints)) {
        for (int ifeature : tv::KernelLoopY<int>(numFeatures)) {
            canvas(int(coors(0, i)), ifeature, int(coors(2, i)), int(coors(3, i))) =
                features(ifeature, i);
        }
    }
}
namespace functor {
// GPU specialization of PointPillarScatter: configures the launch and runs
// the scatter kernel on the stream owned by the tv::GPU device wrapper.
template <typename T, typename Index>
struct PointPillarScatter<tv::GPU, T, Index> {
    void operator()(const tv::GPU &d, tv::TensorView<T> canvas,
                    tv::TensorView<const T> features,
                    tv::TensorView<const T> coors) {
        // 32x32 thread blocks: grid.x tiles the points axis (features.dim(1)),
        // grid.y tiles the features axis (features.dim(0)).
        auto grid = dim3(tv::cuda::DivUp(features.dim(1), 32),
                         tv::cuda::DivUp(features.dim(0), 32));
        hipLaunchKernelGGL(( pointPillarsScatterKernel<T, Index>)
            , dim3(grid), dim3(dim3(32, 32)), 0, d.getStream(), canvas, features, coors);
        TV_CHECK_CUDA_ERR();  // surface launch/config errors immediately
    }
};
} // namespace functor
// Explicit instantiations for the supported element types.
#define DECLARE_GPU_SPECS_T_INDEX(T, Index) \
template struct functor::PointPillarScatter<tv::GPU, T, Index>;
#define DECLARE_GPU_SPECS(T) DECLARE_GPU_SPECS_T_INDEX(T, int);
DECLARE_GPU_SPECS(float);
DECLARE_GPU_SPECS(double);
DECLARE_GPU_SPECS(at::Half);
#undef DECLARE_GPU_SPECS
#undef DECLARE_GPU_SPECS_T_INDEX
} // namespace spconv | 58783caf396164770b68e8aa955d3596938135dc.cu | // Copyright 2019 Yan Yan
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <ATen/ATen.h>
#include <chrono>
#include <limits>
#include <spconv/pillar_scatter_functor.h>
#include <tensorview/cuda_utils.h>
#include <tensorview/kernel_utils.h>
#include <tensorview/mp_helper.h>
#include <tensorview/tensorview.h>
#include <type_traits>
#include <utility/timer.h>
namespace spconv {
// Scatters per-point pillar features onto a dense canvas.
// From the write below, canvas is indexed as (batch, feature, y, x) and
// coors holds per-point coordinates in rows 0 (batch), 2 (y), 3 (x).
// NOTE(review): coors is stored as T (e.g. float) and cast to int here —
// presumably the values are integral grid coordinates; confirm with callers.
template <typename T, typename Index>
__global__ void pointPillarsScatterKernel(tv::TensorView<T> canvas,
                                          tv::TensorView<const T> features,
                                          tv::TensorView<const T> coors) {
    auto numFeatures = features.dim(0);  // features is (numFeatures, numPoints)
    auto numPoints = features.dim(1);
    for (int i : tv::KernelLoopX<int>(numPoints)) {
        for (int ifeature : tv::KernelLoopY<int>(numFeatures)) {
            canvas(int(coors(0, i)), ifeature, int(coors(2, i)), int(coors(3, i))) =
                features(ifeature, i);
        }
    }
}
namespace functor {
// GPU specialization of PointPillarScatter: configures the launch and runs
// the scatter kernel on the stream owned by the tv::GPU device wrapper.
template <typename T, typename Index>
struct PointPillarScatter<tv::GPU, T, Index> {
    void operator()(const tv::GPU &d, tv::TensorView<T> canvas,
                    tv::TensorView<const T> features,
                    tv::TensorView<const T> coors) {
        // 32x32 thread blocks: grid.x tiles the points axis (features.dim(1)),
        // grid.y tiles the features axis (features.dim(0)).
        auto grid = dim3(tv::cuda::DivUp(features.dim(1), 32),
                         tv::cuda::DivUp(features.dim(0), 32));
        pointPillarsScatterKernel<T, Index>
            <<<grid, dim3(32, 32), 0, d.getStream()>>>(canvas, features, coors);
        TV_CHECK_CUDA_ERR();  // surface launch/config errors immediately
    }
};
} // namespace functor
// Explicit instantiations for the supported element types.
#define DECLARE_GPU_SPECS_T_INDEX(T, Index) \
template struct functor::PointPillarScatter<tv::GPU, T, Index>;
#define DECLARE_GPU_SPECS(T) DECLARE_GPU_SPECS_T_INDEX(T, int);
DECLARE_GPU_SPECS(float);
DECLARE_GPU_SPECS(double);
DECLARE_GPU_SPECS(at::Half);
#undef DECLARE_GPU_SPECS
#undef DECLARE_GPU_SPECS_T_INDEX
} // namespace spconv |
fc3973ba7dd09ed74dd463819557706c7b756181.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hipfft.h>
#include "cu_errchk.h"
#include "cublas_setstream.h"
/**
* Set the stream for the cuBLAS handle.
* @param handle - [hipblasHandle_t*] : The cuBLAS handle.
* @param stream - [hipStream_t*] : CUDA stream.
*/
/**
 * Set the stream for the cuBLAS/hipBLAS handle.
 * @param handle - [hipblasHandle_t*] : The cuBLAS handle.
 * @param stream - [hipStream_t*]     : CUDA stream, or NULL for the default
 *                                      (NULL) stream.
 */
void cublas_setstream(hipblasHandle_t *handle, hipStream_t *stream)
{
    // Fall back to the default (NULL) stream when no stream object is given;
    // never dereference a NULL stream pointer.
    hipStream_t target = (stream != NULL) ? *stream : NULL;
    gpuBlasErrchk(hipblasSetStream(*handle, target));
}
| fc3973ba7dd09ed74dd463819557706c7b756181.cu | #include <cuda.h>
#include <cufft.h>
#include "cu_errchk.h"
#include "cublas_setstream.h"
/**
* Set the stream for the cuBLAS handle.
* @param handle - [cublasHandle_t*] : The cuBLAS handle.
* @param stream - [cudaStream_t*] : CUDA stream.
*/
/**
 * Set the stream for the cuBLAS handle.
 * @param handle - [cublasHandle_t*] : The cuBLAS handle.
 * @param stream - [cudaStream_t*]   : CUDA stream, or NULL for the default
 *                                     (NULL) stream.
 */
void cublas_setstream(cublasHandle_t *handle, cudaStream_t *stream)
{
    // Fall back to the default (NULL) stream when no stream object is given;
    // never dereference a NULL stream pointer.
    cudaStream_t target = (stream != NULL) ? *stream : NULL;
    gpuBlasErrchk(cublasSetStream(*handle, target));
}
|
812d15cac8324a8d4090dba7aa9789e4305ff69b.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "display_rand.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Auto-generated launch-configuration benchmark for display_rand: sweeps
// matrix sizes x 20 block shapes, timing 1000 launches each and printing
// [usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)] per configuration.
// Fixes vs. the generated code: allocate floats (not bytes), free the buffer
// each iteration (it leaked one allocation per configuration), synchronize
// around the timed region (launches are asynchronous, so the original timed
// only launch overhead), and validate argc before reading argv[1].
int main(int argc, char **argv) {
    hipSetDevice(0);
    if (argc < 2) {
        cerr << "usage: " << argv[0] << " <matrix_count>" << endl;
        return 1;
    }
    char* p;
    // Number of entries of matrices_ to sweep, from the first CLI argument.
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            float *afRandom = NULL;
            // Fix: the kernel writes floats, so size the buffer in floats.
            hipMalloc(&afRandom, XSIZE * YSIZE * sizeof(float));
            int iNRand = 1;
            // Round the launch extents up to multiples of the block shape.
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) {
                iXSIZE++;
            }
            while (iYSIZE % BLOCKY != 0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0);
            // One launch + sync to absorb context/module setup cost.
            hipLaunchKernelGGL((display_rand), dim3(gridBlock), dim3(threadBlock), 0, 0, afRandom, iNRand);
            hipDeviceSynchronize();
            // Warm-up launches; drain them before starting the timer.
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL((display_rand), dim3(gridBlock), dim3(threadBlock), 0, 0, afRandom, iNRand);
            }
            hipDeviceSynchronize();
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL((display_rand), dim3(gridBlock), dim3(threadBlock), 0, 0, afRandom, iNRand);
            }
            // Fix: wait for the timed kernels; launches alone return almost
            // immediately, so the original measured only launch overhead.
            hipDeviceSynchronize();
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
            // Fix: release the buffer (the original leaked it every iteration).
            hipFree(afRandom);
        }
    }
    return 0;
}
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "display_rand.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Auto-generated launch-configuration benchmark for display_rand: sweeps
// matrix sizes x 20 block shapes, timing 1000 launches each and printing
// [usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)] per configuration.
// Fixes vs. the generated code: allocate floats (not bytes), free the buffer
// each iteration (it leaked one allocation per configuration), synchronize
// around the timed region (launches are asynchronous, so the original timed
// only launch overhead), and validate argc before reading argv[1].
int main(int argc, char **argv) {
    cudaSetDevice(0);
    if (argc < 2) {
        cerr << "usage: " << argv[0] << " <matrix_count>" << endl;
        return 1;
    }
    char* p;
    // Number of entries of matrices_ to sweep, from the first CLI argument.
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            float *afRandom = NULL;
            // Fix: the kernel writes floats, so size the buffer in floats.
            cudaMalloc(&afRandom, XSIZE * YSIZE * sizeof(float));
            int iNRand = 1;
            // Round the launch extents up to multiples of the block shape.
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) {
                iXSIZE++;
            }
            while (iYSIZE % BLOCKY != 0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);
            // One launch + sync to absorb context/module setup cost.
            display_rand<<<gridBlock, threadBlock>>>(afRandom, iNRand);
            cudaDeviceSynchronize();
            // Warm-up launches; drain them before starting the timer.
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                display_rand<<<gridBlock, threadBlock>>>(afRandom, iNRand);
            }
            cudaDeviceSynchronize();
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                display_rand<<<gridBlock, threadBlock>>>(afRandom, iNRand);
            }
            // Fix: wait for the timed kernels; launches alone return almost
            // immediately, so the original measured only launch overhead.
            cudaDeviceSynchronize();
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
            // Fix: release the buffer (the original leaked it every iteration).
            cudaFree(afRandom);
        }
    }
    return 0;
}
eaa131cce2fa27e275d31b8f30949f7566cb2fdf.hip | // !!! This is a file automatically generated by hipify!!!
#include "additionally.h" // some definitions from: im2col.h, blas.h, list.h, utils.h, activations.h, tree.h, layer.h, network.h
// softmax_layer.h, reorg_layer.h, route_layer.h, region_layer.h, maxpool_layer.h, convolutional_layer.h
#include "gpu.h"
/*
// from: box.h
typedef struct {
float x, y, w, h;
} box;
*/
// ------------- GPU cuDNN ---------------
#define checkCUDNN(status) { \
if (status != CUDNN_STATUS_SUCCESS) { \
printf("CUDNN failure\nError: %d - %s \n", status, cudnnGetErrorString(status)); \
getchar(); \
} \
}
// 4 layers in 1: convolution, batch-normalization, BIAS and activation
// Forward pass for a convolutional layer. The default path runs a cuDNN
// convolution followed by a bias add and activation; batch-norm application
// is commented out (presumably folded into weights/biases — TODO confirm).
// When l.xnor is set, a bit-packed binary (XNOR-net) GEMM is used instead.
void forward_convolutional_layer_gpu_cudnn(layer l, network_state state)
{
// XNOR-net
if (l.xnor) {
// Fast binary path: requires pre-packed bit weights and >= 32 input channels.
if (l.align_bit_weights_gpu && l.c >= 32)
{
//return;
hipError_t status = hipSuccess;
int input_size = l.c*l.h*l.w*l.batch;
// GEMM dims: m = number of filters, k = weights per filter, n = output pixels.
int m = l.n;
int k = l.size*l.size*l.c;
int n = l.out_w*l.out_h;
//float * a = l.weights_gpu;
// Leading dimension of the transposed input, rounded up to l.lda_align.
int ldb_align = l.lda_align;
size_t new_ldb = k + (ldb_align - k%ldb_align); // (k / 8 + 1) * 8;
size_t t_intput_size = new_ldb * n;
size_t t_bit_input_size = t_intput_size / 8;// +1;
if (l.c % 32 == 0)
{
//printf("\n\n l.index = %d, l.w = %d, l.c = %d, l.n = %d, l.stride = %d, l.pad = %d - new XNOR \n", l.index, l.w, l.c, l.n, l.stride, l.pad);
//printf("l.align_workspace_size = %d, (l.c * l.w * l.h) = %d \n", l.align_workspace_size, (l.c * l.w * l.h));
int ldb_align = l.lda_align;
size_t new_ldb = k + (ldb_align - k%ldb_align); // (k / 8 + 1) * 8;
size_t t_intput_size = new_ldb * l.bit_align;// n;
size_t t_bit_input_size = t_intput_size / 8;// +1;
const int new_c = l.c / 32;
// Pack 32 input channels per uint32, im2col, transpose, then run the
// bit-GEMM with bias and LEAKY activation fused in.
repack_input_gpu_bin(state.input, (uint32_t *)l.align_workspace_gpu, l.w, l.h, l.c);
im2col_ongpu(l.align_workspace_gpu, new_c, l.h, l.w, l.size, l.stride, l.pad, state.workspace);
int new_k = l.size*l.size*l.c / 32;
transpose_uint32_gpu((uint32_t *)state.workspace, (uint32_t *)l.transposed_align_workspace_gpu, new_k, n, n, new_ldb);
gemm_nn_custom_bin_mean_transposed_gpu(m, n, k,
(unsigned char *)l.align_bit_weights_gpu, new_ldb, (unsigned char *)l.transposed_align_workspace_gpu,
new_ldb, l.output_gpu, n, l.mean_arr_gpu, l.biases_gpu, l.activation == LEAKY,
l.bin_conv_shortcut_in_gpu, l.bin_conv_shortcut_out_gpu);
}
else
{
//printf("\n\n l.index = %d, l.w = %d, l.c = %d, l.n = %d, l.stride = %d, l.pad = %d - old XNOR \n", l.index, l.w, l.c, l.n, l.stride, l.pad);
int i = 0;
// Older binary path: float im2col, pack to bits, transpose, then bit-GEMM.
im2col_align_ongpu(state.input + i*l.c*l.h*l.w, l.c, l.h, l.w, l.size, l.stride, l.pad, l.align_workspace_gpu, l.bit_align);
float_to_bit_gpu(l.align_workspace_gpu, (unsigned char *)state.workspace, l.align_workspace_size);
transpose_bin_gpu((unsigned char *)state.workspace, (unsigned char *)l.transposed_align_workspace_gpu, k, n, l.bit_align, new_ldb, 8);
gemm_nn_custom_bin_mean_transposed_gpu(m, n, k,
(unsigned char *)l.align_bit_weights_gpu, new_ldb, (unsigned char *)l.transposed_align_workspace_gpu,
new_ldb, l.output_gpu, n, l.mean_arr_gpu, l.biases_gpu, l.activation == LEAKY,
l.bin_conv_shortcut_in_gpu, l.bin_conv_shortcut_out_gpu);
}
//add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w*l.out_h);
// Bias and LEAKY were fused into the GEMM; apply any other activation here.
if (l.activation != LINEAR && l.activation != LEAKY) activate_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation);
return;
}
// Slow XNOR path: binarize weights and input, then fall through to cuDNN below.
if (!l.align_bit_weights_gpu) {
binarize_weights_gpu(l.weights_gpu, l.n, l.c*l.size*l.size, l.binary_weights_gpu);
}
l.weights_gpu = l.binary_weights_gpu;
binarize_gpu(state.input, l.c*l.h*l.w*l.batch, l.binary_input_gpu);
state.input = l.binary_input_gpu;
}
// blas_kernels.cu
//fill_ongpu(l.outputs*l.batch, 0, l.output_gpu, 1);
int size = l.inputs * l.batch;
float one = 1;
float zero = 0;
// cuDNN >= v5.1
cudnnConvolutionForward(cudnn_handle(),
&one,
l.srcTensorDesc,
state.input,
l.weightDesc,
l.weights_gpu,
l.convDesc,
l.fw_algo,
state.workspace,
l.workspace_size,
&zero,
l.dstTensorDesc,
l.output_gpu);
if (l.batch_normalize) {
// blas_kernels.cu
//normalize_gpu(l.output_gpu, l.rolling_mean_gpu, l.rolling_variance_gpu, l.batch, l.out_c, l.out_h*l.out_w);
//scale_bias_gpu(l.output_gpu, l.scales_gpu, l.batch, l.out_c, l.out_h*l.out_w);
}
// blas_kernels.cu
add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w*l.out_h);
// blas_kernels.cu
if (l.activation != LINEAR) activate_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation);
}
// 4 layers in 1: convolution, batch-normalization, BIAS and activation
// INT8-quantized forward pass: the float input is quantized to int8 with
// l.input_quant_multipler, transformed to cuDNN's INT8x4 (NCHW_VECT_C)
// layout, convolved with the pre-transformed int8 weights via the fused
// cudnnConvolutionBiasActivationForward, and rescaled back to float with
// ALPHA1 = 1 / (input_scale * weight_scale).
// NOTE(review): the per-call print_ints() looks like leftover debug output.
void forward_convolutional_layer_gpu_cudnn_quantized(layer l, network_state state)
{
int i;
// blas_kernels.cu
//fill_ongpu(l.outputs*l.batch, 0, l.output_gpu, 1);
int size = l.inputs * l.batch;
hipError_t status;
/*
static int once = 1;
if (once) {
//printf(" l.input_quant_multipler = %f \n", l.input_quant_multipler);
once = 0;
cuda_convert_f32_to_int8(state.input, size, state.input_int8, l.input_quant_multipler, (256 / 2 - 1)); // 7-bit (1-bit sign)
//cuda_convert_int8_to_f32(state.input_int8, size, state.input, 1.0F / l.input_quant_multipler);
}
else {
//printf(" NEXT!!! \n");
//cuda_convert_f32_to_int8(state.input, size, state.input_int8, l.input_quant_multipler, (256 / 2 - 1)); // 7-bit (1-bit sign)
cuda_convert_f32_to_int8_nomax(state.input, size, state.input_int8, l.input_quant_multipler); // 7-bit (1-bit sign)
}
*/
//#if(CUDNN_MAJOR >= 7 )
#define INT8CONV
//#endif // #if(CUDNN_MAJOR >= 7 )
#ifdef INT8CONV
{
float one = 1;
float zero = 0;
// input: describe the int8 input in plain NCHW so cudnnTransformTensor can
// convert it into the layout of l.srcTensorDesc.
cudnnTensorDescriptor_t srcTransformDesc;
cudnnCreateTensorDescriptor(&srcTransformDesc);
cudnnSetTensor4dDescriptor(srcTransformDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_INT8, l.batch, l.c, l.h, l.w);
cuda_convert_f32_to_int8(state.input, size, state.input_int8, l.input_quant_multipler, (256 / 2 - 1)); // 7-bit (1-bit sign)
//cuda_convert_f32_to_int8_nomax(state.input, size, state.input_int8, l.input_quant_multipler); // 7-bit (1-bit sign)
print_ints(state.input_int8, 10 ,10);
//printf("\n l.input_quant_multipler = %f \n", l.input_quant_multipler);
cudnnStatus_t transform_status =
cudnnTransformTensor(
cudnn_handle(),
&one,
srcTransformDesc,
state.input_int8, //input_init_int8,
&zero,
l.srcTensorDesc,
state.input);
checkCUDNN(transform_status);
cudnnDestroyTensorDescriptor(srcTransformDesc); // FIX: descriptor was leaked on every call
//float ALPHA1 = l.output_multipler / R_MULT;
// Rescale the int32 accumulator back to float output.
float ALPHA1 = 1 / (l.input_quant_multipler * l.weights_quant_multipler);
//float ALPHA2 = 0;
//printf(" ALPHA1 = %f \n", ALPHA1);
// x w y and z bias alpha1/alpha2
// X_INT8 X_INT8 X_INT8 X_FLOAT X_FLOAT
// y = act ( alpha1 * conv(x) + alpha2 * z + bias )
cudnnStatus_t cudnnstat =
cudnnConvolutionBiasActivationForward(cudnn_handle(),
&ALPHA1, // ALPHA
l.srcTensorDesc,
state.input,
l.weightDesc,
l.weights_int8_int8x4_gpu, //l.weights_gpu,
l.convDesc,
l.fw_algo,
state.workspace,
l.workspace_size,
&zero, // ALPHA2
l.dstTensorDesc,
l.output_gpu,
l.biasTensorDesc,
l.biases_gpu,
l.activationDesc,
l.dstTensorDesc,
l.output_gpu);
//print_ints(l.output_gpu_int8, 10, 10);
/*
// cuDNN >= v5.1
cudnnStatus_t cudnnstat =
cudnnConvolutionForward(cudnn_handle(),
&ALPHA1,//&one,
l.srcTensorDesc,
state.input, //state.input_int8, // state.input,
l.weightDesc,
l.weights_int8_int8x4_gpu, //l.weights_int8_gpu, //l.weights_gpu,
l.convDesc,
l.fw_algo,
state.workspace,
l.workspace_size,
&zero,
l.dstTensorDesc,
l.output_gpu);
*/
//printf(" l.w = %d, l.h = %d, l.c = %d, l.n = %d \n", l.w, l.h, l.c, l.n);
// Surface the most common INT8 failure modes before pausing for input.
if (cudnnstat != CUDNN_STATUS_SUCCESS) {
if (cudnnstat == CUDNN_STATUS_ARCH_MISMATCH) {
printf("\n Error: CUDNN_STATUS_ARCH_MISMATCH - This GPU doesn't support DP4A (INT8 weights and input) \n");
}
else if (cudnnstat == CUDNN_STATUS_NOT_SUPPORTED) {
printf("\n Error: CUDNN_STATUS_NOT_SUPPORTED (INT8 weights and input) \n");
}
else if (cudnnstat == CUDNN_STATUS_BAD_PARAM) {
printf("\n Error: CUDNN_STATUS_BAD_PARAM (INT8 weights and input) \n");
}
printf("\n cudnnstat = %d \n", cudnnstat);
getchar();
}
else {
//printf("\n cudnnstat == CUDNN_STATUS_SUCCESS \n");
}
//status = hipMemcpy(l.output, l.output_gpu, sizeof(float)*l.outputs*l.batch, hipMemcpyDeviceToHost);
//for (i = 0; i < l.outputs && i < 100; ++i) printf(" %f, ", l.output[i]);
//draw_distribution(l.output, l.outputs*l.batch, "Output");
//add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w*l.out_h);
}
#else // INT8CONV
// Non-INT8 fallback: plain float cuDNN convolution + bias.
float one = 1;
float zero = 0;
// cuDNN >= v5.1
cudnnConvolutionForward(cudnn_handle(),
&one,
l.srcTensorDesc,
state.input,
l.weightDesc,
l.weights_gpu,
l.convDesc,
l.fw_algo,
state.workspace,
l.workspace_size,
&zero,
l.dstTensorDesc,
l.output_gpu);
if (l.batch_normalize) {
// blas_kernels.cu
//normalize_gpu(l.output_gpu, l.rolling_mean_gpu, l.rolling_variance_gpu, l.batch, l.out_c, l.out_h*l.out_w);
//scale_bias_gpu(l.output_gpu, l.scales_gpu, l.batch, l.out_c, l.out_h*l.out_w);
}
// blas_kernels.cu
add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w*l.out_h);
#endif // INT8CONV
// blas_kernels.cu
activate_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation);
// printf("printing l.output_gpu");
// print_floats(l.output_gpu, 10, 10);
// for(i = 0; i < 10; i++){
// printf("cpu output %d is %f\n", i, l.output[i]);
// }
//hipMemcpy(l.output_gpu, l.output,l.outputs*l.batch * sizeof(float),hipMemcpyHostToDevice)
}
// 4 layers in 1: convolution, batch-normalization, BIAS and activation
// Backup ("_bac") variant of forward_convolutional_layer_gpu_cudnn_quantized:
// identical INT8 path, but always prints the first outputs after activation.
// See the non-_bac version for the algorithm description.
// NOTE(review): consider deleting this near-duplicate once it is confirmed unused.
void forward_convolutional_layer_gpu_cudnn_quantized_bac(layer l, network_state state)
{
int i;
// blas_kernels.cu
//fill_ongpu(l.outputs*l.batch, 0, l.output_gpu, 1);
int size = l.inputs * l.batch;
hipError_t status;
/*
static int once = 1;
if (once) {
//printf(" l.input_quant_multipler = %f \n", l.input_quant_multipler);
once = 0;
cuda_convert_f32_to_int8(state.input, size, state.input_int8, l.input_quant_multipler, (256 / 2 - 1)); // 7-bit (1-bit sign)
//cuda_convert_int8_to_f32(state.input_int8, size, state.input, 1.0F / l.input_quant_multipler);
}
else {
//printf(" NEXT!!! \n");
//cuda_convert_f32_to_int8(state.input, size, state.input_int8, l.input_quant_multipler, (256 / 2 - 1)); // 7-bit (1-bit sign)
cuda_convert_f32_to_int8_nomax(state.input, size, state.input_int8, l.input_quant_multipler); // 7-bit (1-bit sign)
}
*/
//#if(CUDNN_MAJOR >= 7 )
#define INT8CONV
//#endif // #if(CUDNN_MAJOR >= 7 )
#ifdef INT8CONV
{
float one = 1;
float zero = 0;
// input: plain-NCHW int8 descriptor for the layout transform below.
cudnnTensorDescriptor_t srcTransformDesc;
cudnnCreateTensorDescriptor(&srcTransformDesc);
cudnnSetTensor4dDescriptor(srcTransformDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_INT8, l.batch, l.c, l.h, l.w);
cuda_convert_f32_to_int8(state.input, size, state.input_int8, l.input_quant_multipler, (256 / 2 - 1)); // 7-bit (1-bit sign)
//cuda_convert_f32_to_int8_nomax(state.input, size, state.input_int8, l.input_quant_multipler); // 7-bit (1-bit sign)
print_ints(state.input_int8, 10 ,10);
//printf("\n l.input_quant_multipler = %f \n", l.input_quant_multipler);
cudnnStatus_t transform_status =
cudnnTransformTensor(
cudnn_handle(),
&one,
srcTransformDesc,
state.input_int8, //input_init_int8,
&zero,
l.srcTensorDesc,
state.input);
checkCUDNN(transform_status);
cudnnDestroyTensorDescriptor(srcTransformDesc); // FIX: descriptor was leaked on every call
//float ALPHA1 = l.output_multipler / R_MULT;
// Rescale the int32 accumulator back to float output.
float ALPHA1 = 1 / (l.input_quant_multipler * l.weights_quant_multipler);
//float ALPHA2 = 0;
//printf(" ALPHA1 = %f \n", ALPHA1);
// x w y and z bias alpha1/alpha2
// X_INT8 X_INT8 X_INT8 X_FLOAT X_FLOAT
// y = act ( alpha1 * conv(x) + alpha2 * z + bias )
cudnnStatus_t cudnnstat =
cudnnConvolutionBiasActivationForward(cudnn_handle(),
&ALPHA1, // ALPHA
l.srcTensorDesc,
state.input,
l.weightDesc,
l.weights_int8_int8x4_gpu, //l.weights_gpu,
l.convDesc,
l.fw_algo,
state.workspace,
l.workspace_size,
&zero, // ALPHA2
l.dstTensorDesc,
l.output_gpu,
l.biasTensorDesc,
l.biases_gpu,
l.activationDesc,
l.dstTensorDesc,
l.output_gpu);
//print_ints(l.output_gpu_int8, 10, 10);
/*
// cuDNN >= v5.1
cudnnStatus_t cudnnstat =
cudnnConvolutionForward(cudnn_handle(),
&ALPHA1,//&one,
l.srcTensorDesc,
state.input, //state.input_int8, // state.input,
l.weightDesc,
l.weights_int8_int8x4_gpu, //l.weights_int8_gpu, //l.weights_gpu,
l.convDesc,
l.fw_algo,
state.workspace,
l.workspace_size,
&zero,
l.dstTensorDesc,
l.output_gpu);
*/
//printf(" l.w = %d, l.h = %d, l.c = %d, l.n = %d \n", l.w, l.h, l.c, l.n);
// Surface the most common INT8 failure modes before pausing for input.
if (cudnnstat != CUDNN_STATUS_SUCCESS) {
if (cudnnstat == CUDNN_STATUS_ARCH_MISMATCH) {
printf("\n Error: CUDNN_STATUS_ARCH_MISMATCH - This GPU doesn't support DP4A (INT8 weights and input) \n");
}
else if (cudnnstat == CUDNN_STATUS_NOT_SUPPORTED) {
printf("\n Error: CUDNN_STATUS_NOT_SUPPORTED (INT8 weights and input) \n");
}
else if (cudnnstat == CUDNN_STATUS_BAD_PARAM) {
printf("\n Error: CUDNN_STATUS_BAD_PARAM (INT8 weights and input) \n");
}
printf("\n cudnnstat = %d \n", cudnnstat);
getchar();
}
else {
//printf("\n cudnnstat == CUDNN_STATUS_SUCCESS \n");
}
//status = hipMemcpy(l.output, l.output_gpu, sizeof(float)*l.outputs*l.batch, hipMemcpyDeviceToHost);
//for (i = 0; i < l.outputs && i < 100; ++i) printf(" %f, ", l.output[i]);
//draw_distribution(l.output, l.outputs*l.batch, "Output");
//add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w*l.out_h);
}
#else // INT8CONV
// Non-INT8 fallback: plain float cuDNN convolution + bias.
float one = 1;
float zero = 0;
// cuDNN >= v5.1
cudnnConvolutionForward(cudnn_handle(),
&one,
l.srcTensorDesc,
state.input,
l.weightDesc,
l.weights_gpu,
l.convDesc,
l.fw_algo,
state.workspace,
l.workspace_size,
&zero,
l.dstTensorDesc,
l.output_gpu);
if (l.batch_normalize) {
// blas_kernels.cu
//normalize_gpu(l.output_gpu, l.rolling_mean_gpu, l.rolling_variance_gpu, l.batch, l.out_c, l.out_h*l.out_w);
//scale_bias_gpu(l.output_gpu, l.scales_gpu, l.batch, l.out_c, l.out_h*l.out_w);
}
// blas_kernels.cu
add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w*l.out_h);
#endif // INT8CONV
// blas_kernels.cu
printf("printing l.output_gpu");
print_floats(l.output_gpu, 10, 10);
activate_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation);
}
// MAX pooling layer
// Thin wrapper: forwards directly to the CUDA maxpool launcher defined in
// maxpool_layer_kernels.cu.
void forward_maxpool_layer_gpu_cuda(const layer l, network_state state)
{
// maxpool_layer_kernels.cu
forward_maxpool_layer_gpu(l, state);
}
// route layer
// Concatenates the outputs of the configured source layers into this layer's
// output buffer, interleaved per batch element (device-to-device copies).
void forward_route_layer_gpu_cuda(const layer l, network_state state)
{
    int dst_offset = 0;
    for (int src = 0; src < l.n; ++src) {
        const int src_layer = l.input_layers[src];
        float *src_output = state.net.layers[src_layer].output_gpu;
        const int chunk = l.input_sizes[src];
        for (int b = 0; b < l.batch; ++b) {
            float *dst = l.output_gpu + dst_offset + b * l.outputs;
            // CUDA
            hipMemcpy(dst, src_output + b * chunk, sizeof(float) * chunk, hipMemcpyDeviceToDevice);
        }
        dst_offset += chunk;
    }
}
// reorg layer
// Reorg layer: rearranges blocks between the spatial and channel dimensions.
// NOTE(review): called with out_w/out_h/out_c and forward=0 — the inverse of
// the commented-out w/h/c, forward=1 call; confirm this matches the intended
// reorg direction for this network.
void forward_reorg_layer_gpu_cuda(layer l, network_state state)
{
// blas_kernels.cu
//reorg_ongpu(state.input, l.w, l.h, l.c, l.batch, l.stride, 1, l.output_gpu);
reorg_ongpu(state.input, l.out_w, l.out_h, l.out_c, l.batch, l.stride, 0, l.output_gpu);
}
// upsample_layer.c
// Upsample layer: zeroes the output, then resamples between input and output
// by l.stride. The normal path reads state.input at (w,h) and writes the
// output (forward flag = 1); when l.reverse is set the roles of the buffers
// and resolutions are swapped (forward flag = 0).
void forward_upsample_layer_cuda(const layer l, network_state state)
{
    fill_ongpu(l.outputs * l.batch, 0, l.output_gpu, 1);
    //printf(" l.reverse = %d \n", l.reverse);
    if (!l.reverse) {
        upsample_gpu(state.input, l.w, l.h, l.c, l.batch, l.stride, 1, l.scale, l.output_gpu);
    }
    else {
        upsample_gpu(l.output_gpu, l.out_w, l.out_h, l.c, l.batch, l.stride, 0, l.scale, state.input);
    }
}
// shortcut_layer.c
// Shortcut (residual) layer: adds the output of the earlier layer l.index to
// the current input in one fused kernel, then applies the activation.
void forward_shortcut_layer_cuda(const layer l, network_state state)
{
//copy_ongpu(l.outputs*l.batch, state.input, 1, l.output_gpu, 1);
//shortcut_gpu(l.batch, l.w, l.h, l.c, state.net.layers[l.index].output_gpu, l.out_w, l.out_h, l.out_c, l.output_gpu);
//activate_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation);
input_shortcut_gpu(state.input, l.batch, l.w, l.h, l.c, state.net.layers[l.index].output_gpu, l.out_w, l.out_h, l.out_c, l.output_gpu);
activate_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation);
}
// region layer
// Region layer (Yolo v2 / Yolo 9000): flattens the predictions, applies the
// softmax over class scores (per tree group for Yolo 9000), then copies the
// result to the host and applies the logistic activation to the objectness
// score (entry 4 of each box) on the CPU into l.output.
void forward_region_layer_gpu_cuda(const layer l, network_state state)
{
// blas_kernels.cu
flatten_ongpu(state.input, l.h*l.w, l.n*(l.coords + l.classes + 1), l.batch, 1, l.output_gpu);
if (l.softmax_tree) { // Yolo 9000: one softmax per group of the class tree
int i;
int count = 5;
for (i = 0; i < l.softmax_tree->groups; ++i) {
int group_size = l.softmax_tree->group_size[i];
// blas_kernels.cu
softmax_gpu(l.output_gpu + count, group_size, l.classes + 5, l.w*l.h*l.n*l.batch, 1, l.output_gpu + count);
count += group_size;
}
}
else if (l.softmax) { // Yolo v2: single softmax over all class scores
// blas_kernels.cu
softmax_gpu(l.output_gpu + 5, l.classes, l.classes + 5, l.w*l.h*l.n*l.batch, 1, l.output_gpu + 5);
}
float *in_cpu = (float *)calloc(l.batch*l.inputs, sizeof(float));
float *truth_cpu = 0;
if (state.truth) {
int num_truth = l.batch*l.truths;
truth_cpu = (float *)calloc(num_truth, sizeof(float));
// FIX: dst/src were swapped — download the device truth into the host
// buffer (the original copied the uninitialized host buffer with a
// DeviceToHost kind, which is inconsistent and leaves truth_cpu zeroed).
hipError_t status = hipMemcpy(truth_cpu, state.truth, num_truth * sizeof(float), hipMemcpyDeviceToHost);
}
hipError_t status = hipMemcpy(in_cpu, l.output_gpu, l.batch*l.inputs * sizeof(float), hipMemcpyDeviceToHost);
network_state cpu_state = state;
cpu_state.train = state.train;
cpu_state.truth = truth_cpu;
cpu_state.input = in_cpu;
int i, b;
int size = l.coords + l.classes + 1; // floats per box prediction
memcpy(l.output, cpu_state.input, l.outputs*l.batch * sizeof(float));
for (b = 0; b < l.batch; ++b) {
for (i = 0; i < l.h*l.w*l.n; ++i) {
int index = size*i + b*l.outputs;
float x = l.output[index + 4];
l.output[index + 4] = 1.0F / (1.0F + expf(-x)); // logistic_activate_cpu(l.output[index + 4])
}
}
free(cpu_state.input);
free(truth_cpu); // FIX: was leaked when state.truth was set (free(NULL) is a no-op)
}
// yolo_layer.c Yolo v3
// Copies the raw predictions into the layer's output, applies the logistic
// activation to the x/y box entries and to the objectness + class scores for
// every anchor, then pulls the result back to the host buffer l.output.
void forward_yolo_layer_cuda(const layer l, network_state state)
{
copy_ongpu(l.batch*l.inputs, state.input, 1, l.output_gpu, 1);
int b, n;
for (b = 0; b < l.batch; ++b) {
for (n = 0; n < l.n; ++n) {
// x,y entries of the box (2 values per cell)
int index = entry_index(l, b, n*l.w*l.h, 0);
activate_array_ongpu(l.output_gpu + index, 2 * l.w*l.h, LOGISTIC);
// objectness + class probabilities (1 + classes values per cell)
index = entry_index(l, b, n*l.w*l.h, 4);
activate_array_ongpu(l.output_gpu + index, (1 + l.classes)*l.w*l.h, LOGISTIC);
}
}
cuda_pull_array(l.output_gpu, l.output, l.batch*l.outputs);
//return;
}
// Runs a full forward pass through the network on the GPU, dispatching each
// layer to its cuDNN/CUDA implementation. Each layer's output buffer becomes
// the next layer's input. Unknown layer types are reported and skipped.
void forward_network_gpu_cudnn(network net, network_state state)
{
    state.workspace = net.workspace;
    for (int i = 0; i < net.n; ++i) {
        state.index = i;
        layer l = net.layers[i];
        switch (l.type) {
        case CONVOLUTIONAL:
            forward_convolutional_layer_gpu_cudnn(l, state);
            break;
        case MAXPOOL:
            forward_maxpool_layer_gpu_cuda(l, state);
            break;
        case ROUTE:
            forward_route_layer_gpu_cuda(l, state);
            break;
        case REORG:
            forward_reorg_layer_gpu_cuda(l, state);
            break;
        case UPSAMPLE:
            forward_upsample_layer_cuda(l, state);
            break;
        case SHORTCUT:
            forward_shortcut_layer_cuda(l, state);
            break;
        case YOLO:
            forward_yolo_layer_cuda(l, state);
            break;
        case REGION:
            forward_region_layer_gpu_cuda(l, state);
            break;
        case BLANK:
            // no-op layer
            break;
        default:
            printf("\n layer: %d \n", l.type);
            break;
        }
        state.input = l.output_gpu;
    }
}
// Quantized forward pass: like forward_network_gpu_cudnn, but convolutional
// layers marked l.quantized take the INT8 cuDNN path, and the int8 output
// buffer is propagated alongside the float one.
void forward_network_gpu_cudnn_quantized(network net, network_state state)
{
    state.workspace = net.workspace;
    for (int i = 0; i < net.n; ++i) {
        state.index = i;
        layer l = net.layers[i];
        switch (l.type) {
        case CONVOLUTIONAL:
            // Quantized layers use the INT8 path; the rest stay in float.
            if (l.quantized) {
                //forward_convolutional_layer_q(l, state);
                forward_convolutional_layer_gpu_cudnn_quantized(l, state);
            }
            else {
                forward_convolutional_layer_gpu_cudnn(l, state);
            }
            break;
        case MAXPOOL:
            forward_maxpool_layer_gpu_cuda(l, state);
            break;
        case ROUTE:
            forward_route_layer_gpu_cuda(l, state);
            break;
        case REORG:
            forward_reorg_layer_gpu_cuda(l, state);
            break;
        case UPSAMPLE:
            forward_upsample_layer_cuda(l, state);
            break;
        case SHORTCUT:
            forward_shortcut_layer_cuda(l, state);
            break;
        case YOLO:
            forward_yolo_layer_cuda(l, state);
            break;
        case REGION:
            forward_region_layer_gpu_cuda(l, state);
            break;
        default:
            printf("\n layer: %d \n", l.type);
            break;
        }
        state.input = l.output_gpu;
        state.input_int8 = l.output_gpu_int8;
    }
}
// detect on GPU
// Runs inference for one batch: stages the input through pinned host memory,
// uploads it to the network's persistent device input buffer, runs the
// forward pass, and downloads the output of the last non-COST layer.
// Returns a pointer to that layer's host-side output buffer (l.output).
float *network_predict_gpu_cudnn(network net, float *input)
{
hipError_t status = hipSetDevice(net.gpu_index);
//check_error(status);
int size = net.layers[0].inputs * net.batch;
network_state state;
state.index = 0;
state.net = net;
//status = hipMalloc((void **)&(state.input), sizeof(float)*size);
state.input = net.input_state_gpu; // reuse the network's persistent input buffer
memcpy(net.input_pinned_cpu, input, size * sizeof(float));
status = hipMemcpy(state.input, net.input_pinned_cpu, sizeof(float)*size, hipMemcpyHostToDevice);
state.truth = 0;
state.train = 0;
state.delta = 0;
forward_network_gpu_cudnn(net, state); // network on GPU
//status = hipFree(state.input);
//status = hipFree(state.input_int8);
//float *out = get_network_output_gpu(net);
int i;
// Find the last layer that is not a COST layer.
for (i = net.n - 1; i > 0; --i) if (net.layers[i].type != COST) break;
layer l = net.layers[i];
// REGION/YOLO forward passes already filled l.output on the host.
if (l.type != REGION && l.type != YOLO) status = hipMemcpy(l.output, l.output_gpu, l.outputs*l.batch * sizeof(float), hipMemcpyDeviceToHost);
return l.output;
}
// detect on GPU
// Quantized-inference variant of network_predict_gpu_cudnn: allocates a
// temporary device input buffer per call (freed after the forward pass) and
// runs the INT8-aware forward pass. Returns the host-side output of the
// last non-COST layer.
float *network_predict_gpu_cudnn_quantized(network net, float *input)
{
hipError_t status = hipSetDevice(net.gpu_index);
//check_error(status);
int size = net.layers[0].inputs * net.batch;
network_state state;
state.index = 0;
state.net = net;
// Fresh device buffer for this call (unlike the float path, which reuses
// net.input_state_gpu); released below.
status = hipMalloc((void **)&(state.input), sizeof(float)*size);
memcpy(net.input_pinned_cpu, input, size * sizeof(float));
status = hipMemcpy(state.input, net.input_pinned_cpu, sizeof(float)*size, hipMemcpyHostToDevice);
state.truth = 0;
state.train = 0;
state.delta = 0;
forward_network_gpu_cudnn_quantized(net, state); // network on GPU
status = hipFree(state.input);
//status = hipFree(state.input_int8);
//float *out = get_network_output_gpu(net);
int i;
// Find the last layer that is not a COST layer.
for (i = net.n - 1; i > 0; --i) if (net.layers[i].type != COST) break;
layer l = net.layers[i];
// REGION/YOLO forward passes already filled l.output on the host.
if (l.type != REGION && l.type != YOLO) status = hipMemcpy(l.output, l.output_gpu, l.outputs*l.batch * sizeof(float), hipMemcpyDeviceToHost);
return l.output;
}
// init weights and cuDNN for quantized IINT8x4
// One-time INT8 setup: for every convolutional layer except layer 0, uploads
// the pre-quantized int8 weights, converts them from NCHW to cuDNN's
// NCHW_VECT_C (INT8x4) layout with cudnnTransformTensor, and uploads the
// quantized biases. Skips layers that are already initialized.
void init_gpu_int8x4(network net)
{
hipError_t status = hipSetDevice(net.gpu_index);
int k;
for (k = 0; k < net.n; ++k) {
layer &l = net.layers[k];
// Only conv layers; the first layer is kept in float.
if (l.type == CONVOLUTIONAL && k > 0) {
if (l.weights_int8_gpu == NULL) { // initialize at most once per layer
size_t const weights_size = l.size*l.size*l.c*l.n;
status = hipMalloc((void **)&(l.weights_int8_gpu), sizeof(int8_t)*weights_size);
status = hipMalloc((void **)&(l.weights_int8_int8x4_gpu), sizeof(int8_t)*weights_size);
status = hipMemcpy(l.weights_int8_gpu, l.weights_int8, sizeof(int8_t)*weights_size, hipMemcpyHostToDevice);
// convert weights CUDNN_TENSOR_NCHW -> CUDNN_TENSOR_NCHW_VECT_C
cudnnTensorDescriptor_t src_weights_desc;
cudnnCreateTensorDescriptor(&src_weights_desc);
cudnnSetTensor4dDescriptor(src_weights_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_INT8, l.n, l.c, l.size, l.size);
cudnnDataType_t cudnn_data_type = CUDNN_DATA_INT8x4;
#if((CUDNN_MAJOR*10 + CUDNN_MINOR) >= 72)
//if (l.c % 32 == 0) cudnn_data_type = CUDNN_DATA_INT8x32; // Tensor Cores for INT8
#endif //(CUDNN_MAJOR >= 7.2)
cudnnTensorDescriptor_t dst_weights_desc;
cudnnCreateTensorDescriptor(&dst_weights_desc);
cudnnSetTensor4dDescriptor(dst_weights_desc, CUDNN_TENSOR_NCHW_VECT_C, cudnn_data_type, l.n, l.c, l.size, l.size);
float one = 1;
float zero = 0;
cudnnStatus_t transform_status;
transform_status =
cudnnTransformTensor(
cudnn_handle(),
&one,
src_weights_desc,
l.weights_int8_gpu,
&zero,
dst_weights_desc,
l.weights_int8_int8x4_gpu);
checkCUDNN(transform_status);
cudnnDestroyTensorDescriptor(src_weights_desc);
cudnnDestroyTensorDescriptor(dst_weights_desc);
status = hipMalloc((void **)&(l.biases_quant_gpu), sizeof(float)*l.n);
status = hipMemcpy(l.biases_quant_gpu, l.biases_quant, sizeof(float)*l.n, hipMemcpyHostToDevice);
}
}
}
}
| eaa131cce2fa27e275d31b8f30949f7566cb2fdf.cu | #include "additionally.h" // some definitions from: im2col.h, blas.h, list.h, utils.h, activations.h, tree.h, layer.h, network.h
// softmax_layer.h, reorg_layer.h, route_layer.h, region_layer.h, maxpool_layer.h, convolutional_layer.h
#include "gpu.h"
/*
// from: box.h
typedef struct {
float x, y, w, h;
} box;
*/
// ------------- GPU cuDNN ---------------
#define checkCUDNN(status) { \
if (status != CUDNN_STATUS_SUCCESS) { \
printf("CUDNN failure\nError: %d - %s \n", status, cudnnGetErrorString(status)); \
getchar(); \
} \
}
// 4 layers in 1: convolution, batch-normalization, BIAS and activation
// CUDA twin of the HIP version above. Default path: cuDNN convolution + bias
// + activation; batch-norm application is commented out (presumably folded
// into weights/biases — TODO confirm). When l.xnor is set, a bit-packed
// binary (XNOR-net) GEMM is used instead.
void forward_convolutional_layer_gpu_cudnn(layer l, network_state state)
{
// XNOR-net
if (l.xnor) {
// Fast binary path: requires pre-packed bit weights and >= 32 input channels.
if (l.align_bit_weights_gpu && l.c >= 32)
{
//return;
cudaError_t status = cudaSuccess;
int input_size = l.c*l.h*l.w*l.batch;
// GEMM dims: m = number of filters, k = weights per filter, n = output pixels.
int m = l.n;
int k = l.size*l.size*l.c;
int n = l.out_w*l.out_h;
//float * a = l.weights_gpu;
// Leading dimension of the transposed input, rounded up to l.lda_align.
int ldb_align = l.lda_align;
size_t new_ldb = k + (ldb_align - k%ldb_align); // (k / 8 + 1) * 8;
size_t t_intput_size = new_ldb * n;
size_t t_bit_input_size = t_intput_size / 8;// +1;
if (l.c % 32 == 0)
{
//printf("\n\n l.index = %d, l.w = %d, l.c = %d, l.n = %d, l.stride = %d, l.pad = %d - new XNOR \n", l.index, l.w, l.c, l.n, l.stride, l.pad);
//printf("l.align_workspace_size = %d, (l.c * l.w * l.h) = %d \n", l.align_workspace_size, (l.c * l.w * l.h));
int ldb_align = l.lda_align;
size_t new_ldb = k + (ldb_align - k%ldb_align); // (k / 8 + 1) * 8;
size_t t_intput_size = new_ldb * l.bit_align;// n;
size_t t_bit_input_size = t_intput_size / 8;// +1;
const int new_c = l.c / 32;
// Pack 32 input channels per uint32, im2col, transpose, then run the
// bit-GEMM with bias and LEAKY activation fused in.
repack_input_gpu_bin(state.input, (uint32_t *)l.align_workspace_gpu, l.w, l.h, l.c);
im2col_ongpu(l.align_workspace_gpu, new_c, l.h, l.w, l.size, l.stride, l.pad, state.workspace);
int new_k = l.size*l.size*l.c / 32;
transpose_uint32_gpu((uint32_t *)state.workspace, (uint32_t *)l.transposed_align_workspace_gpu, new_k, n, n, new_ldb);
gemm_nn_custom_bin_mean_transposed_gpu(m, n, k,
(unsigned char *)l.align_bit_weights_gpu, new_ldb, (unsigned char *)l.transposed_align_workspace_gpu,
new_ldb, l.output_gpu, n, l.mean_arr_gpu, l.biases_gpu, l.activation == LEAKY,
l.bin_conv_shortcut_in_gpu, l.bin_conv_shortcut_out_gpu);
}
else
{
//printf("\n\n l.index = %d, l.w = %d, l.c = %d, l.n = %d, l.stride = %d, l.pad = %d - old XNOR \n", l.index, l.w, l.c, l.n, l.stride, l.pad);
int i = 0;
// Older binary path: float im2col, pack to bits, transpose, then bit-GEMM.
im2col_align_ongpu(state.input + i*l.c*l.h*l.w, l.c, l.h, l.w, l.size, l.stride, l.pad, l.align_workspace_gpu, l.bit_align);
float_to_bit_gpu(l.align_workspace_gpu, (unsigned char *)state.workspace, l.align_workspace_size);
transpose_bin_gpu((unsigned char *)state.workspace, (unsigned char *)l.transposed_align_workspace_gpu, k, n, l.bit_align, new_ldb, 8);
gemm_nn_custom_bin_mean_transposed_gpu(m, n, k,
(unsigned char *)l.align_bit_weights_gpu, new_ldb, (unsigned char *)l.transposed_align_workspace_gpu,
new_ldb, l.output_gpu, n, l.mean_arr_gpu, l.biases_gpu, l.activation == LEAKY,
l.bin_conv_shortcut_in_gpu, l.bin_conv_shortcut_out_gpu);
}
//add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w*l.out_h);
// Bias and LEAKY were fused into the GEMM; apply any other activation here.
if (l.activation != LINEAR && l.activation != LEAKY) activate_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation);
return;
}
// Slow XNOR path: binarize weights and input, then fall through to cuDNN below.
if (!l.align_bit_weights_gpu) {
binarize_weights_gpu(l.weights_gpu, l.n, l.c*l.size*l.size, l.binary_weights_gpu);
}
l.weights_gpu = l.binary_weights_gpu;
binarize_gpu(state.input, l.c*l.h*l.w*l.batch, l.binary_input_gpu);
state.input = l.binary_input_gpu;
}
// blas_kernels.cu
//fill_ongpu(l.outputs*l.batch, 0, l.output_gpu, 1);
int size = l.inputs * l.batch;
float one = 1;
float zero = 0;
// cuDNN >= v5.1
cudnnConvolutionForward(cudnn_handle(),
&one,
l.srcTensorDesc,
state.input,
l.weightDesc,
l.weights_gpu,
l.convDesc,
l.fw_algo,
state.workspace,
l.workspace_size,
&zero,
l.dstTensorDesc,
l.output_gpu);
if (l.batch_normalize) {
// blas_kernels.cu
//normalize_gpu(l.output_gpu, l.rolling_mean_gpu, l.rolling_variance_gpu, l.batch, l.out_c, l.out_h*l.out_w);
//scale_bias_gpu(l.output_gpu, l.scales_gpu, l.batch, l.out_c, l.out_h*l.out_w);
}
// blas_kernels.cu
add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w*l.out_h);
// blas_kernels.cu
if (l.activation != LINEAR) activate_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation);
}
// 4 layers in 1: convolution, batch-normalization, BIAS and activation
// INT8-quantized forward pass (CUDA twin of the HIP version): the float
// input is quantized to int8 with l.input_quant_multipler, transformed to
// cuDNN's INT8x4 (NCHW_VECT_C) layout, convolved with the pre-transformed
// int8 weights via the fused cudnnConvolutionBiasActivationForward, and
// rescaled back to float with ALPHA1 = 1 / (input_scale * weight_scale).
// NOTE(review): the per-call print_ints() looks like leftover debug output.
void forward_convolutional_layer_gpu_cudnn_quantized(layer l, network_state state)
{
int i;
// blas_kernels.cu
//fill_ongpu(l.outputs*l.batch, 0, l.output_gpu, 1);
int size = l.inputs * l.batch;
cudaError_t status;
/*
static int once = 1;
if (once) {
//printf(" l.input_quant_multipler = %f \n", l.input_quant_multipler);
once = 0;
cuda_convert_f32_to_int8(state.input, size, state.input_int8, l.input_quant_multipler, (256 / 2 - 1)); // 7-bit (1-bit sign)
//cuda_convert_int8_to_f32(state.input_int8, size, state.input, 1.0F / l.input_quant_multipler);
}
else {
//printf(" NEXT!!! \n");
//cuda_convert_f32_to_int8(state.input, size, state.input_int8, l.input_quant_multipler, (256 / 2 - 1)); // 7-bit (1-bit sign)
cuda_convert_f32_to_int8_nomax(state.input, size, state.input_int8, l.input_quant_multipler); // 7-bit (1-bit sign)
}
*/
//#if(CUDNN_MAJOR >= 7 )
#define INT8CONV
//#endif // #if(CUDNN_MAJOR >= 7 )
#ifdef INT8CONV
{
float one = 1;
float zero = 0;
// input: describe the int8 input in plain NCHW so cudnnTransformTensor can
// convert it into the layout of l.srcTensorDesc.
cudnnTensorDescriptor_t srcTransformDesc;
cudnnCreateTensorDescriptor(&srcTransformDesc);
cudnnSetTensor4dDescriptor(srcTransformDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_INT8, l.batch, l.c, l.h, l.w);
cuda_convert_f32_to_int8(state.input, size, state.input_int8, l.input_quant_multipler, (256 / 2 - 1)); // 7-bit (1-bit sign)
//cuda_convert_f32_to_int8_nomax(state.input, size, state.input_int8, l.input_quant_multipler); // 7-bit (1-bit sign)
print_ints(state.input_int8, 10 ,10);
//printf("\n l.input_quant_multipler = %f \n", l.input_quant_multipler);
cudnnStatus_t transform_status =
cudnnTransformTensor(
cudnn_handle(),
&one,
srcTransformDesc,
state.input_int8, //input_init_int8,
&zero,
l.srcTensorDesc,
state.input);
checkCUDNN(transform_status);
cudnnDestroyTensorDescriptor(srcTransformDesc); // FIX: descriptor was leaked on every call
//float ALPHA1 = l.output_multipler / R_MULT;
// Rescale the int32 accumulator back to float output.
float ALPHA1 = 1 / (l.input_quant_multipler * l.weights_quant_multipler);
//float ALPHA2 = 0;
//printf(" ALPHA1 = %f \n", ALPHA1);
// x w y and z bias alpha1/alpha2
// X_INT8 X_INT8 X_INT8 X_FLOAT X_FLOAT
// y = act ( alpha1 * conv(x) + alpha2 * z + bias )
cudnnStatus_t cudnnstat =
cudnnConvolutionBiasActivationForward(cudnn_handle(),
&ALPHA1, // ALPHA
l.srcTensorDesc,
state.input,
l.weightDesc,
l.weights_int8_int8x4_gpu, //l.weights_gpu,
l.convDesc,
l.fw_algo,
state.workspace,
l.workspace_size,
&zero, // ALPHA2
l.dstTensorDesc,
l.output_gpu,
l.biasTensorDesc,
l.biases_gpu,
l.activationDesc,
l.dstTensorDesc,
l.output_gpu);
//print_ints(l.output_gpu_int8, 10, 10);
/*
// cuDNN >= v5.1
cudnnStatus_t cudnnstat =
cudnnConvolutionForward(cudnn_handle(),
&ALPHA1,//&one,
l.srcTensorDesc,
state.input, //state.input_int8, // state.input,
l.weightDesc,
l.weights_int8_int8x4_gpu, //l.weights_int8_gpu, //l.weights_gpu,
l.convDesc,
l.fw_algo,
state.workspace,
l.workspace_size,
&zero,
l.dstTensorDesc,
l.output_gpu);
*/
//printf(" l.w = %d, l.h = %d, l.c = %d, l.n = %d \n", l.w, l.h, l.c, l.n);
// Surface the most common INT8 failure modes before pausing for input.
if (cudnnstat != CUDNN_STATUS_SUCCESS) {
if (cudnnstat == CUDNN_STATUS_ARCH_MISMATCH) {
printf("\n Error: CUDNN_STATUS_ARCH_MISMATCH - This GPU doesn't support DP4A (INT8 weights and input) \n");
}
else if (cudnnstat == CUDNN_STATUS_NOT_SUPPORTED) {
printf("\n Error: CUDNN_STATUS_NOT_SUPPORTED (INT8 weights and input) \n");
}
else if (cudnnstat == CUDNN_STATUS_BAD_PARAM) {
printf("\n Error: CUDNN_STATUS_BAD_PARAM (INT8 weights and input) \n");
}
printf("\n cudnnstat = %d \n", cudnnstat);
getchar();
}
else {
//printf("\n cudnnstat == CUDNN_STATUS_SUCCESS \n");
}
//status = cudaMemcpy(l.output, l.output_gpu, sizeof(float)*l.outputs*l.batch, cudaMemcpyDeviceToHost);
//for (i = 0; i < l.outputs && i < 100; ++i) printf(" %f, ", l.output[i]);
//draw_distribution(l.output, l.outputs*l.batch, "Output");
//add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w*l.out_h);
}
#else // INT8CONV
// Non-INT8 fallback: plain float cuDNN convolution + bias.
float one = 1;
float zero = 0;
// cuDNN >= v5.1
cudnnConvolutionForward(cudnn_handle(),
&one,
l.srcTensorDesc,
state.input,
l.weightDesc,
l.weights_gpu,
l.convDesc,
l.fw_algo,
state.workspace,
l.workspace_size,
&zero,
l.dstTensorDesc,
l.output_gpu);
if (l.batch_normalize) {
// blas_kernels.cu
//normalize_gpu(l.output_gpu, l.rolling_mean_gpu, l.rolling_variance_gpu, l.batch, l.out_c, l.out_h*l.out_w);
//scale_bias_gpu(l.output_gpu, l.scales_gpu, l.batch, l.out_c, l.out_h*l.out_w);
}
// blas_kernels.cu
add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w*l.out_h);
#endif // INT8CONV
// blas_kernels.cu
activate_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation);
// printf("printing l.output_gpu");
// print_floats(l.output_gpu, 10, 10);
// for(i = 0; i < 10; i++){
// printf("cpu output %d is %f\n", i, l.output[i]);
// }
//cudaMemcpy(l.output_gpu, l.output,l.outputs*l.batch * sizeof(float),cudaMemcpyHostToDevice)
}
// 4 layers in 1: convolution, batch-normalization, BIAS and activation
// Backup ("_bac") variant of the INT8 quantized convolutional forward pass.
// Pipeline (INT8CONV path): quantize input f32->int8, transform NCHW int8 into
// the layout of l.srcTensorDesc, then run fused conv+bias+activation via cuDNN.
// NOTE(review): contains debug output (print_ints/print_floats/printf) and a
// blocking getchar() on error — presumably kept for experimentation; confirm
// before using in production.
void forward_convolutional_layer_gpu_cudnn_quantized_bac(layer l, network_state state)
{
int i;
// blas_kernels.cu
//fill_ongpu(l.outputs*l.batch, 0, l.output_gpu, 1);
// total number of input floats for the whole batch
int size = l.inputs * l.batch;
cudaError_t status;
/*
static int once = 1;
if (once) {
//printf(" l.input_quant_multipler = %f \n", l.input_quant_multipler);
once = 0;
cuda_convert_f32_to_int8(state.input, size, state.input_int8, l.input_quant_multipler, (256 / 2 - 1)); // 7-bit (1-bit sign)
//cuda_convert_int8_to_f32(state.input_int8, size, state.input, 1.0F / l.input_quant_multipler);
}
else {
//printf(" NEXT!!! \n");
//cuda_convert_f32_to_int8(state.input, size, state.input_int8, l.input_quant_multipler, (256 / 2 - 1)); // 7-bit (1-bit sign)
cuda_convert_f32_to_int8_nomax(state.input, size, state.input_int8, l.input_quant_multipler); // 7-bit (1-bit sign)
}
*/
//#if(CUDNN_MAJOR >= 7 )
// INT8CONV is forced on here regardless of cuDNN version (version guard is commented out)
#define INT8CONV
//#endif // #if(CUDNN_MAJOR >= 7 )
#ifdef INT8CONV
{
float one = 1;
float zero = 0;
// input
// Describe the quantized input as plain NCHW INT8 so it can be transformed
// into whatever layout l.srcTensorDesc expects (e.g. NCHW_VECT_C for DP4A).
cudnnTensorDescriptor_t srcTransformDesc;
cudnnCreateTensorDescriptor(&srcTransformDesc);
cudnnSetTensor4dDescriptor(srcTransformDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_INT8, l.batch, l.c, l.h, l.w);
// Quantize: clamp to +/-127, i.e. 7 magnitude bits + sign
cuda_convert_f32_to_int8(state.input, size, state.input_int8, l.input_quant_multipler, (256 / 2 - 1)); // 7-bit (1-bit sign)
//cuda_convert_f32_to_int8_nomax(state.input, size, state.input_int8, l.input_quant_multipler); // 7-bit (1-bit sign)
print_ints(state.input_int8, 10 ,10);
//printf("\n l.input_quant_multipler = %f \n", l.input_quant_multipler);
// Layout conversion of the quantized input; result is written over state.input
// (reused as an int8-layout buffer — TODO confirm buffer sizes are compatible).
cudnnStatus_t transform_status =
cudnnTransformTensor(
cudnn_handle(),
&one,
srcTransformDesc,
state.input_int8, //input_init_int8,
&zero,
l.srcTensorDesc,
state.input);
checkCUDNN(transform_status);
//float ALPHA1 = l.output_multipler / R_MULT;
// Dequantization scale: undoes both the input and the weight quantization.
float ALPHA1 = 1 / (l.input_quant_multipler * l.weights_quant_multipler);
//float ALPHA2 = 0;
//printf(" ALPHA1 = %f \n", ALPHA1);
// x w y and z bias alpha1/alpha2
// X_INT8 X_INT8 X_INT8 X_FLOAT X_FLOAT
// y = act ( alpha1 * conv(x) + alpha2 * z + bias )
// Fused convolution + bias + activation in a single cuDNN call.
cudnnStatus_t cudnnstat =
cudnnConvolutionBiasActivationForward(cudnn_handle(),
&ALPHA1, // ALPHA
l.srcTensorDesc,
state.input,
l.weightDesc,
l.weights_int8_int8x4_gpu, //l.weights_gpu,
l.convDesc,
l.fw_algo,
state.workspace,
l.workspace_size,
&zero, // ALPHA2
l.dstTensorDesc,
l.output_gpu,
l.biasTensorDesc,
l.biases_gpu,
l.activationDesc,
l.dstTensorDesc,
l.output_gpu);
//print_ints(l.output_gpu_int8, 10, 10);
/*
// cuDNN >= v5.1
cudnnStatus_t cudnnstat =
cudnnConvolutionForward(cudnn_handle(),
&ALPHA1,//&one,
l.srcTensorDesc,
state.input, //state.input_int8, // state.input,
l.weightDesc,
l.weights_int8_int8x4_gpu, //l.weights_int8_gpu, //l.weights_gpu,
l.convDesc,
l.fw_algo,
state.workspace,
l.workspace_size,
&zero,
l.dstTensorDesc,
l.output_gpu);
*/
//printf(" l.w = %d, l.h = %d, l.c = %d, l.n = %d \n", l.w, l.h, l.c, l.n);
// Diagnose the most common INT8 failure modes; ARCH_MISMATCH means no DP4A support.
if (cudnnstat != CUDNN_STATUS_SUCCESS) {
if (cudnnstat == CUDNN_STATUS_ARCH_MISMATCH) {
printf("\n Error: CUDNN_STATUS_ARCH_MISMATCH - This GPU doesn't support DP4A (INT8 weights and input) \n");
}
else if (cudnnstat == CUDNN_STATUS_NOT_SUPPORTED) {
printf("\n Error: CUDNN_STATUS_NOT_SUPPORTED (INT8 weights and input) \n");
}
else if (cudnnstat == CUDNN_STATUS_BAD_PARAM) {
printf("\n Error: CUDNN_STATUS_BAD_PARAM (INT8 weights and input) \n");
}
printf("\n cudnnstat = %d \n", cudnnstat);
getchar();
}
else {
//printf("\n cudnnstat == CUDNN_STATUS_SUCCESS \n");
}
//status = cudaMemcpy(l.output, l.output_gpu, sizeof(float)*l.outputs*l.batch, cudaMemcpyDeviceToHost);
//for (i = 0; i < l.outputs && i < 100; ++i) printf(" %f, ", l.output[i]);
//draw_distribution(l.output, l.outputs*l.batch, "Output");
//add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w*l.out_h);
}
#else // INT8CONV
// Plain float32 fallback: unfused convolution, then bias added separately below.
float one = 1;
float zero = 0;
// cuDNN >= v5.1
cudnnConvolutionForward(cudnn_handle(),
&one,
l.srcTensorDesc,
state.input,
l.weightDesc,
l.weights_gpu,
l.convDesc,
l.fw_algo,
state.workspace,
l.workspace_size,
&zero,
l.dstTensorDesc,
l.output_gpu);
if (l.batch_normalize) {
// blas_kernels.cu
//normalize_gpu(l.output_gpu, l.rolling_mean_gpu, l.rolling_variance_gpu, l.batch, l.out_c, l.out_h*l.out_w);
//scale_bias_gpu(l.output_gpu, l.scales_gpu, l.batch, l.out_c, l.out_h*l.out_w);
}
// blas_kernels.cu
add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w*l.out_h);
#endif // INT8CONV
// blas_kernels.cu
// Debug leftovers: prints first outputs on every call.
printf("printing l.output_gpu");
print_floats(l.output_gpu, 10, 10);
activate_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation);
}
// MAX pooling layer
// Thin dispatch wrapper: the actual max-pool kernel launch lives in
// maxpool_layer_kernels.cu (forward_maxpool_layer_gpu).
void forward_maxpool_layer_gpu_cuda(const layer l, network_state state)
{
// maxpool_layer_kernels.cu
forward_maxpool_layer_gpu(l, state);
}
// route layer
// Route layer: concatenates the outputs of the layers listed in
// l.input_layers into l.output_gpu, keeping each batch item contiguous.
void forward_route_layer_gpu_cuda(const layer l, network_state state)
{
    int dst_offset = 0;
    for (int src = 0; src < l.n; ++src) {
        float *src_gpu = state.net.layers[l.input_layers[src]].output_gpu;
        const int chunk = l.input_sizes[src];
        // One device-to-device copy per batch item, because source and
        // destination interleave their batch slices differently.
        for (int b = 0; b < l.batch; ++b) {
            // CUDA
            cudaMemcpy(l.output_gpu + dst_offset + b*l.outputs, src_gpu + b*chunk, sizeof(float)*chunk, cudaMemcpyDeviceToDevice);
        }
        dst_offset += chunk;
    }
}
// reorg layer
// Reorg layer forward pass (space/channel reshuffle done by reorg_ongpu).
void forward_reorg_layer_gpu_cuda(layer l, network_state state)
{
// blas_kernels.cu
//reorg_ongpu(state.input, l.w, l.h, l.c, l.batch, l.stride, 1, l.output_gpu);
// NOTE(review): passes the OUTPUT dims with flag 0, while the commented-out
// variant used the input dims with flag 1 — presumably this matches the CPU
// reorg path; confirm against blas_kernels.cu before changing.
reorg_ongpu(state.input, l.out_w, l.out_h, l.out_c, l.batch, l.stride, 0, l.output_gpu);
}
// upsample_layer.c
// Upsample layer (from upsample_layer.c): zero the output buffer, then run
// upsample_gpu either forward (input -> enlarged output) or, when l.reverse
// is set, in the opposite direction with the roles of the buffers swapped.
void forward_upsample_layer_cuda(const layer l, network_state state)
{
    fill_ongpu(l.outputs*l.batch, 0, l.output_gpu, 1);
    //printf(" l.reverse = %d \n", l.reverse);
    if (!l.reverse) {
        upsample_gpu(state.input, l.w, l.h, l.c, l.batch, l.stride, 1, l.scale, l.output_gpu);
    }
    else {
        upsample_gpu(l.output_gpu, l.out_w, l.out_h, l.c, l.batch, l.stride, 0, l.scale, state.input);
    }
}
// shortcut_layer.c
// Shortcut (residual) layer from shortcut_layer.c: combine the current input
// with the output of layer l.index in one fused call, then apply activation.
void forward_shortcut_layer_cuda(const layer l, network_state state)
{
//copy_ongpu(l.outputs*l.batch, state.input, 1, l.output_gpu, 1);
//shortcut_gpu(l.batch, l.w, l.h, l.c, state.net.layers[l.index].output_gpu, l.out_w, l.out_h, l.out_c, l.output_gpu);
//activate_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation);
// Fused variant replacing the 2-step copy+shortcut above.
input_shortcut_gpu(state.input, l.batch, l.w, l.h, l.c, state.net.layers[l.index].output_gpu, l.out_w, l.out_h, l.out_c, l.output_gpu);
activate_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation);
}
// region layer
// Region (detection) layer forward pass: flatten + softmax on the GPU, then
// pull the output to the host and apply the logistic activation to the
// objectness channel (entry 4) of every box on the CPU copy in l.output.
void forward_region_layer_gpu_cuda(const layer l, network_state state)
{
    // blas_kernels.cu
    flatten_ongpu(state.input, l.h*l.w, l.n*(l.coords + l.classes + 1), l.batch, 1, l.output_gpu);
    if (l.softmax_tree) { // Yolo 9000: hierarchical softmax, one group at a time
        int i;
        int count = 5;
        for (i = 0; i < l.softmax_tree->groups; ++i) {
            int group_size = l.softmax_tree->group_size[i];
            // blas_kernels.cu
            softmax_gpu(l.output_gpu + count, group_size, l.classes + 5, l.w*l.h*l.n*l.batch, 1, l.output_gpu + count);
            count += group_size;
        }
    }
    else if (l.softmax) { // Yolo v2: single softmax over the class scores
        // blas_kernels.cu
        softmax_gpu(l.output_gpu + 5, l.classes, l.classes + 5, l.w*l.h*l.n*l.batch, 1, l.output_gpu + 5);
    }
    float *in_cpu = (float *)calloc(l.batch*l.inputs, sizeof(float));
    float *truth_cpu = 0;
    if (state.truth) {
        int num_truth = l.batch*l.truths;
        truth_cpu = (float *)calloc(num_truth, sizeof(float));
        // FIX: dst/src were swapped — a DeviceToHost copy must read from the
        // device pointer (state.truth) and write into the host buffer.
        cudaError_t status = cudaMemcpy(truth_cpu, state.truth, num_truth * sizeof(float), cudaMemcpyDeviceToHost);
    }
    cudaError_t status = cudaMemcpy(in_cpu, l.output_gpu, l.batch*l.inputs * sizeof(float), cudaMemcpyDeviceToHost);
    network_state cpu_state = state;
    cpu_state.train = state.train;
    cpu_state.truth = truth_cpu;
    cpu_state.input = in_cpu;
    int i, b;
    int size = l.coords + l.classes + 1;     // floats per box prediction
    memcpy(l.output, cpu_state.input, l.outputs*l.batch * sizeof(float));
    for (b = 0; b < l.batch; ++b) {
        for (i = 0; i < l.h*l.w*l.n; ++i) {
            int index = size*i + b*l.outputs;
            float x = l.output[index + 4];
            l.output[index + 4] = 1.0F / (1.0F + expf(-x)); // logistic_activate_cpu(l.output[index + 4]);
        }
    }
    free(cpu_state.input);
    // FIX: truth_cpu was leaked; free(NULL) is a harmless no-op when there
    // was no truth data.
    free(truth_cpu);
}
// yolo_layer.c Yolo v3
// Yolo v3 output layer (yolo_layer.c): copy the raw predictions, apply the
// logistic activation to selected channels per anchor, then pull the result
// to the host buffer l.output for post-processing.
void forward_yolo_layer_cuda(const layer l, network_state state)
{
copy_ongpu(l.batch*l.inputs, state.input, 1, l.output_gpu, 1);
int b, n;
for (b = 0; b < l.batch; ++b) {
for (n = 0; n < l.n; ++n) {
// logistic on the first 2 channels of each anchor (entries 0..1)
int index = entry_index(l, b, n*l.w*l.h, 0);
activate_array_ongpu(l.output_gpu + index, 2 * l.w*l.h, LOGISTIC);
// logistic on entry 4 onward: 1 objectness channel + l.classes class scores
index = entry_index(l, b, n*l.w*l.h, 4);
activate_array_ongpu(l.output_gpu + index, (1 + l.classes)*l.w*l.h, LOGISTIC);
}
}
// synchronous device->host pull of the activated predictions
cuda_pull_array(l.output_gpu, l.output, l.batch*l.outputs);
//return;
}
// Run the full (non-quantized) network forward on the GPU: dispatch each
// layer to its forward implementation and chain the output of layer i into
// the input of layer i+1.
void forward_network_gpu_cudnn(network net, network_state state)
{
    state.workspace = net.workspace;
    int i;
    for (i = 0; i < net.n; ++i) {
        state.index = i;
        layer l = net.layers[i];
        switch (l.type) {
        case CONVOLUTIONAL: forward_convolutional_layer_gpu_cudnn(l, state); break;
        case MAXPOOL:       forward_maxpool_layer_gpu_cuda(l, state); break;
        case ROUTE:         forward_route_layer_gpu_cuda(l, state); break;
        case REORG:         forward_reorg_layer_gpu_cuda(l, state); break;
        case UPSAMPLE:      forward_upsample_layer_cuda(l, state); break;
        case SHORTCUT:      forward_shortcut_layer_cuda(l, state); break;
        case YOLO:          forward_yolo_layer_cuda(l, state); break;
        case REGION:        forward_region_layer_gpu_cuda(l, state); break;
        case BLANK:         break; // explicitly ignored
        default:
            // unsupported layer type: report and skip
            printf("\n layer: %d \n", l.type);
            break;
        }
        // next layer reads this layer's output
        state.input = l.output_gpu;
    }
}
// Quantized variant of the network forward pass: convolutional layers marked
// l.quantized take the INT8 path; everything else uses the same per-layer
// dispatch as forward_network_gpu_cudnn. Also chains the int8 output buffer.
void forward_network_gpu_cudnn_quantized(network net, network_state state)
{
    state.workspace = net.workspace;
    int i;
    for (i = 0; i < net.n; ++i) {
        state.index = i;
        layer l = net.layers[i];
        switch (l.type) {
        case CONVOLUTIONAL:
            //if (l.quantized && i != 80 && i != 92 && i != 104) forward_convolutional_layer_gpu_cudnn_quantized(l, state); // mAP = 0, very strange
            if (l.quantized) {
                //forward_convolutional_layer_q(l, state);
                forward_convolutional_layer_gpu_cudnn_quantized(l, state);
            }
            else forward_convolutional_layer_gpu_cudnn(l, state);
            break;
        case MAXPOOL:  forward_maxpool_layer_gpu_cuda(l, state); break;
        case ROUTE:    forward_route_layer_gpu_cuda(l, state); break;
        case REORG:    forward_reorg_layer_gpu_cuda(l, state); break;
        case UPSAMPLE: forward_upsample_layer_cuda(l, state); break;
        case SHORTCUT: forward_shortcut_layer_cuda(l, state); break;
        case YOLO:     forward_yolo_layer_cuda(l, state); break;
        case REGION:   forward_region_layer_gpu_cuda(l, state); break;
        default:
            // unsupported layer type: report and skip
            printf("\n layer: %d \n", l.type);
            break;
        }
        // next layer reads this layer's float and int8 outputs
        state.input = l.output_gpu;
        state.input_int8 = l.output_gpu_int8;
    }
}
// detect on GPU
// Run inference on the GPU for one batch of input and return a pointer to the
// host-side output of the last non-COST layer (l.output).
// NOTE(review): cudaError_t return codes are assigned but never checked.
float *network_predict_gpu_cudnn(network net, float *input)
{
cudaError_t status = cudaSetDevice(net.gpu_index);
//check_error(status);
int size = net.layers[0].inputs * net.batch;
network_state state;
state.index = 0;
state.net = net;
//status = cudaMalloc((void **)&(state.input), sizeof(float)*size);
// Reuse the preallocated device input buffer; stage the copy through the
// pinned host buffer (net.input_pinned_cpu).
state.input = net.input_state_gpu;
memcpy(net.input_pinned_cpu, input, size * sizeof(float));
status = cudaMemcpy(state.input, net.input_pinned_cpu, sizeof(float)*size, cudaMemcpyHostToDevice);
state.truth = 0;
state.train = 0;
state.delta = 0;
forward_network_gpu_cudnn(net, state); // network on GPU
//status = cudaFree(state.input);
//status = cudaFree(state.input_int8);
//float *out = get_network_output_gpu(net);
// Find the last layer that is not a COST layer.
int i;
for (i = net.n - 1; i > 0; --i) if (net.layers[i].type != COST) break;
layer l = net.layers[i];
// REGION and YOLO layers already pulled their output to the host.
if (l.type != REGION && l.type != YOLO) status = cudaMemcpy(l.output, l.output_gpu, l.outputs*l.batch * sizeof(float), cudaMemcpyDeviceToHost);
return l.output;
}
// detect on GPU
// Quantized-path inference entry point; unlike network_predict_gpu_cudnn it
// allocates (and frees) a fresh device buffer for the input each call.
// NOTE(review): cudaError_t return codes are assigned but never checked.
float *network_predict_gpu_cudnn_quantized(network net, float *input)
{
cudaError_t status = cudaSetDevice(net.gpu_index);
//check_error(status);
int size = net.layers[0].inputs * net.batch;
network_state state;
state.index = 0;
state.net = net;
// Per-call device allocation (freed below after the forward pass).
status = cudaMalloc((void **)&(state.input), sizeof(float)*size);
memcpy(net.input_pinned_cpu, input, size * sizeof(float));
status = cudaMemcpy(state.input, net.input_pinned_cpu, sizeof(float)*size, cudaMemcpyHostToDevice);
state.truth = 0;
state.train = 0;
state.delta = 0;
forward_network_gpu_cudnn_quantized(net, state); // network on GPU
status = cudaFree(state.input);
//status = cudaFree(state.input_int8);
//float *out = get_network_output_gpu(net);
// Find the last layer that is not a COST layer.
int i;
for (i = net.n - 1; i > 0; --i) if (net.layers[i].type != COST) break;
layer l = net.layers[i];
// REGION and YOLO layers already pulled their output to the host.
if (l.type != REGION && l.type != YOLO) status = cudaMemcpy(l.output, l.output_gpu, l.outputs*l.batch * sizeof(float), cudaMemcpyDeviceToHost);
return l.output;
}
// init weights and cuDNN for quantized IINT8x4
// One-time GPU setup for INT8x4 inference: for every convolutional layer
// (except the first), upload the pre-quantized int8 weights and transform
// them from NCHW into the NCHW_VECT_C (INT8x4) layout cuDNN's DP4A path
// expects, then upload the quantized biases.
// NOTE(review): cudaError_t return codes are assigned but never checked.
void init_gpu_int8x4(network net)
{
cudaError_t status = cudaSetDevice(net.gpu_index);
int k;
for (k = 0; k < net.n; ++k) {
layer &l = net.layers[k];
// Skip layer 0 (the input convolution stays in float).
if (l.type == CONVOLUTIONAL && k > 0) {
// Guard makes this idempotent: only allocate/convert once per layer.
if (l.weights_int8_gpu == NULL) {
size_t const weights_size = l.size*l.size*l.c*l.n;
status = cudaMalloc((void **)&(l.weights_int8_gpu), sizeof(int8_t)*weights_size);
status = cudaMalloc((void **)&(l.weights_int8_int8x4_gpu), sizeof(int8_t)*weights_size);
status = cudaMemcpy(l.weights_int8_gpu, l.weights_int8, sizeof(int8_t)*weights_size, cudaMemcpyHostToDevice);
// convert weights CUDNN_TENSOR_NCHW -> CUDNN_TENSOR_NCHW_VECT_C
cudnnTensorDescriptor_t src_weights_desc;
cudnnCreateTensorDescriptor(&src_weights_desc);
cudnnSetTensor4dDescriptor(src_weights_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_INT8, l.n, l.c, l.size, l.size);
cudnnDataType_t cudnn_data_type = CUDNN_DATA_INT8x4;
#if((CUDNN_MAJOR*10 + CUDNN_MINOR) >= 72)
//if (l.c % 32 == 0) cudnn_data_type = CUDNN_DATA_INT8x32; // Tensor Cores for INT8
#endif //(CUDNN_MAJOR >= 7.2)
cudnnTensorDescriptor_t dst_weights_desc;
cudnnCreateTensorDescriptor(&dst_weights_desc);
cudnnSetTensor4dDescriptor(dst_weights_desc, CUDNN_TENSOR_NCHW_VECT_C, cudnn_data_type, l.n, l.c, l.size, l.size);
float one = 1;
float zero = 0;
cudnnStatus_t transform_status;
// Layout transform on the device: NCHW int8 -> vectorized INT8x4.
transform_status =
cudnnTransformTensor(
cudnn_handle(),
&one,
src_weights_desc,
l.weights_int8_gpu,
&zero,
dst_weights_desc,
l.weights_int8_int8x4_gpu);
checkCUDNN(transform_status);
cudnnDestroyTensorDescriptor(src_weights_desc);
cudnnDestroyTensorDescriptor(dst_weights_desc);
// Upload quantized biases for this layer.
status = cudaMalloc((void **)&(l.biases_quant_gpu), sizeof(float)*l.n);
status = cudaMemcpy(l.biases_quant_gpu, l.biases_quant, sizeof(float)*l.n, cudaMemcpyHostToDevice);
}
}
}
}
|
6961f573bcd541535e27530d7f1f40191fcbaf62.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdio>
#include "hip/hip_runtime.h"
#include "utils.cuh"
#include "matrix_hip.cuh"
#include "device_context.cuh"
#include "tsvd.h"
#include <ctime>
#include <thrust/iterator/counting_iterator.h>
#include<algorithm>
#include <thrust/sequence.h>
namespace tsvd
{
/**
* Division utility to get explained variance ratio
*
* @param XVar
* @param XVarSum
* @param ExplainedVarRatio
* @param context
*/
// Element-wise ratio: ExplainedVarRatio[i] = XVar[i] / XVarSum[0], with a
// guard that yields 0 when the total variance XVarSum[0] is zero.
void divide(const Matrix<float> &XVar, const Matrix<float> &XVarSum, Matrix<float> &ExplainedVarRatio, DeviceContext &context){
    auto var = XVar.data();
    auto total = XVarSum.data();
    auto ratio = ExplainedVarRatio.data();
    auto first = thrust::make_counting_iterator<int>(0);
    thrust::for_each(first, first + ExplainedVarRatio.size(), [=]__device__(int i){
        // total variance can legitimately be zero; avoid dividing by it
        ratio[i] = (total[0] != 0.0) ? var[i] / total[0] : 0.0f;
    });
}
/**
* Square each value in a matrix
*
* @param UmultSigma
* @param UmultSigmaSquare
* @param context
*/
void square_val(const Matrix<float> &UmultSigma, Matrix<float> &UmultSigmaSquare, DeviceContext &context){
    // Square each element of UmultSigma into UmultSigmaSquare on the device.
    // (Dropped the unused n/m/k locals the original computed.)
    auto d_u_mult_sigma = UmultSigma.data();
    auto d_u_mult_sigma_square = UmultSigmaSquare.data();
    auto counting = thrust::make_counting_iterator <int>(0);
    thrust::for_each(counting, counting+UmultSigmaSquare.size(), [=]__device__(int idx){
        // v*v gives the identical rounded float as ::pow(v, 2) (the exact
        // product is representable in double) while avoiding the double-
        // precision pow call on the device.
        float v = d_u_mult_sigma[idx];
        d_u_mult_sigma_square[idx] = v * v;
    } );
}
/**
* Alternative variance calculation (Can be slow for big matrices)
*
* @param UmultSigma
* @param k
* @param UmultSigmaVar
* @param context
*/
// Population variance per column of UmultSigma, via the one-pass identity
// var = (m*sum(x^2) - (sum x)^2) / m^2. Kept as an alternative; the
// header comment above notes it can be slow for big matrices.
void calc_var(const Matrix<float>UmultSigma, int k, Matrix<float> &UmultSigmaVar, DeviceContext &context){
//Set aside matrix of 1's for getting columnar sums(t(UmultSima) * UmultOnes)
Matrix<float>UmultOnes(UmultSigma.rows(), 1);
UmultOnes.fill(1.0f);
//Allocate matrices for variance calculation
Matrix<float>UmultSigmaSquare(UmultSigma.rows(), UmultSigma.columns());
Matrix<float>UmultSigmaSum(k, 1);
Matrix<float>UmultSigmaSumSquare(k, 1);
Matrix<float>UmultSigmaSumOfSquare(k, 1);
Matrix<float>UmultSigmaVarNum(k, 1);
//Calculate Variance
// sum(x^2) per column (transpose-multiply by the ones vector)
square_val(UmultSigma, UmultSigmaSquare, context);
multiply(UmultSigmaSquare, UmultOnes, UmultSigmaSumOfSquare, context, true, false, 1.0f);
// (sum x)^2 per column
multiply(UmultSigma, UmultOnes, UmultSigmaSum, context, true, false, 1.0f);
square_val(UmultSigmaSum, UmultSigmaSumSquare, context);
//Get rows
auto m = UmultSigma.rows();
// numerator = m*sum(x^2) - (sum x)^2
multiply(UmultSigmaSumOfSquare, m, context);
subtract(UmultSigmaSumOfSquare, UmultSigmaSumSquare, UmultSigmaVarNum, context);
auto d_u_sigma_var_num = UmultSigmaVarNum.data();
auto d_u_sigma_var = UmultSigmaVar.data();
auto counting = thrust::make_counting_iterator <int>(0);
thrust::for_each(counting, counting+UmultSigmaVar.size(), [=]__device__(int idx){
// divide by m^2 to obtain the population variance
float div_val = d_u_sigma_var_num[idx]/(::pow(m,2));
d_u_sigma_var[idx] = div_val;
} );
}
// Lazy "virtual array" of squared deviations: element idx evaluates to
// (data[idx] - mean[idx / col_rows])^2, where mean holds one value per
// column of a column-major col_rows-tall matrix. Designed to be consumed by
// cub/hipcub segmented reductions (only operator[] and operator+ are used).
template<typename T>
class variance_iterator{
public:
// Required iterator traits
typedef variance_iterator<T> self_type; ///< My own type
typedef size_t difference_type; ///< Type to express the result of subtracting one iterator from another
typedef T value_type; ///< The type of the element the iterator can point to
typedef T* pointer; ///< The type of a pointer to an element the iterator can point to
typedef T reference; ///< The type of a reference to an element the iterator can point to
typedef std::random_access_iterator_tag iterator_category; ///< The iterator category
const T* data_ptr; // matrix values (column-major)
const T* mean_ptr; // one mean per column
const int col_rows; // rows per column (segment length)
size_t offset; // base element offset applied by operator[]
__device__ T operator[](size_t idx){
idx = idx + offset;
T mean = mean_ptr[idx/col_rows];
T dev_square = pow((data_ptr[idx] - mean),2);
return dev_square;
}
// NOTE(review): returns a fresh iterator whose offset is `idx`, NOT
// this->offset + idx — chained additions would lose the earlier offset.
// The segmented-reduce usage here only ever adds to the base iterator,
// so this is presumably safe; confirm before reusing elsewhere.
__device__ self_type operator+(size_t idx){
self_type retval(data_ptr, mean_ptr, col_rows);
retval.offset += idx;
return retval;
}
__host__ __device__ variance_iterator(const T* data_ptr, const T* mean_ptr, const int col_rows):data_ptr(data_ptr), mean_ptr(mean_ptr), col_rows(col_rows), offset(0){
}
};
/**
* Utility to calculate variance for each column of a matrix
*
* @param X
* @param UColMean
* @param UVar
* @param context
*/
// Per-column sum of squared deviations of X from the given column means,
// written into UVar (one value per column). Uses a segmented reduction over
// the lazy variance_iterator, with one segment per matrix column.
void calc_var_numerator(const Matrix<float> &X, const Matrix<float> &UColMean, Matrix<float> &UVar, DeviceContext &context){
auto m = X.rows();
variance_iterator<float> variance(X.data(), UColMean.data(), m);
// segment boundaries: 0, m, 2m, ... (column starts in column-major storage)
thrust::device_vector<int> segments(X.columns() + 1);
thrust::sequence(segments.begin(), segments.end(), 0, static_cast<int>(X.rows()));
// Determine temporary device storage requirements
// (standard cub two-phase call: first call with NULL only sizes the workspace)
void *d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
int cols = static_cast<int>(X.columns());
hipcub::DeviceSegmentedReduce::Sum(d_temp_storage, temp_storage_bytes, variance, UVar.data(),
cols, thrust::raw_pointer_cast(segments.data()), thrust::raw_pointer_cast(segments.data() + 1));
// Allocate temporary storage
safe_cuda(hipMalloc(&d_temp_storage, temp_storage_bytes));
// Run sum-reduction
hipcub::DeviceSegmentedReduce::Sum(d_temp_storage, temp_storage_bytes, variance, UVar.data(),
cols, thrust::raw_pointer_cast(segments.data()), thrust::raw_pointer_cast(segments.data() + 1));
safe_cuda(hipFree(d_temp_storage));
}
/**
* Utility to reverse q to show most import k to least important k
*
* @param Q
* @param QReversed
* @param context
*/
void col_reverse_q(const Matrix<float> &Q, Matrix<float> &QReversed, DeviceContext &context){
    // Copy Q into QReversed with the column order flipped (last column first),
    // so eigenvectors end up ordered from largest to smallest eigenvalue.
    // Both matrices are addressed column-major: element (r, c) lives at c*m + r.
    // (Dropped the unused k/src_row locals the original computed.)
    auto n = Q.columns();
    auto m = Q.rows();
    auto d_q = Q.data();
    auto d_q_reversed = QReversed.data();
    auto counting = thrust::make_counting_iterator <int>(0);
    thrust::for_each(counting, counting+QReversed.size(), [=]__device__(int idx){
        int dest_row = idx % m;
        int dest_col = idx/m;
        int src_col = (n - dest_col) - 1; // mirror the column index
        d_q_reversed[idx] = d_q[src_col * m + dest_row];
    } );
}
/**
* Truncate Q transpose to top k
*
* @param Qt
* @param QtTrunc
* @param context
*/
// Copy the last k rows of Qt into QtTrunc in reversed row order, so the
// top-k eigenvector rows come out ordered most- to least-important.
// Column-major addressing: element (r, c) of an m-tall matrix is at c*m + r.
void row_reverse_trunc_q(const Matrix<float> &Qt, Matrix<float> &QtTrunc, DeviceContext &context){
auto m = Qt.rows();
auto k = QtTrunc.rows();
auto d_q = Qt.data();
auto d_q_trunc = QtTrunc.data();
auto counting = thrust::make_counting_iterator <int>(0);
thrust::for_each(counting, counting+QtTrunc.size(), [=]__device__(int idx){
int dest_row = idx % k;
int dest_col = idx / k;
// mirror the row index: destination row 0 reads source row m-1, etc.
int src_row = (m - dest_row) - 1;
int src_col = dest_col;
float q = d_q[src_col * m + src_row];
d_q_trunc[idx] = q;
} );
}
/**
* Calculate the U matrix, which is defined as:
* U = A*V/sigma where A is our X Matrix, V is Q, and sigma is 1/w_i
*
* @param X
* @param Q
* @param w
* @param U
* @param context
*/
// U = X*Q scaled column-wise by 1/sigma (w holds one singular value per
// column). Columns whose singular value is zero are zeroed out instead of
// dividing by zero.
void calculate_u(const Matrix<float> &X, const Matrix<float> &Q, const Matrix<float> &w, Matrix<float> &U, DeviceContext &context){
multiply(X, Q, U, context, false, false, 1.0f); //A*V
auto d_u = U.data();
auto d_sigma = w.data();
auto column_size = U.rows();
auto counting = thrust::make_counting_iterator <int>(0);
thrust::for_each(counting, counting+U.size(), [=]__device__(int idx){
// column-major: consecutive elements walk down a column
int column = idx/column_size;
float sigma = d_sigma[column];
float u = d_u[idx];
if(sigma != 0.0){
d_u[idx] = u * 1.0/sigma;
} else{
// zero singular value -> the whole column is zero
d_u[idx] = 0.0;
}
} );
}
/**
* Conduct truncated SVD on a matrix
*
* @param _X
* @param _Q
* @param _w
* @param _U
* @param _explained_variance
* @param _explained_variance_ratio
* @param _param
*/
// Truncated SVD of the X_m x X_n input via the eigendecomposition of X^T*X:
// eigenvectors give V, sqrt of eigenvalues gives the singular values, and
// U = X*V/sigma. Also reports per-component explained variance and its ratio
// to the total variance of X. All errors are caught and logged to stderr;
// on failure the output buffers may be left partially written.
void truncated_svd(const double* _X, double* _Q, double* _w, double* _U, double* _explained_variance, double* _explained_variance_ratio, params _param)
{
try
{
//Take in X matrix and allocate for X^TX
Matrix<float>X(_param.X_m, _param.X_n);
X.copy(_X);
Matrix<float>XtX(_param.X_n, _param.X_n);
//create context
DeviceContext context;
//Multiply X and Xt and output result to XtX
multiply(X, X, XtX, context, true, false, 1.0f);
//Set up Q (V^T) and w (singular value) matrices (w is a matrix of size Q.rows() by 1; really just a vector
Matrix<float>Q(XtX.rows(), XtX.columns()); // n X n -> V^T
Matrix<float>w(Q.rows(), 1);
// eigenpairs come back in ascending eigenvalue order (hence the
// reverse/truncate steps below)
calculate_eigen_pairs_exact(XtX, Q, w, context);
//Obtain Q^T to obtain vector as row major order
Matrix<float>Qt(Q.columns(), Q.rows());
transpose(Q, Qt, context); //Needed for calculate_u()
Matrix<float>QtTrunc(_param.k, Qt.columns());
// keep only the top-k eigenvector rows, most important first
row_reverse_trunc_q(Qt, QtTrunc, context);
QtTrunc.copy_to_host(_Q); //Send to host
//Obtain square root of eigenvalues, which are singular values
w.transform([=]__device__(float elem){
if(elem > 0.0){
return std::sqrt(elem);
}else{
return 0.0f;
}
}
);
//Sort from biggest singular value to smallest
std::vector<double> w_temp(w.size());
w.copy_to_host(w_temp.data()); //Send to host
std::reverse(w_temp.begin(), w_temp.end());
std::copy(w_temp.begin(), w_temp.begin() + _param.k, _w);
// sigma keeps only the k largest singular values
Matrix<float>sigma(_param.k, 1);
sigma.copy(w_temp.data());
//Get U matrix
Matrix<float>U(X.rows(), _param.k);
Matrix<float>QReversed(Q.rows(), Q.columns());
col_reverse_q(Q, QReversed, context);
calculate_u(X, QReversed, sigma, U, context);
U.copy_to_host(_U); //Send to host
//Explained Variance
// variance of each column of U*Sigma = variance captured per component
Matrix<float>UmultSigma(U.rows(), U.columns());
//U * Sigma
multiply_diag(U, sigma, UmultSigma, context, false);
Matrix<float>UOnesSigma(UmultSigma.rows(), 1);
UOnesSigma.fill(1.0f);
Matrix<float>USigmaVar(_param.k, 1);
Matrix<float>USigmaColMean(_param.k, 1);
multiply(UmultSigma, UOnesSigma, USigmaColMean, context, true, false, 1.0f);
float m_usigma = UmultSigma.rows();
multiply(USigmaColMean, 1/m_usigma, context);
calc_var_numerator(UmultSigma, USigmaColMean, USigmaVar, context);
multiply(USigmaVar, 1/m_usigma, context);
USigmaVar.copy_to_host(_explained_variance);
//Explained Variance Ratio
//Set aside matrix of 1's for getting sum of columnar variances
Matrix<float>XmultOnes(X.rows(), 1);
XmultOnes.fill(1.0f);
Matrix<float>XVar(X.columns(), 1);
Matrix<float>XColMean(X.columns(), 1);
multiply(X, XmultOnes, XColMean, context, true, false, 1.0f);
float m = X.rows();
multiply(XColMean, 1/m, context);
calc_var_numerator(X, XColMean, XVar, context);
multiply(XVar, 1/m, context);
Matrix<float>XVarSum(1,1);
// NOTE(review): XVar is X_n x 1 but XmultOnes is X_m x 1; this transpose-
// multiply only lines up when X_m == X_n is not required by the math —
// verify the dimensions expected by multiply() here.
multiply(XVar, XmultOnes, XVarSum, context, true, false, 1.0f);
Matrix<float>ExplainedVarRatio(_param.k, 1);
divide(USigmaVar, XVarSum, ExplainedVarRatio, context);
ExplainedVarRatio.copy_to_host(_explained_variance_ratio);
}
catch (const std::exception &e)
{
std::cerr << "tsvd error: " << e.what() << "\n";
}
catch (std::string e)
{
std::cerr << "tsvd error: " << e << "\n";
}
catch (...)
{
std::cerr << "tsvd error\n";
}
}
}
| 6961f573bcd541535e27530d7f1f40191fcbaf62.cu | #include <cstdio>
#include "cuda_runtime.h"
#include "utils.cuh"
#include "matrix.cuh"
#include "device_context.cuh"
#include "tsvd.h"
#include <ctime>
#include <thrust/iterator/counting_iterator.h>
#include<algorithm>
#include <thrust/sequence.h>
namespace tsvd
{
/**
* Division utility to get explained variance ratio
*
* @param XVar
* @param XVarSum
* @param ExplainedVarRatio
* @param context
*/
// Element-wise ratio: ExplainedVarRatio[i] = XVar[i] / XVarSum[0], guarded
// against a zero total variance (yields 0 in that case).
void divide(const Matrix<float> &XVar, const Matrix<float> &XVarSum, Matrix<float> &ExplainedVarRatio, DeviceContext &context){
auto d_x_var = XVar.data();
auto d_x_var_sum = XVarSum.data();
auto d_expl_var_ratio = ExplainedVarRatio.data();
auto counting = thrust::make_counting_iterator <int>(0);
thrust::for_each(counting, counting+ExplainedVarRatio.size(), [=]__device__(int idx){
float div_val = 0.0;
//XVarSum can possibly be zero
if(d_x_var_sum[0] != 0.0){
div_val = d_x_var[idx] / d_x_var_sum[0];
}
d_expl_var_ratio[idx] = div_val;
} );
}
/**
* Square each value in a matrix
*
* @param UmultSigma
* @param UmultSigmaSquare
* @param context
*/
void square_val(const Matrix<float> &UmultSigma, Matrix<float> &UmultSigmaSquare, DeviceContext &context){
    // Square each element of UmultSigma into UmultSigmaSquare on the device.
    // (Dropped the unused n/m/k locals the original computed.)
    auto d_u_mult_sigma = UmultSigma.data();
    auto d_u_mult_sigma_square = UmultSigmaSquare.data();
    auto counting = thrust::make_counting_iterator <int>(0);
    thrust::for_each(counting, counting+UmultSigmaSquare.size(), [=]__device__(int idx){
        // v*v gives the identical rounded float as std::pow(v, 2) (the exact
        // product is representable in double) while avoiding the double-
        // precision pow call on the device.
        float v = d_u_mult_sigma[idx];
        d_u_mult_sigma_square[idx] = v * v;
    } );
}
/**
* Alternative variance calculation (Can be slow for big matrices)
*
* @param UmultSigma
* @param k
* @param UmultSigmaVar
* @param context
*/
// Population variance per column of UmultSigma, via the one-pass identity
// var = (m*sum(x^2) - (sum x)^2) / m^2. Kept as an alternative; the
// header comment above notes it can be slow for big matrices.
void calc_var(const Matrix<float>UmultSigma, int k, Matrix<float> &UmultSigmaVar, DeviceContext &context){
//Set aside matrix of 1's for getting columnar sums(t(UmultSima) * UmultOnes)
Matrix<float>UmultOnes(UmultSigma.rows(), 1);
UmultOnes.fill(1.0f);
//Allocate matrices for variance calculation
Matrix<float>UmultSigmaSquare(UmultSigma.rows(), UmultSigma.columns());
Matrix<float>UmultSigmaSum(k, 1);
Matrix<float>UmultSigmaSumSquare(k, 1);
Matrix<float>UmultSigmaSumOfSquare(k, 1);
Matrix<float>UmultSigmaVarNum(k, 1);
//Calculate Variance
// sum(x^2) per column (transpose-multiply by the ones vector)
square_val(UmultSigma, UmultSigmaSquare, context);
multiply(UmultSigmaSquare, UmultOnes, UmultSigmaSumOfSquare, context, true, false, 1.0f);
// (sum x)^2 per column
multiply(UmultSigma, UmultOnes, UmultSigmaSum, context, true, false, 1.0f);
square_val(UmultSigmaSum, UmultSigmaSumSquare, context);
//Get rows
auto m = UmultSigma.rows();
// numerator = m*sum(x^2) - (sum x)^2
multiply(UmultSigmaSumOfSquare, m, context);
subtract(UmultSigmaSumOfSquare, UmultSigmaSumSquare, UmultSigmaVarNum, context);
auto d_u_sigma_var_num = UmultSigmaVarNum.data();
auto d_u_sigma_var = UmultSigmaVar.data();
auto counting = thrust::make_counting_iterator <int>(0);
thrust::for_each(counting, counting+UmultSigmaVar.size(), [=]__device__(int idx){
// divide by m^2 to obtain the population variance
float div_val = d_u_sigma_var_num[idx]/(std::pow(m,2));
d_u_sigma_var[idx] = div_val;
} );
}
// Lazy "virtual array" of squared deviations: element idx evaluates to
// (data[idx] - mean[idx / col_rows])^2, where mean holds one value per
// column of a column-major col_rows-tall matrix. Designed to be consumed by
// cub segmented reductions (only operator[] and operator+ are used).
template<typename T>
class variance_iterator{
public:
// Required iterator traits
typedef variance_iterator<T> self_type; ///< My own type
typedef size_t difference_type; ///< Type to express the result of subtracting one iterator from another
typedef T value_type; ///< The type of the element the iterator can point to
typedef T* pointer; ///< The type of a pointer to an element the iterator can point to
typedef T reference; ///< The type of a reference to an element the iterator can point to
typedef std::random_access_iterator_tag iterator_category; ///< The iterator category
const T* data_ptr; // matrix values (column-major)
const T* mean_ptr; // one mean per column
const int col_rows; // rows per column (segment length)
size_t offset; // base element offset applied by operator[]
__device__ T operator[](size_t idx){
idx = idx + offset;
T mean = mean_ptr[idx/col_rows];
T dev_square = pow((data_ptr[idx] - mean),2);
return dev_square;
}
// NOTE(review): returns a fresh iterator whose offset is `idx`, NOT
// this->offset + idx — chained additions would lose the earlier offset.
// The segmented-reduce usage here only ever adds to the base iterator,
// so this is presumably safe; confirm before reusing elsewhere.
__device__ self_type operator+(size_t idx){
self_type retval(data_ptr, mean_ptr, col_rows);
retval.offset += idx;
return retval;
}
__host__ __device__ variance_iterator(const T* data_ptr, const T* mean_ptr, const int col_rows):data_ptr(data_ptr), mean_ptr(mean_ptr), col_rows(col_rows), offset(0){
}
};
/**
* Utility to calculate variance for each column of a matrix
*
* @param X
* @param UColMean
* @param UVar
* @param context
*/
// Per-column sum of squared deviations of X from the given column means,
// written into UVar (one value per column). Uses a segmented reduction over
// the lazy variance_iterator, with one segment per matrix column.
void calc_var_numerator(const Matrix<float> &X, const Matrix<float> &UColMean, Matrix<float> &UVar, DeviceContext &context){
auto m = X.rows();
variance_iterator<float> variance(X.data(), UColMean.data(), m);
// segment boundaries: 0, m, 2m, ... (column starts in column-major storage)
thrust::device_vector<int> segments(X.columns() + 1);
thrust::sequence(segments.begin(), segments.end(), 0, static_cast<int>(X.rows()));
// Determine temporary device storage requirements
// (standard cub two-phase call: first call with NULL only sizes the workspace)
void *d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
int cols = static_cast<int>(X.columns());
cub::DeviceSegmentedReduce::Sum(d_temp_storage, temp_storage_bytes, variance, UVar.data(),
cols, thrust::raw_pointer_cast(segments.data()), thrust::raw_pointer_cast(segments.data() + 1));
// Allocate temporary storage
safe_cuda(cudaMalloc(&d_temp_storage, temp_storage_bytes));
// Run sum-reduction
cub::DeviceSegmentedReduce::Sum(d_temp_storage, temp_storage_bytes, variance, UVar.data(),
cols, thrust::raw_pointer_cast(segments.data()), thrust::raw_pointer_cast(segments.data() + 1));
safe_cuda(cudaFree(d_temp_storage));
}
/**
* Utility to reverse q to show most import k to least important k
*
* @param Q
* @param QReversed
* @param context
*/
void col_reverse_q(const Matrix<float> &Q, Matrix<float> &QReversed, DeviceContext &context){
auto n = Q.columns();
auto m = Q.rows();
auto k = QReversed.rows();
auto d_q = Q.data();
auto d_q_reversed = QReversed.data();
auto counting = thrust::make_counting_iterator <int>(0);
thrust::for_each(counting, counting+QReversed.size(), [=]__device__(int idx){
int dest_row = idx % m;
int dest_col = idx/m;
int src_row = dest_row;
int src_col = (n - dest_col) - 1;
d_q_reversed[idx] = d_q[src_col * m + src_row];
} );
}
/**
* Truncate Q transpose to top k
*
* @param Qt
* @param QtTrunc
* @param context
*/
/**
 * Fill QtTrunc with the last k rows of Qt in reverse row order, so that
 * QtTrunc row 0 holds the most significant component (Qt's last row).
 * Both matrices are addressed column-major.
 *
 * @param Qt full transposed eigenvector matrix
 * @param QtTrunc output with k rows and Qt's column count
 * @param context device context (unused here; kept for API symmetry)
 */
void row_reverse_trunc_q(const Matrix<float> &Qt, Matrix<float> &QtTrunc, DeviceContext &context){
	auto src_rows = Qt.rows();
	auto trunc_rows = QtTrunc.rows();
	auto d_src = Qt.data();
	auto d_dst = QtTrunc.data();
	auto first = thrust::make_counting_iterator<int>(0);
	thrust::for_each(first, first + QtTrunc.size(), [=]__device__(int idx){
		int row = idx % trunc_rows;
		int col = idx / trunc_rows;
		// mirror the row index within the source matrix
		d_dst[idx] = d_src[col * src_rows + (src_rows - row - 1)];
	} );
}
/**
* Calculate the U matrix, which is defined as:
* U = A*V/sigma where A is our X Matrix, V is Q, and sigma is 1/w_i
*
* @param X
* @param Q
* @param w
* @param U
* @param context
*/
/**
 * Calculate the U matrix, defined as U = A*V/sigma where A is the X matrix,
 * V is Q, and the per-column divisor is the singular value w[column].
 * Columns whose singular value is zero are zeroed out rather than divided.
 *
 * Fix vs. original: the lambda used double literals (`1.0`, `0.0`) and
 * `u * 1.0/sigma`, silently promoting the per-element math to double;
 * everything now stays in single precision.
 *
 * @param X input data matrix
 * @param Q right singular vectors (columns ordered to match w)
 * @param w singular values, one per column of U
 * @param U output matrix (X.rows() x k)
 * @param context device context
 */
void calculate_u(const Matrix<float> &X, const Matrix<float> &Q, const Matrix<float> &w, Matrix<float> &U, DeviceContext &context){
	multiply(X, Q, U, context, false, false, 1.0f); //A*V
	auto d_u = U.data();
	auto d_sigma = w.data();
	auto column_size = U.rows();
	auto counting = thrust::make_counting_iterator <int>(0);
	thrust::for_each(counting, counting+U.size(), [=]__device__(int idx){
		int column = idx/column_size;
		float sigma = d_sigma[column];
		float u = d_u[idx];
		if(sigma != 0.0f){
			d_u[idx] = u / sigma;
		} else{
			d_u[idx] = 0.0f;
		}
	} );
}
/**
* Conduct truncated SVD on a matrix
*
* @param _X
* @param _Q
* @param _w
* @param _U
* @param _explained_variance
* @param _explained_variance_ratio
* @param _param
*/
// Pipeline: XtX = X^T X -> exact eigen pairs (Q, w) -> singular values are
// sqrt(eigenvalues) -> reorder from ascending (eigen-solver order) to
// descending importance -> U = X V Sigma^-1 -> explained variance of the
// U*Sigma columns and its ratio against the total column variance of X.
// All device math runs in float; host in/out buffers are double.
void truncated_svd(const double* _X, double* _Q, double* _w, double* _U, double* _explained_variance, double* _explained_variance_ratio, params _param)
{
try
{
//Take in X matrix and allocate for X^TX
Matrix<float>X(_param.X_m, _param.X_n);
X.copy(_X);
Matrix<float>XtX(_param.X_n, _param.X_n);
//create context
DeviceContext context;
//Multiply X and Xt and output result to XtX
// (first transpose flag is true, so presumably this is X^T * X -- confirm
// against the multiply() signature)
multiply(X, X, XtX, context, true, false, 1.0f);
//Set up Q (V^T) and w (singular value) matrices (w is a matrix of size Q.rows() by 1; really just a vector
Matrix<float>Q(XtX.rows(), XtX.columns()); // n X n -> V^T
Matrix<float>w(Q.rows(), 1);
calculate_eigen_pairs_exact(XtX, Q, w, context);
//Obtain Q^T to obtain vector as row major order
Matrix<float>Qt(Q.columns(), Q.rows());
transpose(Q, Qt, context); //Needed for calculate_u()
Matrix<float>QtTrunc(_param.k, Qt.columns());
row_reverse_trunc_q(Qt, QtTrunc, context);
QtTrunc.copy_to_host(_Q); //Send to host
//Obtain square root of eigenvalues, which are singular values
// (clamps negative eigenvalues -- numerical noise -- to zero; note the
// comparison uses a double literal while elem is float)
w.transform([=]__device__(float elem){
if(elem > 0.0){
return std::sqrt(elem);
}else{
return 0.0f;
}
}
);
//Sort from biggest singular value to smallest
// (eigen solver returns ascending order, so a reverse suffices)
std::vector<double> w_temp(w.size());
// NOTE(review): w holds float, w_temp is double -- presumably copy_to_host
// is templated to convert; verify.
w.copy_to_host(w_temp.data()); //Send to host
std::reverse(w_temp.begin(), w_temp.end());
std::copy(w_temp.begin(), w_temp.begin() + _param.k, _w);
Matrix<float>sigma(_param.k, 1);
// only the first k (largest) singular values are kept on device
sigma.copy(w_temp.data());
//Get U matrix
Matrix<float>U(X.rows(), _param.k);
Matrix<float>QReversed(Q.rows(), Q.columns());
col_reverse_q(Q, QReversed, context);
calculate_u(X, QReversed, sigma, U, context);
U.copy_to_host(_U); //Send to host
//Explained Variance
Matrix<float>UmultSigma(U.rows(), U.columns());
//U * Sigma
multiply_diag(U, sigma, UmultSigma, context, false);
// column means of U*Sigma via a ones-vector matvec, then 1/m scaling
Matrix<float>UOnesSigma(UmultSigma.rows(), 1);
UOnesSigma.fill(1.0f);
Matrix<float>USigmaVar(_param.k, 1);
Matrix<float>USigmaColMean(_param.k, 1);
multiply(UmultSigma, UOnesSigma, USigmaColMean, context, true, false, 1.0f);
float m_usigma = UmultSigma.rows();
multiply(USigmaColMean, 1/m_usigma, context);
calc_var_numerator(UmultSigma, USigmaColMean, USigmaVar, context);
multiply(USigmaVar, 1/m_usigma, context);
USigmaVar.copy_to_host(_explained_variance)<fim>;
//Explained Variance Ratio
//Set aside matrix of 1's for getting sum of columnar variances
Matrix<float>XmultOnes(X.rows(), 1);
XmultOnes.fill(1.0f);
Matrix<float>XVar(X.columns(), 1);
Matrix<float>XColMean(X.columns(), 1);
multiply(X, XmultOnes, XColMean, context, true, false, 1.0f);
float m = X.rows();
multiply(XColMean, 1/m, context);
calc_var_numerator(X, XColMean, XVar, context);
multiply(XVar, 1/m, context);
Matrix<float>XVarSum(1,1);
// NOTE(review): XmultOnes has X.rows() entries but is reused here to sum
// XVar's X.columns() entries -- presumably only the first X.columns() ones
// are read; confirm X.rows() >= X.columns() is guaranteed.
multiply(XVar, XmultOnes, XVarSum, context, true, false, 1.0f);
Matrix<float>ExplainedVarRatio(_param.k, 1);
divide(USigmaVar, XVarSum, ExplainedVarRatio, context);
ExplainedVarRatio.copy_to_host(_explained_variance_ratio);
}
catch (const std::exception &e)
{
std::cerr << "tsvd error: " << e.what() << "\n";
}
catch (std::string e)
{
std::cerr << "tsvd error: " << e << "\n";
}
catch (...)
{
std::cerr << "tsvd error\n";
}
}
}
|
cbb38cf528d82ae31efd0f06ac7d26efc7c6a50d.hip | // !!! This is a file automatically generated by hipify!!!
//Includes for IntelliSense
#define _SIZE_T_DEFINED
#ifndef __HIPCC__
#define __HIPCC__
#endif
#ifndef __cplusplus
#define __cplusplus
#endif
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <texture_fetch_functions.h>
#include <builtin_types.h>
#include <vector_functions.h>
#include <float.h>
extern "C" {
#define VT 4
// Squared euclidean length of the offset (dx, dy).
// NOTE(review): when either component is exactly 0.0 the function returns
// 1.0 instead of the true squared distance -- presumably a guard against a
// zero denominator in the IDW weighting, but it also fires for points that
// are merely axis-aligned with the query (dx == 0, dy != 0). Confirm the
// `&&` (rather than checking both components for zero) is intended.
__device__ double distance(double dx, double dy)
{
if (dx != 0.0 && dy != 0.0)
return (dx) * (dx) + (dy) * (dy);
return 1.0;
}
#ifdef CUDA_3_5
// dynamic parallelism function call
// Per-element IDW term for a dynamic-parallelism launch:
// output[i] = data_z[i] / d^2 where d is already the squared distance,
// i.e. a distance^-4 weighting.
// Fixes vs. original: adds the missing `void` return type (the declaration
// was ill-formed C++), and parenthesises the divisor -- `z / d * d` parses
// as (z / d) * d == z, silently dropping the weighting.
__device__ void Idw4Item(const double * data_x, const double * data_y, const double * data_z, double * output, const double x, const double y, int N)
{
	int i = blockDim.x * blockIdx.x + threadIdx.x * VT;
	if (i < N)
	{
		double d = distance((data_x[i] - x), (data_y[i] - y));
		output[i] = data_z[i] / (d * d);
	}
}
__device__ double * data_output;
#endif
// Inverse-distance-weighted accumulation: adds to z the contribution of
// every data point whose squared distance d to (x, y) lies in (1, 90),
// each weighted by 1/d^2 (distance^-4, since d is already squared).
// Fix vs. original: `data_z[i] / d * d` parses as (data_z[i] / d) * d ==
// data_z[i], so the stated -4-power weighting was silently dropped; the
// divisor is now parenthesised.
__device__ double Idw4(const double * data_x, const double * data_y, const double * data_z, const double x, const double y, double z, int N)
{
	double d = 0.0;
#pragma unroll
	for (int i = 0; i < N; i++)
	{
		d = distance((data_x[i] - x), (data_y[i] - y)); // square of euclidian distance
		if (d < 90.0 && d > 1.0)
			z += data_z[i] / (d * d); // data value weighted by distance to -4 power
	}
	return z;
}
// Stub: the point-in-polygon test is not implemented yet and always reports
// "outside", so ClipRaster currently masks every cell to 0.
__device__ bool pointInPolygon(const double x, const double y, const double * xCoords, const double * yCoords, int nPoints)
{
//__shared__
return false;
}
// Device code
// One IDW interpolation per raster cell: Cz[i] = Idw4(...), seeded with
// Cy[i] as the initial z value (NOTE(review): passing the cell's y
// coordinate as the starting accumulator looks suspicious -- confirm).
// NOTE(review): the index scales threadIdx.x by VT but not blockIdx.x, so
// with more than one block the per-block index ranges overlap and leave
// gaps; the commented-out variant below looks like the intended VT-wide
// vectorised loop. Confirm the host launch configuration compensates.
__global__ void RasterInterpolate(const double* Ax, const double * Ay, const double * Az, const double* Cx, const double * Cy, double * Cz, int N, int nData)
{
int i = blockDim.x * blockIdx.x + threadIdx.x * VT;
if (i < N)
{
Cz[i] = Idw4(Ax, Ay, Az, Cx[i], Cy[i], Cy[i], nData);
}
/*if (i + VT < N)
{
#pragma unroll
for (int j = i; j <= i + VT; j++)
Cz[j] = Idw4(Ax, Ay, Az, Cx[j], Cy[j], Cy[j], nData);
}
else if (i < N)
{
for (int j = i; j < N; j++)
Cz[j] = Idw4(Ax, Ay, Az, Cx[j], Cy[j], Cy[j], nData);
}*/
// no shared memory is used, so this barrier has no visible effect
__syncthreads();
}
// Build a 0/1 clip mask: mask[i].x = 1 iff raster cell i lies inside the
// polygon described by (coordXs, coordYs) with nPoints vertices.
// Fixes vs. original: adds the missing `i < N` bounds guard (N was accepted
// but never used, so partial final blocks wrote out of bounds), and removes
// the per-thread `new uchar1()` that was never freed -- a device heap leak
// on every launch. The stored values ('\x01' / '\x00') are unchanged.
__global__ void ClipRaster(const double* Ax, const double * Ay, const double * coordXs, const double * coordYs, uchar1 * mask, int N, int nPoints)
{
	int i = blockDim.x * blockIdx.x + threadIdx.x;
	if (i < N)
	{
		mask[i] = make_uchar1(pointInPolygon(Ax[i], Ay[i], coordXs, coordYs, nPoints) ? 1 : 0);
	}
}
} | cbb38cf528d82ae31efd0f06ac7d26efc7c6a50d.cu |
//Includes for IntelliSense
#define _SIZE_T_DEFINED
#ifndef __CUDACC__
#define __CUDACC__
#endif
#ifndef __cplusplus
#define __cplusplus
#endif
#include <cuda.h>
#include <device_launch_parameters.h>
#include <texture_fetch_functions.h>
#include <builtin_types.h>
#include <vector_functions.h>
#include <float.h>
extern "C" {
#define VT 4
__device__ double distance(double dx, double dy)
{
if (dx != 0.0 && dy != 0.0)
return (dx) * (dx) + (dy) * (dy);
return 1.0;
}
#ifdef CUDA_3_5
// dynamic parallelism function call
// Per-element IDW term for a dynamic-parallelism launch:
// output[i] = data_z[i] / d^2 where d is already the squared distance,
// i.e. a distance^-4 weighting.
// Fixes vs. original: adds the missing `void` return type (the declaration
// was ill-formed C++), and parenthesises the divisor -- `z / d * d` parses
// as (z / d) * d == z, silently dropping the weighting.
__device__ void Idw4Item(const double * data_x, const double * data_y, const double * data_z, double * output, const double x, const double y, int N)
{
	int i = blockDim.x * blockIdx.x + threadIdx.x * VT;
	if (i < N)
	{
		double d = distance((data_x[i] - x), (data_y[i] - y));
		output[i] = data_z[i] / (d * d);
	}
}
__device__ double * data_output;
#endif
// Inverse-distance-weighted accumulation: adds to z the contribution of
// every data point whose squared distance d to (x, y) lies in (1, 90),
// each weighted by 1/d^2 (distance^-4, since d is already squared).
// Fix vs. original: `data_z[i] / d * d` parses as (data_z[i] / d) * d ==
// data_z[i], so the stated -4-power weighting was silently dropped; the
// divisor is now parenthesised.
__device__ double Idw4(const double * data_x, const double * data_y, const double * data_z, const double x, const double y, double z, int N)
{
	double d = 0.0;
#pragma unroll
	for (int i = 0; i < N; i++)
	{
		d = distance((data_x[i] - x), (data_y[i] - y)); // square of euclidian distance
		if (d < 90.0 && d > 1.0)
			z += data_z[i] / (d * d); // data value weighted by distance to -4 power
	}
	return z;
}
__device__ bool pointInPolygon(const double x, const double y, const double * xCoords, const double * yCoords, int nPoints)
{
//__shared__
return false;
}
// Device code
__global__ void RasterInterpolate(const double* Ax, const double * Ay, const double * Az, const double* Cx, const double * Cy, double * Cz, int N, int nData)
{
int i = blockDim.x * blockIdx.x + threadIdx.x * VT;
if (i < N)
{
Cz[i] = Idw4(Ax, Ay, Az, Cx[i], Cy[i], Cy[i], nData);
}
/*if (i + VT < N)
{
#pragma unroll
for (int j = i; j <= i + VT; j++)
Cz[j] = Idw4(Ax, Ay, Az, Cx[j], Cy[j], Cy[j], nData);
}
else if (i < N)
{
for (int j = i; j < N; j++)
Cz[j] = Idw4(Ax, Ay, Az, Cx[j], Cy[j], Cy[j], nData);
}*/
__syncthreads();
}
// Build a 0/1 clip mask: mask[i].x = 1 iff raster cell i lies inside the
// polygon described by (coordXs, coordYs) with nPoints vertices.
// Fixes vs. original: adds the missing `i < N` bounds guard (N was accepted
// but never used, so partial final blocks wrote out of bounds), and removes
// the per-thread `new uchar1()` that was never freed -- a device heap leak
// on every launch. The stored values ('\x01' / '\x00') are unchanged.
__global__ void ClipRaster(const double* Ax, const double * Ay, const double * coordXs, const double * coordYs, uchar1 * mask, int N, int nPoints)
{
	int i = blockDim.x * blockIdx.x + threadIdx.x;
	if (i < N)
	{
		mask[i] = make_uchar1(pointInPolygon(Ax[i], Ay[i], coordXs, coordYs, nPoints) ? 1 : 0);
	}
}
} |
4c691764c000f77c741e20639d6f08cfe63b9af7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Program to solve Laplace equation on a regular 3D grid
//
#include <cutil.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
////////////////////////////////////////////////////////////////////////
// define kernel block size
////////////////////////////////////////////////////////////////////////
#define BLOCK_X 32
#define BLOCK_Y 4
////////////////////////////////////////////////////////////////////////
// include kernel function
////////////////////////////////////////////////////////////////////////
#include "../benchmark_common.h"
#include "laplace3d_kernel.cu"
////////////////////////////////////////////////////////////////////////
// declaration, forward
////////////////////////////////////////////////////////////////////////
extern "C" void Gold_laplace3d(int NX,
int NY,
int NZ,
float* h_u1,
float* h_u2);
void printHelp(void);
////////////////////////////////////////////////////////////////////////
// Main program
////////////////////////////////////////////////////////////////////////
// int main(int argc, char **argv){
int main_LPS(hipStream_t stream_app, pthread_mutex_t* mutexapp, bool flag) {
// 'h_' prefix - CPU (host) memory space
int NX, NY, NZ, bx, by, i, j, k, ind, pitch;
size_t pitch_bytes;
float *h_u1, *h_u2, *h_u3, *h_foo, err;
unsigned int hTimer;
// 'd_' prefix - GPU (device) memory space
float *d_u1, *d_u2, *d_foo;
// check command line inputs
/*if(cutCheckCmdLineFlag( argc, (const char**)argv, "help")) {
printHelp();
return 1;
}
if( cutGetCmdLineArgumenti( argc, (const char**)argv, "nx", &NX) ) {
if( NX <= 99 ) {
printf("Illegal argument - nx must be greater than 99\n");
return -1;
}
}
else
NX = 100;
if( cutGetCmdLineArgumenti( argc, (const char**)argv, "ny", &NY) ) {
if( NY <= 99 ) {
printf("Illegal argument - ny must be greater than 99\n");
return -1;
}
}
else
NY = 100;
if( cutGetCmdLineArgumenti( argc, (const char**)argv, "nz", &NZ) ) {
if( NZ <= 99 ) {
printf("Illegal argument - nz must be greater than 99\n");
return -1;
}
}
else
NZ = 100;
if( cutGetCmdLineArgumenti( argc, (const char**)argv, "repeat", &REPEAT) ) {
if( REPEAT <= 0 ) {
printf("Illegal argument - repeat must be greater than zero\n");
return -1;
}
}
else
REPEAT = 1;
*/
NX = 100;
NY = 100;
NZ = 100;
// REPEAT = 1;
printf("\nGrid dimensions: %d x %d x %d\n", NX, NY, NZ);
// initialise card and timer
int deviceCount;
CUDA_SAFE_CALL_NO_SYNC(hipGetDeviceCount(&deviceCount));
if (deviceCount == 0) {
fprintf(stderr, "There is no device.\n");
exit(EXIT_FAILURE);
}
int dev;
for (dev = 0; dev < deviceCount; ++dev) {
hipDeviceProp_t deviceProp;
CUDA_SAFE_CALL_NO_SYNC(hipGetDeviceProperties(&deviceProp, dev));
if (deviceProp.major >= 1)
break;
}
if (dev == deviceCount) {
fprintf(stderr, "There is no device supporting CUDA.\n");
exit(EXIT_FAILURE);
} else
CUDA_SAFE_CALL(hipSetDevice(dev));
CUT_SAFE_CALL(cutCreateTimer(&hTimer));
// allocate memory for arrays
h_u1 = (float*)malloc(sizeof(float) * NX * NY * NZ);
h_u2 = (float*)malloc(sizeof(float) * NX * NY * NZ);
h_u3 = (float*)malloc(sizeof(float) * NX * NY * NZ);
CUDA_SAFE_CALL(hipMallocPitch((void**)&d_u1, &pitch_bytes,
sizeof(float) * NX, NY * NZ));
CUDA_SAFE_CALL(hipMallocPitch((void**)&d_u2, &pitch_bytes,
sizeof(float) * NX, NY * NZ));
pitch = pitch_bytes / sizeof(float);
// initialise u1
for (k = 0; k < NZ; k++) {
for (j = 0; j < NY; j++) {
for (i = 0; i < NX; i++) {
ind = i + j * NX + k * NX * NY;
if (i == 0 || i == NX - 1 || j == 0 || j == NY - 1 || k == 0 ||
k == NZ - 1)
h_u1[ind] = 1.0f; // Dirichlet b.c.'s
else
h_u1[ind] = 0.0f;
}
}
}
// copy u1 to device
CUT_SAFE_CALL(cutStartTimer(hTimer));
CUDA_SAFE_CALL(hipMemcpy2D(d_u1, pitch_bytes, h_u1, sizeof(float) * NX,
sizeof(float) * NX, NY * NZ,
hipMemcpyHostToDevice));
// CUDA_SAFE_CALL( hipDeviceSynchronize() );
if (flag)
CUDA_SAFE_CALL(hipStreamSynchronize(stream_app));
// else
// CUDA_SAFE_CALL( hipDeviceSynchronize() );
CUT_SAFE_CALL(cutStopTimer(hTimer));
printf("\nCopy u1 to device: %f (ms) \n", cutGetTimerValue(hTimer));
CUT_SAFE_CALL(cutResetTimer(hTimer));
// Set up the execution configuration
bx = 1 + (NX - 1) / BLOCK_X;
by = 1 + (NY - 1) / BLOCK_Y;
dim3 dimGrid(bx, by);
dim3 dimBlock(BLOCK_X, BLOCK_Y);
printf("\n dimGrid = %d %d %d \n", dimGrid.x, dimGrid.y, dimGrid.z);
printf(" dimBlock = %d %d %d \n", dimBlock.x, dimBlock.y, dimBlock.z);
// Execute GPU kernel
// CUDA_SAFE_CALL( hipDeviceSynchronize() );
if (flag) {
CUDA_SAFE_CALL(hipStreamSynchronize(stream_app));
} else {
CUDA_SAFE_CALL(hipDeviceSynchronize());
}
CUT_SAFE_CALL(cutResetTimer(hTimer));
CUT_SAFE_CALL(cutStartTimer(hTimer));
// for (i = 1; i <= REPEAT; ++i) {
hipLaunchKernelGGL(( GPU_laplace3d), dim3(dimGrid), dim3(dimBlock), 0, stream_app, NX, NY, NZ, pitch, d_u1,
d_u2);
d_foo = d_u1;
d_u1 = d_u2;
d_u2 = d_foo; // swap d_u1 and d_u3
// CUDA_SAFE_CALL( hipDeviceSynchronize() );
pthread_mutex_unlock(mutexapp);
if (flag) {
CUDA_SAFE_CALL(hipStreamSynchronize(stream_app));
} else {
CUDA_SAFE_CALL(hipDeviceSynchronize());
}
CUT_CHECK_ERROR("GPU_laplace3d execution failed\n");
//}
CUT_SAFE_CALL(cutStopTimer(hTimer));
printf("\n%dx GPU_laplace3d: %f (ms) \n", 1, cutGetTimerValue(hTimer));
CUT_SAFE_CALL(cutResetTimer(hTimer));
// Read back GPU results
CUT_SAFE_CALL(cutStartTimer(hTimer));
CUDA_SAFE_CALL(hipMemcpy2D(h_u2, sizeof(float) * NX, d_u1, pitch_bytes,
sizeof(float) * NX, NY * NZ,
hipMemcpyDeviceToHost));
CUT_SAFE_CALL(cutStopTimer(hTimer));
printf("\nCopy u2 to host: %f (ms) \n", cutGetTimerValue(hTimer));
CUT_SAFE_CALL(cutResetTimer(hTimer));
// print out corner of array
/*
for (k=0; k<3; k++) {
for (j=0; j<8; j++) {
for (i=0; i<8; i++) {
ind = i + j*NX + k*NX*NY;
printf(" %5.2f ", h_u2[ind]);
}
printf("\n");
}
printf("\n");
}
*/
// Gold treatment
CUT_SAFE_CALL(cutResetTimer(hTimer));
CUT_SAFE_CALL(cutStartTimer(hTimer));
// for (int i = 1; i <= REPEAT; ++i) {
Gold_laplace3d(NX, NY, NZ, h_u1, h_u3);
h_foo = h_u1;
h_u1 = h_u3;
h_u3 = h_foo; // swap h_u1 and h_u3
//}
CUT_SAFE_CALL(cutStopTimer(hTimer));
printf("\n%dx Gold_laplace3d: %f (ms) \n \n", 1, cutGetTimerValue(hTimer));
// print out corner of array
/*
for (k=0; k<3; k++) {
for (j=0; j<8; j++) {
for (i=0; i<8; i++) {
ind = i + j*NX + k*NX*NY;
printf(" %5.2f ", h_u1[ind]);
}
printf("\n");
}
printf("\n");
}
*/
// error check
err = 0.0;
for (k = 0; k < NZ; k++) {
for (j = 0; j < NY; j++) {
for (i = 0; i < NX; i++) {
ind = i + j * NX + k * NX * NY;
err += (h_u1[ind] - h_u2[ind]) * (h_u1[ind] - h_u2[ind]);
}
}
}
printf("\n rms error = %f \n", sqrt(err / (float)(NX * NY * NZ)));
// Release GPU and CPU memory
printf("CUDA_SAFE_CALL( hipFree(d_u1) );\n");
fflush(stdout);
CUDA_SAFE_CALL(hipFree(d_u1));
printf("CUDA_SAFE_CALL( hipFree(d_u2) );\n");
fflush(stdout);
CUDA_SAFE_CALL(hipFree(d_u2));
printf("free(h_u1);\n");
fflush(stdout);
free(h_u1);
printf("free(h_u2);\n");
fflush(stdout);
free(h_u2);
printf("free(h_u3);\n");
fflush(stdout);
free(h_u3);
CUT_SAFE_CALL(cutDeleteTimer(hTimer));
// CUT_EXIT(argc, argv);
return 0;
}
///////////////////////////////////////////////////////////////////////////
// Print help screen
///////////////////////////////////////////////////////////////////////////
/*void printHelp(void)
{
printf("Usage: laplace3d [OPTION]...\n");
printf("6-point stencil 3D Laplace test \n");
printf("\n");
printf("Example: run 100 iterations on a 256x128x128 grid\n");
printf("./laplace3d --nx=256 --ny=128 --nz=128 --repeat=100\n");
printf("\n");
printf("Options:\n");
printf("--help\t\t\tDisplay this help menu\n");
printf("--nx=[SIZE]\t\tGrid width\n");
printf("--ny=[SIZE]\t\tGrid height\n");
printf("--nz=[SIZE]\t\tGrid depth\n");
printf("--repeat=[COUNT]\tNumber of repetitions\n");
}*/
| 4c691764c000f77c741e20639d6f08cfe63b9af7.cu | //
// Program to solve Laplace equation on a regular 3D grid
//
#include <cutil.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
////////////////////////////////////////////////////////////////////////
// define kernel block size
////////////////////////////////////////////////////////////////////////
#define BLOCK_X 32
#define BLOCK_Y 4
////////////////////////////////////////////////////////////////////////
// include kernel function
////////////////////////////////////////////////////////////////////////
#include "../benchmark_common.h"
#include "laplace3d_kernel.cu"
////////////////////////////////////////////////////////////////////////
// declaration, forward
////////////////////////////////////////////////////////////////////////
extern "C" void Gold_laplace3d(int NX,
int NY,
int NZ,
float* h_u1,
float* h_u2);
void printHelp(void);
////////////////////////////////////////////////////////////////////////
// Main program
////////////////////////////////////////////////////////////////////////
// int main(int argc, char **argv){
int main_LPS(cudaStream_t stream_app, pthread_mutex_t* mutexapp, bool flag) {
// 'h_' prefix - CPU (host) memory space
int NX, NY, NZ, bx, by, i, j, k, ind, pitch;
size_t pitch_bytes;
float *h_u1, *h_u2, *h_u3, *h_foo, err;
unsigned int hTimer;
// 'd_' prefix - GPU (device) memory space
float *d_u1, *d_u2, *d_foo;
// check command line inputs
/*if(cutCheckCmdLineFlag( argc, (const char**)argv, "help")) {
printHelp();
return 1;
}
if( cutGetCmdLineArgumenti( argc, (const char**)argv, "nx", &NX) ) {
if( NX <= 99 ) {
printf("Illegal argument - nx must be greater than 99\n");
return -1;
}
}
else
NX = 100;
if( cutGetCmdLineArgumenti( argc, (const char**)argv, "ny", &NY) ) {
if( NY <= 99 ) {
printf("Illegal argument - ny must be greater than 99\n");
return -1;
}
}
else
NY = 100;
if( cutGetCmdLineArgumenti( argc, (const char**)argv, "nz", &NZ) ) {
if( NZ <= 99 ) {
printf("Illegal argument - nz must be greater than 99\n");
return -1;
}
}
else
NZ = 100;
if( cutGetCmdLineArgumenti( argc, (const char**)argv, "repeat", &REPEAT) ) {
if( REPEAT <= 0 ) {
printf("Illegal argument - repeat must be greater than zero\n");
return -1;
}
}
else
REPEAT = 1;
*/
NX = 100;
NY = 100;
NZ = 100;
// REPEAT = 1;
printf("\nGrid dimensions: %d x %d x %d\n", NX, NY, NZ);
// initialise card and timer
int deviceCount;
CUDA_SAFE_CALL_NO_SYNC(cudaGetDeviceCount(&deviceCount));
if (deviceCount == 0) {
fprintf(stderr, "There is no device.\n");
exit(EXIT_FAILURE);
}
int dev;
for (dev = 0; dev < deviceCount; ++dev) {
cudaDeviceProp deviceProp;
CUDA_SAFE_CALL_NO_SYNC(cudaGetDeviceProperties(&deviceProp, dev));
if (deviceProp.major >= 1)
break;
}
if (dev == deviceCount) {
fprintf(stderr, "There is no device supporting CUDA.\n");
exit(EXIT_FAILURE);
} else
CUDA_SAFE_CALL(cudaSetDevice(dev));
CUT_SAFE_CALL(cutCreateTimer(&hTimer));
// allocate memory for arrays
h_u1 = (float*)malloc(sizeof(float) * NX * NY * NZ);
h_u2 = (float*)malloc(sizeof(float) * NX * NY * NZ);
h_u3 = (float*)malloc(sizeof(float) * NX * NY * NZ);
CUDA_SAFE_CALL(cudaMallocPitch((void**)&d_u1, &pitch_bytes,
sizeof(float) * NX, NY * NZ));
CUDA_SAFE_CALL(cudaMallocPitch((void**)&d_u2, &pitch_bytes,
sizeof(float) * NX, NY * NZ));
pitch = pitch_bytes / sizeof(float);
// initialise u1
for (k = 0; k < NZ; k++) {
for (j = 0; j < NY; j++) {
for (i = 0; i < NX; i++) {
ind = i + j * NX + k * NX * NY;
if (i == 0 || i == NX - 1 || j == 0 || j == NY - 1 || k == 0 ||
k == NZ - 1)
h_u1[ind] = 1.0f; // Dirichlet b.c.'s
else
h_u1[ind] = 0.0f;
}
}
}
// copy u1 to device
CUT_SAFE_CALL(cutStartTimer(hTimer));
CUDA_SAFE_CALL(cudaMemcpy2D(d_u1, pitch_bytes, h_u1, sizeof(float) * NX,
sizeof(float) * NX, NY * NZ,
cudaMemcpyHostToDevice));
// CUDA_SAFE_CALL( cudaThreadSynchronize() );
if (flag)
CUDA_SAFE_CALL(cudaStreamSynchronize(stream_app));
// else
// CUDA_SAFE_CALL( cudaThreadSynchronize() );
CUT_SAFE_CALL(cutStopTimer(hTimer));
printf("\nCopy u1 to device: %f (ms) \n", cutGetTimerValue(hTimer));
CUT_SAFE_CALL(cutResetTimer(hTimer));
// Set up the execution configuration
bx = 1 + (NX - 1) / BLOCK_X;
by = 1 + (NY - 1) / BLOCK_Y;
dim3 dimGrid(bx, by);
dim3 dimBlock(BLOCK_X, BLOCK_Y);
printf("\n dimGrid = %d %d %d \n", dimGrid.x, dimGrid.y, dimGrid.z);
printf(" dimBlock = %d %d %d \n", dimBlock.x, dimBlock.y, dimBlock.z);
// Execute GPU kernel
// CUDA_SAFE_CALL( cudaThreadSynchronize() );
if (flag) {
CUDA_SAFE_CALL(cudaStreamSynchronize(stream_app));
} else {
CUDA_SAFE_CALL(cudaThreadSynchronize());
}
CUT_SAFE_CALL(cutResetTimer(hTimer));
CUT_SAFE_CALL(cutStartTimer(hTimer));
// for (i = 1; i <= REPEAT; ++i) {
GPU_laplace3d<<<dimGrid, dimBlock, 0, stream_app>>>(NX, NY, NZ, pitch, d_u1,
d_u2);
d_foo = d_u1;
d_u1 = d_u2;
d_u2 = d_foo; // swap d_u1 and d_u3
// CUDA_SAFE_CALL( cudaThreadSynchronize() );
pthread_mutex_unlock(mutexapp);
if (flag) {
CUDA_SAFE_CALL(cudaStreamSynchronize(stream_app));
} else {
CUDA_SAFE_CALL(cudaThreadSynchronize());
}
CUT_CHECK_ERROR("GPU_laplace3d execution failed\n");
//}
CUT_SAFE_CALL(cutStopTimer(hTimer));
printf("\n%dx GPU_laplace3d: %f (ms) \n", 1, cutGetTimerValue(hTimer));
CUT_SAFE_CALL(cutResetTimer(hTimer));
// Read back GPU results
CUT_SAFE_CALL(cutStartTimer(hTimer));
CUDA_SAFE_CALL(cudaMemcpy2D(h_u2, sizeof(float) * NX, d_u1, pitch_bytes,
sizeof(float) * NX, NY * NZ,
cudaMemcpyDeviceToHost));
CUT_SAFE_CALL(cutStopTimer(hTimer));
printf("\nCopy u2 to host: %f (ms) \n", cutGetTimerValue(hTimer));
CUT_SAFE_CALL(cutResetTimer(hTimer));
// print out corner of array
/*
for (k=0; k<3; k++) {
for (j=0; j<8; j++) {
for (i=0; i<8; i++) {
ind = i + j*NX + k*NX*NY;
printf(" %5.2f ", h_u2[ind]);
}
printf("\n");
}
printf("\n");
}
*/
// Gold treatment
CUT_SAFE_CALL(cutResetTimer(hTimer));
CUT_SAFE_CALL(cutStartTimer(hTimer));
// for (int i = 1; i <= REPEAT; ++i) {
Gold_laplace3d(NX, NY, NZ, h_u1, h_u3);
h_foo = h_u1;
h_u1 = h_u3;
h_u3 = h_foo; // swap h_u1 and h_u3
//}
CUT_SAFE_CALL(cutStopTimer(hTimer));
printf("\n%dx Gold_laplace3d: %f (ms) \n \n", 1, cutGetTimerValue(hTimer));
// print out corner of array
/*
for (k=0; k<3; k++) {
for (j=0; j<8; j++) {
for (i=0; i<8; i++) {
ind = i + j*NX + k*NX*NY;
printf(" %5.2f ", h_u1[ind]);
}
printf("\n");
}
printf("\n");
}
*/
// error check
err = 0.0;
for (k = 0; k < NZ; k++) {
for (j = 0; j < NY; j++) {
for (i = 0; i < NX; i++) {
ind = i + j * NX + k * NX * NY;
err += (h_u1[ind] - h_u2[ind]) * (h_u1[ind] - h_u2[ind]);
}
}
}
printf("\n rms error = %f \n", sqrt(err / (float)(NX * NY * NZ)));
// Release GPU and CPU memory
printf("CUDA_SAFE_CALL( cudaFree(d_u1) );\n");
fflush(stdout);
CUDA_SAFE_CALL(cudaFree(d_u1));
printf("CUDA_SAFE_CALL( cudaFree(d_u2) );\n");
fflush(stdout);
CUDA_SAFE_CALL(cudaFree(d_u2));
printf("free(h_u1);\n");
fflush(stdout);
free(h_u1);
printf("free(h_u2);\n");
fflush(stdout);
free(h_u2);
printf("free(h_u3);\n");
fflush(stdout);
free(h_u3);
CUT_SAFE_CALL(cutDeleteTimer(hTimer));
// CUT_EXIT(argc, argv);
return 0;
}
///////////////////////////////////////////////////////////////////////////
// Print help screen
///////////////////////////////////////////////////////////////////////////
/*void printHelp(void)
{
printf("Usage: laplace3d [OPTION]...\n");
printf("6-point stencil 3D Laplace test \n");
printf("\n");
printf("Example: run 100 iterations on a 256x128x128 grid\n");
printf("./laplace3d --nx=256 --ny=128 --nz=128 --repeat=100\n");
printf("\n");
printf("Options:\n");
printf("--help\t\t\tDisplay this help menu\n");
printf("--nx=[SIZE]\t\tGrid width\n");
printf("--ny=[SIZE]\t\tGrid height\n");
printf("--nz=[SIZE]\t\tGrid depth\n");
printf("--repeat=[COUNT]\tNumber of repetitions\n");
}*/
|
7beb34efcf88e2ffe0aa7ffbbe5c3e89726658ca.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <math.h>
#include <float.h>
#include <hip/hip_vector_types.h>
#include <vector_functions.h>
#include "cutil_math.h"
#define spring 0.5f
#define damping 0.02f
#define shear 0.1f
#define attraction 0.0f
#define boundaryDamping -0.5f
#define globalDamping 1.0f
#define gravity -0.03f
#define deltaTime 0.01f
#ifndef M_PI_F
#define M_PI_F 3.14159265358979323846f
#endif
// Reset every grid-cell head pointer to -1 ("empty list"), clearing the
// per-cell particle linked lists before they are rebuilt each step.
// NOTE(review): no bounds guard -- assumes the launch covers exactly the
// hList length; confirm against cuK_reset's launch configuration.
extern "C"
__global__
void resetHeadList(int* __restrict__ hList)
{
// workitem/worksize info
int index = blockIdx.x * blockDim.x + threadIdx.x;
hList[index] = -1;
}
// Insert each particle at the head of its grid cell's singly-linked list.
// The cell index is derived from the position (cell edge = 2 * radius,
// clamped to the grid); atomicExch swaps this particle in as the new head
// and pList[index] stores the previous head as the "next" pointer.
// NOTE(review): no bounds guard on index -- assumes the launch covers
// exactly the particle count.
extern "C"
__global__
void createList(const float4* __restrict__ posBuffer, int* __restrict__ hList, int* __restrict__ pList,
const float particleRadius, const int gridSize)
{
// workitem/worksize info
int index = blockIdx.x * blockDim.x + threadIdx.x;
float4 pos = posBuffer[index];
int xId = clamp((int)(pos.x / (2.0f*particleRadius)), 0, gridSize-1);
int yId = clamp((int)(pos.y / (2.0f*particleRadius)), 0, gridSize-1);
int zId = clamp((int)(pos.z / (2.0f*particleRadius)), 0, gridSize-1);
int gridId = zId * gridSize*gridSize + yId * gridSize + xId;
// head insert: returns the old head, which becomes this particle's next
int listId = atomicExch(hList + gridId, index);
pList[index] = listId;
}
// DEM-style sphere-sphere collision response: accumulates spring, damping,
// shear and attraction terms into *force. Leaves *force untouched when the
// spheres do not overlap.
__forceinline__ __device__ void collideSpheres(float4 posA, float4 posB, float4 velA, float4 velB, float radiusA, float radiusB, float4* force)
{
	float4 sep = posB - posA;
	float dist = length(sep);
	float contactDist = radiusA + radiusB;
	if (dist >= contactDist)
		return; // not touching: no contribution
	float4 normal = normalize(sep);
	float4 relVel = velB - velA;
	float4 tanVel = relVel - normal * dot(normal, relVel);
	*force = *force - normal * spring * (contactDist - dist); // repulsive spring
	*force = *force + relVel * damping;                       // relative-velocity damping
	*force = *force + tanVel * shear;                         // tangential shear
	*force = *force + sep * attraction;                       // optional attraction
}
// Accumulate collision forces for each particle by scanning the 3x3x3
// neighbourhood of grid cells around it, walking each cell's linked list
// (hList head, pList next pointers) and applying collideSpheres against
// every other particle found. Writes vel + force into vel1Buffer.
// NOTE(review): unlike createList, the cell ids here are not clamped --
// assumes all positions already lie inside the grid; confirm.
extern "C"
__global__
void collideList(const int* __restrict__ hList, const int* __restrict__ pList,
const float4* __restrict__ posBuffer, const float4* __restrict__ vel0Buffer, float4* __restrict__ vel1Buffer,
const float particleRadius, const int gridSize)
{
// workitem/worksize info
int index = blockIdx.x * blockDim.x + threadIdx.x;
float4 pos = posBuffer[index];
float4 vel = vel0Buffer[index];
float4 force = make_float4(0.0f, 0.0f, 0.0f, 0.0f);
// home cell of this particle (cell edge = 2 * radius)
int xId = (int)(pos.x / (2.0f*particleRadius));
int yId = (int)(pos.y / (2.0f*particleRadius));
int zId = (int)(pos.z / (2.0f*particleRadius));
// neighbourhood bounds, clipped at the grid edges
int xIdMin = max(xId-1, 0);
int yIdMin = max(yId-1, 0);
int zIdMin = max(zId-1, 0);
int xIdMax = min(xId+1, gridSize-1);
int yIdMax = min(yId+1, gridSize-1);
int zIdMax = min(zId+1, gridSize-1);
for(int k = zIdMin; k <= zIdMax; ++k)
{
for(int j = yIdMin; j <= yIdMax; ++j)
{
for(int i = xIdMin; i <= xIdMax; ++i)
{
int gridId = k * gridSize*gridSize + j * gridSize + i;
// walk the cell's linked list; -1 terminates
int listId = hList[gridId];
while(listId != -1)
{
int listIdNew = pList[listId];
// skip self-collision
if(index == listId)
{
listId = listIdNew;
continue;
}
float4 pos2 = posBuffer[listId];
float4 vel2 = vel0Buffer[listId];
collideSpheres(pos, pos2, vel, vel2, particleRadius, particleRadius, &force);
listId = listIdNew;
}
}
}
}
vel1Buffer[index] = vel + force;
}
// Explicit Euler step: apply gravity and global damping to the
// post-collision velocity (vel1Buffer), advance the position, then reflect
// off the six faces of the unit cube [r, 1-r]^3 with boundaryDamping
// applied to the normal component. Writes the results back into
// posBuffer and vel0Buffer (the "current" velocity for the next frame).
// NOTE(review): no bounds guard on index -- assumes the launch covers
// exactly the particle count.
extern "C"
__global__
void integrate(float4* __restrict__ posBuffer, float4* __restrict__ vel0Buffer, const float4* __restrict__ vel1Buffer, const float particleRadius)
{
// workitem/worksize info
int index = blockIdx.x * blockDim.x + threadIdx.x;
float4 pos = posBuffer[index];
float4 vel = vel1Buffer[index];
float4 g = make_float4(0.0f, gravity, 0.0f, 0.0f);
vel += g * deltaTime;
vel *= globalDamping;
pos += vel * deltaTime;
// clamp to the unit cube, damping the velocity component normal to each wall
if(pos.x < particleRadius)
{
pos.x = particleRadius;
vel.x *= boundaryDamping;
}
if(pos.x > 1.0f - particleRadius)
{
pos.x = 1.0f - particleRadius;
vel.x *= boundaryDamping;
}
if(pos.y < particleRadius)
{
pos.y = particleRadius;
vel.y *= boundaryDamping;
}
if(pos.y > 1.0f - particleRadius)
{
pos.y = 1.0f - particleRadius;
vel.y *= boundaryDamping;
}
if(pos.z < particleRadius)
{
pos.z = particleRadius;
vel.z *= boundaryDamping;
}
if(pos.z > 1.0f - particleRadius)
{
pos.z = 1.0f - particleRadius;
vel.z *= boundaryDamping;
}
posBuffer[index] = pos;
vel0Buffer[index] = vel;
}
// Host wrapper: clear the grid-cell head list.
// gSizeX is the total work-item count and lSizeX the threads per block
// (OpenCL-style global/local sizes; gSizeX is assumed to be a multiple of
// lSizeX, matching the original's integer division).
extern "C"
void cuK_reset(void* hList, int gSizeX, int lSizeX)
{
	dim3 threads(lSizeX, 1, 1);
	dim3 blocks(gSizeX / lSizeX, 1, 1);
	hipLaunchKernelGGL(resetHeadList, blocks, threads, 0, 0, (int*)hList);
}
// Host wrapper: rebuild the per-cell particle linked lists (createList).
// gSizeX = particle count, lSizeX = threads per block; gSizeX is assumed
// to be a multiple of lSizeX (integer division below truncates otherwise).
extern "C"
void cuK_create
(
void* pos, void* hList, void* pList, float particleRadius, int gridSize,
int gSizeX, int lSizeX
)
{
dim3 block(lSizeX, 1, 1);
dim3 grid(gSizeX / block.x, 1, 1);
createList << < grid, block >> >((float4*)pos, (int*)hList, (int*)pList, particleRadius, gridSize);
}
// Host wrapper: run the collision pass (collideList), reading velocities
// from vel0 and writing the post-collision velocities to vel1.
// gSizeX = particle count, lSizeX = threads per block; gSizeX is assumed
// to be a multiple of lSizeX (integer division below truncates otherwise).
extern "C"
void cuK_collide
(
void* hList, void* pList, void* pos, void* vel0, void* vel1, float particleRadius, int gridSize,
int gSizeX, int lSizeX
)
{
dim3 block(lSizeX, 1, 1);
dim3 grid(gSizeX / block.x, 1, 1);
collideList << < grid, block >> >((int*)hList, (int*)pList, (float4*)pos, (float4*)vel0, (float4*)vel1, particleRadius, gridSize);
}
// Host wrapper: integrate positions and velocities one time step
// (integrate kernel), consuming vel1 and writing back pos and vel0.
// gSizeX = particle count, lSizeX = threads per block; gSizeX is assumed
// to be a multiple of lSizeX (integer division below truncates otherwise).
extern "C"
void cuK_integrate
(
void* pos, void* vel0, void* vel1, float particleRadius,
int gSizeX, int lSizeX
)
{
dim3 block(lSizeX, 1, 1);
dim3 grid(gSizeX / block.x, 1, 1);
integrate << < grid, block >> >((float4*)pos, (float4*)vel0, (float4*)vel1, particleRadius);
} | 7beb34efcf88e2ffe0aa7ffbbe5c3e89726658ca.cu | #include <cuda.h>
#include <cuda_runtime_api.h>
#include <math.h>
#include <float.h>
#include <vector_types.h>
#include <vector_functions.h>
#include "cutil_math.h"
#define spring 0.5f
#define damping 0.02f
#define shear 0.1f
#define attraction 0.0f
#define boundaryDamping -0.5f
#define globalDamping 1.0f
#define gravity -0.03f
#define deltaTime 0.01f
#ifndef M_PI_F
#define M_PI_F 3.14159265358979323846f
#endif
extern "C"
__global__
void resetHeadList(int* __restrict__ hList)
{
// workitem/worksize info
int index = blockIdx.x * blockDim.x + threadIdx.x;
hList[index] = -1;
}
// Bins one particle into the uniform spatial grid by pushing its index onto
// the singly linked list of its cell (hList = per-cell head index, pList =
// per-particle "next" index, -1 terminates). One thread per particle; the
// launch is assumed to match the posBuffer length (no bounds guard).
extern "C"
__global__
void createList(const float4* __restrict__ posBuffer, int* __restrict__ hList, int* __restrict__ pList,
const float particleRadius, const int gridSize)
{
// workitem/worksize info
int index = blockIdx.x * blockDim.x + threadIdx.x;
float4 pos = posBuffer[index];
// Cell coordinates: cells are one particle diameter wide, clamped to grid.
int xId = clamp((int)(pos.x / (2.0f*particleRadius)), 0, gridSize-1);
int yId = clamp((int)(pos.y / (2.0f*particleRadius)), 0, gridSize-1);
int zId = clamp((int)(pos.z / (2.0f*particleRadius)), 0, gridSize-1);
int gridId = zId * gridSize*gridSize + yId * gridSize + xId;
// Lock-free list push: atomically install this particle as the new head,
// then link the previous head behind it. Safe because the lists are only
// traversed by a later kernel (collideList), after this kernel finishes.
int listId = atomicExch(hList + gridId, index);
pList[index] = listId;
}
// Accumulates the contact force between two spheres into *force.
// No-op when the spheres do not overlap. Force model: linear spring
// repulsion plus relative-velocity damping, tangential shear, and an
// (off-by-default) attraction term.
__forceinline__ __device__ void collideSpheres(float4 posA, float4 posB, float4 velA, float4 velB, float radiusA, float radiusB, float4* force)
{
    const float4 delta = posB - posA;
    const float dist = length(delta);
    const float contactDist = radiusA + radiusB;
    if (dist >= contactDist)
        return;
    const float4 n = normalize(delta);
    const float4 relVel = velB - velA;
    const float4 tanVel = relVel - n * dot(n, relVel);
    float4 f = *force;
    f = f - n * spring * (contactDist - dist);  // spring repulsion
    f = f + relVel * damping;                   // dashpot damping
    f = f + tanVel * shear;                     // tangential friction/shear
    f = f + delta * attraction;                 // attraction (0.0f by default)
    *force = f;
}
// For each particle, scans the 3x3x3 neighborhood of grid cells and
// accumulates pairwise contact forces against every particle stored in those
// cells' linked lists, then writes the updated velocity to vel1Buffer.
// Velocities are read from vel0Buffer and written to vel1Buffer so every
// thread sees a consistent pre-collision snapshot (double buffering).
extern "C"
__global__
void collideList(const int* __restrict__ hList, const int* __restrict__ pList,
const float4* __restrict__ posBuffer, const float4* __restrict__ vel0Buffer, float4* __restrict__ vel1Buffer,
const float particleRadius, const int gridSize)
{
// workitem/worksize info
int index = blockIdx.x * blockDim.x + threadIdx.x;
float4 pos = posBuffer[index];
float4 vel = vel0Buffer[index];
float4 force = make_float4(0.0f, 0.0f, 0.0f, 0.0f);
// Home cell of this particle (cells are one diameter wide). Unlike
// createList these are not clamped here; the min/max below clamp the
// neighborhood bounds instead.
int xId = (int)(pos.x / (2.0f*particleRadius));
int yId = (int)(pos.y / (2.0f*particleRadius));
int zId = (int)(pos.z / (2.0f*particleRadius));
int xIdMin = max(xId-1, 0);
int yIdMin = max(yId-1, 0);
int zIdMin = max(zId-1, 0);
int xIdMax = min(xId+1, gridSize-1);
int yIdMax = min(yId+1, gridSize-1);
int zIdMax = min(zId+1, gridSize-1);
for(int k = zIdMin; k <= zIdMax; ++k)
{
for(int j = yIdMin; j <= yIdMax; ++j)
{
for(int i = xIdMin; i <= xIdMax; ++i)
{
int gridId = k * gridSize*gridSize + j * gridSize + i;
// Walk the cell's singly linked list (-1 terminates).
int listId = hList[gridId];
while(listId != -1)
{
int listIdNew = pList[listId];
// Skip self-interaction.
if(index == listId)
{
listId = listIdNew;
continue;
}
float4 pos2 = posBuffer[listId];
float4 vel2 = vel0Buffer[listId];
collideSpheres(pos, pos2, vel, vel2, particleRadius, particleRadius, &force);
listId = listIdNew;
}
}
}
}
// Velocity update only; position advance happens in integrate().
vel1Buffer[index] = vel + force;
}
// Advances each particle one time step: applies gravity and global damping,
// integrates position, and bounces the particle off the walls of the unit
// cube (keeping centers inside [particleRadius, 1 - particleRadius] per
// axis). Reads the post-collision velocity from vel1Buffer and writes the
// result back to posBuffer/vel0Buffer, completing the double-buffer swap.
extern "C"
__global__
void integrate(float4* __restrict__ posBuffer, float4* __restrict__ vel0Buffer, const float4* __restrict__ vel1Buffer, const float particleRadius)
{
// workitem/worksize info
int index = blockIdx.x * blockDim.x + threadIdx.x;
float4 pos = posBuffer[index];
float4 vel = vel1Buffer[index];
float4 g = make_float4(0.0f, gravity, 0.0f, 0.0f);
// Semi-implicit Euler: update velocity first, then advance position.
vel += g * deltaTime;
vel *= globalDamping;
pos += vel * deltaTime;
// Wall handling: clamp position to the wall and flip/damp the normal
// velocity component (boundaryDamping is negative, so *= reverses it).
if(pos.x < particleRadius)
{
pos.x = particleRadius;
vel.x *= boundaryDamping;
}
if(pos.x > 1.0f - particleRadius)
{
pos.x = 1.0f - particleRadius;
vel.x *= boundaryDamping;
}
if(pos.y < particleRadius)
{
pos.y = particleRadius;
vel.y *= boundaryDamping;
}
if(pos.y > 1.0f - particleRadius)
{
pos.y = 1.0f - particleRadius;
vel.y *= boundaryDamping;
}
if(pos.z < particleRadius)
{
pos.z = particleRadius;
vel.z *= boundaryDamping;
}
if(pos.z > 1.0f - particleRadius)
{
pos.z = 1.0f - particleRadius;
vel.z *= boundaryDamping;
}
posBuffer[index] = pos;
vel0Buffer[index] = vel;
}
// Host wrapper: resets all grid-cell linked-list heads to "empty" (-1).
// gSizeX = total threads, lSizeX = threads per block; gSizeX is assumed to
// be a multiple of lSizeX (the kernel has no bounds guard).
extern "C"
void cuK_reset(void* hList, int gSizeX, int lSizeX)
{
    const dim3 threads(lSizeX, 1, 1);
    const dim3 blocks(gSizeX / lSizeX, 1, 1);
    resetHeadList<<<blocks, threads>>>((int*)hList);
}
// Host wrapper: bins all particles into the uniform grid's per-cell
// linked lists (see createList). gSizeX = total threads, lSizeX = threads
// per block; gSizeX is assumed to be a multiple of lSizeX.
extern "C"
void cuK_create(void* pos, void* hList, void* pList, float particleRadius, int gridSize,
                int gSizeX, int lSizeX)
{
    const dim3 threads(lSizeX, 1, 1);
    const dim3 blocks(gSizeX / lSizeX, 1, 1);
    createList<<<blocks, threads>>>((float4*)pos, (int*)hList, (int*)pList,
                                    particleRadius, gridSize);
}
// Host wrapper: runs the neighborhood collision pass, reading velocities
// from vel0 and writing post-collision velocities to vel1 (see collideList).
// gSizeX = total threads, lSizeX = threads per block.
extern "C"
void cuK_collide(void* hList, void* pList, void* pos, void* vel0, void* vel1, float particleRadius, int gridSize,
                 int gSizeX, int lSizeX)
{
    const dim3 threads(lSizeX, 1, 1);
    const dim3 blocks(gSizeX / lSizeX, 1, 1);
    collideList<<<blocks, threads>>>((int*)hList, (int*)pList, (float4*)pos,
                                     (float4*)vel0, (float4*)vel1,
                                     particleRadius, gridSize);
}
extern "C"
void cuK_integrate
(
void* pos, void* vel0, void* vel1, float particleRadius,
int gSizeX, int lSizeX
)
{
dim3 block(lSizeX, 1, 1);
dim3 grid(gSizeX / block.x, 1, 1);
integrate << < grid, block >> >((float4*)pos, (float4*)vel0, (float4*)vel1, particleRadius);
} |
7145cd0de63a2b40979428bb963a58fd9aa199c0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <cstdlib>
#include <vector>
// Element-wise c[i] = a[i] + b[i] for i in [0, n); one thread per element.
__global__ void vectorAdd(int* a, int* b, int* c, int n) {
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n)
        return;  // tail guard: the grid may overshoot n
    c[i] = a[i] + b[i];
}
// Smoke test: adds two 1M-element vectors on the device (2 + 3 per element)
// and verifies every result equals 5, printing "Error" on mismatch.
int main() {
    const int n = 1 << 20;
    const size_t bytes = sizeof(int) * static_cast<size_t>(n);

    // Host data: a[i] = 2, b[i] = 3, expected sum 5.
    std::vector<int> h_a(n, 2);
    std::vector<int> h_b(n, 3);
    std::vector<int> h_c(n);

    // Abort with a readable message on any HIP API failure; the original
    // ignored every return code, which silently hides alloc/copy errors.
    auto check = [](hipError_t err, const char* what) {
        if (err != hipSuccess) {
            std::cerr << what << " failed: " << hipGetErrorString(err) << "\n";
            std::exit(EXIT_FAILURE);
        }
    };

    int* d_a = nullptr;
    int* d_b = nullptr;
    int* d_c = nullptr;
    check(hipMalloc(&d_a, bytes), "hipMalloc d_a");
    check(hipMalloc(&d_b, bytes), "hipMalloc d_b");
    check(hipMalloc(&d_c, bytes), "hipMalloc d_c");

    const int block_size = 1024;
    // Integer ceil-division replaces the float-based ceil: same result,
    // no rounding hazards.
    const int grid_size = (n + block_size - 1) / block_size;
    std::cout<<"grid size: "<<grid_size<<"\n";

    check(hipMemcpy(d_a, h_a.data(), bytes, hipMemcpyHostToDevice), "hipMemcpy a");
    check(hipMemcpy(d_b, h_b.data(), bytes, hipMemcpyHostToDevice), "hipMemcpy b");
    std::cout<<"Mem copy successfull\n";

    hipLaunchKernelGGL(vectorAdd, dim3(grid_size), dim3(block_size), 0, 0, d_a, d_b, d_c, n);
    check(hipGetLastError(), "kernel launch");

    // Blocking device-to-host copy also synchronizes with the kernel.
    check(hipMemcpy(h_c.data(), d_c, bytes, hipMemcpyDeviceToHost), "hipMemcpy c");

    for (int i = 0; i < n; ++i) {
        if (h_c[i] != 5) {
            std::cout<<"Error\n";
            break;
        }
    }

    hipFree(d_a);
    hipFree(d_b);
    hipFree(d_c);
    return 0;
}
| 7145cd0de63a2b40979428bb963a58fd9aa199c0.cu | #include <iostream>
#include <cstdlib>
#include <vector>
// Element-wise c[i] = a[i] + b[i] for i in [0, n); one thread per element.
__global__ void vectorAdd(int* a, int* b, int* c, int n) {
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n)
        return;  // tail guard: the grid may overshoot n
    c[i] = a[i] + b[i];
}
// Smoke test: adds two 1M-element vectors on the device (2 + 3 per element)
// and verifies every result equals 5, printing "Error" on mismatch.
int main() {
    const int n = 1 << 20;
    const size_t bytes = sizeof(int) * static_cast<size_t>(n);

    // Host data: a[i] = 2, b[i] = 3, expected sum 5.
    std::vector<int> h_a(n, 2);
    std::vector<int> h_b(n, 3);
    std::vector<int> h_c(n);

    // Abort with a readable message on any CUDA API failure; the original
    // ignored every return code, which silently hides alloc/copy errors.
    auto check = [](cudaError_t err, const char* what) {
        if (err != cudaSuccess) {
            std::cerr << what << " failed: " << cudaGetErrorString(err) << "\n";
            std::exit(EXIT_FAILURE);
        }
    };

    int* d_a = nullptr;
    int* d_b = nullptr;
    int* d_c = nullptr;
    check(cudaMalloc(&d_a, bytes), "cudaMalloc d_a");
    check(cudaMalloc(&d_b, bytes), "cudaMalloc d_b");
    check(cudaMalloc(&d_c, bytes), "cudaMalloc d_c");

    const int block_size = 1024;
    // Integer ceil-division replaces the float-based ceil: same result,
    // no rounding hazards.
    const int grid_size = (n + block_size - 1) / block_size;
    std::cout<<"grid size: "<<grid_size<<"\n";

    check(cudaMemcpy(d_a, h_a.data(), bytes, cudaMemcpyHostToDevice), "cudaMemcpy a");
    check(cudaMemcpy(d_b, h_b.data(), bytes, cudaMemcpyHostToDevice), "cudaMemcpy b");
    std::cout<<"Mem copy successfull\n";

    vectorAdd<<<grid_size, block_size>>>(d_a, d_b, d_c, n);
    check(cudaGetLastError(), "kernel launch");

    // Blocking device-to-host copy also synchronizes with the kernel.
    check(cudaMemcpy(h_c.data(), d_c, bytes, cudaMemcpyDeviceToHost), "cudaMemcpy c");

    for (int i = 0; i < n; ++i) {
        if (h_c[i] != 5) {
            std::cout<<"Error\n";
            break;
        }
    }

    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    return 0;
}
|
be70406859549a7fbb6ffd2a465a6ecb49226866.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime_api.h> // CUDA's, not caffe's, for fabs, signbit
#include <thrust/device_vector.h>
#include <thrust/functional.h> // thrust::plus
#include <thrust/reduce.h>
#include <cmath>
#include "caffe/common.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
// C = alpha * op(A) * op(B) + beta * C for row-major M x K / K x N inputs.
// hipBLAS/cuBLAS is column-major, so the call computes the transposed
// product by swapping the operand order (B before A) and the M/N dims —
// the row-major C then falls out directly.
template <>
void caffe_gpu_gemm<float>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const float alpha, const float* A, const float* B, const float beta,
float* C) {
// Note that cublas follows fortran order.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
CUBLAS_CHECK(hipblasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
// Double-precision counterpart of the float specialization.
template <>
void caffe_gpu_gemm<double>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const double alpha, const double* A, const double* B, const double beta,
double* C) {
// Note that cublas follows fortran order.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
CUBLAS_CHECK(hipblasDgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
// y = alpha * op(A) * x + beta * y for a row-major M x N matrix A.
// Because hipBLAS/cuBLAS is column-major, the transpose flag is inverted
// (NoTrans -> OP_T and vice versa) so the row-major layout works out.
template <>
void caffe_gpu_gemv<float>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const float alpha, const float* A, const float* x,
const float beta, float* y) {
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
CUBLAS_CHECK(hipblasSgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
// Double-precision counterpart of the float specialization.
template <>
void caffe_gpu_gemv<double>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const double alpha, const double* A, const double* x,
const double beta, double* y) {
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
CUBLAS_CHECK(hipblasDgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_axpy<float>(const int N, const float alpha, const float* X,
float* Y) {
CUBLAS_CHECK(hipblasSaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
template <>
void caffe_gpu_axpy<double>(const int N, const double alpha, const double* X,
double* Y) {
CUBLAS_CHECK(hipblasDaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
// Copies N bytes from X to Y; hipMemcpyDefault lets the runtime infer the
// direction from the pointer kinds. A self-copy is a no-op.
void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) {
  if (X == Y) {
    return;  // nothing to do when source and destination alias
  }
  CUDA_CHECK(hipMemcpy(Y, X, N, hipMemcpyDefault));  // NOLINT(caffe/alt_fn)
}
template <>
void caffe_gpu_scal<float>(const int N, const float alpha, float *X) {
CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_scal<double>(const int N, const double alpha, double *X) {
CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
// Scales X[0..N) by alpha on the given HIP stream. The shared cuBLAS
// handle's current stream is saved, redirected to `str` for the scal call,
// and restored afterwards so other users of the handle are unaffected.
template <>
void caffe_gpu_scal<float>(const int N, const float alpha, float* X,
hipStream_t str) {
hipStream_t initial_stream;
CUBLAS_CHECK(hipblasGetStream(Caffe::cublas_handle(), &initial_stream));
CUBLAS_CHECK(hipblasSetStream(Caffe::cublas_handle(), str));
CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), N, &alpha, X, 1));
CUBLAS_CHECK(hipblasSetStream(Caffe::cublas_handle(), initial_stream));
}
// Double-precision counterpart; identical save/redirect/restore sequence.
template <>
void caffe_gpu_scal<double>(const int N, const double alpha, double* X,
hipStream_t str) {
hipStream_t initial_stream;
CUBLAS_CHECK(hipblasGetStream(Caffe::cublas_handle(), &initial_stream));
CUBLAS_CHECK(hipblasSetStream(Caffe::cublas_handle(), str));
CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), N, &alpha, X, 1));
CUBLAS_CHECK(hipblasSetStream(Caffe::cublas_handle(), initial_stream));
}
// Y = alpha * X + beta * Y, composed from two BLAS level-1 calls:
// scale Y by beta in place, then accumulate alpha * X.
template <>
void caffe_gpu_axpby<float>(const int N, const float alpha, const float* X,
const float beta, float* Y) {
caffe_gpu_scal<float>(N, beta, Y);
caffe_gpu_axpy<float>(N, alpha, X, Y);
}
// Double-precision counterpart of the float specialization.
template <>
void caffe_gpu_axpby<double>(const int N, const double alpha, const double* X,
const double beta, double* Y) {
caffe_gpu_scal<double>(N, beta, Y);
caffe_gpu_axpy<double>(N, alpha, X, Y);
}
template <>
void caffe_gpu_dot<float>(const int n, const float* x, const float* y,
float* out) {
CUBLAS_CHECK(hipblasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_dot<double>(const int n, const double* x, const double* y,
double * out) {
CUBLAS_CHECK(hipblasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_asum<float>(const int n, const float* x, float* y) {
CUBLAS_CHECK(hipblasSasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_asum<double>(const int n, const double* x, double* y) {
CUBLAS_CHECK(hipblasDasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_scale<float>(const int n, const float alpha, const float *x,
float* y) {
CUBLAS_CHECK(hipblasScopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
template <>
void caffe_gpu_scale<double>(const int n, const double alpha, const double *x,
double* y) {
CUBLAS_CHECK(hipblasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
// Device kernel: fills y[0..n) with the scalar alpha (grid-stride via
// CUDA_KERNEL_LOOP).
template <typename Dtype>
__global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = alpha;
}
}
// Fills Y[0..N) with alpha. Fast path: alpha == 0 is handled with a byte
// memset, which is bit-exact for int/float/double zero; other values need
// the fill kernel.
template <typename Dtype>
void caffe_gpu_set(const int N, const Dtype alpha, Dtype* Y) {
if (alpha == 0) {
CUDA_CHECK(hipMemset(Y, 0, sizeof(Dtype) * N)); // NOLINT(caffe/alt_fn)
return;
}
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( set_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, alpha, Y);
}
// Explicit instantiations for the element types used elsewhere in Caffe.
template void caffe_gpu_set<int>(const int N, const int alpha, int* Y);
template void caffe_gpu_set<float>(const int N, const float alpha, float* Y);
template void caffe_gpu_set<double>(const int N, const double alpha, double* Y);
template <typename Dtype>
__global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] += alpha;
}
}
template <>
void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_scalar_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, alpha, Y);
}
template <>
void caffe_gpu_add_scalar(const int N, const double alpha, double* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_scalar_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, alpha, Y);
}
template <typename Dtype>
__global__ void add_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] + b[index];
}
}
template <>
void caffe_gpu_add<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_add<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void sub_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] - b[index];
}
}
template <>
void caffe_gpu_sub<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sub_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_sub<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sub_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void mul_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] * b[index];
}
}
template <>
void caffe_gpu_mul<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( mul_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_mul<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( mul_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void div_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] / b[index];
}
}
template <>
void caffe_gpu_div<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( div_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_div<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( div_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = abs(a[index]);
}
}
template <>
void caffe_gpu_abs<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( abs_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <>
void caffe_gpu_abs<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( abs_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <typename Dtype>
__global__ void exp_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = exp(a[index]);
}
}
template <>
void caffe_gpu_exp<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( exp_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <>
void caffe_gpu_exp<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( exp_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <typename Dtype>
__global__ void log_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = log(a[index]);
}
}
template <>
void caffe_gpu_log<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( log_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <>
void caffe_gpu_log<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( log_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <typename Dtype>
__global__ void powx_kernel(const int n, const Dtype* a,
const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = pow(a[index], alpha);
}
}
template <>
void caffe_gpu_powx<float>(const int N, const float* a,
const float alpha, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( powx_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, alpha, y);
}
template <>
void caffe_gpu_powx<double>(const int N, const double* a,
const double alpha, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( powx_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, alpha, y);
}
// Device kernels: element-wise round-to-nearest (round-to-nearest-even
// intrinsics), keeping the floating-point element type.
template <typename Dtype>
__global__ void round_kernel(const int n, const Dtype* a, Dtype* y);
// Float path: __float2int_rn rounds through a 32-bit int.
template <>
__global__ void round_kernel<float>(const int n, const float* a, float* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = (float) __float2int_rn(a[index]);
}
}
// Double path: __double2ll_rn rounds through a 64-bit int.
// NOTE(review): inputs outside the int/long long range are not
// representable on these paths — presumably callers stay well inside;
// confirm before relying on large magnitudes.
template <>
__global__ void round_kernel<double>(const int n, const double* a, double* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = (double) __double2ll_rn(a[index]);
}
}
// Host entry points: y = round(a) element-wise on the GPU.
template <>
void caffe_gpu_round<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( round_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <>
void caffe_gpu_round<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( round_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
// Device kernels: bitwise-AND a mask with the raw bit pattern of each
// element, reinterpreting float/double bits as integers and back.
template <typename Dtype, typename Mtype>
__global__ void and_kernel(const int n, const Mtype m, const Dtype* a, Dtype* y);
template <>
__global__ void and_kernel<float, unsigned long>(const int n, const unsigned long m, const float* a, float* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = __int_as_float(m & __float_as_int(a[index]));
}
}
template <>
__global__ void and_kernel<double, unsigned long long>(const int n, const unsigned long long m, const double* a, double* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = __longlong_as_double(m & __double_as_longlong(a[index]));
}
}
// Host entry points: y[i] = bits(a[i]) & m, with the mask supplied as a
// std::bitset sized to the element's bit width (32 for float, 64 for
// double) and converted to an unsigned integer for the kernel.
template <>
void caffe_gpu_and<float>(const int N, const std::bitset<8*sizeof(float)> m,
const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( and_kernel<float, unsigned long>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, m.to_ulong(), a, y);
}
template <>
void caffe_gpu_and<double>(const int N, const std::bitset<8*sizeof(double)> m,
const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( and_kernel<double, unsigned long long>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, m.to_ullong(), a, y);
}
template <typename Dtype>
__global__ void sqrt_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = sqrt(a[index]);
}
}
template <>
void caffe_gpu_sqrt<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sqrt_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <>
void caffe_gpu_sqrt<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sqrt_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index])
- (x[index] < Dtype(0)));
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index]));
// Fills r[0..n) with raw 32-bit random integers from the shared generator.
void caffe_gpu_rng_uniform(const int n, unsigned int* r) {
CURAND_CHECK(hiprandGenerate(Caffe::curand_generator(), r, n));
}
// Uniform floats on the device: draw from the generator's canonical unit
// interval (cuRAND documents this as excluding 0.0 and including 1.0),
// then remap affinely to the requested range: r = a + (b - a) * u.
// The scale and shift are skipped when they are identities.
template <>
void caffe_gpu_rng_uniform<float>(const int n, const float a, const float b,
float* r) {
CURAND_CHECK(hiprandGenerateUniform(Caffe::curand_generator(), r, n));
const float range = b - a;
if (range != static_cast<float>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<float>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
// Double-precision counterpart of the float overload.
template <>
void caffe_gpu_rng_uniform<double>(const int n, const double a, const double b,
double* r) {
CURAND_CHECK(hiprandGenerateUniformDouble(Caffe::curand_generator(), r, n));
const double range = b - a;
if (range != static_cast<double>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<double>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
template <>
void caffe_gpu_rng_gaussian(const int n, const float mu, const float sigma,
float* r) {
CURAND_CHECK(
hiprandGenerateNormal(Caffe::curand_generator(), r, n, mu, sigma));
}
template <>
void caffe_gpu_rng_gaussian(const int n, const double mu, const double sigma,
double* r) {
CURAND_CHECK(
hiprandGenerateNormalDouble(Caffe::curand_generator(), r, n, mu, sigma));
}
} // namespace caffe
| be70406859549a7fbb6ffd2a465a6ecb49226866.cu | #include <cuda_runtime_api.h> // CUDA's, not caffe's, for fabs, signbit
#include <thrust/device_vector.h>
#include <thrust/functional.h> // thrust::plus
#include <thrust/reduce.h>
#include <cmath>
#include "caffe/common.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <>
void caffe_gpu_gemm<float>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const float alpha, const float* A, const float* B, const float beta,
float* C) {
// Note that cublas follows fortran order.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
CUBLAS_CHECK(cublasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void caffe_gpu_gemm<double>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const double alpha, const double* A, const double* B, const double beta,
double* C) {
// Note that cublas follows fortran order.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
CUBLAS_CHECK(cublasDgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void caffe_gpu_gemv<float>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const float alpha, const float* A, const float* x,
const float beta, float* y) {
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
CUBLAS_CHECK(cublasSgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_gemv<double>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const double alpha, const double* A, const double* x,
const double beta, double* y) {
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
CUBLAS_CHECK(cublasDgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_axpy<float>(const int N, const float alpha, const float* X,
float* Y) {
CUBLAS_CHECK(cublasSaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
template <>
void caffe_gpu_axpy<double>(const int N, const double alpha, const double* X,
double* Y) {
CUBLAS_CHECK(cublasDaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) {
if (X != Y) {
CUDA_CHECK(cudaMemcpy(Y, X, N, cudaMemcpyDefault)); // NOLINT(caffe/alt_fn)
}
}
template <>
void caffe_gpu_scal<float>(const int N, const float alpha, float *X) {
CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_scal<double>(const int N, const double alpha, double *X) {
CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_scal<float>(const int N, const float alpha, float* X,
cudaStream_t str) {
cudaStream_t initial_stream;
CUBLAS_CHECK(cublasGetStream(Caffe::cublas_handle(), &initial_stream));
CUBLAS_CHECK(cublasSetStream(Caffe::cublas_handle(), str));
CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), N, &alpha, X, 1));
CUBLAS_CHECK(cublasSetStream(Caffe::cublas_handle(), initial_stream));
}
template <>
void caffe_gpu_scal<double>(const int N, const double alpha, double* X,
cudaStream_t str) {
cudaStream_t initial_stream;
CUBLAS_CHECK(cublasGetStream(Caffe::cublas_handle(), &initial_stream));
CUBLAS_CHECK(cublasSetStream(Caffe::cublas_handle(), str));
CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), N, &alpha, X, 1));
CUBLAS_CHECK(cublasSetStream(Caffe::cublas_handle(), initial_stream));
}
template <>
void caffe_gpu_axpby<float>(const int N, const float alpha, const float* X,
const float beta, float* Y) {
caffe_gpu_scal<float>(N, beta, Y);
caffe_gpu_axpy<float>(N, alpha, X, Y);
}
template <>
void caffe_gpu_axpby<double>(const int N, const double alpha, const double* X,
const double beta, double* Y) {
caffe_gpu_scal<double>(N, beta, Y);
caffe_gpu_axpy<double>(N, alpha, X, Y);
}
template <>
void caffe_gpu_dot<float>(const int n, const float* x, const float* y,
float* out) {
CUBLAS_CHECK(cublasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_dot<double>(const int n, const double* x, const double* y,
double * out) {
CUBLAS_CHECK(cublasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_asum<float>(const int n, const float* x, float* y) {
CUBLAS_CHECK(cublasSasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_asum<double>(const int n, const double* x, double* y) {
CUBLAS_CHECK(cublasDasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_scale<float>(const int n, const float alpha, const float *x,
float* y) {
CUBLAS_CHECK(cublasScopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
template <>
void caffe_gpu_scale<double>(const int n, const double alpha, const double *x,
double* y) {
CUBLAS_CHECK(cublasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
template <typename Dtype>
__global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = alpha;
}
}
template <typename Dtype>
void caffe_gpu_set(const int N, const Dtype alpha, Dtype* Y) {
if (alpha == 0) {
CUDA_CHECK(cudaMemset(Y, 0, sizeof(Dtype) * N)); // NOLINT(caffe/alt_fn)
return;
}
// NOLINT_NEXT_LINE(whitespace/operators)
set_kernel<Dtype><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, alpha, Y);
}
template void caffe_gpu_set<int>(const int N, const int alpha, int* Y);
template void caffe_gpu_set<float>(const int N, const float alpha, float* Y);
template void caffe_gpu_set<double>(const int N, const double alpha, double* Y);
template <typename Dtype>
__global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] += alpha;
}
}
template <>
void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_scalar_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, alpha, Y);
}
template <>
void caffe_gpu_add_scalar(const int N, const double alpha, double* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_scalar_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, alpha, Y);
}
// Element-wise y[i] = a[i] + b[i]; CUDA_KERNEL_LOOP iterates `index`
// over [0, n) regardless of the launch configuration.
template <typename Dtype>
__global__ void add_kernel(const int n, const Dtype* a,
    const Dtype* b, Dtype* y) {
  CUDA_KERNEL_LOOP(index, n) {
    y[index] = a[index] + b[index];
  }
}
// Host wrapper: y = a + b (float), default stream.
template <>
void caffe_gpu_add<float>(const int N, const float* a, const float* b,
    float* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  add_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
      N, a, b, y);
}
// Host wrapper: y = a + b (double), default stream.
template <>
void caffe_gpu_add<double>(const int N, const double* a, const double* b,
    double* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  add_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
      N, a, b, y);
}
// Element-wise y[i] = a[i] - b[i].
template <typename Dtype>
__global__ void sub_kernel(const int n, const Dtype* a,
    const Dtype* b, Dtype* y) {
  CUDA_KERNEL_LOOP(index, n) {
    y[index] = a[index] - b[index];
  }
}
// Host wrapper: y = a - b (float).
template <>
void caffe_gpu_sub<float>(const int N, const float* a, const float* b,
    float* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  sub_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
      N, a, b, y);
}
// Host wrapper: y = a - b (double).
template <>
void caffe_gpu_sub<double>(const int N, const double* a, const double* b,
    double* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  sub_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
      N, a, b, y);
}
// Element-wise y[i] = a[i] * b[i] (Hadamard product).
template <typename Dtype>
__global__ void mul_kernel(const int n, const Dtype* a,
    const Dtype* b, Dtype* y) {
  CUDA_KERNEL_LOOP(index, n) {
    y[index] = a[index] * b[index];
  }
}
// Host wrapper: y = a .* b (float).
template <>
void caffe_gpu_mul<float>(const int N, const float* a,
    const float* b, float* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  mul_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
      N, a, b, y);
}
// Host wrapper: y = a .* b (double).
template <>
void caffe_gpu_mul<double>(const int N, const double* a,
    const double* b, double* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  mul_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
      N, a, b, y);
}
// Element-wise y[i] = a[i] / b[i]; no guard against b[i] == 0
// (matches IEEE semantics: inf/nan propagate).
template <typename Dtype>
__global__ void div_kernel(const int n, const Dtype* a,
    const Dtype* b, Dtype* y) {
  CUDA_KERNEL_LOOP(index, n) {
    y[index] = a[index] / b[index];
  }
}
// Host wrapper: y = a ./ b (float).
template <>
void caffe_gpu_div<float>(const int N, const float* a,
    const float* b, float* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  div_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
      N, a, b, y);
}
// Host wrapper: y = a ./ b (double).
template <>
void caffe_gpu_div<double>(const int N, const double* a,
    const double* b, double* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  div_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
      N, a, b, y);
}
// Element-wise y[i] = |a[i]|.
template <typename Dtype>
__global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) {
  CUDA_KERNEL_LOOP(index, n) {
    y[index] = abs(a[index]);
  }
}
// Host wrapper: y = |a| (float).
template <>
void caffe_gpu_abs<float>(const int N, const float* a, float* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  abs_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
      N, a, y);
}
// Host wrapper: y = |a| (double).
template <>
void caffe_gpu_abs<double>(const int N, const double* a, double* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  abs_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
      N, a, y);
}
// Element-wise y[i] = exp(a[i]).
template <typename Dtype>
__global__ void exp_kernel(const int n, const Dtype* a, Dtype* y) {
  CUDA_KERNEL_LOOP(index, n) {
    y[index] = exp(a[index]);
  }
}
// Host wrapper: y = exp(a) (float).
template <>
void caffe_gpu_exp<float>(const int N, const float* a, float* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  exp_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
      N, a, y);
}
// Host wrapper: y = exp(a) (double).
template <>
void caffe_gpu_exp<double>(const int N, const double* a, double* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  exp_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
      N, a, y);
}
// Element-wise y[i] = log(a[i]) (natural log; a[i] <= 0 yields -inf/nan).
template <typename Dtype>
__global__ void log_kernel(const int n, const Dtype* a, Dtype* y) {
  CUDA_KERNEL_LOOP(index, n) {
    y[index] = log(a[index]);
  }
}
// Host wrapper: y = log(a) (float).
template <>
void caffe_gpu_log<float>(const int N, const float* a, float* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  log_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
      N, a, y);
}
// Host wrapper: y = log(a) (double).
template <>
void caffe_gpu_log<double>(const int N, const double* a, double* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  log_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
      N, a, y);
}
// Element-wise y[i] = a[i] ** alpha (alpha is a scalar exponent).
template <typename Dtype>
__global__ void powx_kernel(const int n, const Dtype* a,
    const Dtype alpha, Dtype* y) {
  CUDA_KERNEL_LOOP(index, n) {
    y[index] = pow(a[index], alpha);
  }
}
// Host wrapper: y = a .^ alpha (float).
template <>
void caffe_gpu_powx<float>(const int N, const float* a,
    const float alpha, float* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  powx_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
      N, a, alpha, y);
}
// Host wrapper: y = a .^ alpha (double).
template <>
void caffe_gpu_powx<double>(const int N, const double* a,
    const double alpha, double* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  powx_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
      N, a, alpha, y);
}
// Element-wise round-to-nearest; specialized per type so the proper
// CUDA round-to-nearest-even (_rn) conversion intrinsic is used.
template <typename Dtype>
__global__ void round_kernel(const int n, const Dtype* a, Dtype* y);
template <>
__global__ void round_kernel<float>(const int n, const float* a, float* y) {
  CUDA_KERNEL_LOOP(index, n) {
    // round via float -> int (nearest-even), then back to float
    y[index] = (float) __float2int_rn(a[index]);
  }
}
template <>
__global__ void round_kernel<double>(const int n, const double* a, double* y) {
  CUDA_KERNEL_LOOP(index, n) {
    // round via double -> long long (nearest-even), then back to double
    y[index] = (double) __double2ll_rn(a[index]);
  }
}
// Host wrapper: y = round(a) (float).
template <>
void caffe_gpu_round<float>(const int N, const float* a, float* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  round_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
      N, a, y);
}
// Host wrapper: y = round(a) (double).
template <>
void caffe_gpu_round<double>(const int N, const double* a, double* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  round_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
      N, a, y);
}
// Bitwise AND of mask `m` with the raw bit pattern of each element;
// the result is reinterpreted back as a floating-point value.
template <typename Dtype, typename Mtype>
__global__ void and_kernel(const int n, const Mtype m, const Dtype* a, Dtype* y);
template <>
__global__ void and_kernel<float, unsigned long>(const int n, const unsigned long m, const float* a, float* y) {
  CUDA_KERNEL_LOOP(index, n) {
    // NOTE(review): __float_as_int returns int, so `m &` promotes to
    // unsigned long and the result is narrowed back to int for
    // __int_as_float — presumably intentional (only the low 32 mask
    // bits matter), but confirm on platforms where long is 64-bit.
    y[index] = __int_as_float(m & __float_as_int(a[index]));
  }
}
template <>
__global__ void and_kernel<double, unsigned long long>(const int n, const unsigned long long m, const double* a, double* y) {
  CUDA_KERNEL_LOOP(index, n) {
    y[index] = __longlong_as_double(m & __double_as_longlong(a[index]));
  }
}
// Host wrapper: y = bits(a) & m (float); the bitset mask is converted
// to an integer before being passed to the kernel.
template <>
void caffe_gpu_and<float>(const int N, const std::bitset<8*sizeof(float)> m,
    const float* a, float* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  and_kernel<float, unsigned long><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(N, m.to_ulong(), a, y);
}
// Host wrapper: y = bits(a) & m (double).
template <>
void caffe_gpu_and<double>(const int N, const std::bitset<8*sizeof(double)> m,
    const double* a, double* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  and_kernel<double, unsigned long long><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(N, m.to_ullong(), a, y);
}
// Element-wise y[i] = sqrt(a[i]) (negative inputs yield nan).
template <typename Dtype>
__global__ void sqrt_kernel(const int n, const Dtype* a, Dtype* y) {
  CUDA_KERNEL_LOOP(index, n) {
    y[index] = sqrt(a[index]);
  }
}
// Host wrapper: y = sqrt(a) (float).
template <>
void caffe_gpu_sqrt<float>(const int N, const float* a, float* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  sqrt_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
      N, a, y);
}
// Host wrapper: y = sqrt(a) (double).
template <>
void caffe_gpu_sqrt<double>(const int N, const double* a, double* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  sqrt_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
      N, a, y);
}
// sign:  y[i] = -1, 0, or +1 according to the sign of x[i]
//        (computed branch-free as (0 < x) - (x < 0)).
// sgnbit: y[i] = 1 if the sign bit of x[i] is set (incl. -0.0), else 0.
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index])
    - (x[index] < Dtype(0)));
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index]));
// Fill r[0..n) with raw 32-bit uniform random integers from cuRAND.
void caffe_gpu_rng_uniform(const int n, unsigned int* r) {
  CURAND_CHECK(curandGenerate(Caffe::curand_generator(), r, n));
}
// Fill r[0..n) with uniform floats on (a, b]: cuRAND generates on
// (0, 1], then the values are scaled by (b - a) and shifted by a.
// The scale/shift kernels are skipped when they would be identity ops.
template <>
void caffe_gpu_rng_uniform<float>(const int n, const float a, const float b,
    float* r) {
  CURAND_CHECK(curandGenerateUniform(Caffe::curand_generator(), r, n));
  const float range = b - a;
  if (range != static_cast<float>(1)) {
    caffe_gpu_scal(n, range, r);
  }
  if (a != static_cast<float>(0)) {
    caffe_gpu_add_scalar(n, a, r);
  }
}
// Double-precision variant of the above.
template <>
void caffe_gpu_rng_uniform<double>(const int n, const double a, const double b,
    double* r) {
  CURAND_CHECK(curandGenerateUniformDouble(Caffe::curand_generator(), r, n));
  const double range = b - a;
  if (range != static_cast<double>(1)) {
    caffe_gpu_scal(n, range, r);
  }
  if (a != static_cast<double>(0)) {
    caffe_gpu_add_scalar(n, a, r);
  }
}
// Fill r[0..n) with normally distributed floats N(mu, sigma^2).
// NOTE(review): curandGenerateNormal requires n to be even — confirm
// callers respect this.
template <>
void caffe_gpu_rng_gaussian(const int n, const float mu, const float sigma,
    float* r) {
  CURAND_CHECK(
      curandGenerateNormal(Caffe::curand_generator(), r, n, mu, sigma));
}
// Double-precision variant of the above.
template <>
void caffe_gpu_rng_gaussian(const int n, const double mu, const double sigma,
    double* r) {
  CURAND_CHECK(
      curandGenerateNormalDouble(Caffe::curand_generator(), r, n, mu, sigma));
}
} // namespace caffe
|
ccb17952fb6e0e81e9de4dca20f82f5d9259c7ec.hip | // !!! This is a file automatically generated by hipify!!!
// Program for Finding Maximum element in CUDA using Reduction technique
// For Hadoop-CUDA Lab
// NOTE: THIS PROGRAM USES SOME DEPRECATED FUNCTIONS; HENCE THE WARNINGS!
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <hip/hip_runtime.h>
// Thread block size
#define BLOCK_SIZE 512
// Size of Array
#define SOA 8192
// Allocates an array with random integer entries.
// Fill data[0..size) with pseudo-random integers.
// Bug fix: the original called rand_r() with no arguments; rand_r()
// requires a pointer to caller-owned seed state and does not compile
// without one. Plain rand() provides the intended behavior (sequence
// is deterministic for a given srand() seed; default seed is 1).
void randomInit(int* data, int size)
{
  for (int i = 0; i < size; ++i)
    data[i] = rand();
}
// Per-block maximum reduction over `input[0..n)`.
// Each block loads up to blockDim.x elements into dynamic shared memory
// (out-of-range lanes padded with INT_MIN), performs a halving tree
// reduction, and writes its partial maximum to results[blockIdx.x].
// Preconditions:
//  * launch must supply blockDim.x * sizeof(int) bytes of dynamic
//    shared memory (the kernel declares `extern __shared__`);
//  * indexing is 1-D only (blockIdx.x / threadIdx.x), so use a 1-D
//    grid and block;
//  * blockDim.x must be a power of two for the halving loop to visit
//    every element.
__global__ void ReductionMax2(int *input, int *results, int n) //take thread divergence into account
{
  extern __shared__ int sdata[];
  unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
  unsigned int tx = threadIdx.x;
  //load input into __shared__ memory; INT_MIN is the identity for max
  int x = INT_MIN;
  if(i < n)
    x = input[i];
  sdata[tx] = x;
  __syncthreads();
  // block-wide tree reduction; the barrier at the top of each
  // iteration orders the previous iteration's writes before the reads
  for(unsigned int offset = blockDim.x>>1; offset > 0; offset >>= 1)
  {
    __syncthreads();
    if(tx < offset)
    {
      if(sdata[tx + offset] > sdata[tx])
        sdata[tx] = sdata[tx + offset];
    }
  }
  // finally, thread 0 writes the result (it wrote sdata[0] itself in
  // the last iteration, so no extra barrier is needed)
  if(threadIdx.x == 0)
  {
    // the result is per-block
    results[blockIdx.x] = sdata[0];
  }
}
// get global max element via per-block reductions
// Two-pass maximum of an SOA-element random array on the GPU:
// pass 1 produces one partial max per block, pass 2 reduces the
// partials with a single block.
//
// Bug fixes vs. the original:
//  * ReductionMax2 indexes with blockIdx.x / threadIdx.x only, so the
//    launch must be 1-D: dim3(1, BLOCK_SIZE) put every thread on the
//    y-axis and dim3(4, 4) created 16 blocks whose results all wrote
//    results[blockIdx.x] with blockIdx.x in [0, 4);
//  * the kernel uses `extern __shared__`, so each launch must pass
//    BLOCK_SIZE * sizeof(int) bytes of dynamic shared memory (was 0);
//  * the final result was computed but never read — it is now printed.
int main()
{
  int num_blocks = SOA / BLOCK_SIZE;
  //allocate host memory for array a
  unsigned int mem_size_a = sizeof(int) * SOA;
  int* h_a = (int*)malloc(mem_size_a);
  //initialize host memory
  randomInit(h_a, SOA);
  //allocate device memory and copy the input across
  int* d_a;
  hipMalloc((void**) &d_a, mem_size_a);
  hipMemcpy(d_a, h_a, mem_size_a, hipMemcpyHostToDevice);
  //device memory for the per-block partial maxima (one int per block)
  unsigned int mem_size_b = sizeof(int) * num_blocks;
  int* d_b;
  hipMalloc((void**) &d_b, mem_size_b);
  //device memory for the single final result
  unsigned int mem_size_c = sizeof(int);
  int* d_c;
  hipMalloc((void**) &d_c, mem_size_c);
  //pass 1: one partial max per block
  hipLaunchKernelGGL(ReductionMax2, dim3(num_blocks), dim3(BLOCK_SIZE),
                     BLOCK_SIZE * sizeof(int), 0, d_a, d_b, SOA);
  //pass 2: a single block reduces the partials (num_blocks <= BLOCK_SIZE)
  hipLaunchKernelGGL(ReductionMax2, dim3(1), dim3(BLOCK_SIZE),
                     BLOCK_SIZE * sizeof(int), 0, d_b, d_c, num_blocks);
  //copy final result from device to host and report it
  int* h_c = (int*)malloc(mem_size_c);
  hipMemcpy(h_c, d_c, mem_size_c, hipMemcpyDeviceToHost);
  printf("max = %d\n", h_c[0]);
  //clean up memory
  free(h_a);
  free(h_c);
  hipFree(d_a);
  hipFree(d_b);
  hipFree(d_c);
  hipDeviceReset();
}
| ccb17952fb6e0e81e9de4dca20f82f5d9259c7ec.cu | // Program for Finding Maximum element in CUDA using Reduction technique
// For Hadoop-CUDA Lab
// NOTE: THIS PROGRAM USES SOME DEPRECATED FUNCTIONS; HENCE THE WARNINGS!
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <cuda.h>
// Thread block size
#define BLOCK_SIZE 512
// Size of Array
#define SOA 8192
// Allocates an array with random integer entries.
// Fill data[0..size) with pseudo-random integers.
// Bug fix: the original called rand_r() with no arguments; rand_r()
// requires a pointer to caller-owned seed state and does not compile
// without one. Plain rand() provides the intended behavior (sequence
// is deterministic for a given srand() seed; default seed is 1).
void randomInit(int* data, int size)
{
  for (int i = 0; i < size; ++i)
    data[i] = rand();
}
// Per-block maximum reduction over `input[0..n)`.
// Each block loads up to blockDim.x elements into dynamic shared memory
// (out-of-range lanes padded with INT_MIN), performs a halving tree
// reduction, and writes its partial maximum to results[blockIdx.x].
// Preconditions:
//  * launch must supply blockDim.x * sizeof(int) bytes of dynamic
//    shared memory (the kernel declares `extern __shared__`);
//  * indexing is 1-D only (blockIdx.x / threadIdx.x), so use a 1-D
//    grid and block;
//  * blockDim.x must be a power of two for the halving loop to visit
//    every element.
__global__ void ReductionMax2(int *input, int *results, int n) //take thread divergence into account
{
  extern __shared__ int sdata[];
  unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
  unsigned int tx = threadIdx.x;
  //load input into __shared__ memory; INT_MIN is the identity for max
  int x = INT_MIN;
  if(i < n)
    x = input[i];
  sdata[tx] = x;
  __syncthreads();
  // block-wide tree reduction; the barrier at the top of each
  // iteration orders the previous iteration's writes before the reads
  for(unsigned int offset = blockDim.x>>1; offset > 0; offset >>= 1)
  {
    __syncthreads();
    if(tx < offset)
    {
      if(sdata[tx + offset] > sdata[tx])
        sdata[tx] = sdata[tx + offset];
    }
  }
  // finally, thread 0 writes the result (it wrote sdata[0] itself in
  // the last iteration, so no extra barrier is needed)
  if(threadIdx.x == 0)
  {
    // the result is per-block
    results[blockIdx.x] = sdata[0];
  }
}
// get global max element via per-block reductions
// Two-pass maximum of an SOA-element random array on the GPU:
// pass 1 produces one partial max per block, pass 2 reduces the
// partials with a single block.
//
// Bug fixes vs. the original:
//  * ReductionMax2 indexes with blockIdx.x / threadIdx.x only, so the
//    launch must be 1-D: dim3(1, BLOCK_SIZE) put every thread on the
//    y-axis and dim3(4, 4) created 16 blocks whose results all wrote
//    results[blockIdx.x] with blockIdx.x in [0, 4);
//  * the kernel uses `extern __shared__`, so each launch must pass
//    BLOCK_SIZE * sizeof(int) bytes of dynamic shared memory (was 0);
//  * the final result was computed but never read — it is now printed;
//  * cudaThreadExit() is deprecated; cudaDeviceReset() replaces it.
int main()
{
  int num_blocks = SOA / BLOCK_SIZE;
  //allocate host memory for array a
  unsigned int mem_size_a = sizeof(int) * SOA;
  int* h_a = (int*)malloc(mem_size_a);
  //initialize host memory
  randomInit(h_a, SOA);
  //allocate device memory and copy the input across
  int* d_a;
  cudaMalloc((void**) &d_a, mem_size_a);
  cudaMemcpy(d_a, h_a, mem_size_a, cudaMemcpyHostToDevice);
  //device memory for the per-block partial maxima (one int per block)
  unsigned int mem_size_b = sizeof(int) * num_blocks;
  int* d_b;
  cudaMalloc((void**) &d_b, mem_size_b);
  //device memory for the single final result
  unsigned int mem_size_c = sizeof(int);
  int* d_c;
  cudaMalloc((void**) &d_c, mem_size_c);
  //pass 1: one partial max per block
  ReductionMax2<<<num_blocks, BLOCK_SIZE, BLOCK_SIZE * sizeof(int)>>>(
      d_a, d_b, SOA);
  //pass 2: a single block reduces the partials (num_blocks <= BLOCK_SIZE)
  ReductionMax2<<<1, BLOCK_SIZE, BLOCK_SIZE * sizeof(int)>>>(
      d_b, d_c, num_blocks);
  //copy final result from device to host and report it
  int* h_c = (int*)malloc(mem_size_c);
  cudaMemcpy(h_c, d_c, mem_size_c, cudaMemcpyDeviceToHost);
  printf("max = %d\n", h_c[0]);
  //clean up memory
  free(h_a);
  free(h_c);
  cudaFree(d_a);
  cudaFree(d_b);
  cudaFree(d_c);
  cudaDeviceReset();
}
|
4cbb4384c12a92c9ea384e00ad439f18c300eab9.hip | // !!! This is a file automatically generated by hipify!!!
#include <algorithm>
#include <chrono>
#include <cmath>
#include <csignal>
#include <cstdio>
#include <cstdlib>
#include <fstream>
#include <iostream>
#include <iterator>
#include <sstream>
#include <string>
#include <vector>
#include "hip/hip_runtime.h"
#include "mpi.h"
#include "omp.h"
#define NBLOCKS dim3(128, 128)
#define NTHREADS dim3(16, 16)
#define SSSAx 2
#define PHONG_INTENS 5
#define PHONG_KD 0.8
#define PHONG_KS 0.8
#define FATAL(description) \
do { \
std::cout << "Error in " << __FILE__ << ":" << __LINE__ \
<< ". Message: " << description << std::endl; \
MPI_Finalize(); \
exit(0); \
} while (0)
#define CSC(call) \
do { \
hipError_t res = call; \
if (res != hipSuccess) { \
FATAL(hipGetErrorString(res)); \
} \
} while (0)
#define CHECK_MPI(call) \
do { \
int res = call; \
if (res != MPI_SUCCESS) { \
char desc[MPI_MAX_ERROR_STRING]; \
int len; \
MPI_Error_string(res, desc, &len); \
FATAL(desc); \
} \
} while (0)
#define OpenMP 0
#define CUDA 1
int parallelizationMode;
// Select the execution backend from the optional command-line flag.
// Accepts at most one argument: "--gpu" (CUDA) or "--cpu" (OpenMP);
// no argument leaves parallelizationMode untouched, anything else
// prints a diagnostic, finalizes MPI and exits.
void parse_flags(int argc, char *argv[]) {
  if (argc < 2) return;
  if (argc > 2) {
    std::cout << "A lot of args" << std::endl;
    MPI_Finalize();
    exit(0);
  }
  const char *flag = argv[1];
  if (strcmp(flag, "--gpu") == 0) {
    parallelizationMode = CUDA;
  } else if (strcmp(flag, "--cpu") == 0) {
    parallelizationMode = OpenMP;
  } else {
    std::cout << "Unknown args" << std::endl;
    MPI_Finalize();
    exit(0);
  }
}
// RAII guard for the MPI runtime: MPI_Init on construction,
// barrier + MPI_Finalize on destruction (so all ranks finish together).
struct MPIContext {
  MPIContext(int *argc, char ***argv) { CHECK_MPI(MPI_Init(argc, argv)); }
  ~MPIContext() {
    CHECK_MPI(MPI_Barrier(MPI_COMM_WORLD));
    CHECK_MPI(MPI_Finalize());
  }
};
// Minimal 3-component vector usable from both host and device code;
// components default to T{} (zero for arithmetic types).
template <typename T>
struct Vector3 {
  T x, y, z;
  __host__ __device__ Vector3(T x = T{}, T y = T{}, T z = T{})
      : x(x), y(y), z(z) {}
  // Stream extraction reads the three components in x, y, z order.
  friend std::istream &operator>>(std::istream &is, Vector3 &v) {
    is >> v.x >> v.y >> v.z;
    return is;
  }
};
// Parameters of a camera trajectory in cylindrical coordinates:
// base position (r0, z0, phi0), oscillation amplitudes (ar, az),
// angular frequencies (wr, wz, wphi) and phases (pr, pz).
struct CameraMovement {
  double r0, z0, phi0, ar, az, wr, wz, wphi, pr, pz;
  friend std::istream &operator>>(std::istream &is, CameraMovement &p) {
    is >> p.r0 >> p.z0 >> p.phi0 >> p.ar >> p.az >> p.wr >> p.wz >> p.wphi >> p.pr >> p.pz;
    return is;
  }
};
// Description of one polyhedron: position, RGB color (normalized),
// bounding radius, reflection/refraction coefficients and light count.
struct FigureParams {
  Vector3<double> center, color;
  double radius, k_refl, k_refr;
  int lights_num;
  friend std::istream &operator>>(std::istream &is, FigureParams &p) {
    is >> p.center >> p.color >> p.radius >> p.k_refl >> p.k_refr >> p.lights_num;
    return is;
  }
};
// The rectangular floor: four corner points a..d, a texture path,
// a base color and a reflection coefficient.
struct FloorParams {
  Vector3<double> a, b, c, d, color;
  double k_refl;
  std::string texture_path;
  friend std::istream &operator>>(std::istream &is, FloorParams &p) {
    is >> p.a >> p.b >> p.c >> p.d >> p.texture_path >> p.color >> p.k_refl;
    return is;
  }
};
// A point light source: position and RGB color.
struct LightParams {
  Vector3<double> pos;
  Vector3<double> color;
  friend std::istream &operator>>(std::istream &is, LightParams &p) {
    is >> p.pos >> p.color;
    return is;
  }
};
// Full scene description read from stdin: frame count, output filename
// pattern, image size, FOV angle, the two camera trajectories, three
// figures, the floor and the light list.
struct Params {
  int nframes, w, h, lights_num;
  double angle;
  CameraMovement camera_center, camera_dir;
  FigureParams hex, octa, icos;
  FloorParams floor;
  std::string output_pattern;
  std::vector<LightParams> lights;
  friend std::istream &operator>>(std::istream &is, Params &p) {
    is >> p.nframes >> p.output_pattern >> p.w >> p.h >> p.angle >> p.camera_center >> p.camera_dir >> p.hex >> p.octa >> p.icos >> p.floor >> p.lights_num;
    p.lights.resize(p.lights_num);
    for (auto &it : p.lights) is >> it;
    return is;
  }
};
// A shaded triangle: three vertices and a flat RGB color.
struct Triangle {
  Vector3<double> a, b, c, color;
};
// Local host/device replacements for std::min / std::max (the std
// versions are not callable from device code in older toolchains).
template <typename T>
__host__ __device__ T min(const T &a, const T &b) {
  return (b < a) ? b : a;
}
template <typename T>
__host__ __device__ T max(const T &a, const T &b) {
  return (b > a) ? b : a;
}
// --- Host/device vector-math helpers for Vector3<double> ---
// Dot product a . b.
__host__ __device__ double dot_product(const Vector3<double> &a, const Vector3<double> &b) {
  return a.x * b.x + a.y * b.y + a.z * b.z;
}
// Cross product a x b.
__host__ __device__ Vector3<double> cross_product(const Vector3<double> &a, const Vector3<double> &b) {
  return {a.y * b.z - a.z * b.y, a.z * b.x - a.x * b.z, a.x * b.y - a.y * b.x};
}
// Euclidean length of v.
__host__ __device__ double norm(const Vector3<double> &v) {
  return sqrt(dot_product(v, v));
}
// Unit vector in the direction of v (undefined for the zero vector).
__host__ __device__ Vector3<double> normalize(const Vector3<double> &v) {
  double l = norm(v);
  return {v.x / l, v.y / l, v.z / l};
}
// Component-wise a - b.
__host__ __device__ Vector3<double> diff(const Vector3<double> &a, const Vector3<double> &b) {
  return {a.x - b.x, a.y - b.y, a.z - b.z};
}
// Component-wise a + b.
__host__ __device__ Vector3<double> add(const Vector3<double> &a, const Vector3<double> &b) {
  return {a.x + b.x, a.y + b.y, a.z + b.z};
}
// Component-wise (Hadamard) product a * b.
__host__ __device__ Vector3<double> mult(const Vector3<double> &a, const Vector3<double> &b) {
  return {b.x * a.x, b.y * a.y, b.z * a.z};
}
// Scalar product k * a.
__host__ __device__ Vector3<double> mult(const Vector3<double> &a, double k) {
  return {k * a.x, k * a.y, k * a.z};
}
// Matrix-vector product with column basis (a | b | c): returns
// d.x*a + d.y*b + d.z*c (used for camera basis transforms).
__host__ __device__ Vector3<double> mult(const Vector3<double> &a, const Vector3<double> &b, const Vector3<double> &c, const Vector3<double> &d) {
  return {a.x * d.x + b.x * d.y + c.x * d.z,
          a.y * d.x + b.y * d.y + c.y * d.z,
          a.z * d.x + b.z * d.y + c.z * d.z};
}
// Negation -v.
__host__ __device__ Vector3<double> inverse(const Vector3<double> &v) {
  return {-v.x, -v.y, -v.z};
}
// Scalar division a / k.
__host__ __device__ Vector3<double> div(const Vector3<double> &a, double k) {
  return {a.x / k, a.y / k, a.z / k};
}
// Reflection of v about unit normal n: v - 2 (v . n) n.
__host__ __device__ Vector3<double> reflect(const Vector3<double> &v, const Vector3<double> &n) {
  return diff(v, mult(n, 2.0 * dot_product(v, n)));
}
// Clamp each component to [0, 1] and convert to an 8-bit RGBA pixel
// (alpha fixed to 0).
__host__ __device__ uchar4 color_from_normalized(const Vector3<double> &v) {
  double x = min(v.x, 1.);
  x = max(x, 0.);
  double y = min(v.y, 1.);
  y = max(y, 0.);
  double z = min(v.z, 1.);
  z = max(z, 0.);
  return make_uchar4(255. * x, 255. * y, 255. * z, 0u);
}
// Split `s` on delimiter `d` using std::getline. Empty fields between
// consecutive delimiters are preserved; a trailing delimiter does not
// produce a trailing empty field.
std::vector<std::string> split_string(const std::string &s, char d) {
  std::vector<std::string> parts;
  std::stringstream stream(s);
  for (std::string token; std::getline(stream, token, d);) {
    parts.push_back(token);
  }
  return parts;
}
// Load a Wavefront .obj file, uniformly scale the model so its largest
// vertex distance from the origin equals fp.radius, translate it to
// fp.center, and append its triangles (flat-colored with fp.color)
// to scene_Triangles. Only "v" and "f" records are handled; "f" fields
// may carry "v/vt/vn" syntax (only the vertex index is used, 1-based).
void importObj(std::vector<Triangle> &scene_Triangles, const std::string &filepath, const FigureParams &fp) {
  std::ifstream is(filepath);
  if (!is) {
    std::string desc = "can't open " + filepath;
    FATAL(desc);
  }
  // r tracks the largest |vertex| seen, used below for normalization
  double r = 0;
  std::vector<Vector3<double>> vertices;
  std::vector<Triangle> figure_Triangles;
  std::string line;
  while (std::getline(is, line)) {
    std::vector<std::string> buffer = split_string(line, ' ');
    if (line.empty()) {
      continue;
    } else if (buffer[0] == "v") {
      // NOTE(review): coordinates are taken from fields 2-4, which
      // implies the "v" lines in the expected .obj files contain a
      // double space after "v" (field 1 is empty). Single-spaced
      // files would read past the token list — confirm against
      // hex.obj/octa.obj/icos.obj.
      double x = std::stod(buffer[2]);
      double y = std::stod(buffer[3]);
      double z = std::stod(buffer[4]);
      vertices.push_back({x, y, z});
    } else if (buffer[0] == "f") {
      std::vector<std::string> indexes = split_string(buffer[1], '/');
      Vector3<double> a = vertices[std::stoi(indexes[0]) - 1];
      indexes = split_string(buffer[2], '/');
      Vector3<double> b = vertices[std::stoi(indexes[0]) - 1];
      indexes = split_string(buffer[3], '/');
      Vector3<double> c = vertices[std::stoi(indexes[0]) - 1];
      r = max(r, norm(a));
      r = max(r, norm(b));
      r = max(r, norm(c));
      figure_Triangles.push_back(Triangle{a, b, c, fp.color});
    }
  }
  // scale factor mapping the model's bounding radius to fp.radius
  double radius = fp.radius / r;
  for (auto &single_triangle : figure_Triangles) {
    scene_Triangles.push_back({add(mult(single_triangle.a, radius), fp.center), add(mult(single_triangle.b, radius), fp.center), add(mult(single_triangle.c, radius), fp.center), single_triangle.color});
  }
}
// Append the two triangles (c-b-a and a-d-c) that tessellate the
// rectangular floor described by fp, flat-colored with fp.color.
void add_floor_to_scene(std::vector<Triangle> &scene_Triangles, const FloorParams &fp) {
  scene_Triangles.push_back(Triangle{fp.c, fp.b, fp.a, fp.color});
  scene_Triangles.push_back(Triangle{fp.a, fp.d, fp.c, fp.color});
}
// Row-major 3x3 matrix of doubles; m[row][col]. The constructor takes
// the nine entries in reading order and defaults to the zero matrix.
struct Mat3d {
  double m[3][3];
  __host__ __device__ Mat3d(double m11 = 0, double m12 = 0, double m13 = 0,
                            double m21 = 0, double m22 = 0, double m23 = 0,
                            double m31 = 0, double m32 = 0, double m33 = 0) {
    m[0][0] = m11;
    m[0][1] = m12;
    m[0][2] = m13;
    m[1][0] = m21;
    m[1][1] = m22;
    m[1][2] = m23;
    m[2][0] = m31;
    m[2][1] = m32;
    m[2][2] = m33;
  }
};
// Determinant of a 3x3 matrix via cofactor expansion along the first row.
__host__ __device__ double det(const Mat3d &m) {
  double c0 = m.m[1][1] * m.m[2][2] - m.m[1][2] * m.m[2][1];
  double c1 = m.m[1][0] * m.m[2][2] - m.m[1][2] * m.m[2][0];
  double c2 = m.m[1][0] * m.m[2][1] - m.m[1][1] * m.m[2][0];
  return m.m[0][0] * c0 - m.m[0][1] * c1 + m.m[0][2] * c2;
}
// Inverse of a 3x3 matrix via the classical adjugate divided by the
// determinant. No singularity check: det(m) == 0 yields inf/nan entries.
__host__ __device__ Mat3d inverse(const Mat3d &m) {
  double d = det(m);
  double m11 = (m.m[1][1] * m.m[2][2] - m.m[2][1] * m.m[1][2]) / d;
  double m12 = (m.m[2][1] * m.m[0][2] - m.m[0][1] * m.m[2][2]) / d;
  double m13 = (m.m[0][1] * m.m[1][2] - m.m[1][1] * m.m[0][2]) / d;
  double m21 = (m.m[2][0] * m.m[1][2] - m.m[1][0] * m.m[2][2]) / d;
  double m22 = (m.m[0][0] * m.m[2][2] - m.m[2][0] * m.m[0][2]) / d;
  double m23 = (m.m[1][0] * m.m[0][2] - m.m[0][0] * m.m[1][2]) / d;
  double m31 = (m.m[1][0] * m.m[2][1] - m.m[2][0] * m.m[1][1]) / d;
  double m32 = (m.m[2][0] * m.m[0][1] - m.m[0][0] * m.m[2][1]) / d;
  double m33 = (m.m[0][0] * m.m[1][1] - m.m[1][0] * m.m[0][1]) / d;
  return Mat3d(m11, m12, m13, m21, m22, m23, m31, m32, m33);
}
// Standard matrix-vector product: res = m * v.
__host__ __device__ Vector3<double> mult(const Mat3d &m, const Vector3<double> &v) {
  Vector3<double> res;
  res.x = m.m[0][0] * v.x + m.m[0][1] * v.y + m.m[0][2] * v.z;
  res.y = m.m[1][0] * v.x + m.m[1][1] * v.y + m.m[1][2] * v.z;
  res.z = m.m[2][0] * v.x + m.m[2][1] * v.y + m.m[2][2] * v.z;
  return res;
}
// Ray/triangle intersection by direct linear solve: with edges
// e1 = b - a, e2 = c - a, solves
//   origin + t*dir = a + u*e1 + v*e2
// i.e. [-dir | e1 | e2] * (t, u, v)^T = origin - a, via the explicit
// 3x3 inverse. On return t is the ray parameter and (u, v) the
// barycentric coordinates; the hit is inside the triangle iff
// u >= 0, v >= 0 and u + v <= 1 (callers perform that test).
__host__ __device__ void triangle_intersection(const Vector3<double> &origin, const Vector3<double> &dir, const Triangle &Triangle, double *t, double *u, double *v) {
  Vector3<double> e1 = diff(Triangle.b, Triangle.a);
  Vector3<double> e2 = diff(Triangle.c, Triangle.a);
  Mat3d m(-dir.x, e1.x, e2.x, -dir.y, e1.y, e2.y, -dir.z, e1.z, e2.z);
  Vector3<double> tmp = mult(inverse(m), diff(origin, Triangle.a));
  *t = tmp.x;
  *u = tmp.y;
  *v = tmp.z;
}
// Cast a ray from `origin` along `dir` against every scene triangle.
// Returns true if anything is hit with t > 0; *hit_t receives the
// smallest such t (or +inf when nothing is hit — `1 / 0.` evaluates
// to IEEE positive infinity). Used for occlusion tests in the shader.
__host__ __device__ bool shadow_ray_hit(const Vector3<double> &origin, const Vector3<double> &dir, const Triangle *scene_Triangles, int nTriangles, double *hit_t) {
  double t_min = 1 / 0.;
  bool hit = false;
  for (int i = 0; i < nTriangles; ++i) {
    auto Triangle = scene_Triangles[i];
    double t, u, v;
    triangle_intersection(origin, dir, Triangle, &t, &u, &v);
    // inside-triangle test on barycentric (u, v), forward hits only
    if (u >= 0.0 && v >= 0.0 && u + v <= 1.0 && t > 0.0) {
      if (t < t_min) {
        t_min = t;
      }
      hit = true;
    }
  }
  *hit_t = t_min;
  return hit;
}
// Phong shading of hit point `pos` (viewed along `dir`) on triangle
// `TriangleObj`, accumulating ambient, diffuse and specular terms over
// all lights. For each light a shadow ray is cast FROM the light
// TOWARDS the point; the light contributes only when the nearest
// occluder along that ray is the shaded point itself (hit_t > d, or
// within the 5e-4 slack that absorbs floating-point self-intersection).
// Fix: the original shadow test duplicated `hit_t > d` inside nested
// ||-expressions — `(hit_t > d || (hit_t > d || (d - hit_t < 0.0005)))`;
// the redundant copy is removed, the accepted set of hits is unchanged.
__host__ __device__ Vector3<double> phong_model(const Vector3<double> &pos, const Vector3<double> &dir, const Triangle &TriangleObj, const Triangle *scene_Triangles, int nTriangles, const LightParams *lights, int lights_num) {
  // geometric normal of the shaded triangle
  Vector3<double> normal = normalize(cross_product(diff(TriangleObj.b, TriangleObj.a), diff(TriangleObj.c, TriangleObj.a)));
  Vector3<double> ambient{0.1, 0.1, 0.1};
  Vector3<double> diffuse{0., 0., 0.};
  Vector3<double> specular{0., 0., 0.};
  for (int i = 0; i < lights_num; ++i) {
    Vector3<double> light_pos = lights[i].pos;
    Vector3<double> L = diff(light_pos, pos);  // point -> light
    double d = norm(L);                        // distance to the light
    L = normalize(L);
    double hit_t = 0.0;
    if (shadow_ray_hit(light_pos, inverse(L), scene_Triangles, nTriangles, &hit_t) && (hit_t > d || d - hit_t < 0.0005)) {
      double k = PHONG_INTENS / (d + 0.001f);  // distance attenuation
      diffuse = add(diffuse, mult(lights[i].color, max(PHONG_KD * k * dot_product(L, normal), 0.0)));
      Vector3<double> R = normalize(reflect(inverse(L), normal));  // mirror direction
      Vector3<double> S = inverse(dir);                            // towards the viewer
      specular = add(specular, mult(lights[i].color, PHONG_KS * k * ::pow(max(dot_product(R, S), 0.0), 32)));
    }
  }
  return add(add(mult(ambient, TriangleObj.color), mult(diffuse, TriangleObj.color)), mult(specular, TriangleObj.color));
}
// Trace a primary ray: find the nearest triangle hit using the
// Möller–Trumbore test (edges e1/e2, p = dir x e2, barycentric u/v),
// then shade the hit point with phong_model. Returns opaque black
// when the ray misses everything.
__host__ __device__ uchar4 ray(const Vector3<double> &pos, const Vector3<double> &dir, const Triangle *scene_Triangles, int nTriangles, LightParams *lights, int lights_num) {
  int k, k_min = -1;   // index of the nearest hit so far (-1 = none)
  double ts_min;       // ray parameter of that hit
  for (k = 0; k < nTriangles; k++) {
    Vector3<double> e1 = diff(scene_Triangles[k].b, scene_Triangles[k].a);
    Vector3<double> e2 = diff(scene_Triangles[k].c, scene_Triangles[k].a);
    Vector3<double> p = cross_product(dir, e2);
    double div = dot_product(p, e1);
    // ray parallel to the triangle plane -> no hit
    if (fabs(div) < 1e-10) continue;
    Vector3<double> t = diff(pos, scene_Triangles[k].a);
    double u = dot_product(p, t) / div;
    if (u < 0.0 || u > 1.0) continue;
    Vector3<double> q = cross_product(t, e1);
    double v = dot_product(q, dir) / div;
    if (v < 0.0 || v + u > 1.0) continue;
    double ts = dot_product(q, e2) / div;
    if (ts < 0.0) continue;   // hit behind the ray origin
    if (k_min == -1 || ts < ts_min) {
      k_min = k;
      ts_min = ts;
    }
  }
  if (k_min == -1) return {0, 0, 0, 0};
  return color_from_normalized(phong_model(add(mult(dir, ts_min), pos), dir, scene_Triangles[k_min], scene_Triangles, nTriangles, lights, lights_num));
}
// CPU renderer (OpenMP): builds an orthonormal camera basis
// (bz = forward from pc towards pv, bx = right, by = up), maps every
// pixel to a direction in that basis and traces one primary ray.
// z is the focal distance derived from the full FOV `angle` (degrees);
// the `* h / w` factor preserves the aspect ratio. Rows are stored
// flipped vertically ((h - 1 - j) indexing).
void getRenderOMP(uchar4 *data, int w, int h, Vector3<double> pc, Vector3<double> pv, double angle, const Triangle *scene_Triangles, int nTriangles, LightParams *lights, int lights_num) {
  double dw = 2.0 / (w - 1.0);
  double dh = 2.0 / (h - 1.0);
  double z = 1.0 / tan(angle * M_PI / 360.0);
  Vector3<double> bz = normalize(diff(pv, pc));
  Vector3<double> bx = normalize(cross_product(bz, {0.0, 0.0, 1.0}));
  Vector3<double> by = normalize(cross_product(bx, bz));
  // one flat loop over pixels so OpenMP can split the whole image
  #pragma omp parallel for
  for (int pix = 0; pix < w * h; ++pix) {
    int i = pix % w;   // column
    int j = pix / w;   // row
    Vector3<double> v = {-1.0 + dw * i, (-1.0 + dh * j) * h / w, z};
    Vector3<double> dir = mult(bx, by, bz, v);
    data[(h - 1 - j) * w + i] = ray(pc, normalize(dir), scene_Triangles, nTriangles, lights, lights_num);
  }
}
// GPU renderer: same math as getRenderOMP, with a 2-D grid-stride loop
// so any 2-D launch configuration covers the full w x h image.
__global__ void getRenderCUDA(uchar4 *data, int w, int h, Vector3<double> pc, Vector3<double> pv, double angle, const Triangle *scene_Triangles, int nTriangles, LightParams *lights, int lights_num) {
  int id_x = threadIdx.x + blockIdx.x * blockDim.x;
  int id_y = threadIdx.y + blockIdx.y * blockDim.y;
  int offset_x = blockDim.x * gridDim.x;
  int offset_y = blockDim.y * gridDim.y;
  double dw = 2.0 / (w - 1.0);
  double dh = 2.0 / (h - 1.0);
  // focal distance from the full FOV `angle` (degrees)
  double z = 1.0 / tan(angle * M_PI / 360.0);
  // orthonormal camera basis: bz forward, bx right, by up
  Vector3<double> bz = normalize(diff(pv, pc));
  Vector3<double> bx = normalize(cross_product(bz, {0.0, 0.0, 1.0}));
  Vector3<double> by = normalize(cross_product(bx, bz));
  for (int j = id_y; j < h; j += offset_y)
    for (int i = id_x; i < w; i += offset_x) {
      Vector3<double> v = {-1.0 + dw * i, (-1.0 + dh * j) * h / w, z};
      Vector3<double> dir = mult(bx, by, bz, v);
      // rows are stored flipped vertically
      data[(h - 1 - j) * w + i] =
          ray(pc, normalize(dir), scene_Triangles, nTriangles, lights, lights_num);
    }
}
// Box-filter average of the kernel_w x kernel_h block of pixels whose
// top-left corner is (row i, column j) in the full-resolution image
// `data` (row stride w; the h parameter is accepted but unused).
// Returns the averaged pixel for the downsampled image.
__host__ __device__ uchar4 SSAA(uchar4 *data, int i, int j, int w, int h, int kernel_w, int kernel_h) {
  Vector3<double> res;  // accumulates the RGB sum (starts at zero)
  for (int y = i; y < i + kernel_h; ++y)
    for (int x = j; x < j + kernel_w; ++x) {
      auto pix = data[y * w + x];
      res = add(res, Vector3<double>{(double)pix.x, (double)pix.y, (double)pix.z});
    }
  auto pix = div(res, kernel_w * kernel_h);
  return make_uchar4(pix.x, pix.y, pix.z, 0);
}
// GPU downsampler: shrink src (w x h) to dst (new_w x new_h) by
// averaging each kernel_w x kernel_h block; 2-D grid-stride loop so
// any launch configuration covers the output image. Assumes w and h
// are integer multiples of new_w and new_h.
__global__ void getSSAA_CUDA(uchar4 *dst, uchar4 *src, int new_w, int new_h, int w, int h) {
  int id_x = threadIdx.x + blockIdx.x * blockDim.x;
  int id_y = threadIdx.y + blockIdx.y * blockDim.y;
  int offset_x = blockDim.x * gridDim.x;
  int offset_y = blockDim.y * gridDim.y;
  int kernel_w = w / new_w;
  int kernel_h = h / new_h;
  for (int i = id_y; i < new_h; i += offset_y) {
    for (int j = id_x; j < new_w; j += offset_x) {
      int pix_i = i * kernel_h;
      int pix_j = j * kernel_w;
      dst[i * new_w + j] = SSAA(src, pix_i, pix_j, w, h, kernel_w, kernel_h);
    }
  }
}
// CPU downsampler (OpenMP): same block-average as getSSAA_CUDA, with
// one flat parallel loop over the output pixels.
void getSSAA_OMP(uchar4 *dst, uchar4 *src, int new_w, int new_h, int w, int h) {
  int kernel_w = w / new_w;
  int kernel_h = h / new_h;
  #pragma omp parallel for
  for (int pix = 0; pix < new_w * new_h; ++pix) {
    int i = pix / new_w;   // output row
    int j = pix % new_w;   // output column
    int pix_i = i * kernel_h;
    int pix_j = j * kernel_w;
    dst[i * new_w + j] = SSAA(src, pix_i, pix_j, w, h, kernel_w, kernel_h);
  }
}
// Evaluate the camera position (*pc, from trajectory `c`) and the
// look-at point (*pv, from trajectory `n`) at time t. Each trajectory
// is cylindrical with sinusoidal oscillation:
//   r(t)   = r0 + ar * sin(wr * t + pr)
//   z(t)   = z0 + az * sin(wz * t + pz)
//   phi(t) = phi0 + wphi * t
// and the result is converted to Cartesian coordinates.
// Bug fix: the z amplitude is `az`; the original used `ar` for both
// the radial and the vertical oscillation, leaving `az` unused.
void getCameraPos(const CameraMovement &c, const CameraMovement &n, double t, Vector3<double> *pc, Vector3<double> *pv) {
  double phic = c.phi0 + c.wphi * t, phin = n.phi0 + n.wphi * t;
  double rc = c.r0 + c.ar * sin(c.wr * t + c.pr), zc = c.z0 + c.az * sin(c.wz * t + c.pz);
  double rn = n.r0 + n.ar * sin(n.wr * t + n.pr), zn = n.z0 + n.az * sin(n.wz * t + n.pz);
  *pv = Vector3<double>{rn * cos(phin), rn * sin(phin), zn};
  *pc = Vector3<double>{rc * cos(phic), rc * sin(phic), zc};
}
// Write one frame via MPI-IO (each rank writes its own file, hence
// MPI_COMM_SELF): the layout is two ints (w, h) followed by w*h raw
// uchar4 pixels.
void write_image(const std::string &path, const std::vector<uchar4> &data, int w, int h) {
  MPI_File file;
  CHECK_MPI(MPI_File_open(MPI_COMM_SELF, path.data(), MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &file));
  CHECK_MPI(MPI_File_write(file, &w, 1, MPI_INT, MPI_STATUS_IGNORE));
  CHECK_MPI(MPI_File_write(file, &h, 1, MPI_INT, MPI_STATUS_IGNORE));
  CHECK_MPI(MPI_File_write(file, data.data(), sizeof(uchar4) * w * h, MPI_BYTE, MPI_STATUS_IGNORE));
  CHECK_MPI(MPI_File_close(&file));
}
// Fatal-signal handler (installed for SIGSEGV/SIGABRT in main):
// report the signal, finalize MPI so other ranks are not left hanging,
// then exit.
void signal_handler(int signal){
  std::cout << "Error. Bad signal: " << signal << std::endl;
  MPI_Finalize();
  exit(0);
}
int main(int argc, char *argv[]) {
std::signal(SIGSEGV, signal_handler);
std::signal(SIGABRT, signal_handler);
MPIContext ctx(&argc, &argv);
parse_flags(argc, argv);
int rank, nprocesses;
CHECK_MPI(MPI_Comm_rank(MPI_COMM_WORLD, &rank));
CHECK_MPI(MPI_Comm_size(MPI_COMM_WORLD, &nprocesses));
CHECK_MPI(MPI_Barrier(MPI_COMM_WORLD));
std::vector<Triangle> scene_Triangles;
Params params;
if (rank == 0) {
std::cin >> params;
importObj(scene_Triangles, "hex.obj", params.hex);
importObj(scene_Triangles, "octa.obj", params.octa);
importObj(scene_Triangles, "icos.obj", params.icos);
scene_Triangles.push_back({params.floor.c, params.floor.b, params.floor.a, params.floor.color});
scene_Triangles.push_back({params.floor.a, params.floor.d, params.floor.c, params.floor.color});
}
int output_pattern_size = params.output_pattern.size();
CHECK_MPI(MPI_Bcast(¶ms.nframes, 1, MPI_INT, 0, MPI_COMM_WORLD));
CHECK_MPI(MPI_Bcast(&output_pattern_size, 1, MPI_INT, 0, MPI_COMM_WORLD));
params.output_pattern.resize(output_pattern_size);
CHECK_MPI(MPI_Bcast((char *)params.output_pattern.data(), output_pattern_size, MPI_CHAR, 0, MPI_COMM_WORLD));
CHECK_MPI(MPI_Bcast(¶ms.w, 1, MPI_INT, 0, MPI_COMM_WORLD));
CHECK_MPI(MPI_Bcast(¶ms.h, 1, MPI_INT, 0, MPI_COMM_WORLD));
CHECK_MPI(MPI_Bcast(¶ms.angle, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD));
CHECK_MPI(MPI_Bcast(¶ms.camera_center, sizeof(CameraMovement), MPI_BYTE, 0, MPI_COMM_WORLD));
CHECK_MPI(MPI_Bcast(¶ms.camera_dir, sizeof(CameraMovement), MPI_BYTE, 0, MPI_COMM_WORLD));
CHECK_MPI(MPI_Bcast(¶ms.hex, sizeof(FigureParams), MPI_BYTE, 0, MPI_COMM_WORLD));
CHECK_MPI(MPI_Bcast(¶ms.octa, sizeof(FigureParams), MPI_BYTE, 0, MPI_COMM_WORLD));
CHECK_MPI(MPI_Bcast(¶ms.icos, sizeof(FigureParams), MPI_BYTE, 0, MPI_COMM_WORLD));
CHECK_MPI(MPI_Bcast(¶ms.floor.a, sizeof(Vector3<double>), MPI_BYTE, 0, MPI_COMM_WORLD));
CHECK_MPI(MPI_Bcast(¶ms.floor.b, sizeof(Vector3<double>), MPI_BYTE, 0, MPI_COMM_WORLD));
CHECK_MPI(MPI_Bcast(¶ms.floor.c, sizeof(Vector3<double>), MPI_BYTE, 0, MPI_COMM_WORLD));
CHECK_MPI(MPI_Bcast(¶ms.floor.d, sizeof(Vector3<double>), MPI_BYTE, 0, MPI_COMM_WORLD));
int texture_path_size = params.floor.texture_path.size();
CHECK_MPI(MPI_Bcast(&texture_path_size, 1, MPI_INT, 0, MPI_COMM_WORLD));
params.floor.texture_path.resize(texture_path_size);
CHECK_MPI(MPI_Bcast((char *)params.floor.texture_path.data(), texture_path_size, MPI_CHAR, 0, MPI_COMM_WORLD));
CHECK_MPI(MPI_Bcast(¶ms.floor.color, sizeof(Vector3<double>), MPI_BYTE, 0, MPI_COMM_WORLD));
CHECK_MPI(MPI_Bcast(¶ms.floor.k_refl, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD));
params.lights.resize(params.lights_num);
CHECK_MPI(MPI_Bcast(¶ms.lights_num, 1, MPI_INT, 0, MPI_COMM_WORLD));
CHECK_MPI(MPI_Bcast(params.lights.data(), sizeof(LightParams) * params.lights_num, MPI_BYTE, 0, MPI_COMM_WORLD));
int nTriangles = scene_Triangles.size();
CHECK_MPI(MPI_Bcast(&nTriangles, 1, MPI_INT, 0, MPI_COMM_WORLD));
scene_Triangles.resize(nTriangles);
CHECK_MPI(MPI_Bcast(scene_Triangles.data(), sizeof(Triangle) * nTriangles, MPI_BYTE, 0, MPI_COMM_WORLD));
int ndevices;
CSC(hipGetDeviceCount(&ndevices));
CSC(hipSetDevice(rank % ndevices));
Triangle *gpu_scene_Triangles;
LightParams *gpu_lights;
if (parallelizationMode == CUDA) {
auto Triangles_size = sizeof(Triangle) * scene_Triangles.size();
CSC(hipMalloc(&gpu_scene_Triangles, Triangles_size));
CSC(hipMemcpy(gpu_scene_Triangles, scene_Triangles.data(), Triangles_size, hipMemcpyHostToDevice));
auto lights_size = sizeof(LightParams) * params.lights_num;
CSC(hipMalloc(&gpu_lights, lights_size));
CSC(hipMemcpy(gpu_lights, params.lights.data(), lights_size, hipMemcpyHostToDevice));
}
CHECK_MPI(MPI_Barrier(MPI_COMM_WORLD));
std::vector<uchar4> data_render(SSSAx*params.w * SSSAx*params.h), data_ssaa(params.w * params.h);
uchar4 *gpu_data_render, *gpu_data_ssaa;
if (parallelizationMode == CUDA) {
CSC(hipMalloc(&gpu_data_render, sizeof(uchar4) * SSSAx*params.w * SSSAx*params.h));
CSC(hipMalloc(&gpu_data_ssaa, sizeof(uchar4) * params.w * params.h));
}
for (int frame = rank; frame < params.nframes; frame += nprocesses) {
Vector3<double> pc, pv;
getCameraPos(params.camera_center, params.camera_dir, 0.1 * (double)frame, &pc, &pv);
auto start = std::chrono::high_resolution_clock::now();
if (parallelizationMode == OpenMP) {
getRenderOMP(data_render.data(), SSSAx*params.w, SSSAx*params.h, pc, pv, params.angle, scene_Triangles.data(), scene_Triangles.size(), params.lights.data(), params.lights.size());
getSSAA_OMP(data_ssaa.data(), data_render.data(), params.w, params.h, SSSAx*params.w, SSSAx*params.h);
}
if (parallelizationMode == CUDA) {
hipLaunchKernelGGL(( getRenderCUDA), dim3(NBLOCKS), dim3(NTHREADS), 0, 0, gpu_data_render, SSSAx*params.w, SSSAx*params.h, pc, pv, params.angle,gpu_scene_Triangles, scene_Triangles.size(), gpu_lights, params.lights.size());
CSC(hipDeviceSynchronize());
hipLaunchKernelGGL(( getSSAA_CUDA), dim3(NBLOCKS), dim3(NTHREADS), 0, 0, gpu_data_ssaa, gpu_data_render, params.w, params.h, SSSAx*params.w, SSSAx*params.h);
CSC(hipMemcpy(data_ssaa.data(), gpu_data_ssaa,sizeof(uchar4) * params.w * params.h, hipMemcpyDeviceToHost));
}
char output_path[256];
sprintf(output_path, params.output_pattern.data(), frame);
write_image(output_path, data_ssaa, params.w, params.h);
auto time = std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::high_resolution_clock::now() - start);
std::cout << frame << "\t" << output_path << "\t" << time.count() << "ms" << std::endl;
}
if (parallelizationMode == CUDA) {
CSC(hipFree(gpu_scene_Triangles));
CSC(hipFree(gpu_lights));
CSC(hipFree(gpu_data_ssaa));
CSC(hipFree(gpu_data_render));
}
return 0;
} | 4cbb4384c12a92c9ea384e00ad439f18c300eab9.cu | #include <algorithm>
#include <chrono>
#include <cmath>
#include <csignal>
#include <cstdio>
#include <cstdlib>
#include <fstream>
#include <iostream>
#include <iterator>
#include <sstream>
#include <string>
#include <vector>
#include "cuda_runtime.h"
#include "mpi.h"
#include "omp.h"
#define NBLOCKS dim3(128, 128)
#define NTHREADS dim3(16, 16)
#define SSSAx 2
#define PHONG_INTENS 5
#define PHONG_KD 0.8
#define PHONG_KS 0.8
#define FATAL(description) \
do { \
std::cout << "Error in " << __FILE__ << ":" << __LINE__ \
<< ". Message: " << description << std::endl; \
MPI_Finalize(); \
exit(0); \
} while (0)
#define CSC(call) \
do { \
cudaError_t res = call; \
if (res != cudaSuccess) { \
FATAL(cudaGetErrorString(res)); \
} \
} while (0)
#define CHECK_MPI(call) \
do { \
int res = call; \
if (res != MPI_SUCCESS) { \
char desc[MPI_MAX_ERROR_STRING]; \
int len; \
MPI_Error_string(res, desc, &len); \
FATAL(desc); \
} \
} while (0)
#define OpenMP 0
#define CUDA 1
int parallelizationMode;
// Parses the single optional command-line flag that selects the backend:
//   --gpu -> CUDA, --cpu -> OpenMP.
// No argument keeps the current default. An unknown flag or more than one
// argument prints a message and terminates the program (finalizing MPI
// first, since the caller has already initialized it).
void parse_flags(int argc, char *argv[]) {
  if (argc < 2) return;
  if (argc > 2) {
    std::cout << "A lot of args" << std::endl;
    MPI_Finalize();
    exit(0);
  }
  const std::string flag(argv[1]);
  if (flag == "--gpu") {
    parallelizationMode = CUDA;
  } else if (flag == "--cpu") {
    parallelizationMode = OpenMP;
  } else {
    std::cout << "Unknown args" << std::endl;
    MPI_Finalize();
    exit(0);
  }
}
// RAII guard for the MPI runtime: MPI_Init on construction, a final barrier
// plus MPI_Finalize on destruction (i.e. when main() returns).
struct MPIContext {
MPIContext(int *argc, char ***argv) { CHECK_MPI(MPI_Init(argc, argv)); }
~MPIContext() {
CHECK_MPI(MPI_Barrier(MPI_COMM_WORLD));
CHECK_MPI(MPI_Finalize());
}
};
// Minimal 3-component vector; used with T = double throughout this file.
template <typename T>
struct Vector3 {
T x, y, z;
// Components default to T{} (zero for arithmetic T).
__host__ __device__ Vector3(T x = T{}, T y = T{}, T z = T{})
: x(x), y(y), z(z) {}
// Reads three whitespace-separated components: "x y z".
friend std::istream &operator>>(std::istream &is, Vector3 &v) {
is >> v.x >> v.y >> v.z;
return is;
}
};
// One camera trajectory in cylindrical coordinates (see getCameraPos):
// r0/z0/phi0 base values, ar/az amplitudes, wr/wz/wphi angular frequencies,
// pr/pz phases — field names suggest ar is the r-amplitude and az the
// z-amplitude; TODO confirm against getCameraPos, which currently uses ar
// for both oscillations.
struct CameraMovement {
double r0, z0, phi0, ar, az, wr, wz, wphi, pr, pz;
friend std::istream &operator>>(std::istream &is, CameraMovement &p) {
is >> p.r0 >> p.z0 >> p.phi0 >> p.ar >> p.az >> p.wr >> p.wz >> p.wphi >> p.pr >> p.pz;
return is;
}
};
// Description of one figure as read from stdin: center, color, target
// radius (the .obj mesh is rescaled to it in importObj), reflection and
// refraction coefficients, and a per-figure light count. k_refl, k_refr and
// lights_num are read and broadcast but not used by the visible rendering
// path — TODO confirm intended use.
struct FigureParams {
Vector3<double> center, color;
double radius, k_refl, k_refr;
int lights_num;
friend std::istream &operator>>(std::istream &is, FigureParams &p) {
is >> p.center >> p.color >> p.radius >> p.k_refl >> p.k_refr >> p.lights_num;
return is;
}
};
// Floor quad (corner points a..d), a texture file path, flat color and
// reflection coefficient. texture_path and k_refl are broadcast in main()
// but not used by the visible rendering path — TODO confirm.
struct FloorParams {
Vector3<double> a, b, c, d, color;
double k_refl;
std::string texture_path;
friend std::istream &operator>>(std::istream &is, FloorParams &p) {
is >> p.a >> p.b >> p.c >> p.d >> p.texture_path >> p.color >> p.k_refl;
return is;
}
};
// Point light source: position and RGB color (components presumably in
// [0, 1]; the shading math does not enforce a range — see phong_model).
struct LightParams {
Vector3<double> pos;
Vector3<double> color;
friend std::istream &operator>>(std::istream &is, LightParams &p) {
is >> p.pos >> p.color;
return is;
}
};
// Full scene description in the exact stdin order used by operator>>:
// frame count, printf-style output filename pattern, image width/height,
// field-of-view angle, two camera trajectories (position and look-at
// target), three figures, the floor, and the light list.
struct Params {
int nframes, w, h, lights_num;
double angle;
CameraMovement camera_center, camera_dir;
FigureParams hex, octa, icos;
FloorParams floor;
std::string output_pattern;
std::vector<LightParams> lights;
friend std::istream &operator>>(std::istream &is, Params &p) {
is >> p.nframes >> p.output_pattern >> p.w >> p.h >> p.angle >> p.camera_center >> p.camera_dir >> p.hex >> p.octa >> p.icos >> p.floor >> p.lights_num;
p.lights.resize(p.lights_num);
for (auto &it : p.lights) is >> it;
return is;
}
};
// One scene triangle: vertices a, b, c and a flat RGB color.
struct Triangle {
Vector3<double> a, b, c, color;
};
// Smaller of two values; usable from both host and device code.
template <typename T>
__host__ __device__ T min(const T &a, const T &b) {
  return (b < a) ? b : a;
}
// Larger of two values; usable from both host and device code.
template <typename T>
__host__ __device__ T max(const T &a, const T &b) {
  return (a < b) ? b : a;
}
// Dot product a·b.
__host__ __device__ double dot_product(const Vector3<double> &a, const Vector3<double> &b) {
return a.x * b.x + a.y * b.y + a.z * b.z;
}
// Right-handed cross product a×b.
__host__ __device__ Vector3<double> cross_product(const Vector3<double> &a, const Vector3<double> &b) {
return {a.y * b.z - a.z * b.y, a.z * b.x - a.x * b.z,a.x * b.y - a.y * b.x};
}
// Euclidean length |v|.
__host__ __device__ double norm(const Vector3<double> &v) {
return sqrt(dot_product(v, v));
}
// v scaled to unit length; no guard — a zero vector yields inf/nan.
__host__ __device__ Vector3<double> normalize(const Vector3<double> &v) {
double l = norm(v);
return {v.x / l, v.y / l, v.z / l};
}
// Component-wise a - b.
__host__ __device__ Vector3<double> diff(const Vector3<double> &a, const Vector3<double> &b) {
return {a.x - b.x, a.y - b.y, a.z - b.z};
}
// Component-wise a + b.
__host__ __device__ Vector3<double> add(const Vector3<double> &a, const Vector3<double> &b) {
return {a.x + b.x, a.y + b.y, a.z + b.z};
}
// Component-wise (Hadamard) product.
__host__ __device__ Vector3<double> mult(const Vector3<double> &a, const Vector3<double> &b) {
return {b.x * a.x, b.y * a.y, b.z * a.z};
}
// Scalar multiple k*a.
__host__ __device__ Vector3<double> mult(const Vector3<double> &a, double k) {
return {k * a.x, k * a.y, k * a.z};
}
// Linear combination d.x*a + d.y*b + d.z*c — i.e. the matrix with columns
// (a, b, c) applied to d. Used to rotate camera-space ray directions into
// world space (see getRenderOMP / getRenderCUDA).
__host__ __device__ Vector3<double> mult(const Vector3<double> &a, const Vector3<double> &b, const Vector3<double> &c, const Vector3<double> &d) {
return {a.x * d.x + b.x * d.y + c.x * d.z,
a.y * d.x + b.y * d.y + c.y * d.z,
a.z * d.x + b.z * d.y + c.z * d.z};
}
// Negation -v.
__host__ __device__ Vector3<double> inverse(const Vector3<double> &v) {
return {-v.x, -v.y, -v.z};
}
// Component-wise division by scalar k (no zero check).
__host__ __device__ Vector3<double> div(const Vector3<double> &a, double k) {
return {a.x / k, a.y / k, a.z / k};
}
// Reflection of v about n: v - 2(v·n)n. Gives the mirror direction only when
// n is unit length — callers normalize first (see phong_model).
__host__ __device__ Vector3<double> reflect(const Vector3<double> &v, const Vector3<double> &n) {
return diff(v, mult(n, 2.0 * dot_product(v, n)));
}
// Clamps each component of v to [0, 1] and packs it into an 8-bit RGBA
// pixel (alpha = 0); 255.*1.0 = 255 fits in unsigned char.
__host__ __device__ uchar4 color_from_normalized(const Vector3<double> &v) {
double x = min(v.x, 1.);
x = max(x, 0.);
double y = min(v.y, 1.);
y = max(y, 0.);
double z = min(v.z, 1.);
z = max(z, 0.);
return make_uchar4(255. * x, 255. * y, 255. * z, 0u);
}
// Splits s on delimiter d and returns the pieces in order. Consecutive
// delimiters yield empty tokens; a trailing delimiter does not append a
// final empty token (std::getline semantics).
std::vector<std::string> split_string(const std::string &s, char d) {
  std::vector<std::string> parts;
  std::istringstream stream(s);
  for (std::string token; std::getline(stream, token, d);) {
    parts.push_back(token);
  }
  return parts;
}
// Loads a Wavefront .obj file, rescales the mesh so its farthest vertex
// lies at fp.radius from the origin, translates it to fp.center, and
// appends the resulting triangles (colored fp.color) to scene_Triangles.
// Exits via FATAL if the file cannot be opened. Only "v" and triangular
// "f" records are handled.
void importObj(std::vector<Triangle> &scene_Triangles, const std::string &filepath, const FigureParams &fp) {
std::ifstream is(filepath);
if (!is) {
std::string desc = "can't open " + filepath;
FATAL(desc);
}
/*
std::vector<Triangle> triangle_list(10);
triangle_list[0] = Triangle{Vector3<double> {}, Vector3<double> {}, Vector3<double> {}, fp.color}
*/
// r tracks the largest vertex distance from the origin (used for rescale).
double r = 0;
std::vector<Vector3<double>> vertices;
std::vector<Triangle> figure_Triangles;
std::string line;
while (std::getline(is, line)) {
std::vector<std::string> buffer = split_string(line, ' ');
if (line.empty()) {
continue;
} else if (buffer[0] == "v") {
// NOTE(review): coordinates are read from indices 2..4, which assumes an
// extra separator after "v" (e.g. a double space). A standard "v x y z"
// line would place them at 1..3 and index out of range — confirm the
// .obj files used here.
double x = std::stod(buffer[2]);
double y = std::stod(buffer[3]);
double z = std::stod(buffer[4]);
vertices.push_back({x, y, z});
} else if (buffer[0] == "f") {
// Face records may be "v/vt/vn"; only the vertex index (1-based) is used.
std::vector<std::string> indexes = split_string(buffer[1], '/');
Vector3<double> a = vertices[std::stoi(indexes[0]) - 1];
indexes = split_string(buffer[2], '/');
Vector3<double> b = vertices[std::stoi(indexes[0]) - 1];
indexes = split_string(buffer[3], '/');
Vector3<double> c = vertices[std::stoi(indexes[0]) - 1];
r = max(r, norm(a));
r = max(r, norm(b));
r = max(r, norm(c));
figure_Triangles.push_back(Triangle{a, b, c, fp.color});
}
}
// Uniform scale factor mapping the farthest vertex onto fp.radius.
double radius = fp.radius / r;
for (auto &single_triangle : figure_Triangles) {
scene_Triangles.push_back({add(mult(single_triangle.a, radius), fp.center), add(mult(single_triangle.b, radius), fp.center), add(mult(single_triangle.c, radius), fp.center), single_triangle.color});
}
}
// Appends the floor quad as two triangles, (c,b,a) and (a,d,c).
// Note: main() currently pushes the same two triangles inline instead of
// calling this helper.
void add_floor_to_scene(std::vector<Triangle> &scene_Triangles, const FloorParams &fp) {
scene_Triangles.push_back({fp.c, fp.b, fp.a, fp.color});
scene_Triangles.push_back({fp.a, fp.d, fp.c, fp.color});
}
// Dense 3x3 double matrix, row-major (m[row][col]), with an element-wise
// constructor listing entries row by row.
struct Mat3d {
double m[3][3];
__host__ __device__ Mat3d(double m11 = 0, double m12 = 0, double m13 = 0,
double m21 = 0, double m22 = 0, double m23 = 0,
double m31 = 0, double m32 = 0, double m33 = 0) {
m[0][0] = m11;
m[0][1] = m12;
m[0][2] = m13;
m[1][0] = m21;
m[1][1] = m22;
m[1][2] = m23;
m[2][0] = m31;
m[2][1] = m32;
m[2][2] = m33;
}
};
// 3x3 determinant via the rule of Sarrus.
__host__ __device__ double det(const Mat3d &m) {
return m.m[0][0] * m.m[1][1] * m.m[2][2] +
m.m[1][0] * m.m[0][2] * m.m[2][1] +
m.m[2][0] * m.m[0][1] * m.m[1][2] -
m.m[0][2] * m.m[1][1] * m.m[2][0] -
m.m[0][0] * m.m[1][2] * m.m[2][1] -
m.m[0][1] * m.m[1][0] * m.m[2][2];
}
// Matrix inverse via the adjugate divided by the determinant.
// No singularity check: det(m) == 0 yields inf/nan entries.
__host__ __device__ Mat3d inverse(const Mat3d &m) {
double d = det(m);
double m11 = (m.m[1][1] * m.m[2][2] - m.m[2][1] * m.m[1][2]) / d;
double m12 = (m.m[2][1] * m.m[0][2] - m.m[0][1] * m.m[2][2]) / d;
double m13 = (m.m[0][1] * m.m[1][2] - m.m[1][1] * m.m[0][2]) / d;
double m21 = (m.m[2][0] * m.m[1][2] - m.m[1][0] * m.m[2][2]) / d;
double m22 = (m.m[0][0] * m.m[2][2] - m.m[2][0] * m.m[0][2]) / d;
double m23 = (m.m[1][0] * m.m[0][2] - m.m[0][0] * m.m[1][2]) / d;
double m31 = (m.m[1][0] * m.m[2][1] - m.m[2][0] * m.m[1][1]) / d;
double m32 = (m.m[2][0] * m.m[0][1] - m.m[0][0] * m.m[2][1]) / d;
double m33 = (m.m[0][0] * m.m[1][1] - m.m[1][0] * m.m[0][1]) / d;
return Mat3d(m11, m12, m13, m21, m22, m23, m31, m32, m33);
}
// Matrix-vector product m * v.
__host__ __device__ Vector3<double> mult(const Mat3d &m, const Vector3<double> &v) {
Vector3<double> res;
res.x = m.m[0][0] * v.x + m.m[0][1] * v.y + m.m[0][2] * v.z;
res.y = m.m[1][0] * v.x + m.m[1][1] * v.y + m.m[1][2] * v.z;
res.z = m.m[2][0] * v.x + m.m[2][1] * v.y + m.m[2][2] * v.z;
return res;
}
// Solves origin + t*dir = A + u*(B-A) + v*(C-A) by inverting the 3x3
// system: *t is the ray parameter, (*u, *v) the barycentric coordinates.
// The hit test itself (u >= 0, v >= 0, u+v <= 1, t > 0) is done by callers;
// a ray parallel to the triangle makes the matrix singular (inf/nan out).
__host__ __device__ void triangle_intersection(const Vector3<double> &origin, const Vector3<double> &dir, const Triangle &Triangle, double *t, double *u, double *v) {
Vector3<double> e1 = diff(Triangle.b, Triangle.a);
Vector3<double> e2 = diff(Triangle.c, Triangle.a);
Mat3d m(-dir.x, e1.x, e2.x, -dir.y, e1.y, e2.y, -dir.z, e1.z, e2.z);
Vector3<double> tmp = mult(inverse(m), diff(origin, Triangle.a));
*t = tmp.x;
*u = tmp.y;
*v = tmp.z;
}
// Casts a ray from `origin` along `dir` against every scene triangle and
// reports whether anything is hit; *hit_t receives the smallest positive
// hit parameter t (+inf when nothing is hit).
// Fixes: `1 / 0.` replaced by INFINITY (states the intent and avoids a
// divide-by-zero constant), and the per-iteration by-value copy named
// `Triangle` — which also shadowed the struct type — is removed.
__host__ __device__ bool shadow_ray_hit(const Vector3<double> &origin, const Vector3<double> &dir, const Triangle *scene_Triangles, int nTriangles, double *hit_t) {
  double t_min = INFINITY;
  bool hit = false;
  for (int i = 0; i < nTriangles; ++i) {
    double t, u, v;
    triangle_intersection(origin, dir, scene_Triangles[i], &t, &u, &v);
    // Hit iff inside the triangle (barycentric u, v) and in front of origin.
    if (u >= 0.0 && v >= 0.0 && u + v <= 1.0 && t > 0.0) {
      if (t < t_min) {
        t_min = t;
      }
      hit = true;
    }
  }
  *hit_t = t_min;
  return hit;
}
// Phong shading at surface point `pos` seen along `dir` on TriangleObj:
// constant ambient term plus per-light diffuse and specular contributions,
// each gated by a shadow test. A light contributes only when the nearest
// occluder along the light->point ray is the point itself, i.e. the hit
// distance is >= d within a 5e-4 tolerance.
// Fix: the shadow-test condition repeated `hit_t > d` twice
// (`hit_t > d || (hit_t > d || ...)`); the redundant clause is removed
// (boolean-identical).
__host__ __device__ Vector3<double> phong_model(const Vector3<double> &pos, const Vector3<double> &dir, const Triangle &TriangleObj, const Triangle *scene_Triangles, int nTriangles, const LightParams *lights, int lights_num) {
  // Geometric (face) normal of the shaded triangle.
  Vector3<double> normal = normalize(cross_product(diff(TriangleObj.b, TriangleObj.a), diff(TriangleObj.c, TriangleObj.a)));
  Vector3<double> ambient{0.1, 0.1, 0.1};
  Vector3<double> diffuse{0., 0., 0.};
  Vector3<double> specular{0., 0., 0.};
  for (int i = 0; i < lights_num; ++i) {
    Vector3<double> light_pos = lights[i].pos;
    Vector3<double> L = diff(light_pos, pos);    // point -> light
    double d = norm(L);                          // distance to the light
    L = normalize(L);
    double hit_t = 0.0;
    // Shadow ray is cast from the light toward the point.
    if (shadow_ray_hit(light_pos, inverse(L), scene_Triangles, nTriangles, &hit_t) && (hit_t > d || d - hit_t < 0.0005)) {
      double k = PHONG_INTENS / (d + 0.001f);    // attenuation; epsilon avoids /0
      diffuse = add(diffuse, mult(lights[i].color, max(PHONG_KD * k * dot_product(L, normal), 0.0)));
      Vector3<double> R = normalize(reflect(inverse(L), normal));
      Vector3<double> S = inverse(dir);          // toward the viewer
      specular = add(specular, mult(lights[i].color, PHONG_KS * k * std::pow(max(dot_product(R, S), 0.0), 32)));
    }
  }
  // All three terms are modulated by the surface color.
  return add(add(mult(ambient, TriangleObj.color), mult(diffuse, TriangleObj.color)), mult(specular, TriangleObj.color));
}
// Traces one primary ray: Moller-Trumbore intersection against every
// triangle, keeping the nearest positive hit, then shades the hit point
// with the Phong model. Returns black for a miss.
// ts_min is only read after k_min has been set, so leaving it
// uninitialized here is safe.
__host__ __device__ uchar4 ray(const Vector3<double> &pos, const Vector3<double> &dir, const Triangle *scene_Triangles, int nTriangles, LightParams *lights, int lights_num) {
int k, k_min = -1;
double ts_min;
for (k = 0; k < nTriangles; k++) {
Vector3<double> e1 = diff(scene_Triangles[k].b, scene_Triangles[k].a);
Vector3<double> e2 = diff(scene_Triangles[k].c, scene_Triangles[k].a);
Vector3<double> p = cross_product(dir, e2);
double div = dot_product(p, e1);
// Ray (nearly) parallel to the triangle plane: no intersection.
if (fabs(div) < 1e-10) continue;
Vector3<double> t = diff(pos, scene_Triangles[k].a);
double u = dot_product(p, t) / div;
if (u < 0.0 || u > 1.0) continue;
Vector3<double> q = cross_product(t, e1);
double v = dot_product(q, dir) / div;
if (v < 0.0 || v + u > 1.0) continue;
double ts = dot_product(q, e2) / div;
// Intersection behind the ray origin.
if (ts < 0.0) continue;
if (k_min == -1 || ts < ts_min) {
k_min = k;
ts_min = ts;
}
}
if (k_min == -1) return {0, 0, 0, 0};
return color_from_normalized(phong_model(add(mult(dir, ts_min), pos), dir, scene_Triangles[k_min], scene_Triangles, nTriangles, lights, lights_num));
}
// CPU renderer: builds the camera basis (bx, by, bz) from position pc and
// look-at point pv, then shoots one ray per pixel over a flat OpenMP loop.
// The image is written bottom-up: screen row j lands at output row h-1-j.
// Assumes w, h > 1 (dw/dh divide by w-1 and h-1); angle is the full FOV in
// degrees (z = cot(angle/2)).
void getRenderOMP(uchar4 *data, int w, int h, Vector3<double> pc, Vector3<double> pv, double angle, const Triangle *scene_Triangles, int nTriangles, LightParams *lights, int lights_num) {
double dw = 2.0 / (w - 1.0);
double dh = 2.0 / (h - 1.0);
double z = 1.0 / tan(angle * M_PI / 360.0);
Vector3<double> bz = normalize(diff(pv, pc));
Vector3<double> bx = normalize(cross_product(bz, {0.0, 0.0, 1.0}));
Vector3<double> by = normalize(cross_product(bx, bz));
#pragma omp parallel for
for (int pix = 0; pix < w * h; ++pix) {
int i = pix % w;
int j = pix / w;
// Camera-space direction; the y extent is scaled by h/w to keep the
// aspect ratio.
Vector3<double> v = {-1.0 + dw * i, (-1.0 + dh * j) * h / w, z};
Vector3<double> dir = mult(bx, by, bz, v);
data[(h - 1 - j) * w + i] = ray(pc, normalize(dir), scene_Triangles, nTriangles, lights, lights_num);
}
}
// GPU renderer: same camera setup and per-pixel tracing as getRenderOMP,
// with pixels covered by 2D grid-stride loops, so any 2D launch
// configuration is correct. Assumes w, h > 1.
__global__ void getRenderCUDA(uchar4 *data, int w, int h, Vector3<double> pc, Vector3<double> pv, double angle, const Triangle *scene_Triangles, int nTriangles, LightParams *lights, int lights_num) {
int id_x = threadIdx.x + blockIdx.x * blockDim.x;
int id_y = threadIdx.y + blockIdx.y * blockDim.y;
int offset_x = blockDim.x * gridDim.x;
int offset_y = blockDim.y * gridDim.y;
double dw = 2.0 / (w - 1.0);
double dh = 2.0 / (h - 1.0);
double z = 1.0 / tan(angle * M_PI / 360.0);
Vector3<double> bz = normalize(diff(pv, pc));
Vector3<double> bx = normalize(cross_product(bz, {0.0, 0.0, 1.0}));
Vector3<double> by = normalize(cross_product(bx, bz));
for (int j = id_y; j < h; j += offset_y)
for (int i = id_x; i < w; i += offset_x) {
Vector3<double> v = {-1.0 + dw * i, (-1.0 + dh * j) * h / w, z};
Vector3<double> dir = mult(bx, by, bz, v);
data[(h - 1 - j) * w + i] =
ray(pc, normalize(dir), scene_Triangles, nTriangles, lights, lights_num);
}
}
// Averages the kernel_w x kernel_h box of source pixels whose top-left
// corner is (row i, column j); w is the source image width. The h parameter
// is unused. No bounds clamping — callers must keep the box inside the
// source image.
__host__ __device__ uchar4 SSAA(uchar4 *data, int i, int j, int w, int h, int kernel_w, int kernel_h) {
Vector3<double> res;
for (int y = i; y < i + kernel_h; ++y)
for (int x = j; x < j + kernel_w; ++x) {
auto pix = data[y * w + x];
res = add(res, Vector3<double>{(double)pix.x, (double)pix.y, (double)pix.z});
}
auto pix = div(res, kernel_w * kernel_h);
return make_uchar4(pix.x, pix.y, pix.z, 0);
}
// Downsamples src (w x h) to dst (new_w x new_h) by box-averaging
// kernel_w x kernel_h tiles, using 2D grid-stride loops. Assumes w and h
// are integer multiples of new_w and new_h.
__global__ void getSSAA_CUDA(uchar4 *dst, uchar4 *src, int new_w, int new_h, int w, int h) {
int id_x = threadIdx.x + blockIdx.x * blockDim.x;
int id_y = threadIdx.y + blockIdx.y * blockDim.y;
int offset_x = blockDim.x * gridDim.x;
int offset_y = blockDim.y * gridDim.y;
int kernel_w = w / new_w;
int kernel_h = h / new_h;
for (int i = id_y; i < new_h; i += offset_y) {
for (int j = id_x; j < new_w; j += offset_x) {
int pix_i = i * kernel_h;
int pix_j = j * kernel_w;
dst[i * new_w + j] = SSAA(src, pix_i, pix_j, w, h, kernel_w, kernel_h);
}
}
}
// CPU counterpart of getSSAA_CUDA: downsamples src (w x h) to dst
// (new_w x new_h) by box-averaging kernel_w x kernel_h tiles over a flat
// OpenMP loop. Assumes w and h are integer multiples of new_w and new_h.
void getSSAA_OMP(uchar4 *dst, uchar4 *src, int new_w, int new_h, int w, int h) {
int kernel_w = w / new_w;
int kernel_h = h / new_h;
#pragma omp parallel for
for (int pix = 0; pix < new_w * new_h; ++pix) {
int i = pix / new_w;
int j = pix % new_w;
int pix_i = i * kernel_h;
int pix_j = j * kernel_w;
dst[i * new_w + j] = SSAA(src, pix_i, pix_j, w, h, kernel_w, kernel_h);
}
}
// Evaluates the camera position *pc and look-at point *pv at time t.
// Each trajectory is cylindrical: phi advances linearly (phi0 + wphi*t),
// while r and z oscillate sinusoidally around r0 and z0.
// Fix: the z coordinate used the r-amplitude (c.ar / n.ar); it now uses the
// z-amplitude fields (c.az / n.az) as the CameraMovement layout intends.
void getCameraPos(const CameraMovement &c, const CameraMovement &n, double t, Vector3<double> *pc, Vector3<double> *pv) {
  double phic = c.phi0 + c.wphi * t, phin = n.phi0 + n.wphi * t;
  double rc = c.r0 + c.ar * sin(c.wr * t + c.pr), zc = c.z0 + c.az * sin(c.wz * t + c.pz);
  double rn = n.r0 + n.ar * sin(n.wr * t + n.pr), zn = n.z0 + n.az * sin(n.wz * t + n.pz);
  *pv = Vector3<double>{rn * cos(phin), rn * sin(phin), zn};
  *pc = Vector3<double>{rc * cos(phic), rc * sin(phic), zc};
}
// Writes one frame as: int w, int h, then w*h raw uchar4 pixels, using
// per-rank MPI-IO (MPI_COMM_SELF) so ranks can write their own frames
// concurrently.
// NOTE(review): MPI_MODE_CREATE does not truncate an existing file, so
// overwriting a longer old frame leaves stale trailing bytes — confirm
// this is acceptable for the consumer of these files.
void write_image(const std::string &path, const std::vector<uchar4> &data, int w, int h) {
MPI_File file;
CHECK_MPI(MPI_File_open(MPI_COMM_SELF, path.data(), MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &file));
CHECK_MPI(MPI_File_write(file, &w, 1, MPI_INT, MPI_STATUS_IGNORE));
CHECK_MPI(MPI_File_write(file, &h, 1, MPI_INT, MPI_STATUS_IGNORE));
CHECK_MPI(MPI_File_write(file, data.data(), sizeof(uchar4) * w * h, MPI_BYTE, MPI_STATUS_IGNORE));
CHECK_MPI(MPI_File_close(&file));
}
// Last-resort handler for SIGSEGV/SIGABRT: report the signal and shut MPI
// down before exiting.
// NOTE(review): std::cout, MPI_Finalize and exit are not async-signal-safe;
// this is best-effort diagnostics only.
void signal_handler(int signal){
std::cout << "Error. Bad signal: " << signal << std::endl;
MPI_Finalize();
exit(0);
}
// Entry point. Rank 0 reads the scene from stdin and loads the three .obj
// models plus the floor; everything is broadcast to all ranks, and frames
// are rendered round-robin (frame = rank + k*nprocesses) with either the
// OpenMP or CUDA backend (--cpu/--gpu), each frame written via MPI-IO.
// Fixes: lights_num is broadcast BEFORE sizing params.lights (non-root
// ranks resized with an uninitialized count and then received light data
// into a wrongly sized buffer); the CUDA runtime is only touched in GPU
// mode so --cpu works on hosts without a device; sprintf -> snprintf;
// launch errors are checked with cudaGetLastError.
int main(int argc, char *argv[]) {
  std::signal(SIGSEGV, signal_handler);
  std::signal(SIGABRT, signal_handler);
  MPIContext ctx(&argc, &argv);
  parse_flags(argc, argv);
  int rank, nprocesses;
  CHECK_MPI(MPI_Comm_rank(MPI_COMM_WORLD, &rank));
  CHECK_MPI(MPI_Comm_size(MPI_COMM_WORLD, &nprocesses));
  CHECK_MPI(MPI_Barrier(MPI_COMM_WORLD));
  std::vector<Triangle> scene_Triangles;
  Params params;
  if (rank == 0) {
    std::cin >> params;
    importObj(scene_Triangles, "hex.obj", params.hex);
    importObj(scene_Triangles, "octa.obj", params.octa);
    importObj(scene_Triangles, "icos.obj", params.icos);
    // Floor quad as two triangles (same layout as add_floor_to_scene).
    scene_Triangles.push_back({params.floor.c, params.floor.b, params.floor.a, params.floor.color});
    scene_Triangles.push_back({params.floor.a, params.floor.d, params.floor.c, params.floor.color});
  }
  // Broadcast everything rank 0 read. POD members go as raw bytes; strings
  // and vectors are sent as (size, payload).
  int output_pattern_size = params.output_pattern.size();
  CHECK_MPI(MPI_Bcast(&params.nframes, 1, MPI_INT, 0, MPI_COMM_WORLD));
  CHECK_MPI(MPI_Bcast(&output_pattern_size, 1, MPI_INT, 0, MPI_COMM_WORLD));
  params.output_pattern.resize(output_pattern_size);
  CHECK_MPI(MPI_Bcast((char *)params.output_pattern.data(), output_pattern_size, MPI_CHAR, 0, MPI_COMM_WORLD));
  CHECK_MPI(MPI_Bcast(&params.w, 1, MPI_INT, 0, MPI_COMM_WORLD));
  CHECK_MPI(MPI_Bcast(&params.h, 1, MPI_INT, 0, MPI_COMM_WORLD));
  CHECK_MPI(MPI_Bcast(&params.angle, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD));
  CHECK_MPI(MPI_Bcast(&params.camera_center, sizeof(CameraMovement), MPI_BYTE, 0, MPI_COMM_WORLD));
  CHECK_MPI(MPI_Bcast(&params.camera_dir, sizeof(CameraMovement), MPI_BYTE, 0, MPI_COMM_WORLD));
  CHECK_MPI(MPI_Bcast(&params.hex, sizeof(FigureParams), MPI_BYTE, 0, MPI_COMM_WORLD));
  CHECK_MPI(MPI_Bcast(&params.octa, sizeof(FigureParams), MPI_BYTE, 0, MPI_COMM_WORLD));
  CHECK_MPI(MPI_Bcast(&params.icos, sizeof(FigureParams), MPI_BYTE, 0, MPI_COMM_WORLD));
  CHECK_MPI(MPI_Bcast(&params.floor.a, sizeof(Vector3<double>), MPI_BYTE, 0, MPI_COMM_WORLD));
  CHECK_MPI(MPI_Bcast(&params.floor.b, sizeof(Vector3<double>), MPI_BYTE, 0, MPI_COMM_WORLD));
  CHECK_MPI(MPI_Bcast(&params.floor.c, sizeof(Vector3<double>), MPI_BYTE, 0, MPI_COMM_WORLD));
  CHECK_MPI(MPI_Bcast(&params.floor.d, sizeof(Vector3<double>), MPI_BYTE, 0, MPI_COMM_WORLD));
  int texture_path_size = params.floor.texture_path.size();
  CHECK_MPI(MPI_Bcast(&texture_path_size, 1, MPI_INT, 0, MPI_COMM_WORLD));
  params.floor.texture_path.resize(texture_path_size);
  CHECK_MPI(MPI_Bcast((char *)params.floor.texture_path.data(), texture_path_size, MPI_CHAR, 0, MPI_COMM_WORLD));
  CHECK_MPI(MPI_Bcast(&params.floor.color, sizeof(Vector3<double>), MPI_BYTE, 0, MPI_COMM_WORLD));
  CHECK_MPI(MPI_Bcast(&params.floor.k_refl, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD));
  // Fix: lights_num must be known on all ranks before sizing the vector.
  CHECK_MPI(MPI_Bcast(&params.lights_num, 1, MPI_INT, 0, MPI_COMM_WORLD));
  params.lights.resize(params.lights_num);
  CHECK_MPI(MPI_Bcast(params.lights.data(), sizeof(LightParams) * params.lights_num, MPI_BYTE, 0, MPI_COMM_WORLD));
  int nTriangles = scene_Triangles.size();
  CHECK_MPI(MPI_Bcast(&nTriangles, 1, MPI_INT, 0, MPI_COMM_WORLD));
  scene_Triangles.resize(nTriangles);
  CHECK_MPI(MPI_Bcast(scene_Triangles.data(), sizeof(Triangle) * nTriangles, MPI_BYTE, 0, MPI_COMM_WORLD));
  Triangle *gpu_scene_Triangles;
  LightParams *gpu_lights;
  if (parallelizationMode == CUDA) {
    // Ranks share the available devices round-robin.
    int ndevices;
    CSC(cudaGetDeviceCount(&ndevices));
    CSC(cudaSetDevice(rank % ndevices));
    auto Triangles_size = sizeof(Triangle) * scene_Triangles.size();
    CSC(cudaMalloc(&gpu_scene_Triangles, Triangles_size));
    CSC(cudaMemcpy(gpu_scene_Triangles, scene_Triangles.data(), Triangles_size, cudaMemcpyHostToDevice));
    auto lights_size = sizeof(LightParams) * params.lights_num;
    CSC(cudaMalloc(&gpu_lights, lights_size));
    CSC(cudaMemcpy(gpu_lights, params.lights.data(), lights_size, cudaMemcpyHostToDevice));
  }
  CHECK_MPI(MPI_Barrier(MPI_COMM_WORLD));
  // Render at SSSAx times the output resolution, then box-downsample (SSAA).
  std::vector<uchar4> data_render(SSSAx*params.w * SSSAx*params.h), data_ssaa(params.w * params.h);
  uchar4 *gpu_data_render, *gpu_data_ssaa;
  if (parallelizationMode == CUDA) {
    CSC(cudaMalloc(&gpu_data_render, sizeof(uchar4) * SSSAx*params.w * SSSAx*params.h));
    CSC(cudaMalloc(&gpu_data_ssaa, sizeof(uchar4) * params.w * params.h));
  }
  for (int frame = rank; frame < params.nframes; frame += nprocesses) {
    Vector3<double> pc, pv;
    getCameraPos(params.camera_center, params.camera_dir, 0.1 * (double)frame, &pc, &pv);
    auto start = std::chrono::high_resolution_clock::now();
    if (parallelizationMode == OpenMP) {
      getRenderOMP(data_render.data(), SSSAx*params.w, SSSAx*params.h, pc, pv, params.angle, scene_Triangles.data(), scene_Triangles.size(), params.lights.data(), params.lights.size());
      getSSAA_OMP(data_ssaa.data(), data_render.data(), params.w, params.h, SSSAx*params.w, SSSAx*params.h);
    }
    if (parallelizationMode == CUDA) {
      getRenderCUDA<<<NBLOCKS, NTHREADS>>>(gpu_data_render, SSSAx*params.w, SSSAx*params.h, pc, pv, params.angle, gpu_scene_Triangles, scene_Triangles.size(), gpu_lights, params.lights.size());
      CSC(cudaGetLastError());      // catch launch-configuration errors
      CSC(cudaDeviceSynchronize());
      getSSAA_CUDA<<<NBLOCKS, NTHREADS>>>(gpu_data_ssaa, gpu_data_render, params.w, params.h, SSSAx*params.w, SSSAx*params.h);
      CSC(cudaGetLastError());
      // The blocking copy also synchronizes the downsample kernel.
      CSC(cudaMemcpy(data_ssaa.data(), gpu_data_ssaa, sizeof(uchar4) * params.w * params.h, cudaMemcpyDeviceToHost));
    }
    char output_path[256];
    snprintf(output_path, sizeof(output_path), params.output_pattern.data(), frame);
    write_image(output_path, data_ssaa, params.w, params.h);
    auto time = std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::high_resolution_clock::now() - start);
    std::cout << frame << "\t" << output_path << "\t" << time.count() << "ms" << std::endl;
  }
  if (parallelizationMode == CUDA) {
    CSC(cudaFree(gpu_scene_Triangles));
    CSC(cudaFree(gpu_lights));
    CSC(cudaFree(gpu_data_ssaa));
    CSC(cudaFree(gpu_data_render));
  }
  return 0;
}
91ccc78d3d9842b90974a674f73f2b8f260266a8.hip | // !!! This is a file automatically generated by hipify!!!
//
// Calculates the SPME forces and energies for a pair of charges
//
#include <hip/hip_runtime.h>
#include <iostream>
#include "XYZQ.h"
#include "Force.h"
#include "CudaPMERecip.h"
template <typename T, typename T2>
void calcPair(const double r, const double L, const double kappa, const int nfft, const int order);
// Parses -r -L -kappa -nfft -order, then runs the single-precision SPME
// pair calculation. Returns 0 on success, 1 on bad or missing arguments.
// Fix: the success path previously returned 1 (failure) as well.
int main(int argc, char *argv[]) {
  double L = 0.0, r = 0.0, kappa = 0.0;
  int nfft = 0, order = 0;
  bool arg_ok = true;
  int iarg = 1;
  // Consumes the value following flag argv[iarg]; clears arg_ok if missing.
  auto next_arg = [&](const char *fmt, void *dst) {
    iarg++;
    if (iarg == argc) {
      arg_ok = false;
      return false;
    }
    sscanf(argv[iarg], fmt, dst);
    iarg++;
    return true;
  };
  while (iarg < argc && arg_ok) {
    if (strcmp(argv[iarg], "-r") == 0) {
      if (!next_arg("%lf", &r)) break;
    } else if (strcmp(argv[iarg], "-L") == 0) {
      if (!next_arg("%lf", &L)) break;
    } else if (strcmp(argv[iarg], "-kappa") == 0) {
      if (!next_arg("%lf", &kappa)) break;
    } else if (strcmp(argv[iarg], "-nfft") == 0) {
      if (!next_arg("%d", &nfft)) break;
    } else if (strcmp(argv[iarg], "-order") == 0) {
      if (!next_arg("%d", &order)) break;
    } else {
      std::cout << "Invalid input parameter " << argv[iarg] << std::endl;
      arg_ok = false;
    }
  }
  // All five parameters are required and must be non-zero.
  if (!arg_ok || r == 0.0 || L == 0.0 || kappa == 0.0 || nfft == 0 || order == 0) {
    std::cout << "Usage: gpu_pair -r r -L L -kappa kappa -nfft nfft -order order"<< std::endl;
    return 1;
  }
  calcPair<float, float2>(r, L, kappa, nfft, order);
  return 0;
}
// Builds a two-charge system (-1 and +1 separated by r along x, centered in
// a cubic box of side L), runs one SPME reciprocal-space cycle
// (spread -> r2c FFT -> scalar sum -> c2r FFT -> gather forces), and prints
// r, the reciprocal and self energies, and the per-charge forces.
template <typename T, typename T2>
void calcPair(const double r, const double L, const double kappa, const int nfft, const int order) {
const FFTtype fft_type = BOX;
// Setup reciprocal vectors
double recip[9];
for (int i=0;i < 9;i++) recip[i] = 0;
// Diagonal reciprocal matrix 1/L for a cubic box.
recip[0] = 1.0/L;
recip[4] = 1.0/L;
recip[8] = 1.0/L;
CudaEnergyVirial energyVirial;
XYZQ xyzq(2);
CudaPMERecip<int, T, T2> grid(nfft, nfft, nfft, order, fft_type, 1, 0,
energyVirial, "recip", "self");
Force<T> force(2);
// r = Distance along diagonal
// `a` belongs to the commented-out diagonal placement below; unused now.
double a = r/(2.0*sqrt(3.0));
float4 h_xyzq[2];
T fx[2], fy[2], fz[2];
//h_xyzq[0].x = -a + 0.5*L;
//h_xyzq[0].y = -a + 0.5*L;
//h_xyzq[0].z = -a + 0.5*L;
//h_xyzq[1].x = a + 0.5*L;
//h_xyzq[1].y = a + 0.5*L;
//h_xyzq[1].z = a + 0.5*L;
// Charges sit on the x axis, symmetric about the box center.
h_xyzq[0].x = -r/2.0 + 0.5*L;
h_xyzq[0].y = 0.5*L;
h_xyzq[0].z = 0.5*L;
h_xyzq[1].x = r/2.0 + 0.5*L;
h_xyzq[1].y = 0.5*L;
h_xyzq[1].z = 0.5*L;
h_xyzq[0].w = -1.0;
h_xyzq[1].w = 1.0;
xyzq.set_xyzq(2, h_xyzq);
energyVirial.clear();
// One full reciprocal-space cycle; the scalar sum uses Ewald factor kappa.
grid.spread_charge(xyzq.xyzq, xyzq.ncoord, recip);
grid.r2c_fft();
grid.scalar_sum(recip, kappa, true, true);
grid.c2r_fft();
grid.gather_force(xyzq.xyzq, xyzq.ncoord, recip, force.stride(), force.xyz());
force.getXYZ(fx, fy, fz);
double energy, energy_self, virial[9];
//grid.get_energy_virial(kappa, true, true, energy, energy_self, virial);
// Sync before reading the accumulated energies on the host.
energyVirial.copyToHost();
cudaCheck(hipDeviceSynchronize());
energy = energyVirial.getEnergy("recip");
energy_self = energyVirial.getEnergy("self");
energyVirial.getVirial(virial);
printf("%lf %e %e %e %e %e %e %e %e\n",r,energy,energy_self,fx[0],fy[0],fz[0],fx[1],fy[1],fz[1]);
}
| 91ccc78d3d9842b90974a674f73f2b8f260266a8.cu | //
// Calculates the SPME forces and energies for a pair of charges
//
#include <cuda.h>
#include <iostream>
#include "XYZQ.h"
#include "Force.h"
#include "CudaPMERecip.h"
template <typename T, typename T2>
void calcPair(const double r, const double L, const double kappa, const int nfft, const int order);
int main(int argc, char *argv[]) {
double L=0.0, r=0.0, kappa=0.0;
int nfft=0, order=0;
bool arg_ok = true;
int iarg = 1;
while (iarg < argc) {
if (strcmp(argv[iarg],"-r")==0) {
iarg++;
if (iarg == argc) {
arg_ok = false;
break;
}
sscanf(argv[iarg],"%lf",&r);
iarg++;
} else if (strcmp(argv[iarg],"-L")==0) {
iarg++;
if (iarg == argc) {
arg_ok = false;
break;
}
sscanf(argv[iarg],"%lf",&L);
iarg++;
} else if (strcmp(argv[iarg],"-kappa")==0) {
iarg++;
if (iarg == argc) {
arg_ok = false;
break;
}
sscanf(argv[iarg],"%lf",&kappa);
iarg++;
} else if (strcmp(argv[iarg],"-nfft")==0) {
iarg++;
if (iarg == argc) {
arg_ok = false;
break;
}
sscanf(argv[iarg],"%d",&nfft);
iarg++;
} else if (strcmp(argv[iarg],"-order")==0) {
iarg++;
if (iarg == argc) {
arg_ok = false;
break;
}
sscanf(argv[iarg],"%d",&order);
iarg++;
} else {
std::cout << "Invalid input parameter " << argv[iarg] << std::endl;
arg_ok = false;
break;
}
}
if (!arg_ok || r == 0.0 || L == 0.0 || kappa == 0.0 || nfft == 0 || order == 0) {
std::cout << "Usage: gpu_pair -r r -L L -kappa kappa -nfft nfft -order order"<< std::endl;
return 1;
}
calcPair<float, float2>(r, L, kappa, nfft, order);
return 1;
}
// Builds a two-charge system (-1 and +1 separated by r along x, centered in
// a cubic box of side L), runs one SPME reciprocal-space cycle
// (spread -> r2c FFT -> scalar sum -> c2r FFT -> gather forces), and prints
// r, the reciprocal and self energies, and the per-charge forces.
template <typename T, typename T2>
void calcPair(const double r, const double L, const double kappa, const int nfft, const int order) {
const FFTtype fft_type = BOX;
// Setup reciprocal vectors
double recip[9];
for (int i=0;i < 9;i++) recip[i] = 0;
// Diagonal reciprocal matrix 1/L for a cubic box.
recip[0] = 1.0/L;
recip[4] = 1.0/L;
recip[8] = 1.0/L;
CudaEnergyVirial energyVirial;
XYZQ xyzq(2);
CudaPMERecip<int, T, T2> grid(nfft, nfft, nfft, order, fft_type, 1, 0,
energyVirial, "recip", "self");
Force<T> force(2);
// r = Distance along diagonal
// `a` belongs to the commented-out diagonal placement below; unused now.
double a = r/(2.0*sqrt(3.0));
float4 h_xyzq[2];
T fx[2], fy[2], fz[2];
//h_xyzq[0].x = -a + 0.5*L;
//h_xyzq[0].y = -a + 0.5*L;
//h_xyzq[0].z = -a + 0.5*L;
//h_xyzq[1].x = a + 0.5*L;
//h_xyzq[1].y = a + 0.5*L;
//h_xyzq[1].z = a + 0.5*L;
// Charges sit on the x axis, symmetric about the box center.
h_xyzq[0].x = -r/2.0 + 0.5*L;
h_xyzq[0].y = 0.5*L;
h_xyzq[0].z = 0.5*L;
h_xyzq[1].x = r/2.0 + 0.5*L;
h_xyzq[1].y = 0.5*L;
h_xyzq[1].z = 0.5*L;
h_xyzq[0].w = -1.0;
h_xyzq[1].w = 1.0;
xyzq.set_xyzq(2, h_xyzq);
energyVirial.clear();
// One full reciprocal-space cycle; the scalar sum uses Ewald factor kappa.
grid.spread_charge(xyzq.xyzq, xyzq.ncoord, recip);
grid.r2c_fft();
grid.scalar_sum(recip, kappa, true, true);
grid.c2r_fft();
grid.gather_force(xyzq.xyzq, xyzq.ncoord, recip, force.stride(), force.xyz());
force.getXYZ(fx, fy, fz);
double energy, energy_self, virial[9];
//grid.get_energy_virial(kappa, true, true, energy, energy_self, virial);
// Sync before reading the accumulated energies on the host.
energyVirial.copyToHost();
cudaCheck(cudaDeviceSynchronize());
energy = energyVirial.getEnergy("recip");
energy_self = energyVirial.getEnergy("self");
energyVirial.getVirial(virial);
printf("%lf %e %e %e %e %e %e %e %e\n",r,energy,energy_self,fx[0],fy[0],fz[0],fx[1],fy[1],fz[1]);
}
|
09111744d6e50d4bdeb67f3630ea292b022f35f6.hip | // !!! This is a file automatically generated by hipify!!!
// C++ includes.
#include <iostream>
#include <vector>
// CUDA includes.
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
// Custom includes.
#include "EvalCUDA.hh"
#define NBINS 100
#define BINL 0.5
#define NEVENTS 5000
namespace ReCUDA
{
// Per-(parameter-set, bin) recombination factor R = dQ/dE.
// Indexing: threadIdx.x selects the parameter set, blockIdx.x the bin, so
// the launch must use exactly NBINS blocks and the parameter count as the
// block size — no bounds guard (launch site not visible here; TODO confirm).
// Bins with dEBins == 0 produce inf/nan.
__global__ void GetR( double *dQBins, double *dEBins, double *R )
{
int Index = NBINS * threadIdx.x + blockIdx.x;
R[ Index ] = dQBins[ Index ] / dEBins[ Index ];
}
// Accumulates Birks-law charge and energy histograms for n G4 steps, for
// 32 parameter pairs at once (threadIdx.y selects the pair; each pair owns
// an NBINS-wide slice of dQBins/dEBins). Per step:
//   dQ = p0 * (dE/dx) / (1 + (p1/F) * (dE/dx)) * dx,
// binned by B[step] and normalized by BINL*NEVENTS. NaN results (e.g.
// dx == 0) are skipped. atomicAdd on double requires compute capability
// 6.0+ (or a HIP device with native double atomics).
__global__ void Birks( double *dE, double *dx, int *B, double F, double* p, size_t n, double *dQBins, double *dEBins )
{
int ThreadID = blockIdx.x * blockDim.x + threadIdx.x;
// Parameters are stored as (p0, p1) pairs per parameter set.
int ParameterID = 2 * threadIdx.y;
double StepdQ, StepdE;
if( ThreadID < n )
{
StepdQ = ( p[ParameterID + 0] * ( dE[ThreadID] / dx[ThreadID] ) /
( 1 + ( p[ParameterID + 1] / F ) *
( dE[ThreadID] / dx[ThreadID] ) ) ) * dx[ThreadID];
StepdE = dE[ThreadID];
if( !std::isnan(StepdQ) ) atomicAdd(dQBins + NBINS * threadIdx.y + B[ThreadID], StepdQ / (BINL * NEVENTS));
if( !std::isnan(StepdE) ) atomicAdd(dEBins + NBINS * threadIdx.y + B[ThreadID], StepdE / (BINL * NEVENTS));
}
}
// Total Birks-law charge yield per parameter pair: same per-step dQ formula
// as Birks(), but summed into a single accumulator QY[threadIdx.y] instead
// of a histogram. The 1/1000 factor is presumably a unit conversion — TODO
// confirm. NaN steps are skipped; double atomicAdd requires CC 6.0+.
__global__ void DMBirks( double *dE, double *dx, double F, size_t n, double* p, double* QY )
{
int ThreadID = blockIdx.x * blockDim.x + threadIdx.x;
// Parameters are stored as (p0, p1) pairs per parameter set.
int ParameterID = 2 * threadIdx.y;
double StepdQ;
if( ThreadID < n )
{
StepdQ = ( p[ParameterID + 0] * ( dE[ThreadID] / dx[ThreadID] ) /
( 1 + ( p[ParameterID + 1] / F ) *
( dE[ThreadID] / dx[ThreadID] ) ) ) * dx[ThreadID];
if( !std::isnan(StepdQ) ) atomicAdd( QY + threadIdx.y, StepdQ / ( 1000 ) );
}
}
// Host driver for DMBirks: for every field value in F, runs the kernel over
// the full step sample (dE, dx) for successive batches of 32 parameter pairs
// taken from p, and stores the 32 resulting yields of each batch in QY[fi].
// Assumes (unchecked): dE.size() == dx.size(); p.size() is a multiple of 64
// (trailing parameters beyond the last full batch are silently skipped);
// QY[fi] is pre-sized to at least 32 * (p.size() / 64) doubles — TODO confirm
// with the caller.  NOTE(review): HIP allocation/copy return codes are not
// checked; only the kernel launch is error-checked.
void DMBirksWrapper( const std::vector<double> &dE, const std::vector<double> &dx,
const std::vector<double> &F, const std::vector<double> &p,
std::vector< std::vector<double> > &QY )
{
// Set the thread-dimensions of each block.
const int NThreadEntries(32);
const int NThreadParameters(32);
const dim3 ThreadDimensions( NThreadEntries, NThreadParameters );
// We will need a few constants/dimensions for the calculation.
const size_t N(dE.size());
const size_t NFields(F.size());
// Create the pointers for the arrays in device memory.
double *DeviceG4dE;
double *DeviceG4dx;
double *DeviceP;
double *DeviceQY;
// Now we need to allocate memory on the device for these arrays. The raw input only needs
// to be copied once, but the parameters will only be processed in batches of 32 (allocate
// memory in loop).
hipMalloc( (void**)&DeviceG4dE, sizeof(double) * N );
hipMalloc( (void**)&DeviceG4dx, sizeof(double) * N );
hipMalloc( (void**)&DeviceP, sizeof(double) * 64 );
hipMalloc( (void**)&DeviceQY, sizeof(double) * 32 );
// Now copy the raw G4 input to the device.
hipMemcpy( DeviceG4dE, &dE[0], sizeof(double) * N, hipMemcpyHostToDevice );
hipMemcpy( DeviceG4dx, &dx[0], sizeof(double) * N, hipMemcpyHostToDevice );
// So the major time cost with outsourcing the recombination calculation to a GPU is in
// copying to and from memory, and other overhead. We can minimize this by copying the
// G4 input to the device memory once and performing all calculations on it at once. We will
// need to copy the dQ calculations back to the host and the batch of parameters to the
// device, but this is relatively little compared to the large size of the raw input.
size_t ParHead;
for( size_t fi(0); fi < NFields; ++fi )
{
ParHead = 0;
// Process p in batches of 64 doubles = 32 (A, k) parameter pairs.
while( ParHead + 64 <= p.size() )
{
// Reset the arrays allocated for the QY calculation.
hipMemset( DeviceQY, 0, sizeof(double) * 32 );
// Copy the current batch of parameters to the device.
hipMemcpy( DeviceP, &p[ParHead], sizeof(double) * 64, hipMemcpyHostToDevice );
// Make a kernel call for this batch of parameters and field.
hipLaunchKernelGGL(( DMBirks), dim3(( N / NThreadEntries + 1 )), dim3(ThreadDimensions) , 0, 0, DeviceG4dE,
DeviceG4dx,
F[fi],
N,
DeviceP,
DeviceQY );
hipDeviceSynchronize();
// Retrieve any error states (if any).
hipError_t Error = hipGetLastError();
if( Error != hipSuccess ) std::cerr << "CUDA error: " << hipGetErrorString(Error) << std::endl;
// Copy the resulting calculated values back to the host.
// Batch b lands at offset 32*b inside QY[fi].
hipMemcpy( &QY[fi][ 32 * (ParHead / 64) ], DeviceQY, sizeof(double) * 32, hipMemcpyDeviceToHost );
ParHead += 64;
}
}
// Once we are finished we need to release the memory that was allocated on the device.
hipFree( DeviceG4dE );
hipFree( DeviceG4dx );
hipFree( DeviceP );
hipFree( DeviceQY );
}
// Host driver for Birks + GetR: for every field in F and every batch of 32
// parameter pairs from p, fills the per-parameter dQ/dE histograms on the
// device, converts them to R = dQ/dE with GetR, and copies R and dE back.
// Assumes (unchecked): dE, dx and B have the same length; p.size() is a
// multiple of 64; RBins[fi] and dEBins[fi] are pre-sized to hold
// 32 * NBINS * (p.size() / 64) doubles — TODO confirm with the caller.
// NOTE(review): allocation/copy return codes are not checked.
void BirksWrapper( const std::vector<double> &dE, const std::vector<double> &dx,
const std::vector<int> &B, const std::vector<double> &F,
const std::vector<double> &p, std::vector< std::vector<double> > &RBins,
std::vector< std::vector<double> > &dEBins )
{
// Set the thread-dimensions of each block.
const int NThreadEntries(32);
const int NThreadParameters(32);
const dim3 ThreadDimensions( NThreadEntries, NThreadParameters );
// We will need a few constants/dimensions for the calculation.
const size_t N(dE.size());
const size_t NFields(F.size());
// Create the pointers for the arrays in device memory.
double *DeviceG4dE;
double *DeviceG4dx;
double *DevicedQBins;
double *DevicedEBins;
double *DeviceRBins;
double *DeviceP;
int *DeviceB;
// Now we need to allocate memory on the device for these arrays. The raw input only needs
// to be copied once, but the parameters will only be processed in batches of 32 (allocate
// memory in loop).
hipMalloc( (void**)&DeviceG4dE, sizeof(double) * N );
hipMalloc( (void**)&DeviceG4dx, sizeof(double) * N );
hipMalloc( (void**)&DevicedQBins, sizeof(double) * 32 * NBINS );
hipMalloc( (void**)&DevicedEBins, sizeof(double) * 32 * NBINS );
hipMalloc( (void**)&DeviceRBins, sizeof(double) * 32 * NBINS );
hipMalloc( (void**)&DeviceP, sizeof(double) * 64 );
hipMalloc( (void**)&DeviceB, sizeof(int) * N );
// Now copy the raw G4 input to the device.
hipMemcpy( DeviceG4dE, &dE[0], sizeof(double) * N, hipMemcpyHostToDevice );
hipMemcpy( DeviceG4dx, &dx[0], sizeof(double) * N, hipMemcpyHostToDevice );
hipMemcpy( DeviceB, &B[0], sizeof(int) * N, hipMemcpyHostToDevice );
// So the major time cost with outsourcing the recombination calculation to a GPU is in
// copying to and from memory, and other overhead. We can minimize this by copying the
// G4 input to the device memory once and performing all calculations on it at once. We will
// need to copy the dQ calculations back to the host and the batch of parameters to the
// device, but this is relatively little compared to the large size of the raw input.
size_t ParHead;
for( size_t fi(0); fi < NFields; ++fi )
{
ParHead = 0;
// Process p in batches of 64 doubles = 32 (A, k) parameter pairs.
while( ParHead + 64 <= p.size() )
{
// Reset the arrays allocated for the segment dQ and dE calculation.
hipMemset( DevicedQBins, 0, sizeof(double) * 32 * NBINS );
hipMemset( DevicedEBins, 0, sizeof(double) * 32 * NBINS );
// Copy the current batch of parameters to the device.
hipMemcpy( DeviceP, &p[ParHead], sizeof(double) * 64, hipMemcpyHostToDevice );
// Make a kernel call for this batch of parameters and field.
hipLaunchKernelGGL(( Birks), dim3(( N / NThreadEntries + 1 )), dim3(ThreadDimensions) , 0, 0, DeviceG4dE,
DeviceG4dx,
DeviceB,
F[fi],
DeviceP,
N,
DevicedQBins,
DevicedEBins );
hipDeviceSynchronize();
// Retrieve any error states (if any).
hipError_t Error = hipGetLastError();
if( Error != hipSuccess ) std::cerr << "CUDA error: " << hipGetErrorString(Error) << std::endl;
// Copy the resulting calculated values back to the host.
// (GetR first turns the dQ/dE histograms into R on the device; the
// synchronous memcpys below also fence the GetR launch.)
hipLaunchKernelGGL(( GetR), dim3(NBINS), dim3(32) , 0, 0, DevicedQBins, DevicedEBins, DeviceRBins );
hipMemcpy( &RBins[fi][ 32 * NBINS * (ParHead / 64) ], DeviceRBins, sizeof(double) * 32 * NBINS, hipMemcpyDeviceToHost );
hipMemcpy( &dEBins[fi][ 32 * NBINS * (ParHead / 64) ], DevicedEBins, sizeof(double) * 32 * NBINS, hipMemcpyDeviceToHost );
ParHead += 64;
}
}
// Once we are finished we need to release the memory that was allocated on the device.
hipFree( DeviceG4dE );
hipFree( DeviceG4dx );
hipFree( DevicedQBins );
hipFree( DevicedEBins );
hipFree( DeviceRBins );
hipFree( DeviceP );
hipFree( DeviceB );
}
}
| 09111744d6e50d4bdeb67f3630ea292b022f35f6.cu | // C++ includes.
#include <iostream>
#include <vector>
// CUDA includes.
#include <cuda.h>
#include <cuda_runtime.h>
// Custom includes.
#include "EvalCUDA.hh"
#define NBINS 100
#define BINL 0.5
#define NEVENTS 5000
namespace ReCUDA
{
// Forms the recombination ratio R = dQ/dE for every (parameter, bin) cell.
// Launch shape: <<<NBINS, 32>>> — threadIdx.x picks the parameter row,
// blockIdx.x picks the energy bin within that row.
__global__ void GetR( double *dQBins, double *dEBins, double *R )
{
    const int row  = NBINS * threadIdx.x;   // start of this parameter's histogram
    const int cell = row + blockIdx.x;      // this thread's (parameter, bin) slot
    R[cell] = dQBins[cell] / dEBins[cell];
}
// Histograms charge (dQ) and deposited energy (dE) per residual-range bin for
// 32 Birks parameter pairs at once.  One thread = one (G4 step, parameter
// pair): ThreadID walks the step arrays, threadIdx.y selects which pair of
// p is applied.  Launch shape: grid.x covers n steps, block = (32, 32).
__global__ void Birks( double *dE, double *dx, int *B, double F, double* p, size_t n, double *dQBins, double *dEBins )
{
int ThreadID = blockIdx.x * blockDim.x + threadIdx.x;
int ParameterID = 2 * threadIdx.y;
double StepdQ, StepdE;
if( ThreadID < n )
{
// Birks recombination law: dQ = A * (dE/dx) / (1 + (k/F) * (dE/dx)) * dx,
// with A = p[ParameterID], k = p[ParameterID + 1], F the field strength.
StepdQ = ( p[ParameterID + 0] * ( dE[ThreadID] / dx[ThreadID] ) /
( 1 + ( p[ParameterID + 1] / F ) *
( dE[ThreadID] / dx[ThreadID] ) ) ) * dx[ThreadID];
StepdE = dE[ThreadID];
// Accumulate into the per-parameter histograms (row threadIdx.y, bin
// B[ThreadID]); NaN contributions (e.g. dx == 0 steps) are dropped.
// Values are normalised by bin length and number of events.
if( !std::isnan(StepdQ) ) atomicAdd(dQBins + NBINS * threadIdx.y + B[ThreadID], StepdQ / (BINL * NEVENTS));
if( !std::isnan(StepdE) ) atomicAdd(dEBins + NBINS * threadIdx.y + B[ThreadID], StepdE / (BINL * NEVENTS));
}
}
// Same Birks-law step charge as Birks(), but instead of histogramming, each
// step's dQ is summed into a single total yield per parameter pair,
// QY[threadIdx.y].  One thread = one (G4 step, parameter pair).
__global__ void DMBirks( double *dE, double *dx, double F, size_t n, double* p, double* QY )
{
int ThreadID = blockIdx.x * blockDim.x + threadIdx.x;
int ParameterID = 2 * threadIdx.y;
double StepdQ;
if( ThreadID < n )
{
// Birks law: A = p[ParameterID], k = p[ParameterID + 1].
StepdQ = ( p[ParameterID + 0] * ( dE[ThreadID] / dx[ThreadID] ) /
( 1 + ( p[ParameterID + 1] / F ) *
( dE[ThreadID] / dx[ThreadID] ) ) ) * dx[ThreadID];
// NaN steps (dx == 0) are dropped; the /1000 is presumably a unit
// conversion — TODO confirm against the analysis code.
if( !std::isnan(StepdQ) ) atomicAdd( QY + threadIdx.y, StepdQ / ( 1000 ) );
}
}
// Host driver for DMBirks: for every field value in F, runs the kernel over
// the full step sample (dE, dx) for successive batches of 32 parameter pairs
// taken from p, and stores the 32 resulting yields of each batch in QY[fi].
// Assumes (unchecked): dE.size() == dx.size(); p.size() is a multiple of 64
// (trailing parameters beyond the last full batch are silently skipped);
// QY[fi] is pre-sized to at least 32 * (p.size() / 64) doubles — TODO confirm
// with the caller.  NOTE(review): CUDA allocation/copy return codes are not
// checked; only the kernel launch is error-checked.
void DMBirksWrapper( const std::vector<double> &dE, const std::vector<double> &dx,
const std::vector<double> &F, const std::vector<double> &p,
std::vector< std::vector<double> > &QY )
{
// Set the thread-dimensions of each block.
const int NThreadEntries(32);
const int NThreadParameters(32);
const dim3 ThreadDimensions( NThreadEntries, NThreadParameters );
// We will need a few constants/dimensions for the calculation.
const size_t N(dE.size());
const size_t NFields(F.size());
// Create the pointers for the arrays in device memory.
double *DeviceG4dE;
double *DeviceG4dx;
double *DeviceP;
double *DeviceQY;
// Now we need to allocate memory on the device for these arrays. The raw input only needs
// to be copied once, but the parameters will only be processed in batches of 32 (allocate
// memory in loop).
cudaMalloc( (void**)&DeviceG4dE, sizeof(double) * N );
cudaMalloc( (void**)&DeviceG4dx, sizeof(double) * N );
cudaMalloc( (void**)&DeviceP, sizeof(double) * 64 );
cudaMalloc( (void**)&DeviceQY, sizeof(double) * 32 );
// Now copy the raw G4 input to the device.
cudaMemcpy( DeviceG4dE, &dE[0], sizeof(double) * N, cudaMemcpyHostToDevice );
cudaMemcpy( DeviceG4dx, &dx[0], sizeof(double) * N, cudaMemcpyHostToDevice );
// So the major time cost with outsourcing the recombination calculation to a GPU is in
// copying to and from memory, and other overhead. We can minimize this by copying the
// G4 input to the device memory once and performing all calculations on it at once. We will
// need to copy the dQ calculations back to the host and the batch of parameters to the
// device, but this is relatively little compared to the large size of the raw input.
size_t ParHead;
for( size_t fi(0); fi < NFields; ++fi )
{
ParHead = 0;
// Process p in batches of 64 doubles = 32 (A, k) parameter pairs.
while( ParHead + 64 <= p.size() )
{
// Reset the arrays allocated for the QY calculation.
cudaMemset( DeviceQY, 0, sizeof(double) * 32 );
// Copy the current batch of parameters to the device.
cudaMemcpy( DeviceP, &p[ParHead], sizeof(double) * 64, cudaMemcpyHostToDevice );
// Make a kernel call for this batch of parameters and field.
DMBirks<<< ( N / NThreadEntries + 1 ), ThreadDimensions >>>( DeviceG4dE,
DeviceG4dx,
F[fi],
N,
DeviceP,
DeviceQY );
cudaDeviceSynchronize();
// Retrieve any error states (if any).
cudaError_t Error = cudaGetLastError();
if( Error != cudaSuccess ) std::cerr << "CUDA error: " << cudaGetErrorString(Error) << std::endl;
// Copy the resulting calculated values back to the host.
// Batch b lands at offset 32*b inside QY[fi].
cudaMemcpy( &QY[fi][ 32 * (ParHead / 64) ], DeviceQY, sizeof(double) * 32, cudaMemcpyDeviceToHost );
ParHead += 64;
}
}
// Once we are finished we need to release the memory that was allocated on the device.
cudaFree( DeviceG4dE );
cudaFree( DeviceG4dx );
cudaFree( DeviceP );
cudaFree( DeviceQY );
}
// Host driver for Birks + GetR: for every field in F and every batch of 32
// parameter pairs from p, fills the per-parameter dQ/dE histograms on the
// device, converts them to R = dQ/dE with GetR, and copies R and dE back.
// Assumes (unchecked): dE, dx and B have the same length; p.size() is a
// multiple of 64; RBins[fi] and dEBins[fi] are pre-sized to hold
// 32 * NBINS * (p.size() / 64) doubles — TODO confirm with the caller.
// NOTE(review): allocation/copy return codes are not checked.
void BirksWrapper( const std::vector<double> &dE, const std::vector<double> &dx,
const std::vector<int> &B, const std::vector<double> &F,
const std::vector<double> &p, std::vector< std::vector<double> > &RBins,
std::vector< std::vector<double> > &dEBins )
{
// Set the thread-dimensions of each block.
const int NThreadEntries(32);
const int NThreadParameters(32);
const dim3 ThreadDimensions( NThreadEntries, NThreadParameters );
// We will need a few constants/dimensions for the calculation.
const size_t N(dE.size());
const size_t NFields(F.size());
// Create the pointers for the arrays in device memory.
double *DeviceG4dE;
double *DeviceG4dx;
double *DevicedQBins;
double *DevicedEBins;
double *DeviceRBins;
double *DeviceP;
int *DeviceB;
// Now we need to allocate memory on the device for these arrays. The raw input only needs
// to be copied once, but the parameters will only be processed in batches of 32 (allocate
// memory in loop).
cudaMalloc( (void**)&DeviceG4dE, sizeof(double) * N );
cudaMalloc( (void**)&DeviceG4dx, sizeof(double) * N );
cudaMalloc( (void**)&DevicedQBins, sizeof(double) * 32 * NBINS );
cudaMalloc( (void**)&DevicedEBins, sizeof(double) * 32 * NBINS );
cudaMalloc( (void**)&DeviceRBins, sizeof(double) * 32 * NBINS );
cudaMalloc( (void**)&DeviceP, sizeof(double) * 64 );
cudaMalloc( (void**)&DeviceB, sizeof(int) * N );
// Now copy the raw G4 input to the device.
cudaMemcpy( DeviceG4dE, &dE[0], sizeof(double) * N, cudaMemcpyHostToDevice );
cudaMemcpy( DeviceG4dx, &dx[0], sizeof(double) * N, cudaMemcpyHostToDevice );
cudaMemcpy( DeviceB, &B[0], sizeof(int) * N, cudaMemcpyHostToDevice );
// So the major time cost with outsourcing the recombination calculation to a GPU is in
// copying to and from memory, and other overhead. We can minimize this by copying the
// G4 input to the device memory once and performing all calculations on it at once. We will
// need to copy the dQ calculations back to the host and the batch of parameters to the
// device, but this is relatively little compared to the large size of the raw input.
size_t ParHead;
for( size_t fi(0); fi < NFields; ++fi )
{
ParHead = 0;
// Process p in batches of 64 doubles = 32 (A, k) parameter pairs.
while( ParHead + 64 <= p.size() )
{
// Reset the arrays allocated for the segment dQ and dE calculation.
cudaMemset( DevicedQBins, 0, sizeof(double) * 32 * NBINS );
cudaMemset( DevicedEBins, 0, sizeof(double) * 32 * NBINS );
// Copy the current batch of parameters to the device.
cudaMemcpy( DeviceP, &p[ParHead], sizeof(double) * 64, cudaMemcpyHostToDevice );
// Make a kernel call for this batch of parameters and field.
Birks<<< ( N / NThreadEntries + 1 ), ThreadDimensions >>>( DeviceG4dE,
DeviceG4dx,
DeviceB,
F[fi],
DeviceP,
N,
DevicedQBins,
DevicedEBins );
cudaDeviceSynchronize();
// Retrieve any error states (if any).
cudaError_t Error = cudaGetLastError();
if( Error != cudaSuccess ) std::cerr << "CUDA error: " << cudaGetErrorString(Error) << std::endl;
// Copy the resulting calculated values back to the host.
// (GetR first turns the dQ/dE histograms into R on the device; the
// synchronous memcpys below also fence the GetR launch.)
GetR<<< NBINS, 32 >>>( DevicedQBins, DevicedEBins, DeviceRBins );
cudaMemcpy( &RBins[fi][ 32 * NBINS * (ParHead / 64) ], DeviceRBins, sizeof(double) * 32 * NBINS, cudaMemcpyDeviceToHost );
cudaMemcpy( &dEBins[fi][ 32 * NBINS * (ParHead / 64) ], DevicedEBins, sizeof(double) * 32 * NBINS, cudaMemcpyDeviceToHost );
ParHead += 64;
}
}
// Once we are finished we need to release the memory that was allocated on the device.
cudaFree( DeviceG4dE );
cudaFree( DeviceG4dx );
cudaFree( DevicedQBins );
cudaFree( DevicedEBins );
cudaFree( DeviceRBins );
cudaFree( DeviceP );
cudaFree( DeviceB );
}
}
|
58fe7500b82c303d7d6a94c1bdbde19657c2cf7f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Software License Agreement (BSD License)
*
* Point Cloud Library (PCL) - www.pointclouds.org
* Copyright (c) 2011, Willow Garage, Inc.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "device.hpp"
//#include "pcl/gpu/utils/device/vector_math.hpp"
namespace pcl
{
namespace device
{
// Zero-initialises the colour volume: one thread per (x, y) column walks the
// z axis and clears every voxel to (0, 0, 0) with weight 0.
__global__ void
initColorVolumeKernel (PtrStep<uchar4> volume)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x < VOLUME_X && y < VOLUME_Y)
{
uchar4 *pos = volume.ptr (y) + x;
// The volume is laid out as VOLUME_Z slices of VOLUME_Y rows; advancing
// by z_step elements moves one slice along z at fixed (x, y).
int z_step = VOLUME_Y * volume.step / sizeof(*pos);
#pragma unroll
for (int z = 0; z < VOLUME_Z; ++z, pos += z_step)
*pos = make_uchar4 (0, 0, 0, 0);
}
}
}
}
// Clears the whole colour volume and blocks until the device has finished.
// One 2D grid covers the (VOLUME_X, VOLUME_Y) footprint.
void
pcl::device::initColorVolume (PtrStep<uchar4> color_volume)
{
dim3 block (32, 16);
dim3 grid (1, 1, 1);
grid.x = divUp (VOLUME_X, block.x);
grid.y = divUp (VOLUME_Y, block.y);
hipLaunchKernelGGL(( initColorVolumeKernel), dim3(grid), dim3(block), 0, 0, color_volume);
cudaSafeCall ( hipGetLastError () );
cudaSafeCall (hipDeviceSynchronize ());
}
namespace pcl
{
namespace device
{
// Functor that merges the current RGB frame into the colour volume: each
// voxel is projected into the camera and, if its projection hits a valid
// vertex close enough to the voxel, the voxel's running-average colour is
// updated.  Each voxel is owned by exactly one thread per launch, so the
// read-modify-write of a voxel needs no atomics.
struct ColorVolumeImpl
{
enum
{
CTA_SIZE_X = 32,
CTA_SIZE_Y = 8,
// 1: update only the voxel that contains the vertex; 0: update any voxel
// within tranc_dist of the vertex.
ONE_VOXEL = 0
};
Intr intr;                              // camera intrinsics (fx, fy, cx, cy)
PtrStep<float> vmap;                    // vertex map: 3 stacked planes (x, y, z)
PtrStepSz<uchar3> colors;               // current RGB frame
Mat33 R_inv;                            // inverse (world-to-camera) rotation
float3 t;                               // camera translation
float3 cell_size;                       // metric voxel extents
float tranc_dist;                       // distance threshold for updates
int max_weight;                         // cap on the running-average weight
mutable PtrStep<uchar4> color_volume;   // per-voxel (r, g, b, weight)
// Voxel index containing a metric point (floor division).
__device__ __forceinline__ int3
getVoxel (float3 point) const
{
int vx = __float2int_rd (point.x / cell_size.x); // round to negative infinity
int vy = __float2int_rd (point.y / cell_size.y);
int vz = __float2int_rd (point.z / cell_size.z);
return make_int3 (vx, vy, vz);
}
// Metric (global) coordinates of a voxel's centre.
__device__ __forceinline__ float3
getVoxelGCoo (int x, int y, int z) const
{
float3 coo = make_float3 (x, y, z);
coo += 0.5f; //shift to cell center;
coo.x *= cell_size.x;
coo.y *= cell_size.y;
coo.z *= cell_size.z;
return coo;
}
// One thread per (x, y) column; walks the z axis.
__device__ __forceinline__ void
operator () () const
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= VOLUME_X || y >= VOLUME_Y)
return;
// NOTE(review): the bound is VOLUME_X although the loop walks z; this is
// only correct while VOLUME_X == VOLUME_Z — confirm upstream.
for (int z = 0; z < VOLUME_X; ++z)
{
float3 v_g = getVoxelGCoo (x, y, z);
// Transform the voxel centre into camera space.
float3 v = R_inv * (v_g - t);
if (v.z <= 0)
continue;
int2 coo; //project to current cam
coo.x = __float2int_rn (v.x * intr.fx / v.z + intr.cx);
coo.y = __float2int_rn (v.y * intr.fy / v.z + intr.cy);
if (coo.x >= 0 && coo.y >= 0 && coo.x < colors.cols && coo.y < colors.rows)
{
// Read the vertex that the camera saw at this pixel; NaN x means
// no valid measurement.
float3 p;
p.x = vmap.ptr (coo.y)[coo.x];
if (isnan (p.x))
continue;
p.y = vmap.ptr (coo.y + colors.rows )[coo.x];
p.z = vmap.ptr (coo.y + colors.rows * 2)[coo.x];
bool update = false;
if (ONE_VOXEL)
{
int3 vp = getVoxel (p);
update = vp.x == x && vp.y == y && vp.z == z;
}
else
{
float dist = norm (p - v_g);
update = dist < tranc_dist;
}
if (update)
{
// Weighted running average of the voxel colour; the weight is
// clamped to max_weight so old frames can still be displaced.
uchar4 *ptr = color_volume.ptr (VOLUME_Y * z + y) + x;
uchar3 rgb = colors.ptr (coo.y)[coo.x];
uchar4 volume_rgbw = *ptr;
int weight_prev = volume_rgbw.w;
const float Wrk = 1.f;
float new_x = (volume_rgbw.x * weight_prev + Wrk * rgb.x) / (weight_prev + Wrk);
float new_y = (volume_rgbw.y * weight_prev + Wrk * rgb.y) / (weight_prev + Wrk);
float new_z = (volume_rgbw.z * weight_prev + Wrk * rgb.z) / (weight_prev + Wrk);
int weight_new = weight_prev + 1;
uchar4 volume_rgbw_new;
volume_rgbw_new.x = min (255, max (0, __float2int_rn (new_x)));
volume_rgbw_new.y = min (255, max (0, __float2int_rn (new_y)));
volume_rgbw_new.z = min (255, max (0, __float2int_rn (new_z)));
volume_rgbw_new.w = min (max_weight, weight_new);
*ptr = volume_rgbw_new;
}
} /* in camera image range */
} /* for(int z = 0; z < VOLUME_X; ++z) */
} /* void operator() */
};
// Thin kernel wrapper: the functor is passed by value and invoked per thread.
__global__ void
updateColorVolumeKernel (const ColorVolumeImpl cvi) {
cvi ();
}
}
}
// Fills a ColorVolumeImpl functor from the arguments, launches the update
// kernel over the (VOLUME_X, VOLUME_Y) footprint and blocks until done.
// max_weight is clamped to [0, 255] because the weight is stored in a uchar.
void
pcl::device::updateColorVolume (const Intr& intr, float tranc_dist, const Mat33& R_inv, const float3& t,
const MapArr& vmap, const PtrStepSz<uchar3>& colors, const float3& volume_size, PtrStep<uchar4> color_volume, int max_weight)
{
ColorVolumeImpl cvi;
cvi.vmap = vmap;
cvi.colors = colors;
cvi.color_volume = color_volume;
cvi.R_inv = R_inv;
cvi.t = t;
cvi.intr = intr;
cvi.tranc_dist = tranc_dist;
cvi.max_weight = min (max (0, max_weight), 255);
cvi.cell_size.x = volume_size.x / VOLUME_X;
cvi.cell_size.y = volume_size.y / VOLUME_Y;
cvi.cell_size.z = volume_size.z / VOLUME_Z;
dim3 block (ColorVolumeImpl::CTA_SIZE_X, ColorVolumeImpl::CTA_SIZE_Y);
dim3 grid (divUp (VOLUME_X, block.x), divUp (VOLUME_Y, block.y));
hipLaunchKernelGGL(( updateColorVolumeKernel), dim3(grid), dim3(block), 0, 0, cvi);
cudaSafeCall ( hipGetLastError () );
cudaSafeCall (hipDeviceSynchronize ());
}
namespace pcl
{
namespace device
{
// For each extracted surface point, looks up the colour of the voxel that
// contains it and writes it out in BGRA order.  One thread per point.
// NOTE(review): the voxel index is not bounds-checked — assumes all points
// lie inside the volume; confirm with the extraction step.
__global__ void
extractColorsKernel (const float3 cell_size, const PtrStep<uchar4> color_volume, const PtrSz<PointType> points, uchar4 *colors)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < points.size)
{
int3 v;
float3 p = *(const float3*)(points.data + idx);
v.x = __float2int_rd (p.x / cell_size.x); // round to negative infinity
v.y = __float2int_rd (p.y / cell_size.y);
v.z = __float2int_rd (p.z / cell_size.z);
uchar4 rgbw = color_volume.ptr (VOLUME_Y * v.z + v.y)[v.x];
colors[idx] = make_uchar4 (rgbw.z, rgbw.y, rgbw.x, 0); //bgra
}
}
}
}
// Launches extractColorsKernel: one thread per extracted point, fetching the
// colour of the voxel that contains the point (output is BGRA).
// (The "exctract" spelling is kept — it is the public interface.)
void
pcl::device::exctractColors (const PtrStep<uchar4>& color_volume, const float3& volume_size, const PtrSz<PointType>& points, uchar4* colors)
{
  const int block = 256;
  // Metric size of a single voxel along each axis.
  float3 cell_size = make_float3 (volume_size.x / VOLUME_X, volume_size.y / VOLUME_Y, volume_size.z / VOLUME_Z);
  hipLaunchKernelGGL(( extractColorsKernel), dim3(divUp (points.size, block)), dim3(block), 0, 0, cell_size, color_volume, points, colors);
  cudaSafeCall ( hipGetLastError () );
  cudaSafeCall (hipDeviceSynchronize ());
}
* Software License Agreement (BSD License)
*
* Point Cloud Library (PCL) - www.pointclouds.org
* Copyright (c) 2011, Willow Garage, Inc.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "device.hpp"
//#include "pcl/gpu/utils/device/vector_math.hpp"
namespace pcl
{
namespace device
{
// Zero-initialises the colour volume: one thread per (x, y) column walks the
// z axis and clears every voxel to (0, 0, 0) with weight 0.
__global__ void
initColorVolumeKernel (PtrStep<uchar4> volume)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x < VOLUME_X && y < VOLUME_Y)
{
uchar4 *pos = volume.ptr (y) + x;
// The volume is laid out as VOLUME_Z slices of VOLUME_Y rows; advancing
// by z_step elements moves one slice along z at fixed (x, y).
int z_step = VOLUME_Y * volume.step / sizeof(*pos);
#pragma unroll
for (int z = 0; z < VOLUME_Z; ++z, pos += z_step)
*pos = make_uchar4 (0, 0, 0, 0);
}
}
}
}
// Clears the whole colour volume and blocks until the device has finished.
// One 2D grid covers the (VOLUME_X, VOLUME_Y) footprint.
void
pcl::device::initColorVolume (PtrStep<uchar4> color_volume)
{
dim3 block (32, 16);
dim3 grid (1, 1, 1);
grid.x = divUp (VOLUME_X, block.x);
grid.y = divUp (VOLUME_Y, block.y);
initColorVolumeKernel<<<grid, block>>>(color_volume);
cudaSafeCall ( cudaGetLastError () );
cudaSafeCall (cudaDeviceSynchronize ());
}
namespace pcl
{
namespace device
{
// Functor that merges the current RGB frame into the colour volume: each
// voxel is projected into the camera and, if its projection hits a valid
// vertex close enough to the voxel, the voxel's running-average colour is
// updated.  Each voxel is owned by exactly one thread per launch, so the
// read-modify-write of a voxel needs no atomics.
struct ColorVolumeImpl
{
enum
{
CTA_SIZE_X = 32,
CTA_SIZE_Y = 8,
// 1: update only the voxel that contains the vertex; 0: update any voxel
// within tranc_dist of the vertex.
ONE_VOXEL = 0
};
Intr intr;                              // camera intrinsics (fx, fy, cx, cy)
PtrStep<float> vmap;                    // vertex map: 3 stacked planes (x, y, z)
PtrStepSz<uchar3> colors;               // current RGB frame
Mat33 R_inv;                            // inverse (world-to-camera) rotation
float3 t;                               // camera translation
float3 cell_size;                       // metric voxel extents
float tranc_dist;                       // distance threshold for updates
int max_weight;                         // cap on the running-average weight
mutable PtrStep<uchar4> color_volume;   // per-voxel (r, g, b, weight)
// Voxel index containing a metric point (floor division).
__device__ __forceinline__ int3
getVoxel (float3 point) const
{
int vx = __float2int_rd (point.x / cell_size.x); // round to negative infinity
int vy = __float2int_rd (point.y / cell_size.y);
int vz = __float2int_rd (point.z / cell_size.z);
return make_int3 (vx, vy, vz);
}
// Metric (global) coordinates of a voxel's centre.
__device__ __forceinline__ float3
getVoxelGCoo (int x, int y, int z) const
{
float3 coo = make_float3 (x, y, z);
coo += 0.5f; //shift to cell center;
coo.x *= cell_size.x;
coo.y *= cell_size.y;
coo.z *= cell_size.z;
return coo;
}
// One thread per (x, y) column; walks the z axis.
__device__ __forceinline__ void
operator () () const
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= VOLUME_X || y >= VOLUME_Y)
return;
// NOTE(review): the bound is VOLUME_X although the loop walks z; this is
// only correct while VOLUME_X == VOLUME_Z — confirm upstream.
for (int z = 0; z < VOLUME_X; ++z)
{
float3 v_g = getVoxelGCoo (x, y, z);
// Transform the voxel centre into camera space.
float3 v = R_inv * (v_g - t);
if (v.z <= 0)
continue;
int2 coo; //project to current cam
coo.x = __float2int_rn (v.x * intr.fx / v.z + intr.cx);
coo.y = __float2int_rn (v.y * intr.fy / v.z + intr.cy);
if (coo.x >= 0 && coo.y >= 0 && coo.x < colors.cols && coo.y < colors.rows)
{
// Read the vertex that the camera saw at this pixel; NaN x means
// no valid measurement.
float3 p;
p.x = vmap.ptr (coo.y)[coo.x];
if (isnan (p.x))
continue;
p.y = vmap.ptr (coo.y + colors.rows )[coo.x];
p.z = vmap.ptr (coo.y + colors.rows * 2)[coo.x];
bool update = false;
if (ONE_VOXEL)
{
int3 vp = getVoxel (p);
update = vp.x == x && vp.y == y && vp.z == z;
}
else
{
float dist = norm (p - v_g);
update = dist < tranc_dist;
}
if (update)
{
// Weighted running average of the voxel colour; the weight is
// clamped to max_weight so old frames can still be displaced.
uchar4 *ptr = color_volume.ptr (VOLUME_Y * z + y) + x;
uchar3 rgb = colors.ptr (coo.y)[coo.x];
uchar4 volume_rgbw = *ptr;
int weight_prev = volume_rgbw.w;
const float Wrk = 1.f;
float new_x = (volume_rgbw.x * weight_prev + Wrk * rgb.x) / (weight_prev + Wrk);
float new_y = (volume_rgbw.y * weight_prev + Wrk * rgb.y) / (weight_prev + Wrk);
float new_z = (volume_rgbw.z * weight_prev + Wrk * rgb.z) / (weight_prev + Wrk);
int weight_new = weight_prev + 1;
uchar4 volume_rgbw_new;
volume_rgbw_new.x = min (255, max (0, __float2int_rn (new_x)));
volume_rgbw_new.y = min (255, max (0, __float2int_rn (new_y)));
volume_rgbw_new.z = min (255, max (0, __float2int_rn (new_z)));
volume_rgbw_new.w = min (max_weight, weight_new);
*ptr = volume_rgbw_new;
}
} /* in camera image range */
} /* for(int z = 0; z < VOLUME_X; ++z) */
} /* void operator() */
};
// Thin kernel wrapper: the functor is passed by value and invoked per thread.
__global__ void
updateColorVolumeKernel (const ColorVolumeImpl cvi) {
cvi ();
}
}
}
// Fills a ColorVolumeImpl functor from the arguments, launches the update
// kernel over the (VOLUME_X, VOLUME_Y) footprint and blocks until done.
// max_weight is clamped to [0, 255] because the weight is stored in a uchar.
void
pcl::device::updateColorVolume (const Intr& intr, float tranc_dist, const Mat33& R_inv, const float3& t,
const MapArr& vmap, const PtrStepSz<uchar3>& colors, const float3& volume_size, PtrStep<uchar4> color_volume, int max_weight)
{
ColorVolumeImpl cvi;
cvi.vmap = vmap;
cvi.colors = colors;
cvi.color_volume = color_volume;
cvi.R_inv = R_inv;
cvi.t = t;
cvi.intr = intr;
cvi.tranc_dist = tranc_dist;
cvi.max_weight = min (max (0, max_weight), 255);
cvi.cell_size.x = volume_size.x / VOLUME_X;
cvi.cell_size.y = volume_size.y / VOLUME_Y;
cvi.cell_size.z = volume_size.z / VOLUME_Z;
dim3 block (ColorVolumeImpl::CTA_SIZE_X, ColorVolumeImpl::CTA_SIZE_Y);
dim3 grid (divUp (VOLUME_X, block.x), divUp (VOLUME_Y, block.y));
updateColorVolumeKernel<<<grid, block>>>(cvi);
cudaSafeCall ( cudaGetLastError () );
cudaSafeCall (cudaDeviceSynchronize ());
}
namespace pcl
{
namespace device
{
// For each extracted surface point, looks up the colour of the voxel that
// contains it and writes it out in BGRA order.  One thread per point.
// NOTE(review): the voxel index is not bounds-checked — assumes all points
// lie inside the volume; confirm with the extraction step.
__global__ void
extractColorsKernel (const float3 cell_size, const PtrStep<uchar4> color_volume, const PtrSz<PointType> points, uchar4 *colors)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < points.size)
{
int3 v;
float3 p = *(const float3*)(points.data + idx);
v.x = __float2int_rd (p.x / cell_size.x); // round to negative infinity
v.y = __float2int_rd (p.y / cell_size.y);
v.z = __float2int_rd (p.z / cell_size.z);
uchar4 rgbw = color_volume.ptr (VOLUME_Y * v.z + v.y)[v.x];
colors[idx] = make_uchar4 (rgbw.z, rgbw.y, rgbw.x, 0); //bgra
}
}
}
}
// Launches extractColorsKernel: one thread per extracted point, fetching the
// colour of the voxel that contains the point (output is BGRA).
// (The "exctract" spelling is kept — it is the public interface.)
void
pcl::device::exctractColors (const PtrStep<uchar4>& color_volume, const float3& volume_size, const PtrSz<PointType>& points, uchar4* colors)
{
  const int block = 256;
  // Metric size of a single voxel along each axis.
  float3 cell_size = make_float3 (volume_size.x / VOLUME_X, volume_size.y / VOLUME_Y, volume_size.z / VOLUME_Z);
  extractColorsKernel<<<divUp (points.size, block), block>>>(cell_size, color_volume, points, colors);
  cudaSafeCall ( cudaGetLastError () );
  cudaSafeCall (cudaDeviceSynchronize ());
}
07a088e928d37459d36b196340e1ea13739b67c9.hip | // !!! This is a file automatically generated by hipify!!!
#include "compute_com_extents.h"
#include <mirheo/core/pvs/object_vector.h>
#include <mirheo/core/pvs/views/ov.h>
#include <mirheo/core/utils/cuda_common.h>
#include <mirheo/core/utils/kernel_launch.h>
#include <hip/hip_runtime.h>
namespace mirheo
{
namespace ComputeComExtentsKernels
{
// One warp per object: the 32 lanes stride over the object's particles
// keeping per-lane min / max / sum of positions, the partials are combined
// with warp reductions, and lane 0 writes {com, low, high} for the object.
__global__ void minMaxCom(OVview ovView)
{
const int gid = threadIdx.x + blockDim.x * blockIdx.x;
const int objId = gid / warpSize;
const int laneId = gid % warpSize;
if (objId >= ovView.nObjects) return;
// Sentinels wide enough for any realistic simulation coordinate.
real3 mymin = make_real3(+1e10_r);
real3 mymax = make_real3(-1e10_r);
real3 mycom = make_real3(0.0_r);
#pragma unroll 3
for (int i = laneId; i < ovView.objSize; i += warpSize)
{
const int offset = objId * ovView.objSize + i;
const real3 coo = make_real3(ovView.readPosition(offset));
mymin = math::min(mymin, coo);
mymax = math::max(mymax, coo);
mycom += coo;
}
// Combine the 32 per-lane partials; after these, lane 0 holds the result.
mycom = warpReduce( mycom, [] (real a, real b) { return a+b; } );
mymin = warpReduce( mymin, [] (real a, real b) { return math::min(a, b); } );
mymax = warpReduce( mymax, [] (real a, real b) { return math::max(a, b); } );
if (laneId == 0)
ovView.comAndExtents[objId] = {mycom / ovView.objSize, mymin, mymax};
}
} // namespace ComputeComExtentsKernels
// Computes centre of mass and axis-aligned extents for every object of lov
// on the given stream (asynchronous; results are in view.comAndExtents).
void computeComExtents(ObjectVector *ov, LocalObjectVector *lov, hipStream_t stream)
{
OVview view(ov, lov);
// Host-side mirror of the device warpSize used to size the launch:
// one warp (32 threads) per object.
constexpr int warpSize = 32;
const int nthreads = 128;
const int nblocks = getNblocks(view.nObjects * warpSize, nthreads);
SAFE_KERNEL_LAUNCH(
ComputeComExtentsKernels::minMaxCom,
nblocks, nthreads, 0, stream,
view );
}
} // namespace mirheo
| 07a088e928d37459d36b196340e1ea13739b67c9.cu | #include "compute_com_extents.h"
#include <mirheo/core/pvs/object_vector.h>
#include <mirheo/core/pvs/views/ov.h>
#include <mirheo/core/utils/cuda_common.h>
#include <mirheo/core/utils/kernel_launch.h>
#include <cuda_runtime.h>
namespace mirheo
{
namespace ComputeComExtentsKernels
{
// One warp per object: the 32 lanes stride over the object's particles
// keeping per-lane min / max / sum of positions, the partials are combined
// with warp reductions, and lane 0 writes {com, low, high} for the object.
__global__ void minMaxCom(OVview ovView)
{
const int gid = threadIdx.x + blockDim.x * blockIdx.x;
const int objId = gid / warpSize;
const int laneId = gid % warpSize;
if (objId >= ovView.nObjects) return;
// Sentinels wide enough for any realistic simulation coordinate.
real3 mymin = make_real3(+1e10_r);
real3 mymax = make_real3(-1e10_r);
real3 mycom = make_real3(0.0_r);
#pragma unroll 3
for (int i = laneId; i < ovView.objSize; i += warpSize)
{
const int offset = objId * ovView.objSize + i;
const real3 coo = make_real3(ovView.readPosition(offset));
mymin = math::min(mymin, coo);
mymax = math::max(mymax, coo);
mycom += coo;
}
// Combine the 32 per-lane partials; after these, lane 0 holds the result.
mycom = warpReduce( mycom, [] (real a, real b) { return a+b; } );
mymin = warpReduce( mymin, [] (real a, real b) { return math::min(a, b); } );
mymax = warpReduce( mymax, [] (real a, real b) { return math::max(a, b); } );
if (laneId == 0)
ovView.comAndExtents[objId] = {mycom / ovView.objSize, mymin, mymax};
}
} // namespace ComputeComExtentsKernels
// Computes centre of mass and axis-aligned extents for every object of lov
// on the given stream (asynchronous; results are in view.comAndExtents).
void computeComExtents(ObjectVector *ov, LocalObjectVector *lov, cudaStream_t stream)
{
OVview view(ov, lov);
// Host-side mirror of the device warpSize used to size the launch:
// one warp (32 threads) per object.
constexpr int warpSize = 32;
const int nthreads = 128;
const int nblocks = getNblocks(view.nObjects * warpSize, nthreads);
SAFE_KERNEL_LAUNCH(
ComputeComExtentsKernels::minMaxCom,
nblocks, nthreads, 0, stream,
view );
}
} // namespace mirheo
|
4d5e191e0974c5b297d8158f4e74407b5855eafa.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<stdlib.h>
// Element-wise sum pd[i] = md[i] + nd[i], staged through shared memory.
// `size` is the buffer size in BYTES (as the host code passes 400*sizeof(int)).
// Fix over the original: the kernel previously ignored `size` entirely and had
// no bounds guard, so a launch with more than `size/sizeof(int)` (or 400)
// threads would read/write out of bounds.
__global__ void arradd(int* md, int* nd, int* pd, int size)
{
    __shared__ int ms[400], ns[400];

    const int n = size / (int)sizeof(int);
    const int myid = threadIdx.x;
    // Never index past the element count nor the fixed 400-slot shared buffers.
    const bool active = (myid < n) && (myid < 400);

    if (active) {
        ms[myid] = md[myid];
        ns[myid] = nd[myid];
    }
    // The barrier must be reached by every thread of the block, so it stays
    // outside the guarded sections.
    __syncthreads();

    if (active)
        pd[myid] = ms[myid] + ns[myid];
}
// Host driver: fills two 400-element arrays, adds them on the device and
// prints the result. Fix over the original: every HIP call is now checked,
// so allocation/copy/launch failures abort with a message instead of
// silently printing garbage.
int main()
{
    const int size = 400 * sizeof(int); // buffer size in bytes
    int m[400], n[400], p[400];
    int *md = nullptr, *nd = nullptr, *pd = nullptr;

    for (int i = 0; i < 400; i++)
    {
        m[i] = i;
        n[i] = i;
        p[i] = 0;
    }

    if (hipMalloc(&md, size) != hipSuccess ||
        hipMalloc(&nd, size) != hipSuccess ||
        hipMalloc(&pd, size) != hipSuccess)
    {
        fprintf(stderr, "device allocation failed\n");
        return 1;
    }
    if (hipMemcpy(md, m, size, hipMemcpyHostToDevice) != hipSuccess ||
        hipMemcpy(nd, n, size, hipMemcpyHostToDevice) != hipSuccess)
    {
        fprintf(stderr, "host-to-device copy failed\n");
        return 1;
    }

    dim3 DimGrid(1, 1);
    dim3 DimBlock(400, 1); // one thread per element
    hipLaunchKernelGGL(( arradd), dim3(DimGrid),dim3(DimBlock) , 0, 0, md,nd,pd,size);
    // Launch-configuration errors only surface through hipGetLastError().
    if (hipGetLastError() != hipSuccess)
    {
        fprintf(stderr, "kernel launch failed\n");
        return 1;
    }

    // hipMemcpy synchronizes with the kernel before copying the result back.
    if (hipMemcpy(p, pd, size, hipMemcpyDeviceToHost) != hipSuccess)
    {
        fprintf(stderr, "device-to-host copy failed\n");
        return 1;
    }

    hipFree(md);
    hipFree(nd);
    hipFree(pd);

    for (int i = 0; i < 400; i++)
    {
        printf("\t%d", p[i]);
    }
    return 0;
}
| 4d5e191e0974c5b297d8158f4e74407b5855eafa.cu | #include<stdio.h>
#include<stdlib.h>
// Element-wise sum pd[i] = md[i] + nd[i], staged through shared memory.
// `size` is the buffer size in BYTES (as the host code passes 400*sizeof(int)).
// Fix over the original: the kernel previously ignored `size` entirely and had
// no bounds guard, so a launch with more than `size/sizeof(int)` (or 400)
// threads would read/write out of bounds.
__global__ void arradd(int* md, int* nd, int* pd, int size)
{
    __shared__ int ms[400], ns[400];

    const int n = size / (int)sizeof(int);
    const int myid = threadIdx.x;
    // Never index past the element count nor the fixed 400-slot shared buffers.
    const bool active = (myid < n) && (myid < 400);

    if (active) {
        ms[myid] = md[myid];
        ns[myid] = nd[myid];
    }
    // The barrier must be reached by every thread of the block, so it stays
    // outside the guarded sections.
    __syncthreads();

    if (active)
        pd[myid] = ms[myid] + ns[myid];
}
// Host driver: fills two 400-element arrays, adds them on the device and
// prints the result. Fix over the original: every CUDA call is now checked,
// so allocation/copy/launch failures abort with a message instead of
// silently printing garbage.
int main()
{
    const int size = 400 * sizeof(int); // buffer size in bytes
    int m[400], n[400], p[400];
    int *md = nullptr, *nd = nullptr, *pd = nullptr;

    for (int i = 0; i < 400; i++)
    {
        m[i] = i;
        n[i] = i;
        p[i] = 0;
    }

    if (cudaMalloc(&md, size) != cudaSuccess ||
        cudaMalloc(&nd, size) != cudaSuccess ||
        cudaMalloc(&pd, size) != cudaSuccess)
    {
        fprintf(stderr, "device allocation failed\n");
        return 1;
    }
    if (cudaMemcpy(md, m, size, cudaMemcpyHostToDevice) != cudaSuccess ||
        cudaMemcpy(nd, n, size, cudaMemcpyHostToDevice) != cudaSuccess)
    {
        fprintf(stderr, "host-to-device copy failed\n");
        return 1;
    }

    dim3 DimGrid(1, 1);
    dim3 DimBlock(400, 1); // one thread per element
    arradd<<< DimGrid,DimBlock >>>(md,nd,pd,size);
    // Launch-configuration errors only surface through cudaGetLastError().
    if (cudaGetLastError() != cudaSuccess)
    {
        fprintf(stderr, "kernel launch failed\n");
        return 1;
    }

    // cudaMemcpy synchronizes with the kernel before copying the result back.
    if (cudaMemcpy(p, pd, size, cudaMemcpyDeviceToHost) != cudaSuccess)
    {
        fprintf(stderr, "device-to-host copy failed\n");
        return 1;
    }

    cudaFree(md);
    cudaFree(nd);
    cudaFree(pd);

    for (int i = 0; i < 400; i++)
    {
        printf("\t%d", p[i]);
    }
    return 0;
}
|
e5bbf1aa936bb4ba14b6a35595fe54247d69ac56.hip | // !!! This is a file automatically generated by hipify!!!
/***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*
This example demonstrates several CUTLASS utilities in the context of a mixed-precision
floating-point matrix product computation.
These utilities are intended to be useful supporting components for managing tensor and matrix
memory allocations, initializing and comparing results, and computing reference output.
CUTLASS utilities are defined in the directory `tools/util`, and definitions appear
namespace `cutlass::` or an inner namespace therein. Operations in `cutlass::reference::` have
both host-side and device-side implementations, and the choice to use device-side initialization
and host-side verification in this example was arbitrary.
cutlass::half_t
This is a numeric type implementing IEEE half-precision quantities. It is functional in host
and device code. In host-side code, CUTLASS_ENABLE_F16C optionally enables harware-accelerated
numeric conversion on x86-64 CPUs support F16C extensions. In device code, all available
hardware is used to implement conversion and numeric operations.
cutlass::HostTensor<>
This template class simplifies the creation of tensors for all supported layouts. It simplifies
allocation and management of host- and device- memory allocations.
This class offers methods device_view() and host_view() to provide TensorView objects for
device- and host-side memory allocations.
cutlass::reference::device::TensorFillRandomGaussian()
This template function initializes elementsof a tensor to a random Gaussian distribution. It
uses cuRAND in device code to compute random numbers.
cutlass::reference::host::Gemm<>
This template function computes the general matrix product. This template supports unique
data types for each matrix operand, the internal accumulation type, and the scalar parameters
alpha and beta.
cutlass::reference::host::TensorEquals()
Compares two tensors of identical rank and returns true if values are bit equivalent.
*/
// Standard Library includes
#include <iostream>
#include <sstream>
#include <vector>
#include <fstream>
// CUTLASS includes needed for half-precision GEMM kernel
#include "cutlass/cutlass.h"
#include "cutlass/core_io.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/gemm/device/gemm.h"
//
// CUTLASS utility includes
//
// Defines operator<<() to write TensorView objects to std::ostream
#include "cutlass/util/tensor_view_io.h"
// Defines cutlass::HostTensor<>
#include "cutlass/util/host_tensor.h"
// Defines cutlass::half_t
#include "cutlass/numeric_types.h"
// Defines device_memory::copy_device_to_device()
#include "cutlass/util/device_memory.h"
// Defines cutlass::reference::device::TensorFillRandomGaussian()
#include "cutlass/util/reference/device/tensor_fill.h"
// Defines cutlass::reference::host::TensorEquals()
#include "cutlass/util/reference/host/tensor_compare.h"
// Defines cutlass::reference::host::Gemm()
#include "cutlass/util/reference/host/gemm.h"
#pragma warning( disable : 4503)
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Define a CUTLASS GEMM template and launch a GEMM kernel.
/// Runs a half-precision column-major GEMM (D = alpha * A * B + beta * C,
/// computed in place with C as both source and destination) and maps the
/// CUTLASS status onto the runtime error type callers expect.
hipError_t cutlass_hgemm_nn(
  int M,
  int N,
  int K,
  cutlass::half_t alpha,
  cutlass::half_t const *A,
  cutlass::layout::ColumnMajor::Stride::Index lda,
  cutlass::half_t const *B,
  cutlass::layout::ColumnMajor::Stride::Index ldb,
  cutlass::half_t beta,
  cutlass::half_t *C,
  cutlass::layout::ColumnMajor::Stride::Index ldc) {

  using ColumnMajor = cutlass::layout::ColumnMajor;

  // All operands are half precision, all in column-major layout.
  using Gemm = cutlass::gemm::device::Gemm<
    cutlass::half_t, ColumnMajor,   // A
    cutlass::half_t, ColumnMajor,   // B
    cutlass::half_t, ColumnMajor    // output
  >;

  Gemm::Arguments arguments(
    {M, N, K},
    {A, lda},
    {B, ldb},
    {C, ldc},
    {C, ldc},        // D aliases C: the product is written back in place
    {alpha, beta});

  Gemm gemm_operator;
  cutlass::Status status = gemm_operator(arguments);

  return (status == cutlass::Status::kSuccess) ? hipSuccess : hipErrorUnknown;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Allocate several matrices in GPU device memory and call a single-precision
/// CUTLASS GEMM kernel.
// Allocates A (MxK), B (KxN) and two copies of C (MxN), runs the CUTLASS GEMM
// on the device, recomputes the product with the host-side reference GEMM, and
// returns hipErrorUnknown if the two results are not bit-identical.
hipError_t TestCutlassGemm(int M, int N, int K, cutlass::half_t alpha, cutlass::half_t beta) {
hipError_t result;
//
// Construct cutlass::HostTensor<> using the half-precision host-side type.
//
// cutlass::HostTensor<> allocates memory on both the host and device corresponding to rank=2
// tensors in column-major layout. Explicit synchronization methods are offered to copy the
// tensor to the device or to the host.
//
// M-by-K matrix of cutlass::half_t
cutlass::HostTensor<cutlass::half_t, cutlass::layout::ColumnMajor> A(cutlass::MatrixCoord(M, K));
// K-by-N matrix of cutlass::half_t
cutlass::HostTensor<cutlass::half_t, cutlass::layout::ColumnMajor> B(cutlass::MatrixCoord(K, N));
// M-by-N matrix of cutlass::half_t
cutlass::HostTensor<cutlass::half_t, cutlass::layout::ColumnMajor> C_cutlass(cutlass::MatrixCoord(M, N));
// M-by-N matrix of cutlass::half_t
cutlass::HostTensor<cutlass::half_t, cutlass::layout::ColumnMajor> C_reference(cutlass::MatrixCoord(M, N));
//
// Initialize matrices with small, random integers.
//
// Arbitrary RNG seed value. Hard-coded for deterministic results.
uint64_t seed = 2080;
// Gaussian random distribution
cutlass::half_t mean = 0.0_hf;
cutlass::half_t stddev = 5.0_hf;
// Specify the number of bits right of the binary decimal that are permitted
// to be non-zero. A value of "0" here truncates random values to integers
int bits_less_than_one = 0;
// Distinct seed multipliers below keep A, B and C statistically independent.
cutlass::reference::device::TensorFillRandomGaussian(
A.device_view(),
seed,
mean,
stddev,
bits_less_than_one
);
cutlass::reference::device::TensorFillRandomGaussian(
B.device_view(),
seed * 2019,
mean,
stddev,
bits_less_than_one
);
cutlass::reference::device::TensorFillRandomGaussian(
C_cutlass.device_view(),
seed * 1993,
mean,
stddev,
bits_less_than_one
);
// Copy C_cutlass into C_reference so the GEMM is correct when beta != 0.
cutlass::device_memory::copy_device_to_device(
C_reference.device_data(),
C_cutlass.device_data(),
C_cutlass.capacity());
// Copy the device-side view into host memory
C_reference.sync_host();
//
// Launch the CUTLASS GEMM kernel
//
// stride(0) of a column-major HostTensor is its leading dimension.
result = cutlass_hgemm_nn(
M,
N,
K,
alpha,
A.device_data(),
A.stride(0),
B.device_data(),
B.stride(0),
beta,
C_cutlass.device_data(),
C_cutlass.stride(0)
);
if (result != hipSuccess) {
return result;
}
//
// Verify the result using a host-side reference
//
// A and B were initialized using device-side procedures. The intent of this example is to
// use the host-side reference GEMM, so we must perform a device-to-host copy.
A.sync_host();
B.sync_host();
// Copy CUTLASS's GEMM results into host memory.
C_cutlass.sync_host();
// Compute the reference result using the host-side GEMM reference implementation.
cutlass::reference::host::Gemm<
cutlass::half_t, // ElementA
cutlass::layout::ColumnMajor, // LayoutA
cutlass::half_t, // ElementB
cutlass::layout::ColumnMajor, // LayoutB
cutlass::half_t, // ElementOutput
cutlass::layout::ColumnMajor, // LayoutOutput
cutlass::half_t,
cutlass::half_t
> gemm_ref;
gemm_ref(
{M, N, K}, // problem size (type: cutlass::gemm::GemmCoord)
alpha, // alpha (type: cutlass::half_t)
A.host_ref(), // A (type: TensorRef<half_t, ColumnMajor>)
B.host_ref(), // B (type: TensorRef<half_t, ColumnMajor>)
beta, // beta (type: cutlass::half_t)
C_reference.host_ref() // C (type: TensorRef<half_t, ColumnMajor>)
);
// Compare reference to computed results.
// Inputs are integer-valued, so device and host results must match exactly.
if (!cutlass::reference::host::TensorEquals(
C_reference.host_view(),
C_cutlass.host_view())) {
char const *filename = "errors_01_cutlass_utilities.csv";
std::cerr << "Error - CUTLASS GEMM kernel differs from reference. Wrote computed and reference results to '" << filename << "'" << std::endl;
//
// On error, print C_cutlass and C_reference to std::cerr.
//
// Note, these are matrices of half-precision elements stored in host memory as
// arrays of type cutlass::half_t.
//
std::ofstream file(filename);
// Result of CUTLASS GEMM kernel
file << "\n\nCUTLASS =\n" << C_cutlass.host_view() << std::endl;
// Result of reference computation
file << "\n\nReference =\n" << C_reference.host_view() << std::endl;
// Return error code.
return hipErrorUnknown;
}
// Passed error check
return hipSuccess;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Entry point to cutlass_utilities example.
//
// usage:
//
// 01_cutlass_utilities <M> <N> <K> <alpha> <beta>
//
// Entry point: parses optional <M> <N> <K> <alpha> <beta> arguments, verifies
// the device supports half precision (compute capability >= 5.3), and runs the
// GEMM comparison test.
int main(int argc, const char *arg[]) {
//
// This example uses half-precision and is only suitable for devices with compute capabitliy 5.3 or greater.
//
hipDeviceProp_t prop;
hipError_t result = hipGetDeviceProperties(&prop, 0);
if (result != hipSuccess) {
std::cerr << "Failed to query device properties with error " << hipGetErrorString(result) << std::endl;
return -1;
}
if (!(prop.major > 5 || (prop.major == 5 && prop.minor >= 3))) {
std::cerr << "This example uses half precision and is only suitable for devices with compute capability 5.3 or greater.\n";
std::cerr << "You are using a CUDA device with compute capability " << prop.major << "." << prop.minor << std::endl;
return -1;
}
//
// Parse the command line to obtain GEMM dimensions and scalar values.
//
// GEMM problem dimensions: <M> <N> <K>
// Any dimension not supplied on the command line keeps its 128 default.
int problem[3] = { 128, 128, 128 };
for (int i = 1; i < argc && i < 4; ++i) {
std::stringstream ss(arg[i]);
ss >> problem[i - 1];
}
// Linear scale factors in GEMM. Note, these are half-precision values stored as
// cutlass::half_t.
//
// Values outside the range of IEEE FP16 will overflow to infinity or underflow to zero.
//
// Defaults: alpha = 1, beta = 0 (plain product, C input ignored).
cutlass::half_t scalars[2] = { 1.0_hf, 0.0_hf };
for (int i = 4; i < argc && i < 6; ++i) {
std::stringstream ss(arg[i]);
ss >> scalars[i - 4]; // lexical cast to cutlass::half_t
}
//
// Run the CUTLASS GEMM test.
//
result = TestCutlassGemm(
problem[0], // GEMM M dimension
problem[1], // GEMM N dimension
problem[2], // GEMM K dimension
scalars[0], // alpha
scalars[1] // beta
);
if (result == hipSuccess) {
std::cout << "Passed." << std::endl;
}
// Exit.
return result == hipSuccess ? 0 : -1;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
| e5bbf1aa936bb4ba14b6a35595fe54247d69ac56.cu | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*
This example demonstrates several CUTLASS utilities in the context of a mixed-precision
floating-point matrix product computation.
These utilities are intended to be useful supporting components for managing tensor and matrix
memory allocations, initializing and comparing results, and computing reference output.
CUTLASS utilities are defined in the directory `tools/util`, and definitions appear
namespace `cutlass::` or an inner namespace therein. Operations in `cutlass::reference::` have
both host-side and device-side implementations, and the choice to use device-side initialization
and host-side verification in this example was arbitrary.
cutlass::half_t
This is a numeric type implementing IEEE half-precision quantities. It is functional in host
and device code. In host-side code, CUTLASS_ENABLE_F16C optionally enables harware-accelerated
numeric conversion on x86-64 CPUs support F16C extensions. In device code, all available
hardware is used to implement conversion and numeric operations.
cutlass::HostTensor<>
This template class simplifies the creation of tensors for all supported layouts. It simplifies
allocation and management of host- and device- memory allocations.
This class offers methods device_view() and host_view() to provide TensorView objects for
device- and host-side memory allocations.
cutlass::reference::device::TensorFillRandomGaussian()
This template function initializes elementsof a tensor to a random Gaussian distribution. It
uses cuRAND in device code to compute random numbers.
cutlass::reference::host::Gemm<>
This template function computes the general matrix product. This template supports unique
data types for each matrix operand, the internal accumulation type, and the scalar parameters
alpha and beta.
cutlass::reference::host::TensorEquals()
Compares two tensors of identical rank and returns true if values are bit equivalent.
*/
// Standard Library includes
#include <iostream>
#include <sstream>
#include <vector>
#include <fstream>
// CUTLASS includes needed for half-precision GEMM kernel
#include "cutlass/cutlass.h"
#include "cutlass/core_io.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/gemm/device/gemm.h"
//
// CUTLASS utility includes
//
// Defines operator<<() to write TensorView objects to std::ostream
#include "cutlass/util/tensor_view_io.h"
// Defines cutlass::HostTensor<>
#include "cutlass/util/host_tensor.h"
// Defines cutlass::half_t
#include "cutlass/numeric_types.h"
// Defines device_memory::copy_device_to_device()
#include "cutlass/util/device_memory.h"
// Defines cutlass::reference::device::TensorFillRandomGaussian()
#include "cutlass/util/reference/device/tensor_fill.h"
// Defines cutlass::reference::host::TensorEquals()
#include "cutlass/util/reference/host/tensor_compare.h"
// Defines cutlass::reference::host::Gemm()
#include "cutlass/util/reference/host/gemm.h"
#pragma warning( disable : 4503)
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Define a CUTLASS GEMM template and launch a GEMM kernel.
/// Runs a half-precision column-major GEMM (D = alpha * A * B + beta * C,
/// computed in place with C as both source and destination) and maps the
/// CUTLASS status onto the runtime error type callers expect.
cudaError_t cutlass_hgemm_nn(
  int M,
  int N,
  int K,
  cutlass::half_t alpha,
  cutlass::half_t const *A,
  cutlass::layout::ColumnMajor::Stride::Index lda,
  cutlass::half_t const *B,
  cutlass::layout::ColumnMajor::Stride::Index ldb,
  cutlass::half_t beta,
  cutlass::half_t *C,
  cutlass::layout::ColumnMajor::Stride::Index ldc) {

  using ColumnMajor = cutlass::layout::ColumnMajor;

  // All operands are half precision, all in column-major layout.
  using Gemm = cutlass::gemm::device::Gemm<
    cutlass::half_t, ColumnMajor,   // A
    cutlass::half_t, ColumnMajor,   // B
    cutlass::half_t, ColumnMajor    // output
  >;

  Gemm::Arguments arguments(
    {M, N, K},
    {A, lda},
    {B, ldb},
    {C, ldc},
    {C, ldc},        // D aliases C: the product is written back in place
    {alpha, beta});

  Gemm gemm_operator;
  cutlass::Status status = gemm_operator(arguments);

  return (status == cutlass::Status::kSuccess) ? cudaSuccess : cudaErrorUnknown;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Allocate several matrices in GPU device memory and call a single-precision
/// CUTLASS GEMM kernel.
// Allocates A (MxK), B (KxN) and two copies of C (MxN), runs the CUTLASS GEMM
// on the device, recomputes the product with the host-side reference GEMM, and
// returns cudaErrorUnknown if the two results are not bit-identical.
cudaError_t TestCutlassGemm(int M, int N, int K, cutlass::half_t alpha, cutlass::half_t beta) {
cudaError_t result;
//
// Construct cutlass::HostTensor<> using the half-precision host-side type.
//
// cutlass::HostTensor<> allocates memory on both the host and device corresponding to rank=2
// tensors in column-major layout. Explicit synchronization methods are offered to copy the
// tensor to the device or to the host.
//
// M-by-K matrix of cutlass::half_t
cutlass::HostTensor<cutlass::half_t, cutlass::layout::ColumnMajor> A(cutlass::MatrixCoord(M, K));
// K-by-N matrix of cutlass::half_t
cutlass::HostTensor<cutlass::half_t, cutlass::layout::ColumnMajor> B(cutlass::MatrixCoord(K, N));
// M-by-N matrix of cutlass::half_t
cutlass::HostTensor<cutlass::half_t, cutlass::layout::ColumnMajor> C_cutlass(cutlass::MatrixCoord(M, N));
// M-by-N matrix of cutlass::half_t
cutlass::HostTensor<cutlass::half_t, cutlass::layout::ColumnMajor> C_reference(cutlass::MatrixCoord(M, N));
//
// Initialize matrices with small, random integers.
//
// Arbitrary RNG seed value. Hard-coded for deterministic results.
uint64_t seed = 2080;
// Gaussian random distribution
cutlass::half_t mean = 0.0_hf;
cutlass::half_t stddev = 5.0_hf;
// Specify the number of bits right of the binary decimal that are permitted
// to be non-zero. A value of "0" here truncates random values to integers
int bits_less_than_one = 0;
// Distinct seed multipliers below keep A, B and C statistically independent.
cutlass::reference::device::TensorFillRandomGaussian(
A.device_view(),
seed,
mean,
stddev,
bits_less_than_one
);
cutlass::reference::device::TensorFillRandomGaussian(
B.device_view(),
seed * 2019,
mean,
stddev,
bits_less_than_one
);
cutlass::reference::device::TensorFillRandomGaussian(
C_cutlass.device_view(),
seed * 1993,
mean,
stddev,
bits_less_than_one
);
// Copy C_cutlass into C_reference so the GEMM is correct when beta != 0.
cutlass::device_memory::copy_device_to_device(
C_reference.device_data(),
C_cutlass.device_data(),
C_cutlass.capacity());
// Copy the device-side view into host memory
C_reference.sync_host();
//
// Launch the CUTLASS GEMM kernel
//
// stride(0) of a column-major HostTensor is its leading dimension.
result = cutlass_hgemm_nn(
M,
N,
K,
alpha,
A.device_data(),
A.stride(0),
B.device_data(),
B.stride(0),
beta,
C_cutlass.device_data(),
C_cutlass.stride(0)
);
if (result != cudaSuccess) {
return result;
}
//
// Verify the result using a host-side reference
//
// A and B were initialized using device-side procedures. The intent of this example is to
// use the host-side reference GEMM, so we must perform a device-to-host copy.
A.sync_host();
B.sync_host();
// Copy CUTLASS's GEMM results into host memory.
C_cutlass.sync_host();
// Compute the reference result using the host-side GEMM reference implementation.
cutlass::reference::host::Gemm<
cutlass::half_t, // ElementA
cutlass::layout::ColumnMajor, // LayoutA
cutlass::half_t, // ElementB
cutlass::layout::ColumnMajor, // LayoutB
cutlass::half_t, // ElementOutput
cutlass::layout::ColumnMajor, // LayoutOutput
cutlass::half_t,
cutlass::half_t
> gemm_ref;
gemm_ref(
{M, N, K}, // problem size (type: cutlass::gemm::GemmCoord)
alpha, // alpha (type: cutlass::half_t)
A.host_ref(), // A (type: TensorRef<half_t, ColumnMajor>)
B.host_ref(), // B (type: TensorRef<half_t, ColumnMajor>)
beta, // beta (type: cutlass::half_t)
C_reference.host_ref() // C (type: TensorRef<half_t, ColumnMajor>)
);
// Compare reference to computed results.
// Inputs are integer-valued, so device and host results must match exactly.
if (!cutlass::reference::host::TensorEquals(
C_reference.host_view(),
C_cutlass.host_view())) {
char const *filename = "errors_01_cutlass_utilities.csv";
std::cerr << "Error - CUTLASS GEMM kernel differs from reference. Wrote computed and reference results to '" << filename << "'" << std::endl;
//
// On error, print C_cutlass and C_reference to std::cerr.
//
// Note, these are matrices of half-precision elements stored in host memory as
// arrays of type cutlass::half_t.
//
std::ofstream file(filename);
// Result of CUTLASS GEMM kernel
file << "\n\nCUTLASS =\n" << C_cutlass.host_view() << std::endl;
// Result of reference computation
file << "\n\nReference =\n" << C_reference.host_view() << std::endl;
// Return error code.
return cudaErrorUnknown;
}
// Passed error check
return cudaSuccess;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Entry point to cutlass_utilities example.
//
// usage:
//
// 01_cutlass_utilities <M> <N> <K> <alpha> <beta>
//
// Entry point: parses optional <M> <N> <K> <alpha> <beta> arguments, verifies
// the device supports half precision (compute capability >= 5.3), and runs the
// GEMM comparison test.
int main(int argc, const char *arg[]) {
//
// This example uses half-precision and is only suitable for devices with compute capabitliy 5.3 or greater.
//
cudaDeviceProp prop;
cudaError_t result = cudaGetDeviceProperties(&prop, 0);
if (result != cudaSuccess) {
std::cerr << "Failed to query device properties with error " << cudaGetErrorString(result) << std::endl;
return -1;
}
if (!(prop.major > 5 || (prop.major == 5 && prop.minor >= 3))) {
std::cerr << "This example uses half precision and is only suitable for devices with compute capability 5.3 or greater.\n";
std::cerr << "You are using a CUDA device with compute capability " << prop.major << "." << prop.minor << std::endl;
return -1;
}
//
// Parse the command line to obtain GEMM dimensions and scalar values.
//
// GEMM problem dimensions: <M> <N> <K>
// Any dimension not supplied on the command line keeps its 128 default.
int problem[3] = { 128, 128, 128 };
for (int i = 1; i < argc && i < 4; ++i) {
std::stringstream ss(arg[i]);
ss >> problem[i - 1];
}
// Linear scale factors in GEMM. Note, these are half-precision values stored as
// cutlass::half_t.
//
// Values outside the range of IEEE FP16 will overflow to infinity or underflow to zero.
//
// Defaults: alpha = 1, beta = 0 (plain product, C input ignored).
cutlass::half_t scalars[2] = { 1.0_hf, 0.0_hf };
for (int i = 4; i < argc && i < 6; ++i) {
std::stringstream ss(arg[i]);
ss >> scalars[i - 4]; // lexical cast to cutlass::half_t
}
//
// Run the CUTLASS GEMM test.
//
result = TestCutlassGemm(
problem[0], // GEMM M dimension
problem[1], // GEMM N dimension
problem[2], // GEMM K dimension
scalars[0], // alpha
scalars[1] // beta
);
if (result == cudaSuccess) {
std::cout << "Passed." << std::endl;
}
// Exit.
return result == cudaSuccess ? 0 : -1;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
|
f309a42cde3405ad44c80b434de53d814f173ded.hip | // !!! This is a file automatically generated by hipify!!!
/* CUDA blur
* Kevin Yuh, 2014 */
#include <cstdio>
#include <hip/hip_runtime.h>
#include "Blur_cuda.cuh"
__global__
void
cudaBlurKernel(const float *raw_data, const float *blur_v, float *out_data,
               int N, int blur_v_size) {
    /* Each thread owns output sample `idx`; stepping by the total thread
       count (grid-stride) lets any launch configuration cover all N
       samples. */
    const unsigned int step = blockDim.x * gridDim.x;

    for (unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
         idx < N;
         idx += step) {
        out_data[idx] = 0;
        /* Causal convolution: tap j looks back j samples, so only taps
           with j <= idx stay inside the signal. */
        for (int j = 0; j < blur_v_size && (unsigned int)j <= idx; ++j)
            out_data[idx] += raw_data[idx - j] * blur_v[j];
    }
}
// Host-side launcher: forwards the device pointers and sizes to the blur
// kernel with the caller-chosen launch configuration. The kernel iterates
// with a grid-stride step, so any blocks/threadsPerBlock combination covers
// all N samples. raw_data, blur_v and out_data must be device pointers.
void cudaCallBlurKernel(const unsigned int blocks,
const unsigned int threadsPerBlock,
const float *raw_data,
const float *blur_v,
float *out_data,
const unsigned int N,
const unsigned int blur_v_size) {
// Call kernel function
hipLaunchKernelGGL(( cudaBlurKernel), dim3(blocks), dim3(threadsPerBlock), 0, 0, raw_data, blur_v, out_data, N, blur_v_size);
}
| f309a42cde3405ad44c80b434de53d814f173ded.cu | /* CUDA blur
* Kevin Yuh, 2014 */
#include <cstdio>
#include <cuda_runtime.h>
#include "Blur_cuda.cuh"
__global__
void
cudaBlurKernel(const float *raw_data, const float *blur_v, float *out_data,
               int N, int blur_v_size) {
    /* Each thread owns output sample `idx`; stepping by the total thread
       count (grid-stride) lets any launch configuration cover all N
       samples. */
    const unsigned int step = blockDim.x * gridDim.x;

    for (unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
         idx < N;
         idx += step) {
        out_data[idx] = 0;
        /* Causal convolution: tap j looks back j samples, so only taps
           with j <= idx stay inside the signal. */
        for (int j = 0; j < blur_v_size && (unsigned int)j <= idx; ++j)
            out_data[idx] += raw_data[idx - j] * blur_v[j];
    }
}
const unsigned int threadsPerBlock,
const float *raw_data,
const float *blur_v,
float *out_data,
const unsigned int N,
const unsigned int blur_v_size) {
// Call kernel function
cudaBlurKernel<<<blocks, threadsPerBlock>>>(raw_data, blur_v, out_data, N, blur_v_size);
}
|
3c599c42a8b6374f73252e77d7c7bdae1ab1316c.hip | // !!! This is a file automatically generated by hipify!!!
#define CUB_IGNORE_DEPRECATED_CPP_DIALECT 1 //ignore warnings caused by old compiler version
#define THRUST_IGNORE_DEPRECATED_CPP_DIALECT 1
#include <iostream>
#include <vector>
#include <iomanip>
#include <algorithm>
#include <ctime>
#include <fstream>
#include <sstream>
#include <assert.h>
#include <hip/hip_runtime.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/shuffle.h>
#include <thrust/random.h>
//file for handling command line arguments
#include "args.hxx"
#define DELTA 0.5
// Coordinates of a single event, treated as a point when computing the
// Euclidean distances used by the energy test.
// NOTE(review): the s_ij names presumably follow the invariant-mass-squared
// convention for particle combinations (i, j, ...) -- confirm against the
// input data format.
struct Event
{
double s12;
double s13;
double s24;
double s34;
double s134;
// Precomputed per-event value that shortens the Euclidean-distance
// calculation between events.
double hmg;//helpful variable to reduce time needed for calculating euclidean distance
};
/**
Energy test wrapper, calls other functions based on command line arguments
@param argc number of command line arguments
@param argv array of command line arguments
*/
int energy_test_gpu_wrapper(int argc, char ** argv);
/**
Reads data from file and returns it as thrust::host_vector
@param file_name name of the file to read data from
@param events number of events to read
*/
thrust::host_vector<Event> load_data(const std::string & file_name, const size_t events);
/**
Calculates the T value of 2 dataset
@param cpu_dataset_1 first dataset
@param cpu_dataset_2 second dataset
@param size_1 number of events in 1st dataset
@param size_2 number of events in 2nd dataset
@param prop structure with active device's properties
@param show_distances display distances
*/
double compute_statistic(const thrust::host_vector<Event> & cpu_dataset_1, const thrust::host_vector <Event> & cpu_dataset_2, int size_1, int size_2, hipDeviceProp_t & prop, bool show_distances=true);
/**
Computes individual statistic contributions(Ti values) and returns them in array
@param cpu_dataset_1 1st dataset, Ti values are related with this set
@param cpu_dataset_2 2nd dataset
@param size_1 number of events in 1st dataset
@param size_2 number of events in 2nd dataset
@param prop structure with active device's properties
*/
double * compute_statistic_contributions(const thrust::host_vector<Event> & cpu_dataset_1, const thrust::host_vector<Event> & cpu_dataset_2, int size_1, int size_2, hipDeviceProp_t & prop);
/**
Device kernel that calculates distance between 2 datasets
@param dataset_1 1st dataset
@param dataset_2 2nd dataset
@param size_1 number of elements in 1st dataset
@param size_2 number of elements in 2nd dataset
@param sum pointer to variable storing the distance between 2 datasets
@param shared_el number of elements from 2nd dataset stored in each block's shared memory
@param same_sets if true, datasets are the same, do not compute distance between same elements
*/
__global__
void compute_distance(const Event * dataset_1, const Event * dataset_2, const int size_1, const int size_2, double * sum, const int shared_el, bool same_sets=true);
/**
Device kernel that calculates distance between dataset and single event
@param dataset dataset
@param ev single event
@param size_1 number of events in dataset
@param sum pointer to variable storing the distance
@param shared_el number of elements stored each block's shared memory
@param index_to_skip index of ev (if ev is from dataset) used to skip it's distance to itself, otherwise -1
*/
__global__
void compute_individual_distance(const Event * dataset, const Event * ev, const int size_1, double * sum, int shared_el, int index_to_skip=-1);
/**
Function checking for CUDA runtime errors
@param result error code returns by CUDA functions
*/
inline hipError_t checkCudaError(hipError_t result);
// Entry point: delegates to the wrapper that parses arguments and runs the
// energy test; any std::runtime_error is reported on stderr and turned into
// a failure exit code.
int main(int argc, char *argv[])
{
try
{
return energy_test_gpu_wrapper(argc, argv);
}
catch(std::runtime_error & err)
{
std::cerr << err.what() << std::endl;
exit (EXIT_FAILURE);
}
// Not reached: the try block either returns or the handler exits.
return 0;
}
/**
 Energy test driver: parses command line options, loads both datasets,
 computes the nominal test statistic T (optionally with per-event Ti
 contributions written to file), and, if requested, runs permutations of the
 pooled events to estimate a p-value for T.
 @param argc number of command line arguments
 @param argv array of command line arguments
 @return 0 on success, 1 on a command line parsing error
*/
int energy_test_gpu_wrapper(int argc, char ** argv)
{
    // Parsing command line arguments, setting up execution options
    args::ArgumentParser parser("GPU based energy test");
    args::HelpFlag help(parser, "help", "Display this help menu", {'h', "help"});
    args::Flag calculate_ti(parser, "calculate ti", "Calculate individual contributions to test statistic", {"calculate-ti"});
    args::Flag permutations_only(parser, "permutations only", "Only calculate permutations", {"permutations-only"});
    args::Flag output_write(parser, "output write", "write output Tvalues", {"output-write"});
    args::ValueFlag<size_t> n_permutations(parser, "n_permutations", "Number of permutations to run", {"n-permutations"});
    args::ValueFlag<size_t> max_events_1(parser, "max events 1", "Maximum number of events to use from dataset 1", {"max-events-1"});
    args::ValueFlag<size_t> max_events_2(parser, "max events 2", "Maximum number of events to use from dataset 2", {"max-events-2"});
    args::ValueFlag<size_t> max_events(parser, "max events", "Max number of events in each dataset", {"max-events"});
    args::ValueFlag<size_t> seed(parser, "seed", "seed for permutations", {"seed"});
    args::ValueFlag<size_t> max_permutation_events_1(parser, "max permutation events 1", "Max number of events in dataset 1 for permutations",
        {"max-permutation-events-1"});
    args::ValueFlag<std::string> ti_output_fn_1(parser, "ti output filename 1", "Filename for individual contributions to test statistic from dataset 1", {"ti-output-fn-1"});
    args::ValueFlag<std::string> ti_output_fn_2(parser, "ti output filename 2", "Filename for individual contributions to test statistic from dataset 2", {"ti-output-fn-2"});
    args::ValueFlag<std::string> permutation_ti_minmax_output_fn(parser, "permutation ti min-max filename", "Output filename for the minimum and maximum Ti values from permutations",
        {"permutation-ti-minmax-output-fn"});
    args::Positional<std::string> filename_1(parser, "dataset 1", "Filename for the first dataset");
    args::Positional<std::string> filename_2(parser, "dataset 2", "Filename for the second dataset");
    args::Positional<std::string> permutation_output_fn(parser, "permutation output filename", "Output filename for the permutation test statistics", {"permutation-output-fn"});
    try
    {
        parser.ParseCLI(argc, argv);
        //Check for necessary options
        if (!filename_1 || !filename_2)
        {
            throw args::ParseError("Direct paths to two dataset files must be given");
        }
        // --max-events sets both limits; combining it with per-dataset limits is ambiguous
        if ((max_events_1 || max_events_2) && max_events)
        {
            throw args::ParseError("--max-events cannot be used with --max-events-1 or --max-events-2");
        }
        // Ti bookkeeping below assumes permutation sets match the nominal dataset sizes
        if (calculate_ti && max_permutation_events_1)
        {
            throw args::ParseError("--calculate-ti cannot be used with --max-permutation-events-1");
        }
    }
    catch (args::Help)
    {
        std::cout << parser;
        return 0;
    }
    catch (args::ParseError & err)
    {
        std::cerr << err.what() << std::endl;
        std::cerr << parser;
        return 1;
    }
    // Displaying information about the device
    int deviceId;
    checkCudaError(hipGetDevice(&deviceId));
    hipDeviceProp_t prop;
    // NOTE(review): return value unchecked here, unlike the other runtime calls — confirm intended
    hipGetDeviceProperties(&prop, deviceId);
    printf("\nDevice name: %s\n", prop.name);
    printf("Shared memory per block: %zu\n", prop.sharedMemPerBlock);
    printf("Threads per block: %d\n", prop.maxThreadsPerBlock);
    printf("Number of SMs: %d\n", prop.multiProcessorCount);
    printf("Shared memory per SM: %zu\n", prop.sharedMemPerMultiprocessor);
    printf("Max Block per SM: %d\n", prop.maxBlocksPerMultiProcessor);
    int val;
    hipDeviceGetAttribute(&val, hipDeviceAttributeMaxThreadsPerBlock, deviceId);
    // NOTE(review): the attribute queried is max threads per *block*; the label says "per SM" — confirm
    printf("Max Threads per SM: %d\n\n", val);
    // Load specified number of events from data files
    size_t data_1_limit, data_2_limit;
    // Default: no limit — read every event in each file
    data_1_limit = data_2_limit = std::numeric_limits<size_t>::max();
    if (max_events)
    {
        data_1_limit = data_2_limit = args::get(max_events);
    }
    else
    {
        if (max_events_1)
        {
            data_1_limit = args::get(max_events_1);
        }
        if (max_events_2)
        {
            data_2_limit = args::get(max_events_2);
        }
    }
    //Read data from files
    thrust::host_vector<Event>dataset_1 = load_data(args::get(filename_1), data_1_limit);
    thrust::host_vector<Event>dataset_2 = load_data(args::get(filename_2), data_2_limit);
    int size_1 = static_cast<int>(dataset_1.size());
    int size_2 = static_cast<int>(dataset_2.size());
    std::cout << "Size of dataset 1: " << size_1 << std::endl;
    std::cout << "Size of dataset 2: " << size_2 << std::endl << std::endl;
    // Nominal test statistic; only assigned (and later read) when !permutations_only
    double t_value;
    if (!permutations_only)
    {
        // Calculate individual contributions to the test (Ti values)
        if (calculate_ti)
        {
            std::cout << "Calculating contributions of individual events to test statistic..." << std::endl;
            // One Ti array per dataset; T is the sum of all contributions
            double * tis_1 = compute_statistic_contributions(dataset_1, dataset_2, size_1, size_2, prop);
            double * tis_2 = compute_statistic_contributions(dataset_2, dataset_1, size_2, size_1, prop);
            double total = 0;
            for (int i=0; i < size_1; i++)
                total += tis_1[i];
            for (int i=0; i < size_2; i++)
                total += tis_2[i];
            t_value = total;
            std::cout << "Test statistic for nominal dataset:" << std::endl << " T = " << t_value << std::endl << std::endl;
            //write Ti values to file
            std::string ti_file_1;
            std::string ti_file_2;
            if (ti_output_fn_1)
            {
                ti_file_1 = args::get(ti_output_fn_1);
            }
            else
            {
                // Default filename encodes the dataset size
                ti_file_1 = "Ti_dataset_1_" + std::to_string(size_1) + ".txt";
            }
            if (ti_output_fn_2)
            {
                ti_file_2 = args::get(ti_output_fn_2);
            }
            else
            {
                ti_file_2 = "Ti_dataset_2_" + std::to_string(size_2) + ".txt";
            }
            std::ofstream file1(ti_file_1);
            if (!file1.is_open())
            {
                throw std::runtime_error("Cannot open file" + ti_file_1);
            }
            // One line per event: the five coordinates followed by its Ti value
            for (int i=0; i < size_1; i++)
            {
                file1 << dataset_1[i].s12 << " " << dataset_1[i].s13 << " " << dataset_1[i].s24 << " ";
                file1 << dataset_1[i].s34 << " " << dataset_1[i].s134 << " Ti = " << tis_1[i] << std::endl;
            }
            file1.close();
            std::cout << "Ti values for dataset 1 written to " << ti_file_1 << std::endl;
            std::ofstream file2(ti_file_2);
            if (!file2.is_open())
            {
                throw std::runtime_error("Cannot open file" + ti_file_2);
            }
            for (int i=0; i < size_2; i++)
            {
                file2 << dataset_2[i].s12 << " " << dataset_2[i].s13 << " " << dataset_2[i].s24 << " ";
                file2 << dataset_2[i].s34 << " " << dataset_2[i].s134 << " Ti = " << tis_2[i] << std::endl;
            }
            file2.close();
            std::cout << "Ti values for dataset 2 written to " << ti_file_2 << std::endl;
            // compute_statistic_contributions returns malloc'd arrays owned by the caller
            free(tis_1);
            free(tis_2);
        }
        // calculate T value for nominal datasets
        else
        {
            t_value = compute_statistic(dataset_1, dataset_2, size_1, size_2, prop);
            std::cout << "\nT = " << t_value << std::endl;
        }
    }
    if (n_permutations)
    {
        // Vector for permutations of events: pool of both datasets, reshuffled as needed
        thrust::host_vector<Event>permuted_events;
        permuted_events.insert(permuted_events.end(), dataset_1.begin(), dataset_1.end());
        permuted_events.insert(permuted_events.end(), dataset_2.begin(), dataset_2.end());
        // Number of permutations to run
        int N = static_cast<int>(args::get(n_permutations));
        // Number of events used in permutation
        int n_events_1 = size_1;
        int n_events_2 = size_2;
        if (max_permutation_events_1)
        {
            // NOTE(review): unqualified ::min — presumably pulled in by the HIP headers;
            // the CUDA twin of this file uses std::min — confirm it resolves as intended
            n_events_1 = ::min(n_events_1, static_cast<int>(args::get(max_permutation_events_1)));
            // Keep the dataset-size ratio when subsampling dataset 1
            n_events_2 = std::round(n_events_1 * (static_cast<double>(size_2) / static_cast<double>(size_1)));
        }
        // Scale factor comparing subsampled permutation statistics against the full-size t_value
        double factor = static_cast<double>(n_events_1 + n_events_2) / static_cast<double>(size_1 + size_2);
        // Random generator for shuffling
        int random_seed = static_cast<int>(seed ? args::get(seed) : std::mt19937::default_seed);
        thrust::default_random_engine random_generator(random_seed);
        // Output to files
        //T values
        std::string T_file;
        if (permutation_output_fn)
        {
            T_file = args::get(permutation_output_fn);
        }
        else
        {
            T_file = "T_values_permutations_" + std::to_string(size_1) + "_" + std::to_string(size_2) + "_" + std::to_string(random_seed) + ".txt";
        }
        std::ofstream t_file;
        if (output_write || permutation_output_fn)
        {
            t_file.open(T_file);
        }
        //T min/max values
        std::string T_minmax_file;
        if (permutation_ti_minmax_output_fn)
        {
            T_minmax_file = args::get(permutation_ti_minmax_output_fn);
        }
        else
        {
            T_minmax_file = "T_minmax_permutations_" + std::to_string(size_1) + "_" + std::to_string(size_2) + "_" + std::to_string(random_seed) + ".txt";
        }
        std::ofstream t_minmax_file;
        if (output_write || permutation_ti_minmax_output_fn)
        {
            t_minmax_file.open(T_minmax_file);
        }
        // p values
        std::ofstream p_file;
        if (!permutations_only)
        {
            p_file.open("pvalues.txt", std::iostream::out | std::iostream::app);//append if file exists
        }
        // Count of permutations whose (scaled) statistic exceeds the nominal one
        int nsig = 0;
        // Start past the end of the pool so the first iteration triggers a shuffle
        int skip = permuted_events.size()+1;
        std::cout << "Calculating " << N << " permutations" << std::endl;
        for (int i=0; i < N; i++)
        {
            // Slide a window over the shuffled pool; reshuffle only when exhausted
            if (skip + n_events_1 + n_events_2 > static_cast<int>(permuted_events.size()))//avoid unnecessary shuffling
            {
                thrust::shuffle(permuted_events.begin(), permuted_events.end(), random_generator);
                skip = 0;
            }
            //get permuted datasets
            thrust::host_vector<Event>permuted_set_1(permuted_events.begin() + skip, permuted_events.begin() + n_events_1 + skip);
            skip += n_events_1;
            thrust::host_vector<Event>permuted_set_2(permuted_events.begin() + skip, permuted_events.begin() + n_events_2 + skip);
            skip += n_events_2;
            double test_statistic;
            // Calculate Ti values for permuted set
            if (calculate_ti)
            {
                double * tis_1 = compute_statistic_contributions(permuted_set_1, permuted_set_2, n_events_1, n_events_2, prop);
                double * tis_2 = compute_statistic_contributions(permuted_set_2, permuted_set_1, n_events_2, n_events_1, prop);
                double total = 0;
                // (these inner loop counters shadow the outer permutation counter i)
                for (int i=0; i < n_events_1; i++)
                    total += tis_1[i];
                for (int i=0; i < n_events_2; i++)
                    total += tis_2[i];
                test_statistic = total;
                // Find min and max Ti values
                double ti_min = tis_1[0];
                double ti_max = tis_1[0];
                for (int i=1; i < n_events_1; i++)
                {
                    if (tis_1[i] > ti_max)
                        ti_max = tis_1[i];
                    if (tis_1[i] < ti_min)
                        ti_min = tis_1[i];
                }
                for (int i=0; i < n_events_2; i++)
                {
                    if (tis_2[i] > ti_max)
                        ti_max = tis_2[i];
                    if (tis_2[i] < ti_min)
                        ti_min = tis_2[i];
                }
                if (output_write || permutation_ti_minmax_output_fn)
                {
                    t_minmax_file << ti_min << " " << ti_max << std::endl;
                }
                free(tis_1);
                free(tis_2);
            }
            else
            {
                test_statistic = compute_statistic(permuted_set_1, permuted_set_2, n_events_1, n_events_2, prop, false);
            }
            if (output_write || permutation_output_fn)
            {
                t_file << test_statistic << std::endl;
            }
            if (!permutations_only)
            {
                // Count permutations at least as extreme as the nominal statistic
                if (factor * test_statistic > t_value)
                    nsig++; //used to calculate p-value
            }
        }
        if (!permutations_only)
        {
            // p-value = fraction of permutations exceeding the nominal statistic
            double p_value = static_cast<double>(nsig)/static_cast<double>(N);
            p_file << DELTA << " " << p_value << std::endl;
            std::cout << "p-value = " << p_value << std::endl;
        }
        // Closing streams that were never opened is harmless (only sets failbit)
        t_file.close();
        t_minmax_file.close();
        p_file.close();
    }
    return 0;
}
/**
 Reads events from a whitespace-separated text file, one event per line
 (five coordinate values), and precomputes the `hmg` helper term used by the
 distance kernels.
 @param file_name path of the data file
 @param max_size maximum number of events to load
 @return host vector of parsed events
 @throws std::runtime_error if the file cannot be opened or a line is malformed
*/
thrust::host_vector<Event> load_data(const std::string & file_name, const size_t max_size)
{
    std::ifstream file(file_name);
    if (!file.is_open())
    {
        // Note: separator added so the message reads "Cannot open file <name>"
        throw std::runtime_error("Cannot open file " + file_name);
    }
    thrust::host_vector<Event> events;
    // Reserve up front to avoid repeated reallocation; capped so an unbounded
    // max_size does not trigger a huge allocation. std::min (as in the CUDA
    // twin of this file) instead of unqualified ::min.
    events.reserve(std::min(max_size, static_cast<size_t>(500000)));
    std::string line;
    while (std::getline(file, line) && events.size() < max_size)
    {
        Event e;
        std::istringstream is (line);
        //extracting each line for double values
        is >> e.s12 >> e.s13 >> e.s24 >> e.s34 >> e.s134;
        if (is.fail())
        {
            throw std::runtime_error("Error reading line in " + file_name);
        }
        // Half the squared norm of the event: lets the kernels evaluate half
        // the squared Euclidean distance as a.hmg + b.hmg - dot(a, b).
        e.hmg = 0.5 * (e.s12*e.s12 + e.s13*e.s13 + e.s24*e.s24 + e.s34*e.s34 + e.s134*e.s134);
        events.push_back(e);
    }
    return events;
}
// Picks the smallest power-of-two block count such that each block's tile of
// ceil(size / blocks) Events fits into the device's per-block shared memory.
static void choose_tiling(int size, int max_shared_per_block, int & blocks, int & shared_el)
{
    blocks = 1;
    shared_el = size / blocks;
    if (size % blocks)
        shared_el++;
    while (shared_el * static_cast<int>(sizeof(Event)) > max_shared_per_block)
    {
        blocks *= 2;
        shared_el = size / blocks;
        if (size % blocks)
            shared_el++;
    }
}
/**
 Computes the energy test statistic T = dist_11 + dist_22 - dist_12 for two
 datasets on the GPU, where dist_xy are the normalised sums of the Gaussian
 kernel values within/between the datasets.
 @param cpu_dataset_1 first dataset
 @param cpu_dataset_2 second dataset
 @param size_1 number of events in the first dataset
 @param size_2 number of events in the second dataset
 @param prop properties of the active device (shared memory capacity)
 @param show_distances print the three distance terms to stdout
 @return the test statistic T
*/
double compute_statistic(const thrust::host_vector<Event> & cpu_dataset_1, const thrust::host_vector <Event> & cpu_dataset_2, int size_1, int size_2, hipDeviceProp_t & prop, bool show_distances)
{
    // Copy both datasets to device memory
    thrust::device_vector<Event> gpu_dataset_1 = cpu_dataset_1;
    thrust::device_vector<Event> gpu_dataset_2 = cpu_dataset_2;
    double dist_11, dist_22, dist_12;
    double *d_dist_11, *d_dist_22, *d_dist_12;
    checkCudaError(hipMalloc(&d_dist_11, sizeof(double)));
    checkCudaError(hipMalloc(&d_dist_22, sizeof(double)));
    checkCudaError(hipMalloc(&d_dist_12, sizeof(double)));
    // hipMemset takes an int byte value (0, not the double literal 0.0);
    // all-zero bytes correctly represent 0.0 for IEEE doubles
    checkCudaError(hipMemset(d_dist_11, 0, sizeof(double)));
    checkCudaError(hipMemset(d_dist_22, 0, sizeof(double)));
    checkCudaError(hipMemset(d_dist_12, 0, sizeof(double)));
    //set up execution configuration
    const int threads = 1024;
    const int max_shared_per_block = static_cast<int>(prop.sharedMemPerBlock);
    int blocks, shared_el;
    //distance 1_1 (within dataset 1, skipping self-pairs)
    choose_tiling(size_1, max_shared_per_block, blocks, shared_el);
    hipLaunchKernelGGL(( compute_distance), dim3(blocks), dim3(threads), shared_el * sizeof(Event), 0, thrust::raw_pointer_cast(gpu_dataset_1.data()), thrust::raw_pointer_cast(gpu_dataset_1.data()), size_1, size_1, d_dist_11, shared_el, true);
    checkCudaError(hipGetLastError()); // catch launch-configuration errors
    //distance 2_2 (within dataset 2, skipping self-pairs)
    choose_tiling(size_2, max_shared_per_block, blocks, shared_el);
    hipLaunchKernelGGL(( compute_distance), dim3(blocks), dim3(threads), shared_el * sizeof(Event), 0, thrust::raw_pointer_cast(gpu_dataset_2.data()), thrust::raw_pointer_cast(gpu_dataset_2.data()), size_2, size_2, d_dist_22, shared_el, true);
    checkCudaError(hipGetLastError());
    //distance 1_2 (cross term; dataset 2 is the one tiled into shared memory)
    choose_tiling(size_2, max_shared_per_block, blocks, shared_el);
    hipLaunchKernelGGL(( compute_distance), dim3(blocks), dim3(threads), shared_el * sizeof(Event), 0, thrust::raw_pointer_cast(gpu_dataset_1.data()), thrust::raw_pointer_cast(gpu_dataset_2.data()), size_1, size_2, d_dist_12, shared_el, false);
    checkCudaError(hipGetLastError());
    checkCudaError(hipDeviceSynchronize());
    checkCudaError(hipMemcpy(&dist_11, d_dist_11, sizeof(double), hipMemcpyDeviceToHost));
    checkCudaError(hipMemcpy(&dist_22, d_dist_22, sizeof(double), hipMemcpyDeviceToHost));
    checkCudaError(hipMemcpy(&dist_12, d_dist_12, sizeof(double), hipMemcpyDeviceToHost));
    // Normalise: the within-set kernels visit each ordered pair (i != j) once,
    // the cross-set kernel visits each pair once
    dist_11 /= (2.0 * size_1 * (size_1 - 1));
    dist_22 /= (2.0 * size_2 * (size_2 - 1));
    dist_12 /= (1.0 * size_1 * size_2);
    if (show_distances)
    {
        std::cout << "dist_11 = " << dist_11 << std::endl;
        std::cout << "dist_22 = " << dist_22 << std::endl;
        std::cout << "dist_12 = " << dist_12 << std::endl;
    }
    checkCudaError(hipFree(d_dist_11));
    checkCudaError(hipFree(d_dist_22));
    checkCudaError(hipFree(d_dist_12));
    return dist_11 + dist_22 - dist_12;
}
/**
 Computes, for every event of dataset 1, its individual contribution Ti to the
 test statistic (self-distance term minus cross-distance term). Summing the
 contributions of both datasets reproduces T from compute_statistic.
 The caller owns the returned malloc'd array of size_1 doubles and must free() it.
 @param cpu_dataset_1 first dataset; Ti values are computed for these events
 @param cpu_dataset_2 second dataset
 @param size_1 number of events in the first dataset
 @param size_2 number of events in the second dataset
 @param prop properties of the active device (shared memory capacity)
 @return malloc'd array of size_1 Ti values
 @throws std::runtime_error if host memory cannot be allocated
*/
double * compute_statistic_contributions(const thrust::host_vector<Event> & cpu_dataset_1, const thrust::host_vector<Event> & cpu_dataset_2, int size_1, int size_2, hipDeviceProp_t & prop)
{
    double * statistic_contributions = (double *)malloc(size_1 * sizeof(double));
    if (statistic_contributions == NULL)
    {
        throw std::runtime_error("Cannot allocate host memory");
    }
    //Allocate device memory
    thrust::device_vector<Event> gpu_dataset_1 = cpu_dataset_1;
    thrust::device_vector<Event> gpu_dataset_2 = cpu_dataset_2;
    double * d_dist_e1, * d_dist_e2;
    checkCudaError(hipMalloc(&d_dist_e1, sizeof(double)));
    checkCudaError(hipMalloc(&d_dist_e2, sizeof(double)));
    double dist_e1, dist_e2;
    // Launch configuration: each block caches ceil(size / blocks) events plus
    // one extra shared-memory slot for the probe event (index 0 in the kernel)
    int blocks_1 = 1, blocks_2 = 1;
    int threads_1 = 1024;
    int threads_2 = 1024;
    int shared_el_1 = size_1 / blocks_1;
    int shared_el_2 = size_2 / blocks_2;
    if (size_1 % blocks_1) shared_el_1++;
    if (size_2 % blocks_2) shared_el_2++;
    while ((shared_el_1 + 1) * sizeof(Event) > prop.sharedMemPerBlock)
    {
        blocks_1 *= 2;
        shared_el_1 = size_1 / blocks_1;
        if (size_1 % blocks_1) shared_el_1++;
    }
    while ((shared_el_2 + 1) * sizeof(Event) > prop.sharedMemPerBlock)
    {
        blocks_2 *= 2;
        shared_el_2 = size_2 / blocks_2;
        if (size_2 % blocks_2) shared_el_2++;
    }
    Event * set_1_ptr = thrust::raw_pointer_cast(gpu_dataset_1.data());
    Event * set_2_ptr = thrust::raw_pointer_cast(gpu_dataset_2.data());
    for (int i = 0; i < size_1; i++)
    {
        // hipMemset takes an int byte value (0, not the double literal 0.0)
        checkCudaError(hipMemset(d_dist_e1, 0, sizeof(double)));
        checkCudaError(hipMemset(d_dist_e2, 0, sizeof(double)));
        // Distance of event i to the rest of its own dataset (skipping itself)...
        hipLaunchKernelGGL(( compute_individual_distance), dim3(blocks_1), dim3(threads_1), (shared_el_1 + 1) * sizeof(Event), 0, set_1_ptr, &set_1_ptr[i], size_1, d_dist_e1, shared_el_1+1, i);
        // ...and to every event of the other dataset
        hipLaunchKernelGGL(( compute_individual_distance), dim3(blocks_2), dim3(threads_2), (shared_el_2 + 1) * sizeof(Event), 0, set_2_ptr, &set_1_ptr[i], size_2, d_dist_e2, shared_el_2+1);
        checkCudaError(hipGetLastError()); // catch launch-configuration errors
        checkCudaError(hipDeviceSynchronize());
        checkCudaError(hipMemcpy(&dist_e1, d_dist_e1, sizeof(double), hipMemcpyDeviceToHost));
        checkCudaError(hipMemcpy(&dist_e2, d_dist_e2, sizeof(double), hipMemcpyDeviceToHost));
        // Normalisation matches compute_statistic, so the Ti of both datasets
        // sum to T = dist_11 + dist_22 - dist_12
        dist_e1 /= 2.0 * size_1 * (size_1 - 1);
        dist_e2 /= 2.0 * size_1 * size_2;
        statistic_contributions[i] = dist_e1 - dist_e2;
    }
    checkCudaError(hipFree(d_dist_e1));
    checkCudaError(hipFree(d_dist_e2));
    return statistic_contributions;
}
__global__
void compute_distance(const Event * dataset_1, const Event * dataset_2, const int size_1, const int size_2, double * sum, int shared_el, bool same_sets)
{
    // Dynamic shared memory: this block's tile of dataset_2
    // (shared_el events starting at blockIdx.x * shared_el)
    extern __shared__ Event event2[];
    int thread = threadIdx.x;
    int block_size = blockDim.x;
    int start = blockIdx.x * shared_el;
    // Cooperatively stage the tile into shared memory
    while (thread+start < size_2 && thread < shared_el)
    {
        event2[thread] = dataset_2[thread+start];
        thread += block_size;
    }
    // Barrier before any thread reads the tile (reached by all threads)
    __syncthreads();
    double total = 0;
    // Each thread strides over dataset_1 and accumulates kernel values
    // against every event of the cached tile
    for (int i=threadIdx.x; i < size_1; i += block_size)
    {
        const Event event1 = dataset_1[i];
        for (int j=0; j < shared_el && j+start < size_2; j++)
        {
            // When both datasets are the same, skip the self-pair
            if (same_sets && i == j+start)
                continue;
            //calculate half of euclidean distance squared
            double dist = event1.hmg + event2[j].hmg -
                (event1.s12*event2[j].s12 + event1.s13*event2[j].s13 + event1.s24*event2[j].s24 + event1.s34*event2[j].s34 + event1.s134*event2[j].s134);
            // Gaussian kernel with width DELTA
            total += exp(-dist/(DELTA*DELTA));
        }
    }
    // One atomic per thread; atomicAdd on double requires SM60+ on CUDA hardware
    atomicAdd(sum, total);
}
__global__
void compute_individual_distance(const Event * dataset, const Event * ev, const int size_1, double * sum, int shared_el, int index_to_skip)
{
    // Dynamic shared memory layout: event[0] holds the probe event ev,
    // event[1..shared_el-1] holds this block's tile of the dataset
    extern __shared__ Event event[];
    int thread = threadIdx.x;
    int block_size = blockDim.x;
    // Each block covers shared_el-1 dataset entries (slot 0 is the probe)
    int start = blockIdx.x * (shared_el-1);
    if (thread == 0)
    {
        event[0] = *ev;
    }
    // Cooperatively stage the tile into shared memory
    while (thread+start < size_1 && thread+1 < shared_el)
    {
        event[thread+1] = dataset[thread+start];
        thread += block_size;
    }
    // Barrier before any thread reads the tile or the probe (reached by all threads)
    __syncthreads();
    double total=0;
    // Each thread strides over the tile slots; i+start-1 is the corresponding
    // global index in the dataset
    for (int i=threadIdx.x+1; i < shared_el && i+start-1 < size_1; i += block_size)
    {
        // Skip the probe's pairing with itself when it belongs to this dataset
        if (i+start-1 == index_to_skip)
            continue;
        // Half of the squared Euclidean distance between probe and tile event
        double dist = event[0].hmg + event[i].hmg -
            (event[0].s12*event[i].s12 + event[0].s13*event[i].s13 + event[0].s24*event[i].s24 + event[0].s34*event[i].s34 + event[0].s134*event[i].s134);
        // Gaussian kernel with width DELTA
        total += exp(-dist/(DELTA*DELTA));
    }
    // atomicAdd on double requires SM60+ on CUDA hardware
    atomicAdd(sum, total);
}
/**
 Checks the result of a HIP runtime call and terminates the program with a
 diagnostic on failure. Terminates via exit() rather than assert() alone,
 because assert() is compiled out when NDEBUG is defined, which would let
 release builds silently continue past runtime errors.
 @param result error code returned by HIP/CUDA runtime functions
 @return the same error code, so calls can be wrapped inline
*/
inline hipError_t checkCudaError(hipError_t result)
{
    if (result != hipSuccess)
    {
        fprintf(stderr, "CUDA Error: %s\n", hipGetErrorString(result));
        exit(EXIT_FAILURE); // fires in both debug and release builds
    }
    return result;
}
| 3c599c42a8b6374f73252e77d7c7bdae1ab1316c.cu | #define CUB_IGNORE_DEPRECATED_CPP_DIALECT 1 //ignore warnings caused by old compiler version
#define THRUST_IGNORE_DEPRECATED_CPP_DIALECT 1
#include <iostream>
#include <vector>
#include <iomanip>
#include <algorithm>
#include <ctime>
#include <fstream>
#include <sstream>
#include <assert.h>
#include <cuda_runtime.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/shuffle.h>
#include <thrust/random.h>
//file for handling command line arguments
#include "args.hxx"
#define DELTA 0.5
// One event of a dataset: the five coordinate values read per line from the
// input file (see load_data), plus a precomputed helper term.
struct Event
{
    double s12;
    double s13;
    double s24;
    double s34;
    double s134;
    // 0.5 * (sum of squares of the five coordinates), precomputed in load_data
    // so the kernels can evaluate half the squared Euclidean distance between
    // two events as a.hmg + b.hmg - dot(a, b).
    double hmg;//helpful variable to reduce time needed for calculating euclidean distance
};
/**
Energy test wrapper, calls other functions based on command line arguments
@param argc number of command line arguments
@param argv array of command line arguments
*/
int energy_test_gpu_wrapper(int argc, char ** argv);
/**
Reads data from file and returns it as thrust::host_vector
@param file_name name of the file to read data from
@param events number of events to read
*/
thrust::host_vector<Event> load_data(const std::string & file_name, const size_t events);
/**
Calculates the T value of 2 dataset
@param cpu_dataset_1 first dataset
@param cpu_dataset_2 second dataset
@param size_1 number of events in 1st dataset
@param size_2 number of events in 2nd dataset
@param prop structure with active device's properties
@param show_distances display distances
*/
double compute_statistic(const thrust::host_vector<Event> & cpu_dataset_1, const thrust::host_vector <Event> & cpu_dataset_2, int size_1, int size_2, cudaDeviceProp & prop, bool show_distances=true);
/**
Computes individual statistic contributions(Ti values) and returns them in array
@param cpu_dataset_1 1st dataset, Ti values are related with this set
@param cpu_dataset_2 2nd dataset
@param size_1 number of events in 1st dataset
@param size_2 number of events in 2nd dataset
@param prop structure with active device's properties
*/
double * compute_statistic_contributions(const thrust::host_vector<Event> & cpu_dataset_1, const thrust::host_vector<Event> & cpu_dataset_2, int size_1, int size_2, cudaDeviceProp & prop);
/**
Device kernel that calculates distance between 2 datasets
@param dataset_1 1st dataset
@param dataset_2 2nd dataset
@param size_1 number of elements in 1st dataset
@param size_2 number of elements in 2nd dataset
@param sum pointer to variable storing the distance between 2 datasets
@param shared_el number of elements from 2nd dataset stored in each block's shared memory
@param same_sets if true, datasets are the same, do not compute distance between same elements
*/
__global__
void compute_distance(const Event * dataset_1, const Event * dataset_2, const int size_1, const int size_2, double * sum, const int shared_el, bool same_sets=true);
/**
Device kernel that calculates distance between dataset and single event
@param dataset dataset
@param ev single event
@param size_1 number of events in dataset
@param sum pointer to variable storing the distance
	@param shared_el number of elements stored in each block's shared memory
	@param index_to_skip index of ev (if ev is from dataset) used to skip its distance to itself, otherwise -1
*/
__global__
void compute_individual_distance(const Event * dataset, const Event * ev, const int size_1, double * sum, int shared_el, int index_to_skip=-1);
/**
Function checking for CUDA runtime errors
@param result error code returns by CUDA functions
*/
inline cudaError_t checkCudaError(cudaError_t result);
/**
 Program entry point: delegates all work to the energy test wrapper and
 reports any std::runtime_error on stderr before terminating with a
 failure status.
*/
int main(int argc, char *argv[])
{
    try
    {
        return energy_test_gpu_wrapper(argc, argv);
    }
    catch (std::runtime_error & err)
    {
        std::cerr << err.what() << std::endl;
    }
    return EXIT_FAILURE;
}
/**
 Energy test driver: parses command line options, loads both datasets,
 computes the nominal test statistic T (optionally with per-event Ti
 contributions written to file), and, if requested, runs permutations of the
 pooled events to estimate a p-value for T.
 @param argc number of command line arguments
 @param argv array of command line arguments
 @return 0 on success, 1 on a command line parsing error
*/
int energy_test_gpu_wrapper(int argc, char ** argv)
{
    // Parsing command line arguments, setting up execution options
    args::ArgumentParser parser("GPU based energy test");
    args::HelpFlag help(parser, "help", "Display this help menu", {'h', "help"});
    args::Flag calculate_ti(parser, "calculate ti", "Calculate individual contributions to test statistic", {"calculate-ti"});
    args::Flag permutations_only(parser, "permutations only", "Only calculate permutations", {"permutations-only"});
    args::Flag output_write(parser, "output write", "write output Tvalues", {"output-write"});
    args::ValueFlag<size_t> n_permutations(parser, "n_permutations", "Number of permutations to run", {"n-permutations"});
    args::ValueFlag<size_t> max_events_1(parser, "max events 1", "Maximum number of events to use from dataset 1", {"max-events-1"});
    args::ValueFlag<size_t> max_events_2(parser, "max events 2", "Maximum number of events to use from dataset 2", {"max-events-2"});
    args::ValueFlag<size_t> max_events(parser, "max events", "Max number of events in each dataset", {"max-events"});
    args::ValueFlag<size_t> seed(parser, "seed", "seed for permutations", {"seed"});
    args::ValueFlag<size_t> max_permutation_events_1(parser, "max permutation events 1", "Max number of events in dataset 1 for permutations",
        {"max-permutation-events-1"});
    args::ValueFlag<std::string> ti_output_fn_1(parser, "ti output filename 1", "Filename for individual contributions to test statistic from dataset 1", {"ti-output-fn-1"});
    args::ValueFlag<std::string> ti_output_fn_2(parser, "ti output filename 2", "Filename for individual contributions to test statistic from dataset 2", {"ti-output-fn-2"});
    args::ValueFlag<std::string> permutation_ti_minmax_output_fn(parser, "permutation ti min-max filename", "Output filename for the minimum and maximum Ti values from permutations",
        {"permutation-ti-minmax-output-fn"});
    args::Positional<std::string> filename_1(parser, "dataset 1", "Filename for the first dataset");
    args::Positional<std::string> filename_2(parser, "dataset 2", "Filename for the second dataset");
    args::Positional<std::string> permutation_output_fn(parser, "permutation output filename", "Output filename for the permutation test statistics", {"permutation-output-fn"});
    try
    {
        parser.ParseCLI(argc, argv);
        //Check for necessary options
        if (!filename_1 || !filename_2)
        {
            throw args::ParseError("Direct paths to two dataset files must be given");
        }
        // --max-events sets both limits; combining it with per-dataset limits is ambiguous
        if ((max_events_1 || max_events_2) && max_events)
        {
            throw args::ParseError("--max-events cannot be used with --max-events-1 or --max-events-2");
        }
        // Ti bookkeeping below assumes permutation sets match the nominal dataset sizes
        if (calculate_ti && max_permutation_events_1)
        {
            throw args::ParseError("--calculate-ti cannot be used with --max-permutation-events-1");
        }
    }
    catch (args::Help)
    {
        std::cout << parser;
        return 0;
    }
    catch (args::ParseError & err)
    {
        std::cerr << err.what() << std::endl;
        std::cerr << parser;
        return 1;
    }
    // Displaying information about the device
    int deviceId;
    checkCudaError(cudaGetDevice(&deviceId));
    cudaDeviceProp prop;
    // NOTE(review): return value unchecked here, unlike the other runtime calls — confirm intended
    cudaGetDeviceProperties(&prop, deviceId);
    printf("\nDevice name: %s\n", prop.name);
    printf("Shared memory per block: %zu\n", prop.sharedMemPerBlock);
    printf("Threads per block: %d\n", prop.maxThreadsPerBlock);
    printf("Number of SMs: %d\n", prop.multiProcessorCount);
    printf("Shared memory per SM: %zu\n", prop.sharedMemPerMultiprocessor);
    printf("Max Block per SM: %d\n", prop.maxBlocksPerMultiProcessor);
    int val;
    cudaDeviceGetAttribute(&val, cudaDevAttrMaxThreadsPerBlock, deviceId);
    // NOTE(review): the attribute queried is max threads per *block*; the label says "per SM" — confirm
    printf("Max Threads per SM: %d\n\n", val);
    // Load specified number of events from data files
    size_t data_1_limit, data_2_limit;
    // Default: no limit — read every event in each file
    data_1_limit = data_2_limit = std::numeric_limits<size_t>::max();
    if (max_events)
    {
        data_1_limit = data_2_limit = args::get(max_events);
    }
    else
    {
        if (max_events_1)
        {
            data_1_limit = args::get(max_events_1);
        }
        if (max_events_2)
        {
            data_2_limit = args::get(max_events_2);
        }
    }
    //Read data from files
    thrust::host_vector<Event>dataset_1 = load_data(args::get(filename_1), data_1_limit);
    thrust::host_vector<Event>dataset_2 = load_data(args::get(filename_2), data_2_limit);
    int size_1 = static_cast<int>(dataset_1.size());
    int size_2 = static_cast<int>(dataset_2.size());
    std::cout << "Size of dataset 1: " << size_1 << std::endl;
    std::cout << "Size of dataset 2: " << size_2 << std::endl << std::endl;
    // Nominal test statistic; only assigned (and later read) when !permutations_only
    double t_value;
    if (!permutations_only)
    {
        // Calculate individual contributions to the test (Ti values)
        if (calculate_ti)
        {
            std::cout << "Calculating contributions of individual events to test statistic..." << std::endl;
            // One Ti array per dataset; T is the sum of all contributions
            double * tis_1 = compute_statistic_contributions(dataset_1, dataset_2, size_1, size_2, prop);
            double * tis_2 = compute_statistic_contributions(dataset_2, dataset_1, size_2, size_1, prop);
            double total = 0;
            for (int i=0; i < size_1; i++)
                total += tis_1[i];
            for (int i=0; i < size_2; i++)
                total += tis_2[i];
            t_value = total;
            std::cout << "Test statistic for nominal dataset:" << std::endl << " T = " << t_value << std::endl << std::endl;
            //write Ti values to file
            std::string ti_file_1;
            std::string ti_file_2;
            if (ti_output_fn_1)
            {
                ti_file_1 = args::get(ti_output_fn_1);
            }
            else
            {
                // Default filename encodes the dataset size
                ti_file_1 = "Ti_dataset_1_" + std::to_string(size_1) + ".txt";
            }
            if (ti_output_fn_2)
            {
                ti_file_2 = args::get(ti_output_fn_2);
            }
            else
            {
                ti_file_2 = "Ti_dataset_2_" + std::to_string(size_2) + ".txt";
            }
            std::ofstream file1(ti_file_1);
            if (!file1.is_open())
            {
                throw std::runtime_error("Cannot open file" + ti_file_1);
            }
            // One line per event: the five coordinates followed by its Ti value
            for (int i=0; i < size_1; i++)
            {
                file1 << dataset_1[i].s12 << " " << dataset_1[i].s13 << " " << dataset_1[i].s24 << " ";
                file1 << dataset_1[i].s34 << " " << dataset_1[i].s134 << " Ti = " << tis_1[i] << std::endl;
            }
            file1.close();
            std::cout << "Ti values for dataset 1 written to " << ti_file_1 << std::endl;
            std::ofstream file2(ti_file_2);
            if (!file2.is_open())
            {
                throw std::runtime_error("Cannot open file" + ti_file_2);
            }
            for (int i=0; i < size_2; i++)
            {
                file2 << dataset_2[i].s12 << " " << dataset_2[i].s13 << " " << dataset_2[i].s24 << " ";
                file2 << dataset_2[i].s34 << " " << dataset_2[i].s134 << " Ti = " << tis_2[i] << std::endl;
            }
            file2.close();
            std::cout << "Ti values for dataset 2 written to " << ti_file_2 << std::endl;
            // compute_statistic_contributions returns malloc'd arrays owned by the caller
            free(tis_1);
            free(tis_2);
        }
        // calculate T value for nominal datasets
        else
        {
            t_value = compute_statistic(dataset_1, dataset_2, size_1, size_2, prop);
            std::cout << "\nT = " << t_value << std::endl;
        }
    }
    if (n_permutations)
    {
        // Vector for permutations of events: pool of both datasets, reshuffled as needed
        thrust::host_vector<Event>permuted_events;
        permuted_events.insert(permuted_events.end(), dataset_1.begin(), dataset_1.end());
        permuted_events.insert(permuted_events.end(), dataset_2.begin(), dataset_2.end());
        // Number of permutations to run
        int N = static_cast<int>(args::get(n_permutations));
        // Number of events used in permutation
        int n_events_1 = size_1;
        int n_events_2 = size_2;
        if (max_permutation_events_1)
        {
            n_events_1 = std::min(n_events_1, static_cast<int>(args::get(max_permutation_events_1)));
            // Keep the dataset-size ratio when subsampling dataset 1
            n_events_2 = std::round(n_events_1 * (static_cast<double>(size_2) / static_cast<double>(size_1)));
        }
        // Scale factor comparing subsampled permutation statistics against the full-size t_value
        double factor = static_cast<double>(n_events_1 + n_events_2) / static_cast<double>(size_1 + size_2);
        // Random generator for shuffling
        int random_seed = static_cast<int>(seed ? args::get(seed) : std::mt19937::default_seed);
        thrust::default_random_engine random_generator(random_seed);
        // Output to files
        //T values
        std::string T_file;
        if (permutation_output_fn)
        {
            T_file = args::get(permutation_output_fn);
        }
        else
        {
            T_file = "T_values_permutations_" + std::to_string(size_1) + "_" + std::to_string(size_2) + "_" + std::to_string(random_seed) + ".txt";
        }
        std::ofstream t_file;
        if (output_write || permutation_output_fn)
        {
            t_file.open(T_file);
        }
        //T min/max values
        std::string T_minmax_file;
        if (permutation_ti_minmax_output_fn)
        {
            T_minmax_file = args::get(permutation_ti_minmax_output_fn);
        }
        else
        {
            T_minmax_file = "T_minmax_permutations_" + std::to_string(size_1) + "_" + std::to_string(size_2) + "_" + std::to_string(random_seed) + ".txt";
        }
        std::ofstream t_minmax_file;
        if (output_write || permutation_ti_minmax_output_fn)
        {
            t_minmax_file.open(T_minmax_file);
        }
        // p values
        std::ofstream p_file;
        if (!permutations_only)
        {
            p_file.open("pvalues.txt", std::iostream::out | std::iostream::app);//append if file exists
        }
        // Count of permutations whose (scaled) statistic exceeds the nominal one
        int nsig = 0;
        // Start past the end of the pool so the first iteration triggers a shuffle
        int skip = permuted_events.size()+1;
        std::cout << "Calculating " << N << " permutations" << std::endl;
        for (int i=0; i < N; i++)
        {
            // Slide a window over the shuffled pool; reshuffle only when exhausted
            if (skip + n_events_1 + n_events_2 > static_cast<int>(permuted_events.size()))//avoid unnecessary shuffling
            {
                thrust::shuffle(permuted_events.begin(), permuted_events.end(), random_generator);
                skip = 0;
            }
            //get permuted datasets
            thrust::host_vector<Event>permuted_set_1(permuted_events.begin() + skip, permuted_events.begin() + n_events_1 + skip);
            skip += n_events_1;
            thrust::host_vector<Event>permuted_set_2(permuted_events.begin() + skip, permuted_events.begin() + n_events_2 + skip);
            skip += n_events_2;
            double test_statistic;
            // Calculate Ti values for permuted set
            if (calculate_ti)
            {
                double * tis_1 = compute_statistic_contributions(permuted_set_1, permuted_set_2, n_events_1, n_events_2, prop);
                double * tis_2 = compute_statistic_contributions(permuted_set_2, permuted_set_1, n_events_2, n_events_1, prop);
                double total = 0;
                // (these inner loop counters shadow the outer permutation counter i)
                for (int i=0; i < n_events_1; i++)
                    total += tis_1[i];
                for (int i=0; i < n_events_2; i++)
                    total += tis_2[i];
                test_statistic = total;
                // Find min and max Ti values
                double ti_min = tis_1[0];
                double ti_max = tis_1[0];
                for (int i=1; i < n_events_1; i++)
                {
                    if (tis_1[i] > ti_max)
                        ti_max = tis_1[i];
                    if (tis_1[i] < ti_min)
                        ti_min = tis_1[i];
                }
                for (int i=0; i < n_events_2; i++)
                {
                    if (tis_2[i] > ti_max)
                        ti_max = tis_2[i];
                    if (tis_2[i] < ti_min)
                        ti_min = tis_2[i];
                }
                if (output_write || permutation_ti_minmax_output_fn)
                {
                    t_minmax_file << ti_min << " " << ti_max << std::endl;
                }
                free(tis_1);
                free(tis_2);
            }
            else
            {
                test_statistic = compute_statistic(permuted_set_1, permuted_set_2, n_events_1, n_events_2, prop, false);
            }
            if (output_write || permutation_output_fn)
            {
                t_file << test_statistic << std::endl;
            }
            if (!permutations_only)
            {
                // Count permutations at least as extreme as the nominal statistic
                if (factor * test_statistic > t_value)
                    nsig++; //used to calculate p-value
            }
        }
        if (!permutations_only)
        {
            // p-value = fraction of permutations exceeding the nominal statistic
            double p_value = static_cast<double>(nsig)/static_cast<double>(N);
            p_file << DELTA << " " << p_value << std::endl;
            std::cout << "p-value = " << p_value << std::endl;
        }
        // Closing streams that were never opened is harmless (only sets failbit)
        t_file.close();
        t_minmax_file.close();
        p_file.close();
    }
    return 0;
}
// Reads events from a whitespace-separated text file, one event per line in
// the order: s12 s13 s24 s34 s134. At most max_size events are loaded.
// The half squared magnitude e.hmg = 0.5 * sum(x_i^2) is precomputed per
// event so the distance kernels can form 0.5*|a-b|^2 from two hmg terms and
// a dot product.
// Throws std::runtime_error if the file cannot be opened or a line is malformed.
thrust::host_vector<Event> load_data(const std::string & file_name, const size_t max_size)
{
    std::ifstream file(file_name);
    if (!file.is_open())
    {
        // Fix: original message had no separator before the file name
        // ("Cannot open filedata.txt").
        throw std::runtime_error("Cannot open file: " + file_name);
    }
    thrust::host_vector<Event> events;
    // Reserve up front (capped at 500k) to avoid repeated reallocation.
    events.reserve(std::min(max_size, static_cast<size_t>(500000)));
    std::string line;
    while (std::getline(file, line) && events.size() < max_size)
    {
        Event e;
        std::istringstream is (line);
        // Extract the five kinematic values for this event.
        is >> e.s12 >> e.s13 >> e.s24 >> e.s34 >> e.s134;
        if (is.fail())
        {
            throw std::runtime_error("Error reading line in " + file_name);
        }
        // Cache 0.5*|v|^2 for the kernels' distance computation.
        e.hmg = 0.5 * (e.s12*e.s12 + e.s13*e.s13 + e.s24*e.s24 + e.s34*e.s34 + e.s134*e.s134);
        events.push_back(e);
    }
    return events;
}
// Computes the energy-test statistic T = d11 + d22 - d12, where d11 and d22
// are the normalized within-set Gaussian-kernel sums (self-pairs excluded by
// the kernel, each unordered pair counted twice, hence the 2*n*(n-1)
// normalization) and d12 is the normalized cross-set sum.
// prop supplies sharedMemPerBlock so events can be tiled into shared memory.
// show_distances: when true, prints the three normalized distance terms.
double compute_statistic(const thrust::host_vector<Event> & cpu_dataset_1, const thrust::host_vector <Event> & cpu_dataset_2, int size_1, int size_2, cudaDeviceProp & prop, bool show_distances)
{
    // Copy both datasets to the device.
    thrust::device_vector<Event> gpu_dataset_1 = cpu_dataset_1;
    thrust::device_vector<Event> gpu_dataset_2 = cpu_dataset_2;
    double dist_11, dist_22, dist_12;
    double *d_dist_11, *d_dist_22, *d_dist_12;
    checkCudaError(cudaMalloc(&d_dist_11, sizeof(double)));
    checkCudaError(cudaMalloc(&d_dist_22, sizeof(double)));
    checkCudaError(cudaMalloc(&d_dist_12, sizeof(double)));
    // Fix: cudaMemset takes an int byte value, not a double; 0 correctly
    // zeroes an IEEE-754 double (the original passed 0.0, silently converted).
    checkCudaError(cudaMemset(d_dist_11, 0, sizeof(double)));
    checkCudaError(cudaMemset(d_dist_22, 0, sizeof(double)));
    checkCudaError(cudaMemset(d_dist_12, 0, sizeof(double)));
    const int threads = 1024;
    const int max_shared_per_block = prop.sharedMemPerBlock;
    // Smallest power-of-two block count whose per-block tile of
    // ceil(n / blocks) events fits in shared memory; writes the tile size to
    // shared_el and returns the block count. (Replaces three copies of the
    // same loop in the original.)
    auto pick_config = [max_shared_per_block](int n, int & shared_el) {
        int blocks = 1;
        shared_el = n / blocks + (n % blocks ? 1 : 0);
        while (shared_el * static_cast<int>(sizeof(Event)) > max_shared_per_block)
        {
            blocks *= 2;
            shared_el = n / blocks + (n % blocks ? 1 : 0);
        }
        return blocks;
    };
    int shared_el, blocks;
    // Within-set 1 distances (diagonal pairs excluded inside the kernel).
    blocks = pick_config(size_1, shared_el);
    compute_distance<<<blocks, threads, shared_el * sizeof(Event)>>>(thrust::raw_pointer_cast(gpu_dataset_1.data()), thrust::raw_pointer_cast(gpu_dataset_1.data()), size_1, size_1, d_dist_11, shared_el, true);
    checkCudaError(cudaGetLastError()); // catch launch-configuration errors
    // Within-set 2 distances.
    blocks = pick_config(size_2, shared_el);
    compute_distance<<<blocks, threads, shared_el * sizeof(Event)>>>(thrust::raw_pointer_cast(gpu_dataset_2.data()), thrust::raw_pointer_cast(gpu_dataset_2.data()), size_2, size_2, d_dist_22, shared_el, true);
    checkCudaError(cudaGetLastError());
    // Cross distances; dataset 2 is the set tiled into shared memory.
    blocks = pick_config(size_2, shared_el);
    compute_distance<<<blocks, threads, shared_el * sizeof(Event)>>>(thrust::raw_pointer_cast(gpu_dataset_1.data()), thrust::raw_pointer_cast(gpu_dataset_2.data()), size_1, size_2, d_dist_12, shared_el, false);
    checkCudaError(cudaGetLastError());
    checkCudaError(cudaDeviceSynchronize());
    checkCudaError(cudaMemcpy(&dist_11, d_dist_11, sizeof(double), cudaMemcpyDeviceToHost));
    checkCudaError(cudaMemcpy(&dist_22, d_dist_22, sizeof(double), cudaMemcpyDeviceToHost));
    checkCudaError(cudaMemcpy(&dist_12, d_dist_12, sizeof(double), cudaMemcpyDeviceToHost));
    // The kernel sums over ordered pairs, so each unordered within-set pair is
    // counted twice — hence the factor 2 below.
    dist_11 /= (2.0 * size_1 * (size_1 - 1));
    dist_22 /= (2.0 * size_2 * (size_2 - 1));
    dist_12 /= (1.0 * size_1 * size_2);
    if (show_distances)
    {
        std::cout << "dist_11 = " << dist_11 << std::endl;
        std::cout << "dist_22 = " << dist_22 << std::endl;
        std::cout << "dist_12 = " << dist_12 << std::endl;
    }
    checkCudaError(cudaFree(d_dist_11));
    checkCudaError(cudaFree(d_dist_22));
    checkCudaError(cudaFree(d_dist_12));
    return dist_11 + dist_22 - dist_12;
}
// For each event i of dataset 1, computes its contribution T_i to the energy
// test statistic: its normalized kernel sum against the rest of dataset 1
// minus its normalized kernel sum against dataset 2.
// Returns a malloc()'d array of size_1 doubles; the CALLER MUST free() it
// (main() in this file does so with free()).
// Throws std::runtime_error if the host allocation fails.
double * compute_statistic_contributions(const thrust::host_vector<Event> & cpu_dataset_1, const thrust::host_vector<Event> & cpu_dataset_2, int size_1, int size_2, cudaDeviceProp & prop)
{
    double * statistic_contributions = (double *)malloc(size_1 * sizeof(double));
    if (statistic_contributions == NULL)
    {
        throw std::runtime_error("Cannot allocate host memory");
    }
    // Copy both datasets to the device.
    thrust::device_vector<Event> gpu_dataset_1 = cpu_dataset_1;
    thrust::device_vector<Event> gpu_dataset_2 = cpu_dataset_2;
    double * d_dist_e1, * d_dist_e2;
    checkCudaError(cudaMalloc(&d_dist_e1, sizeof(double)));
    checkCudaError(cudaMalloc(&d_dist_e2, sizeof(double)));
    double dist_e1, dist_e2;
    int blocks_1=1, blocks_2=1;
    int threads_1 = 1024;
    int threads_2 = 1024;
    int shared_el_1 = size_1/blocks_1;
    int shared_el_2 = size_2/blocks_2;
    if (size_1 % blocks_1) shared_el_1++;
    if (size_2 % blocks_2) shared_el_2++;
    // The kernel's shared tile holds the probe event plus the block's slice,
    // hence the +1 when sizing against available shared memory.
    while ((shared_el_1+1) * sizeof(Event) > prop.sharedMemPerBlock)
    {
        blocks_1 *= 2;
        shared_el_1 = size_1/blocks_1;
        if (size_1 % blocks_1) shared_el_1++;
    }
    while ((shared_el_2+1) * sizeof(Event) > prop.sharedMemPerBlock)
    {
        blocks_2 *= 2;
        shared_el_2 = size_2/blocks_2;
        if (size_2 % blocks_2) shared_el_2++;
    }
    Event * set_1_ptr = thrust::raw_pointer_cast(gpu_dataset_1.data());
    Event * set_2_ptr = thrust::raw_pointer_cast(gpu_dataset_2.data());
    for (int i=0; i < size_1; i++)
    {
        // Fix: cudaMemset takes an int byte value, not a double; 0 correctly
        // zeroes the device accumulators (original passed 0.0).
        checkCudaError(cudaMemset(d_dist_e1, 0, sizeof(double)));
        checkCudaError(cudaMemset(d_dist_e2, 0, sizeof(double)));
        // Event i against the rest of dataset 1; index i itself is skipped.
        compute_individual_distance<<<blocks_1, threads_1, (shared_el_1 + 1) * sizeof(Event)>>>(set_1_ptr, &set_1_ptr[i], size_1, d_dist_e1, shared_el_1+1, i);
        checkCudaError(cudaGetLastError()); // catch launch-configuration errors
        // Event i against all of dataset 2; nothing to skip (call relies on
        // the default index_to_skip argument declared earlier in the file).
        compute_individual_distance<<<blocks_2, threads_2, (shared_el_2 + 1) * sizeof(Event)>>>(set_2_ptr, &set_1_ptr[i], size_2, d_dist_e2, shared_el_2+1);
        checkCudaError(cudaGetLastError());
        checkCudaError(cudaDeviceSynchronize());
        checkCudaError(cudaMemcpy(&dist_e1, d_dist_e1, sizeof(double), cudaMemcpyDeviceToHost));
        checkCudaError(cudaMemcpy(&dist_e2, d_dist_e2, sizeof(double), cudaMemcpyDeviceToHost));
        dist_e1 /= 2.0 * size_1 * (size_1 - 1);
        dist_e2 /= 2.0 * size_1 * size_2;
        statistic_contributions[i] = dist_e1 - dist_e2;
    }
    checkCudaError(cudaFree(d_dist_e1));
    checkCudaError(cudaFree(d_dist_e2));
    return statistic_contributions;
}
// Accumulates into *sum (via atomicAdd) the Gaussian-kernel weights
// exp(-dist/DELTA^2) over all pairs (i, j), where i indexes dataset_1 and j
// indexes the slice of dataset_2 staged in shared memory by this block.
// Each block stages shared_el consecutive events of dataset_2 starting at
// blockIdx.x * shared_el; dynamic shared memory must hold shared_el Events.
// When same_sets is true the diagonal pair i == j is skipped so that
// self-distances are excluded from the sum.
// NOTE: atomicAdd on double requires SM60+.
__global__
void compute_distance(const Event * dataset_1, const Event * dataset_2, const int size_1, const int size_2, double * sum, int shared_el, bool same_sets)
{
    extern __shared__ Event tile[];
    const int stride = blockDim.x;
    const int tile_offset = blockIdx.x * shared_el;
    // Cooperative strided load of this block's slice of dataset_2.
    for (int t = threadIdx.x; t < shared_el && tile_offset + t < size_2; t += stride)
    {
        tile[t] = dataset_2[tile_offset + t];
    }
    __syncthreads(); // tile must be fully staged before any thread reads it
    double partial = 0;
    for (int i = threadIdx.x; i < size_1; i += stride)
    {
        const Event a = dataset_1[i];
        for (int j = 0; j < shared_el && tile_offset + j < size_2; j++)
        {
            if (same_sets && i == tile_offset + j)
                continue; // skip self-pairing when both sets are the same
            // Half squared Euclidean distance: hmg terms are 0.5*|v|^2,
            // so a.hmg + b.hmg - a.b == 0.5*|a-b|^2.
            double dist = a.hmg + tile[j].hmg -
                (a.s12*tile[j].s12 + a.s13*tile[j].s13 + a.s24*tile[j].s24 + a.s34*tile[j].s34 + a.s134*tile[j].s134);
            partial += exp(-dist/(DELTA*DELTA));
        }
    }
    atomicAdd(sum, partial); // one atomic per thread folds the partial in
}
// Accumulates into *sum (via atomicAdd) the Gaussian-kernel weights
// exp(-dist/DELTA^2) between the single probe event *ev and this block's
// slice of `dataset`.
// Shared-memory layout (dynamic, shared_el Events): slot 0 holds the probe
// event, slots 1..shared_el-1 hold (shared_el-1) consecutive dataset events
// starting at blockIdx.x * (shared_el-1) — callers pass shared_el = tile+1.
// index_to_skip: global dataset index excluded from the sum (used to skip the
// probe's own entry when `dataset` contains it). The 5-argument call sites in
// this file rely on a default for it declared elsewhere — NOTE(review): that
// declaration is not visible in this chunk; confirm its default value.
// NOTE: atomicAdd on double requires SM60+.
__global__
void compute_individual_distance(const Event * dataset, const Event * ev, const int size_1, double * sum, int shared_el, int index_to_skip)
{
    extern __shared__ Event event[];
    int thread = threadIdx.x;
    int block_size = blockDim.x;
    // Each block covers (shared_el-1) dataset elements; slot 0 is the probe.
    int start = blockIdx.x * (shared_el-1);
    if (thread == 0)
    {
        // Stage the probe event once per block into slot 0.
        event[0] = *ev;
    }
    // Cooperative strided load of the block's slice into slots 1..shared_el-1.
    while (thread+start < size_1 && thread+1 < shared_el)
    {
        event[thread+1] = dataset[thread+start];
        thread += block_size;
    }
    // Barrier: the tile (including slot 0) must be complete before reading.
    __syncthreads();
    double total=0;
    // i runs over shared slots 1..shared_el-1; i+start-1 is the corresponding
    // global dataset index.
    for (int i=threadIdx.x+1; i < shared_el && i+start-1 < size_1; i += block_size)
    {
        if (i+start-1 == index_to_skip)
            continue; // exclude the probe's own entry (self-distance)
        // Half squared Euclidean distance: hmg terms are 0.5*|v|^2, minus the
        // dot product gives 0.5*|probe - event|^2.
        double dist = event[0].hmg + event[i].hmg -
            (event[0].s12*event[i].s12 + event[0].s13*event[i].s13 + event[0].s24*event[i].s24 + event[0].s34*event[i].s34 + event[0].s134*event[i].s134);
        total += exp(-dist/(DELTA*DELTA));
    }
    // One atomic per thread folds the partial sum into the device accumulator.
    atomicAdd(sum, total);
}
// Checks the result of a CUDA runtime call; on failure prints the error
// string to stderr and terminates the process. Returns the result unchanged
// on success so calls can be wrapped inline.
// Fix: the original used assert(), which is compiled out under NDEBUG, so
// release builds would print the error and then keep running with corrupt
// state. Failure is now fatal unconditionally.
inline cudaError_t checkCudaError(cudaError_t result)
{
    if (result != cudaSuccess)
    {
        fprintf(stderr, "CUDA Error: %s\n", cudaGetErrorString(result));
        exit(EXIT_FAILURE); // exit/EXIT_FAILURE from <stdlib.h>, already used for malloc/free
    }
    return result;
}
// NOTE(review): removed trailing non-code residue (dataset web-page text)
// that was appended after the end of the translation unit and would not compile.