hip_filename stringlengths 5 84 | hip_content stringlengths 79 9.69M | cuda_filename stringlengths 4 83 | cuda_content stringlengths 19 9.69M |
|---|---|---|---|
f878bb509c650eece25a2e9e593595e40662bf58.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/operators/tile_op.h"
#include <array>
#include "caffe2/core/context_gpu.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
namespace {
template <typename T>
__global__ void TileCopyCUDAKernel(
const int total_size,
const int inner_size,
const int tiles,
const T* X,
T* Y) {
const int x = blockIdx.x * CAFFE_CUDA_NUM_THREADS + threadIdx.x;
if (x < total_size) {
const int r = x / inner_size / tiles;
const int c = x % inner_size;
#if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__)
Y[x] = __ldg(X + r * inner_size + c);
#else
Y[x] = X[r * inner_size + c];
#endif
}
}
} // namespace
template <>
template <typename T>
bool TileOp<CUDAContext>::DoTile(
const int outer_size,
const int inner_size,
const T* X,
T* Y) {
const std::int64_t total_size = static_cast<std::int64_t>(outer_size) *
static_cast<std::int64_t>(tiles_) * static_cast<std::int64_t>(inner_size);
const int M = math::DivUp<std::int64_t>(total_size, CAFFE_CUDA_NUM_THREADS);
hipLaunchKernelGGL(( TileCopyCUDAKernel<T>)
, dim3(M), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(),
total_size, inner_size, tiles_, X, Y);
return true;
}
template <>
template <typename T>
bool TileGradientOp<CUDAContext>::DoTileGradient(
const int outer_size,
const int inner_size,
const T* dY,
T* dX) {
const std::array<int, 3> dY_dims = {outer_size, tiles_, inner_size};
const std::array<int, 3> dX_dims = {outer_size, 1, inner_size};
math::ReduceSum<T, CUDAContext>(
3, dY_dims.data(), dX_dims.data(), T(1), dY, dX, &context_);
return true;
}
template <>
template <>
bool TileGradientOp<CUDAContext>::DoTileGradient<float>(
const int outer_size,
const int inner_size,
const float* dY,
float* dX) {
if (inner_size == 1) {
const std::array<int, 2> dY_dims = {outer_size, tiles_};
const std::array<int, 2> dX_dims = {outer_size, 1};
math::ReduceSum<float, CUDAContext>(
2, dY_dims.data(), dX_dims.data(), 1.0f, dY, dX, &context_);
} else {
ReinitializeTensor(&ones_, tiles_, at::dtype<float>().device(CUDA));
math::Set<float, CUDAContext>(
tiles_, 1.0f, ones_.template mutable_data<float>(), &context_);
math::GemmStridedBatched<float, CUDAContext>(
CblasTrans,
CblasNoTrans,
outer_size,
inner_size,
1,
tiles_,
1.0f,
dY,
tiles_ * inner_size,
ones_.template data<float>(),
0,
0.0f,
dX,
inner_size,
&context_);
}
return true;
}
REGISTER_CUDA_OPERATOR(Tile, TileOp<CUDAContext>);
REGISTER_CUDA_OPERATOR(TileGradient, TileGradientOp<CUDAContext>);
} // namespace caffe2
| f878bb509c650eece25a2e9e593595e40662bf58.cu | #include "caffe2/operators/tile_op.h"
#include <array>
#include "caffe2/core/context_gpu.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
namespace {
template <typename T>
__global__ void TileCopyCUDAKernel(
const int total_size,
const int inner_size,
const int tiles,
const T* X,
T* Y) {
const int x = blockIdx.x * CAFFE_CUDA_NUM_THREADS + threadIdx.x;
if (x < total_size) {
const int r = x / inner_size / tiles;
const int c = x % inner_size;
#if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__)
Y[x] = __ldg(X + r * inner_size + c);
#else
Y[x] = X[r * inner_size + c];
#endif
}
}
} // namespace
template <>
template <typename T>
bool TileOp<CUDAContext>::DoTile(
const int outer_size,
const int inner_size,
const T* X,
T* Y) {
const std::int64_t total_size = static_cast<std::int64_t>(outer_size) *
static_cast<std::int64_t>(tiles_) * static_cast<std::int64_t>(inner_size);
const int M = math::DivUp<std::int64_t>(total_size, CAFFE_CUDA_NUM_THREADS);
TileCopyCUDAKernel<T>
<<<M, CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(
total_size, inner_size, tiles_, X, Y);
return true;
}
template <>
template <typename T>
bool TileGradientOp<CUDAContext>::DoTileGradient(
const int outer_size,
const int inner_size,
const T* dY,
T* dX) {
const std::array<int, 3> dY_dims = {outer_size, tiles_, inner_size};
const std::array<int, 3> dX_dims = {outer_size, 1, inner_size};
math::ReduceSum<T, CUDAContext>(
3, dY_dims.data(), dX_dims.data(), T(1), dY, dX, &context_);
return true;
}
template <>
template <>
bool TileGradientOp<CUDAContext>::DoTileGradient<float>(
const int outer_size,
const int inner_size,
const float* dY,
float* dX) {
if (inner_size == 1) {
const std::array<int, 2> dY_dims = {outer_size, tiles_};
const std::array<int, 2> dX_dims = {outer_size, 1};
math::ReduceSum<float, CUDAContext>(
2, dY_dims.data(), dX_dims.data(), 1.0f, dY, dX, &context_);
} else {
ReinitializeTensor(&ones_, tiles_, at::dtype<float>().device(CUDA));
math::Set<float, CUDAContext>(
tiles_, 1.0f, ones_.template mutable_data<float>(), &context_);
math::GemmStridedBatched<float, CUDAContext>(
CblasTrans,
CblasNoTrans,
outer_size,
inner_size,
1,
tiles_,
1.0f,
dY,
tiles_ * inner_size,
ones_.template data<float>(),
0,
0.0f,
dX,
inner_size,
&context_);
}
return true;
}
REGISTER_CUDA_OPERATOR(Tile, TileOp<CUDAContext>);
REGISTER_CUDA_OPERATOR(TileGradient, TileGradientOp<CUDAContext>);
} // namespace caffe2
|
1b73f2997f89ef353bbf865c3583967beceba0f1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright 2016 Mikko Ronkainen <firstname@mikkoronkainen.com>
// License: MIT, see the LICENSE file.
#include "Utils/App.h"
#include "Utils/CudaUtils.h"
#include "Cuda/Renderer.h"
#include "Cuda/Camera.h"
#include "Cuda/Filtering.h"
#include "Cuda/Intersect.h"
#include "Cuda/Kernels.h"
#include "Cuda/Material.h"
#include "Cuda/Misc.h"
#include "Cuda/Math.h"
#include "Cuda/Random.h"
#include "Cuda/Sampling.h"
#include "Cuda/Structs.h"
using namespace Varjo;
void Renderer::initialize(const Scene& scene)
{
CameraData cameraData = scene.camera.getCameraData();
CudaUtils::checkError(hipMallocManaged(&camera, sizeof(CameraData)), "Could not allocate CUDA device memory");
CudaUtils::checkError(hipMallocManaged(&nodes, sizeof(BVHNode) * scene.nodes.size()), "Could not allocate CUDA device memory");
CudaUtils::checkError(hipMallocManaged(&triangles, sizeof(Triangle) * scene.triangles.size()), "Could not allocate CUDA device memory");
CudaUtils::checkError(hipMallocManaged(&emitters, sizeof(uint32_t) * scene.emitters.size()), "Could not allocate CUDA device memory");
CudaUtils::checkError(hipMallocManaged(&materials, sizeof(Material) * scene.materials.size()), "Could not allocate CUDA device memory");
CudaUtils::checkError(hipMallocManaged(&paths, sizeof(Paths)), "Could not allocate CUDA device memory");
CudaUtils::checkError(hipMallocManaged(&paths->random, sizeof(Random) * pathCount), "Could not allocate CUDA device memory");
CudaUtils::checkError(hipMallocManaged(&paths->filmSample, sizeof(Sample) * pathCount), "Could not allocate CUDA device memory");
CudaUtils::checkError(hipMallocManaged(&paths->filmSamplePosition, sizeof(float2) * pathCount), "Could not allocate CUDA device memory");
CudaUtils::checkError(hipMallocManaged(&paths->throughput, sizeof(float3) * pathCount), "Could not allocate CUDA device memory");
CudaUtils::checkError(hipMallocManaged(&paths->result, sizeof(float3) * pathCount), "Could not allocate CUDA device memory");
CudaUtils::checkError(hipMallocManaged(&paths->length, sizeof(uint32_t) * pathCount), "Could not allocate CUDA device memory");
CudaUtils::checkError(hipMallocManaged(&paths->extensionRay, sizeof(Ray) * pathCount), "Could not allocate CUDA device memory");
CudaUtils::checkError(hipMallocManaged(&paths->extensionIntersection, sizeof(Intersection) * pathCount), "Could not allocate CUDA device memory");
CudaUtils::checkError(hipMallocManaged(&paths->extensionBrdf, sizeof(float3) * pathCount), "Could not allocate CUDA device memory");
CudaUtils::checkError(hipMallocManaged(&paths->extensionBrdfPdf, sizeof(float) * pathCount), "Could not allocate CUDA device memory");
CudaUtils::checkError(hipMallocManaged(&paths->extensionCosine, sizeof(float) * pathCount), "Could not allocate CUDA device memory");
CudaUtils::checkError(hipMallocManaged(&paths->lightRay, sizeof(Ray) * pathCount), "Could not allocate CUDA device memory");
CudaUtils::checkError(hipMallocManaged(&paths->lightEmittance, sizeof(float3) * pathCount), "Could not allocate CUDA device memory");
CudaUtils::checkError(hipMallocManaged(&paths->lightBrdf, sizeof(float3) * pathCount), "Could not allocate CUDA device memory");
CudaUtils::checkError(hipMallocManaged(&paths->lightBrdfPdf, sizeof(float) * pathCount), "Could not allocate CUDA device memory");
CudaUtils::checkError(hipMallocManaged(&paths->lightPdf, sizeof(float) * pathCount), "Could not allocate CUDA device memory");
CudaUtils::checkError(hipMallocManaged(&paths->lightCosine, sizeof(float) * pathCount), "Could not allocate CUDA device memory");
CudaUtils::checkError(hipMallocManaged(&paths->lightRayBlocked, sizeof(bool) * pathCount), "Could not allocate CUDA device memory");
CudaUtils::checkError(hipMallocManaged(&queues, sizeof(Queues)), "Could not allocate CUDA device memory");
CudaUtils::checkError(hipMallocManaged(&queues->newPathQueue, sizeof(uint32_t) * pathCount), "Could not allocate CUDA device memory");
CudaUtils::checkError(hipMallocManaged(&queues->diffuseMaterialQueue, sizeof(uint32_t) * pathCount), "Could not allocate CUDA device memory");
CudaUtils::checkError(hipMallocManaged(&queues->extensionRayQueue, sizeof(uint32_t) * pathCount), "Could not allocate CUDA device memory");
CudaUtils::checkError(hipMallocManaged(&queues->lightRayQueue, sizeof(uint32_t) * pathCount), "Could not allocate CUDA device memory");
memcpy(camera, &cameraData, sizeof(CameraData));
memcpy(nodes, scene.nodes.data(), sizeof(BVHNode) * scene.nodes.size());
memcpy(triangles, scene.triangles.data(), sizeof(Triangle) * scene.triangles.size());
memcpy(emitters, scene.emitters.data(), sizeof(uint32_t) * scene.emitters.size());
memcpy(materials, scene.materials.data(), sizeof(Material) * scene.materials.size());
calculateDimensions(reinterpret_cast<void*>(initPathsKernel), "initPathsKernel", pathCount, initPathsBlockSize, initPathsGridSize);
calculateDimensions(reinterpret_cast<void*>(clearPathsKernel), "clearPathsKernel", pathCount, clearPathsBlockSize, clearPathsGridSize);
calculateDimensions(reinterpret_cast<void*>(logicKernel), "logicKernel", pathCount, logicBlockSize, logicGridSize);
calculateDimensions(reinterpret_cast<void*>(newPathKernel), "newPathKernel", pathCount, newPathBlockSize, newPathGridSize);
calculateDimensions(reinterpret_cast<void*>(diffuseMaterialKernel), "diffuseMaterialKernel", pathCount, diffuseMaterialBlockSize, diffuseMaterialGridSize);
calculateDimensions(reinterpret_cast<void*>(extensionRayKernel), "extensionRayKernel", pathCount, extensionRayBlockSize, extensionRayGridSize);
calculateDimensions(reinterpret_cast<void*>(lightRayKernel), "lightRayKernel", pathCount, lightRayBlockSize, lightRayGridSize);
uint64_t time = std::chrono::duration_cast<std::chrono::nanoseconds>(std::chrono::high_resolution_clock::now().time_since_epoch()).count();
hipLaunchKernelGGL(( initPathsKernel), dim3(initPathsGridSize), dim3(initPathsBlockSize), 0, 0, paths, time, pathCount);
CudaUtils::checkError(hipPeekAtLastError(), "Could not launch CUDA kernel (initPaths)");
CudaUtils::checkError(hipDeviceSynchronize(), "Could not execute CUDA kernel (initPaths)");
averagePathsPerSecond.setAlpha(0.05f);
averageRaysPerSecond.setAlpha(0.05f);
emitterCount = uint32_t(scene.emitters.size());
queues->newPathQueueLength = 0;
queues->diffuseMaterialQueueLength = 0;
queues->extensionRayQueueLength = 0;
queues->lightRayQueueLength = 0;
}
void Renderer::shutdown()
{
CudaUtils::checkError(hipFree(camera), "Could not free CUDA device memory");
CudaUtils::checkError(hipFree(nodes), "Could not free CUDA device memory");
CudaUtils::checkError(hipFree(triangles), "Could not free CUDA device memory");
CudaUtils::checkError(hipFree(emitters), "Could not free CUDA device memory");
CudaUtils::checkError(hipFree(materials), "Could not free CUDA device memory");
CudaUtils::checkError(hipFree(paths->random), "Could not free CUDA device memory");
CudaUtils::checkError(hipFree(paths->filmSample), "Could not free CUDA device memory");
CudaUtils::checkError(hipFree(paths->filmSamplePosition), "Could not free CUDA device memory");
CudaUtils::checkError(hipFree(paths->throughput), "Could not free CUDA device memory");
CudaUtils::checkError(hipFree(paths->result), "Could not free CUDA device memory");
CudaUtils::checkError(hipFree(paths->length), "Could not free CUDA device memory");
CudaUtils::checkError(hipFree(paths->extensionRay), "Could not free CUDA device memory");
CudaUtils::checkError(hipFree(paths->extensionIntersection), "Could not free CUDA device memory");
CudaUtils::checkError(hipFree(paths->extensionBrdf), "Could not free CUDA device memory");
CudaUtils::checkError(hipFree(paths->extensionBrdfPdf), "Could not free CUDA device memory");
CudaUtils::checkError(hipFree(paths->extensionCosine), "Could not free CUDA device memory");
CudaUtils::checkError(hipFree(paths->lightRay), "Could not free CUDA device memory");
CudaUtils::checkError(hipFree(paths->lightEmittance), "Could not free CUDA device memory");
CudaUtils::checkError(hipFree(paths->lightBrdf), "Could not free CUDA device memory");
CudaUtils::checkError(hipFree(paths->lightBrdfPdf), "Could not free CUDA device memory");
CudaUtils::checkError(hipFree(paths->lightPdf), "Could not free CUDA device memory");
CudaUtils::checkError(hipFree(paths->lightCosine), "Could not free CUDA device memory");
CudaUtils::checkError(hipFree(paths->lightRayBlocked), "Could not free CUDA device memory");
CudaUtils::checkError(hipFree(paths), "Could not free CUDA device memory");
CudaUtils::checkError(hipFree(queues->newPathQueue), "Could not free CUDA device memory");
CudaUtils::checkError(hipFree(queues->diffuseMaterialQueue), "Could not free CUDA device memory");
CudaUtils::checkError(hipFree(queues->extensionRayQueue), "Could not free CUDA device memory");
CudaUtils::checkError(hipFree(queues->lightRayQueue), "Could not free CUDA device memory");
CudaUtils::checkError(hipFree(queues), "Could not free CUDA device memory");
CudaUtils::checkError(hipFree(pixels), "Could not free CUDA device memory");
}
void Renderer::update(const Scene& scene)
{
CameraData cameraData = scene.camera.getCameraData();
memcpy(camera, &cameraData, sizeof(CameraData));
maxPathLength = scene.camera.isMoving() ? 2 : 8;
}
void Renderer::filmResized(uint32_t filmWidth, uint32_t filmHeight)
{
pixelCount = filmWidth * filmHeight;
if (pixels != nullptr)
CudaUtils::checkError(hipFree(pixels), "Could not free CUDA device memory");
CudaUtils::checkError(hipMallocManaged(&pixels, sizeof(Pixel) * pixelCount), "Could not allocate CUDA device memory");
calculateDimensions(reinterpret_cast<void*>(clearPixelsKernel), "clearPixelsKernel", pixelCount, clearPixelsBlockSize, clearPixelsGridSize);
calculateDimensions(reinterpret_cast<void*>(writeFilmKernel), "writeFilmKernel", pixelCount, writeFilmBlockSize, writeFilmGridSize);
clearFull();
}
void Renderer::clear()
{
hipLaunchKernelGGL(( clearPathsKernel), dim3(clearPathsGridSize), dim3(clearPathsBlockSize), 0, 0, paths, pathCount, false);
CudaUtils::checkError(hipPeekAtLastError(), "Could not launch CUDA kernel (clearPaths)");
CudaUtils::checkError(hipDeviceSynchronize(), "Could not execute CUDA kernel (clearPaths)");
hipLaunchKernelGGL(( clearPixelsKernel), dim3(clearPixelsGridSize), dim3(clearPixelsBlockSize), 0, 0, pixels, pixelCount);
CudaUtils::checkError(hipPeekAtLastError(), "Could not launch CUDA kernel (clearPixels)");
CudaUtils::checkError(hipDeviceSynchronize(), "Could not execute CUDA kernel (clearPixels)");
}
void Renderer::clearFull()
{
hipLaunchKernelGGL(( clearPathsKernel), dim3(clearPathsGridSize), dim3(clearPathsBlockSize), 0, 0, paths, pathCount, true);
CudaUtils::checkError(hipPeekAtLastError(), "Could not launch CUDA kernel (clearPaths)");
CudaUtils::checkError(hipDeviceSynchronize(), "Could not execute CUDA kernel (clearPaths)");
hipLaunchKernelGGL(( clearPixelsKernel), dim3(clearPixelsGridSize), dim3(clearPixelsBlockSize), 0, 0, pixels, pixelCount);
CudaUtils::checkError(hipPeekAtLastError(), "Could not launch CUDA kernel (clearPixels)");
CudaUtils::checkError(hipDeviceSynchronize(), "Could not execute CUDA kernel (clearPixels)");
}
void Renderer::render()
{
Film& film = App::getWindow().getFilm();
hipLaunchKernelGGL(( logicKernel), dim3(logicGridSize), dim3(logicBlockSize), 0, 0, paths, queues, triangles, emitters, materials, pixels, pathCount, emitterCount, film.getWidth(), film.getHeight(), maxPathLength);
hipLaunchKernelGGL(( newPathKernel), dim3(newPathGridSize), dim3(newPathBlockSize), 0, 0, paths, queues, camera, film.getWidth(), film.getHeight(), film.getLength());
hipLaunchKernelGGL(( diffuseMaterialKernel), dim3(diffuseMaterialGridSize), dim3(diffuseMaterialBlockSize), 0, 0, paths, queues, materials);
hipLaunchKernelGGL(( extensionRayKernel), dim3(extensionRayGridSize), dim3(extensionRayBlockSize), 0, 0, paths, queues, nodes, triangles);
hipLaunchKernelGGL(( lightRayKernel), dim3(lightRayGridSize), dim3(lightRayBlockSize), 0, 0, paths, queues, nodes, triangles);
hipSurfaceObject_t filmSurfaceObject = film.getFilmSurfaceObject();
hipLaunchKernelGGL(( writeFilmKernel), dim3(writeFilmGridSize), dim3(writeFilmBlockSize), 0, 0, pixels, pixelCount, filmSurfaceObject, film.getWidth());
CudaUtils::checkError(hipPeekAtLastError(), "Could not launch CUDA kernel (writeFilm)");
CudaUtils::checkError(hipDeviceSynchronize(), "Could not execute CUDA kernel (writeFilm)");
film.releaseFilmSurfaceObject();
float elapsedSeconds = timer.getElapsedSeconds();
averagePathsPerSecond.addMeasurement(float(queues->newPathQueueLength) / elapsedSeconds);
averageRaysPerSecond.addMeasurement(float(queues->extensionRayQueueLength + queues->lightRayQueueLength) / elapsedSeconds);
timer.restart();
queues->newPathQueueLength = 0;
queues->diffuseMaterialQueueLength = 0;
queues->extensionRayQueueLength = 0;
queues->lightRayQueueLength = 0;
}
float Renderer::getPathsPerSecond() const
{
return averagePathsPerSecond.getAverage();
}
float Renderer::getRaysPerSecond() const
{
return averageRaysPerSecond.getAverage();
}
| 1b73f2997f89ef353bbf865c3583967beceba0f1.cu | // Copyright © 2016 Mikko Ronkainen <firstname@mikkoronkainen.com>
// License: MIT, see the LICENSE file.
#include "Utils/App.h"
#include "Utils/CudaUtils.h"
#include "Cuda/Renderer.h"
#include "Cuda/Camera.h"
#include "Cuda/Filtering.h"
#include "Cuda/Intersect.h"
#include "Cuda/Kernels.h"
#include "Cuda/Material.h"
#include "Cuda/Misc.h"
#include "Cuda/Math.h"
#include "Cuda/Random.h"
#include "Cuda/Sampling.h"
#include "Cuda/Structs.h"
using namespace Varjo;
void Renderer::initialize(const Scene& scene)
{
CameraData cameraData = scene.camera.getCameraData();
CudaUtils::checkError(cudaMallocManaged(&camera, sizeof(CameraData)), "Could not allocate CUDA device memory");
CudaUtils::checkError(cudaMallocManaged(&nodes, sizeof(BVHNode) * scene.nodes.size()), "Could not allocate CUDA device memory");
CudaUtils::checkError(cudaMallocManaged(&triangles, sizeof(Triangle) * scene.triangles.size()), "Could not allocate CUDA device memory");
CudaUtils::checkError(cudaMallocManaged(&emitters, sizeof(uint32_t) * scene.emitters.size()), "Could not allocate CUDA device memory");
CudaUtils::checkError(cudaMallocManaged(&materials, sizeof(Material) * scene.materials.size()), "Could not allocate CUDA device memory");
CudaUtils::checkError(cudaMallocManaged(&paths, sizeof(Paths)), "Could not allocate CUDA device memory");
CudaUtils::checkError(cudaMallocManaged(&paths->random, sizeof(Random) * pathCount), "Could not allocate CUDA device memory");
CudaUtils::checkError(cudaMallocManaged(&paths->filmSample, sizeof(Sample) * pathCount), "Could not allocate CUDA device memory");
CudaUtils::checkError(cudaMallocManaged(&paths->filmSamplePosition, sizeof(float2) * pathCount), "Could not allocate CUDA device memory");
CudaUtils::checkError(cudaMallocManaged(&paths->throughput, sizeof(float3) * pathCount), "Could not allocate CUDA device memory");
CudaUtils::checkError(cudaMallocManaged(&paths->result, sizeof(float3) * pathCount), "Could not allocate CUDA device memory");
CudaUtils::checkError(cudaMallocManaged(&paths->length, sizeof(uint32_t) * pathCount), "Could not allocate CUDA device memory");
CudaUtils::checkError(cudaMallocManaged(&paths->extensionRay, sizeof(Ray) * pathCount), "Could not allocate CUDA device memory");
CudaUtils::checkError(cudaMallocManaged(&paths->extensionIntersection, sizeof(Intersection) * pathCount), "Could not allocate CUDA device memory");
CudaUtils::checkError(cudaMallocManaged(&paths->extensionBrdf, sizeof(float3) * pathCount), "Could not allocate CUDA device memory");
CudaUtils::checkError(cudaMallocManaged(&paths->extensionBrdfPdf, sizeof(float) * pathCount), "Could not allocate CUDA device memory");
CudaUtils::checkError(cudaMallocManaged(&paths->extensionCosine, sizeof(float) * pathCount), "Could not allocate CUDA device memory");
CudaUtils::checkError(cudaMallocManaged(&paths->lightRay, sizeof(Ray) * pathCount), "Could not allocate CUDA device memory");
CudaUtils::checkError(cudaMallocManaged(&paths->lightEmittance, sizeof(float3) * pathCount), "Could not allocate CUDA device memory");
CudaUtils::checkError(cudaMallocManaged(&paths->lightBrdf, sizeof(float3) * pathCount), "Could not allocate CUDA device memory");
CudaUtils::checkError(cudaMallocManaged(&paths->lightBrdfPdf, sizeof(float) * pathCount), "Could not allocate CUDA device memory");
CudaUtils::checkError(cudaMallocManaged(&paths->lightPdf, sizeof(float) * pathCount), "Could not allocate CUDA device memory");
CudaUtils::checkError(cudaMallocManaged(&paths->lightCosine, sizeof(float) * pathCount), "Could not allocate CUDA device memory");
CudaUtils::checkError(cudaMallocManaged(&paths->lightRayBlocked, sizeof(bool) * pathCount), "Could not allocate CUDA device memory");
CudaUtils::checkError(cudaMallocManaged(&queues, sizeof(Queues)), "Could not allocate CUDA device memory");
CudaUtils::checkError(cudaMallocManaged(&queues->newPathQueue, sizeof(uint32_t) * pathCount), "Could not allocate CUDA device memory");
CudaUtils::checkError(cudaMallocManaged(&queues->diffuseMaterialQueue, sizeof(uint32_t) * pathCount), "Could not allocate CUDA device memory");
CudaUtils::checkError(cudaMallocManaged(&queues->extensionRayQueue, sizeof(uint32_t) * pathCount), "Could not allocate CUDA device memory");
CudaUtils::checkError(cudaMallocManaged(&queues->lightRayQueue, sizeof(uint32_t) * pathCount), "Could not allocate CUDA device memory");
memcpy(camera, &cameraData, sizeof(CameraData));
memcpy(nodes, scene.nodes.data(), sizeof(BVHNode) * scene.nodes.size());
memcpy(triangles, scene.triangles.data(), sizeof(Triangle) * scene.triangles.size());
memcpy(emitters, scene.emitters.data(), sizeof(uint32_t) * scene.emitters.size());
memcpy(materials, scene.materials.data(), sizeof(Material) * scene.materials.size());
calculateDimensions(reinterpret_cast<void*>(initPathsKernel), "initPathsKernel", pathCount, initPathsBlockSize, initPathsGridSize);
calculateDimensions(reinterpret_cast<void*>(clearPathsKernel), "clearPathsKernel", pathCount, clearPathsBlockSize, clearPathsGridSize);
calculateDimensions(reinterpret_cast<void*>(logicKernel), "logicKernel", pathCount, logicBlockSize, logicGridSize);
calculateDimensions(reinterpret_cast<void*>(newPathKernel), "newPathKernel", pathCount, newPathBlockSize, newPathGridSize);
calculateDimensions(reinterpret_cast<void*>(diffuseMaterialKernel), "diffuseMaterialKernel", pathCount, diffuseMaterialBlockSize, diffuseMaterialGridSize);
calculateDimensions(reinterpret_cast<void*>(extensionRayKernel), "extensionRayKernel", pathCount, extensionRayBlockSize, extensionRayGridSize);
calculateDimensions(reinterpret_cast<void*>(lightRayKernel), "lightRayKernel", pathCount, lightRayBlockSize, lightRayGridSize);
uint64_t time = std::chrono::duration_cast<std::chrono::nanoseconds>(std::chrono::high_resolution_clock::now().time_since_epoch()).count();
initPathsKernel<<<initPathsGridSize, initPathsBlockSize>>>(paths, time, pathCount);
CudaUtils::checkError(cudaPeekAtLastError(), "Could not launch CUDA kernel (initPaths)");
CudaUtils::checkError(cudaDeviceSynchronize(), "Could not execute CUDA kernel (initPaths)");
averagePathsPerSecond.setAlpha(0.05f);
averageRaysPerSecond.setAlpha(0.05f);
emitterCount = uint32_t(scene.emitters.size());
queues->newPathQueueLength = 0;
queues->diffuseMaterialQueueLength = 0;
queues->extensionRayQueueLength = 0;
queues->lightRayQueueLength = 0;
}
void Renderer::shutdown()
{
CudaUtils::checkError(cudaFree(camera), "Could not free CUDA device memory");
CudaUtils::checkError(cudaFree(nodes), "Could not free CUDA device memory");
CudaUtils::checkError(cudaFree(triangles), "Could not free CUDA device memory");
CudaUtils::checkError(cudaFree(emitters), "Could not free CUDA device memory");
CudaUtils::checkError(cudaFree(materials), "Could not free CUDA device memory");
CudaUtils::checkError(cudaFree(paths->random), "Could not free CUDA device memory");
CudaUtils::checkError(cudaFree(paths->filmSample), "Could not free CUDA device memory");
CudaUtils::checkError(cudaFree(paths->filmSamplePosition), "Could not free CUDA device memory");
CudaUtils::checkError(cudaFree(paths->throughput), "Could not free CUDA device memory");
CudaUtils::checkError(cudaFree(paths->result), "Could not free CUDA device memory");
CudaUtils::checkError(cudaFree(paths->length), "Could not free CUDA device memory");
CudaUtils::checkError(cudaFree(paths->extensionRay), "Could not free CUDA device memory");
CudaUtils::checkError(cudaFree(paths->extensionIntersection), "Could not free CUDA device memory");
CudaUtils::checkError(cudaFree(paths->extensionBrdf), "Could not free CUDA device memory");
CudaUtils::checkError(cudaFree(paths->extensionBrdfPdf), "Could not free CUDA device memory");
CudaUtils::checkError(cudaFree(paths->extensionCosine), "Could not free CUDA device memory");
CudaUtils::checkError(cudaFree(paths->lightRay), "Could not free CUDA device memory");
CudaUtils::checkError(cudaFree(paths->lightEmittance), "Could not free CUDA device memory");
CudaUtils::checkError(cudaFree(paths->lightBrdf), "Could not free CUDA device memory");
CudaUtils::checkError(cudaFree(paths->lightBrdfPdf), "Could not free CUDA device memory");
CudaUtils::checkError(cudaFree(paths->lightPdf), "Could not free CUDA device memory");
CudaUtils::checkError(cudaFree(paths->lightCosine), "Could not free CUDA device memory");
CudaUtils::checkError(cudaFree(paths->lightRayBlocked), "Could not free CUDA device memory");
CudaUtils::checkError(cudaFree(paths), "Could not free CUDA device memory");
CudaUtils::checkError(cudaFree(queues->newPathQueue), "Could not free CUDA device memory");
CudaUtils::checkError(cudaFree(queues->diffuseMaterialQueue), "Could not free CUDA device memory");
CudaUtils::checkError(cudaFree(queues->extensionRayQueue), "Could not free CUDA device memory");
CudaUtils::checkError(cudaFree(queues->lightRayQueue), "Could not free CUDA device memory");
CudaUtils::checkError(cudaFree(queues), "Could not free CUDA device memory");
CudaUtils::checkError(cudaFree(pixels), "Could not free CUDA device memory");
}
void Renderer::update(const Scene& scene)
{
CameraData cameraData = scene.camera.getCameraData();
memcpy(camera, &cameraData, sizeof(CameraData));
maxPathLength = scene.camera.isMoving() ? 2 : 8;
}
void Renderer::filmResized(uint32_t filmWidth, uint32_t filmHeight)
{
pixelCount = filmWidth * filmHeight;
if (pixels != nullptr)
CudaUtils::checkError(cudaFree(pixels), "Could not free CUDA device memory");
CudaUtils::checkError(cudaMallocManaged(&pixels, sizeof(Pixel) * pixelCount), "Could not allocate CUDA device memory");
calculateDimensions(reinterpret_cast<void*>(clearPixelsKernel), "clearPixelsKernel", pixelCount, clearPixelsBlockSize, clearPixelsGridSize);
calculateDimensions(reinterpret_cast<void*>(writeFilmKernel), "writeFilmKernel", pixelCount, writeFilmBlockSize, writeFilmGridSize);
clearFull();
}
void Renderer::clear()
{
clearPathsKernel<<<clearPathsGridSize, clearPathsBlockSize>>>(paths, pathCount, false);
CudaUtils::checkError(cudaPeekAtLastError(), "Could not launch CUDA kernel (clearPaths)");
CudaUtils::checkError(cudaDeviceSynchronize(), "Could not execute CUDA kernel (clearPaths)");
clearPixelsKernel<<<clearPixelsGridSize, clearPixelsBlockSize>>>(pixels, pixelCount);
CudaUtils::checkError(cudaPeekAtLastError(), "Could not launch CUDA kernel (clearPixels)");
CudaUtils::checkError(cudaDeviceSynchronize(), "Could not execute CUDA kernel (clearPixels)");
}
void Renderer::clearFull()
{
clearPathsKernel<<<clearPathsGridSize, clearPathsBlockSize>>>(paths, pathCount, true);
CudaUtils::checkError(cudaPeekAtLastError(), "Could not launch CUDA kernel (clearPaths)");
CudaUtils::checkError(cudaDeviceSynchronize(), "Could not execute CUDA kernel (clearPaths)");
clearPixelsKernel<<<clearPixelsGridSize, clearPixelsBlockSize>>>(pixels, pixelCount);
CudaUtils::checkError(cudaPeekAtLastError(), "Could not launch CUDA kernel (clearPixels)");
CudaUtils::checkError(cudaDeviceSynchronize(), "Could not execute CUDA kernel (clearPixels)");
}
void Renderer::render()
{
Film& film = App::getWindow().getFilm();
logicKernel<<<logicGridSize, logicBlockSize>>>(paths, queues, triangles, emitters, materials, pixels, pathCount, emitterCount, film.getWidth(), film.getHeight(), maxPathLength);
newPathKernel<<<newPathGridSize, newPathBlockSize>>>(paths, queues, camera, film.getWidth(), film.getHeight(), film.getLength());
diffuseMaterialKernel<<<diffuseMaterialGridSize, diffuseMaterialBlockSize>>>(paths, queues, materials);
extensionRayKernel<<<extensionRayGridSize, extensionRayBlockSize>>>(paths, queues, nodes, triangles);
lightRayKernel<<<lightRayGridSize, lightRayBlockSize>>>(paths, queues, nodes, triangles);
cudaSurfaceObject_t filmSurfaceObject = film.getFilmSurfaceObject();
writeFilmKernel<<<writeFilmGridSize, writeFilmBlockSize>>>(pixels, pixelCount, filmSurfaceObject, film.getWidth());
CudaUtils::checkError(cudaPeekAtLastError(), "Could not launch CUDA kernel (writeFilm)");
CudaUtils::checkError(cudaDeviceSynchronize(), "Could not execute CUDA kernel (writeFilm)");
film.releaseFilmSurfaceObject();
float elapsedSeconds = timer.getElapsedSeconds();
averagePathsPerSecond.addMeasurement(float(queues->newPathQueueLength) / elapsedSeconds);
averageRaysPerSecond.addMeasurement(float(queues->extensionRayQueueLength + queues->lightRayQueueLength) / elapsedSeconds);
timer.restart();
queues->newPathQueueLength = 0;
queues->diffuseMaterialQueueLength = 0;
queues->extensionRayQueueLength = 0;
queues->lightRayQueueLength = 0;
}
float Renderer::getPathsPerSecond() const
{
return averagePathsPerSecond.getAverage();
}
float Renderer::getRaysPerSecond() const
{
return averageRaysPerSecond.getAverage();
}
|
669b4b2b4f008be586c7456ca25c82f8ac93f6e5.hip | // !!! This is a file automatically generated by hipify!!!
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author raver119@gmail.com
//
#include "testlayers.h"
#include <array/NDArray.h>
#include <array/NDArrayFactory.h>
#include <graph/Context.h>
#include <graph/Node.h>
#include <graph/Variable.h>
#include <graph/VariableSpace.h>
#include <execution/LaunchContext.h>
#include <ops/specials_cuda.h>
#include <helpers/TAD.h>
#include <hip/hip_runtime.h>
using namespace sd;
using namespace sd::graph;
class NDArrayConstructorsTests : public testing::Test {
public:
};
TEST_F(NDArrayConstructorsTests, test_constructor_1) {
auto x = NDArrayFactory::empty_<float>();
ASSERT_TRUE(x->buffer() == nullptr);
ASSERT_TRUE(x->specialBuffer() == nullptr);
ASSERT_FALSE(x->shapeInfo() == nullptr);
ASSERT_FALSE(x->specialShapeInfo() == nullptr);
ASSERT_TRUE(x->isActualOnDeviceSide());
ASSERT_TRUE(x->isActualOnHostSide());
delete x;
}
TEST_F(NDArrayConstructorsTests, test_constructor_2) {
auto x = NDArrayFactory::vector<float>(5, 1.0f);
ASSERT_FALSE(x->buffer() == nullptr);
ASSERT_FALSE(x->specialBuffer() == nullptr);
ASSERT_FALSE(x->shapeInfo() == nullptr);
ASSERT_FALSE(x->specialShapeInfo() == nullptr);
ASSERT_TRUE(x->isActualOnDeviceSide());
ASSERT_FALSE(x->isActualOnHostSide());
delete x;
}
TEST_F(NDArrayConstructorsTests, test_constructor_3) {
auto x = NDArrayFactory::create<float>('c',{5, 5});
ASSERT_TRUE(x.buffer() == nullptr);
ASSERT_FALSE(x.specialBuffer() == nullptr);
ASSERT_FALSE(x.shapeInfo() == nullptr);
ASSERT_FALSE(x.specialShapeInfo() == nullptr);
ASSERT_TRUE(x.isActualOnDeviceSide());
ASSERT_FALSE(x.isActualOnHostSide());
}
TEST_F(NDArrayConstructorsTests, test_constructor_4) {
auto x = NDArrayFactory::create(sd::DataType::FLOAT32, 1.0f);
ASSERT_FALSE(x.buffer() == nullptr);
ASSERT_FALSE(x.specialBuffer() == nullptr);
ASSERT_FALSE(x.shapeInfo() == nullptr);
ASSERT_FALSE(x.specialShapeInfo() == nullptr);
ASSERT_TRUE(x.isActualOnDeviceSide());
ASSERT_TRUE(x.isActualOnHostSide());
}
TEST_F(NDArrayConstructorsTests, test_constructor_5) {
auto x = NDArrayFactory::create<double>('c',{2, 2}, {1, 2, 3, 4});
ASSERT_TRUE(x.buffer() == nullptr);
ASSERT_FALSE(x.specialBuffer() == nullptr);
ASSERT_FALSE(x.shapeInfo() == nullptr);
ASSERT_FALSE(x.specialShapeInfo() == nullptr);
ASSERT_TRUE(x.isActualOnDeviceSide());
ASSERT_FALSE(x.isActualOnHostSide());
}
TEST_F(NDArrayConstructorsTests, test_constructor_6) {
auto x = NDArrayFactory::create<double>('c', {2, 2}, {1, 2, 3, 4});
NDArray y(x);
ASSERT_TRUE(y.buffer() == nullptr);
ASSERT_FALSE(y.specialBuffer() == nullptr);
ASSERT_FALSE(y.shapeInfo() == nullptr);
ASSERT_FALSE(y.specialShapeInfo() == nullptr);
ASSERT_TRUE(y.isActualOnDeviceSide());
ASSERT_FALSE(y.isActualOnHostSide());
}
TEST_F(NDArrayConstructorsTests, test_constructor_7) {
auto x = NDArrayFactory::create<float>(1.0f);
ASSERT_FALSE(x.buffer() == nullptr);
ASSERT_FALSE(x.specialBuffer() == nullptr);
ASSERT_FALSE(x.shapeInfo() == nullptr);
ASSERT_FALSE(x.specialShapeInfo() == nullptr);
ASSERT_TRUE(x.isActualOnDeviceSide());
ASSERT_TRUE(x.isActualOnHostSide());
}
TEST_F(NDArrayConstructorsTests, test_constructor_8) {
auto x = NDArrayFactory::create_<double>('c',{2, 2}, {1, 2, 3, 4});
ASSERT_TRUE(x->buffer() == nullptr);
ASSERT_FALSE(x->specialBuffer() == nullptr);
ASSERT_FALSE(x->shapeInfo() == nullptr);
ASSERT_FALSE(x->specialShapeInfo() == nullptr);
ASSERT_TRUE(x->isActualOnDeviceSide());
ASSERT_FALSE(x->isActualOnHostSide());
delete x;
}
TEST_F(NDArrayConstructorsTests, test_constructor_9) {
auto x = NDArrayFactory::create_<double>('c',{2, 2});
ASSERT_TRUE(x->buffer() == nullptr);
ASSERT_FALSE(x->specialBuffer() == nullptr);
ASSERT_FALSE(x->shapeInfo() == nullptr);
ASSERT_FALSE(x->specialShapeInfo() == nullptr);
ASSERT_TRUE(x->isActualOnDeviceSide());
ASSERT_FALSE(x->isActualOnHostSide());
delete x;
}
TEST_F(NDArrayConstructorsTests, test_linspace_1) {
auto x = NDArrayFactory::linspace<float>(1.0f, 10.0f, 20);
ASSERT_FALSE(x->buffer() == nullptr);
ASSERT_FALSE(x->specialBuffer() == nullptr);
ASSERT_FALSE(x->shapeInfo() == nullptr);
ASSERT_FALSE(x->specialShapeInfo() == nullptr);
ASSERT_TRUE(x->isActualOnDeviceSide());
ASSERT_TRUE(x->isActualOnHostSide());
delete x;
}
TEST_F(NDArrayConstructorsTests, test_constructor_10) {
NDArray scalar1(sd::DataType::DOUBLE); // scalar1 = 0
NDArray scalar2('c', {}, std::vector<double>{0});
ASSERT_TRUE(scalar1.isActualOnDeviceSide());
ASSERT_TRUE(!scalar1.isActualOnHostSide());
ASSERT_TRUE(scalar2.isActualOnDeviceSide());
ASSERT_TRUE(scalar2.isActualOnHostSide());
ASSERT_TRUE(scalar2.equalsTo(scalar1));
ASSERT_TRUE(scalar1.isActualOnDeviceSide());
ASSERT_TRUE(!scalar1.isActualOnHostSide());
ASSERT_TRUE(scalar2.isActualOnDeviceSide());
ASSERT_TRUE(scalar2.isActualOnHostSide());
ASSERT_TRUE(scalar1.getBuffer() == nullptr);
ASSERT_TRUE(scalar1.getSpecialBuffer() != nullptr);
ASSERT_TRUE(scalar1.getShapeInfo() != nullptr);
ASSERT_TRUE(scalar1.getSpecialShapeInfo() != nullptr);
ASSERT_TRUE(scalar1.lengthOf() == 1);
} | 669b4b2b4f008be586c7456ca25c82f8ac93f6e5.cu | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author raver119@gmail.com
//
#include "testlayers.h"
#include <array/NDArray.h>
#include <array/NDArrayFactory.h>
#include <graph/Context.h>
#include <graph/Node.h>
#include <graph/Variable.h>
#include <graph/VariableSpace.h>
#include <execution/LaunchContext.h>
#include <ops/specials_cuda.h>
#include <helpers/TAD.h>
#include <cuda.h>
using namespace sd;
using namespace sd::graph;
class NDArrayConstructorsTests : public testing::Test {
public:
};
TEST_F(NDArrayConstructorsTests, test_constructor_1) {
auto x = NDArrayFactory::empty_<float>();
ASSERT_TRUE(x->buffer() == nullptr);
ASSERT_TRUE(x->specialBuffer() == nullptr);
ASSERT_FALSE(x->shapeInfo() == nullptr);
ASSERT_FALSE(x->specialShapeInfo() == nullptr);
ASSERT_TRUE(x->isActualOnDeviceSide());
ASSERT_TRUE(x->isActualOnHostSide());
delete x;
}
TEST_F(NDArrayConstructorsTests, test_constructor_2) {
auto x = NDArrayFactory::vector<float>(5, 1.0f);
ASSERT_FALSE(x->buffer() == nullptr);
ASSERT_FALSE(x->specialBuffer() == nullptr);
ASSERT_FALSE(x->shapeInfo() == nullptr);
ASSERT_FALSE(x->specialShapeInfo() == nullptr);
ASSERT_TRUE(x->isActualOnDeviceSide());
ASSERT_FALSE(x->isActualOnHostSide());
delete x;
}
TEST_F(NDArrayConstructorsTests, test_constructor_3) {
auto x = NDArrayFactory::create<float>('c',{5, 5});
ASSERT_TRUE(x.buffer() == nullptr);
ASSERT_FALSE(x.specialBuffer() == nullptr);
ASSERT_FALSE(x.shapeInfo() == nullptr);
ASSERT_FALSE(x.specialShapeInfo() == nullptr);
ASSERT_TRUE(x.isActualOnDeviceSide());
ASSERT_FALSE(x.isActualOnHostSide());
}
TEST_F(NDArrayConstructorsTests, test_constructor_4) {
auto x = NDArrayFactory::create(sd::DataType::FLOAT32, 1.0f);
ASSERT_FALSE(x.buffer() == nullptr);
ASSERT_FALSE(x.specialBuffer() == nullptr);
ASSERT_FALSE(x.shapeInfo() == nullptr);
ASSERT_FALSE(x.specialShapeInfo() == nullptr);
ASSERT_TRUE(x.isActualOnDeviceSide());
ASSERT_TRUE(x.isActualOnHostSide());
}
TEST_F(NDArrayConstructorsTests, test_constructor_5) {
auto x = NDArrayFactory::create<double>('c',{2, 2}, {1, 2, 3, 4});
ASSERT_TRUE(x.buffer() == nullptr);
ASSERT_FALSE(x.specialBuffer() == nullptr);
ASSERT_FALSE(x.shapeInfo() == nullptr);
ASSERT_FALSE(x.specialShapeInfo() == nullptr);
ASSERT_TRUE(x.isActualOnDeviceSide());
ASSERT_FALSE(x.isActualOnHostSide());
}
TEST_F(NDArrayConstructorsTests, test_constructor_6) {
auto x = NDArrayFactory::create<double>('c', {2, 2}, {1, 2, 3, 4});
NDArray y(x);
ASSERT_TRUE(y.buffer() == nullptr);
ASSERT_FALSE(y.specialBuffer() == nullptr);
ASSERT_FALSE(y.shapeInfo() == nullptr);
ASSERT_FALSE(y.specialShapeInfo() == nullptr);
ASSERT_TRUE(y.isActualOnDeviceSide());
ASSERT_FALSE(y.isActualOnHostSide());
}
TEST_F(NDArrayConstructorsTests, test_constructor_7) {
auto x = NDArrayFactory::create<float>(1.0f);
ASSERT_FALSE(x.buffer() == nullptr);
ASSERT_FALSE(x.specialBuffer() == nullptr);
ASSERT_FALSE(x.shapeInfo() == nullptr);
ASSERT_FALSE(x.specialShapeInfo() == nullptr);
ASSERT_TRUE(x.isActualOnDeviceSide());
ASSERT_TRUE(x.isActualOnHostSide());
}
TEST_F(NDArrayConstructorsTests, test_constructor_8) {
auto x = NDArrayFactory::create_<double>('c',{2, 2}, {1, 2, 3, 4});
ASSERT_TRUE(x->buffer() == nullptr);
ASSERT_FALSE(x->specialBuffer() == nullptr);
ASSERT_FALSE(x->shapeInfo() == nullptr);
ASSERT_FALSE(x->specialShapeInfo() == nullptr);
ASSERT_TRUE(x->isActualOnDeviceSide());
ASSERT_FALSE(x->isActualOnHostSide());
delete x;
}
TEST_F(NDArrayConstructorsTests, test_constructor_9) {
auto x = NDArrayFactory::create_<double>('c',{2, 2});
ASSERT_TRUE(x->buffer() == nullptr);
ASSERT_FALSE(x->specialBuffer() == nullptr);
ASSERT_FALSE(x->shapeInfo() == nullptr);
ASSERT_FALSE(x->specialShapeInfo() == nullptr);
ASSERT_TRUE(x->isActualOnDeviceSide());
ASSERT_FALSE(x->isActualOnHostSide());
delete x;
}
TEST_F(NDArrayConstructorsTests, test_linspace_1) {
auto x = NDArrayFactory::linspace<float>(1.0f, 10.0f, 20);
ASSERT_FALSE(x->buffer() == nullptr);
ASSERT_FALSE(x->specialBuffer() == nullptr);
ASSERT_FALSE(x->shapeInfo() == nullptr);
ASSERT_FALSE(x->specialShapeInfo() == nullptr);
ASSERT_TRUE(x->isActualOnDeviceSide());
ASSERT_TRUE(x->isActualOnHostSide());
delete x;
}
TEST_F(NDArrayConstructorsTests, test_constructor_10) {
NDArray scalar1(sd::DataType::DOUBLE); // scalar1 = 0
NDArray scalar2('c', {}, std::vector<double>{0});
ASSERT_TRUE(scalar1.isActualOnDeviceSide());
ASSERT_TRUE(!scalar1.isActualOnHostSide());
ASSERT_TRUE(scalar2.isActualOnDeviceSide());
ASSERT_TRUE(scalar2.isActualOnHostSide());
ASSERT_TRUE(scalar2.equalsTo(scalar1));
ASSERT_TRUE(scalar1.isActualOnDeviceSide());
ASSERT_TRUE(!scalar1.isActualOnHostSide());
ASSERT_TRUE(scalar2.isActualOnDeviceSide());
ASSERT_TRUE(scalar2.isActualOnHostSide());
ASSERT_TRUE(scalar1.getBuffer() == nullptr);
ASSERT_TRUE(scalar1.getSpecialBuffer() != nullptr);
ASSERT_TRUE(scalar1.getShapeInfo() != nullptr);
ASSERT_TRUE(scalar1.getSpecialShapeInfo() != nullptr);
ASSERT_TRUE(scalar1.lengthOf() == 1);
} |
5948a2a0524c97fd8402cbbca2f9b3fc259cf923.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void k3(const int N, int* augPath, bool* visited, int* frontier, bool* new_frontier, bool* par_mat, int* cap_mat, bool* adj_mat, int* cap_max_mat, int* maxflow, bool* augFound) {
augFound[0] = false;
//Find the augmented path
augPath[0] = N - 1;
int i = 1, vertex = N - 1;
while(vertex != 0) {
for(int j = 0; j < N; j++) {
if(par_mat[vertex * N + j]) {
vertex = j;
augPath[i] = vertex;
i++;
break;
}
}
}
//Compute the bottleneck for the augmented path
int bottleneck = -1;
for(int i = 0; i < N; i++) {
if(augPath[i] == 0)
break;
else {
int k = augPath[i];
int j = augPath[i + 1];
int freeCap;
if(adj_mat[j * N + k]) {
freeCap = cap_max_mat[j * N + k] - cap_mat[j * N + k];
} else {
freeCap = cap_mat[k * N + j];
}
if(bottleneck == -1)
bottleneck = freeCap;
else if(freeCap < bottleneck)
bottleneck = freeCap;
}
}
maxflow[0] += bottleneck;
//Update capacities in d_cap_mat
for(int i = 0; i < N; i++) {
if(augPath[i] == 0)
break;
else {
int k = augPath[i];
int j = augPath[i + 1];
if(adj_mat[j * N + k]) {
cap_mat[j * N + k] += bottleneck;
} else {
cap_mat[k * N + j] -= bottleneck;
}
}
}
//Initialize par_mat
for(int i=0;i<N*N;i++)
par_mat[i] = false;
//Initialize visited and frontier
for(int i=0;i<N;i++) visited[i] = false;
for(int i=0;i<N;i++) new_frontier[i] = false;
visited[0] = true;
frontier[0] = 1;
frontier[1] = 0;
} | 5948a2a0524c97fd8402cbbca2f9b3fc259cf923.cu | #include "includes.h"
__global__ void k3(const int N, int* augPath, bool* visited, int* frontier, bool* new_frontier, bool* par_mat, int* cap_mat, bool* adj_mat, int* cap_max_mat, int* maxflow, bool* augFound) {
augFound[0] = false;
//Find the augmented path
augPath[0] = N - 1;
int i = 1, vertex = N - 1;
while(vertex != 0) {
for(int j = 0; j < N; j++) {
if(par_mat[vertex * N + j]) {
vertex = j;
augPath[i] = vertex;
i++;
break;
}
}
}
//Compute the bottleneck for the augmented path
int bottleneck = -1;
for(int i = 0; i < N; i++) {
if(augPath[i] == 0)
break;
else {
int k = augPath[i];
int j = augPath[i + 1];
int freeCap;
if(adj_mat[j * N + k]) {
freeCap = cap_max_mat[j * N + k] - cap_mat[j * N + k];
} else {
freeCap = cap_mat[k * N + j];
}
if(bottleneck == -1)
bottleneck = freeCap;
else if(freeCap < bottleneck)
bottleneck = freeCap;
}
}
maxflow[0] += bottleneck;
//Update capacities in d_cap_mat
for(int i = 0; i < N; i++) {
if(augPath[i] == 0)
break;
else {
int k = augPath[i];
int j = augPath[i + 1];
if(adj_mat[j * N + k]) {
cap_mat[j * N + k] += bottleneck;
} else {
cap_mat[k * N + j] -= bottleneck;
}
}
}
//Initialize par_mat
for(int i=0;i<N*N;i++)
par_mat[i] = false;
//Initialize visited and frontier
for(int i=0;i<N;i++) visited[i] = false;
for(int i=0;i<N;i++) new_frontier[i] = false;
visited[0] = true;
frontier[0] = 1;
frontier[1] = 0;
} |
5bb75eec8f4c2c0f82ebf7ef9438591e6d9a97d9.hip | // !!! This is a file automatically generated by hipify!!!
#include <torch/extension.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
using torch::Tensor;
__device__ __inline__ c10::Half __shfl_down_sync(const unsigned mask, const c10::Half var,
const unsigned int delta, const int width) {
__half var_ = var;
return __shfl_down_sync(mask, var_, delta, width);
}
__device__ __inline__ c10::Half __shfl_sync(const unsigned mask, const c10::Half var,
const unsigned int delta, const int width) {
__half var_ = var;
return __shfl_sync(mask, var_, delta, width);
}
template <typename scalar_t>
__global__ void minimax_cuda_kernel(const scalar_t* __restrict__ data,
scalar_t* __restrict__ min,
scalar_t* __restrict__ max,
int64_t N,
int64_t D) {
scalar_t max_val, min_val;
max_val = -1e30;
min_val = 1e30;
for (int64_t k1_outer = 0; k1_outer < D / 32; ++k1_outer) {
max_val = ::max(max_val, data[blockIdx.x * D + k1_outer * 32 + threadIdx.x]);
min_val = ::min(min_val, data[blockIdx.x * D + k1_outer * 32 + threadIdx.x]);
}
unsigned int mask;
scalar_t max_val_t, min_val_t;
mask = __activemask();
max_val_t = __shfl_down_sync(mask, max_val, 16, 32);
max_val = ::max(max_val, max_val_t);
max_val_t = __shfl_down_sync(mask, max_val, 8, 32);
max_val = ::max(max_val, max_val_t);
max_val_t = __shfl_down_sync(mask, max_val, 4, 32);
max_val = ::max(max_val, max_val_t);
max_val_t = __shfl_down_sync(mask, max_val, 2, 32);
max_val = ::max(max_val, max_val_t);
max_val_t = __shfl_down_sync(mask, max_val, 1, 32);
max_val = ::max(max_val, max_val_t);
max_val = __shfl_sync(mask, max_val, 0, 32);
max[blockIdx.x] = max_val;
min_val_t = __shfl_down_sync(mask, min_val, 16, 32);
min_val = ::min(min_val, min_val_t);
min_val_t = __shfl_down_sync(mask, min_val, 8, 32);
min_val = ::min(min_val, min_val_t);
min_val_t = __shfl_down_sync(mask, min_val, 4, 32);
min_val = ::min(min_val, min_val_t);
min_val_t = __shfl_down_sync(mask, min_val, 2, 32);
min_val = ::min(min_val, min_val_t);
min_val_t = __shfl_down_sync(mask, min_val, 1, 32);
min_val = ::min(min_val, min_val_t);
min_val = __shfl_sync(mask, min_val, 0, 32);
min[blockIdx.x] = min_val;
}
std::pair<Tensor, Tensor> minimax_cuda(torch::Tensor data) {
int64_t N = data.size(0);
int64_t D = data.size(1);
auto options = torch::TensorOptions().dtype(data.dtype()).device(data.device());
Tensor min = torch::empty({N,}, options);
Tensor max = torch::empty({N,}, options);
int blocks = N;
int threads = 32;
TORCH_CHECK(D % 32 == 0 && D > 32);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(data.scalar_type(), "minimax_cuda", ([&] {
hipLaunchKernelGGL(( minimax_cuda_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0,
data.data_ptr<scalar_t>(), min.data_ptr<scalar_t>(), max.data_ptr<scalar_t>(),
N, D);
}));
return std::make_pair(min, max);
}
| 5bb75eec8f4c2c0f82ebf7ef9438591e6d9a97d9.cu | #include <torch/extension.h>
#include <cuda.h>
#include <cuda_runtime.h>
using torch::Tensor;
__device__ __inline__ c10::Half __shfl_down_sync(const unsigned mask, const c10::Half var,
const unsigned int delta, const int width) {
__half var_ = var;
return __shfl_down_sync(mask, var_, delta, width);
}
__device__ __inline__ c10::Half __shfl_sync(const unsigned mask, const c10::Half var,
const unsigned int delta, const int width) {
__half var_ = var;
return __shfl_sync(mask, var_, delta, width);
}
template <typename scalar_t>
__global__ void minimax_cuda_kernel(const scalar_t* __restrict__ data,
scalar_t* __restrict__ min,
scalar_t* __restrict__ max,
int64_t N,
int64_t D) {
scalar_t max_val, min_val;
max_val = -1e30;
min_val = 1e30;
for (int64_t k1_outer = 0; k1_outer < D / 32; ++k1_outer) {
max_val = std::max(max_val, data[blockIdx.x * D + k1_outer * 32 + threadIdx.x]);
min_val = std::min(min_val, data[blockIdx.x * D + k1_outer * 32 + threadIdx.x]);
}
unsigned int mask;
scalar_t max_val_t, min_val_t;
mask = __activemask();
max_val_t = __shfl_down_sync(mask, max_val, 16, 32);
max_val = std::max(max_val, max_val_t);
max_val_t = __shfl_down_sync(mask, max_val, 8, 32);
max_val = std::max(max_val, max_val_t);
max_val_t = __shfl_down_sync(mask, max_val, 4, 32);
max_val = std::max(max_val, max_val_t);
max_val_t = __shfl_down_sync(mask, max_val, 2, 32);
max_val = std::max(max_val, max_val_t);
max_val_t = __shfl_down_sync(mask, max_val, 1, 32);
max_val = std::max(max_val, max_val_t);
max_val = __shfl_sync(mask, max_val, 0, 32);
max[blockIdx.x] = max_val;
min_val_t = __shfl_down_sync(mask, min_val, 16, 32);
min_val = std::min(min_val, min_val_t);
min_val_t = __shfl_down_sync(mask, min_val, 8, 32);
min_val = std::min(min_val, min_val_t);
min_val_t = __shfl_down_sync(mask, min_val, 4, 32);
min_val = std::min(min_val, min_val_t);
min_val_t = __shfl_down_sync(mask, min_val, 2, 32);
min_val = std::min(min_val, min_val_t);
min_val_t = __shfl_down_sync(mask, min_val, 1, 32);
min_val = std::min(min_val, min_val_t);
min_val = __shfl_sync(mask, min_val, 0, 32);
min[blockIdx.x] = min_val;
}
std::pair<Tensor, Tensor> minimax_cuda(torch::Tensor data) {
int64_t N = data.size(0);
int64_t D = data.size(1);
auto options = torch::TensorOptions().dtype(data.dtype()).device(data.device());
Tensor min = torch::empty({N,}, options);
Tensor max = torch::empty({N,}, options);
int blocks = N;
int threads = 32;
TORCH_CHECK(D % 32 == 0 && D > 32);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(data.scalar_type(), "minimax_cuda", ([&] {
minimax_cuda_kernel<scalar_t><<<blocks, threads>>>(
data.data_ptr<scalar_t>(), min.data_ptr<scalar_t>(), max.data_ptr<scalar_t>(),
N, D);
}));
return std::make_pair(min, max);
}
|
0234bf8d6b31aa6bdd161bb660c3ee073fe473f4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "DS_timer.h"
#pragma warning(disable : 4996)
#define BLOCK_SIZE (8)
__global__ void matrixMulti(float *_a, float *_b, float *_c, int m, int n, int k) {
int dx = blockIdx.x * blockDim.x + threadIdx.x;
int dy = blockIdx.y * blockDim.y + threadIdx.y;
float sum = 0.;
if(dy < m && dx < n) {
for(int index_k = 0 ; index_k < k ; index_k++) {
sum += _a[dy * k + index_k] * _b[index_k * n + dx];
}
_c[dy * n + dx] = sum;
}
}
void serial(float *a, float *b, float *c, int m, int n, int k)
{
for (int index_m = 0; index_m < m; index_m++) {
for (int index_n = 0; index_n < n; index_n++) {
c[index_m * n + index_n] = 0.0;
for (int index_k = 0; index_k < k; index_k++) {
c[index_m * n + index_n] += a[index_m * k + index_k] * b[index_k * n + index_n];
}
}
}
}
void check(float *a, float *b, float *c, int m, int n, int k, int C_SIZE)
{
bool result = true;
float * temp = new float[C_SIZE];
for (int index_m = 0; index_m < m; index_m++) {
for (int index_n = 0; index_n < n; index_n++) {
temp[index_m * n + index_n] = 0.0;
for (int index_k = 0; index_k < k; index_k++) {
temp[index_m * n + index_n] += a[index_m * k + index_k] * b[index_k * n + index_n];
}
}
}
for (int i = 0; i < C_SIZE; i++) {
if (temp[i] - c[i] > 1.0) {
printf("[%d] The resutls is not matched! (c: %.2f, temp: %.2f)\n", i, c[i], temp[i]);
result = false;
}
if (c[i] - temp[i] > 1.0) {
printf("[%d] The resutls is not matched! (c: %.2f, temp: %.2f)\n", i, c[i], temp[i]);
result = false;
}
}
if (result)
printf("works well!\n");
else
printf("not work\n");
}
int main() {
DS_timer timer(4);
timer.initTimers();
int m, n, k;
printf("m, n, k=");
scanf("%d %d %d", &m, &n, &k);
float *a, *b, *c;
float *d_a, *d_b, *d_c;
int A_SIZE = m * k;
int B_SIZE = k * n;
int C_SIZE = m * n;
int A_MEM_SIZE = A_SIZE * sizeof(float);
int B_MEM_SIZE = B_SIZE * sizeof(float);
int C_MEM_SIZE = C_SIZE * sizeof(float);
a = new float[A_SIZE]; memset(a, 0, A_MEM_SIZE);
b = new float[B_SIZE]; memset(b, 0, B_MEM_SIZE);
c = new float[C_SIZE]; memset(c, 0, C_MEM_SIZE);
for (int i = 0; i < A_SIZE; i++) {
a[i] = (rand() % 100) / 10.0 + 1;
}
for (int i = 0; i < B_SIZE; i++) {
b[i] = (rand() % 100) / 10.0 + 1;
}
// serial
timer.onTimer(0);
serial(a, b, c, m, n, k);
timer.offTimer(0);
// serial result validation check
check(a, b, c, m, n, k, C_SIZE);
// device global memory allocation
timer.onTimer(1);
hipMalloc(&d_a, A_MEM_SIZE);
hipMalloc(&d_b, B_MEM_SIZE);
hipMalloc(&d_c, C_MEM_SIZE);
// memory cpy Host to Device
hipMemcpy(d_a, a, A_MEM_SIZE, hipMemcpyHostToDevice);
hipMemcpy(d_b, b, B_MEM_SIZE, hipMemcpyHostToDevice);
timer.offTimer(1);
dim3 dimGrid((n+BLOCK_SIZE-1)/BLOCK_SIZE, (m+BLOCK_SIZE-1)/BLOCK_SIZE);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
timer.onTimer(2);
hipLaunchKernelGGL(( matrixMulti) , dim3(dimGrid), dim3(dimBlock) , 0, 0, d_a, d_b, d_c, m, n, k);
hipDeviceSynchronize();
timer.offTimer(2);
timer.onTimer(3);
hipMemcpy(c, d_c, C_MEM_SIZE, hipMemcpyDeviceToHost);
timer.offTimer(3);
check(a, b, c, m, n, k, C_SIZE);
// timer display
timer.printTimer();
hipFree(d_a); hipFree(d_b); hipFree(d_c);
delete[] a, delete[] b, delete[] c;
return 0;
}
| 0234bf8d6b31aa6bdd161bb660c3ee073fe473f4.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "DS_timer.h"
#pragma warning(disable : 4996)
#define BLOCK_SIZE (8)
__global__ void matrixMulti(float *_a, float *_b, float *_c, int m, int n, int k) {
int dx = blockIdx.x * blockDim.x + threadIdx.x;
int dy = blockIdx.y * blockDim.y + threadIdx.y;
float sum = 0.;
if(dy < m && dx < n) {
for(int index_k = 0 ; index_k < k ; index_k++) {
sum += _a[dy * k + index_k] * _b[index_k * n + dx];
}
_c[dy * n + dx] = sum;
}
}
void serial(float *a, float *b, float *c, int m, int n, int k)
{
for (int index_m = 0; index_m < m; index_m++) {
for (int index_n = 0; index_n < n; index_n++) {
c[index_m * n + index_n] = 0.0;
for (int index_k = 0; index_k < k; index_k++) {
c[index_m * n + index_n] += a[index_m * k + index_k] * b[index_k * n + index_n];
}
}
}
}
void check(float *a, float *b, float *c, int m, int n, int k, int C_SIZE)
{
bool result = true;
float * temp = new float[C_SIZE];
for (int index_m = 0; index_m < m; index_m++) {
for (int index_n = 0; index_n < n; index_n++) {
temp[index_m * n + index_n] = 0.0;
for (int index_k = 0; index_k < k; index_k++) {
temp[index_m * n + index_n] += a[index_m * k + index_k] * b[index_k * n + index_n];
}
}
}
for (int i = 0; i < C_SIZE; i++) {
if (temp[i] - c[i] > 1.0) {
printf("[%d] The resutls is not matched! (c: %.2f, temp: %.2f)\n", i, c[i], temp[i]);
result = false;
}
if (c[i] - temp[i] > 1.0) {
printf("[%d] The resutls is not matched! (c: %.2f, temp: %.2f)\n", i, c[i], temp[i]);
result = false;
}
}
if (result)
printf("works well!\n");
else
printf("not work\n");
}
int main() {
DS_timer timer(4);
timer.initTimers();
int m, n, k;
printf("m, n, k=");
scanf("%d %d %d", &m, &n, &k);
float *a, *b, *c;
float *d_a, *d_b, *d_c;
int A_SIZE = m * k;
int B_SIZE = k * n;
int C_SIZE = m * n;
int A_MEM_SIZE = A_SIZE * sizeof(float);
int B_MEM_SIZE = B_SIZE * sizeof(float);
int C_MEM_SIZE = C_SIZE * sizeof(float);
a = new float[A_SIZE]; memset(a, 0, A_MEM_SIZE);
b = new float[B_SIZE]; memset(b, 0, B_MEM_SIZE);
c = new float[C_SIZE]; memset(c, 0, C_MEM_SIZE);
for (int i = 0; i < A_SIZE; i++) {
a[i] = (rand() % 100) / 10.0 + 1;
}
for (int i = 0; i < B_SIZE; i++) {
b[i] = (rand() % 100) / 10.0 + 1;
}
// serial
timer.onTimer(0);
serial(a, b, c, m, n, k);
timer.offTimer(0);
// serial result validation check
check(a, b, c, m, n, k, C_SIZE);
// device global memory allocation
timer.onTimer(1);
cudaMalloc(&d_a, A_MEM_SIZE);
cudaMalloc(&d_b, B_MEM_SIZE);
cudaMalloc(&d_c, C_MEM_SIZE);
// memory cpy Host to Device
cudaMemcpy(d_a, a, A_MEM_SIZE, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, B_MEM_SIZE, cudaMemcpyHostToDevice);
timer.offTimer(1);
dim3 dimGrid((n+BLOCK_SIZE-1)/BLOCK_SIZE, (m+BLOCK_SIZE-1)/BLOCK_SIZE);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
timer.onTimer(2);
matrixMulti <<< dimGrid, dimBlock >>> (d_a, d_b, d_c, m, n, k);
cudaThreadSynchronize();
timer.offTimer(2);
timer.onTimer(3);
cudaMemcpy(c, d_c, C_MEM_SIZE, cudaMemcpyDeviceToHost);
timer.offTimer(3);
check(a, b, c, m, n, k, C_SIZE);
// timer display
timer.printTimer();
cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
delete[] a, delete[] b, delete[] c;
return 0;
}
|
a88a0199f20316698d09041602572edaf693dba1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <sys/time.h>
#include <unistd.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/stat.h>
#include <getopt.h>
#include <stdint.h>
#include <vector>
#include <sys/types.h>
#include <math.h>
#include "hash.h"
#include "checksum.h"
void recalcu(int chunk_size, int chunk_num, int *stat, int jump_pos, int file_len, int total_threads, char *h_file, int *match_offset, int *match_chunkid, Node *ht, int j);
uint32 h_get_checksum1(char *buf1, int32 len, uint32 *d_s1, uint32 *d_s2);
Node *lookup_ht(Node *ht, int32 rc, int *chunk_id);
__device__ uint32 d_get_checksum1(char *buf1, int32 len, uint32 *d_s1, uint32 *d_s2);
__device__ void d_get_checksum2(char *buf, int32 len, char *sum);
__device__ void MD5Init(MD5_CTX *context);
__device__ void MD5Update(MD5_CTX *context, char *input,unsigned int inputlen);
__device__ void MD5Transform(unsigned int state[4], char block[64]);
__device__ void MD5Decode(unsigned int *output, char *input,unsigned int len);
__device__ void MD5Encode(char *output,unsigned int *input,unsigned int len);
__device__ void MD5Final(MD5_CTX *context, char digest[16]);
__device__ uint d_hash(uint32 rc);
__device__ Node *d_lookup_ht(Node *ht, int32 rc, int *chunk_id);
__constant__ char PADDING[]={(char)0x80,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
__global__ void oldfile_match_build(char *dFileContent, int *dmatchIdArray, int chunk_size, int chunk_num, int *dMatchOffset, int *dMatchChunkid, char *d_new_file, int totalChunkNum, int *multi_match_array, int *multi_match_num){
int thread_id = blockIdx.x * blockDim.x + threadIdx.x;
if(thread_id < totalChunkNum){
int match_pos = thread_id;
int chunk_id = dMatchChunkid[match_pos];
if(chunk_id == -1) return;
if(dmatchIdArray[chunk_id] != -1){
int old_file_pos = dMatchOffset[match_pos];
int new_file_pos = chunk_id * chunk_size;
memcpy(&d_new_file[new_file_pos], &dFileContent[old_file_pos], chunk_size);
}
else{
int old = atomicAdd(multi_match_num, 1);
multi_match_array[old] = match_pos;
}
}
}
__global__ void multiwarp_match(Node *ht, char *file, size_t file_len, int total_threads, int chunk_size, int chunk_num,
int *match_offset, int *match_chunkid, int *stat)
{
int thread_id = blockIdx.x * blockDim.x + threadIdx.x;
int fileBeginPos = chunk_num*chunk_size*thread_id;
int chunkBeginPos = chunk_num*thread_id;
//printf("block %d, thread %d, id %d, filebeginpos %d, total file len %d\n",blockIdx.x,threadIdx.x, thread_id, fileBeginPos, file_len);
if(fileBeginPos < file_len){
int recalcu = 1;
uint32 rc;
int chunk_id;
int match_num = 0;
int i = 0;
uint32 s1 = 0, s2 = 0;
//the char in the head of a chunk, it can be used to store as the unmatch value and use to recalcu
char chunk_head_value;
int length = chunk_size;
length = chunk_size;
for(; i < chunk_size*chunk_num;){
//chunk_size
if(fileBeginPos+i>file_len-chunk_size){
length = file_len-fileBeginPos-i;
}
if(recalcu == 1) rc = d_get_checksum1(&file[fileBeginPos + i], length, &s1, &s2);
else if(recalcu == 0){
s1 -= chunk_head_value + CHAR_OFFSET;
s2 -= chunk_size * (chunk_head_value + CHAR_OFFSET);
s1 += file[fileBeginPos+i+length-1] + CHAR_OFFSET;
s2 += s1;
rc = (s1 & 0xffff) + (s2 << 16);
}
chunk_head_value = file[fileBeginPos+i];
Node *np = d_lookup_ht(ht, rc, &chunk_id);
if(np == NULL){
recalcu = 0;
i += 1;
}
else{
char sum2[16];
bool md5match = true;
d_get_checksum2(&file[fileBeginPos+i], length, sum2);
while(1){
md5match = true;
for(int j=0;j<16;++j){
if(sum2[j]!=np->md5[j]){
md5match = false;
break;
}
}
if(md5match){
match_chunkid[chunkBeginPos + match_num] = np->chunk_id;
match_offset[chunkBeginPos + match_num] = fileBeginPos + i;
match_num ++;
recalcu = 1;
i += chunk_size;
break;
}
else{
np = np->next;
if(np == NULL){
recalcu = 0;
i += 1;
break;
}
}
}
}
}
//record match_num
stat[thread_id] = match_num;
}
}
__global__ void gpu_recalcu(Node *ht, char *file, int chunk_size, int chunk_num, int *match_offset, int *match_chunkid, int *stat, int region_size)
{
int thread_id = blockIdx.x * blockDim.x + threadIdx.x;
int start_t = thread_id * region_size;
//printf("thread %d start recalcu on %d thread, region size %d\n", thread_id, start_t, region_size);
for(int i=start_t; i<start_t+region_size-1; ++i){
//printf("thread %d recalcu on its %d thread\n", thread_id, i-start_t);
int t_match_num = stat[i];
int j = i+1;
int jump_pos = match_offset[chunk_num*i+t_match_num-1]+chunk_size;
if(t_match_num > 0 && stat[j] > 0 && jump_pos > match_offset[chunk_num*j]){
//std::cout << "thread " << start_t << " need recalcu" << std::endl;
int match_index = 0;
int recalcu = 1;
int chunk_id;
int j_match_num = stat[j];
int j_match_begin = chunk_num*j;
char chunk_head_value;
uint32 s1, s2, rc;
while(1){
if(recalcu == 1) rc = d_get_checksum1(&file[jump_pos], chunk_size, &s1, &s2);
else if(recalcu == 0){
s1 -= chunk_head_value + CHAR_OFFSET;
s2 -= chunk_size * (chunk_head_value + CHAR_OFFSET);
s1 += file[jump_pos+chunk_size-1] + CHAR_OFFSET;
s2 += s1;
rc = (s1 & 0xffff) + (s2 << 16);
}
while(jump_pos > match_offset[j_match_begin+match_index]){
if(match_index < j_match_num){
match_chunkid[j_match_begin+match_index] = -1;
stat[j]--;
match_index++;
}
else break;
}
if(jump_pos == match_offset[j_match_begin+match_index]) break;
Node *np = d_lookup_ht(ht, rc, &chunk_id);
if(np == NULL){
recalcu = 0;
jump_pos += 1;
}
else{
char sum2[16];
bool md5match = true;
d_get_checksum2(&file[jump_pos], chunk_size, sum2);
while(1){
md5match = true;
for(int k=0;k<16;++k){
if(sum2[k]!=np->md5[k]){
md5match = false;
break;
}
}
if(md5match){
for(int k=j_match_begin;k<j_match_begin+chunk_num;++k){
if(match_chunkid[k]==-1 || jump_pos+chunk_size > match_offset[k]){
match_offset[k] = jump_pos;
match_chunkid[k] = chunk_id;
stat[j]++;
break;
}
else{
printf("error in 324 in new_file.cu\n");
}
}
jump_pos += chunk_size;
recalcu = 1;
break;
}
else{
np = np->next;
if(np == NULL){
chunk_head_value = file[jump_pos];
jump_pos += 1;
recalcu = 0;
break;
}
}
}
}
if(match_index >= j_match_num) break;
}
}
}
}
void recalcu(int chunk_size, int chunk_num, int *stat, int jump_pos, int file_len, int total_threads,
char *h_file, int *match_offset, int *match_chunkid, Node *ht, int j, int recalcu_region_size){
int match_index = 0;
int unmatch_index = 0; //
int recalcu = 1;
int chunk_id;
int length = chunk_size;
int j_match_num = 0;
for(int i=0;i<recalcu_region_size;++i){
j_match_num += stat[j+i];
}
int j_match_begin = chunk_num*j;
char chunk_head_value;
uint32 s1, s2, rc;
while(1){
if(recalcu == 1) rc = h_get_checksum1(&h_file[jump_pos], length, &s1, &s2);
else if(recalcu == 0){
s1 -= chunk_head_value + CHAR_OFFSET;
s2 -= chunk_size * (chunk_head_value + CHAR_OFFSET);
s1 += h_file[jump_pos+length-1] + CHAR_OFFSET;
s2 += s1;
rc = (s1 & 0xffff) + (s2 << 16);
}
while(jump_pos > match_offset[j_match_begin+match_index+unmatch_index]){
if(match_chunkid[j_match_begin+match_index+unmatch_index] == -1){
unmatch_index += 1;
}
else if(match_index < j_match_num){
match_chunkid[j_match_begin+match_index+unmatch_index] = -1;
//stat[j]--;
match_index++;
}
else break;
}
if(jump_pos == match_offset[j_match_begin+match_index+unmatch_index] && match_chunkid[j_match_begin+match_index+unmatch_index] != -1) break;
Node *np = lookup_ht(ht, rc, &chunk_id);
if(np == NULL){
recalcu = 0;
jump_pos += 1;
}
else{
char sum2[16];
bool md5match = true;
h_get_checksum2(&h_file[jump_pos], length, sum2);
while(1){
md5match = true;
for(int k=0;k<16;++k){
if(sum2[k]!=np->md5[k]){
md5match = false;
break;
}
}
if(md5match){
for(int k=j_match_begin;k<j_match_begin+chunk_num*recalcu_region_size;++k){
//-1-1
if(match_chunkid[k]==-1 || jump_pos+chunk_size > match_offset[k]){
match_offset[k] = jump_pos;
match_chunkid[k] = chunk_id;
//stat[j]++;
break;
}
else{
printf("error in 324 in new_file.cu\n");
}
}
jump_pos += chunk_size;
recalcu = 1;
break;
}
else{
np = np->next;
if(np == NULL){
chunk_head_value = h_file[jump_pos];
jump_pos += 1;
recalcu = 0;
break;
}
}
}
}
//chunk_size*chunk_num
if(match_index >= j_match_num) break;
//printf("match_index is %d, j_match_num is %d\n",match_index, j_match_num);
}
}
Node *lookup_ht(Node *ht, int32 rc, int *chunk_id){
uint index = hash(rc);
Node n = ht[index];
if(n.chunk_id == -1){
return NULL;
}
else{
Node *np = &n;
for(; np != NULL; np=np->next){
if(rc == np->checksum){
*chunk_id = np->chunk_id;
return np;
}
}
return NULL;
}
}
__device__ void d_get_checksum2(char *buf, int32 len, char *sum){
MD5_CTX md5;
MD5Init(&md5);
MD5Update(&md5, buf, len);
MD5Final(&md5, sum);
}
__device__ void MD5Init(MD5_CTX *context)
{
context->count[0] = 0;
context->count[1] = 0;
//
context->state[0] = 0x67452301;
context->state[1] = 0xEFCDAB89;
context->state[2] = 0x98BADCFE;
context->state[3] = 0x10325476;
}
__device__ void MD5Update(MD5_CTX *context, char *input,unsigned int inputlen)
{
unsigned int i = 0,index = 0,partlen = 0;
index = (context->count[0] >> 3) & 0x3F;
partlen = 64 - index;
context->count[0] += inputlen << 3;
if(context->count[0] < (inputlen << 3))
context->count[1]++;
context->count[1] += inputlen >> 29;
if(inputlen >= partlen) {
memcpy(&context->buffer[index],input,partlen);
MD5Transform(context->state,context->buffer);
for(i = partlen;i+64 <= inputlen;i+=64)
MD5Transform(context->state,&input[i]);
index = 0;
}
else i = 0;
memcpy(&context->buffer[index],&input[i],inputlen-i);
}
__device__ void MD5Final(MD5_CTX *context, char digest[16])
{
unsigned int index = 0,padlen = 0;
char bits[8];
index = (context->count[0] >> 3) & 0x3F;
padlen = (index < 56)?(56-index):(120-index);
MD5Encode(bits,context->count,8);
MD5Update(context,PADDING,padlen);
MD5Update(context,bits,8);
MD5Encode(digest,context->state,16);
}
__device__ void MD5Decode(unsigned int *output, char *input,unsigned int len)
{
unsigned int i = 0,j = 0;
while(j < len)
{
output[i] = (input[j]) |
(input[j+1] << 8) |
(input[j+2] << 16) |
(input[j+3] << 24);
i++;
j+=4;
}
}
__device__ void MD5Encode(char *output,unsigned int *input,unsigned int len)
{
unsigned int i = 0,j = 0;
while(j < len)
{
output[j] = input[i] & 0xFF;
output[j+1] = (input[i] >> 8) & 0xFF;
output[j+2] = (input[i] >> 16) & 0xFF;
output[j+3] = (input[i] >> 24) & 0xFF;
i++;
j+=4;
}
}
__device__ void MD5Transform(unsigned int state[4], char block[64])
{
unsigned int a = state[0];
unsigned int b = state[1];
unsigned int c = state[2];
unsigned int d = state[3];
unsigned int x[16];
MD5Decode(x,block,64);
FF(a, b, c, d, x[ 0], 7, 0xd76aa478);
FF(d, a, b, c, x[ 1], 12, 0xe8c7b756);
FF(c, d, a, b, x[ 2], 17, 0x242070db);
FF(b, c, d, a, x[ 3], 22, 0xc1bdceee);
FF(a, b, c, d, x[ 4], 7, 0xf57c0faf);
FF(d, a, b, c, x[ 5], 12, 0x4787c62a);
FF(c, d, a, b, x[ 6], 17, 0xa8304613);
FF(b, c, d, a, x[ 7], 22, 0xfd469501);
FF(a, b, c, d, x[ 8], 7, 0x698098d8);
FF(d, a, b, c, x[ 9], 12, 0x8b44f7af);
FF(c, d, a, b, x[10], 17, 0xffff5bb1);
FF(b, c, d, a, x[11], 22, 0x895cd7be);
FF(a, b, c, d, x[12], 7, 0x6b901122);
FF(d, a, b, c, x[13], 12, 0xfd987193);
FF(c, d, a, b, x[14], 17, 0xa679438e);
FF(b, c, d, a, x[15], 22, 0x49b40821);
GG(a, b, c, d, x[ 1], 5, 0xf61e2562);
GG(d, a, b, c, x[ 6], 9, 0xc040b340);
GG(c, d, a, b, x[11], 14, 0x265e5a51);
GG(b, c, d, a, x[ 0], 20, 0xe9b6c7aa);
GG(a, b, c, d, x[ 5], 5, 0xd62f105d);
GG(d, a, b, c, x[10], 9, 0x2441453);
GG(c, d, a, b, x[15], 14, 0xd8a1e681);
GG(b, c, d, a, x[ 4], 20, 0xe7d3fbc8);
GG(a, b, c, d, x[ 9], 5, 0x21e1cde6);
GG(d, a, b, c, x[14], 9, 0xc33707d6);
GG(c, d, a, b, x[ 3], 14, 0xf4d50d87);
GG(b, c, d, a, x[ 8], 20, 0x455a14ed);
GG(a, b, c, d, x[13], 5, 0xa9e3e905);
GG(d, a, b, c, x[ 2], 9, 0xfcefa3f8);
GG(c, d, a, b, x[ 7], 14, 0x676f02d9);
GG(b, c, d, a, x[12], 20, 0x8d2a4c8a);
HH(a, b, c, d, x[ 5], 4, 0xfffa3942);
HH(d, a, b, c, x[ 8], 11, 0x8771f681);
HH(c, d, a, b, x[11], 16, 0x6d9d6122);
HH(b, c, d, a, x[14], 23, 0xfde5380c);
HH(a, b, c, d, x[ 1], 4, 0xa4beea44);
HH(d, a, b, c, x[ 4], 11, 0x4bdecfa9);
HH(c, d, a, b, x[ 7], 16, 0xf6bb4b60);
HH(b, c, d, a, x[10], 23, 0xbebfbc70);
HH(a, b, c, d, x[13], 4, 0x289b7ec6);
HH(d, a, b, c, x[ 0], 11, 0xeaa127fa);
HH(c, d, a, b, x[ 3], 16, 0xd4ef3085);
HH(b, c, d, a, x[ 6], 23, 0x4881d05);
HH(a, b, c, d, x[ 9], 4, 0xd9d4d039);
HH(d, a, b, c, x[12], 11, 0xe6db99e5);
HH(c, d, a, b, x[15], 16, 0x1fa27cf8);
HH(b, c, d, a, x[ 2], 23, 0xc4ac5665);
II(a, b, c, d, x[ 0], 6, 0xf4292244);
II(d, a, b, c, x[ 7], 10, 0x432aff97);
II(c, d, a, b, x[14], 15, 0xab9423a7);
II(b, c, d, a, x[ 5], 21, 0xfc93a039);
II(a, b, c, d, x[12], 6, 0x655b59c3);
II(d, a, b, c, x[ 3], 10, 0x8f0ccc92);
II(c, d, a, b, x[10], 15, 0xffeff47d);
II(b, c, d, a, x[ 1], 21, 0x85845dd1);
II(a, b, c, d, x[ 8], 6, 0x6fa87e4f);
II(d, a, b, c, x[15], 10, 0xfe2ce6e0);
II(c, d, a, b, x[ 6], 15, 0xa3014314);
II(b, c, d, a, x[13], 21, 0x4e0811a1);
II(a, b, c, d, x[ 4], 6, 0xf7537e82);
II(d, a, b, c, x[11], 10, 0xbd3af235);
II(c, d, a, b, x[ 2], 15, 0x2ad7d2bb);
II(b, c, d, a, x[ 9], 21, 0xeb86d391);
state[0] += a;
state[1] += b;
state[2] += c;
state[3] += d;
}
__device__ uint32 d_get_checksum1(char *buf1, int32 len, uint32 *d_s1, uint32 *d_s2)
{
int32 i;
uint32 s1, s2;
char *buf = (char *)buf1;
s1 = s2 = 0;
for (i = 0; i < (len-4); i+=4) {
s2 += 4*(s1 + buf[i]) + 3*buf[i+1] + 2*buf[i+2] + buf[i+3] +
10*CHAR_OFFSET;
s1 += (buf[i+0] + buf[i+1] + buf[i+2] + buf[i+3] + 4*CHAR_OFFSET);
}
for (; i < len; i++) {
s1 += (buf[i]+CHAR_OFFSET); s2 += s1;
}
*d_s1 = s1;
*d_s2 = s2;
return (s1 & 0xffff) + (s2 << 16);
}
uint32 h_get_checksum1(char *buf1, int32 len, uint32 *d_s1, uint32 *d_s2)
{
int32 i;
uint32 s1, s2;
char *buf = (char *)buf1;
s1 = s2 = 0;
for (i = 0; i < (len-4); i+=4) {
s2 += 4*(s1 + buf[i]) + 3*buf[i+1] + 2*buf[i+2] + buf[i+3] +
10*CHAR_OFFSET;
s1 += (buf[i+0] + buf[i+1] + buf[i+2] + buf[i+3] + 4*CHAR_OFFSET);
}
for (; i < len; i++) {
s1 += (buf[i]+CHAR_OFFSET); s2 += s1;
}
*d_s1 = s1;
*d_s2 = s2;
return (s1 & 0xffff) + (s2 << 16);
}
__device__ uint d_hash(uint32 rc){
uint p = 1867;
return (((rc>>16)& 0xffff ^ ((rc&0xffff) * p)) & 0xffff)%HASHSIZE;
}
__device__ Node* d_lookup_ht(Node *ht, int32 rc, int *chunk_id){
uint index = d_hash(rc);
Node n = ht[index];
if(n.chunk_id == -1){
return NULL;
}
else{
Node *np = &n;
for(; np != NULL; np=np->next){
if(rc == np->checksum){
*chunk_id = np->chunk_id;
return np;
}
}
return NULL;
}
} | a88a0199f20316698d09041602572edaf693dba1.cu | #include <sys/time.h>
#include <unistd.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/stat.h>
#include <getopt.h>
#include <stdint.h>
#include <vector>
#include <sys/types.h>
#include <math.h>
#include "hash.h"
#include "checksum.h"
void recalcu(int chunk_size, int chunk_num, int *stat, int jump_pos, int file_len, int total_threads, char *h_file, int *match_offset, int *match_chunkid, Node *ht, int j);
uint32 h_get_checksum1(char *buf1, int32 len, uint32 *d_s1, uint32 *d_s2);
Node *lookup_ht(Node *ht, int32 rc, int *chunk_id);
__device__ uint32 d_get_checksum1(char *buf1, int32 len, uint32 *d_s1, uint32 *d_s2);
__device__ void d_get_checksum2(char *buf, int32 len, char *sum);
__device__ void MD5Init(MD5_CTX *context);
__device__ void MD5Update(MD5_CTX *context, char *input,unsigned int inputlen);
__device__ void MD5Transform(unsigned int state[4], char block[64]);
__device__ void MD5Decode(unsigned int *output, char *input,unsigned int len);
__device__ void MD5Encode(char *output,unsigned int *input,unsigned int len);
__device__ void MD5Final(MD5_CTX *context, char digest[16]);
__device__ uint d_hash(uint32 rc);
__device__ Node *d_lookup_ht(Node *ht, int32 rc, int *chunk_id);
__constant__ char PADDING[]={(char)0x80,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
__global__ void oldfile_match_build(char *dFileContent, int *dmatchIdArray, int chunk_size, int chunk_num, int *dMatchOffset, int *dMatchChunkid, char *d_new_file, int totalChunkNum, int *multi_match_array, int *multi_match_num){
int thread_id = blockIdx.x * blockDim.x + threadIdx.x;
if(thread_id < totalChunkNum){
int match_pos = thread_id;
int chunk_id = dMatchChunkid[match_pos];
if(chunk_id == -1) return;
if(dmatchIdArray[chunk_id] != -1){
int old_file_pos = dMatchOffset[match_pos];
int new_file_pos = chunk_id * chunk_size;
memcpy(&d_new_file[new_file_pos], &dFileContent[old_file_pos], chunk_size);
}
else{
int old = atomicAdd(multi_match_num, 1);
multi_match_array[old] = match_pos;
}
}
}
__global__ void multiwarp_match(Node *ht, char *file, size_t file_len, int total_threads, int chunk_size, int chunk_num,
int *match_offset, int *match_chunkid, int *stat)
{
int thread_id = blockIdx.x * blockDim.x + threadIdx.x;
int fileBeginPos = chunk_num*chunk_size*thread_id;
int chunkBeginPos = chunk_num*thread_id;
//printf("block %d, thread %d, id %d, filebeginpos %d, total file len %d\n",blockIdx.x,threadIdx.x, thread_id, fileBeginPos, file_len);
if(fileBeginPos < file_len){
int recalcu = 1;
uint32 rc;
int chunk_id;
int match_num = 0;
int i = 0;
uint32 s1 = 0, s2 = 0;
//the char in the head of a chunk, it can be used to store as the unmatch value and use to recalcu
char chunk_head_value;
int length = chunk_size;
length = chunk_size;
for(; i < chunk_size*chunk_num;){
//剩下的内容以及不够一个chunk_size
if(fileBeginPos+i>file_len-chunk_size){
length = file_len-fileBeginPos-i;
}
if(recalcu == 1) rc = d_get_checksum1(&file[fileBeginPos + i], length, &s1, &s2);
else if(recalcu == 0){
s1 -= chunk_head_value + CHAR_OFFSET;
s2 -= chunk_size * (chunk_head_value + CHAR_OFFSET);
s1 += file[fileBeginPos+i+length-1] + CHAR_OFFSET;
s2 += s1;
rc = (s1 & 0xffff) + (s2 << 16);
}
chunk_head_value = file[fileBeginPos+i];
Node *np = d_lookup_ht(ht, rc, &chunk_id);
if(np == NULL){
recalcu = 0;
i += 1;
}
else{
char sum2[16];
bool md5match = true;
d_get_checksum2(&file[fileBeginPos+i], length, sum2);
while(1){
md5match = true;
for(int j=0;j<16;++j){
if(sum2[j]!=np->md5[j]){
md5match = false;
break;
}
}
if(md5match){
match_chunkid[chunkBeginPos + match_num] = np->chunk_id;
match_offset[chunkBeginPos + match_num] = fileBeginPos + i;
match_num ++;
recalcu = 1;
i += chunk_size;
break;
}
else{
np = np->next;
if(np == NULL){
recalcu = 0;
i += 1;
break;
}
}
}
}
}
//record match_num
stat[thread_id] = match_num;
}
}
__global__ void gpu_recalcu(Node *ht, char *file, int chunk_size, int chunk_num, int *match_offset, int *match_chunkid, int *stat, int region_size)
{
int thread_id = blockIdx.x * blockDim.x + threadIdx.x;
int start_t = thread_id * region_size;
//printf("thread %d start recalcu on %d thread, region size %d\n", thread_id, start_t, region_size);
for(int i=start_t; i<start_t+region_size-1; ++i){
//printf("thread %d recalcu on its %d thread\n", thread_id, i-start_t);
int t_match_num = stat[i];
int j = i+1;
int jump_pos = match_offset[chunk_num*i+t_match_num-1]+chunk_size;
if(t_match_num > 0 && stat[j] > 0 && jump_pos > match_offset[chunk_num*j]){
//std::cout << "thread " << start_t << " need recalcu" << std::endl;
int match_index = 0;
int recalcu = 1;
int chunk_id;
int j_match_num = stat[j];
int j_match_begin = chunk_num*j;
char chunk_head_value;
uint32 s1, s2, rc;
while(1){
if(recalcu == 1) rc = d_get_checksum1(&file[jump_pos], chunk_size, &s1, &s2);
else if(recalcu == 0){
s1 -= chunk_head_value + CHAR_OFFSET;
s2 -= chunk_size * (chunk_head_value + CHAR_OFFSET);
s1 += file[jump_pos+chunk_size-1] + CHAR_OFFSET;
s2 += s1;
rc = (s1 & 0xffff) + (s2 << 16);
}
while(jump_pos > match_offset[j_match_begin+match_index]){
if(match_index < j_match_num){
match_chunkid[j_match_begin+match_index] = -1;
stat[j]--;
match_index++;
}
else break;
}
if(jump_pos == match_offset[j_match_begin+match_index]) break;
Node *np = d_lookup_ht(ht, rc, &chunk_id);
if(np == NULL){
recalcu = 0;
jump_pos += 1;
}
else{
char sum2[16];
bool md5match = true;
d_get_checksum2(&file[jump_pos], chunk_size, sum2);
while(1){
md5match = true;
for(int k=0;k<16;++k){
if(sum2[k]!=np->md5[k]){
md5match = false;
break;
}
}
if(md5match){
for(int k=j_match_begin;k<j_match_begin+chunk_num;++k){
if(match_chunkid[k]==-1 || jump_pos+chunk_size > match_offset[k]){
match_offset[k] = jump_pos;
match_chunkid[k] = chunk_id;
stat[j]++;
break;
}
else{
printf("error in 324 in new_file.cu\n");
}
}
jump_pos += chunk_size;
recalcu = 1;
break;
}
else{
np = np->next;
if(np == NULL){
chunk_head_value = file[jump_pos];
jump_pos += 1;
recalcu = 0;
break;
}
}
}
}
if(match_index >= j_match_num) break;
}
}
}
}
void recalcu(int chunk_size, int chunk_num, int *stat, int jump_pos, int file_len, int total_threads,
char *h_file, int *match_offset, int *match_chunkid, Node *ht, int j, int recalcu_region_size){
int match_index = 0;
int unmatch_index = 0; //
int recalcu = 1;
int chunk_id;
int length = chunk_size;
int j_match_num = 0;
for(int i=0;i<recalcu_region_size;++i){
j_match_num += stat[j+i];
}
int j_match_begin = chunk_num*j;
char chunk_head_value;
uint32 s1, s2, rc;
while(1){
if(recalcu == 1) rc = h_get_checksum1(&h_file[jump_pos], length, &s1, &s2);
else if(recalcu == 0){
s1 -= chunk_head_value + CHAR_OFFSET;
s2 -= chunk_size * (chunk_head_value + CHAR_OFFSET);
s1 += h_file[jump_pos+length-1] + CHAR_OFFSET;
s2 += s1;
rc = (s1 & 0xffff) + (s2 << 16);
}
while(jump_pos > match_offset[j_match_begin+match_index+unmatch_index]){
if(match_chunkid[j_match_begin+match_index+unmatch_index] == -1){
unmatch_index += 1;
}
else if(match_index < j_match_num){
match_chunkid[j_match_begin+match_index+unmatch_index] = -1;
//stat[j]--;
match_index++;
}
else break;
}
if(jump_pos == match_offset[j_match_begin+match_index+unmatch_index] && match_chunkid[j_match_begin+match_index+unmatch_index] != -1) break;
Node *np = lookup_ht(ht, rc, &chunk_id);
if(np == NULL){
recalcu = 0;
jump_pos += 1;
}
else{
char sum2[16];
bool md5match = true;
h_get_checksum2(&h_file[jump_pos], length, sum2);
while(1){
md5match = true;
for(int k=0;k<16;++k){
if(sum2[k]!=np->md5[k]){
md5match = false;
break;
}
}
if(md5match){
for(int k=j_match_begin;k<j_match_begin+chunk_num*recalcu_region_size;++k){
//已经被置为-1或者目前还没有但马上会被置为-1的
if(match_chunkid[k]==-1 || jump_pos+chunk_size > match_offset[k]){
match_offset[k] = jump_pos;
match_chunkid[k] = chunk_id;
//stat[j]++;
break;
}
else{
printf("error in 324 in new_file.cu\n");
}
}
jump_pos += chunk_size;
recalcu = 1;
break;
}
else{
np = np->next;
if(np == NULL){
chunk_head_value = h_file[jump_pos];
jump_pos += 1;
recalcu = 0;
break;
}
}
}
}
//还一种可能就是整个chunk_size*chunk_num都没有匹配
if(match_index >= j_match_num) break;
//printf("match_index is %d, j_match_num is %d\n",match_index, j_match_num);
}
}
Node *lookup_ht(Node *ht, int32 rc, int *chunk_id){
uint index = hash(rc);
Node n = ht[index];
if(n.chunk_id == -1){
return NULL;
}
else{
Node *np = &n;
for(; np != NULL; np=np->next){
if(rc == np->checksum){
*chunk_id = np->chunk_id;
return np;
}
}
return NULL;
}
}
__device__ void d_get_checksum2(char *buf, int32 len, char *sum){
MD5_CTX md5;
MD5Init(&md5);
MD5Update(&md5, buf, len);
MD5Final(&md5, sum);
}
__device__ void MD5Init(MD5_CTX *context)
{
context->count[0] = 0;
context->count[1] = 0;
//分别赋固定值
context->state[0] = 0x67452301;
context->state[1] = 0xEFCDAB89;
context->state[2] = 0x98BADCFE;
context->state[3] = 0x10325476;
}
__device__ void MD5Update(MD5_CTX *context, char *input,unsigned int inputlen)
{
unsigned int i = 0,index = 0,partlen = 0;
index = (context->count[0] >> 3) & 0x3F;
partlen = 64 - index;
context->count[0] += inputlen << 3;
if(context->count[0] < (inputlen << 3))
context->count[1]++;
context->count[1] += inputlen >> 29;
if(inputlen >= partlen) {
memcpy(&context->buffer[index],input,partlen);
MD5Transform(context->state,context->buffer);
for(i = partlen;i+64 <= inputlen;i+=64)
MD5Transform(context->state,&input[i]);
index = 0;
}
else i = 0;
memcpy(&context->buffer[index],&input[i],inputlen-i);
}
__device__ void MD5Final(MD5_CTX *context, char digest[16])
{
unsigned int index = 0,padlen = 0;
char bits[8];
index = (context->count[0] >> 3) & 0x3F;
padlen = (index < 56)?(56-index):(120-index);
MD5Encode(bits,context->count,8);
MD5Update(context,PADDING,padlen);
MD5Update(context,bits,8);
MD5Encode(digest,context->state,16);
}
__device__ void MD5Decode(unsigned int *output, char *input,unsigned int len)
{
unsigned int i = 0,j = 0;
while(j < len)
{
output[i] = (input[j]) |
(input[j+1] << 8) |
(input[j+2] << 16) |
(input[j+3] << 24);
i++;
j+=4;
}
}
__device__ void MD5Encode(char *output,unsigned int *input,unsigned int len)
{
unsigned int i = 0,j = 0;
while(j < len)
{
output[j] = input[i] & 0xFF;
output[j+1] = (input[i] >> 8) & 0xFF;
output[j+2] = (input[i] >> 16) & 0xFF;
output[j+3] = (input[i] >> 24) & 0xFF;
i++;
j+=4;
}
}
__device__ void MD5Transform(unsigned int state[4], char block[64])
{
unsigned int a = state[0];
unsigned int b = state[1];
unsigned int c = state[2];
unsigned int d = state[3];
unsigned int x[16];
MD5Decode(x,block,64);
FF(a, b, c, d, x[ 0], 7, 0xd76aa478);
FF(d, a, b, c, x[ 1], 12, 0xe8c7b756);
FF(c, d, a, b, x[ 2], 17, 0x242070db);
FF(b, c, d, a, x[ 3], 22, 0xc1bdceee);
FF(a, b, c, d, x[ 4], 7, 0xf57c0faf);
FF(d, a, b, c, x[ 5], 12, 0x4787c62a);
FF(c, d, a, b, x[ 6], 17, 0xa8304613);
FF(b, c, d, a, x[ 7], 22, 0xfd469501);
FF(a, b, c, d, x[ 8], 7, 0x698098d8);
FF(d, a, b, c, x[ 9], 12, 0x8b44f7af);
FF(c, d, a, b, x[10], 17, 0xffff5bb1);
FF(b, c, d, a, x[11], 22, 0x895cd7be);
FF(a, b, c, d, x[12], 7, 0x6b901122);
FF(d, a, b, c, x[13], 12, 0xfd987193);
FF(c, d, a, b, x[14], 17, 0xa679438e);
FF(b, c, d, a, x[15], 22, 0x49b40821);
GG(a, b, c, d, x[ 1], 5, 0xf61e2562);
GG(d, a, b, c, x[ 6], 9, 0xc040b340);
GG(c, d, a, b, x[11], 14, 0x265e5a51);
GG(b, c, d, a, x[ 0], 20, 0xe9b6c7aa);
GG(a, b, c, d, x[ 5], 5, 0xd62f105d);
GG(d, a, b, c, x[10], 9, 0x2441453);
GG(c, d, a, b, x[15], 14, 0xd8a1e681);
GG(b, c, d, a, x[ 4], 20, 0xe7d3fbc8);
GG(a, b, c, d, x[ 9], 5, 0x21e1cde6);
GG(d, a, b, c, x[14], 9, 0xc33707d6);
GG(c, d, a, b, x[ 3], 14, 0xf4d50d87);
GG(b, c, d, a, x[ 8], 20, 0x455a14ed);
GG(a, b, c, d, x[13], 5, 0xa9e3e905);
GG(d, a, b, c, x[ 2], 9, 0xfcefa3f8);
GG(c, d, a, b, x[ 7], 14, 0x676f02d9);
GG(b, c, d, a, x[12], 20, 0x8d2a4c8a);
HH(a, b, c, d, x[ 5], 4, 0xfffa3942);
HH(d, a, b, c, x[ 8], 11, 0x8771f681);
HH(c, d, a, b, x[11], 16, 0x6d9d6122);
HH(b, c, d, a, x[14], 23, 0xfde5380c);
HH(a, b, c, d, x[ 1], 4, 0xa4beea44);
HH(d, a, b, c, x[ 4], 11, 0x4bdecfa9);
HH(c, d, a, b, x[ 7], 16, 0xf6bb4b60);
HH(b, c, d, a, x[10], 23, 0xbebfbc70);
HH(a, b, c, d, x[13], 4, 0x289b7ec6);
HH(d, a, b, c, x[ 0], 11, 0xeaa127fa);
HH(c, d, a, b, x[ 3], 16, 0xd4ef3085);
HH(b, c, d, a, x[ 6], 23, 0x4881d05);
HH(a, b, c, d, x[ 9], 4, 0xd9d4d039);
HH(d, a, b, c, x[12], 11, 0xe6db99e5);
HH(c, d, a, b, x[15], 16, 0x1fa27cf8);
HH(b, c, d, a, x[ 2], 23, 0xc4ac5665);
II(a, b, c, d, x[ 0], 6, 0xf4292244);
II(d, a, b, c, x[ 7], 10, 0x432aff97);
II(c, d, a, b, x[14], 15, 0xab9423a7);
II(b, c, d, a, x[ 5], 21, 0xfc93a039);
II(a, b, c, d, x[12], 6, 0x655b59c3);
II(d, a, b, c, x[ 3], 10, 0x8f0ccc92);
II(c, d, a, b, x[10], 15, 0xffeff47d);
II(b, c, d, a, x[ 1], 21, 0x85845dd1);
II(a, b, c, d, x[ 8], 6, 0x6fa87e4f);
II(d, a, b, c, x[15], 10, 0xfe2ce6e0);
II(c, d, a, b, x[ 6], 15, 0xa3014314);
II(b, c, d, a, x[13], 21, 0x4e0811a1);
II(a, b, c, d, x[ 4], 6, 0xf7537e82);
II(d, a, b, c, x[11], 10, 0xbd3af235);
II(c, d, a, b, x[ 2], 15, 0x2ad7d2bb);
II(b, c, d, a, x[ 9], 21, 0xeb86d391);
state[0] += a;
state[1] += b;
state[2] += c;
state[3] += d;
}
__device__ uint32 d_get_checksum1(char *buf1, int32 len, uint32 *d_s1, uint32 *d_s2)
{
int32 i;
uint32 s1, s2;
char *buf = (char *)buf1;
s1 = s2 = 0;
for (i = 0; i < (len-4); i+=4) {
s2 += 4*(s1 + buf[i]) + 3*buf[i+1] + 2*buf[i+2] + buf[i+3] +
10*CHAR_OFFSET;
s1 += (buf[i+0] + buf[i+1] + buf[i+2] + buf[i+3] + 4*CHAR_OFFSET);
}
for (; i < len; i++) {
s1 += (buf[i]+CHAR_OFFSET); s2 += s1;
}
*d_s1 = s1;
*d_s2 = s2;
return (s1 & 0xffff) + (s2 << 16);
}
uint32 h_get_checksum1(char *buf1, int32 len, uint32 *d_s1, uint32 *d_s2)
{
int32 i;
uint32 s1, s2;
char *buf = (char *)buf1;
s1 = s2 = 0;
for (i = 0; i < (len-4); i+=4) {
s2 += 4*(s1 + buf[i]) + 3*buf[i+1] + 2*buf[i+2] + buf[i+3] +
10*CHAR_OFFSET;
s1 += (buf[i+0] + buf[i+1] + buf[i+2] + buf[i+3] + 4*CHAR_OFFSET);
}
for (; i < len; i++) {
s1 += (buf[i]+CHAR_OFFSET); s2 += s1;
}
*d_s1 = s1;
*d_s2 = s2;
return (s1 & 0xffff) + (s2 << 16);
}
__device__ uint d_hash(uint32 rc){
uint p = 1867;
return (((rc>>16)& 0xffff ^ ((rc&0xffff) * p)) & 0xffff)%HASHSIZE;
}
__device__ Node* d_lookup_ht(Node *ht, int32 rc, int *chunk_id){
uint index = d_hash(rc);
Node n = ht[index];
if(n.chunk_id == -1){
return NULL;
}
else{
Node *np = &n;
for(; np != NULL; np=np->next){
if(rc == np->checksum){
*chunk_id = np->chunk_id;
return np;
}
}
return NULL;
}
} |
48403b0e2be2feab3014ca9310e27fd2d11a3f87.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__device__ unsigned int getGid3d3d(){
int blockId = blockIdx.x + blockIdx.y * gridDim.x
+ gridDim.x * gridDim.y * blockIdx.z;
int threadId = blockId * (blockDim.x * blockDim.y * blockDim.z)
+ (threadIdx.y * blockDim.x)
+ (threadIdx.z * (blockDim.x * blockDim.y)) + threadIdx.x;
return threadId;
}
__global__ void scalarDiv(double* in, double factor, double* out){
double result;
unsigned int gid = getGid3d3d();
result = (in[gid] / factor);
out[gid] = result;
} | 48403b0e2be2feab3014ca9310e27fd2d11a3f87.cu | #include "includes.h"
__device__ unsigned int getGid3d3d(){
int blockId = blockIdx.x + blockIdx.y * gridDim.x
+ gridDim.x * gridDim.y * blockIdx.z;
int threadId = blockId * (blockDim.x * blockDim.y * blockDim.z)
+ (threadIdx.y * blockDim.x)
+ (threadIdx.z * (blockDim.x * blockDim.y)) + threadIdx.x;
return threadId;
}
__global__ void scalarDiv(double* in, double factor, double* out){
double result;
unsigned int gid = getGid3d3d();
result = (in[gid] / factor);
out[gid] = result;
} |
edad6e5dfc3bd9f48eb19b830bb18925e261b1dc.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <hip/hip_runtime.h>
__global__ void kernel(void)
{
}
int main()
{
hipLaunchKernelGGL(( kernel), dim3(1),dim3(1), 0, 0, );
std::cout << "Hello World!" << std::endl;
return 0;
}
| edad6e5dfc3bd9f48eb19b830bb18925e261b1dc.cu | #include <iostream>
#include <cuda_runtime.h>
__global__ void kernel(void)
{
}
int main()
{
kernel<<<1,1>>>();
std::cout << "Hello World!" << std::endl;
return 0;
}
|
87e24696f97144885d722c27235ce83ce4d24cb7.hip | // !!! This is a file automatically generated by hipify!!!
#include <opencv2/opencv.hpp>
#include <thrust/window_2d.h>
#define PI 3.14159
using namespace cv;
class transFunctor
{
public:
__device__ uchar operator() (const uchar a,const uchar b) const
{
return (uchar) sqrt((float) a*a + b*b);
}
};
class convolutionFunctor //:public thrust::shared_unary_window_transform_functor<uchar>
{
public:
int dim;
thrust::block_2d<float> * kernel;
convolutionFunctor( thrust::block_2d<float> * kernel,int dim)
{
this->dim =dim;
this->kernel = kernel;
}
__device__ uchar operator() (const thrust::window_2d<uchar> & input_window,const thrust::window_2d<uchar> & output_window) const
{
uchar temp = 0;
for(int i = 0; i< dim; i++)
{
for(int j = 0; j<dim; j++)
{
temp+=input_window[i][j]*(*kernel)[i][j];
}
}
output_window[1][1]=temp;
return 0.0 ;
}
};
int main(int argc, char const *argv[]) {
hipDeviceProp_t dev_prop;
hipGetDeviceProperties(&dev_prop,0);
Mat small = imread("car.jpg",CV_LOAD_IMAGE_GRAYSCALE);
Mat image;
int dim = 512;
if(argc ==2)
{
dim = atoi(argv[1]);
}
resize(small,image,Size(dim,dim));
thrust::host_block_2d<float> kernelx(3,3);
thrust::host_block_2d<float> kernely(3,3);
thrust::block_2d<float> dkernelx(3,3);
thrust::block_2d<float> dkernely(3,3);
//Scharr Filter
kernelx[0][0]=-3;
kernelx[0][1]=0;
kernelx[0][2]=+3;
kernelx[1][0]=-10;
kernelx[1][1]=0;
kernelx[1][2]=+10;
kernelx[2][0]=-3;
kernelx[2][1]=0;
kernelx[2][2]=+3;
kernely[0][0]=-3;
kernely[0][1]=-10;
kernely[0][2]=-3;
kernely[1][0]=0;
kernely[1][1]=0;
kernely[1][2]=0;
kernely[2][0]=+3;
kernely[2][1]=+10;
kernely[2][2]=+3;
dkernelx=kernelx;
dkernely=kernely;
thrust::block_2d<uchar> uchar_image_block (image.cols,image.rows);
thrust::block_2d<uchar> convolve1_block (image.cols,image.rows);
thrust::block_2d<uchar> convolve2_block (image.cols,image.rows);
thrust::block_2d<uchar> outBlock (image.cols,image.rows);
thrust::block_2d<uchar> zero_image_block (image.cols,image.rows);
uchar * img = (uchar * )malloc(sizeof(uchar)*(uchar_image_block.end()-uchar_image_block.begin()));
for(int i = 0; i<image.cols*image.rows;i++)
{
img[i]=(uchar)image.ptr()[i];
}
uchar_image_block.upload(img);
convolve1_block.upload(img);
convolve2_block.upload(img);
thrust::window_vector<uchar> input_wv(&uchar_image_block,3,3,1,1);
thrust::window_vector<uchar> output_wv_x(&convolve1_block,3,3,1,1);
thrust::window_vector<uchar> output_wv_y(&convolve2_block,3,3,1,1);
thrust::transform(input_wv.begin(),input_wv.end(),output_wv_x.begin(),zero_image_block.begin(),convolutionFunctor(dkernelx.device_pointer,3));
thrust::transform(input_wv.begin(),input_wv.end(),output_wv_y.begin(),zero_image_block.begin(),convolutionFunctor(dkernely.device_pointer,3));
thrust::transform(convolve1_block.begin(),convolve1_block.end(),convolve2_block.begin(),outBlock.begin(),transFunctor());
unsigned char * outputFloatImageData = (unsigned char *)malloc(sizeof(unsigned char)*(uchar_image_block.end()-uchar_image_block.begin()));
outBlock.download(&img);
for(int i = 0; i<image.cols*image.rows;i++)
{
outputFloatImageData[i]=(unsigned char)img[i];
}
Mat output (Size(image.cols,image.rows),CV_8UC1,outputFloatImageData);
#ifdef OWRITE
imwrite("input.png",image);
imwrite("output.png",output);
#endif
#ifdef SHOW
imshow("input.png",image);
imshow("output.png",output);
waitKey(0);
#endif
return 0;
}
| 87e24696f97144885d722c27235ce83ce4d24cb7.cu | #include <opencv2/opencv.hpp>
#include <thrust/window_2d.h>
#define PI 3.14159
using namespace cv;
class transFunctor
{
public:
__device__ uchar operator() (const uchar a,const uchar b) const
{
return (uchar) sqrt((float) a*a + b*b);
}
};
class convolutionFunctor //:public thrust::shared_unary_window_transform_functor<uchar>
{
public:
int dim;
thrust::block_2d<float> * kernel;
convolutionFunctor( thrust::block_2d<float> * kernel,int dim)
{
this->dim =dim;
this->kernel = kernel;
}
__device__ uchar operator() (const thrust::window_2d<uchar> & input_window,const thrust::window_2d<uchar> & output_window) const
{
uchar temp = 0;
for(int i = 0; i< dim; i++)
{
for(int j = 0; j<dim; j++)
{
temp+=input_window[i][j]*(*kernel)[i][j];
}
}
output_window[1][1]=temp;
return 0.0 ;
}
};
int main(int argc, char const *argv[]) {
cudaDeviceProp dev_prop;
cudaGetDeviceProperties(&dev_prop,0);
Mat small = imread("car.jpg",CV_LOAD_IMAGE_GRAYSCALE);
Mat image;
int dim = 512;
if(argc ==2)
{
dim = atoi(argv[1]);
}
resize(small,image,Size(dim,dim));
thrust::host_block_2d<float> kernelx(3,3);
thrust::host_block_2d<float> kernely(3,3);
thrust::block_2d<float> dkernelx(3,3);
thrust::block_2d<float> dkernely(3,3);
//Scharr Filter
kernelx[0][0]=-3;
kernelx[0][1]=0;
kernelx[0][2]=+3;
kernelx[1][0]=-10;
kernelx[1][1]=0;
kernelx[1][2]=+10;
kernelx[2][0]=-3;
kernelx[2][1]=0;
kernelx[2][2]=+3;
kernely[0][0]=-3;
kernely[0][1]=-10;
kernely[0][2]=-3;
kernely[1][0]=0;
kernely[1][1]=0;
kernely[1][2]=0;
kernely[2][0]=+3;
kernely[2][1]=+10;
kernely[2][2]=+3;
dkernelx=kernelx;
dkernely=kernely;
thrust::block_2d<uchar> uchar_image_block (image.cols,image.rows);
thrust::block_2d<uchar> convolve1_block (image.cols,image.rows);
thrust::block_2d<uchar> convolve2_block (image.cols,image.rows);
thrust::block_2d<uchar> outBlock (image.cols,image.rows);
thrust::block_2d<uchar> zero_image_block (image.cols,image.rows);
uchar * img = (uchar * )malloc(sizeof(uchar)*(uchar_image_block.end()-uchar_image_block.begin()));
for(int i = 0; i<image.cols*image.rows;i++)
{
img[i]=(uchar)image.ptr()[i];
}
uchar_image_block.upload(img);
convolve1_block.upload(img);
convolve2_block.upload(img);
thrust::window_vector<uchar> input_wv(&uchar_image_block,3,3,1,1);
thrust::window_vector<uchar> output_wv_x(&convolve1_block,3,3,1,1);
thrust::window_vector<uchar> output_wv_y(&convolve2_block,3,3,1,1);
thrust::transform(input_wv.begin(),input_wv.end(),output_wv_x.begin(),zero_image_block.begin(),convolutionFunctor(dkernelx.device_pointer,3));
thrust::transform(input_wv.begin(),input_wv.end(),output_wv_y.begin(),zero_image_block.begin(),convolutionFunctor(dkernely.device_pointer,3));
thrust::transform(convolve1_block.begin(),convolve1_block.end(),convolve2_block.begin(),outBlock.begin(),transFunctor());
unsigned char * outputFloatImageData = (unsigned char *)malloc(sizeof(unsigned char)*(uchar_image_block.end()-uchar_image_block.begin()));
outBlock.download(&img);
for(int i = 0; i<image.cols*image.rows;i++)
{
outputFloatImageData[i]=(unsigned char)img[i];
}
Mat output (Size(image.cols,image.rows),CV_8UC1,outputFloatImageData);
#ifdef OWRITE
imwrite("input.png",image);
imwrite("output.png",output);
#endif
#ifdef SHOW
imshow("input.png",image);
imshow("output.png",output);
waitKey(0);
#endif
return 0;
}
|
0007e3c635a9a0f88cef8b11dce5400fb4691d1f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "pairwise_hist.cuh"
#include "split_properties_helpers.cuh"
#include "compute_pair_hist_loop.cuh"
#include <hip/hip_cooperative_groups.h>
#include <library/cpp/cuda/wrappers/arch.cuh>
#include <catboost/cuda/cuda_util/kernel/instructions.cuh>
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
using namespace cooperative_groups;
namespace NKernel {
//TODO(noxoomo): tune it
template <bool IsFullPass>
struct THalfBytePairwiseHistUnrollTrait {
static constexpr int InnerUnroll() {
#if __CUDA_ARCH__ <= 350
return 2;
#elif __CUDA_ARCH__ < 700
return 2;
#else
return 8;//IsFullPass ? 4 : 8;
#endif
}
static constexpr int OuterUnroll() {
#if __CUDA_ARCH__ <= 350
return 4;
#elif __CUDA_ARCH__ < 700
return 2;
#else
return 1;
#endif
}
};
// Shared-memory accumulator for pairwise histograms over half-byte (4-bit)
// feature bins. BLOCK_SIZE threads cooperatively fill a BLOCK_SIZE * 32 float
// buffer; the TCmpBins policy decides whether a (bin1, bin2) pair lands in the
// "leq" half (offset 0) or the "greater" half (offset 512) of a warp region.
template <int BLOCK_SIZE, class TCmpBins = TCmpBinsWithoutOneHot>
struct TPairHistHalfByte {
// Bin-comparison policy (with or without one-hot features).
TCmpBins CmpBinsFunc;
// This thread's base pointer into the shared-memory histogram buffer.
float* Slice;
// Offset of the calling thread's histogram region: each warp owns a
// 1024-float region (1024 * warpId), and bit 4 of threadIdx.x selects one of
// two interleaved 16-float sub-slices inside it.
__forceinline__ __device__ int SliceOffset() {
const int warpOffset = 1024 * (threadIdx.x / 32);
//we store 4 histograms per block
// x8 feature and x4 histograms, though histStart = blockIdx * 16
return warpOffset + (threadIdx.x & 16);
}
// Cooperatively zeroes the whole BLOCK_SIZE * 32 buffer, then advances Slice
// to this thread's region. `buff` is expected to be block-shared memory
// (the caller in this file passes a __shared__ array).
__forceinline__ __device__ TPairHistHalfByte(float* buff, TCmpBins cmpBinsFunc)
: CmpBinsFunc(cmpBinsFunc) {
Slice = buff;
for (int i = threadIdx.x; i < BLOCK_SIZE * 32; i += BLOCK_SIZE) {
Slice[i] = 0;
}
Slice += SliceOffset();
// Make the zeroed buffer visible to all threads before accumulation starts.
__syncthreads();
}
// Accumulates weight `w` for one pair of compressed bin indices (8 half-byte
// features packed per ui32). Odd threads (flag) process the pair in swapped
// order; `shift` rotates the packed bins so neighbouring threads start at
// different features.
__forceinline__ __device__ void AddPair(const ui32 ci1,
const ui32 ci2,
const float w) {
thread_block_tile<16> groupTile = tiled_partition<16>(this_thread_block());
const bool flag = threadIdx.x & 1;
const int shift = threadIdx.x & 14;
const ui32 bins1 = RotateRight(flag ? ci2 : ci1, 2 * shift);
const ui32 bins2 = RotateRight(flag ? ci1 : ci2, 2 * shift);
// Walk all 8 half-byte features packed in the 32-bit index.
#pragma unroll
for (int i = 0; i < 8; i++) {
const int f = ((shift + 2 * i) & 14);
const int bin1 = (bins1 >> (28 - 4 * i)) & 15;
const int bin2 = (bins2 >> (28 - 4 * i)) & 15;
// Compare() selects the "leq" half (offset 0) or "greater" half (offset 512).
const int tmp = (CmpBinsFunc.Compare(i, bin1, bin2, flag) ? 0 : 512) + f;
const int offset1 = 32 * bin1 + tmp + flag;
const int offset2 = 32 * bin2 + tmp + !flag;
// NOTE(review): the tile syncs appear to order the two scatter updates so
// lanes of the 16-thread tile do not race on the same cell - confirm.
groupTile.sync();
Slice[offset1] += w;
groupTile.sync();
Slice[offset2] += w;
}
}
#if __CUDA_ARCH__ < 700
// Pre-Volta fallback: process the N pairs one at a time.
template <int N>
__forceinline__ __device__ void AddPairs(const ui32* ci1,
const ui32* ci2,
const float* w) {
#pragma unroll
for (int k = 0; k < N; ++k) {
AddPair(ci1[k], ci2[k], w[k]);
}
}
#else
// Volta+ version: batches N pairs so all address computation is unrolled
// ahead of the two tile-synchronized scatter phases (same math as AddPair).
template <int N>
__forceinline__ __device__ void AddPairs(const ui32* ci1,
const ui32* ci2,
const float* w) {
thread_block_tile<16> groupTile = tiled_partition<16>(this_thread_block());
const bool flag = threadIdx.x & 1;
const int shift = threadIdx.x & 14;
ui32 bins1[N];
ui32 bins2[N];
#pragma unroll
for (int k = 0; k < N; ++k) {
bins1[k] = RotateRight(flag ? ci2[k] : ci1[k], 2 * shift);
bins2[k] = RotateRight(flag ? ci1[k] : ci2[k], 2 * shift);
}
#pragma unroll
for (int i = 0; i < 8; i++) {
const int f = ((shift + 2 * i) & 14);
int bin1[N];
int bin2[N];
#pragma unroll
for (int k = 0; k < N; ++k) {
bin1[k] = (bins1[k] >> (28 - 4 * i)) & 15;
bin2[k] = (bins2[k] >> (28 - 4 * i)) & 15;
}
int offset1[N];
int offset2[N];
#pragma unroll
for (int k = 0; k < N; ++k) {
const int tmp = (CmpBinsFunc.Compare(i, bin1[k], bin2[k], flag) ? 0 : 512) + f;
offset1[k] = 32 * bin1[k] + tmp + flag;
offset2[k] = 32 * bin2[k] + tmp + !flag;
}
groupTile.sync();
#pragma unroll
for (int k = 0; k < N; ++k) {
Slice[offset1[k]] += w[k];
}
groupTile.sync();
#pragma unroll
for (int k = 0; k < N; ++k) {
Slice[offset2[k]] += w[k];
}
}
}
#endif
// Collapses the per-warp histograms into a compact per-feature layout at the
// start of the shared buffer; must be reached by the whole block.
__forceinline__ __device__ void Reduce() {
Slice -= SliceOffset();
__syncthreads();
{
// Stage 1: sum all per-warp 1024-float regions into Slice[1024..2047].
const int warpHistSize = 1024;
for (int start = threadIdx.x; start < warpHistSize; start += BLOCK_SIZE) {
float sum = 0;
#pragma unroll 12
for (int i = start; i < 32 * BLOCK_SIZE; i += warpHistSize) {
sum += Slice[i];
}
Slice[warpHistSize + start] = sum;
}
}
__syncthreads();
// Stage 2: the first 256 threads fold the two interleaved sub-slices
// (src[0] + src[16], and src[512] + src[528] for the "greater" half) and
// write the final layout Slice[4 * (16 * f + fold) + {0,1,2,3}].
const int maxFoldCount = 16;
const int fold = (threadIdx.x >> 1) & 15;
const int f = threadIdx.x / 32;
if (threadIdx.x < 256) {
float weightLeq = 0;
float weightGe = 0;
const bool isSecondBin = (threadIdx.x & 1);
if (fold < maxFoldCount) {
const volatile float* __restrict__ src = Slice
+ 1024 //warpHistSize
+ 32 * fold
+ 2 * f
+ isSecondBin;
weightLeq = src[0] + src[16];
weightGe = src[512] + src[528];
Slice[4 * (maxFoldCount * f + fold) + isSecondBin] = weightLeq;
Slice[4 * (maxFoldCount * f + fold) + 2 + isSecondBin] = weightGe;
}
}
__syncthreads();
}
};
// One device-side pass: accumulates the pairwise histogram for up to 8
// half-byte features of one (partition, block-slice) into shared memory, then
// flushes the reduced result to the global histogram with atomics.
template <int BlockSize, int N, int OuterUnroll>
__forceinline__ __device__ void ComputeSplitPropertiesHalfBytePass(const TCFeature* feature, int fCount,
const ui32* __restrict cindex,
const uint2* __restrict pairs,
const float* __restrict weight,
const TDataPartition* partition,
int blockIdx, int blockCount,
float* __restrict histogram,
float* __restrict smem) {
// Cap the number of cooperating blocks so each one sees at least
// BlockSize * N * 8 documents; surplus blocks exit immediately.
const int minDocsPerBlock = BlockSize * N * 8;
const int activeBlockCount = min((partition->Size + minDocsPerBlock - 1) / minDocsPerBlock, blockCount);
if (blockIdx >= activeBlockCount) {
return;
}
#define RUN_COMPUTE_HIST() \
ComputePairHistogram < BlockSize, N, OuterUnroll, THist >(partition->Offset, partition->Size,\
cindex,\
pairs, weight, \
blockIdx, activeBlockCount, \
hist);
// Pick the histogram flavour: one-hot features need the comparing bin policy.
if (HasOneHotFeatures(feature, fCount, reinterpret_cast<int*>(smem))) {
using TCmpBins = TCmpBinsWithOneHot<8>;
TCmpBins cmpBins(feature, fCount);
using THist = TPairHistHalfByte<BlockSize, TCmpBins>;
THist hist(smem, cmpBins);
RUN_COMPUTE_HIST();
} else {
using THist = TPairHistHalfByte<BlockSize>;
THist hist(smem, TCmpBinsWithoutOneHot());
RUN_COMPUTE_HIST();
}
#undef RUN_COMPUTE_HIST
// Flush: 256 threads scatter the reduced shared-memory layout (4 values per
// (feature, fold)) into the global histogram; near-zero values are skipped
// to save atomic traffic.
if (threadIdx.x < 256) {
const int histId = threadIdx.x & 3;
const int fold = (threadIdx.x >> 2) & 15;
const int firstFid = (threadIdx.x >> 6) & 3;
for (int fid = firstFid; fid < fCount; fid += 4) {
const ui32 bfStart = feature[fid].FirstFoldIndex;
if (fold < feature[fid].Folds) {
const int readOffset = 4 * (16 * fid + fold) + histId;
const float val = smem[readOffset];
if (abs(val) > 1e-20f) {
atomicAdd(histogram + 4 * bfStart + 4 * fold + histId, val);
}
}
}
}
}
// Kernel computing pairwise half-byte histograms. Grid layout: x = feature
// group (8 features each) times cooperating blocks per partition; y (and z,
// for partial passes) select the data partition and output histogram line.
template <int BlockSize, bool IsFullPass>
#if __CUDA_ARCH__ >= 700
__launch_bounds__(BlockSize, 2)
#else
__launch_bounds__(BlockSize, 1)
#endif
__global__ void ComputeSplitPropertiesHalfBytePairs(const TCFeature* feature, int fCount,
const ui32* cindex,
const uint2* pairs,
const float* weight,
const TDataPartition* partition,
int histLineSize,
float* histogram) {
// gridDim.x = featureGroups * blocksPerPart; recover the local block index.
const int blocksPerPart = gridDim.x / ((fCount + 7) / 8);
const int localBlockIdx = blockIdx.x % blocksPerPart;
//histogram line size - size of one part hist.
const int featureOffset = (blockIdx.x / blocksPerPart) * 8;
feature += featureOffset;
cindex += feature->Offset;
fCount = min(fCount - featureOffset, 8);
if (IsFullPass) {
partition += blockIdx.y;
histogram += blockIdx.y * histLineSize * 4ULL;
} else {
// Partial pass: part id comes from the partition metadata and the output
// histogram line is ((z + 1) << depth) | y, with depth = log2(gridDim.y).
const int depth = (int)log2((float)gridDim.y);
int partId = GetPairwisePartIdToCalculate(partition);
partition += partId;
histogram += (((blockIdx.z + 1) << depth) | blockIdx.y) * histLineSize * 4ULL;
}
if (partition->Size == 0) {
return;
}
// Shared accumulator consumed by TPairHistHalfByte (BlockSize * 32 floats).
__shared__ float localHist[32 * BlockSize];
const int innerUnroll = THalfBytePairwiseHistUnrollTrait<IsFullPass>::InnerUnroll();
const int outerUnroll = THalfBytePairwiseHistUnrollTrait<IsFullPass>::OuterUnroll();
ComputeSplitPropertiesHalfBytePass<BlockSize, innerUnroll, outerUnroll>(feature, fCount, cindex, pairs,
weight, partition,
localBlockIdx, blocksPerPart,
histogram, &localHist[0]);
}
// Host launcher: builds the grid (feature groups x partitions x pass slices),
// multiplies grid.x by a device-dependent factor to keep all SMs busy, and
// dispatches the full-pass or partial-pass kernel instantiation.
void ComputePairwiseHistogramHalfByte(const TCFeature* features, const TCFeature*,
const ui32 featureCount,
const ui32 halfByteFeatureCount,
const ui32* compressedIndex,
const uint2* pairs,
ui32 /*pairCount*/,
const float* weight,
const TDataPartition* partition,
ui32 partCount,
ui32 histLineSize,
bool fullPass,
float* histogram,
int parallelStreams,
TCudaStream stream) {
// This kernel handles only half-byte features.
Y_ASSERT(featureCount == halfByteFeatureCount);
if (featureCount > 0 && partCount / (fullPass ? 1 : 4) > 0) {
const int blockSize = 384;
dim3 numBlocks;
numBlocks.x = (featureCount + 7) / 8;
// Partial passes process partCount / 4 parts over 3 z-slices each.
numBlocks.y = fullPass ? partCount : partCount / 4;
numBlocks.z = fullPass ? 1 : 3;
// Oversubscribe grid.x with a per-SM multiplier so small feature counts
// still fill the device; the pass itself trims surplus blocks.
const int blocksPerSm = TArchProps::GetMajorVersion() > 3 ? 2 : 1;
const int blockPerFeatureMultiplier = CeilDivide<int>(TArchProps::SMCount() * blocksPerSm * 4, (parallelStreams * numBlocks.x * numBlocks.y * numBlocks.z));
numBlocks.x *= blockPerFeatureMultiplier;
#define NB_HIST(IS_FULL) \
ComputeSplitPropertiesHalfBytePairs < blockSize, IS_FULL > << <numBlocks, blockSize, 0, stream>>>(\
features, featureCount, compressedIndex, pairs,\
weight, partition, histLineSize, histogram);
if (fullPass) {
NB_HIST(true)
} else {
NB_HIST(false)
}
#undef NB_HIST
}
}
}
| 0007e3c635a9a0f88cef8b11dce5400fb4691d1f.cu | #include "pairwise_hist.cuh"
#include "split_properties_helpers.cuh"
#include "compute_pair_hist_loop.cuh"
#include <cooperative_groups.h>
#include <library/cpp/cuda/wrappers/arch.cuh>
#include <catboost/cuda/cuda_util/kernel/instructions.cuh>
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
using namespace cooperative_groups;
namespace NKernel {
//TODO(noxoomo): tune it
// Unroll-factor traits for the pairwise half-byte histogram pass, selected by
// target architecture at compile time. IsFullPass is currently unused (see the
// commented-out alternative in InnerUnroll).
template <bool IsFullPass>
struct THalfBytePairwiseHistUnrollTrait {
    // Inner-loop unroll factor: every pre-Volta architecture uses 2, Volta+ uses 8.
    static constexpr int InnerUnroll() {
#if __CUDA_ARCH__ < 700
        return 2;
#else
        return 8;//IsFullPass ? 4 : 8;
#endif
    }
    // Outer-loop unroll factor: Kepler-class 4, pre-Volta 2, Volta+ 1.
    static constexpr int OuterUnroll() {
#if __CUDA_ARCH__ <= 350
        return 4;
#elif __CUDA_ARCH__ < 700
        return 2;
#else
        return 1;
#endif
    }
};
// Shared-memory accumulator for pairwise histograms over half-byte (4-bit)
// feature bins. BLOCK_SIZE threads cooperatively fill a BLOCK_SIZE * 32 float
// buffer; the TCmpBins policy decides whether a (bin1, bin2) pair lands in the
// "leq" half (offset 0) or the "greater" half (offset 512) of a warp region.
template <int BLOCK_SIZE, class TCmpBins = TCmpBinsWithoutOneHot>
struct TPairHistHalfByte {
// Bin-comparison policy (with or without one-hot features).
TCmpBins CmpBinsFunc;
// This thread's base pointer into the shared-memory histogram buffer.
float* Slice;
// Offset of the calling thread's histogram region: each warp owns a
// 1024-float region (1024 * warpId), and bit 4 of threadIdx.x selects one of
// two interleaved 16-float sub-slices inside it.
__forceinline__ __device__ int SliceOffset() {
const int warpOffset = 1024 * (threadIdx.x / 32);
//we store 4 histograms per block
// x8 feature and x4 histograms, though histStart = blockIdx * 16
return warpOffset + (threadIdx.x & 16);
}
// Cooperatively zeroes the whole BLOCK_SIZE * 32 buffer, then advances Slice
// to this thread's region. `buff` is expected to be block-shared memory
// (the caller in this file passes a __shared__ array).
__forceinline__ __device__ TPairHistHalfByte(float* buff, TCmpBins cmpBinsFunc)
: CmpBinsFunc(cmpBinsFunc) {
Slice = buff;
for (int i = threadIdx.x; i < BLOCK_SIZE * 32; i += BLOCK_SIZE) {
Slice[i] = 0;
}
Slice += SliceOffset();
// Make the zeroed buffer visible to all threads before accumulation starts.
__syncthreads();
}
// Accumulates weight `w` for one pair of compressed bin indices (8 half-byte
// features packed per ui32). Odd threads (flag) process the pair in swapped
// order; `shift` rotates the packed bins so neighbouring threads start at
// different features.
__forceinline__ __device__ void AddPair(const ui32 ci1,
const ui32 ci2,
const float w) {
thread_block_tile<16> groupTile = tiled_partition<16>(this_thread_block());
const bool flag = threadIdx.x & 1;
const int shift = threadIdx.x & 14;
const ui32 bins1 = RotateRight(flag ? ci2 : ci1, 2 * shift);
const ui32 bins2 = RotateRight(flag ? ci1 : ci2, 2 * shift);
// Walk all 8 half-byte features packed in the 32-bit index.
#pragma unroll
for (int i = 0; i < 8; i++) {
const int f = ((shift + 2 * i) & 14);
const int bin1 = (bins1 >> (28 - 4 * i)) & 15;
const int bin2 = (bins2 >> (28 - 4 * i)) & 15;
// Compare() selects the "leq" half (offset 0) or "greater" half (offset 512).
const int tmp = (CmpBinsFunc.Compare(i, bin1, bin2, flag) ? 0 : 512) + f;
const int offset1 = 32 * bin1 + tmp + flag;
const int offset2 = 32 * bin2 + tmp + !flag;
// NOTE(review): the tile syncs appear to order the two scatter updates so
// lanes of the 16-thread tile do not race on the same cell - confirm.
groupTile.sync();
Slice[offset1] += w;
groupTile.sync();
Slice[offset2] += w;
}
}
#if __CUDA_ARCH__ < 700
// Pre-Volta fallback: process the N pairs one at a time.
template <int N>
__forceinline__ __device__ void AddPairs(const ui32* ci1,
const ui32* ci2,
const float* w) {
#pragma unroll
for (int k = 0; k < N; ++k) {
AddPair(ci1[k], ci2[k], w[k]);
}
}
#else
// Volta+ version: batches N pairs so all address computation is unrolled
// ahead of the two tile-synchronized scatter phases (same math as AddPair).
template <int N>
__forceinline__ __device__ void AddPairs(const ui32* ci1,
const ui32* ci2,
const float* w) {
thread_block_tile<16> groupTile = tiled_partition<16>(this_thread_block());
const bool flag = threadIdx.x & 1;
const int shift = threadIdx.x & 14;
ui32 bins1[N];
ui32 bins2[N];
#pragma unroll
for (int k = 0; k < N; ++k) {
bins1[k] = RotateRight(flag ? ci2[k] : ci1[k], 2 * shift);
bins2[k] = RotateRight(flag ? ci1[k] : ci2[k], 2 * shift);
}
#pragma unroll
for (int i = 0; i < 8; i++) {
const int f = ((shift + 2 * i) & 14);
int bin1[N];
int bin2[N];
#pragma unroll
for (int k = 0; k < N; ++k) {
bin1[k] = (bins1[k] >> (28 - 4 * i)) & 15;
bin2[k] = (bins2[k] >> (28 - 4 * i)) & 15;
}
int offset1[N];
int offset2[N];
#pragma unroll
for (int k = 0; k < N; ++k) {
const int tmp = (CmpBinsFunc.Compare(i, bin1[k], bin2[k], flag) ? 0 : 512) + f;
offset1[k] = 32 * bin1[k] + tmp + flag;
offset2[k] = 32 * bin2[k] + tmp + !flag;
}
groupTile.sync();
#pragma unroll
for (int k = 0; k < N; ++k) {
Slice[offset1[k]] += w[k];
}
groupTile.sync();
#pragma unroll
for (int k = 0; k < N; ++k) {
Slice[offset2[k]] += w[k];
}
}
}
#endif
// Collapses the per-warp histograms into a compact per-feature layout at the
// start of the shared buffer; must be reached by the whole block.
__forceinline__ __device__ void Reduce() {
Slice -= SliceOffset();
__syncthreads();
{
// Stage 1: sum all per-warp 1024-float regions into Slice[1024..2047].
const int warpHistSize = 1024;
for (int start = threadIdx.x; start < warpHistSize; start += BLOCK_SIZE) {
float sum = 0;
#pragma unroll 12
for (int i = start; i < 32 * BLOCK_SIZE; i += warpHistSize) {
sum += Slice[i];
}
Slice[warpHistSize + start] = sum;
}
}
__syncthreads();
// Stage 2: the first 256 threads fold the two interleaved sub-slices
// (src[0] + src[16], and src[512] + src[528] for the "greater" half) and
// write the final layout Slice[4 * (16 * f + fold) + {0,1,2,3}].
const int maxFoldCount = 16;
const int fold = (threadIdx.x >> 1) & 15;
const int f = threadIdx.x / 32;
if (threadIdx.x < 256) {
float weightLeq = 0;
float weightGe = 0;
const bool isSecondBin = (threadIdx.x & 1);
if (fold < maxFoldCount) {
const volatile float* __restrict__ src = Slice
+ 1024 //warpHistSize
+ 32 * fold
+ 2 * f
+ isSecondBin;
weightLeq = src[0] + src[16];
weightGe = src[512] + src[528];
Slice[4 * (maxFoldCount * f + fold) + isSecondBin] = weightLeq;
Slice[4 * (maxFoldCount * f + fold) + 2 + isSecondBin] = weightGe;
}
}
__syncthreads();
}
};
// One device-side pass: accumulates the pairwise histogram for up to 8
// half-byte features of one (partition, block-slice) into shared memory, then
// flushes the reduced result to the global histogram with atomics.
template <int BlockSize, int N, int OuterUnroll>
__forceinline__ __device__ void ComputeSplitPropertiesHalfBytePass(const TCFeature* feature, int fCount,
const ui32* __restrict cindex,
const uint2* __restrict pairs,
const float* __restrict weight,
const TDataPartition* partition,
int blockIdx, int blockCount,
float* __restrict histogram,
float* __restrict smem) {
// Cap the number of cooperating blocks so each one sees at least
// BlockSize * N * 8 documents; surplus blocks exit immediately.
const int minDocsPerBlock = BlockSize * N * 8;
const int activeBlockCount = min((partition->Size + minDocsPerBlock - 1) / minDocsPerBlock, blockCount);
if (blockIdx >= activeBlockCount) {
return;
}
#define RUN_COMPUTE_HIST() \
ComputePairHistogram < BlockSize, N, OuterUnroll, THist >(partition->Offset, partition->Size,\
cindex,\
pairs, weight, \
blockIdx, activeBlockCount, \
hist);
// Pick the histogram flavour: one-hot features need the comparing bin policy.
if (HasOneHotFeatures(feature, fCount, reinterpret_cast<int*>(smem))) {
using TCmpBins = TCmpBinsWithOneHot<8>;
TCmpBins cmpBins(feature, fCount);
using THist = TPairHistHalfByte<BlockSize, TCmpBins>;
THist hist(smem, cmpBins);
RUN_COMPUTE_HIST();
} else {
using THist = TPairHistHalfByte<BlockSize>;
THist hist(smem, TCmpBinsWithoutOneHot());
RUN_COMPUTE_HIST();
}
#undef RUN_COMPUTE_HIST
// Flush: 256 threads scatter the reduced shared-memory layout (4 values per
// (feature, fold)) into the global histogram; near-zero values are skipped
// to save atomic traffic.
if (threadIdx.x < 256) {
const int histId = threadIdx.x & 3;
const int fold = (threadIdx.x >> 2) & 15;
const int firstFid = (threadIdx.x >> 6) & 3;
for (int fid = firstFid; fid < fCount; fid += 4) {
const ui32 bfStart = feature[fid].FirstFoldIndex;
if (fold < feature[fid].Folds) {
const int readOffset = 4 * (16 * fid + fold) + histId;
const float val = smem[readOffset];
if (abs(val) > 1e-20f) {
atomicAdd(histogram + 4 * bfStart + 4 * fold + histId, val);
}
}
}
}
}
// Kernel computing pairwise half-byte histograms. Grid layout: x = feature
// group (8 features each) times cooperating blocks per partition; y (and z,
// for partial passes) select the data partition and output histogram line.
template <int BlockSize, bool IsFullPass>
#if __CUDA_ARCH__ >= 700
__launch_bounds__(BlockSize, 2)
#else
__launch_bounds__(BlockSize, 1)
#endif
__global__ void ComputeSplitPropertiesHalfBytePairs(const TCFeature* feature, int fCount,
const ui32* cindex,
const uint2* pairs,
const float* weight,
const TDataPartition* partition,
int histLineSize,
float* histogram) {
// gridDim.x = featureGroups * blocksPerPart; recover the local block index.
const int blocksPerPart = gridDim.x / ((fCount + 7) / 8);
const int localBlockIdx = blockIdx.x % blocksPerPart;
//histogram line size - size of one part hist.
const int featureOffset = (blockIdx.x / blocksPerPart) * 8;
feature += featureOffset;
cindex += feature->Offset;
fCount = min(fCount - featureOffset, 8);
if (IsFullPass) {
partition += blockIdx.y;
histogram += blockIdx.y * histLineSize * 4ULL;
} else {
// Partial pass: part id comes from the partition metadata and the output
// histogram line is ((z + 1) << depth) | y, with depth = log2(gridDim.y).
const int depth = (int)log2((float)gridDim.y);
int partId = GetPairwisePartIdToCalculate(partition);
partition += partId;
histogram += (((blockIdx.z + 1) << depth) | blockIdx.y) * histLineSize * 4ULL;
}
if (partition->Size == 0) {
return;
}
// Shared accumulator consumed by TPairHistHalfByte (BlockSize * 32 floats).
__shared__ float localHist[32 * BlockSize];
const int innerUnroll = THalfBytePairwiseHistUnrollTrait<IsFullPass>::InnerUnroll();
const int outerUnroll = THalfBytePairwiseHistUnrollTrait<IsFullPass>::OuterUnroll();
ComputeSplitPropertiesHalfBytePass<BlockSize, innerUnroll, outerUnroll>(feature, fCount, cindex, pairs,
weight, partition,
localBlockIdx, blocksPerPart,
histogram, &localHist[0]);
}
// Host launcher: builds the grid (feature groups x partitions x pass slices),
// multiplies grid.x by a device-dependent factor to keep all SMs busy, and
// dispatches the full-pass or partial-pass kernel instantiation.
void ComputePairwiseHistogramHalfByte(const TCFeature* features, const TCFeature*,
const ui32 featureCount,
const ui32 halfByteFeatureCount,
const ui32* compressedIndex,
const uint2* pairs,
ui32 /*pairCount*/,
const float* weight,
const TDataPartition* partition,
ui32 partCount,
ui32 histLineSize,
bool fullPass,
float* histogram,
int parallelStreams,
TCudaStream stream) {
// This kernel handles only half-byte features.
Y_ASSERT(featureCount == halfByteFeatureCount);
if (featureCount > 0 && partCount / (fullPass ? 1 : 4) > 0) {
const int blockSize = 384;
dim3 numBlocks;
numBlocks.x = (featureCount + 7) / 8;
// Partial passes process partCount / 4 parts over 3 z-slices each.
numBlocks.y = fullPass ? partCount : partCount / 4;
numBlocks.z = fullPass ? 1 : 3;
// Oversubscribe grid.x with a per-SM multiplier so small feature counts
// still fill the device; the pass itself trims surplus blocks.
const int blocksPerSm = TArchProps::GetMajorVersion() > 3 ? 2 : 1;
const int blockPerFeatureMultiplier = CeilDivide<int>(TArchProps::SMCount() * blocksPerSm * 4, (parallelStreams * numBlocks.x * numBlocks.y * numBlocks.z));
numBlocks.x *= blockPerFeatureMultiplier;
#define NB_HIST(IS_FULL) \
ComputeSplitPropertiesHalfBytePairs < blockSize, IS_FULL > << <numBlocks, blockSize, 0, stream>>>(\
features, featureCount, compressedIndex, pairs,\
weight, partition, histLineSize, histogram);
if (fullPass) {
NB_HIST(true)
} else {
NB_HIST(false)
}
#undef NB_HIST
}
}
}
|
b1f9b3a10224dca84b28bfae84d0f8ddff594e7b.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Test file for conversion between sparse and dense matrices
* Matrices assumed to be generated using generate_sparse_mat.py
*
* cuSPARSE assumes matrices are stored in column major order
*/
#include <hip/hip_runtime.h>
#include <hipsparse.h>
#include <stdio.h>
#include "sparse_conversion.h"
#include "matrix_io.h"
/*
 * Round-trips a matrix read from `filename` through dense -> CSR -> dense and
 * prints every stage so the results can be compared by eye.
 * Fixes: the hipsparseCreate status and both calloc results were previously
 * ignored, so a failed allocation or library init would crash later with a
 * misleading error; they are now checked explicitly.
 */
int main(int argc, char * argv[])
{
    struct Matrix mat;
    if (argc != 2){
        printf("usage ./sparse_conversion_test filename\n");
        exit(1);
    }
    char * filename = argv[1];
    int num_elems;
    read_matrix_dims(filename, &mat, &num_elems);
    mat.vals = (float *)calloc(num_elems, sizeof(float));
    if (mat.vals == NULL) {
        fprintf(stderr, "failed to allocate %d floats for input matrix\n", num_elems);
        exit(1);
    }
    read_matrix_vals(filename, &mat, 1);
    print_matrix(&mat);
    // Initialize cusparse library
    hipsparseHandle_t handle;
    if (hipsparseCreate(&handle) != HIPSPARSE_STATUS_SUCCESS) {
        fprintf(stderr, "failed to initialize the sparse BLAS library\n");
        exit(1);
    }
    // Call conversion func
    struct SparseMat spm;
    convert_to_sparse(&spm, &mat, handle);
    copyDeviceCSR2Host(&spm);
    printf("Num rows: %d\n", mat.dims[2]);
    print_sparse_matrix(&spm);
    struct Matrix mat2;
    mat2.vals = (float *)calloc(num_elems, sizeof(float));
    if (mat2.vals == NULL) {
        fprintf(stderr, "failed to allocate %d floats for output matrix\n", num_elems);
        hipsparseDestroy(handle);
        exit(1);
    }
    // mat2 mirrors the dims/layout of the input so convert_to_dense can fill it.
    for (int i = 0; i < 4; i++)
    {
        mat2.dims[i] = mat.dims[i];
    }
    mat2.is_column_first = mat.is_column_first;
    convert_to_dense(&spm, &mat2, handle);
    print_matrix(&mat2);
    // Free memory
    hipsparseDestroy(handle);
    destroySparseMatrix(&spm);
    destroyMatrix(&mat);
    destroyMatrix(&mat2);
    return 0;
}
| b1f9b3a10224dca84b28bfae84d0f8ddff594e7b.cu | /*
* Test file for conversion between sparse and dense matrices
* Matrices assumed to be generated using generate_sparse_mat.py
*
* cuSPARSE assumes matrices are stored in column major order
*/
#include <cuda.h>
#include <cusparse.h>
#include <stdio.h>
#include "sparse_conversion.h"
#include "matrix_io.h"
/*
 * Round-trips a matrix read from `filename` through dense -> CSR -> dense and
 * prints every stage so the results can be compared by eye.
 * Fixes: the cusparseCreate status and both calloc results were previously
 * ignored, so a failed allocation or library init would crash later with a
 * misleading error; they are now checked explicitly.
 */
int main(int argc, char * argv[])
{
    struct Matrix mat;
    if (argc != 2){
        printf("usage ./sparse_conversion_test filename\n");
        exit(1);
    }
    char * filename = argv[1];
    int num_elems;
    read_matrix_dims(filename, &mat, &num_elems);
    mat.vals = (float *)calloc(num_elems, sizeof(float));
    if (mat.vals == NULL) {
        fprintf(stderr, "failed to allocate %d floats for input matrix\n", num_elems);
        exit(1);
    }
    read_matrix_vals(filename, &mat, 1);
    print_matrix(&mat);
    // Initialize cusparse library
    cusparseHandle_t handle;
    if (cusparseCreate(&handle) != CUSPARSE_STATUS_SUCCESS) {
        fprintf(stderr, "failed to initialize the cuSPARSE library\n");
        exit(1);
    }
    // Call conversion func
    struct SparseMat spm;
    convert_to_sparse(&spm, &mat, handle);
    copyDeviceCSR2Host(&spm);
    printf("Num rows: %d\n", mat.dims[2]);
    print_sparse_matrix(&spm);
    struct Matrix mat2;
    mat2.vals = (float *)calloc(num_elems, sizeof(float));
    if (mat2.vals == NULL) {
        fprintf(stderr, "failed to allocate %d floats for output matrix\n", num_elems);
        cusparseDestroy(handle);
        exit(1);
    }
    // mat2 mirrors the dims/layout of the input so convert_to_dense can fill it.
    for (int i = 0; i < 4; i++)
    {
        mat2.dims[i] = mat.dims[i];
    }
    mat2.is_column_first = mat.is_column_first;
    convert_to_dense(&spm, &mat2, handle);
    print_matrix(&mat2);
    // Free memory
    cusparseDestroy(handle);
    destroySparseMatrix(&spm);
    destroyMatrix(&mat);
    destroyMatrix(&mat2);
    return 0;
}
|
2e374fa9277f9bbf00cd3e39a514b706ed0db607.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@generated from zgemm_fermi.cu normal z -> s, Tue Sep 2 12:38:17 2014
@author Jakub Kurzak
@author Stan Tomov
@author Mark Gates
[zcds]gemm_fermi.cu defines the CPU driver.
[zcds]gemm_fermi_kernels.h defines the block sizes for each precision.
gemm_stencil_defs.h defines types and functions for precision-independent code.
gemm_stencil.cu defines the GPU kernel. It gets included
multiple times, once for each transpose version.
*/
#include "common_magma.h"
#include "commonblas_s.h"
#define PRECISION_s
///////////////////////////////////////////////////////////////////////////////////////////////////
#include "sgemm_fermi_kernels.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
/**
Purpose
-------
SGEMM performs one of the matrix-matrix operations
C = alpha*op( A )*op( B ) + beta*C,
where op( X ) is one of
op( X ) = X or op( X ) = X**T or op( X ) = X**H,
alpha and beta are scalars, and A, B and C are matrices, with
op( A ) an m by k matrix, op( B ) a k by n matrix and C an m by n matrix.
Parameters
----------
@param[in]
TRANSA CHARACTER*1.
On entry, TRANSA specifies the form of op( A ) to be used in
the matrix multiplication as follows:
- = 'N': op( A ) = A.
- = 'T': op( A ) = A**T.
- = 'C': op( A ) = A**H.
@param[in]
TRANSB CHARACTER*1.
On entry, TRANSB specifies the form of op( B ) to be used in
the matrix multiplication as follows:
- = 'N': op( B ) = B.
- = 'T': op( B ) = B**T.
- = 'C': op( B ) = B**H.
@param[in]
m INTEGER.
On entry, M specifies the number of rows of the matrix
op( d_A ) and of the matrix d_C. M must be at least zero.
@param[in]
n INTEGER.
On entry, N specifies the number of columns of the matrix
op( d_B ) and the number of columns of the matrix d_C. N must be
at least zero.
@param[in]
k INTEGER.
On entry, K specifies the number of columns of the matrix
op( d_A ) and the number of rows of the matrix op( d_B ). K must
be at least zero.
@param[in]
alpha REAL
On entry, ALPHA specifies the scalar alpha.
@param[in]
d_A REAL array of DIMENSION ( LDA, ka ), where ka is
k when TRANSA = MagmaNoTrans, and is m otherwise.
Before entry with TRANSA = MagmaNoTrans, the leading m by k
part of the array d_A must contain the matrix d_A, otherwise
the leading k by m part of the array d_A must contain the
matrix d_A.
@param[in]
lda INTEGER.
On entry, LDA specifies the first dimension of A as declared
in the calling (sub) program. When TRANSA = MagmaNoTrans then
LDA must be at least max( 1, m ), otherwise LDA must be at
least max( 1, k ).
@param[in]
d_B REAL array of DIMENSION ( LDB, kb ), where kb is
n when TRANSB = MagmaNoTrans, and is k otherwise.
Before entry with TRANSB = MagmaNoTrans, the leading k by n
part of the array d_B must contain the matrix d_B, otherwise
the leading n by k part of the array d_B must contain the
matrix d_B.
@param[in]
ldb INTEGER.
On entry, LDB specifies the first dimension of d_B as declared
in the calling (sub) program. When TRANSB = MagmaNoTrans then
LDB must be at least max( 1, k ), otherwise LDB must be at
least max( 1, n ).
@param[in]
beta REAL.
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then d_C need not be set on input.
@param[in,out]
d_C REAL array of DIMENSION ( LDC, n ).
Before entry, the leading m by n part of the array d_C must
contain the matrix d_C, except when beta is zero, in which
case d_C need not be set on entry.
On exit, the array d_C is overwritten by the m by n matrix
( alpha*op( d_A )*op( d_B ) + beta*d_C ).
@param[in]
ldc INTEGER.
On entry, LDC specifies the first dimension of d_C as declared
in the calling (sub) program. LDC must be at least
max( 1, m ).
@ingroup magma_sblas3
********************************************************************/
// GPU driver for single-precision GEMM (Fermi+ path). Validates arguments,
// falls back to the Tesla/vendor path for arch < 2.0 or oversized operands,
// optionally binds A/B to 1D textures, then dispatches one of nine kernel
// specializations keyed by (TransA, TransB).
extern "C" void
magmablas_sgemm(
magma_trans_t TRANSA, magma_trans_t TRANSB, magma_int_t m, magma_int_t n, magma_int_t k,
float alpha,
const float *d_A, magma_int_t lda,
const float *d_B, magma_int_t ldb,
float beta,
float *d_C, magma_int_t ldc )
{
// Argument validation; info encodes -(position of the offending argument),
// reported through magma_xerbla below.
magma_int_t info = 0;
if ( TRANSA != MagmaNoTrans && TRANSA != MagmaTrans && TRANSA != MagmaConjTrans )
info = -1;
else if ( TRANSB != MagmaNoTrans && TRANSB != MagmaTrans && TRANSB != MagmaConjTrans )
info = -2;
else if ( m < 0 )
info = -3;
else if ( n < 0 )
info = -4;
else if ( k < 0 )
info = -5;
else if ( TRANSA == MagmaNoTrans ? lda < m : lda < k )
info = -8;
else if ( TRANSB == MagmaNoTrans ? ldb < k : ldb < n )
info = -10;
else if ( ldc < m )
info = -13;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
// --------------------
// call CUDA ARCH 1.x version
// magmablas for [sd] precisions, cublas for [zc] precisions.
#if defined(PRECISION_z) || defined(PRECISION_c)
magma_sgemm(
TRANSA, TRANSB,
m, n, k, alpha, d_A, lda, d_B, ldb, beta, d_C, ldc );
#else
magmablas_sgemm_tesla(
TRANSA, TRANSB, m, n, k, alpha, d_A, lda, d_B, ldb, beta, d_C, ldc );
#endif
return;
}
// --------------------
// CUDA ARCH 2.x (Fermi) version
// Quick return for empty problems.
if ( m <= 0 || n <= 0 || k <= 0 )
return;
size_t offsetA = 0;
size_t offsetB = 0;
// Encode the ops as 0 = NoTrans, 1 = Trans, 2 = ConjTrans for dispatch below.
int TransA = 2, TransB = 2;
if ( TRANSA == MagmaTrans )
TransA = 1;
else if ( TRANSA == MagmaNoTrans )
TransA = 0;
if ( TRANSB == MagmaTrans )
TransB = 1;
else if ( TRANSB == MagmaNoTrans )
TransB = 0;
// Fall back to the vendor GEMM when either operand exceeds the maximum
// size a 1D texture can cover.
size_t sizeA = (size_t) lda * (size_t) (!TransA ? k : m);
size_t sizeB = (size_t) ldb * (size_t) (!TransB ? n : k);
size_t CUBLAS_MAX_1DBUF_SIZE = ((1 << 27) - 512);
if ( sizeA >= CUBLAS_MAX_1DBUF_SIZE ||
sizeB >= CUBLAS_MAX_1DBUF_SIZE )
{
magma_sgemm( TRANSA, TRANSB, m, n, k, alpha,
d_A, lda, d_B, ldb,
beta, d_C, ldc );
return;
}
#ifdef TEXTURE_1D
// Set textures parameters
tex_ref_A.normalized = false;
tex_ref_A.filterMode = hipFilterModePoint;
tex_ref_A.addressMode[0] = hipAddressModeClamp;
tex_ref_B.normalized = false;
tex_ref_B.filterMode = hipFilterModePoint;
tex_ref_B.addressMode[0] = hipAddressModeClamp;
// Bind A and B to texture references
hipError_t err;
err = hipBindTexture(&offsetA, tex_ref_A, d_A, sizeA*sizeof(float));
if ( err != hipSuccess ) {
fprintf( stderr, "cannot bind A to texture: %s (%d)\n", hipGetErrorString(err), err );
return;
}
err = hipBindTexture(&offsetB, tex_ref_B, d_B, sizeB*sizeof(float));
if ( err != hipSuccess ) {
fprintf( stderr, "cannot bind B to texture: %s (%d)\n", hipGetErrorString(err), err );
hipUnbindTexture( tex_ref_A );
return;
}
#endif
// Set up grids
dim3 dimBlock(DIM_X, DIM_Y);
// Convert the byte offsets returned by the bind calls to element offsets.
offsetA = offsetA/sizeof(d_A[0]);
offsetB = offsetB/sizeof(d_B[0]);
// Dispatch: one specialized kernel per (TransA, TransB) combination, each
// with its own blocking factors BLK_M_xy / BLK_N_xy.
if ( TransA == 0 && TransB == 0 ) {
dim3 dimGrid( (m - 1)/BLK_M_nn + 1,
(n - 1)/BLK_N_nn + 1 );
hipLaunchKernelGGL(( sgemm_kernel_fermi_nn), dim3(dimGrid), dim3(dimBlock), 0, magma_stream ,
m, n, k, d_A, lda, d_B, ldb, d_C, ldc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 0 && TransB == 1 ) {
dim3 dimGrid( (m - 1)/BLK_M_nt + 1,
(n - 1)/BLK_N_nt + 1 );
hipLaunchKernelGGL(( sgemm_kernel_fermi_nt), dim3(dimGrid), dim3(dimBlock), 0, magma_stream ,
m, n, k, d_A, lda, d_B, ldb, d_C, ldc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 0 && TransB == 2 ) {
dim3 dimGrid( (m - 1)/BLK_M_nc + 1,
(n - 1)/BLK_N_nc + 1 );
hipLaunchKernelGGL(( sgemm_kernel_fermi_nc), dim3(dimGrid), dim3(dimBlock), 0, magma_stream ,
m, n, k, d_A, lda, d_B, ldb, d_C, ldc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 1 && TransB == 0 ) {
dim3 dimGrid( (m - 1)/BLK_M_tn + 1,
(n - 1)/BLK_N_tn + 1 );
hipLaunchKernelGGL(( sgemm_kernel_fermi_tn), dim3(dimGrid), dim3(dimBlock), 0, magma_stream ,
m, n, k, d_A, lda, d_B, ldb, d_C, ldc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 1 && TransB == 1 ) {
dim3 dimGrid( (m - 1)/BLK_M_tt + 1,
(n - 1)/BLK_N_tt + 1 );
hipLaunchKernelGGL(( sgemm_kernel_fermi_tt), dim3(dimGrid), dim3(dimBlock), 0, magma_stream ,
m, n, k, d_A, lda, d_B, ldb, d_C, ldc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 1 && TransB == 2 ) {
dim3 dimGrid( (m - 1)/BLK_M_tc + 1,
(n - 1)/BLK_N_tc + 1 );
hipLaunchKernelGGL(( sgemm_kernel_fermi_tc), dim3(dimGrid), dim3(dimBlock), 0, magma_stream ,
m, n, k, d_A, lda, d_B, ldb, d_C, ldc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 2 && TransB == 0 ) {
dim3 dimGrid( (m - 1)/BLK_M_cn + 1,
(n - 1)/BLK_N_cn + 1 );
hipLaunchKernelGGL(( sgemm_kernel_fermi_cn), dim3(dimGrid), dim3(dimBlock), 0, magma_stream ,
m, n, k, d_A, lda, d_B, ldb, d_C, ldc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 2 && TransB == 1 ) {
dim3 dimGrid( (m - 1)/BLK_M_ct + 1,
(n - 1)/BLK_N_ct + 1 );
hipLaunchKernelGGL(( sgemm_kernel_fermi_ct), dim3(dimGrid), dim3(dimBlock), 0, magma_stream ,
m, n, k, d_A, lda, d_B, ldb, d_C, ldc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 2 && TransB == 2 ) {
dim3 dimGrid( (m - 1)/BLK_M_cc + 1,
(n - 1)/BLK_N_cc + 1 );
hipLaunchKernelGGL(( sgemm_kernel_fermi_cc), dim3(dimGrid), dim3(dimBlock), 0, magma_stream ,
m, n, k, d_A, lda, d_B, ldb, d_C, ldc, alpha, beta,
(int)offsetA, (int)offsetB );
}
// NOTE(review): these unbinds are outside #ifdef TEXTURE_1D - confirm that
// tex_ref_A/tex_ref_B are declared when TEXTURE_1D is undefined.
hipUnbindTexture( tex_ref_A );
hipUnbindTexture( tex_ref_B );
}
///////////////////////////////////////////////////////////////////////////////////////////////////
| 2e374fa9277f9bbf00cd3e39a514b706ed0db607.cu | /*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@generated from zgemm_fermi.cu normal z -> s, Tue Sep 2 12:38:17 2014
@author Jakub Kurzak
@author Stan Tomov
@author Mark Gates
[zcds]gemm_fermi.cu defines the CPU driver.
[zcds]gemm_fermi_kernels.h defines the block sizes for each precision.
gemm_stencil_defs.h defines types and functions for precision-independent code.
gemm_stencil.cu defines the GPU kernel. It gets included
multiple times, once for each transpose version.
*/
#include "common_magma.h"
#include "commonblas_s.h"
#define PRECISION_s
///////////////////////////////////////////////////////////////////////////////////////////////////
#include "sgemm_fermi_kernels.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
/**
Purpose
-------
SGEMM performs one of the matrix-matrix operations
C = alpha*op( A )*op( B ) + beta*C,
where op( X ) is one of
op( X ) = X or op( X ) = X**T or op( X ) = X**H,
alpha and beta are scalars, and A, B and C are matrices, with
op( A ) an m by k matrix, op( B ) a k by n matrix and C an m by n matrix.
Parameters
----------
@param[in]
TRANSA CHARACTER*1.
On entry, TRANSA specifies the form of op( A ) to be used in
the matrix multiplication as follows:
- = 'N': op( A ) = A.
- = 'T': op( A ) = A**T.
- = 'C': op( A ) = A**H.
@param[in]
TRANSB CHARACTER*1.
On entry, TRANSB specifies the form of op( B ) to be used in
the matrix multiplication as follows:
- = 'N': op( B ) = B.
- = 'T': op( B ) = B**T.
- = 'C': op( B ) = B**H.
@param[in]
m INTEGER.
On entry, M specifies the number of rows of the matrix
op( d_A ) and of the matrix d_C. M must be at least zero.
@param[in]
n INTEGER.
On entry, N specifies the number of columns of the matrix
op( d_B ) and the number of columns of the matrix d_C. N must be
at least zero.
@param[in]
k INTEGER.
On entry, K specifies the number of columns of the matrix
op( d_A ) and the number of rows of the matrix op( d_B ). K must
be at least zero.
@param[in]
alpha REAL
On entry, ALPHA specifies the scalar alpha.
@param[in]
d_A REAL array of DIMENSION ( LDA, ka ), where ka is
k when TRANSA = MagmaNoTrans, and is m otherwise.
Before entry with TRANSA = MagmaNoTrans, the leading m by k
part of the array d_A must contain the matrix d_A, otherwise
the leading k by m part of the array d_A must contain the
matrix d_A.
@param[in]
lda INTEGER.
On entry, LDA specifies the first dimension of A as declared
in the calling (sub) program. When TRANSA = MagmaNoTrans then
LDA must be at least max( 1, m ), otherwise LDA must be at
least max( 1, k ).
@param[in]
d_B REAL array of DIMENSION ( LDB, kb ), where kb is
n when TRANSB = MagmaNoTrans, and is k otherwise.
Before entry with TRANSB = MagmaNoTrans, the leading k by n
part of the array d_B must contain the matrix d_B, otherwise
the leading n by k part of the array d_B must contain the
matrix d_B.
@param[in]
ldb INTEGER.
On entry, LDB specifies the first dimension of d_B as declared
in the calling (sub) program. When TRANSB = MagmaNoTrans then
LDB must be at least max( 1, k ), otherwise LDB must be at
least max( 1, n ).
@param[in]
beta REAL.
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then d_C need not be set on input.
@param[in,out]
d_C REAL array of DIMENSION ( LDC, n ).
Before entry, the leading m by n part of the array d_C must
contain the matrix d_C, except when beta is zero, in which
case d_C need not be set on entry.
On exit, the array d_C is overwritten by the m by n matrix
( alpha*op( d_A )*op( d_B ) + beta*d_C ).
@param[in]
ldc INTEGER.
On entry, LDC specifies the first dimension of d_C as declared
in the calling (sub) program. LDC must be at least
max( 1, m ).
@ingroup magma_sblas3
********************************************************************/
extern "C" void
magmablas_sgemm(
    magma_trans_t TRANSA, magma_trans_t TRANSB, magma_int_t m, magma_int_t n, magma_int_t k,
    float alpha,
    const float *d_A, magma_int_t lda,
    const float *d_B, magma_int_t ldb,
    float beta,
    float *d_C, magma_int_t ldc )
{
    // ---- argument checking (contract documented in the header comment) ----
    magma_int_t info = 0;
    if      ( TRANSA != MagmaNoTrans && TRANSA != MagmaTrans && TRANSA != MagmaConjTrans )
        info = -1;
    else if ( TRANSB != MagmaNoTrans && TRANSB != MagmaTrans && TRANSB != MagmaConjTrans )
        info = -2;
    else if ( m < 0 )
        info = -3;
    else if ( n < 0 )
        info = -4;
    else if ( k < 0 )
        info = -5;
    else if ( TRANSA == MagmaNoTrans ? lda < m : lda < k )
        info = -8;
    else if ( TRANSB == MagmaNoTrans ? ldb < k : ldb < n )
        info = -10;
    else if ( ldc < m )
        info = -13;

    if (info != 0) {
        magma_xerbla( __func__, -(info) );
        return;  //info;
    }

    // Pre-Fermi devices (arch 1.x): fall back to the Tesla implementation
    // (magmablas for [sd] precisions, cublas for [zc] precisions).
    magma_int_t arch = magma_getdevice_arch();
    if ( arch < 200 ) {
        #if defined(PRECISION_z) || defined(PRECISION_c)
        magma_sgemm(
            TRANSA, TRANSB,
            m, n, k, alpha, d_A, lda, d_B, ldb, beta, d_C, ldc );
        #else
        magmablas_sgemm_tesla(
            TRANSA, TRANSB, m, n, k, alpha, d_A, lda, d_B, ldb, beta, d_C, ldc );
        #endif
        return;
    }

    // --------------------
    // CUDA ARCH 2.x (Fermi) version. Quick return for empty problems.
    if ( m <= 0 || n <= 0 || k <= 0 )
        return;

    size_t offsetA = 0;
    size_t offsetB = 0;

    // Encode the transpose options: 0 = NoTrans, 1 = Trans, 2 = ConjTrans.
    int TransA = 2, TransB = 2;
    if      ( TRANSA == MagmaTrans )
        TransA = 1;
    else if ( TRANSA == MagmaNoTrans )
        TransA = 0;

    if      ( TRANSB == MagmaTrans )
        TransB = 1;
    else if ( TRANSB == MagmaNoTrans )
        TransB = 0;

    // Element counts of A and B as seen by the kernels; matrices too large
    // to bind to a 1D texture are routed through the vendor GEMM instead.
    size_t sizeA = (size_t) lda * (size_t) (!TransA ? k : m);
    size_t sizeB = (size_t) ldb * (size_t) (!TransB ? n : k);

    size_t CUBLAS_MAX_1DBUF_SIZE = ((1 << 27) - 512);
    if ( sizeA >= CUBLAS_MAX_1DBUF_SIZE ||
         sizeB >= CUBLAS_MAX_1DBUF_SIZE )
    {
        magma_sgemm( TRANSA, TRANSB, m, n, k, alpha,
                     d_A, lda, d_B, ldb,
                     beta, d_C, ldc );
        return;
    }

    #ifdef TEXTURE_1D
    // Set textures parameters
    tex_ref_A.normalized = false;
    tex_ref_A.filterMode = cudaFilterModePoint;
    tex_ref_A.addressMode[0] = cudaAddressModeClamp;

    tex_ref_B.normalized = false;
    tex_ref_B.filterMode = cudaFilterModePoint;
    tex_ref_B.addressMode[0] = cudaAddressModeClamp;

    // Bind A and B to texture references
    cudaError_t err;
    err = cudaBindTexture(&offsetA, tex_ref_A, d_A, sizeA*sizeof(float));
    if ( err != cudaSuccess ) {
        fprintf( stderr, "cannot bind A to texture: %s (%d)\n", cudaGetErrorString(err), err );
        return;
    }
    err = cudaBindTexture(&offsetB, tex_ref_B, d_B, sizeB*sizeof(float));
    if ( err != cudaSuccess ) {
        fprintf( stderr, "cannot bind B to texture: %s (%d)\n", cudaGetErrorString(err), err );
        cudaUnbindTexture( tex_ref_A );
        return;
    }
    #endif

    // Set up grids and launch the Fermi kernel matching (TransA, TransB).
    // The byte offsets returned by cudaBindTexture are converted to element
    // offsets for the kernels (they stay 0 when textures are not used).
    dim3 dimBlock(DIM_X, DIM_Y);

    offsetA = offsetA/sizeof(d_A[0]);
    offsetB = offsetB/sizeof(d_B[0]);

    if ( TransA == 0 && TransB == 0 ) {
        dim3 dimGrid( (m - 1)/BLK_M_nn + 1,
                      (n - 1)/BLK_N_nn + 1 );
        sgemm_kernel_fermi_nn<<< dimGrid, dimBlock, 0, magma_stream >>>(
            m, n, k, d_A, lda, d_B, ldb, d_C, ldc, alpha, beta,
            (int)offsetA, (int)offsetB );
    }
    else if ( TransA == 0 && TransB == 1 ) {
        dim3 dimGrid( (m - 1)/BLK_M_nt + 1,
                      (n - 1)/BLK_N_nt + 1 );
        sgemm_kernel_fermi_nt<<< dimGrid, dimBlock, 0, magma_stream >>>(
            m, n, k, d_A, lda, d_B, ldb, d_C, ldc, alpha, beta,
            (int)offsetA, (int)offsetB );
    }
    else if ( TransA == 0 && TransB == 2 ) {
        dim3 dimGrid( (m - 1)/BLK_M_nc + 1,
                      (n - 1)/BLK_N_nc + 1 );
        sgemm_kernel_fermi_nc<<< dimGrid, dimBlock, 0, magma_stream >>>(
            m, n, k, d_A, lda, d_B, ldb, d_C, ldc, alpha, beta,
            (int)offsetA, (int)offsetB );
    }
    else if ( TransA == 1 && TransB == 0 ) {
        dim3 dimGrid( (m - 1)/BLK_M_tn + 1,
                      (n - 1)/BLK_N_tn + 1 );
        sgemm_kernel_fermi_tn<<< dimGrid, dimBlock, 0, magma_stream >>>(
            m, n, k, d_A, lda, d_B, ldb, d_C, ldc, alpha, beta,
            (int)offsetA, (int)offsetB );
    }
    else if ( TransA == 1 && TransB == 1 ) {
        dim3 dimGrid( (m - 1)/BLK_M_tt + 1,
                      (n - 1)/BLK_N_tt + 1 );
        sgemm_kernel_fermi_tt<<< dimGrid, dimBlock, 0, magma_stream >>>(
            m, n, k, d_A, lda, d_B, ldb, d_C, ldc, alpha, beta,
            (int)offsetA, (int)offsetB );
    }
    else if ( TransA == 1 && TransB == 2 ) {
        dim3 dimGrid( (m - 1)/BLK_M_tc + 1,
                      (n - 1)/BLK_N_tc + 1 );
        sgemm_kernel_fermi_tc<<< dimGrid, dimBlock, 0, magma_stream >>>(
            m, n, k, d_A, lda, d_B, ldb, d_C, ldc, alpha, beta,
            (int)offsetA, (int)offsetB );
    }
    else if ( TransA == 2 && TransB == 0 ) {
        dim3 dimGrid( (m - 1)/BLK_M_cn + 1,
                      (n - 1)/BLK_N_cn + 1 );
        sgemm_kernel_fermi_cn<<< dimGrid, dimBlock, 0, magma_stream >>>(
            m, n, k, d_A, lda, d_B, ldb, d_C, ldc, alpha, beta,
            (int)offsetA, (int)offsetB );
    }
    else if ( TransA == 2 && TransB == 1 ) {
        dim3 dimGrid( (m - 1)/BLK_M_ct + 1,
                      (n - 1)/BLK_N_ct + 1 );
        sgemm_kernel_fermi_ct<<< dimGrid, dimBlock, 0, magma_stream >>>(
            m, n, k, d_A, lda, d_B, ldb, d_C, ldc, alpha, beta,
            (int)offsetA, (int)offsetB );
    }
    else if ( TransA == 2 && TransB == 2 ) {
        dim3 dimGrid( (m - 1)/BLK_M_cc + 1,
                      (n - 1)/BLK_N_cc + 1 );
        sgemm_kernel_fermi_cc<<< dimGrid, dimBlock, 0, magma_stream >>>(
            m, n, k, d_A, lda, d_B, ldb, d_C, ldc, alpha, beta,
            (int)offsetA, (int)offsetB );
    }

    // Unbind only when textures were actually bound above; the original code
    // called cudaUnbindTexture unconditionally, mismatching the guarded bind.
    #ifdef TEXTURE_1D
    cudaUnbindTexture( tex_ref_A );
    cudaUnbindTexture( tex_ref_B );
    #endif
}
///////////////////////////////////////////////////////////////////////////////////////////////////
|
87b6d018ac10ade2cdc994ed7c1796f1ceedc0a8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define COALESCED_NUM 16
#define blockDimX 256
#define blockDimY 1
#define gridDimX (gridDim.x)
#define gridDimY (gridDim.y)
#define idx (blockIdx.x*blockDimX+threadIdx.x)
#define idy (blockIdx.y*blockDimY+threadIdx.y)
#define bidy (blockIdx.y)
#define bidx (blockIdx.x)
#define tidx (threadIdx.x)
#define tidy (threadIdx.y)
#define merger_y 1
#define coalesced_idy (bidy/(COALESCED_NUM/(merger_y*blockDimY))*COALESCED_NUM)
#define A(y,x) A[(y)*WIDTH_A+(x)]
#define B(y,x) B[(y)*WIDTH_B+(x)]
#define C(y,x) C[(y)*WIDTH_C+(x)]
#define WIDTH_C 2048
#define WIDTH_B 16
#define WIDTH_A (2048+16)
// 1-D (row-wise) convolution of A with the h x 16 filter B, one output point
// per thread. Machine-generated no-op arithmetic ((-1)*0, tidx+0) and a
// redundant duplicate __syncthreads() from the original have been removed;
// the computation is unchanged.
// NOTE(review): width/height/w are unused here — presumably kept for a
// generator-fixed signature; confirm against the callers.
__global__ void conv(float * A, float * B, float * C, int width, int height, int w, int h)
{
    // One row (16 taps) of filter coefficients for the current filter row j.
    __shared__ float shared_1[16];
    // Input row tile: 16-element left halo + blockDimX (256) points.
    __shared__ float shared_0[272];

    float sum = 0.0f;

    // Walk over the h filter rows, staging one input row and one coefficient
    // row in shared memory per iteration.
    for (int j = 0; j < h; ++j)
    {
        // Stage the left halo (first 16 threads only) ...
        if (tidx < 16)
        {
            shared_0[tidx] = A((idy - j) + h, idx);
        }
        // ... and the main tile, shifted right by the halo width.
        shared_0[tidx + 16] = A((idy - j) + h, idx + 16);
        __syncthreads();

        // Stage the 16 filter coefficients of row j.
        if (tidx < 16)
        {
            shared_1[tidx] = B(j, tidx);
        }
        __syncthreads();

        // 16-tap dot product for this filter row.
        #pragma unroll
        for (int k = 0; k < 16; ++k)
        {
            sum += shared_0[(tidx - k) + 16] * shared_1[k];
        }

        // One barrier suffices before the next iteration overwrites the
        // shared tiles (the generated code emitted two in a row).
        __syncthreads();
    }

    C(idy, idx) = sum;
}
| 87b6d018ac10ade2cdc994ed7c1796f1ceedc0a8.cu | #define COALESCED_NUM 16
#define blockDimX 256
#define blockDimY 1
#define gridDimX (gridDim.x)
#define gridDimY (gridDim.y)
#define idx (blockIdx.x*blockDimX+threadIdx.x)
#define idy (blockIdx.y*blockDimY+threadIdx.y)
#define bidy (blockIdx.y)
#define bidx (blockIdx.x)
#define tidx (threadIdx.x)
#define tidy (threadIdx.y)
#define merger_y 1
#define coalesced_idy (bidy/(COALESCED_NUM/(merger_y*blockDimY))*COALESCED_NUM)
#define A(y,x) A[(y)*WIDTH_A+(x)]
#define B(y,x) B[(y)*WIDTH_B+(x)]
#define C(y,x) C[(y)*WIDTH_C+(x)]
#define WIDTH_C 2048
#define WIDTH_B 16
#define WIDTH_A (2048+16)
// 1-D (row-wise) convolution of A with the h x 16 filter B, one output point
// per thread. Machine-generated no-op arithmetic ((-1)*0, tidx+0) and a
// redundant duplicate __syncthreads() from the original have been removed;
// the computation is unchanged.
// NOTE(review): width/height/w are unused here — presumably kept for a
// generator-fixed signature; confirm against the callers.
__global__ void conv(float * A, float * B, float * C, int width, int height, int w, int h)
{
    // One row (16 taps) of filter coefficients for the current filter row j.
    __shared__ float shared_1[16];
    // Input row tile: 16-element left halo + blockDimX (256) points.
    __shared__ float shared_0[272];

    float sum = 0.0f;

    // Walk over the h filter rows, staging one input row and one coefficient
    // row in shared memory per iteration.
    for (int j = 0; j < h; ++j)
    {
        // Stage the left halo (first 16 threads only) ...
        if (tidx < 16)
        {
            shared_0[tidx] = A((idy - j) + h, idx);
        }
        // ... and the main tile, shifted right by the halo width.
        shared_0[tidx + 16] = A((idy - j) + h, idx + 16);
        __syncthreads();

        // Stage the 16 filter coefficients of row j.
        if (tidx < 16)
        {
            shared_1[tidx] = B(j, tidx);
        }
        __syncthreads();

        // 16-tap dot product for this filter row.
        #pragma unroll
        for (int k = 0; k < 16; ++k)
        {
            sum += shared_0[(tidx - k) + 16] * shared_1[k];
        }

        // One barrier suffices before the next iteration overwrites the
        // shared tiles (the generated code emitted two in a row).
        __syncthreads();
    }

    C(idy, idx) = sum;
}
|
d452f788fbab47c7cb159a6eb602be312c175c51.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "constants.h"
#include "cuda_kernels.cuh"
#include "models/models.cuh"
#include "estimators/estimators.cuh"
/* Description of the cuda_calc_curve_values function
* ===================================================
*
* This function calls one of the fitting curve functions depending on the input
* parameter model_id. The fitting curve function calculates the values of
* the fitting curves and its partial derivatives with respect to the fitting
* curve parameters. Multiple fits are calculated in parallel.
*
* Parameters:
*
* parameters: An input vector of concatenated sets of model parameters.
*
* n_fits: The number of fits.
*
* n_points: The number of data points per fit.
*
* n_parameters: The number of curve parameters.
*
* finished: An input vector which allows the calculation to be skipped for single
* fits.
*
* values: An output vector of concatenated sets of model function values.
*
* derivatives: An output vector of concatenated sets of model function partial
* derivatives.
*
* n_fits_per_block: The number of fits calculated by each thread block.
*
* n_blocks_per_fit: The number of thread blocks used to calculate one fit.
*
* model_id: The fitting model ID.
*
* chunk_index: The data chunk index.
*
* user_info: An input vector containing user information.
*
* user_info_size: The size of user_info in bytes.
*
* Calling the cuda_calc_curve_values function
* ===========================================
*
* When calling the function, the blocks and threads must be set up correctly,
* as shown in the following example code.
*
* dim3 threads(1, 1, 1);
* dim3 blocks(1, 1, 1);
*
* threads.x = n_points * n_fits_per_block / n_blocks_per_fit;
* blocks.x = n_fits / n_fits_per_block * n_blocks_per_fit;
*
* cuda_calc_curve_values<<< blocks, threads >>>(
* parameters,
* n_fits,
* n_points,
* n_parameters,
* finished,
* values,
* derivatives,
* n_fits_per_block,
* n_blocks_per_fit,
* model_id,
* chunk_index,
* user_info,
* user_info_size);
*
*/
__global__ void cuda_calc_curve_values(
    REAL const * parameters,
    int const n_fits,
    int const n_points,
    int const n_parameters,
    int const * finished,
    REAL * values,
    REAL * derivatives,
    int const n_fits_per_block,
    int const n_blocks_per_fit,
    ModelID const model_id,
    int const chunk_index,
    char * user_info,
    std::size_t const user_info_size)
{
    // See the block comment above for the parameter contract. Thread layout:
    // blockDim.x == n_points * n_fits_per_block / n_blocks_per_fit, i.e. each
    // thread evaluates the model at one data point of one fit.

    // Index of the fit within this block (several fits may share one block).
    int const fit_in_block = threadIdx.x / n_points;
    // Global index of the fit handled by this thread.
    int const fit_index = blockIdx.x * n_fits_per_block / n_blocks_per_fit + fit_in_block;
    // When one fit is split over several blocks, fit_piece selects the slice
    // of the fit's data points covered by this block.
    int const fit_piece = blockIdx.x % n_blocks_per_fit;
    // Data-point index within the fit.
    int const point_index = threadIdx.x - fit_in_block * n_points + fit_piece * blockDim.x;
    int const first_point = fit_index * n_points;

    // Per-fit views into the concatenated vectors.
    REAL * current_values = values + first_point;
    REAL * current_derivatives = derivatives + first_point * n_parameters;
    REAL const * current_parameters = parameters + fit_index * n_parameters;

    // Skip fits that are already finished, and pad threads past the last point.
    if (finished[fit_index])
        return;
    if (point_index >= n_points)
        return;

    // Dispatch to the model selected by model_id; writes the function value
    // and the partial derivatives for this point.
    calculate_model(model_id, current_parameters, n_fits, n_points, current_values, current_derivatives, point_index, fit_index, chunk_index, user_info, user_info_size);
}
/* Description of the sum_up_floats function
* ==========================================
*
* This function sums up a vector of REAL values and stores the result at the
* first place of the vector.
*
* Parameters:
*
* shared_array: An input vector of REAL values. The vector must be stored
* on the shared memory of the GPU. The size of this vector must be a
* power of two. Use zero padding to extend it to the next highest
* power of 2 greater than the number of elements.
*
* size: The number of elements in the input vector considering zero padding.
*
* Calling the sum_up_floats function
* ==================================
*
* This __device__ function can be only called from a __global__ function or
* an other __device__ function. When calling the function, the blocks and threads
* of the __global__ function must be set up correctly, as shown in the following
* example code.
*
* dim3 threads(1, 1, 1);
* dim3 blocks(1, 1, 1);
*
* threads.x = size * vectors_per_block;
* blocks.x = n_vectors / vectors_per_block;
*
* global_function<<< blocks, threads >>>(parameter1, ...);
*
*/
// Tree-reduce one fit's segment of a shared-memory array in place; the total
// ends up in element 0 of the segment. `size` must be a power of two (zero-
// padded beyond the real element count) and every thread of the block must
// reach the internal barriers.
__device__ void sum_up_floats(volatile REAL* shared_array, int const size)
{
    // Position of this thread inside its fit's size-element segment.
    int const segment = threadIdx.x / size;
    int const lane = threadIdx.x - segment * size;

    __syncthreads();

    // Halve the active range each step, folding the upper half onto the
    // lower half, until only element 0 remains.
    for (int half = size >> 1; half > 0; half >>= 1)
    {
        if (lane < half)
        {
            shared_array[lane] += shared_array[lane + half];
        }
        __syncthreads();
    }
}
/* Description of the cuda_sum_chi_square_subtotals function
* ==========================================================
*
* This function sums up chi_square subtotals in place.
*
* Parameters:
*
* chi_squares: A vector of chi-square values for multiple fits.
* in: subtotals
* out: totals
*
* n_blocks_per_fit: The number of blocks used to calculate one fit. It is
* equivalent to the number of subtotals per fit.
*
* n_fits: The number of fits.
*
* finished: An input vector which allows the calculation to be skipped
* for single fits.
*
* Calling the cuda_sum_chi_square_subtotals function
* ==================================================
*
* When calling the function, the blocks and threads must be set up correctly,
* as shown in the following example code.
*
* dim3 threads(1, 1, 1);
* dim3 blocks(1, 1, 1);
*
* int const example_value = 256;
*
* threads.x = min(n_fits, example_value);
* blocks.x = int(ceil(REAL(n_fits) / REAL(threads.x)));
*
* cuda_sum_chi_square_subtotals<<< blocks, threads >>>(
* chi_squares,
* n_blocks_per_fit,
* n_fits,
* finished);
*
*/
__global__ void cuda_sum_chi_square_subtotals(
    REAL * chi_squares,
    REAL const * subtotals,
    int const n_blocks_per_fit,
    int const n_fits,
    int const * finished)
{
    // One thread per fit; the subtotals of a fit are spaced n_fits apart.
    int const fit_index = blockIdx.x * blockDim.x + threadIdx.x;

    if (fit_index >= n_fits || finished[fit_index])
        return;

    // Accumulate in double precision to limit rounding error before storing
    // the total back as REAL.
    double total = 0.0;
    for (int block = 0; block < n_blocks_per_fit; block++)
    {
        total += subtotals[fit_index + block * n_fits];
    }
    chi_squares[fit_index] = total;
}
/* Description of the cuda_check_fit_improvement function
* =======================================================
*
* This function checks after each calculation of chi-square values whether the
* currently calculated chi-square values are lower than chi-square values calculated
* in the previous iteration and sets the iteration_failed flags.
*
* Parameters:
*
* iteration_failed: An output vector of flags which indicate whether the fitting
* process improved the fit in the last iteration. If yes it is set
* to 0 otherwise to 1.
*
* chi_squares: An input vector of chi-square values for multiple fits.
*
* prev_chi_squares: An input vector of chi-square values for multiple fits calculated
* in the previous iteration.
*
* n_fits: The number of fits.
*
* finished: An input vector which allows the calculation to be skipped
* for single fits.
*
* Calling the cuda_check_fit_improvement function
* ===============================================
*
* When calling the function, the blocks and threads must be set up correctly,
* as shown in the following example code.
*
* dim3 threads(1, 1, 1);
* dim3 blocks(1, 1, 1);
*
* int const example_value = 256;
*
* threads.x = min(n_fits, example_value);
* blocks.x = int(ceil(REAL(n_fits) / REAL(threads.x)));
*
* cuda_check_fit_improvement <<< blocks, threads >>>(
* iteration_failed,
* chi_squares,
* prev_chi_squares,
* n_fits,
* finished);
*
*/
__global__ void cuda_check_fit_improvement(
    int * iteration_failed,
    REAL const * chi_squares,
    REAL const * prev_chi_squares,
    int const n_fits,
    int const * finished)
{
    // One thread per fit.
    int const fit_index = blockIdx.x * blockDim.x + threadIdx.x;

    if (fit_index >= n_fits || finished[fit_index])
        return;

    REAL const prev_chi_square = prev_chi_squares[fit_index];

    // An iteration counts as failed only if a previous chi-square exists
    // (non-zero sentinel) and the new value did not drop below it. A NaN
    // chi-square compares false against any number and thus also fails.
    bool const initialized = prev_chi_square != 0.;
    bool const improved = chi_squares[fit_index] < prev_chi_square;

    iteration_failed[fit_index] = (initialized && !improved) ? 1 : 0;
}
/* Description of the cuda_calculate_chi_squares function
* ========================================================
*
* This function calls one of the estimator functions depending on the input
* parameter estimator_id. The estimator function calculates the chi-square values.
* The calculation is performed for multiple fits in parallel.
*
* Parameters:
*
* chi_squares: An output vector of concatenated chi-square values.
*
* states: An output vector of values which indicate whether the fitting process
* was carried out correctly or which problem occurred. In this function
* it is only used for MLE. It is set to 3 if a fitting curve value is
* negative. This vector includes the states for multiple fits.
*
* data: An input vector of data for multiple fits
*
* values: An input vector of concatenated sets of model function values.
*
* weights: An input vector of values for weighting chi-square, gradient and hessian,
* while using LSE
*
* n_points: The number of data points per fit.
*
* n_fits: The number of fits.
*
* estimator_id: The estimator ID.
*
* finished: An input vector which allows the calculation to be skipped for single
* fits.
*
* n_fits_per_block: The number of fits calculated by each thread block.
*
* user_info: An input vector containing user information.
*
* user_info_size: The size of user_info in bytes.
*
* Calling the cuda_calculate_chi_squares function
* ================================================
*
* When calling the function, the blocks and threads must be set up correctly,
* as shown in the following example code.
*
* dim3 threads(1, 1, 1);
* dim3 blocks(1, 1, 1);
*
* threads.x = power_of_two_n_points * n_fits_per_block / n_blocks_per_fit;
* blocks.x = n_fits / n_fits_per_block * n_blocks_per_fit;
*
* int const shared_size = sizeof(REAL) * threads.x;
*
* cuda_calculate_chi_squares<<< blocks, threads, shared_size >>>(
* chi_squares,
* states,
* data,
* values,
* weights,
* n_points,
* n_fits,
* estimator_id,
* finished,
* n_fits_per_block,
* user_info,
* user_info_size);
*
*/
__global__ void cuda_calculate_chi_squares(
    REAL * chi_squares,
    int * states,
    REAL const * data,
    REAL const * values,
    REAL const * weights,
    int const n_points,
    int const n_fits,
    int const estimator_id,
    int const * finished,
    int const n_fits_per_block,
    char * user_info,
    std::size_t const user_info_size)
{
    // Shared-memory slots per fit; blockDim.x is the power-of-two padded
    // point count times n_fits_per_block (see the block comment above).
    int const shared_size = blockDim.x / n_fits_per_block;
    // Index of the fit within this block.
    int const fit_in_block = threadIdx.x / shared_size;
    // Slice index when one fit is spread over several blocks.
    int const fit_piece = blockIdx.x / n_fits;
    int const fit_index = blockIdx.x * n_fits_per_block + fit_in_block - fit_piece * n_fits;
    int const point_index = threadIdx.x - fit_in_block * shared_size + fit_piece * shared_size;
    int const first_point = fit_index * n_points;

    if (finished[fit_index])
    {
        return;
    }

    // Per-fit views into the concatenated vectors (weights may be absent).
    REAL const * current_data = &data[first_point];
    REAL const * current_weight = weights ? &weights[first_point] : NULL;
    REAL const * current_value = &values[first_point];
    int * current_state = &states[fit_index];

    // Dynamically sized shared buffer (third kernel launch parameter); one
    // partial chi-square term per thread.
    extern __shared__ REAL extern_array[];
    volatile REAL * shared_chi_square
        = extern_array + (fit_in_block - fit_piece) * shared_size;

    // Zero-pad the slots beyond the last data point so the power-of-two
    // reduction in sum_up_floats stays correct.
    if (point_index >= n_points)
    {
        shared_chi_square[point_index] = 0.;
    }
    if (point_index < n_points)
    {
        // Estimator-specific per-point chi-square contribution; may also set
        // the fit state (e.g. 3 for MLE with a negative model value).
        calculate_chi_square(
            estimator_id,
            shared_chi_square,
            point_index,
            current_data,
            current_value,
            current_weight,
            current_state,
            user_info,
            user_info_size);
    }

    // Reduce this fit piece's terms and store the per-piece subtotal
    // (subtotals across pieces are combined by cuda_sum_chi_square_subtotals).
    shared_chi_square += fit_piece * shared_size;
    sum_up_floats(shared_chi_square, shared_size);
    chi_squares[fit_index + fit_piece * n_fits] = shared_chi_square[0];
}
/* Description of the cuda_sum_gradient_subtotals function
* ========================================================
*
* This function sums up the chi-square gradient subtotals in place.
*
* Parameters:
*
* gradients: A vector of gradient values for multiple fits.
* in: subtotals
* out: totals
*
* n_blocks_per_fit: The number of blocks used to calculate one fit
*
* n_fits: The number of fits.
*
* n_parameters_to_fit: The number of model parameters, that are not held fixed.
*
* skip: An input vector which allows the calculation to be skipped for single fits.
*
* finished: An input vector which allows the calculation to be skipped for single
* fits.
*
* Calling the cuda_sum_gradient_subtotals function
* ================================================
*
* When calling the function, the blocks and threads must be set up correctly,
* as shown in the following example code.
*
* dim3 threads(1, 1, 1);
* dim3 blocks(1, 1, 1);
*
* int const example_value = 256;
*
* threads.x = min(n_fits, example_value);
* blocks.x = int(ceil(REAL(n_fits) / REAL(threads.x)));
*
* cuda_sum_gradient_subtotals<<< blocks,threads >>>(
* gradients,
* n_blocks_per_fit,
* n_fits,
* n_parameters_to_fit,
* skip,
* finished);
*
*/
__global__ void cuda_sum_gradient_subtotals(
    REAL * gradients,
    REAL const * subtotals,
    int const n_blocks_per_fit,
    int const n_fits,
    int const n_parameters,
    int const * skip,
    int const * finished)
{
    // One thread per (fit, parameter) pair.
    int const gradient_index = blockIdx.x * blockDim.x + threadIdx.x;
    int const fit_index = gradient_index / n_parameters;

    if (fit_index >= n_fits || finished[fit_index] || skip[fit_index])
        return;

    // Subtotals of consecutive blocks are spaced one full gradient set apart.
    int const stride = n_fits * n_parameters;

    // Accumulate in double precision to limit rounding error before storing
    // the total back as REAL.
    double total = 0.0;
    for (int block = 0; block < n_blocks_per_fit; block++)
    {
        total += subtotals[gradient_index + block * stride];
    }
    gradients[gradient_index] = total;
}
/* Description of the cuda_calculate_gradients function
* =====================================================
*
* This function calls one of the gradient functions depending on the input
* parameter estimator_id. The gradient function calculates the gradient values
* of the chi-square function calling a __device__ function. The calculation is
* performed for multiple fits in parallel.
*
* Parameters:
*
* gradients: An output vector of concatenated sets of gradient vector values.
*
* data: An input vector of data for multiple fits
*
* values: An input vector of concatenated sets of model function values.
*
* derivatives: An input vector of concatenated sets of model function partial
* derivatives.
*
* weights: An input vector of values for weighting chi-square, gradient and hessian,
* while using LSE
*
* n_points: The number of data points per fit.
*
* n_fits: The number of fits.
*
* n_parameters: The number of fitting curve parameters.
*
* n_parameters_to_fit: The number of fitting curve parameters, that are not held
* fixed.
*
* parameters_to_fit_indices: An input vector of indices of fitting curve parameters,
* that are not held fixed.
*
* estimator_id: The estimator ID.
*
* finished: An input vector which allows the calculation to be skipped for single
* fits.
*
* skip: An input vector which allows the calculation to be skipped for single fits.
*
* n_fits_per_block: The number of fits calculated by each thread block.
*
* user_info: An input vector containing user information.
*
* user_info_size: The number of elements in user_info.
*
* Calling the cuda_calculate_gradients function
* =============================================
*
* When calling the function, the blocks and threads must be set up correctly,
* as shown in the following example code.
*
* dim3 threads(1, 1, 1);
* dim3 blocks(1, 1, 1);
*
* threads.x = power_of_two_n_points * n_fits_per_block / n_blocks_per_fit;
* blocks.x = n_fits / n_fits_per_block * n_blocks_per_fit;
*
* int const shared_size = sizeof(REAL) * threads.x;
*
* cuda_calculate_gradients<<< blocks, threads, shared_size >>>(
* gradients,
* data,
* values,
* derivatives,
* weight,
* n_points,
* n_fits,
* n_parameters,
* n_parameters_to_fit,
* parameters_to_fit_indices,
* estimator_id,
* finished,
* skip,
* n_fits_per_block,
* user_info,
* user_info_size);
*
*/
__global__ void cuda_calculate_gradients(
    REAL * gradients,
    REAL const * data,
    REAL const * values,
    REAL const * derivatives,
    REAL const * weights,
    int const n_points,
    int const n_fits,
    int const n_parameters,
    int const n_parameters_to_fit,
    int const * parameters_to_fit_indices,
    int const estimator_id,
    int const * finished,
    int const * skip,
    int const n_fits_per_block,
    char * user_info,
    std::size_t const user_info_size)
{
    // Shared-memory slots per fit (power-of-two padded point count).
    int const shared_size = blockDim.x / n_fits_per_block;
    // Index of the fit within this block.
    int const fit_in_block = threadIdx.x / shared_size;
    // Slice index when one fit is spread over several blocks.
    int const fit_piece = blockIdx.x / n_fits;
    int const fit_index = blockIdx.x * n_fits_per_block + fit_in_block - fit_piece * n_fits;
    int const point_index = threadIdx.x - fit_in_block * shared_size + fit_piece * shared_size;
    int const first_point = fit_index * n_points;

    if (finished[fit_index] || skip[fit_index])
    {
        return;
    }

    // Per-fit views into the concatenated vectors (weights may be absent).
    REAL const * current_data = &data[first_point];
    REAL const * current_weight = weights ? &weights[first_point] : NULL;
    REAL const * current_derivative = &derivatives[first_point * n_parameters];
    REAL const * current_value = &values[first_point];

    // Dynamically sized shared buffer (third kernel launch parameter); one
    // per-point gradient term per thread.
    extern __shared__ REAL extern_array[];
    volatile REAL * shared_gradient = extern_array + (fit_in_block - fit_piece) * shared_size;

    // Zero-pad slots beyond the last data point for the power-of-two reduction.
    if (point_index >= n_points)
    {
        shared_gradient[point_index] = 0.;
    }

    // One reduction per fitted (non-fixed) parameter.
    for (int parameter_index = 0; parameter_index < n_parameters_to_fit; parameter_index++)
    {
        if (point_index < n_points)
        {
            // Location of the model's partial derivative w.r.t. this
            // parameter at this data point.
            int const derivative_index = parameters_to_fit_indices[parameter_index] * n_points + point_index;

            // Estimator-specific per-point gradient contribution.
            calculate_gradient(
                estimator_id,
                shared_gradient,
                point_index,
                derivative_index,
                current_data,
                current_value,
                current_derivative,
                current_weight,
                user_info,
                user_info_size);
        }
        // Reduce this fit piece's terms and store the per-piece subtotal
        // (pieces are combined by cuda_sum_gradient_subtotals).
        sum_up_floats(shared_gradient + fit_piece * shared_size, shared_size);
        gradients[(fit_index * n_parameters_to_fit + parameter_index) + fit_piece * n_fits * n_parameters_to_fit]
            = shared_gradient[fit_piece * shared_size];
    }
}
/* Description of the cuda_calculate_hessians function
* ====================================================
*
* This function calls one of the hessian functions depending on the input
* parameter estimator_id. The hessian function calculates the hessian matrix
* values of the chi-square function calling a __device__ function. The
* calculation is performed for multiple fits in parallel.
*
* Parameters:
*
* hessians: An output vector of concatenated sets of hessian matrix values.
*
* data: An input vector of data for multiple fits
*
* values: An input vector of concatenated sets of model function values.
*
* derivatives: An input vector of concatenated sets of model function partial
* derivatives.
*
* weights: An input vector of values for weighting chi-square, gradient and hessian,
* while using LSE
*
* n_fits: The number of fits.
*
* n_points: The number of data points per fit.
*
* n_parameters: The number of fitting curve parameters.
*
* n_parameters_to_fit: The number of fitting curve parameters, that are not held
* fixed.
*
* parameters_to_fit_indices: An input vector of indices of fitting curve parameters,
* that are not held fixed.
*
* estimator_id: The estimator ID.
*
* skip: An input vector which allows the calculation to be skipped for single fits.
*
* finished: An input vector which allows the calculation to be skipped for single
* fits.
*
* user_info: An input vector containing user information.
*
* user_info_size: The size of user_info in bytes.
*
* Calling the cuda_calculate_hessians function
* ============================================
*
* When calling the function, the blocks and threads must be set up correctly,
* as shown in the following example code.
*
* dim3 threads(1, 1, 1);
* dim3 blocks(1, 1, 1);
*
* int n_unique_values = n_parameters_to_fit * (n_parameters_to_fit + 1) / 2;
*
* threads.x
* = min(n_unique_values * n_fits_per_block, max_threads_per_block);
*
* blocks.y
* = threads.x / max_threads_per_block
* + int((threads.x % max_threads_per_block) > 0);
*
* blocks.x
* = n_fits / n_fits_per_block
* + int((n_fits % n_fits_per_block) > 0);
*
* cuda_calculate_hessians<<< blocks, threads >>>(
* hessians,
* data,
* values,
* derivatives,
* weight,
* n_fits,
* n_points,
* n_parameters,
* n_parameters_to_fit,
* parameters_to_fit_indices,
* estimator_id,
* skip,
* finished,
* user_info,
* user_info_size);
*
*/
__global__ void cuda_calculate_hessians(
    REAL * hessians,
    REAL const * data,
    REAL const * values,
    REAL const * derivatives,
    REAL const * weights,
    int const n_fits,
    int const n_points,
    int const n_parameters,
    int const n_parameters_to_fit,
    int const * parameters_to_fit_indices,
    int const estimator_id,
    int const * skip,
    int const * finished,
    char * user_info,
    std::size_t const user_info_size)
{
    // Number of unique entries of the symmetric hessian (upper triangle
    // including the diagonal); one thread computes one unique entry.
    int const n_unique_values = n_parameters_to_fit * (n_parameters_to_fit + 1) / 2;
    int const n_fits_per_block = blockDim.x * gridDim.y / n_unique_values;

    // With a multi-row grid (gridDim.y > 1) a single fit spans several
    // blocks, so the fit offset inside the block is zero.
    int const fit_in_block
        = (gridDim.y == 1)
        ? (blockIdx.y * blockDim.x + threadIdx.x) / n_unique_values
        : 0;
    int const fit_index = blockIdx.x * n_fits_per_block + fit_in_block;

    if (fit_index >= n_fits || finished[fit_index] || skip[fit_index])
    {
        return;
    }

    int const first_point = fit_index * n_points;

    // Linear index of the unique hessian entry handled by this thread.
    int const parameter_index = (blockIdx.y * blockDim.x + threadIdx.x) - fit_in_block * n_unique_values;
    if (parameter_index >= n_unique_values)
    {
        return;
    }

    // Invert the row-major linearization of the upper triangle: recover the
    // row index i from the linear index (closed-form via the quadratic
    // formula, evaluated in double and truncated) ...
    int const parameter_index_i
        = n_parameters_to_fit
        - 1.
        - ::floor(
            .5*(
                std::sqrt(
                    - 8. * (parameter_index - n_parameters_to_fit)
                    + 4. * n_parameters_to_fit * (n_parameters_to_fit - 1.)
                    - 7.
                ) - 1.
            )
        );
    // ... and the column index j of the entry.
    int const parameter_index_j
        = parameter_index
        + parameter_index_i
        - parameter_index_i*(n_parameters_to_fit - (parameter_index_i - 1) / 2.);

    // Per-fit views into the concatenated vectors (weights may be absent).
    REAL * current_hessian = &hessians[fit_index * n_parameters_to_fit * n_parameters_to_fit];
    REAL const * current_data = &data[first_point];
    REAL const * current_weight = weights ? &weights[first_point] : NULL;
    REAL const * current_derivative = &derivatives[first_point*n_parameters];
    REAL const * current_value = &values[first_point];

    // Flat positions of entry (i,j) and its mirror (j,i), plus the start of
    // the two relevant derivative columns.
    int const hessian_index_ij = parameter_index_i * n_parameters_to_fit + parameter_index_j;
    int const hessian_index_ji = parameter_index_j * n_parameters_to_fit + parameter_index_i;
    int const derivative_index_i = parameters_to_fit_indices[parameter_index_i] * n_points;
    int const derivative_index_j = parameters_to_fit_indices[parameter_index_j] * n_points;

    // Accumulate the estimator-specific per-point terms serially over all
    // data points, in double precision to limit rounding error.
    double sum = 0.0;
    for (int point_index = 0; point_index < n_points; point_index++)
    {
        calculate_hessian(
            estimator_id,
            &sum,
            point_index,
            derivative_index_i + point_index,
            derivative_index_j + point_index,
            current_data,
            current_value,
            current_derivative,
            current_weight,
            user_info,
            user_info_size);
    }

    // The hessian is symmetric: write both (i,j) and (j,i).
    current_hessian[hessian_index_ij] = sum;
    current_hessian[hessian_index_ji] = sum;
}
/* Description of the cuda_modify_step_widths function
* ====================================================
*
* This function modifies the diagonal elements of the hessian matrices by multiplying
* them by the factor (1 + lambda). This operation controls the step widths of the
* iteration. If the last iteration failed, before modifying the hessian, the diagonal
* elements of the hessian are calculated back to represent unmodified values.
*
* hessians: An input and output vector of hessian matrices, which are modified by
* the lambda values.
*
* lambdas: An input vector of values for modifying the hessians.
*
* n_parameters: The number of fitting curve parameters.
*
* iteration_failed: An input vector which indicates whether the previous iteration
* failed.
*
* finished: An input vector which allows the calculation to be skipped for single fits.
*
* n_fits_per_block: The number of fits calculated by each thread block.
*
* Calling the cuda_modify_step_widths function
* ============================================
*
* When calling the function, the blocks and threads must be set up correctly,
* as shown in the following example code.
*
* dim3 threads(1, 1, 1);
* dim3 blocks(1, 1, 1);
*
* threads.x = n_parameters_to_fit * n_fits_per_block;
* blocks.x = n_fits / n_fits_per_block;
*
* cuda_modify_step_width<<< blocks, threads >>>(
* hessians,
* lambdas,
* n_parameters,
* iteration_failed,
* finished,
* n_fits_per_block);
*
*/
__global__ void cuda_modify_step_widths(
    REAL * hessians,
    REAL const * lambdas,
    REAL * scaling_vectors,
    unsigned int const n_parameters,
    int const * iteration_failed,
    int const * finished,
    int const n_fits_per_block)
{
    // Each block handles n_fits_per_block fits; one thread per diagonal entry.
    int const params_per_fit = blockDim.x / n_fits_per_block;
    int const fit_in_block = threadIdx.x / params_per_fit;
    int const parameter_index = threadIdx.x - fit_in_block * params_per_fit;
    int const fit_index = blockIdx.x * n_fits_per_block + fit_in_block;

    if (finished[fit_index])
    {
        return;
    }

    // Per-fit views into the concatenated vectors.
    REAL * hessian = &hessians[fit_index * n_parameters * n_parameters];
    REAL * scaling_vector = &scaling_vectors[fit_index * n_parameters];
    REAL const & lambda = lambdas[fit_index];

    int const diagonal_index = parameter_index * n_parameters + parameter_index;

    // The previous iteration failed: remove its damping term first.
    // NOTE(review): the /10. presumably undoes the prior term after lambda
    // was scaled up by 10 elsewhere — confirm against the lambda update logic.
    if (iteration_failed[fit_index])
    {
        hessian[diagonal_index] -= scaling_vector[parameter_index] * lambda / 10.;
    }

    // Marquardt scaling of the damping term — adaptive variant: keep the
    // largest diagonal value seen so far.
    scaling_vector[parameter_index]
        = max(scaling_vector[parameter_index], hessian[diagonal_index]);

    // Alternative strategies (disabled):
    // continuous scaling:
    //scaling_vector[parameter_index] = hessian[diagonal_index];
    // initial scaling:
    //if (scaling_vector[parameter_index] == 0.)
    //    scaling_vector[parameter_index] = hessian[diagonal_index];

    // Apply the damped step-width modification to the diagonal.
    hessian[diagonal_index] += scaling_vector[parameter_index] * lambda;
}
/* Description of the cuda_update_parameters function
* ===================================================
*
* This function stores the fitting curve parameter values in prev_parameters and
* updates them after each iteration.
*
* Parameters:
*
* parameters: An input and output vector of concatenated sets of model
* parameters.
*
* prev_parameters: An input and output vector of concatenated sets of model
* parameters calculated by the previous iteration.
*
* deltas: An input vector of concatenated delta values, which are added to the
* model parameters.
*
* n_parameters_to_fit: The number of fitted curve parameters.
*
* parameters_to_fit_indices: The indices of fitted curve parameters.
*
* finished: An input vector which allows the parameter update to be skipped for single fits.
*
* n_fits_per_block: The number of fits calculated by each threadblock.
*
* Calling the cuda_update_parameters function
* ===========================================
*
* When calling the function, the blocks and threads must be set up correctly,
* as shown in the following example code.
*
* dim3 threads(1, 1, 1);
* dim3 blocks(1, 1, 1);
*
* threads.x = n_parameters * n_fits_per_block;
* blocks.x = n_fits / n_fits_per_block;
*
* cuda_update_parameters<<< blocks, threads >>>(
* parameters,
* prev_parameters,
* deltas,
* n_parameters_to_fit,
* parameters_to_fit_indices,
* finished,
* n_fits_per_block);
*
*/
__global__ void cuda_update_parameters(
    REAL * parameters,
    REAL * prev_parameters,
    REAL const * deltas,
    int const n_parameters_to_fit,
    int const * parameters_to_fit_indices,
    int const * finished,
    int const n_fits_per_block)
{
    // One thread per (fit, parameter) pair; each block processes n_fits_per_block fits.
    int const n_parameters = blockDim.x / n_fits_per_block;
    int const fit_in_block = threadIdx.x / n_parameters;
    int const parameter_index = threadIdx.x - fit_in_block * n_parameters;
    int const fit_index = blockIdx.x * n_fits_per_block + fit_in_block;

    // BUG FIX: "&parameters" had been mangled into the invalid token
    // "¶meters" (HTML-entity corruption of "&para"); restored the
    // address-of expression so the code compiles and indexes the vector.
    REAL * current_parameters = &parameters[fit_index * n_parameters];
    REAL * current_prev_parameters = &prev_parameters[fit_index * n_parameters];

    // Back up the current parameters unconditionally, so a failed iteration
    // can later be rolled back to these values.
    current_prev_parameters[parameter_index] = current_parameters[parameter_index];

    if (finished[fit_index])
    {
        return;
    }
    // Threads mapped to fixed (non-fitted) parameters only perform the backup above.
    if (parameter_index >= n_parameters_to_fit)
    {
        return;
    }

    // Apply the solved delta to the corresponding fitted parameter.
    REAL const * current_deltas = &deltas[fit_index * n_parameters_to_fit];
    current_parameters[parameters_to_fit_indices[parameter_index]] += current_deltas[parameter_index];
}
/* Description of the cuda_update_state_after_solving function
* ===========================================================
*
* This function interprets the singular flag vector of the equation system
* solving function according to this LM implementation.
*
* Parameters:
*
* n_fits: The number of fits.
*
* solution_info: An input vector used to report whether a fit is singular.
*
* finished: An input vector which allows the calculation to be skipped for
* single fits.
*
* gpufit_states: An output vector of values which indicate whether the fitting
* process was carried out correctly or which problem occurred.
* If a hessian matrix of a fit is singular, it is set to 2.
*
* Calling the cuda_update_state_after_solving function
* ====================================================
*
* When calling the function, the blocks and threads must be set up correctly,
* as shown in the following example code.
*
* dim3 threads(1, 1, 1);
* dim3 blocks(1, 1, 1);
*
* int const example_value = 256;
*
* threads.x = min(n_fits, example_value);
* blocks.x = int(ceil(REAL(n_fits) / REAL(threads.x)));
*
* cuda_update_state_after_solving<<< blocks, threads >>>(
* n_fits,
* solution_info,
* finished,
* gpufit_states);
*
*/
__global__ void cuda_update_state_after_solving(
    int const n_fits,
    int const * cublas_info,
    int const * finished,
    int * states)
{
    // One thread per fit; out-of-range threads of the last block drop out first.
    int const fit_index = blockIdx.x * blockDim.x + threadIdx.x;
    if (fit_index >= n_fits || finished[fit_index])
    {
        return;
    }

    // A non-zero solver info value means the equation system was singular.
    int const solver_status = cublas_info[fit_index];
    if (solver_status != 0)
    {
        states[fit_index] = SINGULAR_HESSIAN;
    }
}
/* Description of the cuda_check_for_convergence function
* =======================================================
*
* This function checks after each iteration whether the fits are converged or not.
* It also checks whether the set maximum number of iterations is reached.
*
* Parameters:
*
* finished: An input and output vector which allows the calculation to be skipped
* for single fits.
*
* tolerance: The tolerance value for the convergence set by user.
*
* states: An output vector of values which indicate whether the fitting process
* was carried out correctly or which problem occurred. If the maximum
* number of iterations is reached without converging, it is set to 1. If
* the fit converged it keeps its initial value of 0.
*
* chi_squares: An input vector of chi-square values for multiple fits. Used for the
* convergence check.
*
* prev_chi_squares: An input vector of chi-square values for multiple fits calculated
* in the previous iteration. Used for the convergence check.
*
* iteration: The value of the current iteration. It is compared to the value
* of the maximum number of iteration set by user.
*
* max_n_iterations: The maximum number of iterations set by user.
*
* n_fits: The number of fits.
*
* Calling the cuda_check_for_convergence function
* ===============================================
*
* When calling the function, the blocks and threads must be set up correctly,
* as shown in the following example code.
*
* dim3 threads(1, 1, 1);
* dim3 blocks(1, 1, 1);
*
* int const example_value = 256;
*
* threads.x = min(n_fits, example_value);
* blocks.x = int(ceil(REAL(n_fits) / REAL(threads.x)));
*
* cuda_check_for_convergence<<< blocks, threads >>>(
* finished,
* tolerance,
* states,
* chi_squares,
* prev_chi_squares,
* iteration,
* max_n_iterations,
* n_fits);
*
*/
__global__ void cuda_check_for_convergence(
    int * finished,
    REAL const tolerance,
    int * states,
    REAL const * chi_squares,
    REAL const * prev_chi_squares,
    int const iteration,
    int const max_n_iterations,
    int const n_fits)
{
    // One thread per fit.
    int const fit_index = blockIdx.x * blockDim.x + threadIdx.x;
    if (fit_index >= n_fits || finished[fit_index])
    {
        return;
    }

    REAL const chi_square = chi_squares[fit_index];
    // Converged when the chi-square change is small relative to max(1, chi-square).
    bool const converged
        = abs(chi_square - prev_chi_squares[fit_index])
        < tolerance * max(1., chi_square);

    if (converged)
    {
        finished[fit_index] = 1;
    }
    else if (iteration == max_n_iterations - 1)
    {
        // Out of iterations without reaching convergence.
        states[fit_index] = MAX_ITERATION;
    }
}
/* Description of the cuda_evaluate_iteration function
* ====================================================
*
* This function evaluates the current iteration.
* - It marks a fit as finished if a problem occurred.
* - It saves the needed number of iterations if a fit finished.
* - It checks if all fits finished
*
* Parameters:
*
* all_finished: An output flag, that indicates whether all fits finished.
*
* n_iterations: An output vector of needed iterations for each fit.
*
* finished: An input and output vector which allows the evaluation to be skipped
* for single fits
*
* iteration: The values of the current iteration.
*
* states: An input vector of values which indicate whether the fitting process
* was carried out correctly or which problem occurred.
*
* n_fits: The number of fits.
*
* Calling the cuda_evaluate_iteration function
* ============================================
*
* When calling the function, the blocks and threads must be set up correctly,
* as shown in the following example code.
*
* dim3 threads(1, 1, 1);
* dim3 blocks(1, 1, 1);
*
* int const example_value = 256;
*
* threads.x = min(n_fits, example_value);
* blocks.x = int(ceil(REAL(n_fits) / REAL(threads.x)));
*
* cuda_evaluate_iteration<<< blocks, threads >>>(
* all_finished,
* n_iterations,
* finished,
* iteration,
* states,
* n_fits);
*
*/
__global__ void cuda_evaluate_iteration(
    int * all_finished,
    int * n_iterations,
    int * finished,
    int const iteration,
    int const * states,
    int const n_fits)
{
    // One thread per fit.
    int const index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= n_fits)
    {
        return;
    }

    // A fit whose state reports a problem stops iterating immediately.
    if (states[index] != CONVERGED)
    {
        finished[index] = 1;
    }

    if (finished[index])
    {
        // Record the iteration count once, on the first pass after finishing.
        if (n_iterations[index] == 0)
        {
            n_iterations[index] = iteration + 1;
        }
    }
    else
    {
        // At least one fit is still running: keep the outer loop going.
        *all_finished = 0;
    }
}
/* Description of the cuda_prepare_next_iteration function
* ========================================================
*
* This function prepares the next iteration. It either updates previous
* chi-square values or sets currently calculated chi-square values and
* parameters to values calculated by the previous iteration. This function also
* updates lambda values.
*
* Parameters:
*
* lambdas: An output vector of values which control the step width by modifying
* the diagonal elements of the hessian matrices.
*
* chi_squares: An input and output vector of chi-square values for multiple fits.
*
* prev_chi_squares: An input and output vector of chi-square values for multiple
* fits calculated in the previous iteration.
*
* parameters: An output vector of concatenated sets of model parameters.
*
* prev_parameters: An input vector of concatenated sets of model parameters
* calculated in the previous iteration.
*
* n_fits: The number of fits.
*
* n_parameters: The number of fitting curve parameters.
*
* Calling the cuda_prepare_next_iteration function
* ================================================
*
* When calling the function, the blocks and threads must be set up correctly,
* as shown in the following example code.
*
* dim3 threads(1, 1, 1);
* dim3 blocks(1, 1, 1);
*
* int const example_value = 256;
*
* threads.x = min(n_fits, example_value);
* blocks.x = int(ceil(REAL(n_fits) / REAL(threads.x)));
*
* cuda_prepare_next_iteration<<< blocks, threads >>>(
* lambdas,
* chi_squares,
* prev_chi_squares,
* parameters,
* prev_parameters,
* n_fits,
* n_parameters);
*
*/
__global__ void cuda_prepare_next_iteration(
    REAL * lambdas,
    REAL * chi_squares,
    REAL * prev_chi_squares,
    REAL * parameters,
    REAL const * prev_parameters,
    int const n_fits,
    int const n_parameters)
{
    // One thread per fit.
    int const fit_index = blockIdx.x * blockDim.x + threadIdx.x;
    if (fit_index >= n_fits)
    {
        return;
    }
    if (chi_squares[fit_index] < prev_chi_squares[fit_index])
    {
        // Improvement: accept the step and reduce the damping factor.
        lambdas[fit_index] *= 0.1f;
        prev_chi_squares[fit_index] = chi_squares[fit_index];
    }
    else
    {
        // No improvement: increase damping and roll back chi-square and parameters
        // to the values of the previous iteration.
        // FIX: was the double literal "10." which forced a double-precision
        // multiply in single-precision (REAL == float) builds; 10.0f is exactly
        // representable, so results are identical in both precisions.
        lambdas[fit_index] *= 10.0f;
        chi_squares[fit_index] = prev_chi_squares[fit_index];
        for (int iparameter = 0; iparameter < n_parameters; iparameter++)
        {
            parameters[fit_index * n_parameters + iparameter]
                = prev_parameters[fit_index * n_parameters + iparameter];
        }
    }
}
| d452f788fbab47c7cb159a6eb602be312c175c51.cu | #include "constants.h"
#include "cuda_kernels.cuh"
#include "models/models.cuh"
#include "estimators/estimators.cuh"
/* Description of the cuda_calc_curve_values function
* ===================================================
*
* This function calls one of the fitting curve functions depending on the input
* parameter model_id. The fitting curve function calculates the values of
* the fitting curves and its partial derivatives with respect to the fitting
* curve parameters. Multiple fits are calculated in parallel.
*
* Parameters:
*
* parameters: An input vector of concatenated sets of model parameters.
*
* n_fits: The number of fits.
*
* n_points: The number of data points per fit.
*
* n_parameters: The number of curve parameters.
*
* finished: An input vector which allows the calculation to be skipped for single
* fits.
*
* values: An output vector of concatenated sets of model function values.
*
* derivatives: An output vector of concatenated sets of model function partial
* derivatives.
*
* n_fits_per_block: The number of fits calculated by each thread block.
*
* n_blocks_per_fit: The number of thread blocks used to calculate one fit.
*
* model_id: The fitting model ID.
*
* chunk_index: The data chunk index.
*
* user_info: An input vector containing user information.
*
* user_info_size: The size of user_info in bytes.
*
* Calling the cuda_calc_curve_values function
* ===========================================
*
* When calling the function, the blocks and threads must be set up correctly,
* as shown in the following example code.
*
* dim3 threads(1, 1, 1);
* dim3 blocks(1, 1, 1);
*
* threads.x = n_points * n_fits_per_block / n_blocks_per_fit;
* blocks.x = n_fits / n_fits_per_block * n_blocks_per_fit;
*
* cuda_calc_curve_values<<< blocks, threads >>>(
* parameters,
* n_fits,
* n_points,
* n_parameters,
* finished,
* values,
* derivatives,
* n_fits_per_block,
* n_blocks_per_fit,
* model_id,
* chunk_index,
* user_info,
* user_info_size);
*
*/
__global__ void cuda_calc_curve_values(
    REAL const * parameters,
    int const n_fits,
    int const n_points,
    int const n_parameters,
    int const * finished,
    REAL * values,
    REAL * derivatives,
    int const n_fits_per_block,
    int const n_blocks_per_fit,
    ModelID const model_id,
    int const chunk_index,
    char * user_info,
    std::size_t const user_info_size)
{
    // Locate this thread's fit and data point within the launch grid.
    int const fit_in_block = threadIdx.x / n_points;
    int const fit_piece = blockIdx.x % n_blocks_per_fit;
    int const fit_index = blockIdx.x * n_fits_per_block / n_blocks_per_fit + fit_in_block;
    int const point_index
        = threadIdx.x - fit_in_block * n_points + fit_piece * blockDim.x;

    // Skip finished fits and threads past the end of the point range.
    if (finished[fit_index] || point_index >= n_points)
    {
        return;
    }

    // Evaluate the model function and its partial derivatives for this point,
    // dispatched on model_id.
    int const first_point = fit_index * n_points;
    calculate_model(
        model_id,
        parameters + fit_index * n_parameters,
        n_fits,
        n_points,
        values + first_point,
        derivatives + first_point * n_parameters,
        point_index,
        fit_index,
        chunk_index,
        user_info,
        user_info_size);
}
/* Description of the sum_up_floats function
* ==========================================
*
* This function sums up a vector of REAL values and stores the result at the
* first place of the vector.
*
* Parameters:
*
* shared_array: An input vector of REAL values. The vector must be stored
* on the shared memory of the GPU. The size of this vector must be a
* power of two. Use zero padding to extend it to the next highest
* power of 2 greater than the number of elements.
*
* size: The number of elements in the input vector considering zero padding.
*
* Calling the sum_up_floats function
* ==================================
*
* This __device__ function can be only called from a __global__ function or
* an other __device__ function. When calling the function, the blocks and threads
* of the __global__ function must be set up correctly, as shown in the following
* example code.
*
* dim3 threads(1, 1, 1);
* dim3 blocks(1, 1, 1);
*
* threads.x = size * vectors_per_block;
* blocks.x = n_vectors / vectors_per_block;
*
* global_function<<< blocks, threads >>>(parameter1, ...);
*
*/
__device__ void sum_up_floats(volatile REAL* shared_array, int const size)
{
    // Lane of this thread inside its fit's sub-array; size must be a power of
    // two (the caller zero-pads to the next power of two).
    int const lane = threadIdx.x % size;

    __syncthreads();
    // Pairwise tree reduction; the total ends up in shared_array[0].
    for (int half = size >> 1; half > 0; half >>= 1)
    {
        if (lane < half)
        {
            shared_array[lane] += shared_array[lane + half];
        }
        __syncthreads();
    }
}
/* Description of the cuda_sum_chi_square_subtotals function
* ==========================================================
*
* This function sums up chi_square subtotals in place.
*
* Parameters:
*
* chi_squares: A vector of chi-square values for multiple fits.
* in: subtotals
* out: totals
*
* n_blocks_per_fit: The number of blocks used to calculate one fit. It is
* equivalent to the number of subtotals per fit.
*
* n_fits: The number of fits.
*
* finished: An input vector which allows the calculation to be skipped
* for single fits.
*
* Calling the cuda_sum_chi_square_subtotals function
* ==================================================
*
* When calling the function, the blocks and threads must be set up correctly,
* as shown in the following example code.
*
* dim3 threads(1, 1, 1);
* dim3 blocks(1, 1, 1);
*
* int const example_value = 256;
*
* threads.x = min(n_fits, example_value);
* blocks.x = int(ceil(REAL(n_fits) / REAL(threads.x)));
*
* cuda_sum_chi_square_subtotals<<< blocks, threads >>>(
* chi_squares,
* n_blocks_per_fit,
* n_fits,
* finished);
*
*/
__global__ void cuda_sum_chi_square_subtotals(
    REAL * chi_squares,
    REAL const * subtotals,
    int const n_blocks_per_fit,
    int const n_fits,
    int const * finished)
{
    // One thread per fit.
    int const fit_index = blockIdx.x * blockDim.x + threadIdx.x;
    if (fit_index >= n_fits || finished[fit_index])
    {
        return;
    }

    // Subtotals of one fit are strided by n_fits; accumulate in double to
    // reduce rounding error before writing the REAL total.
    double total = 0.;
    for (int block = 0; block < n_blocks_per_fit; block++)
    {
        total += subtotals[block * n_fits + fit_index];
    }
    chi_squares[fit_index] = total;
}
/* Description of the cuda_check_fit_improvement function
* =======================================================
*
* This function checks after each calculation of chi-square values whether the
* currently calculated chi-square values are lower than chi-square values calculated
* in the previous iteration and sets the iteration_failed flags.
*
* Parameters:
*
* iteration_failed: An output vector of flags which indicate whether the fitting
* process improved the fit in the last iteration. If yes it is set
* to 0 otherwise to 1.
*
* chi_squares: An input vector of chi-square values for multiple fits.
*
* prev_chi_squares: An input vector of chi-square values for multiple fits calculated
* in the previous iteration.
*
* n_fits: The number of fits.
*
* finished: An input vector which allows the calculation to be skipped
* for single fits.
*
* Calling the cuda_check_fit_improvement function
* ===============================================
*
* When calling the function, the blocks and threads must be set up correctly,
* as shown in the following example code.
*
* dim3 threads(1, 1, 1);
* dim3 blocks(1, 1, 1);
*
* int const example_value = 256;
*
* threads.x = min(n_fits, example_value);
* blocks.x = int(ceil(REAL(n_fits) / REAL(threads.x)));
*
* cuda_check_fit_improvement <<< blocks, threads >>>(
* iteration_failed,
* chi_squares,
* prev_chi_squares,
* n_fits,
* finished);
*
*/
__global__ void cuda_check_fit_improvement(
    int * iteration_failed,
    REAL const * chi_squares,
    REAL const * prev_chi_squares,
    int const n_fits,
    int const * finished)
{
    // One thread per fit.
    int const index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= n_fits || finished[index])
    {
        return;
    }

    // A previous chi-square of exactly zero means "not yet initialized"
    // (first iteration); in that case the iteration cannot have failed.
    bool const initialized = prev_chi_squares[index] != 0.;
    // A NaN chi-square compares false here, which correctly counts as
    // "did not decrease".
    bool const decreased = chi_squares[index] < prev_chi_squares[index];

    iteration_failed[index] = (initialized && !decreased) ? 1 : 0;
}
/* Description of the cuda_calculate_chi_squares function
* ========================================================
*
* This function calls one of the estimator functions depending on the input
* parameter estimator_id. The estimator function calculates the chi-square values.
* The calculation is performed for multiple fits in parallel.
*
* Parameters:
*
* chi_squares: An output vector of concatenated chi-square values.
*
* states: An output vector of values which indicate whether the fitting process
* was carried out correctly or which problem occurred. In this function
* it is only used for MLE. It is set to 3 if a fitting curve value is
* negative. This vector includes the states for multiple fits.
*
* data: An input vector of data for multiple fits
*
* values: An input vector of concatenated sets of model function values.
*
* weights: An input vector of values for weighting chi-square, gradient and hessian,
* while using LSE
*
* n_points: The number of data points per fit.
*
* n_fits: The number of fits.
*
* estimator_id: The estimator ID.
*
* finished: An input vector which allows the calculation to be skipped for single
* fits.
*
* n_fits_per_block: The number of fits calculated by each thread block.
*
* user_info: An input vector containing user information.
*
* user_info_size: The size of user_info in bytes.
*
* Calling the cuda_calculate_chi_squares function
* ================================================
*
* When calling the function, the blocks and threads must be set up correctly,
* as shown in the following example code.
*
* dim3 threads(1, 1, 1);
* dim3 blocks(1, 1, 1);
*
* threads.x = power_of_two_n_points * n_fits_per_block / n_blocks_per_fit;
* blocks.x = n_fits / n_fits_per_block * n_blocks_per_fit;
*
* int const shared_size = sizeof(REAL) * threads.x;
*
* cuda_calculate_chi_squares<<< blocks, threads, shared_size >>>(
* chi_squares,
* states,
* data,
* values,
* weights,
* n_points,
* n_fits,
* estimator_id,
* finished,
* n_fits_per_block,
* user_info,
* user_info_size);
*
*/
__global__ void cuda_calculate_chi_squares(
    REAL * chi_squares,
    int * states,
    REAL const * data,
    REAL const * values,
    REAL const * weights,
    int const n_points,
    int const n_fits,
    int const estimator_id,
    int const * finished,
    int const n_fits_per_block,
    char * user_info,
    std::size_t const user_info_size)
{
    // Shared-memory slots reserved per fit in this block; the launch uses
    // power_of_two_n_points (see header comment), so the tree reduction works.
    int const shared_size = blockDim.x / n_fits_per_block;
    // Which of the block's fits this thread belongs to.
    int const fit_in_block = threadIdx.x / shared_size;
    // When one fit is split over several blocks, fit_piece selects the slice of
    // points this block handles (pieces are laid out n_fits blocks apart).
    int const fit_piece = blockIdx.x / n_fits;
    int const fit_index = blockIdx.x * n_fits_per_block + fit_in_block - fit_piece * n_fits;
    // Index of the data point this thread processes within its fit.
    int const point_index = threadIdx.x - fit_in_block * shared_size + fit_piece * shared_size;
    int const first_point = fit_index * n_points;
    // Skip fits that already converged or failed.
    if (finished[fit_index])
    {
        return;
    }
    REAL const * current_data = &data[first_point];
    // Weights are optional; NULL selects the unweighted estimator path.
    REAL const * current_weight = weights ? &weights[first_point] : NULL;
    REAL const * current_value = &values[first_point];
    int * current_state = &states[fit_index];
    // Dynamically sized shared buffer; each fit in the block owns shared_size slots.
    extern __shared__ REAL extern_array[];
    volatile REAL * shared_chi_square
        = extern_array + (fit_in_block - fit_piece) * shared_size;
    // Zero the padding slots so they contribute nothing to the reduction.
    if (point_index >= n_points)
    {
        shared_chi_square[point_index] = 0.;
    }
    if (point_index < n_points)
    {
        // Per-point chi-square contribution, dispatched on estimator_id
        // (may set *current_state, e.g. for negative model values in MLE).
        calculate_chi_square(
            estimator_id,
            shared_chi_square,
            point_index,
            current_data,
            current_value,
            current_weight,
            current_state,
            user_info,
            user_info_size);
    }
    // Reduce this fit's contributions; sum_up_floats begins with __syncthreads(),
    // so all shared-memory writes above are visible before the reduction reads them.
    shared_chi_square += fit_piece * shared_size;
    sum_up_floats(shared_chi_square, shared_size);
    // Each block writes its (sub)total; subtotals are combined later by
    // a separate summation kernel.
    chi_squares[fit_index + fit_piece * n_fits] = shared_chi_square[0];
}
/* Description of the cuda_sum_gradient_subtotals function
* ========================================================
*
* This function sums up the chi-square gradient subtotals in place.
*
* Parameters:
*
* gradients: A vector of gradient values for multiple fits.
* in: subtotals
* out: totals
*
* n_blocks_per_fit: The number of blocks used to calculate one fit
*
* n_fits: The number of fits.
*
* n_parameters_to_fit: The number of model parameters, that are not held fixed.
*
* skip: An input vector which allows the calculation to be skipped for single fits.
*
* finished: An input vector which allows the calculation to be skipped for single
* fits.
*
* Calling the cuda_sum_gradient_subtotals function
* ================================================
*
* When calling the function, the blocks and threads must be set up correctly,
* as shown in the following example code.
*
* dim3 threads(1, 1, 1);
* dim3 blocks(1, 1, 1);
*
* int const example_value = 256;
*
* threads.x = min(n_fits, example_value);
* blocks.x = int(ceil(REAL(n_fits) / REAL(threads.x)));
*
* cuda_sum_gradient_subtotals<<< blocks,threads >>>(
* gradients,
* n_blocks_per_fit,
* n_fits,
* n_parameters_to_fit,
* skip,
* finished);
*
*/
__global__ void cuda_sum_gradient_subtotals(
    REAL * gradients,
    REAL const * subtotals,
    int const n_blocks_per_fit,
    int const n_fits,
    int const n_parameters,
    int const * skip,
    int const * finished)
{
    // One thread per (fit, parameter) gradient entry.
    int const index = blockIdx.x * blockDim.x + threadIdx.x;
    int const fit_index = index / n_parameters;
    if (fit_index >= n_fits || finished[fit_index] || skip[fit_index])
    {
        return;
    }

    // Subtotals of one entry are strided by n_fits * n_parameters; accumulate
    // in double to reduce rounding error before writing the REAL total.
    double total = 0.;
    for (int block = 0; block < n_blocks_per_fit; block++)
    {
        total += subtotals[index + block * n_fits * n_parameters];
    }
    gradients[index] = total;
}
/* Description of the cuda_calculate_gradients function
* =====================================================
*
* This function calls one of the gradient functions depending on the input
* parameter estimator_id. The gradient function calculates the gradient values
* of the chi-square function calling a __device__ function. The calcluation is
* performed for multiple fits in parallel.
*
* Parameters:
*
* gradients: An output vector of concatenated sets of gradient vector values.
*
* data: An input vector of data for multiple fits
*
* values: An input vector of concatenated sets of model function values.
*
* derivatives: An input vector of concatenated sets of model function partial
* derivatives.
*
* weights: An input vector of values for weighting chi-square, gradient and hessian,
* while using LSE
*
* n_points: The number of data points per fit.
*
* n_fits: The number of fits.
*
* n_parameters: The number of fitting curve parameters.
*
* n_parameters_to_fit: The number of fitting curve parameters, that are not held
* fixed.
*
* parameters_to_fit_indices: An input vector of indices of fitting curve parameters,
* that are not held fixed.
*
* estimator_id: The estimator ID.
*
* finished: An input vector which allows the calculation to be skipped for single
* fits.
*
* skip: An input vector which allows the calculation to be skipped for single fits.
*
* n_fits_per_block: The number of fits calculated by each thread block.
*
* user_info: An input vector containing user information.
*
* user_info_size: The number of elements in user_info.
*
* Calling the cuda_calculate_gradients function
* =============================================
*
* When calling the function, the blocks and threads must be set up correctly,
* as shown in the following example code.
*
* dim3 threads(1, 1, 1);
* dim3 blocks(1, 1, 1);
*
* threads.x = power_of_two_n_points * n_fits_per_block / n_blocks_per_fit;
* blocks.x = n_fits / n_fits_per_block * n_blocks_per_fit;
*
* int const shared_size = sizeof(REAL) * threads.x;
*
* cuda_calculate_gradients<<< blocks, threads, shared_size >>>(
* gradients,
* data,
* values,
* derivatives,
* weight,
* n_points,
* n_fits,
* n_parameters,
* n_parameters_to_fit,
* parameters_to_fit_indices,
* estimator_id,
* finished,
* skip,
* n_fits_per_block,
* user_info,
* user_info_size);
*
*/
__global__ void cuda_calculate_gradients(
    REAL * gradients,
    REAL const * data,
    REAL const * values,
    REAL const * derivatives,
    REAL const * weights,
    int const n_points,
    int const n_fits,
    int const n_parameters,
    int const n_parameters_to_fit,
    int const * parameters_to_fit_indices,
    int const estimator_id,
    int const * finished,
    int const * skip,
    int const n_fits_per_block,
    char * user_info,
    std::size_t const user_info_size)
{
    // Shared-memory slots per fit; the launch uses power_of_two_n_points
    // (see header comment), so the tree reduction below works.
    int const shared_size = blockDim.x / n_fits_per_block;
    // Which of the block's fits this thread belongs to.
    int const fit_in_block = threadIdx.x / shared_size;
    // Slice index when one fit is split across several blocks
    // (pieces are laid out n_fits blocks apart).
    int const fit_piece = blockIdx.x / n_fits;
    int const fit_index = blockIdx.x * n_fits_per_block + fit_in_block - fit_piece * n_fits;
    // Index of the data point this thread processes within its fit.
    int const point_index = threadIdx.x - fit_in_block * shared_size + fit_piece * shared_size;
    int const first_point = fit_index * n_points;
    // Skip fits that are finished or flagged to be skipped this iteration.
    if (finished[fit_index] || skip[fit_index])
    {
        return;
    }
    REAL const * current_data = &data[first_point];
    // Weights are optional; NULL selects the unweighted estimator path.
    REAL const * current_weight = weights ? &weights[first_point] : NULL;
    REAL const * current_derivative = &derivatives[first_point * n_parameters];
    REAL const * current_value = &values[first_point];
    // Dynamically sized shared buffer; each fit in the block owns shared_size slots.
    extern __shared__ REAL extern_array[];
    volatile REAL * shared_gradient = extern_array + (fit_in_block - fit_piece) * shared_size;
    // Zero the padding slots once, before the per-parameter loop.
    if (point_index >= n_points)
    {
        shared_gradient[point_index] = 0.;
    }
    // One reduction per fitted parameter: fill per-point contributions, then
    // tree-reduce them and store this block's (sub)total for that parameter.
    for (int parameter_index = 0; parameter_index < n_parameters_to_fit; parameter_index++)
    {
        if (point_index < n_points)
        {
            // Derivatives are stored parameter-major: all points of one
            // parameter are contiguous (stride n_points per parameter).
            int const derivative_index = parameters_to_fit_indices[parameter_index] * n_points + point_index;
            // Per-point gradient contribution, dispatched on estimator_id.
            calculate_gradient(
                estimator_id,
                shared_gradient,
                point_index,
                derivative_index,
                current_data,
                current_value,
                current_derivative,
                current_weight,
                user_info,
                user_info_size);
        }
        // sum_up_floats begins with __syncthreads(), so all shared-memory
        // writes above are visible before the reduction reads them.
        sum_up_floats(shared_gradient + fit_piece * shared_size, shared_size);
        gradients[(fit_index * n_parameters_to_fit + parameter_index) + fit_piece * n_fits * n_parameters_to_fit]
            = shared_gradient[fit_piece * shared_size];
    }
}
/* Description of the cuda_calculate_hessians function
* ====================================================
*
* This function calls one of the hessian functions depending on the input
* parameter estimator_id. The hessian function calculates the hessian matrix
* values of the chi-square function calling a __device__ function. The
* calculation is performed for multiple fits in parallel.
*
* Parameters:
*
* hessians: An output vector of concatenated sets of hessian matrix values.
*
* data: An input vector of data for multiple fits
*
* values: An input vector of concatenated sets of model function values.
*
* derivatives: An input vector of concatenated sets of model function partial
* derivatives.
*
* weights: An input vector of values for weighting chi-square, gradient and hessian,
* while using LSE
*
* n_fits: The number of fits.
*
* n_points: The number of data points per fit.
*
* n_parameters: The number of fitting curve parameters.
*
* n_parameters_to_fit: The number of fitting curve parameters, that are not held
* fixed.
*
* parameters_to_fit_indices: An input vector of indices of fitting curve parameters,
* that are not held fixed.
*
* estimator_id: The estimator ID.
*
* skip: An input vector which allows the calculation to be skipped for single fits.
*
* finished: An input vector which allows the calculation to be skipped for single
* fits.
*
* user_info: An input vector containing user information.
*
* user_info_size: The size of user_info in bytes.
*
* Calling the cuda_calculate_hessians function
* ============================================
*
* When calling the function, the blocks and threads must be set up correctly,
* as shown in the following example code.
*
* dim3 threads(1, 1, 1);
* dim3 blocks(1, 1, 1);
*
* int n_unique_values = n_parameters_to_fit * (n_parameters_to_fit + 1) / 2;
*
* threads.x
* = min(n_unique_values * n_fits_per_block, max_threads_per_block);
*
* blocks.y
* = threads.x / max_threads_per_block
* + int((threads.x % max_threads_per_block) > 0);
*
* blocks.x
* = n_fits / n_fits_per_block
* + int((n_fits % n_fits_per_block) > 0);
*
* cuda_calculate_hessians<<< blocks, threads >>>(
* hessians,
* data,
* values,
* derivatives,
* weight,
* n_fits,
* n_points,
* n_parameters,
* n_parameters_to_fit,
* parameters_to_fit_indices,
* estimator_id,
* skip,
* finished,
* user_info,
* user_info_size);
*
*/
// Computes, for each active fit, the unique (upper-triangular) elements of the
// symmetric hessian matrix and mirrors each value into the lower triangle.
// One thread handles one unique (i, j) element of one fit; the required
// launch configuration is documented in the description comment above.
__global__ void cuda_calculate_hessians(
    REAL * hessians,
    REAL const * data,
    REAL const * values,
    REAL const * derivatives,
    REAL const * weights,
    int const n_fits,
    int const n_points,
    int const n_parameters,
    int const n_parameters_to_fit,
    int const * parameters_to_fit_indices,
    int const estimator_id,
    int const * skip,
    int const * finished,
    char * user_info,
    std::size_t const user_info_size)
{
    // number of unique elements of a symmetric n x n matrix (one triangle
    // including the diagonal)
    int const n_unique_values = n_parameters_to_fit * (n_parameters_to_fit + 1) / 2;
    int const n_fits_per_block = blockDim.x * gridDim.y / n_unique_values;
    // if the grid has more than one row (gridDim.y > 1), a whole grid row
    // works on a single fit; otherwise several fits share one block
    int const fit_in_block
        = (gridDim.y == 1)
        ? (blockIdx.y * blockDim.x + threadIdx.x) / n_unique_values
        : 0;
    int const fit_index = blockIdx.x * n_fits_per_block + fit_in_block;

    if (fit_index >= n_fits || finished[fit_index] || skip[fit_index])
    {
        return;
    }

    int const first_point = fit_index * n_points;

    // linear index of this thread's hessian element within the set of unique
    // (triangular) elements of its fit
    int const parameter_index = (blockIdx.y * blockDim.x + threadIdx.x) - fit_in_block * n_unique_values;

    if (parameter_index >= n_unique_values)
    {
        return;
    }

    // invert the row-major linearization of the triangular matrix: recover
    // row index i of the unique element from the linear parameter_index via
    // the closed-form (quadratic) solution ...
    int const parameter_index_i
        = n_parameters_to_fit
        - 1.
        - std::floor(
            .5*(
                std::sqrt(
                    - 8. * (parameter_index - n_parameters_to_fit)
                    + 4. * n_parameters_to_fit * (n_parameters_to_fit - 1.)
                    - 7.
                ) - 1.
            )
        );
    // ... and derive the corresponding column index j
    int const parameter_index_j
        = parameter_index
        + parameter_index_i
        - parameter_index_i*(n_parameters_to_fit - (parameter_index_i - 1) / 2.);

    // per-fit base pointers into the concatenated input/output vectors
    REAL * current_hessian = &hessians[fit_index * n_parameters_to_fit * n_parameters_to_fit];
    REAL const * current_data = &data[first_point];
    REAL const * current_weight = weights ? &weights[first_point] : NULL;
    REAL const * current_derivative = &derivatives[first_point*n_parameters];
    REAL const * current_value = &values[first_point];

    // offsets of element (i, j) and its mirror (j, i) in the dense hessian
    int const hessian_index_ij = parameter_index_i * n_parameters_to_fit + parameter_index_j;
    int const hessian_index_ji = parameter_index_j * n_parameters_to_fit + parameter_index_i;
    // derivatives are stored per parameter, n_points values each; map the
    // triangular indices to the indices of the non-fixed parameters
    int const derivative_index_i = parameters_to_fit_indices[parameter_index_i] * n_points;
    int const derivative_index_j = parameters_to_fit_indices[parameter_index_j] * n_points;

    // accumulate in double precision regardless of REAL to reduce rounding
    // error over the per-point sum
    double sum = 0.0;
    for (int point_index = 0; point_index < n_points; point_index++)
    {
        calculate_hessian(
            estimator_id,
            &sum,
            point_index,
            derivative_index_i + point_index,
            derivative_index_j + point_index,
            current_data,
            current_value,
            current_derivative,
            current_weight,
            user_info,
            user_info_size);
    }

    // write both symmetric entries
    current_hessian[hessian_index_ij] = sum;
    current_hessian[hessian_index_ji] = sum;
}
/* Description of the cuda_modify_step_widths function
* ====================================================
*
 * This function modifies the diagonal elements of the hessian matrices by multiplying
 * them by the factor (1 + lambda). This operation controls the step widths of the
 * iteration. If the last iteration failed, before modifying the hessian, the diagonal
 * elements of the hessian are calculated back to represent unmodified values.
*
* hessians: An input and output vector of hessian matrices, which are modified by
* the lambda values.
*
* lambdas: An input vector of values for modifying the hessians.
*
* n_parameters: The number of fitting curve parameters.
*
* iteration_failed: An input vector which indicates whether the previous iteration
* failed.
*
* finished: An input vector which allows the calculation to be skipped for single fits.
*
* n_fits_per_block: The number of fits calculated by each thread block.
*
* Calling the cuda_modify_step_widths function
* ============================================
*
* When calling the function, the blocks and threads must be set up correctly,
* as shown in the following example code.
*
* dim3 threads(1, 1, 1);
* dim3 blocks(1, 1, 1);
*
* threads.x = n_parameters_to_fit * n_fits_per_block;
* blocks.x = n_fits / n_fits_per_block;
*
* cuda_modify_step_width<<< blocks, threads >>>(
* hessians,
* lambdas,
* n_parameters,
* iteration_failed,
* finished,
* n_fits_per_block);
*
*/
// Applies the Levenberg-Marquardt damping to the hessian diagonal of every
// active fit: each diagonal element is increased by scaling_vector * lambda.
// If the previous iteration failed, the previously applied damping is removed
// first. One thread handles one diagonal element of one fit.
__global__ void cuda_modify_step_widths(
    REAL * hessians,
    REAL const * lambdas,
    REAL * scaling_vectors,
    unsigned int const n_parameters,
    int const * iteration_failed,
    int const * finished,
    int const n_fits_per_block)
{
    // each block handles n_fits_per_block fits with shared_size threads each
    int const shared_size = blockDim.x / n_fits_per_block;
    int const fit_in_block = threadIdx.x / shared_size;
    int const parameter_index = threadIdx.x - fit_in_block * shared_size;
    int const fit_index = blockIdx.x * n_fits_per_block + fit_in_block;

    if (finished[fit_index])
    {
        return;
    }

    REAL * hessian = &hessians[fit_index * n_parameters * n_parameters];
    REAL * scaling_vector = &scaling_vectors[fit_index * n_parameters];
    REAL const & lambda = lambdas[fit_index];

    int const diagonal_index = parameter_index * n_parameters + parameter_index;

    if (iteration_failed[fit_index])
    {
        // undo the damping added in the previous iteration: lambda has been
        // multiplied by 10 since then (see cuda_prepare_next_iteration), so
        // the previously added term equals scaling_vector * lambda / 10
        hessian[diagonal_index] -= scaling_vector[parameter_index] * lambda / 10.;
    }

    // adaptive scaling: keep the largest diagonal value seen so far
    scaling_vector[parameter_index]
        = max(scaling_vector[parameter_index], hessian[diagonal_index]);

    // continuous scaling
    //scaling_vector[parameter_index] = hessian[diagonal_index];

    // initial scaling
    //if (scaling_vector[parameter_index] == 0.)
    //    scaling_vector[parameter_index] = hessian[diagonal_index];

    // apply the damping for this iteration
    hessian[diagonal_index] += scaling_vector[parameter_index] * lambda;
}
/* Description of the cuda_update_parameters function
* ===================================================
*
* This function stores the fitting curve parameter values in prev_parameters and
* updates them after each iteration.
*
* Parameters:
*
* parameters: An input and output vector of concatenated sets of model
* parameters.
*
* prev_parameters: An input and output vector of concatenated sets of model
* parameters calculated by the previous iteration.
*
* deltas: An input vector of concatenated delta values, which are added to the
* model parameters.
*
* n_parameters_to_fit: The number of fitted curve parameters.
*
* parameters_to_fit_indices: The indices of fitted curve parameters.
*
* finished: An input vector which allows the parameter update to be skipped for single fits.
*
* n_fits_per_block: The number of fits calculated by each threadblock.
*
* Calling the cuda_update_parameters function
* ===========================================
*
* When calling the function, the blocks and threads must be set up correctly,
* as shown in the following example code.
*
* dim3 threads(1, 1, 1);
* dim3 blocks(1, 1, 1);
*
* threads.x = n_parameters * n_fits_per_block;
* blocks.x = n_fits / n_fits_per_block;
*
* cuda_update_parameters<<< blocks, threads >>>(
* parameters,
* prev_parameters,
* deltas,
* n_parameters_to_fit,
* parameters_to_fit_indices,
* finished,
* n_fits_per_block);
*
*/
// Backs up the current fitting curve parameters of every fit into
// prev_parameters, then adds the solved delta values to the parameters that
// are not held fixed. One thread handles one parameter of one fit; each
// block processes n_fits_per_block fits (see description comment above).
//
// Fix: the base-pointer computation for `parameters` had been corrupted to
// "¶meters" (an HTML-entity mangling of "&para" in "&parameters");
// restored to take the address of the fit's parameter set.
__global__ void cuda_update_parameters(
    REAL * parameters,
    REAL * prev_parameters,
    REAL const * deltas,
    int const n_parameters_to_fit,
    int const * parameters_to_fit_indices,
    int const * finished,
    int const n_fits_per_block)
{
    int const n_parameters = blockDim.x / n_fits_per_block;
    int const fit_in_block = threadIdx.x / n_parameters;
    int const parameter_index = threadIdx.x - fit_in_block * n_parameters;
    int const fit_index = blockIdx.x * n_fits_per_block + fit_in_block;

    REAL * current_parameters = &parameters[fit_index * n_parameters];
    REAL * current_prev_parameters = &prev_parameters[fit_index * n_parameters];

    // store the current value before updating; this backup happens for every
    // fit, including finished ones
    current_prev_parameters[parameter_index] = current_parameters[parameter_index];

    if (finished[fit_index])
    {
        return;
    }
    if (parameter_index >= n_parameters_to_fit)
    {
        return;
    }

    // deltas are stored per fitted (non-fixed) parameter only; map back to
    // the full parameter vector through parameters_to_fit_indices
    REAL const * current_deltas = &deltas[fit_index * n_parameters_to_fit];

    current_parameters[parameters_to_fit_indices[parameter_index]] += current_deltas[parameter_index];
}
/* Description of the cuda_update_state_after_solving function
* ===========================================================
*
* This function interprets the singular flag vector of the equation system
* solving function according to this LM implementation.
*
* Parameters:
*
* n_fits: The number of fits.
*
* solution_info: An input vector used to report whether a fit is singular.
*
 * finished: An input vector which allows the calculation to be skipped for
 *           single fits.
 *
 * gpufit_states: An output vector of values which indicate whether the fitting
 *                process was carried out correctly or which problem occurred.
 *                If a hessian matrix of a fit is singular, it is set to 2.
*
* Calling the cuda_update_state_after_solving function
* ====================================================
*
* When calling the function, the blocks and threads must be set up correctly,
* as shown in the following example code.
*
* dim3 threads(1, 1, 1);
* dim3 blocks(1, 1, 1);
*
* int const example_value = 256;
*
* threads.x = min(n_fits, example_value);
* blocks.x = int(ceil(REAL(n_fits) / REAL(threads.x)));
*
* cuda_update_state_after_solving<<< blocks, threads >>>(
* n_fits,
* solution_info,
* finished,
* gpufit_states);
*
*/
// Translates the solver's per-fit info flags into gpufit states: a non-zero
// entry in cublas_info means the equation system of that fit was singular,
// so its state is set to SINGULAR_HESSIAN. One thread handles one fit.
__global__ void cuda_update_state_after_solving(
    int const n_fits,
    int const * cublas_info,
    int const * finished,
    int * states)
{
    int const fit_index = blockIdx.x * blockDim.x + threadIdx.x;

    // short-circuit keeps finished[] from being read out of bounds
    bool const active = fit_index < n_fits && !finished[fit_index];

    if (active && cublas_info[fit_index] != 0)
    {
        states[fit_index] = SINGULAR_HESSIAN;
    }
}
/* Description of the cuda_check_for_convergence function
* =======================================================
*
* This function checks after each iteration whether the fits are converged or not.
* It also checks whether the set maximum number of iterations is reached.
*
* Parameters:
*
* finished: An input and output vector which allows the calculation to be skipped
* for single fits.
*
* tolerance: The tolerance value for the convergence set by user.
*
* states: An output vector of values which indicate whether the fitting process
 *          was carried out correctly or which problem occurred. If the maximum
* number of iterations is reached without converging, it is set to 1. If
* the fit converged it keeps its initial value of 0.
*
* chi_squares: An input vector of chi-square values for multiple fits. Used for the
* convergence check.
*
* prev_chi_squares: An input vector of chi-square values for multiple fits calculated
* in the previous iteration. Used for the convergence check.
*
* iteration: The value of the current iteration. It is compared to the value
* of the maximum number of iteration set by user.
*
* max_n_iterations: The maximum number of iterations set by user.
*
* n_fits: The number of fits.
*
* Calling the cuda_check_for_convergence function
* ===============================================
*
* When calling the function, the blocks and threads must be set up correctly,
* as shown in the following example code.
*
* dim3 threads(1, 1, 1);
* dim3 blocks(1, 1, 1);
*
* int const example_value = 256;
*
* threads.x = min(n_fits, example_value);
* blocks.x = int(ceil(REAL(n_fits) / REAL(threads.x)));
*
* cuda_check_for_convergence<<< blocks, threads >>>(
* finished,
* tolerance,
* states,
* chi_squares,
* prev_chi_squares,
* iteration,
* max_n_iterations,
* n_fits);
*
*/
// Decides for every still-active fit whether it has converged: the fit is
// finished when the chi-square change with respect to the previous iteration
// drops below the user tolerance (relative to max(1, chi-square)). If instead
// the final allowed iteration has been reached, the fit state is marked
// MAX_ITERATION. One thread handles one fit.
__global__ void cuda_check_for_convergence(
    int * finished,
    REAL const tolerance,
    int * states,
    REAL const * chi_squares,
    REAL const * prev_chi_squares,
    int const iteration,
    int const max_n_iterations,
    int const n_fits)
{
    int const fit_index = blockIdx.x * blockDim.x + threadIdx.x;

    if (fit_index >= n_fits || finished[fit_index])
    {
        return;
    }

    REAL const chi_square = chi_squares[fit_index];
    REAL const change = abs(chi_square - prev_chi_squares[fit_index]);

    if (change < tolerance * max(1., chi_square))
    {
        finished[fit_index] = 1;
    }
    else if (iteration == max_n_iterations - 1)
    {
        states[fit_index] = MAX_ITERATION;
    }
}
/* Description of the cuda_evaluate_iteration function
* ====================================================
*
* This function evaluates the current iteration.
 * - It marks a fit as finished if a problem occurred.
* - It saves the needed number of iterations if a fit finished.
* - It checks if all fits finished
*
* Parameters:
*
* all_finished: An output flag, that indicates whether all fits finished.
*
* n_iterations: An output vector of needed iterations for each fit.
*
* finished: An input and output vector which allows the evaluation to be skipped
* for single fits
*
* iteration: The values of the current iteration.
*
* states: An input vector of values which indicate whether the fitting process
 *          was carried out correctly or which problem occurred.
*
* n_fits: The number of fits.
*
* Calling the cuda_evaluate_iteration function
* ============================================
*
* When calling the function, the blocks and threads must be set up correctly,
* as shown in the following example code.
*
* dim3 threads(1, 1, 1);
* dim3 blocks(1, 1, 1);
*
* int const example_value = 256;
*
* threads.x = min(n_fits, example_value);
* blocks.x = int(ceil(REAL(n_fits) / REAL(threads.x)));
*
* cuda_evaluate_iteration<<< blocks, threads >>>(
* all_finished,
* n_iterations,
* finished,
* iteration,
* states,
* n_fits);
*
*/
// Evaluates one LM iteration for every fit: fits whose state signals a
// problem are marked finished, newly finished fits record the number of
// iterations they needed, and the shared all_finished flag is cleared while
// any fit is still running. One thread handles one fit.
__global__ void cuda_evaluate_iteration(
    int * all_finished,
    int * n_iterations,
    int * finished,
    int const iteration,
    int const * states,
    int const n_fits)
{
    int const fit_index = blockIdx.x * blockDim.x + threadIdx.x;

    if (fit_index >= n_fits)
    {
        return;
    }

    // any non-converged problem state terminates the fit
    if (states[fit_index] != CONVERGED)
    {
        finished[fit_index] = 1;
    }

    if (finished[fit_index])
    {
        // record the iteration count only once, when the fit first finishes
        if (n_iterations[fit_index] == 0)
        {
            n_iterations[fit_index] = iteration + 1;
        }
    }
    else
    {
        *all_finished = 0;
    }
}
/* Description of the cuda_prepare_next_iteration function
* ========================================================
*
* This function prepares the next iteration. It either updates previous
* chi-square values or sets currently calculated chi-square values and
* parameters to values calculated by the previous iteration. This function also
* updates lambda values.
*
* Parameters:
*
* lambdas: An output vector of values which control the step width by modifying
* the diagonal elements of the hessian matrices.
*
* chi_squares: An input and output vector of chi-square values for multiple fits.
*
* prev_chi_squares: An input and output vector of chi-square values for multiple
* fits calculated in the previous iteration.
*
* parameters: An output vector of concatenated sets of model parameters.
*
* prev_parameters: An input vector of concatenated sets of model parameters
* calculated in the previous iteration.
*
* n_fits: The number of fits.
*
* n_parameters: The number of fitting curve parameters.
*
* Calling the cuda_prepare_next_iteration function
* ================================================
*
* When calling the function, the blocks and threads must be set up correctly,
* as shown in the following example code.
*
* dim3 threads(1, 1, 1);
* dim3 blocks(1, 1, 1);
*
* int const example_value = 256;
*
* threads.x = min(n_fits, example_value);
* blocks.x = int(ceil(REAL(n_fits) / REAL(threads.x)));
*
* cuda_prepare_next_iteration<<< blocks, threads >>>(
* lambdas,
* chi_squares,
* prev_chi_squares,
* parameters,
* prev_parameters,
* n_fits,
* n_parameters);
*
*/
// Prepares the next LM iteration for every fit. On an improved chi-square the
// step width is relaxed (lambda / 10) and the new chi-square becomes the
// reference; otherwise the step width is tightened (lambda * 10) and both the
// chi-square and the parameters are rolled back to the previous iteration's
// values. One thread handles one fit.
__global__ void cuda_prepare_next_iteration(
    REAL * lambdas,
    REAL * chi_squares,
    REAL * prev_chi_squares,
    REAL * parameters,
    REAL const * prev_parameters,
    int const n_fits,
    int const n_parameters)
{
    int const fit_index = blockIdx.x * blockDim.x + threadIdx.x;

    if (fit_index >= n_fits)
    {
        return;
    }

    bool const improved = chi_squares[fit_index] < prev_chi_squares[fit_index];

    if (improved)
    {
        lambdas[fit_index] *= 0.1f;
        prev_chi_squares[fit_index] = chi_squares[fit_index];
    }
    else
    {
        lambdas[fit_index] *= 10.;
        chi_squares[fit_index] = prev_chi_squares[fit_index];

        // restore all parameters of this fit from the previous iteration
        REAL const * source = &prev_parameters[fit_index * n_parameters];
        REAL * destination = &parameters[fit_index * n_parameters];
        for (int iparameter = 0; iparameter < n_parameters; iparameter++)
        {
            destination[iparameter] = source[iparameter];
        }
    }
}
|
4b9f4976dcb24c61dc7dc5d1bfaac895b8d8e03c.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2009-2019 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
#include "ComputeFreeVolumeGPU.cuh"
#include "IntegratorHPMCMonoGPU.cuh"
#include "IntegratorHPMCMonoImplicitGPU.cuh"
#include "IntegratorHPMCMonoImplicitNewGPU.cuh"
#include "ShapePolyhedron.h"
namespace hpmc
{
namespace detail
{
//! HPMC kernels for ShapePolyhedron
// Explicit template instantiations of the GPU HPMC kernel drivers for the
// ShapePolyhedron shape class, forcing the templated implementations from
// the included .cuh headers to be compiled into this translation unit.

// free volume sampling
template hipError_t gpu_hpmc_free_volume<ShapePolyhedron>(const hpmc_free_volume_args_t &args,
                                                       const typename ShapePolyhedron::param_type *d_params);
// Monte Carlo trial-move update
template hipError_t gpu_hpmc_update<ShapePolyhedron>(const hpmc_args_t& args,
                                                  const typename ShapePolyhedron::param_type *d_params);
// implicit depletants: overlap counting and accept/reject
template hipError_t gpu_hpmc_implicit_count_overlaps<ShapePolyhedron>(const hpmc_implicit_args_t& args,
                                                  const typename ShapePolyhedron::param_type *d_params);
template hipError_t gpu_hpmc_implicit_accept_reject<ShapePolyhedron>(const hpmc_implicit_args_t& args,
                                                  const typename ShapePolyhedron::param_type *d_params);
// new depletant insertion algorithm and its accept/reject step
template hipError_t gpu_hpmc_insert_depletants_queue<ShapePolyhedron>(const hpmc_implicit_args_new_t& args,
                                                  const typename ShapePolyhedron::param_type *d_params);
template hipError_t gpu_hpmc_implicit_accept_reject_new<ShapePolyhedron>(const hpmc_implicit_args_new_t& args,
                                                  const typename ShapePolyhedron::param_type *d_params);
}; // end namespace detail
} // end namespace hpmc
| 4b9f4976dcb24c61dc7dc5d1bfaac895b8d8e03c.cu | // Copyright (c) 2009-2019 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
#include "ComputeFreeVolumeGPU.cuh"
#include "IntegratorHPMCMonoGPU.cuh"
#include "IntegratorHPMCMonoImplicitGPU.cuh"
#include "IntegratorHPMCMonoImplicitNewGPU.cuh"
#include "ShapePolyhedron.h"
namespace hpmc
{
namespace detail
{
//! HPMC kernels for ShapePolyhedron
// Explicit template instantiations of the CUDA HPMC kernel drivers for the
// ShapePolyhedron shape class, forcing the templated implementations from
// the included .cuh headers to be compiled into this translation unit.

// free volume sampling
template cudaError_t gpu_hpmc_free_volume<ShapePolyhedron>(const hpmc_free_volume_args_t &args,
                                                       const typename ShapePolyhedron::param_type *d_params);
// Monte Carlo trial-move update
template cudaError_t gpu_hpmc_update<ShapePolyhedron>(const hpmc_args_t& args,
                                                  const typename ShapePolyhedron::param_type *d_params);
// implicit depletants: overlap counting and accept/reject
template cudaError_t gpu_hpmc_implicit_count_overlaps<ShapePolyhedron>(const hpmc_implicit_args_t& args,
                                                  const typename ShapePolyhedron::param_type *d_params);
template cudaError_t gpu_hpmc_implicit_accept_reject<ShapePolyhedron>(const hpmc_implicit_args_t& args,
                                                  const typename ShapePolyhedron::param_type *d_params);
// new depletant insertion algorithm and its accept/reject step
template cudaError_t gpu_hpmc_insert_depletants_queue<ShapePolyhedron>(const hpmc_implicit_args_new_t& args,
                                                  const typename ShapePolyhedron::param_type *d_params);
template cudaError_t gpu_hpmc_implicit_accept_reject_new<ShapePolyhedron>(const hpmc_implicit_args_new_t& args,
                                                  const typename ShapePolyhedron::param_type *d_params);
}; // end namespace detail
} // end namespace hpmc
|
1221e9b0918527bfbaf6cdecbcac751eef19578c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THH/generic/THHTensorMode.hip"
#else
// Computes the mode (most frequent value) of the innermost-dimension slice of
// `input` selected by the coordinates stored in `position`. The mode value is
// written into `values` and the (TH_INDEX_BASE-based) index of its first
// occurrence within the slice into `indices`, both at the offset given by
// `position`. `input` must be contiguous; `sortBuffer` supplies nElement
// int64 scratch slots used to track original element positions through the
// sort.
void THCTensor_(calculateMode)(THCState *state,
                               THCTensor *values,
                               THCudaLongTensor *indices,
                               THCTensor *input,
                               THCudaLongStorage *sortBuffer,
                               int dimension,
                               THLongStorage *position) {
  THAssert(THCTensor_(isContiguous)(state, input));

  // Because the input is contiguous, we want to get a reference to the
  // location of the buffer at the innermost dimension that we are going
  // to calculate the mode for --> we do this by manually doing the stride
  // calculations to get an offset
  scalar_t *data = THCTensor_(data)(state, input);
  for (int i = 0; i < THLongStorage_size(position); ++i) {
    data += THLongStorage_data(position)[i] * THTensor_strideLegacyNoScalars(input, i);
  }

  // length of the innermost-dimension slice whose mode is computed
  int64_t nElement = THCTensor_(sizeLegacyNoScalars)(state, input, THCTensor_(nDimensionLegacyAll)(state, input) - 1);
  THCThrustAllocator thrustAlloc(state);

  // Wrap input data, sortBuffer, in Thrust device vectors
  thrust::device_ptr<scalar_t> vecPtr = thrust::device_pointer_cast(data);
  thrust::device_vector<scalar_t> iter(vecPtr, vecPtr + nElement);
  thrust::device_ptr<int64_t> sbPtr = thrust::device_pointer_cast(THCudaLongStorage_data(state, sortBuffer));
  thrust::device_vector<int64_t> seq(sbPtr, sbPtr + nElement);

  // Fill sortBuffer with [0, 1, 2, ... nElement - 1]
  thrust::sequence(
#if TORCH_HIP_VERSION >= 7000
      thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#else
      thrust::device,
#endif
      seq.begin(), seq.end());

  // Sort the input data. The original indices of the data are stored in seq
  thrust::sort_by_key(
#if TORCH_HIP_VERSION >= 7000
      thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#else
      thrust::device,
#endif
      iter.begin(), iter.end(), seq.begin()
#if defined(THC_REAL_IS_HALF)
      // half has no native comparison operators; use the custom comparator
      , ThrustHalfLess()
#endif
  );

  // Count # of unique elements via an inner product between adjacent elements.
  // Add 1 if two neighboring element are not equal.
  int unique = 1 + thrust::inner_product(
#if TORCH_HIP_VERSION >= 7000
      thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#else
      thrust::device,
#endif
      iter.begin(), iter.end() - 1, iter.begin() + 1, 0, thrust::plus<int>(),
#if defined(THC_REAL_IS_HALF)
      ThrustHalfNotEqualTo()
#else
      thrust::not_equal_to<scalar_t>()
#endif
  );

  // Count frequency of each element: since the data is sorted, equal values
  // form contiguous runs, so a reduce_by_key yields (value, run length) pairs
  thrust::device_vector<scalar_t> keys(unique);
  thrust::device_vector<int> counts(unique);
  thrust::reduce_by_key(
#if TORCH_HIP_VERSION >= 7000
      thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#else
      thrust::device,
#endif
      iter.begin(), iter.end(),
      thrust::constant_iterator<int>(1), keys.begin(), counts.begin()
#if defined(THC_REAL_IS_HALF)
      , ThrustHalfEqualTo()
#endif
  );

  // Find index of maximum count
  thrust::device_vector<int>::iterator it = thrust::max_element(
#if TORCH_HIP_VERSION >= 7000
      thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#else
      thrust::device,
#endif
      counts.begin(), counts.end());

  // keys are in sorted order, and max_element returns the first maximum, so
  // on ties the smallest value with maximal count wins
  scalar_t mode = keys[it - counts.begin()];

  // Find first index within which it occurs
#if defined(THC_REAL_IS_HALF)
  thrust::device_vector<scalar_t>::iterator positionIter = thrust::find_if(
#if TORCH_HIP_VERSION >= 7000
      thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#else
      thrust::device,
#endif
      iter.begin(), iter.end(), ThrustHalfEqualToPredicate(mode));
#else
  thrust::device_vector<scalar_t>::iterator positionIter = thrust::find(
#if TORCH_HIP_VERSION >= 7000
      thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#else
      thrust::device,
#endif
      iter.begin(), iter.end(), mode);
#endif

  THAssert(positionIter != iter.end());
  // map the position in the sorted slice back to the original element index
  // via the permutation recorded in seq
  int64_t index = TH_INDEX_BASE + seq[positionIter - iter.begin()];

  // Place mode, index in output: compute the storage offsets corresponding
  // to the coordinates in `position`
  ptrdiff_t valuesOffset = THCTensor_(storageOffset)(state, values);
  int64_t indicesOffset = THCudaLongTensor_storageOffset(state, indices);
  for (int i = 0; i < THLongStorage_size(position); ++i) {
    int64_t pos = THLongStorage_data(position)[i];
    valuesOffset += THTensor_strideLegacyNoScalars(values, i) * pos;
    indicesOffset += THTensor_strideLegacyNoScalars(indices, i) * pos;
  }
  THCStorage_(set)(state, THCTensor_(storage)(state, values), valuesOffset, mode);
  THCudaLongStorage_set(state, THCudaLongTensor_storage(state, indices), indicesOffset, index);
}
// this probably could be a loop, not a recursive algorithm
// Recursively enumerates every coordinate of the outer dimensions and, once
// the innermost dimension is reached, calculates the mode of that slice.
// The mode dimension has already been transposed to the innermost position,
// so the slice data is contiguous when calculateMode runs.
void THCTensor_(dimApplyMode)(THCState *state,
                              THCTensor *values,
                              THCudaLongTensor *indices,
                              THCTensor *input,
                              THCudaLongStorage *sortBuffer,
                              int dimension,
                              THLongStorage *position,
                              int curDim) {
  int64_t ndim = THCTensor_(nDimensionLegacyAll)(state, input);

  // base case: innermost dimension reached, compute the mode of this slice
  if (curDim == ndim - 1) {
    THCTensor_(calculateMode)(state, values, indices, input, sortBuffer, dimension, position);
    return;
  }

  // otherwise record each coordinate of the current dimension and recurse
  for (int i = 0; i < THCTensor_(sizeLegacyNoScalars)(state, input, curDim); ++i) {
    THLongStorage_data(position)[curDim] = i;
    THCTensor_(dimApplyMode)(state, values, indices, input, sortBuffer, dimension, position, curDim + 1);
  }
}
#define MAX_GRID_SIZE 65535
#define MAX_BLOCK_SIZE 1024
// Computes the mode along `dimension` of `input`, writing the mode values to
// `values` and the index of the first occurrence of each mode to `indices`.
// A fused GPU kernel is used when each slice fits in a single block and
// 32-bit indexing suffices; otherwise a slower Thrust-based per-slice path
// (dimApplyMode/calculateMode) is used. When `keepdim` is false, the reduced
// dimension is squeezed out of both outputs.
void THCTensor_(mode)(THCState *state,
                      THCTensor *values,
                      THCudaLongTensor *indices,
                      THCTensor *input,
                      int dimension,
                      int keepdim) {
  THCTensor *transposed, *contiguous, *valuesTransposed;
  THLongStorage *position;
  THCudaLongStorage *sortBuffer;
  THCudaLongTensor *indicesTransposed;
  int64_t ndim, sliceSize, slices;

  THAssert(THCTensor_(checkGPU)(state, 1, values));

  // Verify they are asking for a valid dimension
  ndim = THCTensor_(nDimensionLegacyAll)(state, input);
  THArgCheck(dimension >= 0 && dimension < ndim, 4, "Dimension of out bounds");

  sliceSize = THCTensor_(sizeLegacyNoScalars)(state, input, dimension);
  slices = THCTensor_(nElement)(state, input) / sliceSize;

  // Resize output value, index Tensors to appropriate sizes (i.e. the same as
  // the input Tensor, except at dim=dimension, the size is 1)
  THCTensor_preserveReduceDimSemantics(
      state, values, ndim, dimension, keepdim);
  THCTensor_preserveReduceDimSemantics(
      state, indices, ndim, dimension, keepdim);
  std::vector<int64_t> dim = THTensor_sizesLegacyNoScalars(input);
  dim[dimension] = 1;
  THCTensor_(resize)(state, values, dim, {});
  THCudaLongTensor_resize(state, indices, dim, {});

  // If sliceSize is 1, copy input to values and set indices
  // (the mode of a single element is the element itself)
  if (sliceSize == 1) {
    THCTensor_(copy)(state, values, input);
    THCudaLongTensor_fill(state, indices, TH_INDEX_BASE);
    if (!keepdim) {
      THCTensor_(squeeze1d)(state, values, values, dimension);
      THCudaLongTensor_squeeze1d(state, indices, indices, dimension);
    }
    return;
  }

  // Requirements for fused kernel implementation:
  //
  // 1. sliceSize <= 2 * max threads per block
  // 2. uses one block per slice, so number of slices must be less than the maximum number of blocks for
  // a kernel launch
  // 3. Can use 32-bit index math for indexing (mainly just for implementation conciseness, could be changed)
  if (sliceSize <= MAX_BLOCK_SIZE &&
      slices <= MAX_GRID_SIZE &&
      THCTensor_canUse32BitIndexMath(state, input)) {
    // Beginning our optimized implementation. First thing we want to do is to transpose
    // the input Tensor along the sort dimension, and then make it contiguous
    transposed = THCTensor_(newTranspose)(state, input, dimension, ndim - 1);
    contiguous = THCTensor_(newContiguous)(state, transposed);

    // We also need to view the values and indices Tensors as transposed in order to
    // properly determine the offset into the underlying storage in which to place the
    // mode and index for a particular set of dimension values
    valuesTransposed = THCTensor_(newTranspose)(state, values, dimension, ndim-1);
    indicesTransposed = THCudaLongTensor_newTranspose(state, indices, dimension, ndim-1);

    // Set-up TensorInfo structs for passing to kernel
    TensorInfo<scalar_t, unsigned int> tiValues = getTensorInfo<scalar_t, THCTensor, unsigned int>(state, valuesTransposed);
    TensorInfo<int64_t, unsigned int> tiIndices = getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, indicesTransposed);

    // The number of blocks is the number of slices that we need to calculate the mode for. Each block
    // is responsible for computing a single mode
    dim3 grid;
    THC_getGridFromTiles(slices, grid);

    // The blocksize is two elements per thread, rounded up to the nearest power of 2
    int64_t ceilPowerOf2 = nextHighestPowerOf2(sliceSize);

    // Macro that calls kernel --> note that we set the block dimensions here, and
    // the amount of shared memory
  #define HANDLE_MODE(SIZE) \
  { \
    dim3 blockSize(SIZE / 2); \
\
    int memsize = (sizeof(scalar_t) * SIZE) + (2 * SIZE * sizeof(unsigned int)); \
    hipLaunchKernelGGL(( computeMode<scalar_t, SIZE>) \
      , dim3(grid), dim3(blockSize), memsize, THCState_getCurrentStream(state), \
        THCTensor_(data)(state, contiguous), tiValues, tiIndices, sliceSize); \
  }

    // Tradeoff between compilation time and the number of specializations. Ideally we would have
    // one HANDLE_MODE for each power of 2
    switch(ceilPowerOf2) {
      case 2048:
        HANDLE_MODE(2048)
        break;
      case 1024:
      case 512:
      case 256:
        HANDLE_MODE(1024)
        break;
      case 128:
      case 64:
        HANDLE_MODE(128)
        break;
      case 32:
      case 16:
      case 8:
      case 4:
      case 2:
        HANDLE_MODE(32)
        break;
      case 1:
      default:
        // sliceSize == 1 was handled above, so this should be unreachable
        assert(false);
    }
    THCudaCheck(hipGetLastError());

    THCTensor_(free)(state, transposed);
    THCTensor_(free)(state, contiguous);
    THCTensor_(free)(state, valuesTransposed);
    THCudaLongTensor_free(state, indicesTransposed);
  } else {
    // Beginning our naive implementation: We don't want to mutate the input Tensor, but
    // we need to be able to sort the inputs along the dimension in order to calculate the
    // mode. Additionally, its ideal if the data along the dimension is contiguous. So
    // we transpose the dimension with the innermost dimension and make a new contiguous
    // version that we can use.
    transposed = THCTensor_(newClone)(state, input);
    THCTensor_(transpose)(state, transposed, NULL, dimension, ndim - 1);
    contiguous = THCTensor_(newContiguous)(state, transposed);
    THCTensor_(free)(state, transposed);

    // We also need to view the values and indices Tensors as transposed in order to
    // properly determine the offset into the underlying storage in which to place the
    // mode and index for a particular set of dimension values
    valuesTransposed = THCTensor_(newTranspose)(state, values, dimension, ndim - 1);
    indicesTransposed = THCudaLongTensor_newTranspose(state, indices, dimension, ndim - 1);

    // Position is a Storage that will store the dimension values we are processing
    position = THLongStorage_newWithSize(ndim - 1);

    // Sort Buffer is a Storage that will be used in the internal sort required to calculate
    // the mode efficiently
    sortBuffer = THCudaLongStorage_newWithSize(state, sliceSize);

    // Call mode
    THCTensor_(dimApplyMode)(state, valuesTransposed, indicesTransposed, contiguous, sortBuffer, dimension, position, 0);

    THCTensor_(free)(state, contiguous);
    THLongStorage_free(position);
    THCTensor_(free)(state, valuesTransposed);
    THCudaLongTensor_free(state, indicesTransposed);
    THCudaLongStorage_free(state, sortBuffer);
  }

  if (!keepdim) {
    THCTensor_(squeeze1d)(state, values, values, dimension);
    THCudaLongTensor_squeeze1d(state, indices, indices, dimension);
  }
}
#undef MAX_GRID_SIZE
#undef MAX_BLOCK_SIZE
#endif
| 1221e9b0918527bfbaf6cdecbcac751eef19578c.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THC/generic/THCTensorMode.cu"
#else
void THCTensor_(calculateMode)(THCState *state,
THCTensor *values,
THCudaLongTensor *indices,
THCTensor *input,
THCudaLongStorage *sortBuffer,
int dimension,
THLongStorage *position) {
THAssert(THCTensor_(isContiguous)(state, input));
// Because the input is contiguous, we want to get a reference to the
// location of the buffer at the innermost dimension that we are going
// to calculate the mode for --> we do this by manually doing the stride
// calculations to get an offset
scalar_t *data = THCTensor_(data)(state, input);
for (int i = 0; i < THLongStorage_size(position); ++i) {
data += THLongStorage_data(position)[i] * THTensor_strideLegacyNoScalars(input, i);
}
int64_t nElement = THCTensor_(sizeLegacyNoScalars)(state, input, THCTensor_(nDimensionLegacyAll)(state, input) - 1);
THCThrustAllocator thrustAlloc(state);
// Wrap input data, sortBuffer, in Thrust device vectors
thrust::device_ptr<scalar_t> vecPtr = thrust::device_pointer_cast(data);
thrust::device_vector<scalar_t> iter(vecPtr, vecPtr + nElement);
thrust::device_ptr<int64_t> sbPtr = thrust::device_pointer_cast(THCudaLongStorage_data(state, sortBuffer));
thrust::device_vector<int64_t> seq(sbPtr, sbPtr + nElement);
// Fill sortBuffer with [0, 1, 2, ... nElement - 1]
thrust::sequence(
#if CUDA_VERSION >= 7000
thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#else
thrust::device,
#endif
seq.begin(), seq.end());
// Sort the input data. The original indices of the data are stored in seq
thrust::sort_by_key(
#if CUDA_VERSION >= 7000
thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#else
thrust::device,
#endif
iter.begin(), iter.end(), seq.begin()
#if defined(THC_REAL_IS_HALF)
, ThrustHalfLess()
#endif
);
// Count # of unique elements via an inner product between adjacent elements.
// Add 1 if two neighboring element are not equal.
int unique = 1 + thrust::inner_product(
#if CUDA_VERSION >= 7000
thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#else
thrust::device,
#endif
iter.begin(), iter.end() - 1, iter.begin() + 1, 0, thrust::plus<int>(),
#if defined(THC_REAL_IS_HALF)
ThrustHalfNotEqualTo()
#else
thrust::not_equal_to<scalar_t>()
#endif
);
// Count frequency of each element
thrust::device_vector<scalar_t> keys(unique);
thrust::device_vector<int> counts(unique);
thrust::reduce_by_key(
#if CUDA_VERSION >= 7000
thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#else
thrust::device,
#endif
iter.begin(), iter.end(),
thrust::constant_iterator<int>(1), keys.begin(), counts.begin()
#if defined(THC_REAL_IS_HALF)
, ThrustHalfEqualTo()
#endif
);
// Find index of maximum count
thrust::device_vector<int>::iterator it = thrust::max_element(
#if CUDA_VERSION >= 7000
thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#else
thrust::device,
#endif
counts.begin(), counts.end());
scalar_t mode = keys[it - counts.begin()];
// Find first index within which it occurs
#if defined(THC_REAL_IS_HALF)
thrust::device_vector<scalar_t>::iterator positionIter = thrust::find_if(
#if CUDA_VERSION >= 7000
thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#else
thrust::device,
#endif
iter.begin(), iter.end(), ThrustHalfEqualToPredicate(mode));
#else
thrust::device_vector<scalar_t>::iterator positionIter = thrust::find(
#if CUDA_VERSION >= 7000
thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#else
thrust::device,
#endif
iter.begin(), iter.end(), mode);
#endif
THAssert(positionIter != iter.end());
int64_t index = TH_INDEX_BASE + seq[positionIter - iter.begin()];
// Place mode, index in output
ptrdiff_t valuesOffset = THCTensor_(storageOffset)(state, values);
int64_t indicesOffset = THCudaLongTensor_storageOffset(state, indices);
for (int i = 0; i < THLongStorage_size(position); ++i) {
int64_t pos = THLongStorage_data(position)[i];
valuesOffset += THTensor_strideLegacyNoScalars(values, i) * pos;
indicesOffset += THTensor_strideLegacyNoScalars(indices, i) * pos;
}
THCStorage_(set)(state, THCTensor_(storage)(state, values), valuesOffset, mode);
THCudaLongStorage_set(state, THCudaLongTensor_storage(state, indices), indicesOffset, index);
}
// this probably could be a loop, not a recursive algorithm
// Recursively enumerates every coordinate of the outer (non-reduced)
// dimensions of `input`, recording the coordinate for dimension `curDim`
// in `position`.  Once all outer dimensions are fixed (curDim == ndim - 1)
// it delegates to THCTensor_(calculateMode), which computes the mode of
// the innermost slice selected by `position` and writes the result into
// `values` / `indices`.
// NOTE(review): callers are expected to have transposed the tensors so the
// mode dimension is innermost (see THCTensor_(mode)); `dimension` is only
// forwarded to calculateMode.
void THCTensor_(dimApplyMode)(THCState *state,
                              THCTensor *values,            // output mode values (transposed view)
                              THCudaLongTensor *indices,    // output mode indices (transposed view)
                              THCTensor *input,             // contiguous input, mode dim innermost
                              THCudaLongStorage *sortBuffer, // scratch buffer of length sliceSize
                              int dimension,
                              THLongStorage *position,      // current outer-dimension coordinates
                              int curDim) {
  int64_t ndim = THCTensor_(nDimensionLegacyAll)(state, input);
  // Because we have transposed the Tensor, the data for the dimension we are mode'ing along
  // is always in the innermost dimension
  if (curDim == ndim - 1) {
    THCTensor_(calculateMode)(state, values, indices, input, sortBuffer, dimension, position);
  } else {
    // Loop through the values and recurse
    for (int i = 0; i < THCTensor_(sizeLegacyNoScalars)(state, input, curDim); ++i) {
      THLongStorage_data(position)[curDim] = i;
      THCTensor_(dimApplyMode)(state, values, indices, input, sortBuffer, dimension, position, curDim + 1);
    }
  }
}
#define MAX_GRID_SIZE 65535
#define MAX_BLOCK_SIZE 1024
// Computes the mode (most frequent value) of `input` along `dimension`,
// writing the mode values into `values` and the index of an occurrence
// (offset by TH_INDEX_BASE) into `indices`.  Two code paths:
//  - a fused single-kernel implementation (computeMode) when each slice fits
//    in one thread block and 32-bit indexing suffices, and
//  - a Thrust-based fallback that sorts each slice individually via
//    THCTensor_(dimApplyMode) / calculateMode.
// When `keepdim` is false, the reduced dimension is squeezed from the outputs.
void THCTensor_(mode)(THCState *state,
                      THCTensor *values,
                      THCudaLongTensor *indices,
                      THCTensor *input,
                      int dimension,
                      int keepdim) {
  THCTensor *transposed, *contiguous, *valuesTransposed;
  THLongStorage *position;
  THCudaLongStorage *sortBuffer;
  THCudaLongTensor *indicesTransposed;
  int64_t ndim, sliceSize, slices;

  THAssert(THCTensor_(checkGPU)(state, 1, values));

  // Verify they are asking for a valid dimension
  ndim = THCTensor_(nDimensionLegacyAll)(state, input);
  THArgCheck(dimension >= 0 && dimension < ndim, 4, "Dimension of out bounds");

  // sliceSize = length of the mode dimension; slices = number of independent
  // 1-D slices we compute a mode for.
  sliceSize = THCTensor_(sizeLegacyNoScalars)(state, input, dimension);
  slices = THCTensor_(nElement)(state, input) / sliceSize;

  // Resize output value, index Tensors to appropriate sizes (i.e. the same as
  // the input Tensor, except at dim=dimension, the size is 1)
  THCTensor_preserveReduceDimSemantics(
      state, values, ndim, dimension, keepdim);
  THCTensor_preserveReduceDimSemantics(
      state, indices, ndim, dimension, keepdim);
  std::vector<int64_t> dim = THTensor_sizesLegacyNoScalars(input);
  dim[dimension] = 1;
  THCTensor_(resize)(state, values, dim, {});
  THCudaLongTensor_resize(state, indices, dim, {});

  // If sliceSize is 1, every element is trivially its own mode:
  // copy input to values and set indices
  if (sliceSize == 1) {
    THCTensor_(copy)(state, values, input);
    THCudaLongTensor_fill(state, indices, TH_INDEX_BASE);
    if (!keepdim) {
      THCTensor_(squeeze1d)(state, values, values, dimension);
      THCudaLongTensor_squeeze1d(state, indices, indices, dimension);
    }
    return;
  }

  // Requirements for fused kernel implementation:
  //
  // 1. sliceSize <= 2 * max threads per block
  // 2. uses one block per slice, so number of slices must be less than the maximum number of blocks for
  // a kernel launch
  // 3. Can use 32-bit index math for indexing (mainly just for implementation conciseness, could be changed)
  if (sliceSize <= MAX_BLOCK_SIZE &&
      slices <= MAX_GRID_SIZE &&
      THCTensor_canUse32BitIndexMath(state, input)) {
    // Beginning our optimized implementation. First thing we want to do is to transpose
    // the input Tensor along the sort dimension, and then make it contiguous
    transposed = THCTensor_(newTranspose)(state, input, dimension, ndim - 1);
    contiguous = THCTensor_(newContiguous)(state, transposed);

    // We also need to view the values and indices Tensors as transposed in order to
    // properly determine the offset into the underlying storage in which to place the
    // mode and index for a particular set of dimension values
    valuesTransposed = THCTensor_(newTranspose)(state, values, dimension, ndim-1);
    indicesTransposed = THCudaLongTensor_newTranspose(state, indices, dimension, ndim-1);

    // Set-up TensorInfo structs for passing to kernel
    TensorInfo<scalar_t, unsigned int> tiValues = getTensorInfo<scalar_t, THCTensor, unsigned int>(state, valuesTransposed);
    TensorInfo<int64_t, unsigned int> tiIndices = getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, indicesTransposed);

    // The number of blocks is the number of slices that we need to calculate the mode for. Each block
    // is responsible for computing a single mode
    dim3 grid;
    THC_getGridFromTiles(slices, grid);

    // The blocksize is two elements per thread, rounded up to the nearest power of 2
    int64_t ceilPowerOf2 = nextHighestPowerOf2(sliceSize);

    // Macro that calls kernel --> note that we set the block dimensions here, and
    // the amount of shared memory
    #define HANDLE_MODE(SIZE) \
    { \
      dim3 blockSize(SIZE / 2); \
      \
      int memsize = (sizeof(scalar_t) * SIZE) + (2 * SIZE * sizeof(unsigned int)); \
      computeMode<scalar_t, SIZE> \
        <<<grid, blockSize, memsize, THCState_getCurrentStream(state)>>>( \
          THCTensor_(data)(state, contiguous), tiValues, tiIndices, sliceSize); \
    }

    // Tradeoff between compilation time and the number of specializations. Ideally we would have
    // one HANDLE_MODE for each power of 2
    switch(ceilPowerOf2) {
      case 2048:
        HANDLE_MODE(2048)
        break;
      case 1024:
      case 512:
      case 256:
        HANDLE_MODE(1024)
        break;
      case 128:
      case 64:
        HANDLE_MODE(128)
        break;
      case 32:
      case 16:
      case 8:
      case 4:
      case 2:
        HANDLE_MODE(32)
        break;
      case 1:
      default:
        assert(false);
    }
    THCudaCheck(cudaGetLastError());

    THCTensor_(free)(state, transposed);
    THCTensor_(free)(state, contiguous);
    THCTensor_(free)(state, valuesTransposed);
    THCudaLongTensor_free(state, indicesTransposed);
  } else {
    // Beginning our naive implementation: We don't want to mutate the input Tensor, but
    // we need to be able to sort the inputs along the dimension in order to calculate the
    // mode. Additionally, its ideal if the data along the dimension is contiguous. So
    // we transpose the dimension with the innermost dimension and make a new contiguous
    // version that we can use.
    transposed = THCTensor_(newClone)(state, input);
    THCTensor_(transpose)(state, transposed, NULL, dimension, ndim - 1);
    contiguous = THCTensor_(newContiguous)(state, transposed);
    THCTensor_(free)(state, transposed);

    // We also need to view the values and indices Tensors as transposed in order to
    // properly determine the offset into the underlying storage in which to place the
    // mode and index for a particular set of dimension values
    valuesTransposed = THCTensor_(newTranspose)(state, values, dimension, ndim - 1);
    indicesTransposed = THCudaLongTensor_newTranspose(state, indices, dimension, ndim - 1);

    // Position is a Storage that will store the dimension values we are processing
    position = THLongStorage_newWithSize(ndim - 1);

    // Sort Buffer is a Storage that will be used in the internal sort required to calculate
    // the mode efficiently
    sortBuffer = THCudaLongStorage_newWithSize(state, sliceSize);

    // Call mode
    THCTensor_(dimApplyMode)(state, valuesTransposed, indicesTransposed, contiguous, sortBuffer, dimension, position, 0);

    THCTensor_(free)(state, contiguous);
    THLongStorage_free(position);
    THCTensor_(free)(state, valuesTransposed);
    THCudaLongTensor_free(state, indicesTransposed);
    THCudaLongStorage_free(state, sortBuffer);
  }

  if (!keepdim) {
    THCTensor_(squeeze1d)(state, values, values, dimension);
    THCudaLongTensor_squeeze1d(state, indices, indices, dimension);
  }
}
#undef MAX_GRID_SIZE
#undef MAX_BLOCK_SIZE
#endif
|
50ed77103f7d9030d0e9e9adec301b80116a9671.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2011-2015 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "mse_layer_updater_cuda.h"
#include <hip/hip_runtime.h>
#include "util_cuda.h"
#include "../mse_layer.h"
#include "../neural_network_exception.h"
namespace nnforge
{
namespace cuda
{
// Dynamic shared memory: one partial sum per warp for the cross-warp reduction.
extern __shared__ float arr_sh[];
// Forward MSE: for each (neuron, entry) pair computes
//   output = mask * scale * sum_f (input0[f] - input1[f])^2
// summed over the feature-map dimension.
// Launch configuration (see enqueue_forward_propagation):
//   grid  = (elem_count_per_feature_map, entry_count), one block per output
//   block = 1-D; threads stride over feature maps
//   dynamic shared memory = one float per warp
// `scale_mask` may be null, in which case the mask is 1 everywhere.
__global__ void mse_upd_kernel(
	float * __restrict output,
	const float * __restrict input0,
	const float * __restrict input1,
	const float * __restrict scale_mask,
	int input_feature_map_count,
	int elem_count_per_feature_map,
	float scale,
	int entry_count)
{
	int feature_map_id = threadIdx.x;
	int neuron_id = blockIdx.x;
	int entry_id = blockIdx.y;
	int threadblock_size = blockDim.x;
	float err = 0.0F;
	int output_offset = entry_id * elem_count_per_feature_map + neuron_id;
	float mask = 1.0F;
	if (scale_mask)
		mask = scale_mask[output_offset];
	int thread_id = threadIdx.x;
	// `mask` depends only on blockIdx, so it is uniform across the block:
	// this branch does not diverge and the __syncthreads() below is safe.
	if (mask != 0.0F)
	{
		// Each thread accumulates squared differences over a strided subset
		// of the feature maps.
		int input_offset = (entry_id * input_feature_map_count + feature_map_id) * elem_count_per_feature_map + neuron_id;
		while (feature_map_id < input_feature_map_count)
		{
			float local_err = input0[input_offset] - input1[input_offset];
			err += local_err * local_err;
			feature_map_id += threadblock_size;
			input_offset += threadblock_size * elem_count_per_feature_map;
		}
		int lane_id = thread_id & 31;
		// Intra-warp tree reduction of per-thread partials.
		#pragma unroll
		for(int tx = 16; tx > 0; tx >>= 1)
			err += __shfl_down(err, tx);
		int warp_count = threadblock_size >> 5;
		if (warp_count > 1)
		{
			// Cross-warp reduction: lane 0 of each warp publishes its warp's
			// sum to shared memory, then the first warp folds those together.
			if (lane_id == 0)
				arr_sh[thread_id >> 5] = err;
			__syncthreads();
			if (thread_id < 32)
			{
				err = 0.0F;
				if (thread_id < warp_count)
					err = arr_sh[thread_id];
				// NOTE(review): this tree (tx = 4,2,1) only folds 8 partials,
				// so it relies on warp_count <= 8, i.e. blockDim.x <= 256 as
				// produced by get_threadblock_size — confirm if the launch
				// configuration ever changes.
				#pragma unroll
				for(int tx = 4; tx > 0; tx >>= 1)
					err += __shfl_down(err, tx);
			}
		}
	}
	if (thread_id == 0)
		output[output_offset] = err * (mask * scale);
}
// Elementwise MSE gradient: grad = scale2 * (target - predicted).
// Launched over a (possibly 2-D) grid of 1-D blocks, one thread per element;
// the template flag selects accumulate-into vs. overwrite semantics.
template<bool add_update_to_destination>
__global__ void mse_backprop_upd_kernel(
	float * __restrict output,
	const float * __restrict deriv_input_neurons,
	const float * __restrict target_input_neurons,
	float scale2,
	int elem_count)
{
	const int flat_block_id = blockIdx.y * gridDim.x + blockIdx.x;
	const int elem_id = blockDim.x * flat_block_id + threadIdx.x;
	if (elem_id >= elem_count)
		return; // tail guard: the grid may overshoot elem_count
	const float gradient = scale2 * (target_input_neurons[elem_id] - deriv_input_neurons[elem_id]);
	if (add_update_to_destination)
		output[elem_id] += gradient;
	else
		output[elem_id] = gradient;
}
// Masked MSE gradient: grad = mask * scale2 * (target - predicted), where the
// mask is shared by all feature maps of a given (entry, neuron) position.
// Launched over a 3-D grid: x = neuron, y = feature map, z = entry; the
// template flag selects accumulate-into vs. overwrite semantics.
template<bool add_update_to_destination>
__global__ void mse_backprop_upd_kernel(
	float * __restrict output,
	const float * __restrict deriv_input_neurons,
	const float * __restrict target_input_neurons,
	const float * __restrict scale_mask,
	float scale2,
	int elem_count_per_feature_map,
	int input_feature_map_count,
	int entry_count)
{
	const int neuron_id = blockDim.x * blockIdx.x + threadIdx.x;
	const int feature_map_id = blockDim.y * blockIdx.y + threadIdx.y;
	const int entry_id = blockDim.z * blockIdx.z + threadIdx.z;
	// Tail guard on every grid axis.
	if ((neuron_id >= elem_count_per_feature_map) || (feature_map_id >= input_feature_map_count) || (entry_id >= entry_count))
		return;
	const int elem_id = (entry_id * input_feature_map_count + feature_map_id) * elem_count_per_feature_map + neuron_id;
	// The mask has no feature-map dimension.
	const float mask = scale_mask[entry_id * elem_count_per_feature_map + neuron_id];
	float gradient = 0.0F;
	if (mask != 0.0F)
		gradient = target_input_neurons[elem_id] - deriv_input_neurons[elem_id];
	const float update = gradient * (mask * scale2);
	if (add_update_to_destination)
		output[elem_id] += update;
	else
		output[elem_id] = update;
}
// Nothing to construct or release here: all per-layer configuration
// happens later in updater_configured().
mse_layer_updater_cuda::mse_layer_updater_cuda()
{
}

mse_layer_updater_cuda::~mse_layer_updater_cuda()
{
}
// Forward pass: launches mse_upd_kernel with one block per (neuron, entry)
// pair; threads within a block cooperate over the feature-map dimension.
// input_buffers[0]/[1] are the two operands; an optional input_buffers[2]
// is a per-neuron scale mask.
void mse_layer_updater_cuda::enqueue_forward_propagation(
	hipStream_t stream_id,
	cuda_linear_buffer_device::ptr output_buffer,
	const std::vector<cuda_linear_buffer_device::const_ptr>& schema_data,
	const std::vector<cuda_linear_buffer_device::ptr>& data,
	const std::vector<cuda_linear_buffer_device::const_ptr>& data_custom,
	const std::vector<cuda_linear_buffer_device::const_ptr>& input_buffers,
	const std::vector<cuda_linear_buffer_device::const_ptr>& persistent_working_data,
	cuda_linear_buffer_device::ptr temporary_working_fixed_buffer,
	cuda_linear_buffer_device::ptr temporary_working_per_entry_buffer,
	cuda_linear_buffer_device::ptr temporary_per_entry_buffer,
	unsigned int entry_count)
{
	// Block size is chosen so that threads cover the feature-map count
	// (capped and rounded to warp multiples by get_threadblock_size).
	int threadblock_size = get_threadblock_size(input_configuration_specific_list[0].feature_map_count);
	const float * scale_mask = 0;
	if (input_buffers.size() > 2)
		scale_mask = *input_buffers[2]; // optional 3rd input: per-neuron scale mask
	// One float of shared memory per warp for the cross-warp reduction.
	int smem_size = ((threadblock_size + 32 - 1) / 32) * sizeof(float);
	hipLaunchKernelGGL(( mse_upd_kernel), dim3(dim3(input_elem_count_per_feature_map_list[0], entry_count)), dim3(threadblock_size), smem_size, stream_id,
		*output_buffer,
		*input_buffers[0],
		*input_buffers[1],
		scale_mask,
		input_configuration_specific_list[0].feature_map_count,
		input_elem_count_per_feature_map_list[0],
		scale,
		entry_count);
}
// Backward pass w.r.t. one of the two data inputs.  For
// loss = scale * sum (in0 - in1)^2, the gradient w.r.t. input i is
// scale * 2 * (other - this) — hence the [input_index] / [1 - input_index]
// pairing below.  Dispatches to the masked 3-D kernel when a scale mask
// (3rd input) is present, otherwise to the flat elementwise kernel.
void mse_layer_updater_cuda::enqueue_backward_data_propagation(
	hipStream_t stream_id,
	unsigned int input_index,
	cuda_linear_buffer_device::ptr input_errors_buffer,
	cuda_linear_buffer_device::const_ptr output_errors_buffer,
	const std::vector<cuda_linear_buffer_device::const_ptr>& schema_data,
	const std::vector<cuda_linear_buffer_device::ptr>& data,
	const std::vector<cuda_linear_buffer_device::const_ptr>& data_custom,
	const std::vector<cuda_linear_buffer_device::const_ptr>& input_neurons_buffers,
	cuda_linear_buffer_device::const_ptr output_neurons_buffer,
	const std::vector<cuda_linear_buffer_device::const_ptr>& persistent_working_data,
	cuda_linear_buffer_device::ptr temporary_working_fixed_buffer,
	cuda_linear_buffer_device::ptr temporary_working_per_entry_buffer,
	cuda_linear_buffer_device::const_ptr temporary_per_entry_buffer,
	bool add_update_to_destination,
	unsigned int entry_count)
{
	if (input_neurons_buffers.size() > 2)
	{
		// Masked path: 3-D launch over (neuron, feature map, entry).
		std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
			*cuda_config,
			input_elem_count_per_feature_map_list[0],
			input_configuration_specific_list[0].feature_map_count,
			entry_count);
		if (add_update_to_destination)
			hipLaunchKernelGGL(( mse_backprop_upd_kernel<true>), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id,
				*input_errors_buffer,
				*input_neurons_buffers[input_index],
				*input_neurons_buffers[1 - input_index],
				*input_neurons_buffers[2],
				scale * 2.0F,
				input_elem_count_per_feature_map_list[0],
				input_configuration_specific_list[0].feature_map_count,
				entry_count);
		else
			hipLaunchKernelGGL(( mse_backprop_upd_kernel<false>), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id,
				*input_errors_buffer,
				*input_neurons_buffers[input_index],
				*input_neurons_buffers[1 - input_index],
				*input_neurons_buffers[2],
				scale * 2.0F,
				input_elem_count_per_feature_map_list[0],
				input_configuration_specific_list[0].feature_map_count,
				entry_count);
	}
	else
	{
		// Unmasked path: flat 1-D launch over all elements.
		int elem_count = entry_count * input_elem_count_per_entry_list[0];
		std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
			*cuda_config,
			elem_count);
		if (add_update_to_destination)
			hipLaunchKernelGGL(( mse_backprop_upd_kernel<true>), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id,
				*input_errors_buffer,
				*input_neurons_buffers[input_index],
				*input_neurons_buffers[1 - input_index],
				scale * 2.0F,
				elem_count);
		else
			hipLaunchKernelGGL(( mse_backprop_upd_kernel<false>), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id,
				*input_errors_buffer,
				*input_neurons_buffers[input_index],
				*input_neurons_buffers[1 - input_index],
				scale * 2.0F,
				elem_count);
	}
}
// Caches the configured scale factor from the layer schema, and rejects
// configurations that would require gradients w.r.t. the scale mask
// (input index 2), which this implementation cannot produce.
void mse_layer_updater_cuda::updater_configured()
{
	if (actions.find(layer_action(layer_action::backward_data, 2)) != actions.end())
		throw neural_network_exception("mse_layer_updater_cuda cannot do backward propagation for scale mask");

	nnforge_shared_ptr<const mse_layer> layer_derived = nnforge_dynamic_pointer_cast<const mse_layer>(layer_schema);
	scale = layer_derived->scale;
}
// The backward pass only reads the forward inputs (predicted and target
// neurons), never the forward output, so the output buffer may be dropped.
bool mse_layer_updater_cuda::is_backward_data_dependent_on_output_buffer(unsigned int action_input_index) const
{
	return false;
}
// Picks the 1-D block size used by mse_upd_kernel for a given feature-map
// count: the count rounded up to a warp multiple when it is small, otherwise
// the count is split evenly across ceil(count / 256) strides and that
// per-stride share is rounded up to a warp multiple (so the result never
// exceeds 256).
int mse_layer_updater_cuda::get_threadblock_size(int input_feature_map_count)
{
	const int warp_size = 32;
	if (input_feature_map_count < 256)
		return ((input_feature_map_count + warp_size - 1) / warp_size) * warp_size;

	const int stride_count = (input_feature_map_count + 256 - 1) / 256;
	const int per_stride = (input_feature_map_count + stride_count - 1) / stride_count;
	return ((per_stride + warp_size - 1) / warp_size) * warp_size;
}
}
}
| 50ed77103f7d9030d0e9e9adec301b80116a9671.cu | /*
* Copyright 2011-2015 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "mse_layer_updater_cuda.h"
#include <cuda_runtime.h>
#include "util_cuda.h"
#include "../mse_layer.h"
#include "../neural_network_exception.h"
namespace nnforge
{
namespace cuda
{
// Dynamic shared memory: one partial sum per warp for the cross-warp reduction.
extern __shared__ float arr_sh[];
// Forward MSE: for each (neuron, entry) pair computes
//   output = mask * scale * sum_f (input0[f] - input1[f])^2
// summed over the feature-map dimension.
// Launch configuration (see enqueue_forward_propagation):
//   grid  = (elem_count_per_feature_map, entry_count), one block per output
//   block = 1-D; threads stride over feature maps
//   dynamic shared memory = one float per warp
// `scale_mask` may be null, in which case the mask is 1 everywhere.
__global__ void mse_upd_kernel(
	float * __restrict output,
	const float * __restrict input0,
	const float * __restrict input1,
	const float * __restrict scale_mask,
	int input_feature_map_count,
	int elem_count_per_feature_map,
	float scale,
	int entry_count)
{
	int feature_map_id = threadIdx.x;
	int neuron_id = blockIdx.x;
	int entry_id = blockIdx.y;
	int threadblock_size = blockDim.x;
	float err = 0.0F;
	int output_offset = entry_id * elem_count_per_feature_map + neuron_id;
	float mask = 1.0F;
	if (scale_mask)
		mask = scale_mask[output_offset];
	int thread_id = threadIdx.x;
	// `mask` depends only on blockIdx, so it is uniform across the block:
	// this branch does not diverge and the __syncthreads() below is safe.
	if (mask != 0.0F)
	{
		// Each thread accumulates squared differences over a strided subset
		// of the feature maps.
		int input_offset = (entry_id * input_feature_map_count + feature_map_id) * elem_count_per_feature_map + neuron_id;
		while (feature_map_id < input_feature_map_count)
		{
			float local_err = input0[input_offset] - input1[input_offset];
			err += local_err * local_err;
			feature_map_id += threadblock_size;
			input_offset += threadblock_size * elem_count_per_feature_map;
		}
		int lane_id = thread_id & 31;
		// Intra-warp tree reduction of per-thread partials.
		// NOTE(review): mask-less __shfl_down is removed for Volta+; on
		// modern CUDA this would need __shfl_down_sync — confirm target arch.
		#pragma unroll
		for(int tx = 16; tx > 0; tx >>= 1)
			err += __shfl_down(err, tx);
		int warp_count = threadblock_size >> 5;
		if (warp_count > 1)
		{
			// Cross-warp reduction: lane 0 of each warp publishes its warp's
			// sum to shared memory, then the first warp folds those together.
			if (lane_id == 0)
				arr_sh[thread_id >> 5] = err;
			__syncthreads();
			if (thread_id < 32)
			{
				err = 0.0F;
				if (thread_id < warp_count)
					err = arr_sh[thread_id];
				// NOTE(review): this tree (tx = 4,2,1) only folds 8 partials,
				// so it relies on warp_count <= 8, i.e. blockDim.x <= 256 as
				// produced by get_threadblock_size — confirm if the launch
				// configuration ever changes.
				#pragma unroll
				for(int tx = 4; tx > 0; tx >>= 1)
					err += __shfl_down(err, tx);
			}
		}
	}
	if (thread_id == 0)
		output[output_offset] = err * (mask * scale);
}
// Elementwise MSE gradient: grad = scale2 * (target - predicted).
// Launched over a (possibly 2-D) grid of 1-D blocks, one thread per element;
// the template flag selects accumulate-into vs. overwrite semantics.
template<bool add_update_to_destination>
__global__ void mse_backprop_upd_kernel(
	float * __restrict output,
	const float * __restrict deriv_input_neurons,
	const float * __restrict target_input_neurons,
	float scale2,
	int elem_count)
{
	const int flat_block_id = blockIdx.y * gridDim.x + blockIdx.x;
	const int elem_id = blockDim.x * flat_block_id + threadIdx.x;
	if (elem_id >= elem_count)
		return; // tail guard: the grid may overshoot elem_count
	const float gradient = scale2 * (target_input_neurons[elem_id] - deriv_input_neurons[elem_id]);
	if (add_update_to_destination)
		output[elem_id] += gradient;
	else
		output[elem_id] = gradient;
}
// Masked MSE gradient: grad = mask * scale2 * (target - predicted), where the
// mask is shared by all feature maps of a given (entry, neuron) position.
// Launched over a 3-D grid: x = neuron, y = feature map, z = entry; the
// template flag selects accumulate-into vs. overwrite semantics.
template<bool add_update_to_destination>
__global__ void mse_backprop_upd_kernel(
	float * __restrict output,
	const float * __restrict deriv_input_neurons,
	const float * __restrict target_input_neurons,
	const float * __restrict scale_mask,
	float scale2,
	int elem_count_per_feature_map,
	int input_feature_map_count,
	int entry_count)
{
	const int neuron_id = blockDim.x * blockIdx.x + threadIdx.x;
	const int feature_map_id = blockDim.y * blockIdx.y + threadIdx.y;
	const int entry_id = blockDim.z * blockIdx.z + threadIdx.z;
	// Tail guard on every grid axis.
	if ((neuron_id >= elem_count_per_feature_map) || (feature_map_id >= input_feature_map_count) || (entry_id >= entry_count))
		return;
	const int elem_id = (entry_id * input_feature_map_count + feature_map_id) * elem_count_per_feature_map + neuron_id;
	// The mask has no feature-map dimension.
	const float mask = scale_mask[entry_id * elem_count_per_feature_map + neuron_id];
	float gradient = 0.0F;
	if (mask != 0.0F)
		gradient = target_input_neurons[elem_id] - deriv_input_neurons[elem_id];
	const float update = gradient * (mask * scale2);
	if (add_update_to_destination)
		output[elem_id] += update;
	else
		output[elem_id] = update;
}
// Nothing to construct or release here: all per-layer configuration
// happens later in updater_configured().
mse_layer_updater_cuda::mse_layer_updater_cuda()
{
}

mse_layer_updater_cuda::~mse_layer_updater_cuda()
{
}
// Forward pass: launches mse_upd_kernel with one block per (neuron, entry)
// pair; threads within a block cooperate over the feature-map dimension.
// input_buffers[0]/[1] are the two operands; an optional input_buffers[2]
// is a per-neuron scale mask.
void mse_layer_updater_cuda::enqueue_forward_propagation(
	cudaStream_t stream_id,
	cuda_linear_buffer_device::ptr output_buffer,
	const std::vector<cuda_linear_buffer_device::const_ptr>& schema_data,
	const std::vector<cuda_linear_buffer_device::ptr>& data,
	const std::vector<cuda_linear_buffer_device::const_ptr>& data_custom,
	const std::vector<cuda_linear_buffer_device::const_ptr>& input_buffers,
	const std::vector<cuda_linear_buffer_device::const_ptr>& persistent_working_data,
	cuda_linear_buffer_device::ptr temporary_working_fixed_buffer,
	cuda_linear_buffer_device::ptr temporary_working_per_entry_buffer,
	cuda_linear_buffer_device::ptr temporary_per_entry_buffer,
	unsigned int entry_count)
{
	// Block size is chosen so that threads cover the feature-map count
	// (capped and rounded to warp multiples by get_threadblock_size).
	int threadblock_size = get_threadblock_size(input_configuration_specific_list[0].feature_map_count);
	const float * scale_mask = 0;
	if (input_buffers.size() > 2)
		scale_mask = *input_buffers[2]; // optional 3rd input: per-neuron scale mask
	// One float of shared memory per warp for the cross-warp reduction.
	int smem_size = ((threadblock_size + 32 - 1) / 32) * sizeof(float);
	mse_upd_kernel<<<dim3(input_elem_count_per_feature_map_list[0], entry_count), threadblock_size, smem_size, stream_id>>>(
		*output_buffer,
		*input_buffers[0],
		*input_buffers[1],
		scale_mask,
		input_configuration_specific_list[0].feature_map_count,
		input_elem_count_per_feature_map_list[0],
		scale,
		entry_count);
}
// Backward pass w.r.t. one of the two data inputs.  For
// loss = scale * sum (in0 - in1)^2, the gradient w.r.t. input i is
// scale * 2 * (other - this) — hence the [input_index] / [1 - input_index]
// pairing below.  Dispatches to the masked 3-D kernel when a scale mask
// (3rd input) is present, otherwise to the flat elementwise kernel.
void mse_layer_updater_cuda::enqueue_backward_data_propagation(
	cudaStream_t stream_id,
	unsigned int input_index,
	cuda_linear_buffer_device::ptr input_errors_buffer,
	cuda_linear_buffer_device::const_ptr output_errors_buffer,
	const std::vector<cuda_linear_buffer_device::const_ptr>& schema_data,
	const std::vector<cuda_linear_buffer_device::ptr>& data,
	const std::vector<cuda_linear_buffer_device::const_ptr>& data_custom,
	const std::vector<cuda_linear_buffer_device::const_ptr>& input_neurons_buffers,
	cuda_linear_buffer_device::const_ptr output_neurons_buffer,
	const std::vector<cuda_linear_buffer_device::const_ptr>& persistent_working_data,
	cuda_linear_buffer_device::ptr temporary_working_fixed_buffer,
	cuda_linear_buffer_device::ptr temporary_working_per_entry_buffer,
	cuda_linear_buffer_device::const_ptr temporary_per_entry_buffer,
	bool add_update_to_destination,
	unsigned int entry_count)
{
	if (input_neurons_buffers.size() > 2)
	{
		// Masked path: 3-D launch over (neuron, feature map, entry).
		std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
			*cuda_config,
			input_elem_count_per_feature_map_list[0],
			input_configuration_specific_list[0].feature_map_count,
			entry_count);
		if (add_update_to_destination)
			mse_backprop_upd_kernel<true><<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>(
				*input_errors_buffer,
				*input_neurons_buffers[input_index],
				*input_neurons_buffers[1 - input_index],
				*input_neurons_buffers[2],
				scale * 2.0F,
				input_elem_count_per_feature_map_list[0],
				input_configuration_specific_list[0].feature_map_count,
				entry_count);
		else
			mse_backprop_upd_kernel<false><<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>(
				*input_errors_buffer,
				*input_neurons_buffers[input_index],
				*input_neurons_buffers[1 - input_index],
				*input_neurons_buffers[2],
				scale * 2.0F,
				input_elem_count_per_feature_map_list[0],
				input_configuration_specific_list[0].feature_map_count,
				entry_count);
	}
	else
	{
		// Unmasked path: flat 1-D launch over all elements.
		int elem_count = entry_count * input_elem_count_per_entry_list[0];
		std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
			*cuda_config,
			elem_count);
		if (add_update_to_destination)
			mse_backprop_upd_kernel<true><<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>(
				*input_errors_buffer,
				*input_neurons_buffers[input_index],
				*input_neurons_buffers[1 - input_index],
				scale * 2.0F,
				elem_count);
		else
			mse_backprop_upd_kernel<false><<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>(
				*input_errors_buffer,
				*input_neurons_buffers[input_index],
				*input_neurons_buffers[1 - input_index],
				scale * 2.0F,
				elem_count);
	}
}
// Caches the configured scale factor from the layer schema, and rejects
// configurations that would require gradients w.r.t. the scale mask
// (input index 2), which this implementation cannot produce.
void mse_layer_updater_cuda::updater_configured()
{
	if (actions.find(layer_action(layer_action::backward_data, 2)) != actions.end())
		throw neural_network_exception("mse_layer_updater_cuda cannot do backward propagation for scale mask");

	nnforge_shared_ptr<const mse_layer> layer_derived = nnforge_dynamic_pointer_cast<const mse_layer>(layer_schema);
	scale = layer_derived->scale;
}
// The backward pass only reads the forward inputs (predicted and target
// neurons), never the forward output, so the output buffer may be dropped.
bool mse_layer_updater_cuda::is_backward_data_dependent_on_output_buffer(unsigned int action_input_index) const
{
	return false;
}
// Picks the 1-D block size used by mse_upd_kernel for a given feature-map
// count: the count rounded up to a warp multiple when it is small, otherwise
// the count is split evenly across ceil(count / 256) strides and that
// per-stride share is rounded up to a warp multiple (so the result never
// exceeds 256).
int mse_layer_updater_cuda::get_threadblock_size(int input_feature_map_count)
{
	const int warp_size = 32;
	if (input_feature_map_count < 256)
		return ((input_feature_map_count + warp_size - 1) / warp_size) * warp_size;

	const int stride_count = (input_feature_map_count + 256 - 1) / 256;
	const int per_stride = (input_feature_map_count + stride_count - 1) / stride_count;
	return ((per_stride + warp_size - 1) / warp_size) * warp_size;
}
}
}
|
790025825985482ebe088e0eda3a58b6fafeb5b4.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright 2017 Stanford, NVIDIA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "model.h"
#include "cuda_helper.h"
// Appends a softmax layer over a 2-D input tensor to the model and
// returns the layer's output tensor.
Tensor FFModel::softmax(const Tensor& _input)
{
  assert(_input.numDim == 2);
  Softmax* layer = new Softmax(*this, _input);
  layers.push_back(layer);
  return layer->outputs[0];
}
// Constructs a Softmax op over a 2-D input; softmax is shape-preserving,
// so the output dimensions mirror the input's.
Softmax::Softmax(FFModel& model,
                 const Tensor& _input)
: Op(model, OP_SOFTMAX, "Softmax", _input), profiling(model.config.profiling)
{
  outputs[0].numDim = 2;
  outputs[0].adim[0] = _input.adim[0];
  outputs[0].adim[1] = _input.adim[1];
}
// Softmax has no trainable parameters, so there are no weights to create.
void Softmax::create_weights(FFModel& model)
{
  // Do nothing since we don't have weights
}
// Creates the 2-D output tensor partitioned over this op's task index space,
// and sets up the input logical partitions used when launching index-space
// tasks.  Softmax currently only supports data parallelism (no partitioning
// along the channel dimension).
void Softmax::create_output_and_partition(FFModel& model)
{
  // Retrieve the task indexspace for the op
  std::string pcname = name;
  task_is = IndexSpaceT<2>(model.get_or_create_task_is(2, pcname));
  Context ctx = model.config.lg_ctx;
  Runtime* runtime = model.config.lg_hlr;
  Rect<2> part_rect = runtime->get_index_space_domain(ctx, task_is);
  int num_par_c = part_rect.hi[0] - part_rect.lo[0] + 1;
  int num_par_n = part_rect.hi[1] - part_rect.lo[1] + 1;
  // Currently require data parallelism for Softmax (no channel partitioning)
  assert(num_par_c == 1);
  {
    const int dims[2] = {inputs[0].adim[1], inputs[0].adim[0]};
    outputs[0] = model.create_tensor<2>(dims, (IndexSpaceT<2>)task_is, DT_FLOAT);
    outputs[0].owner_op = this;
    outputs[0].owner_idx = 0;
  }
  // Compute partition bound for input: reuse the input's existing partition
  // when it already matches this op's index space, otherwise build a new
  // disjoint partition.
  Rect<2> input_rect = runtime->get_index_partition_color_space(
      ctx, inputs[0].part.get_index_partition());
  if (input_rect == part_rect) {
    input_lps[0] = inputs[0].part;
    input_grad_lps[0] = inputs[0].part_grad;
  } else {
    model.create_disjoint_partition(
        inputs[0], (IndexSpaceT<2>)task_is, input_lps[0], input_grad_lps[0]);
  }
}
/*
  regions[0]: input
  regions[1]: output
*/
// Per-shard initialization task: allocates the SoftmaxMeta and configures a
// cuDNN NCHW tensor descriptor of shape (n, c, 1, 1) covering this shard's
// slice.  The same descriptor is later reused for both input and output of
// cudnnSoftmaxForward (they have identical shapes).
OpMeta* Softmax::init_task(const Task *task,
                           const std::vector<PhysicalRegion> &regions,
                           Context ctx, Runtime *runtime)
{
  assert(regions.size() == 2);
  assert(task->regions.size() == 2);
  //const Softmax* softmax = (Softmax*) task->args;
  TensorAccessorR<float, 2> acc_input(
      regions[0], task->regions[0], FID_DATA, ctx, runtime);
  TensorAccessorW<float, 2> acc_output(
      regions[1], task->regions[1], FID_DATA, ctx, runtime,
      false/*readutput*/);
  FFHandler handle = *((const FFHandler*) task->local_args);
  SoftmaxMeta* m = new SoftmaxMeta(handle);
  checkCUDNN(cudnnCreateTensorDescriptor(&m->inputTensor));
  //checkCUDNN(cudnnCreateTensorDescriptor(&m->outputTensor));
  assert(acc_input.rect == acc_output.rect);
  // Rect is [channels, batch]: dim 0 is the channel extent, dim 1 the batch.
  int input_c = acc_input.rect.hi[0] - acc_input.rect.lo[0] + 1;
  int input_n = acc_input.rect.hi[1] - acc_input.rect.lo[1] + 1;
  checkCUDNN(cudnnSetTensor4dDescriptor(m->inputTensor,
                                        CUDNN_TENSOR_NCHW,
                                        CUDNN_DATA_FLOAT,
                                        input_n, input_c, 1, 1));
  return m;
}
// Launches SOFTMAX_INIT_TASK_ID across the op's index space; each point task
// builds a SoftmaxMeta (cuDNN descriptor) for its shard, and the resulting
// OpMeta pointers are collected into `meta` for use by forward/backward.
__host__
void Softmax::init(const FFModel& ff)
{
  ArgumentMap argmap;
  Context ctx = ff.config.lg_ctx;
  Runtime* runtime = ff.config.lg_hlr;
  Rect<2> rect = runtime->get_index_space_domain(ctx, task_is);
  int idx = 0;
  // Hand each point task the FFHandler (cuDNN handle etc.) for its GPU.
  for (PointInRectIterator<2> it(rect); it(); it++) {
    FFHandler handle = ff.handlers[idx++];
    argmap.set_point(*it, TaskArgument(&handle, sizeof(FFHandler)));
  }
  IndexLauncher launcher(SOFTMAX_INIT_TASK_ID, task_is,
                         TaskArgument(this, sizeof(Softmax)), argmap,
                         Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
                         FFConfig::get_hash_id(std::string(name)));
  launcher.add_region_requirement(
      RegionRequirement(input_lps[0], 0/*projection id*/,
                        READ_ONLY, EXCLUSIVE, inputs[0].region));
  launcher.add_field(0, FID_DATA);
  launcher.add_region_requirement(
      RegionRequirement(outputs[0].part, 0/*projection id*/,
                        WRITE_DISCARD, EXCLUSIVE, outputs[0].region));
  launcher.add_field(1, FID_DATA);
  FutureMap fm = runtime->execute_index_space(ctx, launcher);
  // Block until all shards are initialized, then collect the per-shard metas.
  fm.wait_all_results();
  idx = 0;
  for (PointInRectIterator<2> it(rect); it(); it++) {
    meta[idx++] = fm.get_result<OpMeta*>(*it);
  }
}
/*
  regions[0](I): input
  regions[1](O): output
*/
// Per-shard forward task: runs cuDNN channel-wise softmax on this shard's
// input slice and writes the result to the output slice.  When profiling is
// enabled, the elapsed GPU time is measured with events and printed.
__host__
void Softmax::forward_task(const Task *task,
                           const std::vector<PhysicalRegion> &regions,
                           Context ctx, Runtime *runtime)
{
  assert(regions.size() == 2);
  assert(task->regions.size() == 2);
  float alpha = 1.0f, beta = 0.0f;
  const Softmax* softmax = (Softmax*) task->args;
  const SoftmaxMeta* m = *((SoftmaxMeta**) task->local_args);
  TensorAccessorR<float, 2> acc_input(
      regions[0], task->regions[0], FID_DATA, ctx, runtime);
  TensorAccessorW<float, 2> acc_output(
      regions[1], task->regions[1], FID_DATA, ctx, runtime,
      false/*readOutput*/);

  hipEvent_t t_start, t_end;
  if (softmax->profiling) {
    hipEventCreate(&t_start);
    hipEventCreate(&t_end);
    hipEventRecord(t_start);
  }
#ifndef DISABLE_LEGION_CUDA_HIJACK
  // NOTE(review): a fresh stream is created on every task invocation and is
  // never destroyed — looks like a stream leak; confirm whether it is
  // reclaimed elsewhere.
  hipStream_t stream;
  checkCUDA(hipStreamCreate(&stream));
  checkCUDNN(cudnnSetStream(m->handle.dnn, stream));
#endif
  // The input descriptor is reused for the output: both slices have the same
  // shape (outputTensor creation is commented out in init_task).
  checkCUDNN(cudnnSoftmaxForward(m->handle.dnn,
                                 CUDNN_SOFTMAX_ACCURATE,
                                 CUDNN_SOFTMAX_MODE_CHANNEL,
                                 &alpha, m->inputTensor, acc_input.ptr,
                                 &beta, m->inputTensor, acc_output.ptr));
  if (softmax->profiling) {
    hipEventRecord(t_end);
    checkCUDA(hipEventSynchronize(t_end));
    //print_tensor<2, float>(acc_output.ptr, acc_output.rect, "[Softmax:forward:output]");
    float elapsed = 0;
    checkCUDA(hipEventElapsedTime(&elapsed, t_start, t_end));
    hipEventDestroy(t_start);
    hipEventDestroy(t_end);
    printf("Softmax forward time = %.2fms\n", elapsed);
  }
}
// Launches SOFTMAX_FWD_TASK_ID: one point task per shard, each receiving
// its cached SoftmaxMeta* through the argument map. Fire-and-forget; the
// runtime tracks the region dependencies.
__host__
void Softmax::forward(const FFModel& ff)
{
ArgumentMap argmap;
Context ctx = ff.config.lg_ctx;
Runtime* runtime = ff.config.lg_hlr;
Rect<2> rect = runtime->get_index_space_domain(ctx, task_is);
int idx = 0;
for (PointInRectIterator<2> it(rect); it(); it++) {
OpMeta* mp = meta[idx++];
argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*)));
}
IndexLauncher launcher(SOFTMAX_FWD_TASK_ID, task_is,
TaskArgument(this, sizeof(Softmax)), argmap,
Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
FFConfig::get_hash_id(std::string(name)));
// region 0: read-only input; region 1: write-only output.
launcher.add_region_requirement(
RegionRequirement(input_lps[0], 0/*projection id*/,
READ_ONLY, EXCLUSIVE, inputs[0].region));
launcher.add_field(0, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(outputs[0].part, 0/*projection id*/,
WRITE_ONLY, EXCLUSIVE, outputs[0].region));
launcher.add_field(1, FID_DATA);
runtime->execute_index_space(ctx, launcher);
}
/*
regions[0](I/O): input_grad
regions[1](I): output_grad
*/
// The backward pass of softmax is a no-op copy (input_grad = output_grad):
// the upstream cross-entropy loss computes the fused
// softmax_cross_entropy_loss gradient, so only a device-to-device copy of
// the incoming gradient is needed here.
__host__
void Softmax::backward_task(const Task *task,
const std::vector<PhysicalRegion> &regions,
Context ctx, Runtime *runtime)
{
assert(regions.size() == 2);
assert(task->regions.size() == 2);
const Softmax* softmax = (Softmax*) task->args;
const SoftmaxMeta* m = *((SoftmaxMeta**) task->local_args);
TensorAccessorW<float, 2> acc_input_grad(
regions[0], task->regions[0], FID_DATA, ctx, runtime,
true/*readOutput*/);
TensorAccessorR<float, 2> acc_output_grad(
regions[1], task->regions[1], FID_DATA, ctx, runtime);
// make sure the image indices match!
assert(acc_input_grad.rect == acc_output_grad.rect);
hipEvent_t t_start, t_end;
if (softmax->profiling) {
hipEventCreate(&t_start);
hipEventCreate(&t_end);
hipEventRecord(t_start);
}
#ifndef DISABLE_LEGION_CUDA_HIJACK
hipStream_t stream;
checkCUDA(hipStreamCreate(&stream));
checkCUDA(hipblasSetStream(m->handle.blas, stream));
checkCUDNN(cudnnSetStream(m->handle.dnn, stream));
#endif
// NOTE(review): this copy is issued on the default stream even when a
// dedicated stream is created above — confirm whether it should target
// `stream` instead.
checkCUDA(hipMemcpyAsync(acc_input_grad.ptr, acc_output_grad.ptr,
acc_input_grad.rect.volume() * sizeof(float),
hipMemcpyDeviceToDevice));
if (softmax->profiling) {
hipEventRecord(t_end);
checkCUDA(hipEventSynchronize(t_end));
//print_tensor<2, float>(acc_output_grad.ptr, acc_output_grad.rect, "[Softmax:backward:output_grad]");
//print_tensor<2, float>(acc_input_grad.ptr, acc_input_grad.rect, "[Softmax:backward:input_grad]");
float elapsed = 0;
checkCUDA(hipEventElapsedTime(&elapsed, t_start, t_end));
hipEventDestroy(t_start);
hipEventDestroy(t_end);
printf("Softmax backward time = %.2fms\n", elapsed);
}
#ifndef DISABLE_LEGION_CUDA_HIJACK
// Fix: the stream was previously leaked on every task invocation.
// Destruction defers resource release until queued work completes.
checkCUDA(hipStreamDestroy(stream));
#endif
}
// Launches SOFTMAX_BWD_TASK_ID: one point task per shard. Region 0 is the
// input gradient (read-write), region 1 the output gradient (read-only);
// the task copies output_grad into input_grad.
__host__
void Softmax::backward(const FFModel& ff)
{
ArgumentMap argmap;
Context ctx = ff.config.lg_ctx;
Runtime* runtime = ff.config.lg_hlr;
int idx = 0;
Rect<2> rect = runtime->get_index_space_domain(ctx, task_is);
// Pass each shard its cached SoftmaxMeta*.
for (PointInRectIterator<2> it(rect); it(); it++) {
OpMeta* mp = meta[idx++];
argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*)));
}
IndexLauncher launcher(SOFTMAX_BWD_TASK_ID, task_is,
TaskArgument(this, sizeof(Softmax)), argmap,
Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
FFConfig::get_hash_id(std::string(name)));
launcher.add_region_requirement(
RegionRequirement(input_grad_lps[0], 0/*projection id*/,
READ_WRITE, EXCLUSIVE, inputs[0].region_grad));
launcher.add_field(0, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(outputs[0].part_grad, 0/*projection id*/,
READ_ONLY, EXCLUSIVE, outputs[0].region_grad));
launcher.add_field(1, FID_DATA);
runtime->execute_index_space(ctx, launcher);
}
// Not implemented: always reports failure so the caller falls back to a
// default cost estimate for Softmax. Output parameters are left untouched.
bool Softmax::measure_compute_time(Simulator* sim,
const ParallelConfig& pc,
float& forward_time,
float& backward_time)
{
//TODO: implement measure_forward
return false;
}
| 790025825985482ebe088e0eda3a58b6fafeb5b4.cu | /* Copyright 2017 Stanford, NVIDIA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "model.h"
#include "cuda_helper.h"
// Appends a softmax layer over a 2-D input tensor to the model and returns
// the layer's output tensor (same shape as the input).
Tensor FFModel::softmax(const Tensor& _input)
{
assert(_input.numDim == 2); // only 2-D inputs are supported
Softmax *sm = new Softmax(*this, _input);
layers.push_back(sm);
return sm->outputs[0];
}
// Softmax op constructor: the output tensor mirrors the 2-D shape of the
// input; profiling follows the model-wide configuration flag.
Softmax::Softmax(FFModel& model,
const Tensor& _input)
: Op(model, OP_SOFTMAX, "Softmax", _input), profiling(model.config.profiling)
{
outputs[0].numDim = 2;
outputs[0].adim[0] = _input.adim[0];
outputs[0].adim[1] = _input.adim[1];
}
// Softmax has no trainable parameters, so there is nothing to allocate.
void Softmax::create_weights(FFModel& model)
{
// Do nothing since we don't have weights
}
// Sets up the 2-D task index space for this op, allocates the output tensor
// with the same shape as the input, and builds (or reuses) an input logical
// partition aligned with the op's partitioning.
void Softmax::create_output_and_partition(FFModel& model)
{
// Retrieve the task index space for the op
std::string pcname = name;
task_is = IndexSpaceT<2>(model.get_or_create_task_is(2, pcname));
Context ctx = model.config.lg_ctx;
Runtime* runtime = model.config.lg_hlr;
Rect<2> part_rect = runtime->get_index_space_domain(ctx, task_is);
int num_par_c = part_rect.hi[0] - part_rect.lo[0] + 1;
int num_par_n = part_rect.hi[1] - part_rect.lo[1] + 1;
// Currently require data parallelism for Softmax (no split along dim 0)
assert(num_par_c == 1);
{
const int dims[2] = {inputs[0].adim[1], inputs[0].adim[0]};
outputs[0] = model.create_tensor<2>(dims, (IndexSpaceT<2>)task_is, DT_FLOAT);
outputs[0].owner_op = this;
outputs[0].owner_idx = 0;
}
// Compute partition bound for input
Rect<2> input_rect = runtime->get_index_partition_color_space(
ctx, inputs[0].part.get_index_partition());
// Reuse the input's existing partition when it already matches this op's
// index space; otherwise create a fresh disjoint partition.
if (input_rect == part_rect) {
input_lps[0] = inputs[0].part;
input_grad_lps[0] = inputs[0].part_grad;
} else {
model.create_disjoint_partition(
inputs[0], (IndexSpaceT<2>)task_is, input_lps[0], input_grad_lps[0]);
}
}
/*
regions[0]: input
regions[1]: output
*/
// Per-shard init: allocates the SoftmaxMeta and builds a cuDNN tensor
// descriptor shaped (n, c, 1, 1) from the local input rectangle. The
// returned pointer is collected by Softmax::init into meta[].
OpMeta* Softmax::init_task(const Task *task,
const std::vector<PhysicalRegion> &regions,
Context ctx, Runtime *runtime)
{
assert(regions.size() == 2);
assert(task->regions.size() == 2);
//const Softmax* softmax = (Softmax*) task->args;
TensorAccessorR<float, 2> acc_input(
regions[0], task->regions[0], FID_DATA, ctx, runtime);
TensorAccessorW<float, 2> acc_output(
regions[1], task->regions[1], FID_DATA, ctx, runtime,
false/*readOutput*/);
FFHandler handle = *((const FFHandler*) task->local_args);
SoftmaxMeta* m = new SoftmaxMeta(handle);
checkCUDNN(cudnnCreateTensorDescriptor(&m->inputTensor));
//checkCUDNN(cudnnCreateTensorDescriptor(&m->outputTensor));
assert(acc_input.rect == acc_output.rect);
// dim 0 of the rect is mapped to channels, dim 1 to the batch; both are
// folded into a 4-D NCHW descriptor with H = W = 1.
int input_c = acc_input.rect.hi[0] - acc_input.rect.lo[0] + 1;
int input_n = acc_input.rect.hi[1] - acc_input.rect.lo[1] + 1;
checkCUDNN(cudnnSetTensor4dDescriptor(m->inputTensor,
CUDNN_TENSOR_NCHW,
CUDNN_DATA_FLOAT,
input_n, input_c, 1, 1));
return m;
}
// Initializes Softmax on every shard: scatters one FFHandler per point of
// the 2-D task index space, runs SOFTMAX_INIT_TASK_ID, then gathers the
// per-shard SoftmaxMeta* results into meta[].
__host__
void Softmax::init(const FFModel& ff)
{
ArgumentMap argmap;
Context ctx = ff.config.lg_ctx;
Runtime* runtime = ff.config.lg_hlr;
Rect<2> rect = runtime->get_index_space_domain(ctx, task_is);
int idx = 0;
// One FFHandler (per-device library handles) per point task.
for (PointInRectIterator<2> it(rect); it(); it++) {
FFHandler handle = ff.handlers[idx++];
argmap.set_point(*it, TaskArgument(&handle, sizeof(FFHandler)));
}
IndexLauncher launcher(SOFTMAX_INIT_TASK_ID, task_is,
TaskArgument(this, sizeof(Softmax)), argmap,
Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
FFConfig::get_hash_id(std::string(name)));
// region 0: read-only input; region 1: write-discard output.
launcher.add_region_requirement(
RegionRequirement(input_lps[0], 0/*projection id*/,
READ_ONLY, EXCLUSIVE, inputs[0].region));
launcher.add_field(0, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(outputs[0].part, 0/*projection id*/,
WRITE_DISCARD, EXCLUSIVE, outputs[0].region));
launcher.add_field(1, FID_DATA);
FutureMap fm = runtime->execute_index_space(ctx, launcher);
// Block until all init tasks finish so meta[] is fully populated
// before forward/backward launches read it.
fm.wait_all_results();
idx = 0;
for (PointInRectIterator<2> it(rect); it(); it++) {
meta[idx++] = fm.get_result<OpMeta*>(*it);
}
}
/*
regions[0](I): input
regions[1](O): output
*/
// Per-shard forward pass: runs cuDNN softmax over the channel dimension of
// the (n, c, 1, 1) descriptor built in init_task. Optionally times the call
// with events when profiling is enabled.
__host__
void Softmax::forward_task(const Task *task,
const std::vector<PhysicalRegion> &regions,
Context ctx, Runtime *runtime)
{
assert(regions.size() == 2);
assert(task->regions.size() == 2);
float alpha = 1.0f, beta = 0.0f;
const Softmax* softmax = (Softmax*) task->args;
const SoftmaxMeta* m = *((SoftmaxMeta**) task->local_args);
TensorAccessorR<float, 2> acc_input(
regions[0], task->regions[0], FID_DATA, ctx, runtime);
TensorAccessorW<float, 2> acc_output(
regions[1], task->regions[1], FID_DATA, ctx, runtime,
false/*readOutput*/);
cudaEvent_t t_start, t_end;
if (softmax->profiling) {
cudaEventCreate(&t_start);
cudaEventCreate(&t_end);
cudaEventRecord(t_start);
}
#ifndef DISABLE_LEGION_CUDA_HIJACK
cudaStream_t stream;
checkCUDA(cudaStreamCreate(&stream));
checkCUDNN(cudnnSetStream(m->handle.dnn, stream));
#endif
checkCUDNN(cudnnSoftmaxForward(m->handle.dnn,
CUDNN_SOFTMAX_ACCURATE,
CUDNN_SOFTMAX_MODE_CHANNEL,
&alpha, m->inputTensor, acc_input.ptr,
&beta, m->inputTensor, acc_output.ptr));
if (softmax->profiling) {
// NOTE(review): events are recorded on the default stream while the
// softmax may run on the stream created above — confirm the timing
// actually brackets the cuDNN work when the hijack is disabled.
cudaEventRecord(t_end);
checkCUDA(cudaEventSynchronize(t_end));
//print_tensor<2, float>(acc_output.ptr, acc_output.rect, "[Softmax:forward:output]");
float elapsed = 0;
checkCUDA(cudaEventElapsedTime(&elapsed, t_start, t_end));
cudaEventDestroy(t_start);
cudaEventDestroy(t_end);
printf("Softmax forward time = %.2fms\n", elapsed);
}
#ifndef DISABLE_LEGION_CUDA_HIJACK
// Fix: the stream was previously leaked on every task invocation.
// cudaStreamDestroy defers resource release until queued work drains,
// so destroying immediately after the launch is safe.
checkCUDA(cudaStreamDestroy(stream));
#endif
}
// Launches SOFTMAX_FWD_TASK_ID: one point task per shard, each receiving
// its cached SoftmaxMeta* through the argument map. Fire-and-forget; the
// runtime tracks the region dependencies.
__host__
void Softmax::forward(const FFModel& ff)
{
ArgumentMap argmap;
Context ctx = ff.config.lg_ctx;
Runtime* runtime = ff.config.lg_hlr;
Rect<2> rect = runtime->get_index_space_domain(ctx, task_is);
int idx = 0;
for (PointInRectIterator<2> it(rect); it(); it++) {
OpMeta* mp = meta[idx++];
argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*)));
}
IndexLauncher launcher(SOFTMAX_FWD_TASK_ID, task_is,
TaskArgument(this, sizeof(Softmax)), argmap,
Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
FFConfig::get_hash_id(std::string(name)));
// region 0: read-only input; region 1: write-only output.
launcher.add_region_requirement(
RegionRequirement(input_lps[0], 0/*projection id*/,
READ_ONLY, EXCLUSIVE, inputs[0].region));
launcher.add_field(0, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(outputs[0].part, 0/*projection id*/,
WRITE_ONLY, EXCLUSIVE, outputs[0].region));
launcher.add_field(1, FID_DATA);
runtime->execute_index_space(ctx, launcher);
}
/*
regions[0](I/O): input_grad
regions[1](I): output_grad
*/
// The backward pass of softmax is a no-op copy (input_grad = output_grad):
// the upstream cross-entropy loss computes the fused
// softmax_cross_entropy_loss gradient, so only a device-to-device copy of
// the incoming gradient is needed here.
__host__
void Softmax::backward_task(const Task *task,
const std::vector<PhysicalRegion> &regions,
Context ctx, Runtime *runtime)
{
assert(regions.size() == 2);
assert(task->regions.size() == 2);
const Softmax* softmax = (Softmax*) task->args;
const SoftmaxMeta* m = *((SoftmaxMeta**) task->local_args);
TensorAccessorW<float, 2> acc_input_grad(
regions[0], task->regions[0], FID_DATA, ctx, runtime,
true/*readOutput*/);
TensorAccessorR<float, 2> acc_output_grad(
regions[1], task->regions[1], FID_DATA, ctx, runtime);
// make sure the image indices match!
assert(acc_input_grad.rect == acc_output_grad.rect);
cudaEvent_t t_start, t_end;
if (softmax->profiling) {
cudaEventCreate(&t_start);
cudaEventCreate(&t_end);
cudaEventRecord(t_start);
}
#ifndef DISABLE_LEGION_CUDA_HIJACK
cudaStream_t stream;
checkCUDA(cudaStreamCreate(&stream));
checkCUDA(cublasSetStream(m->handle.blas, stream));
checkCUDNN(cudnnSetStream(m->handle.dnn, stream));
#endif
// NOTE(review): this copy is issued on the default stream even when a
// dedicated stream is created above — confirm whether it should target
// `stream` instead.
checkCUDA(cudaMemcpyAsync(acc_input_grad.ptr, acc_output_grad.ptr,
acc_input_grad.rect.volume() * sizeof(float),
cudaMemcpyDeviceToDevice));
if (softmax->profiling) {
cudaEventRecord(t_end);
checkCUDA(cudaEventSynchronize(t_end));
//print_tensor<2, float>(acc_output_grad.ptr, acc_output_grad.rect, "[Softmax:backward:output_grad]");
//print_tensor<2, float>(acc_input_grad.ptr, acc_input_grad.rect, "[Softmax:backward:input_grad]");
float elapsed = 0;
checkCUDA(cudaEventElapsedTime(&elapsed, t_start, t_end));
cudaEventDestroy(t_start);
cudaEventDestroy(t_end);
printf("Softmax backward time = %.2fms\n", elapsed);
}
#ifndef DISABLE_LEGION_CUDA_HIJACK
// Fix: the stream was previously leaked on every task invocation.
// Destruction defers resource release until queued work completes.
checkCUDA(cudaStreamDestroy(stream));
#endif
}
// Launches SOFTMAX_BWD_TASK_ID: one point task per shard. Region 0 is the
// input gradient (read-write), region 1 the output gradient (read-only);
// the task copies output_grad into input_grad.
__host__
void Softmax::backward(const FFModel& ff)
{
ArgumentMap argmap;
Context ctx = ff.config.lg_ctx;
Runtime* runtime = ff.config.lg_hlr;
int idx = 0;
Rect<2> rect = runtime->get_index_space_domain(ctx, task_is);
// Pass each shard its cached SoftmaxMeta*.
for (PointInRectIterator<2> it(rect); it(); it++) {
OpMeta* mp = meta[idx++];
argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*)));
}
IndexLauncher launcher(SOFTMAX_BWD_TASK_ID, task_is,
TaskArgument(this, sizeof(Softmax)), argmap,
Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
FFConfig::get_hash_id(std::string(name)));
launcher.add_region_requirement(
RegionRequirement(input_grad_lps[0], 0/*projection id*/,
READ_WRITE, EXCLUSIVE, inputs[0].region_grad));
launcher.add_field(0, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(outputs[0].part_grad, 0/*projection id*/,
READ_ONLY, EXCLUSIVE, outputs[0].region_grad));
launcher.add_field(1, FID_DATA);
runtime->execute_index_space(ctx, launcher);
}
// Not implemented: always reports failure so the caller falls back to a
// default cost estimate for Softmax. Output parameters are left untouched.
bool Softmax::measure_compute_time(Simulator* sim,
const ParallelConfig& pc,
float& forward_time,
float& backward_time)
{
//TODO: implement measure_forward
return false;
}
|
203e3f108b31485569bfe75e87922849974e59cb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "THHReduceApplyUtils.cuh"
#include "THHTensorCopy.h"
#include <thrust/device_ptr.h>
#include <hiprand/hiprand_kernel.h> // known CUDA 6 compilation error fix
#include <thrust/sort.h>
// Maps the (possibly multi-dimensional) block grid to the linear index of
// the slice this block is sorting; one thread block handles one slice.
template <typename IndexType, int Power2SortSize>
__device__ __forceinline__ IndexType
getSortSliceLinearIndex() {
// linear block ID -> slice we are sorting (one per block)
return getLinearBlockId<IndexType>();
}
// Rounds n up to the next power of two (identity for exact powers of two).
// Classic bit-smearing trick from the Stanford bit twiddling hacks:
// propagate the highest set bit of (n - 1) into every lower bit, then add
// one. Shifts up to 32 assume a 64-bit unsigned long.
unsigned long nextHighestPowerOf2(unsigned long n) {
--n;
for (unsigned int shift = 1; shift <= 32; shift <<= 1) {
n |= n >> shift;
}
return n + 1;
}
// Ascending-order comparator: true when x sorts strictly before y.
template <typename T>
struct LTComp {
__device__ __forceinline__ bool operator()(const T& x, const T& y) const {
return x < y;
}
};
// Descending-order comparator: true when x sorts strictly before y in a
// largest-first ordering.
template <typename T>
struct GTComp {
__device__ __forceinline__ bool operator()(const T& x, const T& y) const {
return x > y;
}
};
// Conditionally swaps two (key, value) pairs so their ordering matches
// `dir` for the current stage of the bitonic network. Values of -1 mark
// out-of-bounds padding and are forced to sort to the end regardless of
// the comparator.
template <typename Comparator, typename K, typename V>
__device__ __forceinline__ void bitonicSwap(K& kA, V& vA,
K& kB, V& vB,
bool dir,
const Comparator& comp) {
// Entries with -1 indices (not real data; out of bounds) always
// sort to the end
bool val = (comp(kA, kB) && (vA != -1)) || (vB == -1);
if (val == dir) {
K k = kA;
kA = kB;
kB = k;
V v = vA;
vA = vB;
vB = v;
}
};
// In-place bitonic sort of Power2SortSize (key, value) pairs held in
// shared memory; each thread owns two elements (blockDim.x ==
// Power2SortSize / 2). The block-wide barrier is skipped when the sort
// fits in a single warp (Power2SortSize <= 64), relying on the warp
// executing synchronously, as noted below.
template <typename Comparator, typename K, typename V,
typename IndexType, int Power2SortSize>
__device__ inline void bitonicSort(K keys[Power2SortSize],
V values[Power2SortSize],
const Comparator& comp) {
// Build bitonic sequences of increasing size.
#pragma unroll
for (unsigned int size = 2; size < Power2SortSize; size *= 2) {
bool flag = ((threadIdx.x & (size / 2)) != 0);
#pragma unroll
for (unsigned int stride = size / 2; stride > 0; stride /= 2) {
// Single warp per slice is completely synchronous
if (Power2SortSize > 64) {
__syncthreads();
}
unsigned int pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
bitonicSwap<Comparator, K, V>(
keys[pos], values[pos], keys[pos + stride], values[pos + stride],
flag, comp);
}
}
// Final merge pass over the full bitonic sequence.
#pragma unroll
for (unsigned int stride = Power2SortSize / 2; stride > 0; stride /= 2) {
// Single warp per slice is completely synchronous
if (Power2SortSize > 64) {
__syncthreads();
}
unsigned int pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
bitonicSwap<Comparator, K, V>(
keys[pos], values[pos], keys[pos + stride], values [pos + stride],
false, comp);
}
// Single warp per slice is completely synchronous
if (Power2SortSize > 64) {
__syncthreads();
}
}
// One thread block sorts one slice of `input` along `dim` into `sorted`,
// writing 1-based source indices into `indices`. Each thread loads and
// stores two elements; slices shorter than Power2SortSize are padded with
// (0.0f, -1) sentinels that sort to the end and are never written back.
// Launch contract: blockDim.x == Power2SortSize / 2, one block per slice.
template <typename Comparator, typename IndexType, int Dims, int Power2SortSize>
__global__ void
THCudaTensor_bitonicSortWithIndex(TensorInfo<IndexType> sorted,
TensorInfo<IndexType> indices,
TensorInfo<IndexType> input,
int dim,
IndexType totalSlices,
IndexType sliceSize,
IndexType sliceStride,
IndexType outSize,
IndexType outStride,
const Comparator comp) {
// Find the slice of the tensor that we are sorting
const IndexType linearIndex =
getSortSliceLinearIndex<IndexType, Power2SortSize>();
// Tiling the slices could have us be out of bounds, if there are a
// lot of slices to sort
if (linearIndex >= totalSlices) {
return;
}
__shared__ float keys[Power2SortSize];
__shared__ int values[Power2SortSize];
// Read unsorted values
const IndexType inputStartOffset =
IndexToOffset<IndexType, Dims>::get(linearIndex, input);
// Each thread is responsible for loading and storing 2 elements
const int elem1 = threadIdx.x;
const int elem2 = threadIdx.x + (Power2SortSize / 2);
keys[elem1] = (elem1 < sliceSize) ?
input.data[inputStartOffset + elem1 * sliceStride] :
0.0f; // doesn't matter, element val out of bounds
// Torch indices are 1-based (hence the +1)
values[elem1] = (elem1 < sliceSize) ? (elem1 + 1) :
-1; // out of bounds
keys[elem2] = (elem2 < sliceSize) ?
input.data[inputStartOffset + elem2 * sliceStride] :
0.0f; // doesn't matter, element val out of bounds
// Torch indices are 1-based (hence the +1)
values[elem2] = (elem2 < sliceSize) ? (elem2 + 1) :
-1; // out of bounds
// Sort!
bitonicSort<Comparator, float, int, IndexType, Power2SortSize>(
keys, values, comp);
// Write sorted values; indices have same layout
const IndexType sortedStartOffset =
IndexToOffset<IndexType, -1>::get(linearIndex, sorted);
const IndexType out1 = sortedStartOffset + elem1 * outStride;
// elem1 values are always valid, since otherwise we would have
// chosen the next smallest power-of-2 for sorting
sorted.data[out1] = keys[elem1];
indices.data[out1] = values[elem1];
const IndexType out2 = sortedStartOffset + elem2 * outStride;
// elem2 values might be out-of-range, if the data size we are
// sorting is not a power-of-2
if (values[elem2] != -1) {
sorted.data[out2] = keys[elem2];
indices.data[out2] = values[elem2];
}
}
// Dispatches the block-wide bitonic sort: one thread block per slice of
// `input` along `dim`, with block size equal to half the slice size
// rounded up to a power of two. Returns false (caller must fall back or
// report an error) when the tensor has too many dimensions or a padded
// slice would exceed 2048 elements. `dir` selects descending (true) or
// ascending (false) order.
bool THCudaTensor_sortImpl(THCState* state,
THCudaTensor* sorted,
THCudaTensor* indices,
THCudaTensor* input,
int dim, bool dir) {
long inElements = THCudaTensor_nElement(state, input);
long sliceSize = THCudaTensor_size(state, input, dim);
long sliceStride = THCudaTensor_stride(state, input, dim);
long slices = inElements / sliceSize;
long outSize = THCudaTensor_size(state, sorted, dim);
long outStride = THCudaTensor_stride(state, sorted, dim);
if (THCudaTensor_nDimension(state, input) > MAX_CUTORCH_DIMS) {
// Too many dimensions
return false;
}
if (THCudaTensor_nDimension(state, input) == 0) {
// Zero-dim tensor; do nothing
return true;
}
// The amount of shared memory and block size is based on
// 2^ceil(lg(n)); we choose that sorting implementation for a given
// size.
long ceilPowerOf2 = nextHighestPowerOf2(sliceSize);
// Only handle 1-2048 at the moment
if (ceilPowerOf2 > 2048) {
return false;
}
// Each thread owns two elements, hence half the padded slice size.
const dim3 block(ceilPowerOf2 / 2);
// The grid is based on the number of independent slices that we
// have to sort; one block per slice
dim3 grid;
if (!THC_getGridFromTiles(slices, grid)) {
return false;
}
// Launches the kernel instantiated for index type TYPE, dimension
// specialization A, and compile-time sort size SIZE; `dir` picks the
// descending (GTComp) or ascending (LTComp) comparator.
#define HANDLE_CASE(TYPE, A, SIZE) \
if (dir) { \
hipLaunchKernelGGL(( THCudaTensor_bitonicSortWithIndex<GTComp<float>, TYPE, A, SIZE>) \
, dim3(grid), dim3(block), 0, THCState_getCurrentStream(state), \
sortedInfo, indicesInfo, inputInfo, \
dim, \
slices, (TYPE) sliceSize, (TYPE) sliceStride, \
(TYPE) outSize, (TYPE) outStride, \
GTComp<float>()); \
} else { \
hipLaunchKernelGGL(( THCudaTensor_bitonicSortWithIndex<LTComp<float>, TYPE, A, SIZE>) \
, dim3(grid), dim3(block), 0, THCState_getCurrentStream(state), \
sortedInfo, indicesInfo, inputInfo, \
dim, \
slices, (TYPE) sliceSize, (TYPE) sliceStride, \
(TYPE) outSize, (TYPE) outStride, \
LTComp<float>()); \
}
// Selects the kernel instantiation matching the padded slice size.
#define HANDLE_SORT_CASE(TYPE, A) \
{ \
switch (ceilPowerOf2) { \
case 2048: \
HANDLE_CASE(TYPE, A, 2048); \
break; \
case 1024: \
HANDLE_CASE(TYPE, A, 1024); \
break; \
case 512: \
HANDLE_CASE(TYPE, A, 512); \
break; \
case 256: \
HANDLE_CASE(TYPE, A, 256); \
break; \
case 128: \
HANDLE_CASE(TYPE, A, 128); \
break; \
case 64: \
HANDLE_CASE(TYPE, A, 64); \
break; \
case 32: \
HANDLE_CASE(TYPE, A, 32); \
break; \
case 16: \
HANDLE_CASE(TYPE, A, 16); \
break; \
case 8: \
HANDLE_CASE(TYPE, A, 8); \
break; \
case 4: \
HANDLE_CASE(TYPE, A, 4); \
break; \
case 2: \
HANDLE_CASE(TYPE, A, 2); \
break; \
case 1: \
HANDLE_CASE(TYPE, A, 1); \
break; \
default: \
assert(false); \
} \
}
// Selects the dimension specialization (-2 = contiguous fast path,
// -1 = generic fallback).
#define HANDLE_A_CASE(TYPE, A) \
{ \
if (inputInfo.isContiguous()) { \
HANDLE_SORT_CASE(TYPE, -2); \
} else { \
switch (A) { \
case 1: \
HANDLE_SORT_CASE(TYPE, 1); \
break; \
case 2: \
HANDLE_SORT_CASE(TYPE, 2); \
break; \
case 3: \
HANDLE_SORT_CASE(TYPE, 3); \
break; \
default: \
HANDLE_SORT_CASE(TYPE, -1); \
break; \
} \
} \
}
if (THC_canUse32BitIndexMath(state, input)) {
// In order to get to the right offset for the slice we are
// sorting, set `dim` size to 1 (the `dropDim` argument)
TensorInfo<unsigned int> sortedInfo(state, sorted, dim);
TensorInfo<unsigned int> indicesInfo(state, indices, dim);
TensorInfo<unsigned int> inputInfo(state, input, dim);
HANDLE_A_CASE(unsigned int, inputInfo.dims);
} else {
// In order to get to the right offset for the slice we are
// sorting, set `dim` size to 1 (the `dropDim` argument)
TensorInfo<unsigned long> sortedInfo(state, sorted, dim);
TensorInfo<unsigned long> indicesInfo(state, indices, dim);
TensorInfo<unsigned long> inputInfo(state, input, dim);
// long case is rare, just instantiate these versions
if (inputInfo.isContiguous()) {
HANDLE_SORT_CASE(unsigned long, -2);
} else {
HANDLE_SORT_CASE(unsigned long, -1);
}
}
#undef HANDLE_CASE
#undef HANDLE_SORT_CASE
#undef HANDLE_A_CASE
return true;
}
// `base` is the base address of a tensor
// For each slice (defined as a linear point of `out`, from 0 ->
// (sliceSize - 1) * sliceStride, we fill that slice from `0` to
// `sliceSize - 1`.
// One block per slice; threads stride across the slice writing 1-based
// (Torch convention) index values.
__global__ void
THCudaTensor_fillSliceWithIndex(TensorInfo<unsigned long> out,
long totalSlices,
long sliceSize,
long sliceStride) {
long slice = getLinearBlockId<long>();
// Guard against the tiled grid overshooting the slice count.
if (slice >= totalSlices) {
return;
}
const unsigned long offset =
IndexToOffset<unsigned long, -1>::get(slice, out);
for (long i = threadIdx.x; i < sliceSize; i += blockDim.x) {
// Torch indices are 1-based (hence the +1)
out.data[offset + i * sliceStride] = (float) i + 1;
}
}
// Heuristic: defer to Thrust only when the sort dimension is contiguous
// (stride 1) and there are few, large slices — the Thrust path sorts one
// slice per host-loop iteration, so many small slices would be slow there.
bool canSortThrust(THCState* state, THCudaTensor* input, int dim) {
const long sliceSize = THCudaTensor_size(state, input, dim);
const long numSlices = THCudaTensor_nElement(state, input) / sliceSize;
const bool contiguousSlice = (THCudaTensor_stride(state, input, dim) == 1);
return contiguousSlice && (numSlices <= 16) && (sliceSize > 4096);
}
// Thrust-based fallback: copies `input` into `sorted`, fills `indices`
// with 1-based positions, then key/value sorts each slice in place with
// thrust::sort_by_key — one host-loop iteration per slice. `dir` selects
// descending (true) or ascending (false) order.
void THCudaTensor_sortImplThrust(THCState* state,
THCudaTensor* sorted,
THCudaTensor* indices,
THCudaTensor* input,
int dim, bool dir) {
// Fill the indices as values that Thrust can use for key/value sorting
long totalElements = THCudaTensor_nElement(state, input);
long sliceSize = THCudaTensor_size(state, input, dim);
long sliceStride = THCudaTensor_stride(state, input, dim);
long numSlices = totalElements / sliceSize;
// Copy input to sorted, since we sort in place
if (sorted != input) {
THCudaTensor_copy(state, sorted, input);
}
TensorInfo<unsigned long> sortedInfo(state, sorted, dim);
TensorInfo<unsigned long> indicesInfo(state, indices, dim);
dim3 grid;
THC_getGridFromTiles(numSlices, grid);
hipLaunchKernelGGL(( THCudaTensor_fillSliceWithIndex), dim3(grid), dim3(min((long long)sliceSize, 1024LL)),
0, THCState_getCurrentStream(state),
indicesInfo, numSlices, sliceSize, sliceStride);
THCudaCheck(hipGetLastError());
// Sort each slice independently; keys are the data, values the indices.
for (long slice = 0; slice < numSlices; ++slice) {
unsigned long sortedStart =
IndexToOffset<unsigned long, -1>::get(slice, sortedInfo);
unsigned long indicesStart =
IndexToOffset<unsigned long, -1>::get(slice, indicesInfo);
thrust::device_ptr<float>
sortedSliceStart(THCudaTensor_data(state, sorted) +
sortedStart);
thrust::device_ptr<float>
sortedSliceEnd(THCudaTensor_data(state, sorted) +
sortedStart + sliceSize);
thrust::device_ptr<float>
indicesSliceStart(THCudaTensor_data(state, indices) +
indicesStart);
if (dir) {
thrust::sort_by_key(sortedSliceStart, sortedSliceEnd, indicesSliceStart,
thrust::greater<float>());
} else {
thrust::sort_by_key(sortedSliceStart, sortedSliceEnd, indicesSliceStart,
thrust::less<float>());
}
}
}
// Public entry point: sorts `input` along `dim` into `sorted`, writing the
// (1-based) source positions into `indices`. `order` != 0 sorts
// descending. Picks the Thrust path for few large contiguous slices,
// otherwise the block-wide bitonic kernel.
THC_API void THCudaTensor_sort(THCState* state,
THCudaTensor *sorted,
THCudaTensor *indices,
THCudaTensor *input,
int dim, int order) {
THAssert(THCudaTensor_checkGPU(state, 3, sorted, indices, input));
// Make sure sufficient output space is allocated
THCudaTensor_resizeAs(state, sorted, input);
THCudaTensor_resizeAs(state, indices, input);
// If we think Thrust will be more efficient, use that
if (canSortThrust(state, input, dim)) {
THCudaTensor_sortImplThrust(state, sorted, indices, input,
dim, (bool) order);
return;
}
// Otherwise, use our blockwide sort kernel per each reduction slice
if (!THCudaTensor_sortImpl(state, sorted, indices, input,
dim, (bool) order)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
}
| 203e3f108b31485569bfe75e87922849974e59cb.cu | #include "THCReduceApplyUtils.cuh"
#include "THCTensorCopy.h"
#include <thrust/device_ptr.h>
#include <curand_kernel.h> // known CUDA 6 compilation error fix
#include <thrust/sort.h>
// Maps the (possibly multi-dimensional) block grid to the linear index of
// the slice this block is sorting; one thread block handles one slice.
template <typename IndexType, int Power2SortSize>
__device__ __forceinline__ IndexType
getSortSliceLinearIndex() {
// linear block ID -> slice we are sorting (one per block)
return getLinearBlockId<IndexType>();
}
// Rounds n up to the next power of two (identity for exact powers of two).
// Classic bit-smearing trick from the Stanford bit twiddling hacks:
// propagate the highest set bit of (n - 1) into every lower bit, then add
// one. Shifts up to 32 assume a 64-bit unsigned long.
unsigned long nextHighestPowerOf2(unsigned long n) {
--n;
for (unsigned int shift = 1; shift <= 32; shift <<= 1) {
n |= n >> shift;
}
return n + 1;
}
// Ascending-order comparator: true when x sorts strictly before y.
template <typename T>
struct LTComp {
__device__ __forceinline__ bool operator()(const T& x, const T& y) const {
return x < y;
}
};
// Descending-order comparator: true when x sorts strictly before y in a
// largest-first ordering.
template <typename T>
struct GTComp {
__device__ __forceinline__ bool operator()(const T& x, const T& y) const {
return x > y;
}
};
// Conditionally swaps two (key, value) pairs so their ordering matches
// `dir` for the current stage of the bitonic network. Values of -1 mark
// out-of-bounds padding and are forced to sort to the end regardless of
// the comparator.
template <typename Comparator, typename K, typename V>
__device__ __forceinline__ void bitonicSwap(K& kA, V& vA,
K& kB, V& vB,
bool dir,
const Comparator& comp) {
// Entries with -1 indices (not real data; out of bounds) always
// sort to the end
bool val = (comp(kA, kB) && (vA != -1)) || (vB == -1);
if (val == dir) {
K k = kA;
kA = kB;
kB = k;
V v = vA;
vA = vB;
vB = v;
}
};
// In-place bitonic sort of Power2SortSize (key, value) pairs held in
// shared memory; each thread owns two elements (blockDim.x ==
// Power2SortSize / 2). The block-wide barrier is skipped when the sort
// fits in a single warp (Power2SortSize <= 64), relying on the warp
// executing synchronously, as noted below.
template <typename Comparator, typename K, typename V,
typename IndexType, int Power2SortSize>
__device__ inline void bitonicSort(K keys[Power2SortSize],
V values[Power2SortSize],
const Comparator& comp) {
// Build bitonic sequences of increasing size.
#pragma unroll
for (unsigned int size = 2; size < Power2SortSize; size *= 2) {
bool flag = ((threadIdx.x & (size / 2)) != 0);
#pragma unroll
for (unsigned int stride = size / 2; stride > 0; stride /= 2) {
// Single warp per slice is completely synchronous
if (Power2SortSize > 64) {
__syncthreads();
}
unsigned int pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
bitonicSwap<Comparator, K, V>(
keys[pos], values[pos], keys[pos + stride], values[pos + stride],
flag, comp);
}
}
// Final merge pass over the full bitonic sequence.
#pragma unroll
for (unsigned int stride = Power2SortSize / 2; stride > 0; stride /= 2) {
// Single warp per slice is completely synchronous
if (Power2SortSize > 64) {
__syncthreads();
}
unsigned int pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
bitonicSwap<Comparator, K, V>(
keys[pos], values[pos], keys[pos + stride], values [pos + stride],
false, comp);
}
// Single warp per slice is completely synchronous
if (Power2SortSize > 64) {
__syncthreads();
}
}
// One thread block sorts one slice of `input` along `dim` into `sorted`,
// writing 1-based source indices into `indices`. Each thread loads and
// stores two elements; slices shorter than Power2SortSize are padded with
// (0.0f, -1) sentinels that sort to the end and are never written back.
// Launch contract: blockDim.x == Power2SortSize / 2, one block per slice.
template <typename Comparator, typename IndexType, int Dims, int Power2SortSize>
__global__ void
THCudaTensor_bitonicSortWithIndex(TensorInfo<IndexType> sorted,
TensorInfo<IndexType> indices,
TensorInfo<IndexType> input,
int dim,
IndexType totalSlices,
IndexType sliceSize,
IndexType sliceStride,
IndexType outSize,
IndexType outStride,
const Comparator comp) {
// Find the slice of the tensor that we are sorting
const IndexType linearIndex =
getSortSliceLinearIndex<IndexType, Power2SortSize>();
// Tiling the slices could have us be out of bounds, if there are a
// lot of slices to sort
if (linearIndex >= totalSlices) {
return;
}
__shared__ float keys[Power2SortSize];
__shared__ int values[Power2SortSize];
// Read unsorted values
const IndexType inputStartOffset =
IndexToOffset<IndexType, Dims>::get(linearIndex, input);
// Each thread is responsible for loading and storing 2 elements
const int elem1 = threadIdx.x;
const int elem2 = threadIdx.x + (Power2SortSize / 2);
keys[elem1] = (elem1 < sliceSize) ?
input.data[inputStartOffset + elem1 * sliceStride] :
0.0f; // doesn't matter, element val out of bounds
// Torch indices are 1-based (hence the +1)
values[elem1] = (elem1 < sliceSize) ? (elem1 + 1) :
-1; // out of bounds
keys[elem2] = (elem2 < sliceSize) ?
input.data[inputStartOffset + elem2 * sliceStride] :
0.0f; // doesn't matter, element val out of bounds
// Torch indices are 1-based (hence the +1)
values[elem2] = (elem2 < sliceSize) ? (elem2 + 1) :
-1; // out of bounds
// Sort!
bitonicSort<Comparator, float, int, IndexType, Power2SortSize>(
keys, values, comp);
// Write sorted values; indices have same layout
const IndexType sortedStartOffset =
IndexToOffset<IndexType, -1>::get(linearIndex, sorted);
const IndexType out1 = sortedStartOffset + elem1 * outStride;
// elem1 values are always valid, since otherwise we would have
// chosen the next smallest power-of-2 for sorting
sorted.data[out1] = keys[elem1];
indices.data[out1] = values[elem1];
const IndexType out2 = sortedStartOffset + elem2 * outStride;
// elem2 values might be out-of-range, if the data size we are
// sorting is not a power-of-2
if (values[elem2] != -1) {
sorted.data[out2] = keys[elem2];
indices.data[out2] = values[elem2];
}
}
// Sorts `input` along dimension `dim`, writing sorted values into `sorted`
// and 1-based source positions into `indices`, using an in-block bitonic
// sort with one CUDA block per independent slice.
// Returns false when this implementation cannot handle the tensor (too many
// dimensions, or slice longer than 2048) so the caller can fall back;
// returns true on success (including the trivial zero-dim case).
// `dir` == true sorts descending, false ascending.
bool THCudaTensor_sortImpl(THCState* state,
THCudaTensor* sorted,
THCudaTensor* indices,
THCudaTensor* input,
int dim, bool dir) {
// One "slice" is a 1-D run of `sliceSize` elements along `dim`;
// there are inElements / sliceSize independent slices to sort.
long inElements = THCudaTensor_nElement(state, input);
long sliceSize = THCudaTensor_size(state, input, dim);
long sliceStride = THCudaTensor_stride(state, input, dim);
long slices = inElements / sliceSize;
long outSize = THCudaTensor_size(state, sorted, dim);
long outStride = THCudaTensor_stride(state, sorted, dim);
if (THCudaTensor_nDimension(state, input) > MAX_CUTORCH_DIMS) {
// Too many dimensions for TensorInfo-based indexing
return false;
}
if (THCudaTensor_nDimension(state, input) == 0) {
// Zero-dim tensor; do nothing
return true;
}
// The amount of shared memory and block size is based on
// 2^ceil(lg(n)); we choose that sorting implementation for a given
// size.
long ceilPowerOf2 = nextHighestPowerOf2(sliceSize);
// Only handle 1-2048 at the moment
if (ceilPowerOf2 > 2048) {
return false;
}
// Each thread of the block handles two elements of the padded slice.
// NOTE(review): if nextHighestPowerOf2(1) could return 1 this would launch
// a 0-thread block; presumably it returns >= 2 -- confirm against its impl.
const dim3 block(ceilPowerOf2 / 2);
// The grid is based on the number of independent slices that we
// have to sort; one block per slice
dim3 grid;
if (!THC_getGridFromTiles(slices, grid)) {
return false;
}
// HANDLE_CASE launches the kernel for a concrete index type (TYPE),
// dimensionality specialization (A) and power-of-2 sort size (SIZE),
// picking the >= or <= comparator from `dir`.
#define HANDLE_CASE(TYPE, A, SIZE) \
if (dir) { \
THCudaTensor_bitonicSortWithIndex<GTComp<float>, TYPE, A, SIZE> \
<<<grid, block, 0, THCState_getCurrentStream(state)>>>( \
sortedInfo, indicesInfo, inputInfo, \
dim, \
slices, (TYPE) sliceSize, (TYPE) sliceStride, \
(TYPE) outSize, (TYPE) outStride, \
GTComp<float>()); \
} else { \
THCudaTensor_bitonicSortWithIndex<LTComp<float>, TYPE, A, SIZE> \
<<<grid, block, 0, THCState_getCurrentStream(state)>>>( \
sortedInfo, indicesInfo, inputInfo, \
dim, \
slices, (TYPE) sliceSize, (TYPE) sliceStride, \
(TYPE) outSize, (TYPE) outStride, \
LTComp<float>()); \
}
// Dispatch on the compile-time power-of-2 sort size.
#define HANDLE_SORT_CASE(TYPE, A) \
{ \
switch (ceilPowerOf2) { \
case 2048: \
HANDLE_CASE(TYPE, A, 2048); \
break; \
case 1024: \
HANDLE_CASE(TYPE, A, 1024); \
break; \
case 512: \
HANDLE_CASE(TYPE, A, 512); \
break; \
case 256: \
HANDLE_CASE(TYPE, A, 256); \
break; \
case 128: \
HANDLE_CASE(TYPE, A, 128); \
break; \
case 64: \
HANDLE_CASE(TYPE, A, 64); \
break; \
case 32: \
HANDLE_CASE(TYPE, A, 32); \
break; \
case 16: \
HANDLE_CASE(TYPE, A, 16); \
break; \
case 8: \
HANDLE_CASE(TYPE, A, 8); \
break; \
case 4: \
HANDLE_CASE(TYPE, A, 4); \
break; \
case 2: \
HANDLE_CASE(TYPE, A, 2); \
break; \
case 1: \
HANDLE_CASE(TYPE, A, 1); \
break; \
default: \
assert(false); \
} \
}
// Dispatch on dimensionality: -2 = contiguous fast path, 1..3 =
// specialized small-rank indexing, -1 = generic fallback.
#define HANDLE_A_CASE(TYPE, A) \
{ \
if (inputInfo.isContiguous()) { \
HANDLE_SORT_CASE(TYPE, -2); \
} else { \
switch (A) { \
case 1: \
HANDLE_SORT_CASE(TYPE, 1); \
break; \
case 2: \
HANDLE_SORT_CASE(TYPE, 2); \
break; \
case 3: \
HANDLE_SORT_CASE(TYPE, 3); \
break; \
default: \
HANDLE_SORT_CASE(TYPE, -1); \
break; \
} \
} \
}
// Prefer 32-bit indexing when all offsets fit; fall back to 64-bit.
if (THC_canUse32BitIndexMath(state, input)) {
// In order to get to the right offset for the slice we are
// sorting, set `dim` size to 1 (the `dropDim` argument)
TensorInfo<unsigned int> sortedInfo(state, sorted, dim);
TensorInfo<unsigned int> indicesInfo(state, indices, dim);
TensorInfo<unsigned int> inputInfo(state, input, dim);
HANDLE_A_CASE(unsigned int, inputInfo.dims);
} else {
// In order to get to the right offset for the slice we are
// sorting, set `dim` size to 1 (the `dropDim` argument)
TensorInfo<unsigned long> sortedInfo(state, sorted, dim);
TensorInfo<unsigned long> indicesInfo(state, indices, dim);
TensorInfo<unsigned long> inputInfo(state, input, dim);
// long case is rare, just instantiate these versions
if (inputInfo.isContiguous()) {
HANDLE_SORT_CASE(unsigned long, -2);
} else {
HANDLE_SORT_CASE(unsigned long, -1);
}
}
#undef HANDLE_CASE
#undef HANDLE_SORT_CASE
#undef HANDLE_A_CASE
return true;
}
// Kernel: one thread block per slice. Each block cooperatively writes the
// ascending sequence 1, 2, ..., sliceSize into its slice of `out` (elements
// `sliceStride` apart), producing the initial 1-based Torch indices that the
// subsequent Thrust key/value sort will permute.
// `out` is addressed through TensorInfo/IndexToOffset with the sort
// dimension dropped, so `slice` selects the slice and `offset` is its base.
__global__ void
THCudaTensor_fillSliceWithIndex(TensorInfo<unsigned long> out,
long totalSlices,
long sliceSize,
long sliceStride) {
long slice = getLinearBlockId<long>();
if (slice >= totalSlices) {
// The grid may be padded beyond the real slice count.
return;
}
const unsigned long offset =
IndexToOffset<unsigned long, -1>::get(slice, out);
// Block-stride loop: threads of the block cover the whole slice.
for (long i = threadIdx.x; i < sliceSize; i += blockDim.x) {
// Torch indices are 1-based (hence the +1). `(float) i + 1` parses as
// ((float)i) + 1, equal to (float)(i + 1) for these magnitudes; the
// float cast suggests out.data holds floats -- TODO confirm TensorInfo.
out.data[offset + i * sliceStride] = (float) i + 1;
}
}
// Heuristic: defer sorting to Thrust only when the sort slice is contiguous
// (stride 1), there are few independent slices (<= 16), and each slice is
// large (> 4096 elements) -- the regime where Thrust's per-slice sort wins.
bool canSortThrust(THCState* state, THCudaTensor* input, int dim) {
  // Thrust sorts raw pointer ranges, so the slice must be contiguous.
  if (THCudaTensor_stride(state, input, dim) != 1) {
    return false;
  }
  const long len = THCudaTensor_size(state, input, dim);
  const long count = THCudaTensor_nElement(state, input) / len;
  return (count <= 16) && (len > 4096);
}
// Thrust-based sort path: copies `input` into `sorted`, seeds `indices` with
// 1-based positions via a fill kernel, then runs one thrust::sort_by_key per
// slice over raw device pointer ranges. The pointer-range sort relies on the
// slice being contiguous (stride 1), which the caller guarantees by only
// invoking this when canSortThrust() returned true.
void THCudaTensor_sortImplThrust(THCState* state,
THCudaTensor* sorted,
THCudaTensor* indices,
THCudaTensor* input,
int dim, bool dir) {
// Fill the indices as values that Thrust can use for key/value sorting
long totalElements = THCudaTensor_nElement(state, input);
long sliceSize = THCudaTensor_size(state, input, dim);
long sliceStride = THCudaTensor_stride(state, input, dim);
long numSlices = totalElements / sliceSize;
// Copy input to sorted, since we sort in place
if (sorted != input) {
THCudaTensor_copy(state, sorted, input);
}
// TensorInfo with `dim` dropped lets IndexToOffset map a slice number to
// the slice's base offset.
TensorInfo<unsigned long> sortedInfo(state, sorted, dim);
TensorInfo<unsigned long> indicesInfo(state, indices, dim);
dim3 grid;
THC_getGridFromTiles(numSlices, grid);
// One block per slice writes 1..sliceSize into `indices`.
THCudaTensor_fillSliceWithIndex<<<grid, min((long long)sliceSize, 1024LL),
0, THCState_getCurrentStream(state)>>>(
indicesInfo, numSlices, sliceSize, sliceStride);
THCudaCheck(cudaGetLastError());
// Sort each slice independently; keys are the values, payload the indices.
for (long slice = 0; slice < numSlices; ++slice) {
unsigned long sortedStart =
IndexToOffset<unsigned long, -1>::get(slice, sortedInfo);
unsigned long indicesStart =
IndexToOffset<unsigned long, -1>::get(slice, indicesInfo);
thrust::device_ptr<float>
sortedSliceStart(THCudaTensor_data(state, sorted) +
sortedStart);
thrust::device_ptr<float>
sortedSliceEnd(THCudaTensor_data(state, sorted) +
sortedStart + sliceSize);
thrust::device_ptr<float>
indicesSliceStart(THCudaTensor_data(state, indices) +
indicesStart);
if (dir) {
thrust::sort_by_key(sortedSliceStart, sortedSliceEnd, indicesSliceStart,
thrust::greater<float>());
} else {
thrust::sort_by_key(sortedSliceStart, sortedSliceEnd, indicesSliceStart,
thrust::less<float>());
}
}
}
// Public entry point: sorts `input` along `dim` into (`sorted`, `indices`);
// `order` != 0 sorts descending. Resizes both outputs to match the input,
// then dispatches: Thrust for a few large contiguous slices, otherwise the
// block-wide bitonic sort. Raises CUTORCH_DIM_WARNING when neither path
// can handle the tensor.
THC_API void THCudaTensor_sort(THCState* state,
                               THCudaTensor *sorted,
                               THCudaTensor *indices,
                               THCudaTensor *input,
                               int dim, int order) {
  THAssert(THCudaTensor_checkGPU(state, 3, sorted, indices, input));
  // Make sure sufficient output space is allocated
  THCudaTensor_resizeAs(state, sorted, input);
  THCudaTensor_resizeAs(state, indices, input);
  const bool descending = (bool) order;
  // If we think Thrust will be more efficient, use that
  if (canSortThrust(state, input, dim)) {
    THCudaTensor_sortImplThrust(state, sorted, indices, input,
                                dim, descending);
    return;
  }
  // Otherwise, use our blockwide sort kernel, one block per slice.
  const bool ok = THCudaTensor_sortImpl(state, sorted, indices, input,
                                        dim, descending);
  if (!ok) {
    THArgCheck(false, 1, CUTORCH_DIM_WARNING);
  }
  THCudaCheck(cudaGetLastError());
}
|
a966a76264e766dcabb33fb390ba832e7a99650d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Element-wise image subtraction: d_Result = d_Data1 - d_Data2 over a
// width x height buffer. One thread per pixel; intended for a 16x16 thread
// block launch (the hard-coded 16 matches that block edge).
// Fixes vs. original: plain 32-bit multiplies replace the legacy __mul24
// intrinsic, which silently truncates its operands to 24 bits (wrong once
// blockIdx.x*16 or y*width operands exceed 2^24) and is no faster than a
// full multiply on post-Fermi GPUs. The trailing __syncthreads() was
// removed: the kernel uses no shared memory and ends immediately after the
// store, so the barrier was dead overhead.
__global__ void Subtract(float *d_Result, float *d_Data1, float *d_Data2, int width, int height)
{
    const int x = blockIdx.x * 16 + threadIdx.x;
    const int y = blockIdx.y * 16 + threadIdx.y;
    // Guard the tail: the grid may overhang the image on both axes.
    if (x < width && y < height) {
        const int p = y * width + x;
        d_Result[p] = d_Data1[p] - d_Data2[p];
    }
}
} | a966a76264e766dcabb33fb390ba832e7a99650d.cu | #include "includes.h"
// Element-wise image subtraction: d_Result = d_Data1 - d_Data2 over a
// width x height buffer. One thread per pixel; intended for a 16x16 thread
// block launch (the hard-coded 16 matches that block edge).
// Fixes vs. original: plain 32-bit multiplies replace the legacy __mul24
// intrinsic, which silently truncates its operands to 24 bits (wrong once
// blockIdx.x*16 or y*width operands exceed 2^24) and is no faster than a
// full multiply on post-Fermi GPUs. The trailing __syncthreads() was
// removed: the kernel uses no shared memory and ends immediately after the
// store, so the barrier was dead overhead.
__global__ void Subtract(float *d_Result, float *d_Data1, float *d_Data2, int width, int height)
{
    const int x = blockIdx.x * 16 + threadIdx.x;
    const int y = blockIdx.y * 16 + threadIdx.y;
    // Guard the tail: the grid may overhang the image on both axes.
    if (x < width && y < height) {
        const int p = y * width + x;
        d_Result[p] = d_Data1[p] - d_Data2[p];
    }
}
} |
360ea51db98df3577bdf5cc2eedb3957131878cd.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#define ITER 100000
#define THREAD_PER_BLOCK 10
#define PI 3.1415926535
#define RAD(X) X *(PI / 180.0)
// Host-side table builder: fills sin_arr/cos_arr/tan_arr[i] with the sine,
// cosine and tangent of i degrees, for i in [0, ITER). All three output
// arrays must have room for at least ITER floats.
void calculator(float *sin_arr, float *cos_arr, float *tan_arr)
{
    for (int deg = 0; deg < ITER; ++deg)
    {
        // RAD converts degrees to radians (double math, narrowed to float).
        const float radians = RAD(deg);
        sin_arr[deg] = sinf(radians);
        cos_arr[deg] = cosf(radians);
        tan_arr[deg] = tanf(radians);
    }
}
// Driver: computes ITER trig values on the host, prints every entry, then
// releases the buffers. The hipProfilerStart/Stop pair brackets the run so
// an attached profiler only captures this region.
int main()
{
    hipProfilerStart();
    float *sin_arr = (float *)malloc(sizeof(float) * ITER);
    float *cos_arr = (float *)malloc(sizeof(float) * ITER);
    float *tan_arr = (float *)malloc(sizeof(float) * ITER);
    calculator(sin_arr, cos_arr, tan_arr);
    for (int i = 0; i < ITER; i++)
    {
        printf("sin (%d) = %f cos (%d) = %f tan (%d) = %f\n", i, sin_arr[i], i, cos_arr[i], i, tan_arr[i]);
    }
    free(tan_arr);
    free(cos_arr);
    free(sin_arr);
    hipProfilerStop();
    return 0;
}
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_profiler_api.h>
#define ITER 100000
#define THREAD_PER_BLOCK 10
#define PI 3.1415926535
#define RAD(X) X *(PI / 180.0)
// Host-side table builder: fills sin_arr/cos_arr/tan_arr[i] with the sine,
// cosine and tangent of i degrees, for i in [0, ITER). All three output
// arrays must have room for at least ITER floats.
void calculator(float *sin_arr, float *cos_arr, float *tan_arr)
{
    for (int deg = 0; deg < ITER; ++deg)
    {
        // RAD converts degrees to radians (double math, narrowed to float).
        const float radians = RAD(deg);
        sin_arr[deg] = sinf(radians);
        cos_arr[deg] = cosf(radians);
        tan_arr[deg] = tanf(radians);
    }
}
// Driver: computes ITER trig values on the host, prints every entry, then
// releases the buffers. The cudaProfilerStart/Stop pair brackets the run so
// an attached profiler only captures this region.
int main()
{
    cudaProfilerStart();
    float *sin_arr = (float *)malloc(sizeof(float) * ITER);
    float *cos_arr = (float *)malloc(sizeof(float) * ITER);
    float *tan_arr = (float *)malloc(sizeof(float) * ITER);
    calculator(sin_arr, cos_arr, tan_arr);
    for (int i = 0; i < ITER; i++)
    {
        printf("sin (%d) = %f cos (%d) = %f tan (%d) = %f\n", i, sin_arr[i], i, cos_arr[i], i, tan_arr[i]);
    }
    free(tan_arr);
    free(cos_arr);
    free(sin_arr);
    cudaProfilerStop();
    return 0;
}
9487a94d774dded47e72bfee261949aaeee7fa7d.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <hip/hip_runtime.h>
#include "repeat_elements_grad_impl.cuh"
#include "runtime/device/gpu/cuda_common.h"
// Backward of repeat-elements: each output gradient element dx[t_id] is the
// sum of the `rep` dY elements that were tiled from it. dY is laid out as
// (outer, repeat_dim * rep, inner) and dx as (outer, repeat_dim, inner),
// both flattened; a grid-stride loop covers all dx elements.
template <typename T>
__global__ void RepeatElementsGrad(const int dx_size, const T *dy, const int rep, T *dx, const int outer_size,
const int repeat_dim_size, const int inner_size) {
// NOTE(review): t_id is size_t while dx_size is int; the comparison
// promotes dx_size, which is fine as long as dx_size >= 0 -- it is, being
// a product of sizes computed in the launcher below.
for (size_t t_id = blockIdx.x * blockDim.x + threadIdx.x; t_id < dx_size; t_id += gridDim.x * blockDim.x) {
// Decompose the flat index into (outer, repeat_dim, inner) coordinates.
int inner_id = t_id % inner_size;
int repeat_dim_id = t_id / inner_size % repeat_dim_size;
int outer_id = t_id / inner_size / repeat_dim_size;
T dx_i = static_cast<T>(0);
// Accumulate the `rep` replicated gradient contributions.
for (int i = 0; i < rep; i++) {
dx_i += dy[(outer_id * rep * repeat_dim_size * inner_size) + (repeat_dim_id * rep * inner_size) +
(i * inner_size) + inner_id];
}
dx[t_id] = dx_i;
}
}
// Host launcher: computes the flat dx element count and launches the kernel
// on `cuda_stream` with the project's standard GET_BLOCKS/GET_THREADS sizing.
template <typename T>
void CalRepeatElementsGrad(const T *dy, const int rep, T *dx, const int outer_size, const int repeat_dim_size,
const int inner_size, hipStream_t cuda_stream) {
const int dx_size = outer_size * repeat_dim_size * inner_size;
hipLaunchKernelGGL(( RepeatElementsGrad), dim3(GET_BLOCKS(dx_size)), dim3(GET_THREADS), 0, cuda_stream, dx_size, dy, rep, dx, outer_size,
repeat_dim_size, inner_size);
}
// Explicit instantiations for the element types the op supports.
template void CalRepeatElementsGrad<int>(const int *dy, const int rep, int *dx, const int outer_size,
const int repeat_dim_size, const int inner_size, hipStream_t cuda_stream);
template void CalRepeatElementsGrad<half>(const half *dy, const int rep, half *dx, const int outer_size,
const int repeat_dim_size, const int inner_size, hipStream_t cuda_stream);
| 9487a94d774dded47e72bfee261949aaeee7fa7d.cu | /**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuda_runtime.h>
#include "repeat_elements_grad_impl.cuh"
#include "runtime/device/gpu/cuda_common.h"
// Backward of repeat-elements: each output gradient element dx[t_id] is the
// sum of the `rep` dY elements that were tiled from it. dY is laid out as
// (outer, repeat_dim * rep, inner) and dx as (outer, repeat_dim, inner),
// both flattened; a grid-stride loop covers all dx elements.
template <typename T>
__global__ void RepeatElementsGrad(const int dx_size, const T *dy, const int rep, T *dx, const int outer_size,
const int repeat_dim_size, const int inner_size) {
// NOTE(review): t_id is size_t while dx_size is int; the comparison
// promotes dx_size, which is fine as long as dx_size >= 0 -- it is, being
// a product of sizes computed in the launcher below.
for (size_t t_id = blockIdx.x * blockDim.x + threadIdx.x; t_id < dx_size; t_id += gridDim.x * blockDim.x) {
// Decompose the flat index into (outer, repeat_dim, inner) coordinates.
int inner_id = t_id % inner_size;
int repeat_dim_id = t_id / inner_size % repeat_dim_size;
int outer_id = t_id / inner_size / repeat_dim_size;
T dx_i = static_cast<T>(0);
// Accumulate the `rep` replicated gradient contributions.
for (int i = 0; i < rep; i++) {
dx_i += dy[(outer_id * rep * repeat_dim_size * inner_size) + (repeat_dim_id * rep * inner_size) +
(i * inner_size) + inner_id];
}
dx[t_id] = dx_i;
}
}
// Host launcher: computes the flat dx element count and launches the kernel
// on `cuda_stream` with the project's standard GET_BLOCKS/GET_THREADS sizing.
template <typename T>
void CalRepeatElementsGrad(const T *dy, const int rep, T *dx, const int outer_size, const int repeat_dim_size,
const int inner_size, cudaStream_t cuda_stream) {
const int dx_size = outer_size * repeat_dim_size * inner_size;
RepeatElementsGrad<<<GET_BLOCKS(dx_size), GET_THREADS, 0, cuda_stream>>>(dx_size, dy, rep, dx, outer_size,
repeat_dim_size, inner_size);
}
// Explicit instantiations for the element types the op supports.
template void CalRepeatElementsGrad<int>(const int *dy, const int rep, int *dx, const int outer_size,
const int repeat_dim_size, const int inner_size, cudaStream_t cuda_stream);
template void CalRepeatElementsGrad<half>(const half *dy, const int rep, half *dx, const int outer_size,
const int repeat_dim_size, const int inner_size, cudaStream_t cuda_stream);
|
e019ba363d83b8fa15d0ff610426ccf2afad6740.hip | // !!! This is a file automatically generated by hipify!!!
#include "cudf.h"
#include "rmm/rmm.h"
#include "utilities/cudf_utils.h"
#include "utilities/error_utils.hpp"
#include <cub/device/device_segmented_radix_sort.cuh>
// Reusable state for a CUB segmented radix sort: the temporary storage CUB
// requires, plus the back buffers of the key/value double buffers.
// Lifecycle: construct -> setup() (allocate back buffers) -> sort (lazily
// allocates `storage` on first use) -> teardown().
struct SegmentedRadixSortPlan{
const gdf_size_type num_items;
// temporary storage
void *storage;
size_t storage_bytes;
// Back halves of CUB's key/value DoubleBuffers.
void *back_key, *back_val;
size_t back_key_size, back_val_size;
hipStream_t stream;
int descending;
// [begin_bit, end_bit) restricts which key bits the radix passes examine.
unsigned begin_bit, end_bit;
SegmentedRadixSortPlan(size_t num_items, int descending,
unsigned begin_bit, unsigned end_bit)
: num_items(num_items),
storage(nullptr), storage_bytes(0),
back_key(nullptr), back_val(nullptr),
back_key_size(0), back_val_size(0),
stream(0), descending(descending),
begin_bit(begin_bit), end_bit(end_bit)
{}
// Allocates the back buffers for keys and values of the given element
// sizes. Returns GDF_SUCCESS or an RMM error via RMM_TRY.
gdf_error setup(size_t sizeof_key, size_t sizeof_val) {
back_key_size = num_items * sizeof_key;
back_val_size = num_items * sizeof_val;
RMM_TRY( RMM_ALLOC(&back_key, back_key_size, stream) ); // TODO: non-default stream
RMM_TRY( RMM_ALLOC(&back_val, back_val_size, stream) );
return GDF_SUCCESS;
}
// Frees back buffers and CUB temp storage (freeing nullptr is a no-op).
gdf_error teardown() {
RMM_TRY(RMM_FREE(back_key, stream));
RMM_TRY(RMM_FREE(back_val, stream));
RMM_TRY(RMM_FREE(storage, stream));
return GDF_SUCCESS;
}
};
// Static dispatcher around hipcub::DeviceSegmentedRadixSort for key type Tk
// and value type Tv. sort() follows CUB's two-phase protocol: the first call
// runs with plan->storage == nullptr, which makes CUB only report the
// required temp-storage size; the function then allocates that storage and
// recurses once to perform the real sort.
template <typename Tk, typename Tv>
struct SegmentedRadixSort {
static
gdf_error sort( SegmentedRadixSortPlan *plan,
Tk *d_key_buf, Tv *d_value_buf,
unsigned num_segments,
unsigned *d_begin_offsets,
unsigned *d_end_offsets) {
unsigned num_items = plan->num_items;
Tk *d_key_alt_buf = (Tk*)plan->back_key;
Tv *d_value_alt_buf = (Tv*)plan->back_val;
hipStream_t stream = plan->stream;
int descending = plan->descending;
unsigned begin_bit = plan->begin_bit;
unsigned end_bit = plan->end_bit;
// CUB may leave the result in either half of the double buffer;
// Current() tells us which one after the sort.
cub::DoubleBuffer<Tk> d_keys(d_key_buf, d_key_alt_buf);
typedef hipcub::DeviceSegmentedRadixSort Sorter;
if (d_value_buf) {
// Sort KeyValue pairs
cub::DoubleBuffer<Tv> d_values(d_value_buf, d_value_alt_buf);
if (descending) {
Sorter::SortPairsDescending(plan->storage,
plan->storage_bytes,
d_keys,
d_values,
num_items,
num_segments,
d_begin_offsets,
d_end_offsets,
begin_bit,
end_bit,
stream);
} else {
Sorter::SortPairs( plan->storage,
plan->storage_bytes,
d_keys,
d_values,
num_items,
num_segments,
d_begin_offsets,
d_end_offsets,
begin_bit,
end_bit,
stream );
}
CUDA_CHECK_LAST();
// Copy values back to the caller's buffer if CUB finished in the
// alternate half (only meaningful after the real sort pass).
if (plan->storage && d_value_buf != d_values.Current()){
hipMemcpyAsync(d_value_buf, d_value_alt_buf,
num_items * sizeof(Tv),
hipMemcpyDeviceToDevice,
stream);
CUDA_CHECK_LAST();
}
} else {
// Sort Keys only
if (descending) {
Sorter::SortKeysDescending( plan->storage,
plan->storage_bytes,
d_keys,
num_items,
num_segments,
d_begin_offsets,
d_end_offsets,
begin_bit,
end_bit,
stream );
CUDA_CHECK_LAST() // NOTE(review): no ';' -- the macro presumably supplies one
} else {
Sorter::SortKeys( plan->storage,
plan->storage_bytes,
d_keys,
num_items,
num_segments,
d_begin_offsets,
d_end_offsets,
begin_bit,
end_bit,
stream );
}
CUDA_CHECK_LAST();
}
if ( plan->storage ) {
// We have operated and the result is not in front buffer
if (d_key_buf != d_keys.Current()){
hipMemcpyAsync(d_key_buf, d_key_alt_buf, num_items * sizeof(Tk),
hipMemcpyDeviceToDevice, stream);
CUDA_CHECK_LAST();
}
} else {
// We have not operated.
// Just checking for temporary storage requirement
RMM_TRY( RMM_ALLOC(&plan->storage, plan->storage_bytes, plan->stream) ); // TODO: non-default stream
CUDA_CHECK_LAST();
// Now that we have allocated, do real work.
return sort(plan, d_key_buf, d_value_buf, num_segments,
d_begin_offsets, d_end_offsets);
}
return GDF_SUCCESS;
}
};
// Opaque-handle helpers: the C API exposes SegmentedRadixSortPlan only as
// the opaque gdf_segmented_radixsort_plan_type; these convert between the
// two representations.
gdf_segmented_radixsort_plan_type* cffi_wrap(SegmentedRadixSortPlan* obj){
    gdf_segmented_radixsort_plan_type* handle =
        reinterpret_cast<gdf_segmented_radixsort_plan_type*>(obj);
    return handle;
}
SegmentedRadixSortPlan* cffi_unwrap(gdf_segmented_radixsort_plan_type* hdl){
    SegmentedRadixSortPlan* plan =
        reinterpret_cast<SegmentedRadixSortPlan*>(hdl);
    return plan;
}
// C API: create a sort plan for `num_items` keys. `descending` selects the
// sort direction; [begin_bit, end_bit) restricts the radix passes.
gdf_segmented_radixsort_plan_type* gdf_segmented_radixsort_plan(
    size_t num_items, int descending,
    unsigned begin_bit, unsigned end_bit)
{
    SegmentedRadixSortPlan* plan =
        new SegmentedRadixSortPlan(num_items, descending, begin_bit, end_bit);
    return cffi_wrap(plan);
}
// C API: allocate the plan's double-buffer backing storage for keys and
// values of the given element sizes.
gdf_error gdf_segmented_radixsort_plan_setup(
    gdf_segmented_radixsort_plan_type *hdl,
    size_t sizeof_key, size_t sizeof_val)
{
    SegmentedRadixSortPlan* plan = cffi_unwrap(hdl);
    return plan->setup(sizeof_key, sizeof_val);
}
// C API: release all plan resources and destroy the plan; reports the
// teardown status.
gdf_error gdf_segmented_radixsort_plan_free(gdf_segmented_radixsort_plan_type *hdl)
{
    SegmentedRadixSortPlan* plan = cffi_unwrap(hdl);
    const gdf_error status = plan->teardown();
    delete plan;
    return status;
}
// WRAP(Fn, Tk, Tv) stamps out a typed C entry point named
// gdf_segmented_radixsort_<Fn> that validates the columns (no null masks,
// matching key/value sizes, plan buffers sized for Tk/Tv) and forwards to
// SegmentedRadixSort<Tk, Tv>::sort. Comments inside the macro use /* */
// because // would swallow the continuation backslashes.
#define WRAP(Fn, Tk, Tv) \
gdf_error gdf_segmented_radixsort_##Fn(gdf_segmented_radixsort_plan_type *hdl, \
gdf_column *keycol, \
gdf_column *valcol, \
unsigned num_segments, \
unsigned *d_begin_offsets, \
unsigned *d_end_offsets) \
{ \
/* validity mask must be empty */ \
GDF_REQUIRE(!keycol->valid || !keycol->null_count, GDF_VALIDITY_UNSUPPORTED); \
GDF_REQUIRE(!valcol->valid || !valcol->null_count, GDF_VALIDITY_UNSUPPORTED); \
/* size of columns must match */ \
GDF_REQUIRE(keycol->size == valcol->size, GDF_COLUMN_SIZE_MISMATCH); \
SegmentedRadixSortPlan *plan = cffi_unwrap(hdl); \
/* num_items must match */ \
GDF_REQUIRE(plan->num_items == keycol->size, GDF_COLUMN_SIZE_MISMATCH); \
/* back buffer size must match */ \
GDF_REQUIRE(sizeof(Tk) * plan->num_items == plan->back_key_size, \
GDF_COLUMN_SIZE_MISMATCH); \
GDF_REQUIRE(sizeof(Tv) * plan->num_items == plan->back_val_size, \
GDF_COLUMN_SIZE_MISMATCH); \
/* Do sort */ \
return SegmentedRadixSort<Tk, Tv>::sort(plan, \
(Tk*)keycol->data, (Tv*)valcol->data, \
num_segments, d_begin_offsets, d_end_offsets); \
}
// Typed entry points for every supported key type; values are always
// int64 row indices.
WRAP(f32, float, int64_t)
WRAP(f64, double, int64_t)
WRAP(i8, int8_t, int64_t)
WRAP(i32, int32_t, int64_t)
WRAP(i64, int64_t, int64_t)
// C API: dtype-dispatching front end. Requires int64 values (row indices)
// and routes to the typed gdf_segmented_radixsort_<type> entry point that
// matches the key column's dtype.
gdf_error gdf_segmented_radixsort_generic(gdf_segmented_radixsort_plan_type *hdl,
                                          gdf_column *keycol,
                                          gdf_column *valcol,
                                          unsigned num_segments,
                                          unsigned *d_begin_offsets,
                                          unsigned *d_end_offsets)
{
    GDF_REQUIRE(valcol->dtype == GDF_INT64, GDF_UNSUPPORTED_DTYPE);
    const auto key_type = keycol->dtype;
    if (key_type == GDF_INT8)
        return gdf_segmented_radixsort_i8(hdl, keycol, valcol, num_segments,
                                          d_begin_offsets, d_end_offsets);
    if (key_type == GDF_INT32)
        return gdf_segmented_radixsort_i32(hdl, keycol, valcol, num_segments,
                                           d_begin_offsets, d_end_offsets);
    if (key_type == GDF_INT64)
        return gdf_segmented_radixsort_i64(hdl, keycol, valcol, num_segments,
                                           d_begin_offsets, d_end_offsets);
    if (key_type == GDF_FLOAT32)
        return gdf_segmented_radixsort_f32(hdl, keycol, valcol, num_segments,
                                           d_begin_offsets, d_end_offsets);
    if (key_type == GDF_FLOAT64)
        return gdf_segmented_radixsort_f64(hdl, keycol, valcol, num_segments,
                                           d_begin_offsets, d_end_offsets);
    return GDF_UNSUPPORTED_DTYPE;
}
| e019ba363d83b8fa15d0ff610426ccf2afad6740.cu | #include "cudf.h"
#include "rmm/rmm.h"
#include "utilities/cudf_utils.h"
#include "utilities/error_utils.hpp"
#include <cub/device/device_segmented_radix_sort.cuh>
// Reusable state for a CUB segmented radix sort: the temporary storage CUB
// requires, plus the back buffers of the key/value double buffers.
// Lifecycle: construct -> setup() (allocate back buffers) -> sort (lazily
// allocates `storage` on first use) -> teardown().
struct SegmentedRadixSortPlan{
const gdf_size_type num_items;
// temporary storage
void *storage;
size_t storage_bytes;
// Back halves of CUB's key/value DoubleBuffers.
void *back_key, *back_val;
size_t back_key_size, back_val_size;
cudaStream_t stream;
int descending;
// [begin_bit, end_bit) restricts which key bits the radix passes examine.
unsigned begin_bit, end_bit;
SegmentedRadixSortPlan(size_t num_items, int descending,
unsigned begin_bit, unsigned end_bit)
: num_items(num_items),
storage(nullptr), storage_bytes(0),
back_key(nullptr), back_val(nullptr),
back_key_size(0), back_val_size(0),
stream(0), descending(descending),
begin_bit(begin_bit), end_bit(end_bit)
{}
// Allocates the back buffers for keys and values of the given element
// sizes. Returns GDF_SUCCESS or an RMM error via RMM_TRY.
gdf_error setup(size_t sizeof_key, size_t sizeof_val) {
back_key_size = num_items * sizeof_key;
back_val_size = num_items * sizeof_val;
RMM_TRY( RMM_ALLOC(&back_key, back_key_size, stream) ); // TODO: non-default stream
RMM_TRY( RMM_ALLOC(&back_val, back_val_size, stream) );
return GDF_SUCCESS;
}
// Frees back buffers and CUB temp storage (freeing nullptr is a no-op).
gdf_error teardown() {
RMM_TRY(RMM_FREE(back_key, stream));
RMM_TRY(RMM_FREE(back_val, stream));
RMM_TRY(RMM_FREE(storage, stream));
return GDF_SUCCESS;
}
};
// Static dispatcher around cub::DeviceSegmentedRadixSort for key type Tk and
// value type Tv. sort() follows CUB's two-phase protocol: the first call
// runs with plan->storage == nullptr, which makes CUB only report the
// required temp-storage size; the function then allocates that storage and
// recurses once to perform the real sort.
template <typename Tk, typename Tv>
struct SegmentedRadixSort {
static
gdf_error sort( SegmentedRadixSortPlan *plan,
Tk *d_key_buf, Tv *d_value_buf,
unsigned num_segments,
unsigned *d_begin_offsets,
unsigned *d_end_offsets) {
unsigned num_items = plan->num_items;
Tk *d_key_alt_buf = (Tk*)plan->back_key;
Tv *d_value_alt_buf = (Tv*)plan->back_val;
cudaStream_t stream = plan->stream;
int descending = plan->descending;
unsigned begin_bit = plan->begin_bit;
unsigned end_bit = plan->end_bit;
// CUB may leave the result in either half of the double buffer;
// Current() tells us which one after the sort.
cub::DoubleBuffer<Tk> d_keys(d_key_buf, d_key_alt_buf);
typedef cub::DeviceSegmentedRadixSort Sorter;
if (d_value_buf) {
// Sort KeyValue pairs
cub::DoubleBuffer<Tv> d_values(d_value_buf, d_value_alt_buf);
if (descending) {
Sorter::SortPairsDescending(plan->storage,
plan->storage_bytes,
d_keys,
d_values,
num_items,
num_segments,
d_begin_offsets,
d_end_offsets,
begin_bit,
end_bit,
stream);
} else {
Sorter::SortPairs( plan->storage,
plan->storage_bytes,
d_keys,
d_values,
num_items,
num_segments,
d_begin_offsets,
d_end_offsets,
begin_bit,
end_bit,
stream );
}
CUDA_CHECK_LAST();
// Copy values back to the caller's buffer if CUB finished in the
// alternate half (only meaningful after the real sort pass).
if (plan->storage && d_value_buf != d_values.Current()){
cudaMemcpyAsync(d_value_buf, d_value_alt_buf,
num_items * sizeof(Tv),
cudaMemcpyDeviceToDevice,
stream);
CUDA_CHECK_LAST();
}
} else {
// Sort Keys only
if (descending) {
Sorter::SortKeysDescending( plan->storage,
plan->storage_bytes,
d_keys,
num_items,
num_segments,
d_begin_offsets,
d_end_offsets,
begin_bit,
end_bit,
stream );
CUDA_CHECK_LAST() // NOTE(review): no ';' -- the macro presumably supplies one
} else {
Sorter::SortKeys( plan->storage,
plan->storage_bytes,
d_keys,
num_items,
num_segments,
d_begin_offsets,
d_end_offsets,
begin_bit,
end_bit,
stream );
}
CUDA_CHECK_LAST();
}
if ( plan->storage ) {
// We have operated and the result is not in front buffer
if (d_key_buf != d_keys.Current()){
cudaMemcpyAsync(d_key_buf, d_key_alt_buf, num_items * sizeof(Tk),
cudaMemcpyDeviceToDevice, stream);
CUDA_CHECK_LAST();
}
} else {
// We have not operated.
// Just checking for temporary storage requirement
RMM_TRY( RMM_ALLOC(&plan->storage, plan->storage_bytes, plan->stream) ); // TODO: non-default stream
CUDA_CHECK_LAST();
// Now that we have allocated, do real work.
return sort(plan, d_key_buf, d_value_buf, num_segments,
d_begin_offsets, d_end_offsets);
}
return GDF_SUCCESS;
}
};
// Opaque-handle helpers: the C API exposes SegmentedRadixSortPlan only as
// the opaque gdf_segmented_radixsort_plan_type; these convert between the
// two representations.
gdf_segmented_radixsort_plan_type* cffi_wrap(SegmentedRadixSortPlan* obj){
    gdf_segmented_radixsort_plan_type* handle =
        reinterpret_cast<gdf_segmented_radixsort_plan_type*>(obj);
    return handle;
}
SegmentedRadixSortPlan* cffi_unwrap(gdf_segmented_radixsort_plan_type* hdl){
    SegmentedRadixSortPlan* plan =
        reinterpret_cast<SegmentedRadixSortPlan*>(hdl);
    return plan;
}
// C API: create a sort plan for `num_items` keys. `descending` selects the
// sort direction; [begin_bit, end_bit) restricts the radix passes.
gdf_segmented_radixsort_plan_type* gdf_segmented_radixsort_plan(
    size_t num_items, int descending,
    unsigned begin_bit, unsigned end_bit)
{
    SegmentedRadixSortPlan* plan =
        new SegmentedRadixSortPlan(num_items, descending, begin_bit, end_bit);
    return cffi_wrap(plan);
}
// C API: allocate the plan's double-buffer backing storage for keys and
// values of the given element sizes.
gdf_error gdf_segmented_radixsort_plan_setup(
    gdf_segmented_radixsort_plan_type *hdl,
    size_t sizeof_key, size_t sizeof_val)
{
    SegmentedRadixSortPlan* plan = cffi_unwrap(hdl);
    return plan->setup(sizeof_key, sizeof_val);
}
// C API: release all plan resources and destroy the plan; reports the
// teardown status.
gdf_error gdf_segmented_radixsort_plan_free(gdf_segmented_radixsort_plan_type *hdl)
{
    SegmentedRadixSortPlan* plan = cffi_unwrap(hdl);
    const gdf_error status = plan->teardown();
    delete plan;
    return status;
}
// WRAP(Fn, Tk, Tv) stamps out a typed C entry point named
// gdf_segmented_radixsort_<Fn> that validates the columns (no null masks,
// matching key/value sizes, plan buffers sized for Tk/Tv) and forwards to
// SegmentedRadixSort<Tk, Tv>::sort. Comments inside the macro use /* */
// because // would swallow the continuation backslashes.
#define WRAP(Fn, Tk, Tv) \
gdf_error gdf_segmented_radixsort_##Fn(gdf_segmented_radixsort_plan_type *hdl, \
gdf_column *keycol, \
gdf_column *valcol, \
unsigned num_segments, \
unsigned *d_begin_offsets, \
unsigned *d_end_offsets) \
{ \
/* validity mask must be empty */ \
GDF_REQUIRE(!keycol->valid || !keycol->null_count, GDF_VALIDITY_UNSUPPORTED); \
GDF_REQUIRE(!valcol->valid || !valcol->null_count, GDF_VALIDITY_UNSUPPORTED); \
/* size of columns must match */ \
GDF_REQUIRE(keycol->size == valcol->size, GDF_COLUMN_SIZE_MISMATCH); \
SegmentedRadixSortPlan *plan = cffi_unwrap(hdl); \
/* num_items must match */ \
GDF_REQUIRE(plan->num_items == keycol->size, GDF_COLUMN_SIZE_MISMATCH); \
/* back buffer size must match */ \
GDF_REQUIRE(sizeof(Tk) * plan->num_items == plan->back_key_size, \
GDF_COLUMN_SIZE_MISMATCH); \
GDF_REQUIRE(sizeof(Tv) * plan->num_items == plan->back_val_size, \
GDF_COLUMN_SIZE_MISMATCH); \
/* Do sort */ \
return SegmentedRadixSort<Tk, Tv>::sort(plan, \
(Tk*)keycol->data, (Tv*)valcol->data, \
num_segments, d_begin_offsets, d_end_offsets); \
}
// Typed entry points for every supported key type; values are always
// int64 row indices.
WRAP(f32, float, int64_t)
WRAP(f64, double, int64_t)
WRAP(i8, int8_t, int64_t)
WRAP(i32, int32_t, int64_t)
WRAP(i64, int64_t, int64_t)
// C API: dtype-dispatching front end. Requires int64 values (row indices)
// and routes to the typed gdf_segmented_radixsort_<type> entry point that
// matches the key column's dtype.
gdf_error gdf_segmented_radixsort_generic(gdf_segmented_radixsort_plan_type *hdl,
                                          gdf_column *keycol,
                                          gdf_column *valcol,
                                          unsigned num_segments,
                                          unsigned *d_begin_offsets,
                                          unsigned *d_end_offsets)
{
    GDF_REQUIRE(valcol->dtype == GDF_INT64, GDF_UNSUPPORTED_DTYPE);
    const auto key_type = keycol->dtype;
    if (key_type == GDF_INT8)
        return gdf_segmented_radixsort_i8(hdl, keycol, valcol, num_segments,
                                          d_begin_offsets, d_end_offsets);
    if (key_type == GDF_INT32)
        return gdf_segmented_radixsort_i32(hdl, keycol, valcol, num_segments,
                                           d_begin_offsets, d_end_offsets);
    if (key_type == GDF_INT64)
        return gdf_segmented_radixsort_i64(hdl, keycol, valcol, num_segments,
                                           d_begin_offsets, d_end_offsets);
    if (key_type == GDF_FLOAT32)
        return gdf_segmented_radixsort_f32(hdl, keycol, valcol, num_segments,
                                           d_begin_offsets, d_end_offsets);
    if (key_type == GDF_FLOAT64)
        return gdf_segmented_radixsort_f64(hdl, keycol, valcol, num_segments,
                                           d_begin_offsets, d_end_offsets);
    return GDF_UNSUPPORTED_DTYPE;
}
|
669af896a72ee856defea837633c7fea380e2081.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.7.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2015
@generated from zgemm_fermi.cu normal z -> s, Fri Sep 11 18:29:22 2015
@author Jakub Kurzak
@author Stan Tomov
@author Mark Gates
[zcds]gemm_fermi.cu defines the CPU driver.
[zcds]gemm_fermi_kernels.h defines the block sizes for each precision.
gemm_stencil_defs.h defines types and functions for precision-independent code.
These files are included multiple times, once for each transpose version.
gemm_stencil.cuh defines the GPU kernel (device function).
gemm_kernel.cuh defines the GPU kernel (global function).
The batched version uses gemm_kernel_batched.cuh instead of gemm_kernel.cuh.
*/
#include "common_magma.h"
#include "commonblas_s.h"
#define PRECISION_s
///////////////////////////////////////////////////////////////////////////////////////////////////
#include "sgemm_fermi_kernels.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
/**
Purpose
-------
SGEMM performs one of the matrix-matrix operations
C = alpha*op( A )*op( B ) + beta*C,
where op( X ) is one of
op( X ) = X or op( X ) = X**T or op( X ) = X**H,
alpha and beta are scalars, and A, B and C are matrices, with
op( A ) an m by k matrix, op( B ) a k by n matrix and C an m by n matrix.
Parameters
----------
@param[in]
transA CHARACTER*1.
On entry, transA specifies the form of op( A ) to be used in
the matrix multiplication as follows:
- = 'N': op( A ) = A.
- = 'T': op( A ) = A**T.
- = 'C': op( A ) = A**H.
@param[in]
transB CHARACTER*1.
On entry, transB specifies the form of op( B ) to be used in
the matrix multiplication as follows:
- = 'N': op( B ) = B.
- = 'T': op( B ) = B**T.
- = 'C': op( B ) = B**H.
@param[in]
m INTEGER.
On entry, M specifies the number of rows of the matrix
op( dA ) and of the matrix dC. M must be at least zero.
@param[in]
n INTEGER.
On entry, N specifies the number of columns of the matrix
op( dB ) and the number of columns of the matrix dC. N must be
at least zero.
@param[in]
k INTEGER.
On entry, K specifies the number of columns of the matrix
op( dA ) and the number of rows of the matrix op( dB ). K must
be at least zero.
@param[in]
alpha REAL
On entry, ALPHA specifies the scalar alpha.
@param[in]
dA REAL array of DIMENSION ( LDA, ka ), where ka is
k when transA = MagmaNoTrans, and is m otherwise.
Before entry with transA = MagmaNoTrans, the leading m by k
part of the array dA must contain the matrix dA, otherwise
the leading k by m part of the array dA must contain the
matrix dA.
@param[in]
ldda INTEGER.
On entry, LDA specifies the first dimension of A as declared
in the calling (sub) program. When transA = MagmaNoTrans then
LDA must be at least max( 1, m ), otherwise LDA must be at
least max( 1, k ).
@param[in]
dB REAL array of DIMENSION ( LDB, kb ), where kb is
n when transB = MagmaNoTrans, and is k otherwise.
Before entry with transB = MagmaNoTrans, the leading k by n
part of the array dB must contain the matrix dB, otherwise
the leading n by k part of the array dB must contain the
matrix dB.
@param[in]
lddb INTEGER.
On entry, LDB specifies the first dimension of dB as declared
in the calling (sub) program. When transB = MagmaNoTrans then
LDB must be at least max( 1, k ), otherwise LDB must be at
least max( 1, n ).
@param[in]
beta REAL.
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then dC need not be set on input.
@param[in,out]
dC REAL array of DIMENSION ( LDC, n ).
Before entry, the leading m by n part of the array dC must
contain the matrix dC, except when beta is zero, in which
case dC need not be set on entry.
On exit, the array dC is overwritten by the m by n matrix
( alpha*op( dA )*op( dB ) + beta*dC ).
@param[in]
lddc INTEGER.
On entry, LDC specifies the first dimension of dC as declared
in the calling (sub) program. LDC must be at least
max( 1, m ).
@ingroup magma_sblas3
********************************************************************/
extern "C" void
magmablas_sgemm(
magma_trans_t transA, magma_trans_t transB, magma_int_t m, magma_int_t n, magma_int_t k,
float alpha,
magmaFloat_const_ptr dA, magma_int_t ldda,
magmaFloat_const_ptr dB, magma_int_t lddb,
float beta,
magmaFloat_ptr dC, magma_int_t lddc )
{
magma_int_t info = 0;
if ( transA != MagmaNoTrans && transA != MagmaTrans && transA != MagmaConjTrans )
info = -1;
else if ( transB != MagmaNoTrans && transB != MagmaTrans && transB != MagmaConjTrans )
info = -2;
else if ( m < 0 )
info = -3;
else if ( n < 0 )
info = -4;
else if ( k < 0 )
info = -5;
else if ( transA == MagmaNoTrans ? ldda < m : ldda < k )
info = -8;
else if ( transB == MagmaNoTrans ? lddb < k : lddb < n )
info = -10;
else if ( lddc < m )
info = -13;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
// --------------------
// call CUDA ARCH 1.x version
// magmablas for [sd] precisions, cublas for [zc] precisions.
#if defined(PRECISION_z) || defined(PRECISION_c)
magma_sgemm(
transA, transB,
m, n, k, alpha, dA, ldda, dB, lddb, beta, dC, lddc );
#else
magmablas_sgemm_tesla(
transA, transB, m, n, k, alpha, dA, ldda, dB, lddb, beta, dC, lddc );
#endif
return;
}
// --------------------
// CUDA ARCH 2.x (Fermi) version
if ( m <= 0 || n <= 0 || k <= 0 )
return;
size_t offsetA = 0;
size_t offsetB = 0;
int TransA = 2, TransB = 2;
if ( transA == MagmaTrans )
TransA = 1;
else if ( transA == MagmaNoTrans )
TransA = 0;
if ( transB == MagmaTrans )
TransB = 1;
else if ( transB == MagmaNoTrans )
TransB = 0;
magma_int_t Am = ( ! TransA ? m : k);
magma_int_t An = (!TransA ? k : m);
magma_int_t Bm = ( ! TransB ? k : n);
magma_int_t Bn = (!TransB ? n : k);
size_t sizeA = (size_t) ldda * (An - 1) + Am;
size_t sizeB = (size_t) lddb * (Bn - 1) + Bm;
size_t CUBLAS_MAX_1DBUF_SIZE = ((1 << 27) - 512);
if ( sizeA >= CUBLAS_MAX_1DBUF_SIZE ||
sizeB >= CUBLAS_MAX_1DBUF_SIZE )
{
magma_sgemm( transA, transB, m, n, k, alpha,
dA, ldda, dB, lddb,
beta, dC, lddc );
return;
}
#ifdef TEXTURE_1D
// Set textures parameters
tex_ref_A.normalized = false;
tex_ref_A.filterMode = hipFilterModePoint;
tex_ref_A.addressMode[0] = hipAddressModeClamp;
tex_ref_B.normalized = false;
tex_ref_B.filterMode = hipFilterModePoint;
tex_ref_B.addressMode[0] = hipAddressModeClamp;
// Bind A and B to texture references
hipError_t err;
err = hipBindTexture(&offsetA, tex_ref_A, dA, sizeA*sizeof(float));
if ( err != hipSuccess ) {
fprintf( stderr, "cannot bind A to texture: %s (%d)\n", hipGetErrorString(err), err );
return;
}
err = hipBindTexture(&offsetB, tex_ref_B, dB, sizeB*sizeof(float));
if ( err != hipSuccess ) {
fprintf( stderr, "cannot bind B to texture: %s (%d)\n", hipGetErrorString(err), err );
hipUnbindTexture( tex_ref_A );
return;
}
#endif
// Set up grids
dim3 dimBlock(DIM_X, DIM_Y);
offsetA = offsetA/sizeof(dA[0]);
offsetB = offsetB/sizeof(dB[0]);
if ( TransA == 0 && TransB == 0 ) {
dim3 dimGrid( magma_ceildiv( m, BLK_M_nn ),
magma_ceildiv( n, BLK_N_nn ) );
hipLaunchKernelGGL(( sgemm_kernel_fermi_nn), dim3(dimGrid), dim3(dimBlock), 0, magma_stream ,
m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 0 && TransB == 1 ) {
dim3 dimGrid( magma_ceildiv( m, BLK_M_nt ),
magma_ceildiv( n, BLK_N_nt ) );
hipLaunchKernelGGL(( sgemm_kernel_fermi_nt), dim3(dimGrid), dim3(dimBlock), 0, magma_stream ,
m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 0 && TransB == 2 ) {
dim3 dimGrid( magma_ceildiv( m, BLK_M_nc ),
magma_ceildiv( n, BLK_N_nc ) );
hipLaunchKernelGGL(( sgemm_kernel_fermi_nc), dim3(dimGrid), dim3(dimBlock), 0, magma_stream ,
m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 1 && TransB == 0 ) {
dim3 dimGrid( magma_ceildiv( m, BLK_M_tn ),
magma_ceildiv( n, BLK_N_tn ) );
hipLaunchKernelGGL(( sgemm_kernel_fermi_tn), dim3(dimGrid), dim3(dimBlock), 0, magma_stream ,
m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 1 && TransB == 1 ) {
dim3 dimGrid( magma_ceildiv( m, BLK_M_tt ),
magma_ceildiv( n, BLK_N_tt ) );
hipLaunchKernelGGL(( sgemm_kernel_fermi_tt), dim3(dimGrid), dim3(dimBlock), 0, magma_stream ,
m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 1 && TransB == 2 ) {
dim3 dimGrid( magma_ceildiv( m, BLK_M_tc ),
magma_ceildiv( n, BLK_N_tc ) );
hipLaunchKernelGGL(( sgemm_kernel_fermi_tc), dim3(dimGrid), dim3(dimBlock), 0, magma_stream ,
m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 2 && TransB == 0 ) {
dim3 dimGrid( magma_ceildiv( m, BLK_M_cn ),
magma_ceildiv( n, BLK_N_cn ) );
hipLaunchKernelGGL(( sgemm_kernel_fermi_cn), dim3(dimGrid), dim3(dimBlock), 0, magma_stream ,
m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 2 && TransB == 1 ) {
dim3 dimGrid( magma_ceildiv( m, BLK_M_ct ),
magma_ceildiv( n, BLK_N_ct ) );
hipLaunchKernelGGL(( sgemm_kernel_fermi_ct), dim3(dimGrid), dim3(dimBlock), 0, magma_stream ,
m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 2 && TransB == 2 ) {
dim3 dimGrid( magma_ceildiv( m, BLK_M_cc ),
magma_ceildiv( n, BLK_N_cc ) );
hipLaunchKernelGGL(( sgemm_kernel_fermi_cc), dim3(dimGrid), dim3(dimBlock), 0, magma_stream ,
m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta,
(int)offsetA, (int)offsetB );
}
#ifdef TEXTURE_1D
hipUnbindTexture( tex_ref_A );
hipUnbindTexture( tex_ref_B );
#endif
}
///////////////////////////////////////////////////////////////////////////////////////////////////
| 669af896a72ee856defea837633c7fea380e2081.cu | /*
-- MAGMA (version 1.7.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2015
@generated from zgemm_fermi.cu normal z -> s, Fri Sep 11 18:29:22 2015
@author Jakub Kurzak
@author Stan Tomov
@author Mark Gates
[zcds]gemm_fermi.cu defines the CPU driver.
[zcds]gemm_fermi_kernels.h defines the block sizes for each precision.
gemm_stencil_defs.h defines types and functions for precision-independent code.
These files are included multiple times, once for each transpose version.
gemm_stencil.cuh defines the GPU kernel (device function).
gemm_kernel.cuh defines the GPU kernel (global function).
The batched version uses gemm_kernel_batched.cuh instead of gemm_kernel.cuh.
*/
#include "common_magma.h"
#include "commonblas_s.h"
#define PRECISION_s
///////////////////////////////////////////////////////////////////////////////////////////////////
#include "sgemm_fermi_kernels.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
/**
Purpose
-------
SGEMM performs one of the matrix-matrix operations
C = alpha*op( A )*op( B ) + beta*C,
where op( X ) is one of
op( X ) = X or op( X ) = X**T or op( X ) = X**H,
alpha and beta are scalars, and A, B and C are matrices, with
op( A ) an m by k matrix, op( B ) a k by n matrix and C an m by n matrix.
Parameters
----------
@param[in]
transA CHARACTER*1.
On entry, transA specifies the form of op( A ) to be used in
the matrix multiplication as follows:
- = 'N': op( A ) = A.
- = 'T': op( A ) = A**T.
- = 'C': op( A ) = A**H.
@param[in]
transB CHARACTER*1.
On entry, transB specifies the form of op( B ) to be used in
the matrix multiplication as follows:
- = 'N': op( B ) = B.
- = 'T': op( B ) = B**T.
- = 'C': op( B ) = B**H.
@param[in]
m INTEGER.
On entry, M specifies the number of rows of the matrix
op( dA ) and of the matrix dC. M must be at least zero.
@param[in]
n INTEGER.
On entry, N specifies the number of columns of the matrix
op( dB ) and the number of columns of the matrix dC. N must be
at least zero.
@param[in]
k INTEGER.
On entry, K specifies the number of columns of the matrix
op( dA ) and the number of rows of the matrix op( dB ). K must
be at least zero.
@param[in]
alpha REAL
On entry, ALPHA specifies the scalar alpha.
@param[in]
dA REAL array of DIMENSION ( LDA, ka ), where ka is
k when transA = MagmaNoTrans, and is m otherwise.
Before entry with transA = MagmaNoTrans, the leading m by k
part of the array dA must contain the matrix dA, otherwise
the leading k by m part of the array dA must contain the
matrix dA.
@param[in]
ldda INTEGER.
On entry, LDA specifies the first dimension of A as declared
in the calling (sub) program. When transA = MagmaNoTrans then
LDA must be at least max( 1, m ), otherwise LDA must be at
least max( 1, k ).
@param[in]
dB REAL array of DIMENSION ( LDB, kb ), where kb is
n when transB = MagmaNoTrans, and is k otherwise.
Before entry with transB = MagmaNoTrans, the leading k by n
part of the array dB must contain the matrix dB, otherwise
the leading n by k part of the array dB must contain the
matrix dB.
@param[in]
lddb INTEGER.
On entry, LDB specifies the first dimension of dB as declared
in the calling (sub) program. When transB = MagmaNoTrans then
LDB must be at least max( 1, k ), otherwise LDB must be at
least max( 1, n ).
@param[in]
beta REAL.
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then dC need not be set on input.
@param[in,out]
dC REAL array of DIMENSION ( LDC, n ).
Before entry, the leading m by n part of the array dC must
contain the matrix dC, except when beta is zero, in which
case dC need not be set on entry.
On exit, the array dC is overwritten by the m by n matrix
( alpha*op( dA )*op( dB ) + beta*dC ).
@param[in]
lddc INTEGER.
On entry, LDC specifies the first dimension of dC as declared
in the calling (sub) program. LDC must be at least
max( 1, m ).
@ingroup magma_sblas3
********************************************************************/
extern "C" void
magmablas_sgemm(
magma_trans_t transA, magma_trans_t transB, magma_int_t m, magma_int_t n, magma_int_t k,
float alpha,
magmaFloat_const_ptr dA, magma_int_t ldda,
magmaFloat_const_ptr dB, magma_int_t lddb,
float beta,
magmaFloat_ptr dC, magma_int_t lddc )
{
magma_int_t info = 0;
if ( transA != MagmaNoTrans && transA != MagmaTrans && transA != MagmaConjTrans )
info = -1;
else if ( transB != MagmaNoTrans && transB != MagmaTrans && transB != MagmaConjTrans )
info = -2;
else if ( m < 0 )
info = -3;
else if ( n < 0 )
info = -4;
else if ( k < 0 )
info = -5;
else if ( transA == MagmaNoTrans ? ldda < m : ldda < k )
info = -8;
else if ( transB == MagmaNoTrans ? lddb < k : lddb < n )
info = -10;
else if ( lddc < m )
info = -13;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
// --------------------
// call CUDA ARCH 1.x version
// magmablas for [sd] precisions, cublas for [zc] precisions.
#if defined(PRECISION_z) || defined(PRECISION_c)
magma_sgemm(
transA, transB,
m, n, k, alpha, dA, ldda, dB, lddb, beta, dC, lddc );
#else
magmablas_sgemm_tesla(
transA, transB, m, n, k, alpha, dA, ldda, dB, lddb, beta, dC, lddc );
#endif
return;
}
// --------------------
// CUDA ARCH 2.x (Fermi) version
if ( m <= 0 || n <= 0 || k <= 0 )
return;
size_t offsetA = 0;
size_t offsetB = 0;
int TransA = 2, TransB = 2;
if ( transA == MagmaTrans )
TransA = 1;
else if ( transA == MagmaNoTrans )
TransA = 0;
if ( transB == MagmaTrans )
TransB = 1;
else if ( transB == MagmaNoTrans )
TransB = 0;
magma_int_t Am = ( ! TransA ? m : k);
magma_int_t An = (!TransA ? k : m);
magma_int_t Bm = ( ! TransB ? k : n);
magma_int_t Bn = (!TransB ? n : k);
size_t sizeA = (size_t) ldda * (An - 1) + Am;
size_t sizeB = (size_t) lddb * (Bn - 1) + Bm;
size_t CUBLAS_MAX_1DBUF_SIZE = ((1 << 27) - 512);
if ( sizeA >= CUBLAS_MAX_1DBUF_SIZE ||
sizeB >= CUBLAS_MAX_1DBUF_SIZE )
{
magma_sgemm( transA, transB, m, n, k, alpha,
dA, ldda, dB, lddb,
beta, dC, lddc );
return;
}
#ifdef TEXTURE_1D
// Set textures parameters
tex_ref_A.normalized = false;
tex_ref_A.filterMode = cudaFilterModePoint;
tex_ref_A.addressMode[0] = cudaAddressModeClamp;
tex_ref_B.normalized = false;
tex_ref_B.filterMode = cudaFilterModePoint;
tex_ref_B.addressMode[0] = cudaAddressModeClamp;
// Bind A and B to texture references
cudaError_t err;
err = cudaBindTexture(&offsetA, tex_ref_A, dA, sizeA*sizeof(float));
if ( err != cudaSuccess ) {
fprintf( stderr, "cannot bind A to texture: %s (%d)\n", cudaGetErrorString(err), err );
return;
}
err = cudaBindTexture(&offsetB, tex_ref_B, dB, sizeB*sizeof(float));
if ( err != cudaSuccess ) {
fprintf( stderr, "cannot bind B to texture: %s (%d)\n", cudaGetErrorString(err), err );
cudaUnbindTexture( tex_ref_A );
return;
}
#endif
// Set up grids
dim3 dimBlock(DIM_X, DIM_Y);
offsetA = offsetA/sizeof(dA[0]);
offsetB = offsetB/sizeof(dB[0]);
if ( TransA == 0 && TransB == 0 ) {
dim3 dimGrid( magma_ceildiv( m, BLK_M_nn ),
magma_ceildiv( n, BLK_N_nn ) );
sgemm_kernel_fermi_nn<<< dimGrid, dimBlock, 0, magma_stream >>>(
m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 0 && TransB == 1 ) {
dim3 dimGrid( magma_ceildiv( m, BLK_M_nt ),
magma_ceildiv( n, BLK_N_nt ) );
sgemm_kernel_fermi_nt<<< dimGrid, dimBlock, 0, magma_stream >>>(
m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 0 && TransB == 2 ) {
dim3 dimGrid( magma_ceildiv( m, BLK_M_nc ),
magma_ceildiv( n, BLK_N_nc ) );
sgemm_kernel_fermi_nc<<< dimGrid, dimBlock, 0, magma_stream >>>(
m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 1 && TransB == 0 ) {
dim3 dimGrid( magma_ceildiv( m, BLK_M_tn ),
magma_ceildiv( n, BLK_N_tn ) );
sgemm_kernel_fermi_tn<<< dimGrid, dimBlock, 0, magma_stream >>>(
m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 1 && TransB == 1 ) {
dim3 dimGrid( magma_ceildiv( m, BLK_M_tt ),
magma_ceildiv( n, BLK_N_tt ) );
sgemm_kernel_fermi_tt<<< dimGrid, dimBlock, 0, magma_stream >>>(
m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 1 && TransB == 2 ) {
dim3 dimGrid( magma_ceildiv( m, BLK_M_tc ),
magma_ceildiv( n, BLK_N_tc ) );
sgemm_kernel_fermi_tc<<< dimGrid, dimBlock, 0, magma_stream >>>(
m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 2 && TransB == 0 ) {
dim3 dimGrid( magma_ceildiv( m, BLK_M_cn ),
magma_ceildiv( n, BLK_N_cn ) );
sgemm_kernel_fermi_cn<<< dimGrid, dimBlock, 0, magma_stream >>>(
m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 2 && TransB == 1 ) {
dim3 dimGrid( magma_ceildiv( m, BLK_M_ct ),
magma_ceildiv( n, BLK_N_ct ) );
sgemm_kernel_fermi_ct<<< dimGrid, dimBlock, 0, magma_stream >>>(
m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 2 && TransB == 2 ) {
dim3 dimGrid( magma_ceildiv( m, BLK_M_cc ),
magma_ceildiv( n, BLK_N_cc ) );
sgemm_kernel_fermi_cc<<< dimGrid, dimBlock, 0, magma_stream >>>(
m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta,
(int)offsetA, (int)offsetB );
}
#ifdef TEXTURE_1D
cudaUnbindTexture( tex_ref_A );
cudaUnbindTexture( tex_ref_B );
#endif
}
///////////////////////////////////////////////////////////////////////////////////////////////////
|
ad18c8609ab10b3e457d4e6990d61772abae9908.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* This software and the information contained herein is PROPRIETARY and
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
* conditions of a Non-Disclosure Agreement. Any reproduction or
* disclosure to any third party without the express written consent of
* NVIDIA is prohibited.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
/* 2D Convolution: C = A (*) B, A is the 5x5 kernel matrix, B is the image matrix.
* Host code.
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
//#include <cutil.h>
// includes, kernels
#include <2Dconvolution_kernel.cu>
////////////////////////////////////////////////////////////////////////////////
// declarations, forward
extern "C"
void computeGold(float*, const float*, const float*, unsigned int, unsigned int);
Matrix AllocateDeviceMatrix(const Matrix M);
Matrix AllocateMatrix(int height, int width, int init);
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost);
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice);
int ReadFile(Matrix* M, char* file_name);
void WriteFile(Matrix M, char* file_name);
void FreeDeviceMatrix(Matrix* M);
void FreeMatrix(Matrix* M);
void ConvolutionOnDevice(const Matrix A, const Matrix B, Matrix C);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char** argv) {
Matrix A;
Matrix B;
Matrix C;
srand(2012);
if(argc != 5 && argc != 4)
{
// Allocate and initialize the matrices
A = AllocateMatrix(KERNEL_SIZE, KERNEL_SIZE, 1);
B = AllocateMatrix((rand() % 1024) + 1, (rand() % 1024) + 1, 1);
C = AllocateMatrix(B.height, B.width, 0);
}
else
{
// Allocate and read in matrices from disk
int* params = NULL;
int data_read = 0;
//cutReadFilei(argv[1], ¶ms, &data_read, true);
FILE *fp=fopen(argv[1],"rb");
if (fp==NULL)
{
printf("Nothing in the file\n");
return -1;
}
int Num_read = 0;
while (!feof(fp)) {
//printf("Nothing in the file\n");
//fread(&(params[data_read++]),sizeof(int),1,fp);
//printf("%f\n",params[data_read] );
fscanf (fp, "%d", &data_read);
Num_read += 1;
if(Num_read == 2)
{break;}
}
fclose(fp);
/*
printf("Liang Xu\n");
params[0] = (int)data_read;
params[1] = (int)data_read;
if(data_read != 2) {
printf("Error reading parameter file\n");
return 1;
}
*/
A = AllocateMatrix(KERNEL_SIZE, KERNEL_SIZE, 0);
(void)ReadFile(&A, argv[2]);
B = AllocateMatrix(data_read, data_read, 0);
C = AllocateMatrix(data_read, data_read, 0);
(void)ReadFile(&B, argv[3]);
}
// Convolution on the device
ConvolutionOnDevice(A, B, C);
// compute the matrix multiplication on the CPU for comparison
Matrix reference = AllocateMatrix(C.height, C.width, 0);
computeGold(reference.elements, A.elements, B.elements, B.height, B.width);
// in this case check if the result is equivalent to the expected soluion
//CUTBoolean res = cutComparefe(reference.elements, C.elements, C.width * C.height, 0.001f);
//printf("Test %s\n", (1 == res) ? "PASSED" : "FAILED");
bool res=true;
for (int i=0; i<C.width * C.height; i++)
if (fabs(reference.elements[i]-C.elements[i])>0.0001f) {
printf("Found Different\n");
printf("Point [%d] is different\n",i );
printf("reference value is [%f]\n",reference.elements[i]);
printf("GPU value is [%f]\n\n",C.elements[i]);
res=false;
break;
}
printf("Test %s\n", (true == res) ? "PASSED" : "FAILED");
if(argc == 5)
{
WriteFile(C, argv[4]);
}
else if(argc == 2)
{
WriteFile(C, argv[1]);
}
// Free matrices
FreeMatrix(&A);
FreeMatrix(&B);
FreeMatrix(&C);
return 0;
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
void ConvolutionOnDevice(const Matrix A, const Matrix B, Matrix C)
{
// Load A and B to the device
//Matrix Ad = AllocateDeviceMatrix(A);
//CopyToDeviceMatrix(Ad, A);
Matrix Bd = AllocateDeviceMatrix(B);
CopyToDeviceMatrix(Bd, B);
// Allocate P on the device
Matrix Cd = AllocateDeviceMatrix(C);
CopyToDeviceMatrix(Cd, C); // Clear memory
hipMemcpyToSymbol(Kernel, A.elements, KERNEL_SIZE*KERNEL_SIZE*sizeof(float));
//Setup the execution configuration
//Launch the device computation threads!
int blocks = B.height;
int threads = B.width;
hipEvent_t start, stop;
float elapsedTime=0.0f;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
hipLaunchKernelGGL(( ConvolutionKernel), dim3(blocks), dim3(threads), 0, 0, Bd,Cd);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
printf("The execution time of GPU is :%f\n",elapsedTime);
// Read C from the device
CopyFromDeviceMatrix(C, Cd);
// Free device matrices
//FreeDeviceMatrix(&Ad);
FreeDeviceMatrix(&Bd);
FreeDeviceMatrix(&Cd);
}
// Allocate a device matrix of same size as M.
Matrix AllocateDeviceMatrix(const Matrix M)
{
Matrix Mdevice = M;
int size = M.width * M.height * sizeof(float);
hipMalloc((void**)&Mdevice.elements, size);
return Mdevice;
}
// Allocate a device matrix of dimensions height*width
// If init == 0, initialize to all zeroes.
// If init == 1, perform random initialization.
// If init == 2, initialize matrix parameters, but do not allocate memory
Matrix AllocateMatrix(int height, int width, int init)
{
Matrix M;
M.width = M.pitch = width;
M.height = height;
int size = M.width * M.height;
M.elements = NULL;
// don't allocate memory on option 2
if(init == 2)
return M;
M.elements = (float*) malloc(size*sizeof(float));
for(unsigned int i = 0; i < M.height * M.width; i++)
{
M.elements[i] = (init == 0) ? (0.0f) : (rand() / (float)RAND_MAX);
if(rand() % 2)
M.elements[i] = -M.elements[i];
}
return M;
}
// Copy a host matrix to a device matrix.
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost)
{
int size = Mhost.width * Mhost.height * sizeof(float);
Mdevice.height = Mhost.height;
Mdevice.width = Mhost.width;
Mdevice.pitch = Mhost.pitch;
hipMemcpy(Mdevice.elements, Mhost.elements, size,
hipMemcpyHostToDevice);
}
// Copy a device matrix to a host matrix.
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice)
{
int size = Mdevice.width * Mdevice.height * sizeof(float);
hipMemcpy(Mhost.elements, Mdevice.elements, size,
hipMemcpyDeviceToHost);
}
// Free a device matrix.
void FreeDeviceMatrix(Matrix* M)
{
hipFree(M->elements);
M->elements = NULL;
}
// Free a host Matrix
void FreeMatrix(Matrix* M)
{
free(M->elements);
M->elements = NULL;
}
int ReadFile(Matrix* M, char* file_name)
{
unsigned int i=0;
//cutReadFilef(file_name, &(M->elements), &data_read, true);
FILE *fp=fopen(file_name,"rb");
float temp;
if (fp==NULL) return -1;
int limit = M->width * M->height;
while (!feof(fp)) {
fscanf (fp, "%f", &temp);
M->elements[i] = temp;
if(i = limit )
{
break;
}
//fread(&(M->elements[i]),sizeof(float),1,fp);
//printf("Element is %f\n",M->elements[i] );
i++;
}
fclose(fp);
return i;
}
void WriteFile(Matrix M, char* file_name)
{
//cutWriteFilef(file_name, M.elements, M.width*M.height,
//0.0001f);
FILE *fp=fopen(file_name,"wb");
if (fp==NULL) return;
fwrite(M.elements,sizeof(float),M.width*M.height,fp);
fclose(fp);
}
| ad18c8609ab10b3e457d4e6990d61772abae9908.cu | /*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* This software and the information contained herein is PROPRIETARY and
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
* conditions of a Non-Disclosure Agreement. Any reproduction or
* disclosure to any third party without the express written consent of
* NVIDIA is prohibited.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
/* 2D Convolution: C = A (*) B, A is the 5x5 kernel matrix, B is the image matrix.
* Host code.
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
//#include <cutil.h>
// includes, kernels
#include <2Dconvolution_kernel.cu>
////////////////////////////////////////////////////////////////////////////////
// declarations, forward
extern "C"
void computeGold(float*, const float*, const float*, unsigned int, unsigned int);
Matrix AllocateDeviceMatrix(const Matrix M);
Matrix AllocateMatrix(int height, int width, int init);
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost);
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice);
int ReadFile(Matrix* M, char* file_name);
void WriteFile(Matrix M, char* file_name);
void FreeDeviceMatrix(Matrix* M);
void FreeMatrix(Matrix* M);
void ConvolutionOnDevice(const Matrix A, const Matrix B, Matrix C);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char** argv) {
Matrix A;
Matrix B;
Matrix C;
srand(2012);
if(argc != 5 && argc != 4)
{
// Allocate and initialize the matrices
A = AllocateMatrix(KERNEL_SIZE, KERNEL_SIZE, 1);
B = AllocateMatrix((rand() % 1024) + 1, (rand() % 1024) + 1, 1);
C = AllocateMatrix(B.height, B.width, 0);
}
else
{
// Allocate and read in matrices from disk
int* params = NULL;
int data_read = 0;
//cutReadFilei(argv[1], ¶ms, &data_read, true);
FILE *fp=fopen(argv[1],"rb");
if (fp==NULL)
{
printf("Nothing in the file\n");
return -1;
}
int Num_read = 0;
while (!feof(fp)) {
//printf("Nothing in the file\n");
//fread(&(params[data_read++]),sizeof(int),1,fp);
//printf("%f\n",params[data_read] );
fscanf (fp, "%d", &data_read);
Num_read += 1;
if(Num_read == 2)
{break;}
}
fclose(fp);
/*
printf("Liang Xu\n");
params[0] = (int)data_read;
params[1] = (int)data_read;
if(data_read != 2) {
printf("Error reading parameter file\n");
return 1;
}
*/
A = AllocateMatrix(KERNEL_SIZE, KERNEL_SIZE, 0);
(void)ReadFile(&A, argv[2]);
B = AllocateMatrix(data_read, data_read, 0);
C = AllocateMatrix(data_read, data_read, 0);
(void)ReadFile(&B, argv[3]);
}
// Convolution on the device
ConvolutionOnDevice(A, B, C);
// compute the matrix multiplication on the CPU for comparison
Matrix reference = AllocateMatrix(C.height, C.width, 0);
computeGold(reference.elements, A.elements, B.elements, B.height, B.width);
// in this case check if the result is equivalent to the expected soluion
//CUTBoolean res = cutComparefe(reference.elements, C.elements, C.width * C.height, 0.001f);
//printf("Test %s\n", (1 == res) ? "PASSED" : "FAILED");
bool res=true;
for (int i=0; i<C.width * C.height; i++)
if (fabs(reference.elements[i]-C.elements[i])>0.0001f) {
printf("Found Different\n");
printf("Point [%d] is different\n",i );
printf("reference value is [%f]\n",reference.elements[i]);
printf("GPU value is [%f]\n\n",C.elements[i]);
res=false;
break;
}
printf("Test %s\n", (true == res) ? "PASSED" : "FAILED");
if(argc == 5)
{
WriteFile(C, argv[4]);
}
else if(argc == 2)
{
WriteFile(C, argv[1]);
}
// Free matrices
FreeMatrix(&A);
FreeMatrix(&B);
FreeMatrix(&C);
return 0;
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
void ConvolutionOnDevice(const Matrix A, const Matrix B, Matrix C)
{
// Load A and B to the device
//Matrix Ad = AllocateDeviceMatrix(A);
//CopyToDeviceMatrix(Ad, A);
Matrix Bd = AllocateDeviceMatrix(B);
CopyToDeviceMatrix(Bd, B);
// Allocate P on the device
Matrix Cd = AllocateDeviceMatrix(C);
CopyToDeviceMatrix(Cd, C); // Clear memory
cudaMemcpyToSymbol(Kernel, A.elements, KERNEL_SIZE*KERNEL_SIZE*sizeof(float));
//Setup the execution configuration
//Launch the device computation threads!
int blocks = B.height;
int threads = B.width;
cudaEvent_t start, stop;
float elapsedTime=0.0f;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
ConvolutionKernel<<<blocks, threads>>>(Bd,Cd);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
printf("The execution time of GPU is :%f\n",elapsedTime);
// Read C from the device
CopyFromDeviceMatrix(C, Cd);
// Free device matrices
//FreeDeviceMatrix(&Ad);
FreeDeviceMatrix(&Bd);
FreeDeviceMatrix(&Cd);
}
// Allocate a device matrix of same size as M.
Matrix AllocateDeviceMatrix(const Matrix M)
{
Matrix Mdevice = M;
int size = M.width * M.height * sizeof(float);
cudaMalloc((void**)&Mdevice.elements, size);
return Mdevice;
}
// Allocate a device matrix of dimensions height*width
// If init == 0, initialize to all zeroes.
// If init == 1, perform random initialization.
// If init == 2, initialize matrix parameters, but do not allocate memory
Matrix AllocateMatrix(int height, int width, int init)
{
Matrix M;
M.width = M.pitch = width;
M.height = height;
int size = M.width * M.height;
M.elements = NULL;
// don't allocate memory on option 2
if(init == 2)
return M;
M.elements = (float*) malloc(size*sizeof(float));
for(unsigned int i = 0; i < M.height * M.width; i++)
{
M.elements[i] = (init == 0) ? (0.0f) : (rand() / (float)RAND_MAX);
if(rand() % 2)
M.elements[i] = -M.elements[i];
}
return M;
}
// Copy a host matrix to a device matrix.
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost)
{
int size = Mhost.width * Mhost.height * sizeof(float);
Mdevice.height = Mhost.height;
Mdevice.width = Mhost.width;
Mdevice.pitch = Mhost.pitch;
cudaMemcpy(Mdevice.elements, Mhost.elements, size,
cudaMemcpyHostToDevice);
}
// Copy a device matrix to a host matrix.
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice)
{
int size = Mdevice.width * Mdevice.height * sizeof(float);
cudaMemcpy(Mhost.elements, Mdevice.elements, size,
cudaMemcpyDeviceToHost);
}
// Free a device matrix.
void FreeDeviceMatrix(Matrix* M)
{
cudaFree(M->elements);
M->elements = NULL;
}
// Free a host Matrix
void FreeMatrix(Matrix* M)
{
free(M->elements);
M->elements = NULL;
}
int ReadFile(Matrix* M, char* file_name)
{
unsigned int i=0;
//cutReadFilef(file_name, &(M->elements), &data_read, true);
FILE *fp=fopen(file_name,"rb");
float temp;
if (fp==NULL) return -1;
int limit = M->width * M->height;
while (!feof(fp)) {
fscanf (fp, "%f", &temp);
M->elements[i] = temp;
if(i = limit )
{
break;
}
//fread(&(M->elements[i]),sizeof(float),1,fp);
//printf("Element is %f\n",M->elements[i] );
i++;
}
fclose(fp);
return i;
}
void WriteFile(Matrix M, char* file_name)
{
//cutWriteFilef(file_name, M.elements, M.width*M.height,
//0.0001f);
FILE *fp=fopen(file_name,"wb");
if (fp==NULL) return;
fwrite(M.elements,sizeof(float),M.width*M.height,fp);
fclose(fp);
}
|
09862f0ac9269318d9481d713afecd9b9f3a706e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <vector>
__global__ void vecmabite( int *out, int *in, int threads, std::size_t size )
{
auto tid_x = threadIdx.x;
auto tid_b = blockIdx.x;
out[ tid_x + threads * tid_b] = in[ 2 * (tid_x + threads * tid_b) ];
}
int main(int ac, char **av)
{
// Usage: <prog> <len>; fills in[2*len] with 0..2*len-1, gathers every
// second element into out[len] on the GPU, then prints out.
if (ac < 2)
return (-1);
int len = atoi(av[1]);
int * out_d = nullptr;
int * in_d = nullptr;
int thread_max = 0;
int thread_x = 0;
std::vector< int > out( len );
std::vector< int > in( 2 * len );
// Query the device's max-threads-per-block limit.
hipDeviceGetAttribute(&thread_max, hipDeviceAttributeMaxThreadsPerBlock, 0);
// NOTE(review): this branch looks inverted -- it hard-codes 1024 threads
// when the data spans more than one block, instead of clamping to
// thread_max; if thread_max != 1024 the kernel's `threads` stride
// (thread_max is passed below) will not match the launched block size
// (thread_x). Verify intent.
if ((2 * len) / thread_max > 0)
thread_x = 1024;
else
thread_x = thread_max;
for( std::size_t i = 0 ; i < in.size() ; ++i )
{
in[ i ] = i;
}
// NOTE(review): no error checking on any hip* call below; allocation,
// copy, or launch failures would go unnoticed.
hipMalloc( &out_d, out.size() * sizeof( int ) );
hipMalloc( &in_d, in.size() * sizeof( int ) );
hipMemcpy( in_d, in.data(), in.size() * sizeof( int ), hipMemcpyHostToDevice );
// NOTE(review): grid size (2 * len) / thread_max truncates -- it is 0 when
// 2*len < thread_max (an invalid launch), and otherwise spawns threads for
// roughly 2*len elements while `out` only holds len; the kernel must guard
// on `size` (out.size()) or these writes are out of bounds.
hipLaunchKernelGGL(( vecmabite), dim3((2 * len) / thread_max), dim3(thread_x) , 0, 0, out_d, in_d, thread_max, out.size() );
hipMemcpy( out.data(), out_d, out.size() * sizeof( int ), hipMemcpyDeviceToHost );
for( auto const x: out )
{
std::cout << x << std::endl;
}
hipFree( out_d );
hipFree( in_d );
return 0;
} | 09862f0ac9269318d9481d713afecd9b9f3a706e.cu | #include <iostream>
#include <vector>
// Gather every second element of `in` into `out`:
//   out[idx] = in[2 * idx]  for idx in [0, size).
// `threads` is the per-block stride declared by the caller; `size` is the
// number of valid output elements.
__global__ void vecmabite( int *out, int *in, int threads, std::size_t size )
{
    // Flat global index for this thread.
    std::size_t idx = threadIdx.x + static_cast<std::size_t>(threads) * blockIdx.x;
    // Fix: guard against excess threads. The host sizes the grid from 2*len
    // while `out` only holds `size` (= len) elements, so the previously
    // unguarded write (the `size` parameter went unused) was out of bounds.
    if (idx < size)
        out[idx] = in[2 * idx];
}
int main(int ac, char **av)
{
// Usage: <prog> <len>; fills in[2*len] with 0..2*len-1, gathers every
// second element into out[len] on the GPU, then prints out.
if (ac < 2)
return (-1);
int len = atoi(av[1]);
int * out_d = nullptr;
int * in_d = nullptr;
int thread_max = 0;
int thread_x = 0;
std::vector< int > out( len );
std::vector< int > in( 2 * len );
// Query the device's max-threads-per-block limit.
cudaDeviceGetAttribute(&thread_max, cudaDevAttrMaxThreadsPerBlock, 0);
// NOTE(review): this branch looks inverted -- it hard-codes 1024 threads
// when the data spans more than one block, instead of clamping to
// thread_max; if thread_max != 1024 the kernel's `threads` stride
// (thread_max is passed below) will not match the launched block size
// (thread_x). Verify intent.
if ((2 * len) / thread_max > 0)
thread_x = 1024;
else
thread_x = thread_max;
for( std::size_t i = 0 ; i < in.size() ; ++i )
{
in[ i ] = i;
}
// NOTE(review): no error checking on any cuda* call below; allocation,
// copy, or launch failures would go unnoticed.
cudaMalloc( &out_d, out.size() * sizeof( int ) );
cudaMalloc( &in_d, in.size() * sizeof( int ) );
cudaMemcpy( in_d, in.data(), in.size() * sizeof( int ), cudaMemcpyHostToDevice );
// NOTE(review): grid size (2 * len) / thread_max truncates -- it is 0 when
// 2*len < thread_max (an invalid launch), and otherwise spawns threads for
// roughly 2*len elements while `out` only holds len; the kernel must guard
// on `size` (out.size()) or these writes are out of bounds.
vecmabite<<< (2 * len) / thread_max, thread_x >>>( out_d, in_d, thread_max, out.size() );
cudaMemcpy( out.data(), out_d, out.size() * sizeof( int ), cudaMemcpyDeviceToHost );
for( auto const x: out )
{
std::cout << x << std::endl;
}
cudaFree( out_d );
cudaFree( in_d );
return 0;
} |
6011f7679ded3f3ebc4452bfd8564de98248b557.hip | // !!! This is a file automatically generated by hipify!!!
/*
* @author: created by ss on 16-11-2.
* @brief: multi-class svm training, prediction, svm with probability output
*
*/
#include <map>
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
#include <hip/hip_runtime_api.h>
#include <hip/hip_runtime_api.h>
#include "svmModel.h"
#include "svmPredictor.h"
#include "multiSmoSolver.h"
#include "multiPredictor.h"
#include "trainClassifier.h"
#include "../svm-shared/HessianIO/deviceHessian.h"
#include "../svm-shared/storageManager.h"
//#include "sigmoidTrainGPUHelper.h"
SvmModel::~SvmModel() {
    // Release every device-side buffer owned by the model. hipFree accepts
    // the pointers as void*, so gather them and free them in a single pass
    // (same pointers, same order as before).
    void *devBuffers[] = {devCoef, devStart, devCount, devProbA, devProbB,
                          devRho, devSVMapVal, devSVMapValSelfDot,
                          devSVMapRowPtr, devSVMapColInd, devSVIndex};
    for (size_t k = 0; k < sizeof(devBuffers) / sizeof(devBuffers[0]); ++k)
        checkCudaErrors(hipFree(devBuffers[k]));
    // The host-side CSR view of the support vectors is heap-allocated in
    // transferToDevice().
    if (svMapCSRMat) delete svMapCSRMat;
}
/*
* @brief: get the classifier id based on i and j.
*/
uint SvmModel::getK(int i, int j) const {
    // Classifiers for ordered pairs (i, j), i < j, are laid out row-major:
    // offset of row i, plus the position of j inside that row.
    const int rowStart = ((nrClass - 1) + (nrClass - i)) * i / 2;
    return rowStart + (j - i - 1);
}
void SvmModel::fit(const SvmProblem &problem, const SVMParam &param) {
// Train nrClass*(nrClass-1)/2 one-vs-one binary classifiers for the given
// problem, flatten the per-classifier support-vector data into prefix
// offsets, then upload everything to the device.
//reset model to fit a new SvmProblem
nrClass = problem.getNumOfClasses();
cnr2 = (nrClass) * (nrClass - 1) / 2;
numOfSVs = 0;
numOfFeatures = 0;
coef.clear();
rho.clear();
probA.clear();
probB.clear();
svIndex.clear();
svMap.clear();
label.clear();
start.clear();
count.clear();
probability = false;
// One entry per binary (i, j) classifier; indexed via getK(i, j).
coef.resize(cnr2);
rho.resize(cnr2);
probA.resize(cnr2);
probB.resize(cnr2);
svIndex.resize(cnr2);
this->param = param;
label = problem.label;
numOfFeatures = problem.getNumOfFeatures();
MultiSmoSolver multiSmoSolver(problem,*this,param);
multiSmoSolver.solve();
// Build prefix offsets (start) and sizes (count) so each classifier's
// coefficients/SV indices can live in one flat device array.
int _start = 0;
for (int i = 0; i < cnr2; ++i) {
start.push_back(_start);
count.push_back(svIndex[i].size());
_start += count[i];
}
transferToDevice();
}
/**
 * @brief: copy the trained model (support vectors in CSR form, flattened
 * coefficients, per-classifier offsets/counts, sigmoid parameters and
 * biases) from host memory to the device for GPU prediction.
 */
void SvmModel::transferToDevice() {
    //convert svMap to csr matrix then copy it to device
    svMapCSRMat = new CSRMatrix(svMap,numOfFeatures);
    int nnz = svMapCSRMat->getNnz();
    checkCudaErrors(hipMalloc((void **) &devSVMapVal, sizeof(float_point) * nnz));
    checkCudaErrors(hipMalloc((void **) &devSVMapValSelfDot, sizeof(float_point) * svMapCSRMat->getNumOfSamples()));
    checkCudaErrors(hipMalloc((void **) &devSVMapRowPtr, sizeof(int) * (svMapCSRMat->getNumOfSamples() + 1)));
    checkCudaErrors(hipMalloc((void **) &devSVMapColInd, sizeof(int) * nnz));
    checkCudaErrors(
            hipMemcpy(devSVMapVal, svMapCSRMat->getCSRVal(), sizeof(float_point) * nnz, hipMemcpyHostToDevice));
    checkCudaErrors(
            hipMemcpy(devSVMapValSelfDot, svMapCSRMat->getCSRValSelfDot(),
                      sizeof(float_point) * svMapCSRMat->getNumOfSamples(), hipMemcpyHostToDevice));
    checkCudaErrors(
            hipMemcpy(devSVMapRowPtr, svMapCSRMat->getCSRRowPtr(), sizeof(int) * (svMapCSRMat->getNumOfSamples() + 1),
                      hipMemcpyHostToDevice));
    checkCudaErrors(hipMemcpy(devSVMapColInd, svMapCSRMat->getCSRColInd(), sizeof(int) * nnz, hipMemcpyHostToDevice));
    //flat svIndex then copy in to device
    checkCudaErrors(hipMalloc((void **) &devSVIndex, sizeof(int) * numOfSVs));
    for (int i = 0; i < cnr2; ++i) {
        checkCudaErrors(hipMemcpy(devSVIndex + start[i], svIndex[i].data(), sizeof(int) * svIndex[i].size(),
                                  hipMemcpyHostToDevice));
    }
    checkCudaErrors(hipMalloc((void **) &devCoef, sizeof(float_point) * numOfSVs));
    //Fix: devStart and devCount hold int offsets/counts and are copied with
    //sizeof(int) below; they were previously allocated with sizeof(float_point),
    //which only worked because float_point is at least as large as int.
    checkCudaErrors(hipMalloc((void **) &devStart, sizeof(int) * cnr2));
    checkCudaErrors(hipMalloc((void **) &devCount, sizeof(int) * cnr2));
    checkCudaErrors(hipMalloc((void **) &devProbA, sizeof(float_point) * cnr2));
    checkCudaErrors(hipMalloc((void **) &devProbB, sizeof(float_point) * cnr2));
    checkCudaErrors(hipMalloc((void **) &devRho, sizeof(float_point) * cnr2));
    //scatter each classifier's coefficients into the flat device array
    for (int i = 0; i < cnr2; ++i) {
        checkCudaErrors(hipMemcpy(devCoef + start[i], coef[i].data(), sizeof(float_point) * count[i],
                                  hipMemcpyHostToDevice));
    }
    checkCudaErrors(hipMemcpy(devProbA, probA.data(), sizeof(float_point) * cnr2, hipMemcpyHostToDevice));
    checkCudaErrors(hipMemcpy(devProbB, probB.data(), sizeof(float_point) * cnr2, hipMemcpyHostToDevice));
    checkCudaErrors(hipMemcpy(devStart, start.data(), sizeof(int) * cnr2, hipMemcpyHostToDevice));
    checkCudaErrors(hipMemcpy(devCount, count.data(), sizeof(int) * cnr2, hipMemcpyHostToDevice));
    checkCudaErrors(hipMemcpy(devRho, rho.data(), sizeof(float_point) * cnr2, hipMemcpyHostToDevice));
}
//void SvmModel::gpu_sigmoid_train(
// int l, const float_point *dec_values, const float_point *labels,
// float_point& A, float_point& B)
//{
//
// float_point prior1, prior0 ;
// int max_iter=100; // Maximal number of iterations
// float_point min_step=1e-10; // Minimal step taken in line search
// float_point sigma=1e-12; // For numerically strict PD of Hessian
// float_point eps=1e-5;
// float_point hiTarget=(prior1+1.0)/(prior1+2.0);
// float_point loTarget=1/(prior0+2.0);
// float_point fApB,g1,g2,gd,stepsize;
// float_point newA,newB,newf;
// int iter;
// float_point fval = 0.0;
// // Initial Point and Initial Fun Value
// A=0.0; B=log((prior0+1.0)/(prior1+1.0));
//
// int blocknum=(l+THREAD_NUM-1)/THREAD_NUM;
//
// hipStream_t stream[2];
// for(int i = 0;i < 2;i ++)
// hipStreamCreate(&stream[i]);
//
// float_point *dev_prior1,*dev_prior0;
// float_point *dev_labels,*dev_t,*dev_dec_values;
// float_point *dev_fApB,*dev_fval,*dev_sum,*dev_d1,*dev_d2,*dev_g1,*dev_h11,*dev_h21,*dev_p,*dev_q;
// float_point *dev_det,*dev_dA,*dev_dB,*dev_gd,*dev_newf;
// float_point *dev_newA,*dev_newB;
//
// checkCudaErrors(hipMalloc((void**)&dev_sum,sizeof(float_point)*blocknum));
// checkCudaErrors(hipMalloc((void**)&dev_newA,sizeof(float_point)));
// checkCudaErrors(hipMalloc((void**)&dev_newB,sizeof(float_point)));
// checkCudaErrors(hipMalloc((void**)&dev_fApB,sizeof(float_point)*l));
// checkCudaErrors(hipMalloc((void**)&dev_fval,sizeof(float_point)*(blocknum+1)*THREAD_NUM));
// checkCudaErrors(hipMalloc((void**)&dev_labels,sizeof(float_point)*l));
// checkCudaErrors(hipMalloc((void**)&dev_t,sizeof(float_point)*l));
// checkCudaErrors(hipMalloc((void**)&dev_dec_values,sizeof(float_point)*l));
// checkCudaErrors(hipMalloc((void**)&dev_p,sizeof(float_point)*l));
// checkCudaErrors(hipMalloc((void**)&dev_q,sizeof(float_point)*l));
// checkCudaErrors(hipMalloc((void**)&dev_d1,sizeof(float_point)*(blocknum+1)*THREAD_NUM));
// checkCudaErrors(hipMalloc((void**)&dev_d2,sizeof(float_point)*(blocknum+1)*THREAD_NUM));
// checkCudaErrors(hipMalloc((void**)&dev_g1,sizeof(float_point)*(blocknum+1)*THREAD_NUM));
// checkCudaErrors(hipMalloc((void**)&dev_h11,sizeof(float_point)*(blocknum+1)*THREAD_NUM));
// checkCudaErrors(hipMalloc((void**)&dev_h21,sizeof(float_point)*(blocknum+1)*THREAD_NUM));
// checkCudaErrors(hipMalloc((void**)&dev_det,sizeof(float_point)));
// checkCudaErrors(hipMalloc((void**)&dev_dA,sizeof(float_point)));
// checkCudaErrors(hipMalloc((void**)&dev_dB,sizeof(float_point)));
// checkCudaErrors(hipMalloc((void**)&dev_gd,sizeof(float_point)));
// checkCudaErrors(hipMalloc((void**)&dev_newf,sizeof(float_point)*(blocknum+1)*THREAD_NUM));
// checkCudaErrors(hipMalloc((void**)&dev_prior1,sizeof(float_point)*(blocknum+1)*THREAD_NUM));
// checkCudaErrors(hipMalloc((void**)&dev_prior0,sizeof(float_point)*(blocknum+1)*THREAD_NUM));
//
// checkCudaErrors(hipMemcpy(dev_labels,labels,sizeof(float_point)*l,hipMemcpyHostToDevice));
// checkCudaErrors(hipMemcpy(dev_dec_values,dec_values,sizeof(float_point)*l,hipMemcpyHostToDevice));
//
// checkCudaErrors(hipMemset(dev_fval, 0, sizeof(float_point)*(blocknum+1)*THREAD_NUM));
// checkCudaErrors(hipMemset(dev_h11, 0, sizeof(float_point)*(blocknum+1)*THREAD_NUM));
// checkCudaErrors(hipMemset(dev_h21, 0, sizeof(float_point)*(blocknum+1)*THREAD_NUM));
// checkCudaErrors(hipMemset(dev_d1, 0, sizeof(float_point)*(blocknum+1)*THREAD_NUM));
// checkCudaErrors(hipMemset(dev_d2, 0, sizeof(float_point)*(blocknum+1)*THREAD_NUM));
// checkCudaErrors(hipMemset(dev_g1, 0, sizeof(float_point)*(blocknum+1)*THREAD_NUM));
// checkCudaErrors(hipMemset(dev_newf, 0, sizeof(float_point)*(blocknum+1)*THREAD_NUM));
//
// dev_getprior<<<blocknum+1,THREAD_NUM>>>(dev_labels,l,dev_prior1,dev_prior0);
// dev_paral_red_sum<<<blocknum,THREAD_NUM,THREAD_NUM*sizeof(float_point)>>>(dev_prior1,dev_sum,l);
// dev_get_sum<<<1,1>>>(dev_sum,dev_prior1,blocknum);
// dev_paral_red_sum<<<blocknum,THREAD_NUM,THREAD_NUM*sizeof(float_point)>>>(dev_prior0,dev_sum,l);
// dev_get_sum<<<1,1>>>(dev_sum,dev_prior0,blocknum);
//
// checkCudaErrors(hipMemcpy(&prior1,dev_prior1,sizeof(float_point),hipMemcpyDeviceToHost));
// checkCudaErrors(hipMemcpy(&prior0,dev_prior0,sizeof(float_point),hipMemcpyDeviceToHost));
//
// //get fApB,fval
// dev_getfApB_fval<<<blocknum,THREAD_NUM>>>(dev_fval,dev_labels,dev_t,dev_dec_values,dev_fApB,A,B,hiTarget,loTarget,l);
// dev_paral_red_sum<<<blocknum,THREAD_NUM,THREAD_NUM*sizeof(float_point)>>>(dev_fval,dev_sum,l);
// dev_get_sum<<<1,1>>>(dev_sum,dev_fval,blocknum);//dev_get_fval_sum<<<1,1>>>(dev_fval);
//
// checkCudaErrors(hipFree(dev_labels));
// for (iter=0;iter<max_iter;iter++)
// {
//
// if(iter>0)
// //update newA,newB
// dev_getfApB<<<blocknum,THREAD_NUM>>>(l,dev_fApB,dev_dec_values,A,B);
// //get p q
// dev_getpq<<<blocknum,THREAD_NUM>>>(l,dev_t,dev_fApB,dev_p,dev_q,dev_d1,dev_d2,dev_h11,dev_h21,dev_g1,dev_dec_values);
// //get h11
// dev_paral_red_sum<<<blocknum,THREAD_NUM,THREAD_NUM*sizeof(float_point)>>>(dev_h11,dev_sum,l);
// dev_get_sum<<<1,1>>>(dev_sum,dev_h11,blocknum);
// //get h21
// dev_paral_red_sum<<<blocknum,THREAD_NUM,THREAD_NUM*sizeof(float_point)>>>(dev_h21,dev_sum,l);
// dev_get_sum<<<1,1>>>(dev_sum,dev_h21,blocknum);
// //get d2\h22
// dev_paral_red_sum<<<blocknum,THREAD_NUM,THREAD_NUM*sizeof(float_point)>>>(dev_d2,dev_sum,l);
// dev_get_sum<<<1,1>>>(dev_sum,dev_d2,blocknum);//d2[0]=h22
// //get g1
// dev_paral_red_sum<<<blocknum,THREAD_NUM,THREAD_NUM*sizeof(float_point)>>>(dev_g1,dev_sum,l);
// dev_get_sum<<<1,1>>>(dev_sum,dev_g1,blocknum);
// //get d1\g2
// dev_paral_red_sum<<<blocknum,THREAD_NUM,THREAD_NUM*sizeof(float_point)>>>(dev_d1,dev_sum,l);
// dev_get_sum<<<1,1>>>(dev_sum,dev_d1,blocknum);//d1[0]=g2
//
// checkCudaErrors(hipMemcpy(&g1,dev_g1,sizeof(float_point),hipMemcpyDeviceToHost));
// checkCudaErrors(hipMemcpy(&g2,dev_d1,sizeof(float_point),hipMemcpyDeviceToHost));
// // Stopping Criteria
// if (fabs(g1)<eps && fabs(g2)<eps)
// break;
//
//
// // Finding Newton direction: -inv(H') * g
// dev_get_det<<<1,1>>>(sigma,dev_h11,dev_d2,dev_h21,dev_det);
// //?????????????
// dev_getdA<<<1,1,0,stream[0]>>>(dev_dA,dev_det,dev_d2,dev_h21,dev_g1,dev_d1);
// dev_getdB<<<1,1,0,stream[1]>>>(dev_dB,dev_det,dev_h11,dev_h21,dev_g1,dev_d1);
// dev_getgd<<<1,1>>>(dev_gd,dev_dA,dev_dB,dev_g1,dev_d1);
//
// stepsize = 1; // Line Search
//
// while (stepsize >= min_step)
// {
// //update newA newB
// dev_updateAB<<<1,2>>>(dev_newA,dev_newB,A,B,stepsize,dev_dA,dev_dB);
//
// // New function value
// dev_getnewfApB<<<blocknum,THREAD_NUM,THREAD_NUM*sizeof(float_point)>>>(l,dev_fApB,dev_dec_values,dev_newA,dev_newB);
// dev_getnewf<<<blocknum,THREAD_NUM>>>(l,dev_fApB,dev_t,dev_newf);
// dev_paral_red_sum<<<blocknum,THREAD_NUM,THREAD_NUM*sizeof(float_point)>>>(dev_newf,dev_sum,l);//more block?
// dev_get_sum<<<1,1>>>(dev_sum,dev_newf,blocknum);
//
// // Check sufficient decrease
// checkCudaErrors(hipMemcpy(&newf,dev_newf,sizeof(float_point),hipMemcpyDeviceToHost));
// checkCudaErrors(hipMemcpy(&fval,dev_fval,sizeof(float_point),hipMemcpyDeviceToHost));
// checkCudaErrors(hipMemcpy(&gd,dev_gd,sizeof(float_point),hipMemcpyDeviceToHost));
// if (newf<fval+0.0001*(float_point)stepsize*gd)
// {
// hipMemcpy(&A,dev_newA,sizeof(float_point),hipMemcpyDeviceToHost);
// hipMemcpy(&B,dev_newB,sizeof(float_point),hipMemcpyDeviceToHost);
// fval=newf;
// break;
// }
// else
// stepsize = stepsize / 2.0;
// }
//
// if (stepsize < min_step)
// {
// info("Line search fails in two-class probability estimates\n");
// break;
// }
// }
//
// if (iter>=max_iter)
// info("Reaching maximal iterations in two-class probability estimates\n");
//
// checkCudaErrors(hipFree(dev_newA));
// checkCudaErrors(hipFree(dev_newB));
// checkCudaErrors(hipFree(dev_fApB));
// checkCudaErrors(hipFree(dev_fval));
// checkCudaErrors(hipFree(dev_dec_values));
// checkCudaErrors(hipFree(dev_det));
// checkCudaErrors(hipFree(dev_dA));
// checkCudaErrors(hipFree(dev_dB));
// checkCudaErrors(hipFree(dev_gd));
// checkCudaErrors(hipFree(dev_newf));
// checkCudaErrors(hipFree(dev_t));
// checkCudaErrors(hipFree(dev_d1));
// checkCudaErrors(hipFree(dev_d2));
// checkCudaErrors(hipFree(dev_g1));
// checkCudaErrors(hipFree(dev_h11));
// checkCudaErrors(hipFree(dev_h21));
// checkCudaErrors(hipFree(dev_p));
// checkCudaErrors(hipFree(dev_q));
// checkCudaErrors(hipFree(dev_sum));
// checkCudaErrors(hipFree(dev_prior1));
// checkCudaErrors(hipFree(dev_prior0));
//}
/**
 * @brief: fit a sigmoid 1/(1+exp(A*f+B)) mapping decision values to class
 * probabilities (Platt scaling; Newton's method with a backtracking line
 * search, cf. LIBSVM's sigmoid_train).
 * @param decValues: decision values f_i produced by the binary classifier
 * @param l: number of decision values / labels
 * @param labels: class labels; entries > 0 count as the positive class
 * @param A,B: output sigmoid parameters (written in place)
 */
void SvmModel::sigmoidTrain(const float_point *decValues, const int l, const vector<int> &labels, float_point &A,
float_point &B) {
// Count positive and negative training examples.
double prior1 = 0, prior0 = 0;
int i;
for (i = 0; i < l; i++)
if (labels[i] > 0)
prior1 += 1;
else
prior0 += 1;
int max_iter = 100; // Maximal number of iterations
double min_step = 1e-10; // Minimal step taken in line search
double sigma = 1e-12; // For numerically strict PD of Hessian
double eps = 1e-5;
// Regularized target probabilities for the positive/negative class.
double hiTarget = (prior1 + 1.0) / (prior1 + 2.0);
double loTarget = 1 / (prior0 + 2.0);
double *t = (double *) malloc(sizeof(double) * l);
double fApB, p, q, h11, h22, h21, g1, g2, det, dA, dB, gd, stepsize;
double newA, newB, newf, d1, d2;
int iter;
// Initial Point and Initial Fun Value
A = 0.0;
B = log((prior0 + 1.0) / (prior1 + 1.0));
double fval = 0.0;
// Objective at the starting point; the branch on the sign of fApB keeps
// exp() from overflowing.
for (i = 0; i < l; i++) {
if (labels[i] > 0)
t[i] = hiTarget;
else
t[i] = loTarget;
fApB = decValues[i] * A + B;
if (fApB >= 0)
fval += t[i] * fApB + log(1 + exp(-fApB));
else
fval += (t[i] - 1) * fApB + log(1 + exp(fApB));
}
// Newton iterations on the 2-parameter objective.
for (iter = 0; iter < max_iter; iter++) {
// Update Gradient and Hessian (use H' = H + sigma I)
h11 = sigma; // numerically ensures strict PD
h22 = sigma;
h21 = 0.0;
g1 = 0.0;
g2 = 0.0;
for (i = 0; i < l; i++) {
fApB = decValues[i] * A + B;
if (fApB >= 0) {
p = exp(-fApB) / (1.0 + exp(-fApB));
q = 1.0 / (1.0 + exp(-fApB));
} else {
p = 1.0 / (1.0 + exp(fApB));
q = exp(fApB) / (1.0 + exp(fApB));
}
d2 = p * q;
h11 += decValues[i] * decValues[i] * d2;
h22 += d2;
h21 += decValues[i] * d2;
d1 = t[i] - p;
g1 += decValues[i] * d1;
g2 += d1;
}
// Stopping Criteria
if (fabs(g1) < eps && fabs(g2) < eps)
break;
// Finding Newton direction: -inv(H') * g
det = h11 * h22 - h21 * h21;
dA = -(h22 * g1 - h21 * g2) / det;
dB = -(-h21 * g1 + h11 * g2) / det;
gd = g1 * dA + g2 * dB;
stepsize = 1; // Line Search
// Backtracking: halve the step until sufficient decrease is achieved.
while (stepsize >= min_step) {
newA = A + stepsize * dA;
newB = B + stepsize * dB;
// New function value
newf = 0.0;
for (i = 0; i < l; i++) {
fApB = decValues[i] * newA + newB;
if (fApB >= 0)
newf += t[i] * fApB + log(1 + exp(-fApB));
else
newf += (t[i] - 1) * fApB + log(1 + exp(fApB));
}
// Check sufficient decrease
if (newf < fval + 0.0001 * stepsize * gd) {
A = newA;
B = newB;
fval = newf;
break;
} else
stepsize = stepsize / 2.0;
}
if (stepsize < min_step) {
printf("Line search fails in two-class probability estimates\n");
break;
}
}
if (iter >= max_iter)
printf(
"Reaching maximal iterations in two-class probability estimates\n");
free(t);
}
/**
*@brief: add a binary svm model to the multi-class svm model.
**/
/**
 * @brief: record one trained binary (i, j) classifier: its coefficients,
 * bias (rho) and support vectors, de-duplicating SV instances that are
 * shared between classifiers via indexMap.
 */
void SvmModel::addBinaryModel(const SvmProblem &problem, const vector<int> &svLocalIndex, const vector<float_point> &coef,
float_point rho, int i, int j) {
// NOTE(review): indexMap is function-static, so it persists across fit()
// calls and is shared by every SvmModel instance; refitting a model could
// reuse stale global-index -> svMap-slot entries. Verify this is intended.
static map<int, int> indexMap;
int k = getK(i, j);
this->coef[k] = coef;
for (int l = 0; l < svLocalIndex.size(); ++l) {
//map SV local index to the instance index (global index) in the whole training set
int originalIndex = problem.originalIndex[svLocalIndex[l]];
if (indexMap.find(originalIndex) != indexMap.end()) {//instance of this sv has been stored in svMap
} else {
indexMap[originalIndex] = svMap.size();//key is SV's global index; value is the id (in the map) for this SV instance.
svMap.push_back(problem.v_vSamples[svLocalIndex[l]]);
}
this->svIndex[k].push_back(indexMap[originalIndex]);//svIndex is the id in the map.
}
this->rho[k] = rho;
numOfSVs += svLocalIndex.size();
}
// @brief: whether probability estimation is enabled for this model.
// NOTE: fit() in this file always resets the flag to false.
bool SvmModel::isProbability() const {
return probability;
}
| 6011f7679ded3f3ebc4452bfd8564de98248b557.cu | /*
* @author: created by ss on 16-11-2.
* @brief: multi-class svm training, prediction, svm with probability output
*
*/
#include <map>
#include <cuda.h>
#include <helper_cuda.h>
#include <cuda_runtime_api.h>
#include <cuda_profiler_api.h>
#include "svmModel.h"
#include "svmPredictor.h"
#include "multiSmoSolver.h"
#include "multiPredictor.h"
#include "trainClassifier.h"
#include "../svm-shared/HessianIO/deviceHessian.h"
#include "../svm-shared/storageManager.h"
//#include "sigmoidTrainGPUHelper.h"
SvmModel::~SvmModel() {
    // Release every device-side buffer owned by the model. cudaFree accepts
    // the pointers as void*, so gather them and free them in a single pass
    // (same pointers, same order as before).
    void *devBuffers[] = {devCoef, devStart, devCount, devProbA, devProbB,
                          devRho, devSVMapVal, devSVMapValSelfDot,
                          devSVMapRowPtr, devSVMapColInd, devSVIndex};
    for (size_t k = 0; k < sizeof(devBuffers) / sizeof(devBuffers[0]); ++k)
        checkCudaErrors(cudaFree(devBuffers[k]));
    // The host-side CSR view of the support vectors is heap-allocated in
    // transferToDevice().
    if (svMapCSRMat) delete svMapCSRMat;
}
/*
* @brief: get the classifier id based on i and j.
*/
uint SvmModel::getK(int i, int j) const {
    // Classifiers for ordered pairs (i, j), i < j, are laid out row-major:
    // offset of row i, plus the position of j inside that row.
    const int rowStart = ((nrClass - 1) + (nrClass - i)) * i / 2;
    return rowStart + (j - i - 1);
}
void SvmModel::fit(const SvmProblem &problem, const SVMParam &param) {
// Train nrClass*(nrClass-1)/2 one-vs-one binary classifiers for the given
// problem, flatten the per-classifier support-vector data into prefix
// offsets, then upload everything to the device.
//reset model to fit a new SvmProblem
nrClass = problem.getNumOfClasses();
cnr2 = (nrClass) * (nrClass - 1) / 2;
numOfSVs = 0;
numOfFeatures = 0;
coef.clear();
rho.clear();
probA.clear();
probB.clear();
svIndex.clear();
svMap.clear();
label.clear();
start.clear();
count.clear();
probability = false;
// One entry per binary (i, j) classifier; indexed via getK(i, j).
coef.resize(cnr2);
rho.resize(cnr2);
probA.resize(cnr2);
probB.resize(cnr2);
svIndex.resize(cnr2);
this->param = param;
label = problem.label;
numOfFeatures = problem.getNumOfFeatures();
MultiSmoSolver multiSmoSolver(problem,*this,param);
multiSmoSolver.solve();
// Build prefix offsets (start) and sizes (count) so each classifier's
// coefficients/SV indices can live in one flat device array.
int _start = 0;
for (int i = 0; i < cnr2; ++i) {
start.push_back(_start);
count.push_back(svIndex[i].size());
_start += count[i];
}
transferToDevice();
}
/**
 * @brief: copy the trained model (support vectors in CSR form, flattened
 * coefficients, per-classifier offsets/counts, sigmoid parameters and
 * biases) from host memory to the device for GPU prediction.
 */
void SvmModel::transferToDevice() {
    //convert svMap to csr matrix then copy it to device
    svMapCSRMat = new CSRMatrix(svMap,numOfFeatures);
    int nnz = svMapCSRMat->getNnz();
    checkCudaErrors(cudaMalloc((void **) &devSVMapVal, sizeof(float_point) * nnz));
    checkCudaErrors(cudaMalloc((void **) &devSVMapValSelfDot, sizeof(float_point) * svMapCSRMat->getNumOfSamples()));
    checkCudaErrors(cudaMalloc((void **) &devSVMapRowPtr, sizeof(int) * (svMapCSRMat->getNumOfSamples() + 1)));
    checkCudaErrors(cudaMalloc((void **) &devSVMapColInd, sizeof(int) * nnz));
    checkCudaErrors(
            cudaMemcpy(devSVMapVal, svMapCSRMat->getCSRVal(), sizeof(float_point) * nnz, cudaMemcpyHostToDevice));
    checkCudaErrors(
            cudaMemcpy(devSVMapValSelfDot, svMapCSRMat->getCSRValSelfDot(),
                       sizeof(float_point) * svMapCSRMat->getNumOfSamples(), cudaMemcpyHostToDevice));
    checkCudaErrors(
            cudaMemcpy(devSVMapRowPtr, svMapCSRMat->getCSRRowPtr(), sizeof(int) * (svMapCSRMat->getNumOfSamples() + 1),
                       cudaMemcpyHostToDevice));
    checkCudaErrors(cudaMemcpy(devSVMapColInd, svMapCSRMat->getCSRColInd(), sizeof(int) * nnz, cudaMemcpyHostToDevice));
    //flat svIndex then copy in to device
    checkCudaErrors(cudaMalloc((void **) &devSVIndex, sizeof(int) * numOfSVs));
    for (int i = 0; i < cnr2; ++i) {
        checkCudaErrors(cudaMemcpy(devSVIndex + start[i], svIndex[i].data(), sizeof(int) * svIndex[i].size(),
                                   cudaMemcpyHostToDevice));
    }
    checkCudaErrors(cudaMalloc((void **) &devCoef, sizeof(float_point) * numOfSVs));
    //Fix: devStart and devCount hold int offsets/counts and are copied with
    //sizeof(int) below; they were previously allocated with sizeof(float_point),
    //which only worked because float_point is at least as large as int.
    checkCudaErrors(cudaMalloc((void **) &devStart, sizeof(int) * cnr2));
    checkCudaErrors(cudaMalloc((void **) &devCount, sizeof(int) * cnr2));
    checkCudaErrors(cudaMalloc((void **) &devProbA, sizeof(float_point) * cnr2));
    checkCudaErrors(cudaMalloc((void **) &devProbB, sizeof(float_point) * cnr2));
    checkCudaErrors(cudaMalloc((void **) &devRho, sizeof(float_point) * cnr2));
    //scatter each classifier's coefficients into the flat device array
    for (int i = 0; i < cnr2; ++i) {
        checkCudaErrors(cudaMemcpy(devCoef + start[i], coef[i].data(), sizeof(float_point) * count[i],
                                   cudaMemcpyHostToDevice));
    }
    checkCudaErrors(cudaMemcpy(devProbA, probA.data(), sizeof(float_point) * cnr2, cudaMemcpyHostToDevice));
    checkCudaErrors(cudaMemcpy(devProbB, probB.data(), sizeof(float_point) * cnr2, cudaMemcpyHostToDevice));
    checkCudaErrors(cudaMemcpy(devStart, start.data(), sizeof(int) * cnr2, cudaMemcpyHostToDevice));
    checkCudaErrors(cudaMemcpy(devCount, count.data(), sizeof(int) * cnr2, cudaMemcpyHostToDevice));
    checkCudaErrors(cudaMemcpy(devRho, rho.data(), sizeof(float_point) * cnr2, cudaMemcpyHostToDevice));
}
//void SvmModel::gpu_sigmoid_train(
// int l, const float_point *dec_values, const float_point *labels,
// float_point& A, float_point& B)
//{
//
// float_point prior1, prior0 ;
// int max_iter=100; // Maximal number of iterations
// float_point min_step=1e-10; // Minimal step taken in line search
// float_point sigma=1e-12; // For numerically strict PD of Hessian
// float_point eps=1e-5;
// float_point hiTarget=(prior1+1.0)/(prior1+2.0);
// float_point loTarget=1/(prior0+2.0);
// float_point fApB,g1,g2,gd,stepsize;
// float_point newA,newB,newf;
// int iter;
// float_point fval = 0.0;
// // Initial Point and Initial Fun Value
// A=0.0; B=log((prior0+1.0)/(prior1+1.0));
//
// int blocknum=(l+THREAD_NUM-1)/THREAD_NUM;
//
// cudaStream_t stream[2];
// for(int i = 0;i < 2;i ++)
// cudaStreamCreate(&stream[i]);
//
// float_point *dev_prior1,*dev_prior0;
// float_point *dev_labels,*dev_t,*dev_dec_values;
// float_point *dev_fApB,*dev_fval,*dev_sum,*dev_d1,*dev_d2,*dev_g1,*dev_h11,*dev_h21,*dev_p,*dev_q;
// float_point *dev_det,*dev_dA,*dev_dB,*dev_gd,*dev_newf;
// float_point *dev_newA,*dev_newB;
//
// checkCudaErrors(cudaMalloc((void**)&dev_sum,sizeof(float_point)*blocknum));
// checkCudaErrors(cudaMalloc((void**)&dev_newA,sizeof(float_point)));
// checkCudaErrors(cudaMalloc((void**)&dev_newB,sizeof(float_point)));
// checkCudaErrors(cudaMalloc((void**)&dev_fApB,sizeof(float_point)*l));
// checkCudaErrors(cudaMalloc((void**)&dev_fval,sizeof(float_point)*(blocknum+1)*THREAD_NUM));
// checkCudaErrors(cudaMalloc((void**)&dev_labels,sizeof(float_point)*l));
// checkCudaErrors(cudaMalloc((void**)&dev_t,sizeof(float_point)*l));
// checkCudaErrors(cudaMalloc((void**)&dev_dec_values,sizeof(float_point)*l));
// checkCudaErrors(cudaMalloc((void**)&dev_p,sizeof(float_point)*l));
// checkCudaErrors(cudaMalloc((void**)&dev_q,sizeof(float_point)*l));
// checkCudaErrors(cudaMalloc((void**)&dev_d1,sizeof(float_point)*(blocknum+1)*THREAD_NUM));
// checkCudaErrors(cudaMalloc((void**)&dev_d2,sizeof(float_point)*(blocknum+1)*THREAD_NUM));
// checkCudaErrors(cudaMalloc((void**)&dev_g1,sizeof(float_point)*(blocknum+1)*THREAD_NUM));
// checkCudaErrors(cudaMalloc((void**)&dev_h11,sizeof(float_point)*(blocknum+1)*THREAD_NUM));
// checkCudaErrors(cudaMalloc((void**)&dev_h21,sizeof(float_point)*(blocknum+1)*THREAD_NUM));
// checkCudaErrors(cudaMalloc((void**)&dev_det,sizeof(float_point)));
// checkCudaErrors(cudaMalloc((void**)&dev_dA,sizeof(float_point)));
// checkCudaErrors(cudaMalloc((void**)&dev_dB,sizeof(float_point)));
// checkCudaErrors(cudaMalloc((void**)&dev_gd,sizeof(float_point)));
// checkCudaErrors(cudaMalloc((void**)&dev_newf,sizeof(float_point)*(blocknum+1)*THREAD_NUM));
// checkCudaErrors(cudaMalloc((void**)&dev_prior1,sizeof(float_point)*(blocknum+1)*THREAD_NUM));
// checkCudaErrors(cudaMalloc((void**)&dev_prior0,sizeof(float_point)*(blocknum+1)*THREAD_NUM));
//
// checkCudaErrors(cudaMemcpy(dev_labels,labels,sizeof(float_point)*l,cudaMemcpyHostToDevice));
// checkCudaErrors(cudaMemcpy(dev_dec_values,dec_values,sizeof(float_point)*l,cudaMemcpyHostToDevice));
//
// checkCudaErrors(cudaMemset(dev_fval, 0, sizeof(float_point)*(blocknum+1)*THREAD_NUM));
// checkCudaErrors(cudaMemset(dev_h11, 0, sizeof(float_point)*(blocknum+1)*THREAD_NUM));
// checkCudaErrors(cudaMemset(dev_h21, 0, sizeof(float_point)*(blocknum+1)*THREAD_NUM));
// checkCudaErrors(cudaMemset(dev_d1, 0, sizeof(float_point)*(blocknum+1)*THREAD_NUM));
// checkCudaErrors(cudaMemset(dev_d2, 0, sizeof(float_point)*(blocknum+1)*THREAD_NUM));
// checkCudaErrors(cudaMemset(dev_g1, 0, sizeof(float_point)*(blocknum+1)*THREAD_NUM));
// checkCudaErrors(cudaMemset(dev_newf, 0, sizeof(float_point)*(blocknum+1)*THREAD_NUM));
//
// dev_getprior<<<blocknum+1,THREAD_NUM>>>(dev_labels,l,dev_prior1,dev_prior0);
// dev_paral_red_sum<<<blocknum,THREAD_NUM,THREAD_NUM*sizeof(float_point)>>>(dev_prior1,dev_sum,l);
// dev_get_sum<<<1,1>>>(dev_sum,dev_prior1,blocknum);
// dev_paral_red_sum<<<blocknum,THREAD_NUM,THREAD_NUM*sizeof(float_point)>>>(dev_prior0,dev_sum,l);
// dev_get_sum<<<1,1>>>(dev_sum,dev_prior0,blocknum);
//
// checkCudaErrors(cudaMemcpy(&prior1,dev_prior1,sizeof(float_point),cudaMemcpyDeviceToHost));
// checkCudaErrors(cudaMemcpy(&prior0,dev_prior0,sizeof(float_point),cudaMemcpyDeviceToHost));
//
// //get fApB,fval
// dev_getfApB_fval<<<blocknum,THREAD_NUM>>>(dev_fval,dev_labels,dev_t,dev_dec_values,dev_fApB,A,B,hiTarget,loTarget,l);
// dev_paral_red_sum<<<blocknum,THREAD_NUM,THREAD_NUM*sizeof(float_point)>>>(dev_fval,dev_sum,l);
// dev_get_sum<<<1,1>>>(dev_sum,dev_fval,blocknum);//dev_get_fval_sum<<<1,1>>>(dev_fval);
//
// checkCudaErrors(cudaFree(dev_labels));
// for (iter=0;iter<max_iter;iter++)
// {
//
// if(iter>0)
// //update newA,newB
// dev_getfApB<<<blocknum,THREAD_NUM>>>(l,dev_fApB,dev_dec_values,A,B);
// //get p q
// dev_getpq<<<blocknum,THREAD_NUM>>>(l,dev_t,dev_fApB,dev_p,dev_q,dev_d1,dev_d2,dev_h11,dev_h21,dev_g1,dev_dec_values);
// //get h11
// dev_paral_red_sum<<<blocknum,THREAD_NUM,THREAD_NUM*sizeof(float_point)>>>(dev_h11,dev_sum,l);
// dev_get_sum<<<1,1>>>(dev_sum,dev_h11,blocknum);
// //get h21
// dev_paral_red_sum<<<blocknum,THREAD_NUM,THREAD_NUM*sizeof(float_point)>>>(dev_h21,dev_sum,l);
// dev_get_sum<<<1,1>>>(dev_sum,dev_h21,blocknum);
// //get d2\h22
// dev_paral_red_sum<<<blocknum,THREAD_NUM,THREAD_NUM*sizeof(float_point)>>>(dev_d2,dev_sum,l);
// dev_get_sum<<<1,1>>>(dev_sum,dev_d2,blocknum);//d2[0]=h22
// //get g1
// dev_paral_red_sum<<<blocknum,THREAD_NUM,THREAD_NUM*sizeof(float_point)>>>(dev_g1,dev_sum,l);
// dev_get_sum<<<1,1>>>(dev_sum,dev_g1,blocknum);
// //get d1\g2
// dev_paral_red_sum<<<blocknum,THREAD_NUM,THREAD_NUM*sizeof(float_point)>>>(dev_d1,dev_sum,l);
// dev_get_sum<<<1,1>>>(dev_sum,dev_d1,blocknum);//d1[0]=g2
//
// checkCudaErrors(cudaMemcpy(&g1,dev_g1,sizeof(float_point),cudaMemcpyDeviceToHost));
// checkCudaErrors(cudaMemcpy(&g2,dev_d1,sizeof(float_point),cudaMemcpyDeviceToHost));
// // Stopping Criteria
// if (fabs(g1)<eps && fabs(g2)<eps)
// break;
//
//
// // Finding Newton direction: -inv(H') * g
// dev_get_det<<<1,1>>>(sigma,dev_h11,dev_d2,dev_h21,dev_det);
// //?????????????
// dev_getdA<<<1,1,0,stream[0]>>>(dev_dA,dev_det,dev_d2,dev_h21,dev_g1,dev_d1);
// dev_getdB<<<1,1,0,stream[1]>>>(dev_dB,dev_det,dev_h11,dev_h21,dev_g1,dev_d1);
// dev_getgd<<<1,1>>>(dev_gd,dev_dA,dev_dB,dev_g1,dev_d1);
//
// stepsize = 1; // Line Search
//
// while (stepsize >= min_step)
// {
// //update newA newB
// dev_updateAB<<<1,2>>>(dev_newA,dev_newB,A,B,stepsize,dev_dA,dev_dB);
//
// // New function value
// dev_getnewfApB<<<blocknum,THREAD_NUM,THREAD_NUM*sizeof(float_point)>>>(l,dev_fApB,dev_dec_values,dev_newA,dev_newB);
// dev_getnewf<<<blocknum,THREAD_NUM>>>(l,dev_fApB,dev_t,dev_newf);
// dev_paral_red_sum<<<blocknum,THREAD_NUM,THREAD_NUM*sizeof(float_point)>>>(dev_newf,dev_sum,l);//more block?
// dev_get_sum<<<1,1>>>(dev_sum,dev_newf,blocknum);
//
// // Check sufficient decrease
// checkCudaErrors(cudaMemcpy(&newf,dev_newf,sizeof(float_point),cudaMemcpyDeviceToHost));
// checkCudaErrors(cudaMemcpy(&fval,dev_fval,sizeof(float_point),cudaMemcpyDeviceToHost));
// checkCudaErrors(cudaMemcpy(&gd,dev_gd,sizeof(float_point),cudaMemcpyDeviceToHost));
// if (newf<fval+0.0001*(float_point)stepsize*gd)
// {
// cudaMemcpy(&A,dev_newA,sizeof(float_point),cudaMemcpyDeviceToHost);
// cudaMemcpy(&B,dev_newB,sizeof(float_point),cudaMemcpyDeviceToHost);
// fval=newf;
// break;
// }
// else
// stepsize = stepsize / 2.0;
// }
//
// if (stepsize < min_step)
// {
// info("Line search fails in two-class probability estimates\n");
// break;
// }
// }
//
// if (iter>=max_iter)
// info("Reaching maximal iterations in two-class probability estimates\n");
//
// checkCudaErrors(cudaFree(dev_newA));
// checkCudaErrors(cudaFree(dev_newB));
// checkCudaErrors(cudaFree(dev_fApB));
// checkCudaErrors(cudaFree(dev_fval));
// checkCudaErrors(cudaFree(dev_dec_values));
// checkCudaErrors(cudaFree(dev_det));
// checkCudaErrors(cudaFree(dev_dA));
// checkCudaErrors(cudaFree(dev_dB));
// checkCudaErrors(cudaFree(dev_gd));
// checkCudaErrors(cudaFree(dev_newf));
// checkCudaErrors(cudaFree(dev_t));
// checkCudaErrors(cudaFree(dev_d1));
// checkCudaErrors(cudaFree(dev_d2));
// checkCudaErrors(cudaFree(dev_g1));
// checkCudaErrors(cudaFree(dev_h11));
// checkCudaErrors(cudaFree(dev_h21));
// checkCudaErrors(cudaFree(dev_p));
// checkCudaErrors(cudaFree(dev_q));
// checkCudaErrors(cudaFree(dev_sum));
// checkCudaErrors(cudaFree(dev_prior1));
// checkCudaErrors(cudaFree(dev_prior0));
//}
/**
 * @brief: fit a sigmoid 1/(1+exp(A*f+B)) mapping decision values to class
 * probabilities (Platt scaling; Newton's method with a backtracking line
 * search, cf. LIBSVM's sigmoid_train).
 * @param decValues: decision values f_i produced by the binary classifier
 * @param l: number of decision values / labels
 * @param labels: class labels; entries > 0 count as the positive class
 * @param A,B: output sigmoid parameters (written in place)
 */
void SvmModel::sigmoidTrain(const float_point *decValues, const int l, const vector<int> &labels, float_point &A,
float_point &B) {
// Count positive and negative training examples.
double prior1 = 0, prior0 = 0;
int i;
for (i = 0; i < l; i++)
if (labels[i] > 0)
prior1 += 1;
else
prior0 += 1;
int max_iter = 100; // Maximal number of iterations
double min_step = 1e-10; // Minimal step taken in line search
double sigma = 1e-12; // For numerically strict PD of Hessian
double eps = 1e-5;
// Regularized target probabilities for the positive/negative class.
double hiTarget = (prior1 + 1.0) / (prior1 + 2.0);
double loTarget = 1 / (prior0 + 2.0);
double *t = (double *) malloc(sizeof(double) * l);
double fApB, p, q, h11, h22, h21, g1, g2, det, dA, dB, gd, stepsize;
double newA, newB, newf, d1, d2;
int iter;
// Initial Point and Initial Fun Value
A = 0.0;
B = log((prior0 + 1.0) / (prior1 + 1.0));
double fval = 0.0;
// Objective at the starting point; the branch on the sign of fApB keeps
// exp() from overflowing.
for (i = 0; i < l; i++) {
if (labels[i] > 0)
t[i] = hiTarget;
else
t[i] = loTarget;
fApB = decValues[i] * A + B;
if (fApB >= 0)
fval += t[i] * fApB + log(1 + exp(-fApB));
else
fval += (t[i] - 1) * fApB + log(1 + exp(fApB));
}
// Newton iterations on the 2-parameter objective.
for (iter = 0; iter < max_iter; iter++) {
// Update Gradient and Hessian (use H' = H + sigma I)
h11 = sigma; // numerically ensures strict PD
h22 = sigma;
h21 = 0.0;
g1 = 0.0;
g2 = 0.0;
for (i = 0; i < l; i++) {
fApB = decValues[i] * A + B;
if (fApB >= 0) {
p = exp(-fApB) / (1.0 + exp(-fApB));
q = 1.0 / (1.0 + exp(-fApB));
} else {
p = 1.0 / (1.0 + exp(fApB));
q = exp(fApB) / (1.0 + exp(fApB));
}
d2 = p * q;
h11 += decValues[i] * decValues[i] * d2;
h22 += d2;
h21 += decValues[i] * d2;
d1 = t[i] - p;
g1 += decValues[i] * d1;
g2 += d1;
}
// Stopping Criteria
if (fabs(g1) < eps && fabs(g2) < eps)
break;
// Finding Newton direction: -inv(H') * g
det = h11 * h22 - h21 * h21;
dA = -(h22 * g1 - h21 * g2) / det;
dB = -(-h21 * g1 + h11 * g2) / det;
gd = g1 * dA + g2 * dB;
stepsize = 1; // Line Search
// Backtracking: halve the step until sufficient decrease is achieved.
while (stepsize >= min_step) {
newA = A + stepsize * dA;
newB = B + stepsize * dB;
// New function value
newf = 0.0;
for (i = 0; i < l; i++) {
fApB = decValues[i] * newA + newB;
if (fApB >= 0)
newf += t[i] * fApB + log(1 + exp(-fApB));
else
newf += (t[i] - 1) * fApB + log(1 + exp(fApB));
}
// Check sufficient decrease
if (newf < fval + 0.0001 * stepsize * gd) {
A = newA;
B = newB;
fval = newf;
break;
} else
stepsize = stepsize / 2.0;
}
if (stepsize < min_step) {
printf("Line search fails in two-class probability estimates\n");
break;
}
}
if (iter >= max_iter)
printf(
"Reaching maximal iterations in two-class probability estimates\n");
free(t);
}
/**
 * @brief Add a binary (one-vs-one) SVM for the class pair (i, j) to the
 *        multi-class model: stores its coefficients and rho, and records
 *        its support vectors with deduplicated instance storage.
 *
 * Support-vector instances are shared across binary models through svMap;
 * indexMap translates an SV's global training-set index to its slot in
 * svMap so each instance is stored only once.
 *
 * NOTE(review): indexMap is function-static, so it lives for the entire
 * process and is shared by every SvmModel instance — confirm only one model
 * is built per process, otherwise stale entries would corrupt svIndex.
 *
 * @param problem      training problem (provides originalIndex and samples)
 * @param svLocalIndex local (within-problem) indices of the support vectors
 * @param coef         dual coefficients for this binary model
 * @param rho          bias term of this binary model
 * @param i,j          class pair identifying the binary model (see getK)
 */
void SvmModel::addBinaryModel(const SvmProblem &problem, const vector<int> &svLocalIndex, const vector<float_point> &coef,
                              float_point rho, int i, int j) {
    static map<int, int> indexMap;
    int k = getK(i, j);
    this->coef[k] = coef;
    for (int l = 0; l < svLocalIndex.size(); ++l) {
        //map SV local index to the instance index (global index) in the whole training set
        int originalIndex = problem.originalIndex[svLocalIndex[l]];
        if (indexMap.find(originalIndex) != indexMap.end()) {//instance of this sv has been stored in svMap
        } else {
            indexMap[originalIndex] = svMap.size();//key is SV's global index; value is the id (in the map) for this SV instance.
            svMap.push_back(problem.v_vSamples[svLocalIndex[l]]);
        }
        this->svIndex[k].push_back(indexMap[originalIndex]);//svIndex is the id in the map.
    }
    this->rho[k] = rho;
    numOfSVs += svLocalIndex.size();
}
bool SvmModel::isProbability() const {
return probability;
}
|
fdf9c6e650b93e973cecc413ff63bf8265f45be5.hip | // !!! This is a file automatically generated by hipify!!!
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <time.h>
// includes CUDA
#include <hip/hip_runtime.h>
// includes, project
#include <helper_cuda.h>
#include <helper_functions.h> // helper functions for SDK examples
//include other files
#include "test.h"
#include "random.h"
//#include "environment.h"
#include "predatorprey.h"
//#include "network.h"
#include "feedforward.h"
#include "population.h"
#ifndef nPreds
#define nPreds 6
#endif
#ifndef nHidden
#define nHidden 50
#endif
#ifndef nIndivs
#define nIndivs 540
#endif
//globals
Neuron* bestTeam;
int bestGene;
Population* subPops;
Population** predSubPops;
Gridworld world;
int catches;
//input params with default values
bool sim = false;
//int hidden = 10;
//int numIndivs = 100;
//int numInputs = 2;
//int numOutputs = 5;
//int burstGens = 10;
//int maxGens = 100000;
int goalFitness = 100000;
//int numPreds = 3;
int trialsPerEval = 9;
struct tempState{
int* PredatorX;
int* PredatorY;
int PreyX;
int PreyY;
};
//struct aTeam{//moved to feedForward.h
// int numOutputs;
// int numInputs;
// double act1[15];
// Neuron t1[15];
// double act2[15];
// Neuron t2[15];
// double act3[15];
// Neuron t3[15];
// int fitness;
// int numHidden;
// int catches;
//};
struct teamArr{
aTeam team;
};
// Build `hid` sub-populations, each configured for `num` individuals with
// `genes` genes, and return them as a heap-allocated array (caller owns it).
//
// NOTE(review): a single Population `p` is created once and struct-copied
// into every slot after bumping its ID and re-running createIndividuals; if
// Population holds pointers, all copies may alias the same storage, and `p`
// itself is never released — confirm against Population's definition.
Population* init(int hid, int num, int genes){
    Population* pops = new Population[hid];
    Population* p = newPopulation(num, genes);
    for(int i = 0; i < hid; i++){
        // Give each sub-population a consecutive ID.
        if(i>0) p->ID = p->ID + 1;
        createIndividuals(p);
        pops[i] = *p;
    }
    return pops;
}
// Host-side toroidal (wrap-around) Manhattan distance between a predator at
// (h_predX, h_predY) and the prey at (h_preyX, h_preyY). Each axis distance
// is folded back when it exceeds half the world's extent on that axis.
int h_calculateDistance(Gridworld* h_world, int h_predX, int h_predY, int h_preyX, int h_preyY){
    double dx = fabs(double(h_predX - h_preyX));
    double dy = fabs(double(h_predY - h_preyY));
    // Wrap around the torus: never travel more than half the world.
    if(dx > double(h_world->length/2)){
        dx = double(h_world->length) - dx;
    }
    if(dy > double(h_world->height/2)){
        dy = double(h_world->height) - dy;
    }
    return int(dx + dy);
}
// Device-side counterpart of h_calculateDistance: toroidal (wrap-around)
// Manhattan distance between a predator and the prey on the grid. Keep in
// sync with the host version above.
__device__ int calculateDistance(Gridworld* world, int predX, int predY, int preyX ,int preyY){
    double xDist = 0;
    double yDist = 0;
    xDist = abs((double)(predX-preyX));
    // Wrap around the torus: never travel more than half the world length.
    if(xDist > double(world->length/2)){
        xDist = double(world->length) - xDist;
    }
    yDist = abs((double)(predY-preyY));
    if(yDist > double(world->height/2)){
        yDist = double(world->height) - yDist;
    }
    return int(xDist + yDist);
}
// Kernel: each thread writes `fitness` into one hidden neuron and bumps its
// trial counter (one thread per element of hiddenUnits).
//
// NOTE(review): there is no `index < n` bounds guard — the launch
// configuration must supply exactly as many threads as there are hidden
// units, otherwise extra threads dereference out-of-range pointers. No call
// site is visible here; confirm before reuse.
__global__ void kernelAssignFitness(int fitness, Neuron** hiddenUnits){
    int index = threadIdx.x + blockIdx.x * blockDim.x;
//	for(int i=index;i<numHidden;i++){
//		Neuron* n = hiddenUnits[i];
        hiddenUnits[index]->Fitness = fitness;
        hiddenUnits[index]->Trials++;
//		hiddenUnits[i] = n;
//	}
}
// Host-side (reference/debug) evaluation of predator teams: runs
// h_trialsPerEval trials from fixed prey start positions, simulating prey
// and predator moves until the prey is caught or h_maxSteps elapse, and
// accumulates a fitness score and catch count.
//
// NOTE(review): inside the trial loop the team is read as h_teams[i].team
// (i = trial index), but the accumulated fitness/catches are written only
// to h_teams[0] — one of the two indexings looks wrong; confirm intent.
// NOTE(review): h_sim and h_numTrials are accepted but never used, and the
// State allocated with `new` is never deleted (leak per call).
teamArr* h_eval(Gridworld* h_worldpntr, teamArr* h_teams, int h_numPreds, double* h_input, double* h_output, int h_inplen, int h_outlen, int h_trialsPerEval, bool h_sim, int h_numTrials){
    State* h_statepntr = new State();
    h_reset(h_statepntr, h_numPreds);
    int h_catches = 0;
    int h_totalFitness = 0;
    // Fixed prey start positions: row 0 = X, row 1 = Y, one column per trial.
    int h_PreyPositions[2][9] = {{16, 50, 82, 82, 82, 16, 50, 50, 82},{50, 50, 50, 82, 16, 50, 16, 82, 50}};
    for(int i=0;i<h_trialsPerEval;i++){
        int h_fitness = 0;
        int h_steps = 0;
        int h_maxSteps = 150;
        int h_avg_init_dist = 0;
        int h_avg_final_dist = 0;
        h_setPreyPosition(h_statepntr, h_PreyPositions[0][i], h_PreyPositions[1][i]);
        int h_nearestDist = 100;
        int h_nearestPred = 0;
        int h_currentDist = 0;
        // Average predator->prey distance at the start of the trial.
        for(int p = 0;p<h_numPreds;p++){
            h_avg_init_dist += h_calculateDistance(h_worldpntr, h_statepntr->PredatorX[p], h_statepntr->PredatorY[p], h_statepntr->PreyX, h_statepntr->PreyY);
        }
        h_avg_init_dist = h_avg_init_dist/h_numPreds;
        while(!h_Caught(h_statepntr) && h_steps < h_maxSteps){
            // Find the predator currently closest to the prey; the prey
            // reacts to that one.
            for(int p=0;p<h_numPreds;p++){
                h_currentDist = h_calculateDistance(h_worldpntr, h_statepntr->PredatorX[p], h_statepntr->PredatorY[p], h_statepntr->PreyX, h_statepntr->PreyY);
                if(h_currentDist<h_nearestDist){
                    h_nearestDist = h_currentDist;
                    h_nearestPred = p;
                }
            }
            h_PerformPreyAction(h_statepntr, h_worldpntr, h_nearestPred);
            // Each predator acts on the network output for the prey position.
            for(int p = 0;p<h_numPreds;p++){
                h_input[0] = double(h_statepntr->PreyX);
                h_input[1] = double(h_statepntr->PreyY);
//				delete[] h_output;
                for(int o=0;o<h_outlen;o++){
                    h_output[o] = 0;
                }
//				h_output = new double[h_outlen];
                double* out = h_Activate(&h_teams[i].team, h_input, h_inplen, h_output);
                h_PerformPredatorAction(h_statepntr, h_worldpntr, p, out, h_teams[i].team.numOutputs);
            }
//			printf("Pred1 X: %d Pred1 Y: %d\n", h_statepntr->PredatorX[0], h_statepntr->PredatorY[0]);
//			printf("Pred2 X: %d Pred2 Y: %d\n", h_statepntr->PredatorX[1], h_statepntr->PredatorY[1]);
//			printf("Pred3 X: %d Pred3 Y: %d\n", h_statepntr->PredatorX[2], h_statepntr->PredatorY[2]);
//			printf("Prey X: %d Prey Y: %d\n", h_statepntr->PreyX, h_statepntr->PreyY);
            h_steps++;
        }
        // Average predator->prey distance at the end of the trial.
        for(int p = 0;p< h_numPreds;p++){
            h_avg_final_dist += h_calculateDistance(h_worldpntr, h_statepntr->PredatorX[p], h_statepntr->PredatorY[p], h_statepntr->PreyX, h_statepntr->PreyY);
        }
        h_avg_final_dist = h_avg_final_dist/h_numPreds;
        if(!h_Caught(h_statepntr)){
            // No catch: reward closing the distance.
            h_fitness = h_avg_init_dist - h_avg_final_dist;
        }else{
            // Catch: large bonus minus the spread of the pack.
            h_fitness = 200-h_avg_final_dist;
            h_catches++;
        }
        h_totalFitness += h_fitness;
    }
    h_teams[0].team.fitness = h_totalFitness; // /trialsPerEval
    h_teams[0].team.catches = h_catches;
    // Credit the team fitness back to every hidden neuron of all three nets
    // (copy-modify-write, since t1/t2/t3 are arrays of Neuron by value).
    for(int i2 = 0; i2<h_teams[0].team.numHidden;i2++){
        Neuron n1 = h_teams[0].team.t1[i2];
        Neuron n2 = h_teams[0].team.t2[i2];
        Neuron n3 = h_teams[0].team.t3[i2];
        n1.Fitness = h_teams[0].team.fitness;
        n2.Fitness = h_teams[0].team.fitness;
        n3.Fitness = h_teams[0].team.fitness;
        n1.Trials++;
        n2.Trials++;
        n3.Trials++;
        h_teams[0].team.t1[i2] = n1;
        h_teams[0].team.t2[i2] = n2;
        h_teams[0].team.t3[i2] = n3;
    }
    return h_teams;
}
// Kernel: evaluate one team per thread using a grid-stride loop over the
// numTrials teams; each evaluation runs up to trialsPerEval trials and
// writes the team's total fitness / catch count back into d_teams[i].
//
// NOTE(review): `isCaught` is a pass-by-value bool, so writes to it are
// thread-local and never reach other threads or the host; the visible host
// code passes a device bool* (d_isCaught) for this argument, which silently
// converts to `true` — confirm the intended parameter type is bool*.
// NOTE(review): every thread shares the same statepntr/input/output buffers,
// so concurrent evaluations race on them; per-thread state would be needed
// for deterministic results.
__global__ void runEvaluationsParallel(State* statepntr, Gridworld* worldpntr, teamArr* d_teams, int numPreds, double* input, double* output, int inplen, int outlen, int trialsPerEval, bool sim, int numTrials, bool isCaught){
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    int stride = blockDim.x * gridDim.x;
    isCaught = false;
    kernelReset(statepntr, numPreds);
    for(int i = index;i < numTrials;i+= stride){
        int catches = 0;
        int total_fitness = 0;
        // Fixed prey start positions: row 0 = X, row 1 = Y, one per trial.
        int PreyPositions[2][9] = {{16, 50, 82, 82, 82, 16, 50, 50, 82},{50, 50, 50, 82, 16, 50, 16, 82, 50}};
        for(int l = 0;l < trialsPerEval;l++){
            if(l % (numTrials/10) == 0 && isCaught)break;
            int fitness =0;
            int steps = 0;
            int maxSteps = 150;
            int avg_init_dist = 0;
            int avg_final_dist = 0;
            setPreyPosition(statepntr, PreyPositions[0][l], PreyPositions[1][l]);
            int nearestDist = 100;//so that closest pred changes
            int nearestPred = 0;
            int currentDist = 0;
            // Average predator->prey distance at the start of the trial.
            for(int p = 0 ; p < numPreds; p++){
                avg_init_dist = avg_init_dist + calculateDistance(worldpntr, statepntr->PredatorX[p], statepntr->PredatorY[p], statepntr->PreyX, statepntr->PreyY);
            }
            avg_init_dist = avg_init_dist/numPreds;
            while(!Caught(statepntr) && steps < maxSteps){
                // The prey reacts to the currently nearest predator.
                for(int p=0; p < numPreds;p++){
                    currentDist = calculateDistance(worldpntr, statepntr->PredatorX[p], statepntr->PredatorY[p], statepntr->PreyX, statepntr->PreyY);
                    if(currentDist<nearestDist){
                        nearestDist = currentDist;
                        nearestPred = p;
                    }
                }
                PerformPreyAction(statepntr, worldpntr, nearestPred);
                // Each predator acts on its network's output for the prey position.
                for(int pred = 0; pred < numPreds;pred++){
                    input[0] = double(statepntr->PreyX);
                    input[1] = double(statepntr->PreyY);
                    for(int o=0;o<outlen;o++){
                        output[o] = 0;
                    }
                    double* out = Activate(&d_teams[i].team, input, inplen, output);
                    PerformPredatorAction(statepntr, worldpntr, pred, out, d_teams[i].team.numOutputs);//change to use state?
                }
                steps++;
            }
            // Average predator->prey distance at the end of the trial.
            for(int p = 0; p < numPreds;p++){
                avg_final_dist = avg_final_dist + calculateDistance(worldpntr, statepntr->PredatorX[p], statepntr->PredatorY[p], statepntr->PreyX, statepntr->PreyY);
            }
            avg_final_dist = avg_final_dist/numPreds;
            if(!Caught(statepntr)){
                // No catch: reward closing the distance.
                fitness = (avg_init_dist - avg_final_dist);// /10
            }else{
//				printf("Pred1 at %d, %d\n", statepntr->PredatorX[0], statepntr->PredatorY[0]);
//				printf("Pred2 at %d, %d\n", statepntr->PredatorX[1], statepntr->PredatorY[1]);
//				printf("Pred3 at %d, %d\n", statepntr->PredatorX[2], statepntr->PredatorY[2]);
//				printf("Prey at %d, %d\n", statepntr->PreyX, statepntr->PreyY);
                // Catch: scaled bonus; stop evaluating further trials.
                fitness = (200 - avg_final_dist)/10;
                catches++;
                isCaught = true;
                break;
            }
            total_fitness = total_fitness + fitness;
        }
        d_teams[i].team.fitness = total_fitness; // /trialsPerEval
        d_teams[i].team.catches = catches;
        // Credit the team fitness back to every hidden neuron of all three
        // nets (copy-modify-write, since t1/t2/t3 hold Neuron by value).
        for(int i2 = 0; i2<d_teams[i].team.numHidden;i2++){
            Neuron n1 = d_teams[i].team.t1[i2];
            Neuron n2 = d_teams[i].team.t2[i2];
            Neuron n3 = d_teams[i].team.t3[i2];
            n1.Fitness = d_teams[i].team.fitness;
            n2.Fitness = d_teams[i].team.fitness;
            n3.Fitness = d_teams[i].team.fitness;
            n1.Trials++;
            n2.Trials++;
            n3.Trials++;
            d_teams[i].team.t1[i2] = n1;
            d_teams[i].team.t2[i2] = n2;
            d_teams[i].team.t3[i2] = n3;
        }
    }
}
// Report (but do not abort on) a HIP API error.
//
// Implemented as a macro so that __FILE__/__LINE__ expand at the CALL SITE.
// The previous function form always printed the file/line of CHECK's own
// definition, making every diagnostic point at the wrong place. Call sites
// (`CHECK(expr);`) are unchanged; `err` is evaluated exactly once.
#define CHECK(err)                                                           \
    do {                                                                     \
        hipError_t e_ = (err);                                               \
        if (e_) {                                                            \
            printf("Error in %s at line %d: %s\n", __FILE__, __LINE__,       \
                   hipGetErrorString(e_));                                   \
        }                                                                    \
    } while (0)
// Debug kernel (launch commented out in main): one thread per team. Writes
// 1.0 into input[0] (all threads store the same value) and copies each
// team's fitness into every hidden neuron of its three nets, bumping the
// neurons' trial counters.
__global__ void testKernel(teamArr* teams, double* input, int inplen){
    int index = threadIdx.x;
//	input = new double[inplen];
    input[0] = 1.0;
//	double test = teams[index].team.t1[0].Weight[0];
//	teams[index].team.fitness = index+1;
    // Copy-modify-write each neuron (t1/t2/t3 hold Neuron by value).
    for(int i2 = 0; i2<teams[index].team.numHidden;i2++){
        Neuron n1 = teams[index].team.t1[i2];
        Neuron n2 = teams[index].team.t2[i2];
        Neuron n3 = teams[index].team.t3[i2];
        n1.Fitness = teams[index].team.fitness;
        n2.Fitness = teams[index].team.fitness;
        n3.Fitness = teams[index].team.fitness;
        n1.Trials++;
        n2.Trials++;
        n3.Trials++;
        teams[index].team.t1[i2] = n1;
        teams[index].team.t2[i2] = n2;
        teams[index].team.t3[i2] = n3;
    }
//	printf("Index %d Weight %d\n", index, test);
}
// Entry point: cooperative-coevolution training loop for the predator-prey
// task. Each generation assembles numTrials teams from the per-predator
// sub-populations, evaluates them on the GPU, tracks the best team, and
// evolves the sub-populations (burst mutation every burstGens generations,
// otherwise sort/mate/mutate).
int main(int argc, char **argv)
{
    //benchmark time vars
    float timeTillCall, timeTillAfterCall, timeTillEnd;
    hipEvent_t start, stopBefore, stopAfter, stopEnd;
    CHECK(hipEventCreate(&start));
    CHECK(hipEventCreate(&stopBefore));
    CHECK(hipEventCreate(&stopAfter));
    CHECK(hipEventCreate(&stopEnd));
    CHECK(hipEventRecord(start, 0));
    //testing values
    int numInputs = 2;
    int hidden = nHidden;
    int numOutputs = 5;
    int numIndivs = nIndivs;//540
    int maxGens = 100;
    int goalFitness = 100;
    int numPreds = nPreds;//6
    int burstGens = 2;
    //TODO: parse input
    //simulation values
    bool stagnated;
    double mutationRate = 0.4;
    int bestFitness = 0;
    int generations = 0;
    stagnated = false;
    bool teamfound = false;
    int numTrials = 10 * numIndivs;
    //GPU values: size the grid from the device's SM count.
    int SMs;
    int deviceID;
    hipGetDevice(&deviceID);
    hipDeviceGetAttribute(&SMs, hipDeviceAttributeMultiprocessorCount, deviceID);
    int threadsPerBlock = 256;
    int blocks = 32 * SMs;
    predSubPops = new Population*[numPreds];
    //initialisation of subpopulations (one set of `hidden` subpops per predator)
    for(int p = 0;p<numPreds;p++){
        feedForward* ff = newFeedForward(numInputs, hidden, numOutputs, false);
        Population* subpops = init(hidden, numIndivs, ff->GeneSize);
        predSubPops[p] = subpops;
    }
    teamArr* teams;
    teamArr* d_teams;
    PredatorPrey* h_pp;
    h_pp = newPredatorPrey(numPreds);
    feedForward* ff = new feedForward[numPreds];
    //setup constructs in memory
    // NOTE(review): printf below uses %d for sizeof()/size expressions
    // (size_t); %zu would be the portable format.
    int numTeamBytes = numTrials * sizeof(aTeam);
    printf("team array bytes: %d\n", numTeamBytes);
    CHECK(hipMalloc(&d_teams, numTeamBytes));
    teams = (teamArr*)malloc(numTeamBytes);
    Gridworld* d_world;
    printf("world struct bytes: %d\n", sizeof(Gridworld));
    CHECK(hipMalloc(&d_world, sizeof(Gridworld)));
    State* d_state;
    CHECK(hipMalloc(&d_state, sizeof(State)));
    //bool for stopping unnecessary execution in kernel
    // NOTE(review): the kernel's corresponding parameter is `bool`, not
    // `bool*`; passing d_isCaught converts the pointer to `true` and the
    // flag can never reach the host — confirm intended signature.
    bool* d_isCaught;
    CHECK(hipMalloc(&d_isCaught, sizeof(bool)));
    //run simulation
    while(generations < maxGens && catches < 1){//run contents of this loop in parallel
        catches = 0;
        // Assemble numTrials candidate teams from the sub-populations.
        // NOTE(review): only ff[0], ff[1], ff[2] are copied into each team
        // (aTeam has three nets) although numPreds = nPreds (6) networks
        // are created above — confirm this 3-net limit is intentional.
        for(int t = 0; t < numTrials;t++){
            for(int p = 0;p<numPreds;p++){
                ff[p] = ff_reset(ff[p], numInputs, hidden, numOutputs, false);
                ff[p] = Create(ff[p], predSubPops[p], hidden);
            }
            aTeam tm;
            for(int i = 0;i<hidden;i++){
                tm.act1[i] = ff[0].Activation[i];
                tm.act2[i] = ff[1].Activation[i];
                tm.act3[i] = ff[2].Activation[i];
                tm.t1[i] = ff[0].HiddenUnits[i];
                tm.t2[i] = ff[1].HiddenUnits[i];
                tm.t3[i] = ff[2].HiddenUnits[i];
            }
            tm.catches = ff->Catches;
            tm.fitness = ff->Fitness;
            tm.numHidden = ff->numHidden;
            tm.numInputs = ff->NumInputs;
            tm.numOutputs = ff->NumOutputs;
            teams[t].team = tm;
        }
        CHECK(hipMemcpy(d_teams, teams, numTeamBytes, hipMemcpyHostToDevice));
        reset(h_pp, numPreds);
        CHECK(hipMemcpy(d_world, h_pp->world, sizeof(Gridworld), hipMemcpyHostToDevice));
        //setup for kernel evaluation
        // NOTE(review): d_input/d_output are allocated and freed every
        // generation; hoisting them out of the loop would avoid repeated
        // hipMalloc/hipFree overhead.
        int inplen = (teams[0].team.numInputs);
        int outlen = (teams[0].team.numOutputs);
        double* d_input;
        double* h_input;
//		printf("input array bytes: %d\n", inplen * sizeof(double));
        CHECK(hipMalloc(&d_input, inplen * sizeof(double)));
        h_input = (double*)malloc(inplen * sizeof(double));
        double* d_output;
        double* h_output;
//		printf("output array bytes: %d\n", outlen * sizeof(double));
        CHECK(hipMalloc(&d_output, outlen * sizeof(double)));
        h_output = (double*)malloc(outlen * sizeof(double));
        //evaluate teams
//		testKernel<<<1, 100>>>(d_teams, d_input, inplen);
        //		blocks, threadsPerBlock
//		teams = h_eval(h_pp->world, teams, numPreds, h_input, h_output, inplen, outlen, trialsPerEval, sim, numTrials);
        //benchmark time 1
        CHECK(hipEventRecord(stopBefore, 0));
        CHECK(hipEventSynchronize(stopBefore));
        CHECK(hipEventElapsedTime(&timeTillCall, start, stopBefore));
        printf("Execution time till before evaluation call: %3.1f ms \n", timeTillCall);
        hipLaunchKernelGGL(( runEvaluationsParallel), dim3(blocks), dim3(threadsPerBlock), 0, 0, d_state, d_world, d_teams, numPreds, d_input, d_output, inplen, outlen, trialsPerEval, sim, numTrials, d_isCaught);
//		feedForward* t = evaluate(*pp, team, numPreds);
        CHECK(hipPeekAtLastError());
        hipDeviceSynchronize();
        //benchmark time 2
        CHECK(hipEventRecord(stopAfter, 0));
        CHECK(hipEventSynchronize(stopAfter));
        CHECK(hipEventElapsedTime(&timeTillAfterCall, start, stopAfter));
        printf("Execution time till after evaluation call: %3.1f ms \n", timeTillAfterCall-timeTillCall);
        //send memory back
        CHECK(hipMemcpy(teams, d_teams, numTeamBytes, hipMemcpyDeviceToHost));
        CHECK(hipMemcpy(h_output, d_output, outlen * sizeof(double), hipMemcpyDeviceToHost));
        CHECK(hipMemcpy(h_input, d_input, inplen * sizeof(double), hipMemcpyDeviceToHost));
        //assign team scores
        //TODO: loop through all teams
        //single team testing
//		bestFitness = (teams[0].team.fitness);
//		bestGene = teams[0].team.numInputs + teams[0].team.numOutputs;
//		bestTeam = teams[0].team.t1;
//		/*//commented out for single team testing
        for(int n = 0; n < numTrials;n++){
            catches = catches + (teams[n].team.catches);
            if(bestFitness == 0 && !teamfound){
                bestFitness = (teams[n].team.fitness);
            }
            //keep track of the best performing team
            if((teams[n].team.fitness) > bestFitness){
                bestFitness = (teams[n].team.fitness);
                bestGene = teams[n].team.numInputs + teams[n].team.numOutputs;
                bestTeam = teams[n].team.t1;
                //tag best team neurons
//				for(int i = 0;i<numPreds;i++){
//					Tag(bestTeam[0]);
//				}
            }
            //if this is the first run, take the team as the baseline best team
            if(!teamfound){
                teamfound = true;
                bestFitness = (teams[n].team.fitness);
                bestGene = teams[n].team.numInputs + teams[n].team.numOutputs;
                bestTeam = teams[n].team.t1;
            }
        }
//		*/
        printf("Generation %d, best fitness is %d, catches is %d\n", generations+1, bestFitness, catches);
        //check for stagnation and burst mutate if stagnated
        if(generations%burstGens == 0 && generations != 0){
            //burst mutate: perturb every individual around the best team's neurons
            stagnated = true;
            for(int pred = 0; pred < numPreds; pred++){
                Population* predPop = predSubPops[pred];
                for(int i = 0; i< hidden;i++){
                    Population subpop = predPop[i];
                    for(int n = 0; n< numIndivs;n++){
                        Neuron indiv = subpop.Individuals[n];
                        Neuron* hid = bestTeam;
                        subpop.Individuals[n] = perturb(indiv, hid[i], bestGene);
                    }
                }
            }
        }
        //sort by fitness, mate upper part and mutate populations if not stagnated
        if(!stagnated){
            for(int i = 0 ;i<numPreds;i++){
                for(int j = 0;j<hidden;j++){
                    predSubPops[i][j] = sortNeurons(predSubPops[i][j]);
                    predSubPops[i][j] = mate(predSubPops[i][j]);
                    predSubPops[i][j] = mutate(predSubPops[i][j], mutationRate);
                }
            }
        }
        stagnated = false;
        generations++;
//		CHECK(hipFree(d_teams));
//		free(teams);
//		CHECK(hipFree(d_world));
        CHECK(hipFree(d_input));
        CHECK(hipFree(d_output));
        free(h_input);
        free(h_output);
    }
    //benchmark time 3
    CHECK(hipEventRecord(stopEnd, 0));
    CHECK(hipEventSynchronize(stopEnd));
    CHECK(hipEventElapsedTime(&timeTillEnd, start, stopEnd));
    printf("Total execution time: %3.1f ms \n", timeTillEnd);
}
| fdf9c6e650b93e973cecc413ff63bf8265f45be5.cu |
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <time.h>
// includes CUDA
#include <cuda_runtime.h>
// includes, project
#include <helper_cuda.h>
#include <helper_functions.h> // helper functions for SDK examples
//include other files
#include "test.h"
#include "random.h"
//#include "environment.h"
#include "predatorprey.h"
//#include "network.h"
#include "feedforward.h"
#include "population.h"
#ifndef nPreds
#define nPreds 6
#endif
#ifndef nHidden
#define nHidden 50
#endif
#ifndef nIndivs
#define nIndivs 540
#endif
//globals
Neuron* bestTeam;
int bestGene;
Population* subPops;
Population** predSubPops;
Gridworld world;
int catches;
//input params with default values
bool sim = false;
//int hidden = 10;
//int numIndivs = 100;
//int numInputs = 2;
//int numOutputs = 5;
//int burstGens = 10;
//int maxGens = 100000;
int goalFitness = 100000;
//int numPreds = 3;
int trialsPerEval = 9;
struct tempState{
int* PredatorX;
int* PredatorY;
int PreyX;
int PreyY;
};
//struct aTeam{//moved to feedForward.h
// int numOutputs;
// int numInputs;
// double act1[15];
// Neuron t1[15];
// double act2[15];
// Neuron t2[15];
// double act3[15];
// Neuron t3[15];
// int fitness;
// int numHidden;
// int catches;
//};
struct teamArr{
aTeam team;
};
// Build `hid` sub-populations, each configured for `num` individuals with
// `genes` genes, and return them as a heap-allocated array (caller owns it).
//
// NOTE(review): a single Population `p` is created once and struct-copied
// into every slot after bumping its ID and re-running createIndividuals; if
// Population holds pointers, all copies may alias the same storage, and `p`
// itself is never released — confirm against Population's definition.
Population* init(int hid, int num, int genes){
    Population* pops = new Population[hid];
    Population* p = newPopulation(num, genes);
    for(int i = 0; i < hid; i++){
        // Give each sub-population a consecutive ID.
        if(i>0) p->ID = p->ID + 1;
        createIndividuals(p);
        pops[i] = *p;
    }
    return pops;
}
// Host-side toroidal (wrap-around) Manhattan distance between a predator at
// (h_predX, h_predY) and the prey at (h_preyX, h_preyY). Each axis distance
// is folded back when it exceeds half the world's extent on that axis.
int h_calculateDistance(Gridworld* h_world, int h_predX, int h_predY, int h_preyX, int h_preyY){
    double dx = fabs(double(h_predX - h_preyX));
    double dy = fabs(double(h_predY - h_preyY));
    // Wrap around the torus: never travel more than half the world.
    if(dx > double(h_world->length/2)){
        dx = double(h_world->length) - dx;
    }
    if(dy > double(h_world->height/2)){
        dy = double(h_world->height) - dy;
    }
    return int(dx + dy);
}
// Device-side counterpart of h_calculateDistance: toroidal (wrap-around)
// Manhattan distance between a predator and the prey on the grid. Keep in
// sync with the host version above.
__device__ int calculateDistance(Gridworld* world, int predX, int predY, int preyX ,int preyY){
    double xDist = 0;
    double yDist = 0;
    xDist = abs((double)(predX-preyX));
    // Wrap around the torus: never travel more than half the world length.
    if(xDist > double(world->length/2)){
        xDist = double(world->length) - xDist;
    }
    yDist = abs((double)(predY-preyY));
    if(yDist > double(world->height/2)){
        yDist = double(world->height) - yDist;
    }
    return int(xDist + yDist);
}
// Kernel: each thread writes `fitness` into one hidden neuron and bumps its
// trial counter (one thread per element of hiddenUnits).
//
// NOTE(review): there is no `index < n` bounds guard — the launch
// configuration must supply exactly as many threads as there are hidden
// units, otherwise extra threads dereference out-of-range pointers. No call
// site is visible here; confirm before reuse.
__global__ void kernelAssignFitness(int fitness, Neuron** hiddenUnits){
    int index = threadIdx.x + blockIdx.x * blockDim.x;
//	for(int i=index;i<numHidden;i++){
//		Neuron* n = hiddenUnits[i];
        hiddenUnits[index]->Fitness = fitness;
        hiddenUnits[index]->Trials++;
//		hiddenUnits[i] = n;
//	}
}
// Host-side (reference/debug) evaluation of predator teams: runs
// h_trialsPerEval trials from fixed prey start positions, simulating prey
// and predator moves until the prey is caught or h_maxSteps elapse, and
// accumulates a fitness score and catch count.
//
// NOTE(review): inside the trial loop the team is read as h_teams[i].team
// (i = trial index), but the accumulated fitness/catches are written only
// to h_teams[0] — one of the two indexings looks wrong; confirm intent.
// NOTE(review): h_sim and h_numTrials are accepted but never used, and the
// State allocated with `new` is never deleted (leak per call).
teamArr* h_eval(Gridworld* h_worldpntr, teamArr* h_teams, int h_numPreds, double* h_input, double* h_output, int h_inplen, int h_outlen, int h_trialsPerEval, bool h_sim, int h_numTrials){
    State* h_statepntr = new State();
    h_reset(h_statepntr, h_numPreds);
    int h_catches = 0;
    int h_totalFitness = 0;
    // Fixed prey start positions: row 0 = X, row 1 = Y, one column per trial.
    int h_PreyPositions[2][9] = {{16, 50, 82, 82, 82, 16, 50, 50, 82},{50, 50, 50, 82, 16, 50, 16, 82, 50}};
    for(int i=0;i<h_trialsPerEval;i++){
        int h_fitness = 0;
        int h_steps = 0;
        int h_maxSteps = 150;
        int h_avg_init_dist = 0;
        int h_avg_final_dist = 0;
        h_setPreyPosition(h_statepntr, h_PreyPositions[0][i], h_PreyPositions[1][i]);
        int h_nearestDist = 100;
        int h_nearestPred = 0;
        int h_currentDist = 0;
        // Average predator->prey distance at the start of the trial.
        for(int p = 0;p<h_numPreds;p++){
            h_avg_init_dist += h_calculateDistance(h_worldpntr, h_statepntr->PredatorX[p], h_statepntr->PredatorY[p], h_statepntr->PreyX, h_statepntr->PreyY);
        }
        h_avg_init_dist = h_avg_init_dist/h_numPreds;
        while(!h_Caught(h_statepntr) && h_steps < h_maxSteps){
            // Find the predator currently closest to the prey; the prey
            // reacts to that one.
            for(int p=0;p<h_numPreds;p++){
                h_currentDist = h_calculateDistance(h_worldpntr, h_statepntr->PredatorX[p], h_statepntr->PredatorY[p], h_statepntr->PreyX, h_statepntr->PreyY);
                if(h_currentDist<h_nearestDist){
                    h_nearestDist = h_currentDist;
                    h_nearestPred = p;
                }
            }
            h_PerformPreyAction(h_statepntr, h_worldpntr, h_nearestPred);
            // Each predator acts on the network output for the prey position.
            for(int p = 0;p<h_numPreds;p++){
                h_input[0] = double(h_statepntr->PreyX);
                h_input[1] = double(h_statepntr->PreyY);
//				delete[] h_output;
                for(int o=0;o<h_outlen;o++){
                    h_output[o] = 0;
                }
//				h_output = new double[h_outlen];
                double* out = h_Activate(&h_teams[i].team, h_input, h_inplen, h_output);
                h_PerformPredatorAction(h_statepntr, h_worldpntr, p, out, h_teams[i].team.numOutputs);
            }
//			printf("Pred1 X: %d Pred1 Y: %d\n", h_statepntr->PredatorX[0], h_statepntr->PredatorY[0]);
//			printf("Pred2 X: %d Pred2 Y: %d\n", h_statepntr->PredatorX[1], h_statepntr->PredatorY[1]);
//			printf("Pred3 X: %d Pred3 Y: %d\n", h_statepntr->PredatorX[2], h_statepntr->PredatorY[2]);
//			printf("Prey X: %d Prey Y: %d\n", h_statepntr->PreyX, h_statepntr->PreyY);
            h_steps++;
        }
        // Average predator->prey distance at the end of the trial.
        for(int p = 0;p< h_numPreds;p++){
            h_avg_final_dist += h_calculateDistance(h_worldpntr, h_statepntr->PredatorX[p], h_statepntr->PredatorY[p], h_statepntr->PreyX, h_statepntr->PreyY);
        }
        h_avg_final_dist = h_avg_final_dist/h_numPreds;
        if(!h_Caught(h_statepntr)){
            // No catch: reward closing the distance.
            h_fitness = h_avg_init_dist - h_avg_final_dist;
        }else{
            // Catch: large bonus minus the spread of the pack.
            h_fitness = 200-h_avg_final_dist;
            h_catches++;
        }
        h_totalFitness += h_fitness;
    }
    h_teams[0].team.fitness = h_totalFitness; // /trialsPerEval
    h_teams[0].team.catches = h_catches;
    // Credit the team fitness back to every hidden neuron of all three nets
    // (copy-modify-write, since t1/t2/t3 are arrays of Neuron by value).
    for(int i2 = 0; i2<h_teams[0].team.numHidden;i2++){
        Neuron n1 = h_teams[0].team.t1[i2];
        Neuron n2 = h_teams[0].team.t2[i2];
        Neuron n3 = h_teams[0].team.t3[i2];
        n1.Fitness = h_teams[0].team.fitness;
        n2.Fitness = h_teams[0].team.fitness;
        n3.Fitness = h_teams[0].team.fitness;
        n1.Trials++;
        n2.Trials++;
        n3.Trials++;
        h_teams[0].team.t1[i2] = n1;
        h_teams[0].team.t2[i2] = n2;
        h_teams[0].team.t3[i2] = n3;
    }
    return h_teams;
}
// Kernel: evaluate one team per thread using a grid-stride loop over the
// numTrials teams; each evaluation runs up to trialsPerEval trials and
// writes the team's total fitness / catch count back into d_teams[i].
//
// NOTE(review): `isCaught` is a pass-by-value bool, so writes to it are
// thread-local and never reach other threads or the host; the visible host
// code passes a device bool* (d_isCaught) for this argument, which silently
// converts to `true` — confirm the intended parameter type is bool*.
// NOTE(review): every thread shares the same statepntr/input/output buffers,
// so concurrent evaluations race on them; per-thread state would be needed
// for deterministic results.
__global__ void runEvaluationsParallel(State* statepntr, Gridworld* worldpntr, teamArr* d_teams, int numPreds, double* input, double* output, int inplen, int outlen, int trialsPerEval, bool sim, int numTrials, bool isCaught){
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    int stride = blockDim.x * gridDim.x;
    isCaught = false;
    kernelReset(statepntr, numPreds);
    for(int i = index;i < numTrials;i+= stride){
        int catches = 0;
        int total_fitness = 0;
        // Fixed prey start positions: row 0 = X, row 1 = Y, one per trial.
        int PreyPositions[2][9] = {{16, 50, 82, 82, 82, 16, 50, 50, 82},{50, 50, 50, 82, 16, 50, 16, 82, 50}};
        for(int l = 0;l < trialsPerEval;l++){
            if(l % (numTrials/10) == 0 && isCaught)break;
            int fitness =0;
            int steps = 0;
            int maxSteps = 150;
            int avg_init_dist = 0;
            int avg_final_dist = 0;
            setPreyPosition(statepntr, PreyPositions[0][l], PreyPositions[1][l]);
            int nearestDist = 100;//so that closest pred changes
            int nearestPred = 0;
            int currentDist = 0;
            // Average predator->prey distance at the start of the trial.
            for(int p = 0 ; p < numPreds; p++){
                avg_init_dist = avg_init_dist + calculateDistance(worldpntr, statepntr->PredatorX[p], statepntr->PredatorY[p], statepntr->PreyX, statepntr->PreyY);
            }
            avg_init_dist = avg_init_dist/numPreds;
            while(!Caught(statepntr) && steps < maxSteps){
                // The prey reacts to the currently nearest predator.
                for(int p=0; p < numPreds;p++){
                    currentDist = calculateDistance(worldpntr, statepntr->PredatorX[p], statepntr->PredatorY[p], statepntr->PreyX, statepntr->PreyY);
                    if(currentDist<nearestDist){
                        nearestDist = currentDist;
                        nearestPred = p;
                    }
                }
                PerformPreyAction(statepntr, worldpntr, nearestPred);
                // Each predator acts on its network's output for the prey position.
                for(int pred = 0; pred < numPreds;pred++){
                    input[0] = double(statepntr->PreyX);
                    input[1] = double(statepntr->PreyY);
                    for(int o=0;o<outlen;o++){
                        output[o] = 0;
                    }
                    double* out = Activate(&d_teams[i].team, input, inplen, output);
                    PerformPredatorAction(statepntr, worldpntr, pred, out, d_teams[i].team.numOutputs);//change to use state?
                }
                steps++;
            }
            // Average predator->prey distance at the end of the trial.
            for(int p = 0; p < numPreds;p++){
                avg_final_dist = avg_final_dist + calculateDistance(worldpntr, statepntr->PredatorX[p], statepntr->PredatorY[p], statepntr->PreyX, statepntr->PreyY);
            }
            avg_final_dist = avg_final_dist/numPreds;
            if(!Caught(statepntr)){
                // No catch: reward closing the distance.
                fitness = (avg_init_dist - avg_final_dist);// /10
            }else{
//				printf("Pred1 at %d, %d\n", statepntr->PredatorX[0], statepntr->PredatorY[0]);
//				printf("Pred2 at %d, %d\n", statepntr->PredatorX[1], statepntr->PredatorY[1]);
//				printf("Pred3 at %d, %d\n", statepntr->PredatorX[2], statepntr->PredatorY[2]);
//				printf("Prey at %d, %d\n", statepntr->PreyX, statepntr->PreyY);
                // Catch: scaled bonus; stop evaluating further trials.
                fitness = (200 - avg_final_dist)/10;
                catches++;
                isCaught = true;
                break;
            }
            total_fitness = total_fitness + fitness;
        }
        d_teams[i].team.fitness = total_fitness; // /trialsPerEval
        d_teams[i].team.catches = catches;
        // Credit the team fitness back to every hidden neuron of all three
        // nets (copy-modify-write, since t1/t2/t3 hold Neuron by value).
        for(int i2 = 0; i2<d_teams[i].team.numHidden;i2++){
            Neuron n1 = d_teams[i].team.t1[i2];
            Neuron n2 = d_teams[i].team.t2[i2];
            Neuron n3 = d_teams[i].team.t3[i2];
            n1.Fitness = d_teams[i].team.fitness;
            n2.Fitness = d_teams[i].team.fitness;
            n3.Fitness = d_teams[i].team.fitness;
            n1.Trials++;
            n2.Trials++;
            n3.Trials++;
            d_teams[i].team.t1[i2] = n1;
            d_teams[i].team.t2[i2] = n2;
            d_teams[i].team.t3[i2] = n3;
        }
    }
}
// Report (but do not abort on) a CUDA API error.
//
// Implemented as a macro so that __FILE__/__LINE__ expand at the CALL SITE.
// The previous function form always printed the file/line of CHECK's own
// definition, making every diagnostic point at the wrong place. Call sites
// (`CHECK(expr);`) are unchanged; `err` is evaluated exactly once.
#define CHECK(err)                                                           \
    do {                                                                     \
        cudaError_t e_ = (err);                                              \
        if (e_) {                                                            \
            printf("Error in %s at line %d: %s\n", __FILE__, __LINE__,       \
                   cudaGetErrorString(e_));                                  \
        }                                                                    \
    } while (0)
// Debug kernel (launch commented out in main): one thread per team. Writes
// 1.0 into input[0] (all threads store the same value) and copies each
// team's fitness into every hidden neuron of its three nets, bumping the
// neurons' trial counters.
__global__ void testKernel(teamArr* teams, double* input, int inplen){
    int index = threadIdx.x;
//	input = new double[inplen];
    input[0] = 1.0;
//	double test = teams[index].team.t1[0].Weight[0];
//	teams[index].team.fitness = index+1;
    // Copy-modify-write each neuron (t1/t2/t3 hold Neuron by value).
    for(int i2 = 0; i2<teams[index].team.numHidden;i2++){
        Neuron n1 = teams[index].team.t1[i2];
        Neuron n2 = teams[index].team.t2[i2];
        Neuron n3 = teams[index].team.t3[i2];
        n1.Fitness = teams[index].team.fitness;
        n2.Fitness = teams[index].team.fitness;
        n3.Fitness = teams[index].team.fitness;
        n1.Trials++;
        n2.Trials++;
        n3.Trials++;
        teams[index].team.t1[i2] = n1;
        teams[index].team.t2[i2] = n2;
        teams[index].team.t3[i2] = n3;
    }
//	printf("Index %d Weight %d\n", index, test);
}
int main(int argc, char **argv)
{
//benchmark time vars
float timeTillCall, timeTillAfterCall, timeTillEnd;
cudaEvent_t start, stopBefore, stopAfter, stopEnd;
CHECK(cudaEventCreate(&start));
CHECK(cudaEventCreate(&stopBefore));
CHECK(cudaEventCreate(&stopAfter));
CHECK(cudaEventCreate(&stopEnd));
CHECK(cudaEventRecord(start, 0));
//testing values
int numInputs = 2;
int hidden = nHidden;
int numOutputs = 5;
int numIndivs = nIndivs;//540
int maxGens = 100;
int goalFitness = 100;
int numPreds = nPreds;//6
int burstGens = 2;
//TODO: parse input
//simulation values
bool stagnated;
double mutationRate = 0.4;
int bestFitness = 0;
int generations = 0;
stagnated = false;
bool teamfound = false;
int numTrials = 10 * numIndivs;
//GPU values
int SMs;
int deviceID;
cudaGetDevice(&deviceID);
cudaDeviceGetAttribute(&SMs, cudaDevAttrMultiProcessorCount, deviceID);
int threadsPerBlock = 256;
int blocks = 32 * SMs;
predSubPops = new Population*[numPreds];
//initialisation of subpopulations
for(int p = 0;p<numPreds;p++){
feedForward* ff = newFeedForward(numInputs, hidden, numOutputs, false);
Population* subpops = init(hidden, numIndivs, ff->GeneSize);
predSubPops[p] = subpops;
}
teamArr* teams;
teamArr* d_teams;
PredatorPrey* h_pp;
h_pp = newPredatorPrey(numPreds);
feedForward* ff = new feedForward[numPreds];
//setup constructs in memory
int numTeamBytes = numTrials * sizeof(aTeam);
printf("team array bytes: %d\n", numTeamBytes);
CHECK(cudaMalloc(&d_teams, numTeamBytes));
teams = (teamArr*)malloc(numTeamBytes);
Gridworld* d_world;
printf("world struct bytes: %d\n", sizeof(Gridworld));
CHECK(cudaMalloc(&d_world, sizeof(Gridworld)));
State* d_state;
CHECK(cudaMalloc(&d_state, sizeof(State)));
//bool for stopping unnecessary execution in kernel
bool* d_isCaught;
CHECK(cudaMalloc(&d_isCaught, sizeof(bool)));
//run simulation
while(generations < maxGens && catches < 1){//run contents of this loop in parallel
catches = 0;
for(int t = 0; t < numTrials;t++){
for(int p = 0;p<numPreds;p++){
ff[p] = ff_reset(ff[p], numInputs, hidden, numOutputs, false);
ff[p] = Create(ff[p], predSubPops[p], hidden);
}
aTeam tm;
for(int i = 0;i<hidden;i++){
tm.act1[i] = ff[0].Activation[i];
tm.act2[i] = ff[1].Activation[i];
tm.act3[i] = ff[2].Activation[i];
tm.t1[i] = ff[0].HiddenUnits[i];
tm.t2[i] = ff[1].HiddenUnits[i];
tm.t3[i] = ff[2].HiddenUnits[i];
}
tm.catches = ff->Catches;
tm.fitness = ff->Fitness;
tm.numHidden = ff->numHidden;
tm.numInputs = ff->NumInputs;
tm.numOutputs = ff->NumOutputs;
teams[t].team = tm;
}
CHECK(cudaMemcpy(d_teams, teams, numTeamBytes, cudaMemcpyHostToDevice));
reset(h_pp, numPreds);
CHECK(cudaMemcpy(d_world, h_pp->world, sizeof(Gridworld), cudaMemcpyHostToDevice));
//setup for kernel evaluation
int inplen = (teams[0].team.numInputs);
int outlen = (teams[0].team.numOutputs);
double* d_input;
double* h_input;
// printf("input array bytes: %d\n", inplen * sizeof(double));
CHECK(cudaMalloc(&d_input, inplen * sizeof(double)));
h_input = (double*)malloc(inplen * sizeof(double));
double* d_output;
double* h_output;
// printf("output array bytes: %d\n", outlen * sizeof(double));
CHECK(cudaMalloc(&d_output, outlen * sizeof(double)));
h_output = (double*)malloc(outlen * sizeof(double));
//evaluate teams
// testKernel<<<1, 100>>>(d_teams, d_input, inplen);
// blocks, threadsPerBlock
// teams = h_eval(h_pp->world, teams, numPreds, h_input, h_output, inplen, outlen, trialsPerEval, sim, numTrials);
//benchmark time 1
CHECK(cudaEventRecord(stopBefore, 0));
CHECK(cudaEventSynchronize(stopBefore));
CHECK(cudaEventElapsedTime(&timeTillCall, start, stopBefore));
printf("Execution time till before evaluation call: %3.1f ms \n", timeTillCall);
runEvaluationsParallel<<<blocks, threadsPerBlock>>>(d_state, d_world, d_teams, numPreds, d_input, d_output, inplen, outlen, trialsPerEval, sim, numTrials, d_isCaught);
// feedForward* t = evaluate(*pp, team, numPreds);
CHECK(cudaPeekAtLastError());
cudaDeviceSynchronize();
//benchmark time 2
CHECK(cudaEventRecord(stopAfter, 0));
CHECK(cudaEventSynchronize(stopAfter));
CHECK(cudaEventElapsedTime(&timeTillAfterCall, start, stopAfter));
printf("Execution time till after evaluation call: %3.1f ms \n", timeTillAfterCall-timeTillCall);
//send memory back
CHECK(cudaMemcpy(teams, d_teams, numTeamBytes, cudaMemcpyDeviceToHost));
CHECK(cudaMemcpy(h_output, d_output, outlen * sizeof(double), cudaMemcpyDeviceToHost));
CHECK(cudaMemcpy(h_input, d_input, inplen * sizeof(double), cudaMemcpyDeviceToHost));
//assign team scores
//TODO: loop through all teams
//single team testing
// bestFitness = (teams[0].team.fitness);
// bestGene = teams[0].team.numInputs + teams[0].team.numOutputs;
// bestTeam = teams[0].team.t1;
// /*//commented out for single team testing
for(int n = 0; n < numTrials;n++){
catches = catches + (teams[n].team.catches);
if(bestFitness == 0 && !teamfound){
bestFitness = (teams[n].team.fitness);
}
//keep track of the best performing team
if((teams[n].team.fitness) > bestFitness){
bestFitness = (teams[n].team.fitness);
bestGene = teams[n].team.numInputs + teams[n].team.numOutputs;
bestTeam = teams[n].team.t1;
//tag best team neurons
// for(int i = 0;i<numPreds;i++){
// Tag(bestTeam[0]);
// }
}
//if this is the first run, take the team as the baseline best team
if(!teamfound){
teamfound = true;
bestFitness = (teams[n].team.fitness);
bestGene = teams[n].team.numInputs + teams[n].team.numOutputs;
bestTeam = teams[n].team.t1;
}
}
// */
printf("Generation %d, best fitness is %d, catches is %d\n", generations+1, bestFitness, catches);
//check for stagnation and burst mutate if stagnated
if(generations%burstGens == 0 && generations != 0){
//burst mutate
stagnated = true;
for(int pred = 0; pred < numPreds; pred++){
Population* predPop = predSubPops[pred];
for(int i = 0; i< hidden;i++){
Population subpop = predPop[i];
for(int n = 0; n< numIndivs;n++){
Neuron indiv = subpop.Individuals[n];
Neuron* hid = bestTeam;
subpop.Individuals[n] = perturb(indiv, hid[i], bestGene);
}
}
}
}
//sort by fitness, mate upper part and mutate populations if not stagnated
if(!stagnated){
for(int i = 0 ;i<numPreds;i++){
for(int j = 0;j<hidden;j++){
predSubPops[i][j] = sortNeurons(predSubPops[i][j]);
predSubPops[i][j] = mate(predSubPops[i][j]);
predSubPops[i][j] = mutate(predSubPops[i][j], mutationRate);
}
}
}
stagnated = false;
generations++;
// CHECK(cudaFree(d_teams));
// free(teams);
// CHECK(cudaFree(d_world));
CHECK(cudaFree(d_input));
CHECK(cudaFree(d_output));
free(h_input);
free(h_output);
}
//benchmark time 3
CHECK(cudaEventRecord(stopEnd, 0));
CHECK(cudaEventSynchronize(stopEnd));
CHECK(cudaEventElapsedTime(&timeTillEnd, start, stopEnd));
printf("Total execution time: %3.1f ms \n", timeTillEnd);
}
|
dd3375155b8cd04bb4af6f47f3cb4d5d52941800.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdlib>
#include <iostream>
using namespace std;
int main () {
int active_gpu_id;
int gpus_count;
hipDeviceProp_t gpu_props;
hipGetDeviceCount(&gpus_count);
if (gpus_count) {
hipGetDevice(&active_gpu_id);
cout << "There is " << gpus_count << " GPUs available on your machine which are :" << endl;
for (int i = 0; i < gpus_count; i++) {
hipGetDeviceProperties(&gpu_props, i);
cout << "- " << gpu_props.name << " (id=" << i << ")";
if (gpu_props.integrated) cout << " [INTEGRATED]";
if (i == active_gpu_id) cout << " [ACTIVE]";
cout << endl;
cout << "---> maxThreadsPerBlock = " << gpu_props.maxThreadsPerBlock << endl;
cout << "---> maxThreadsDim = (" << gpu_props.maxThreadsDim[0] << ", " << gpu_props.maxThreadsDim[1] << ", " << gpu_props.maxThreadsDim[2] << ")" << endl;
cout << "---> maxGridSize = (" << gpu_props.maxGridSize[0] << ", " << gpu_props.maxGridSize[1] << ", " << gpu_props.maxGridSize[2] << ")" << endl;
}
}
else
cout << "Sorry but no GPU available on your machine" << endl;
return EXIT_SUCCESS;
}
| dd3375155b8cd04bb4af6f47f3cb4d5d52941800.cu | #include <cstdlib>
#include <iostream>
using namespace std;
int main () {
int active_gpu_id;
int gpus_count;
cudaDeviceProp gpu_props;
cudaGetDeviceCount(&gpus_count);
if (gpus_count) {
cudaGetDevice(&active_gpu_id);
cout << "There is " << gpus_count << " GPUs available on your machine which are :" << endl;
for (int i = 0; i < gpus_count; i++) {
cudaGetDeviceProperties(&gpu_props, i);
cout << "- " << gpu_props.name << " (id=" << i << ")";
if (gpu_props.integrated) cout << " [INTEGRATED]";
if (i == active_gpu_id) cout << " [ACTIVE]";
cout << endl;
cout << "---> maxThreadsPerBlock = " << gpu_props.maxThreadsPerBlock << endl;
cout << "---> maxThreadsDim = (" << gpu_props.maxThreadsDim[0] << ", " << gpu_props.maxThreadsDim[1] << ", " << gpu_props.maxThreadsDim[2] << ")" << endl;
cout << "---> maxGridSize = (" << gpu_props.maxGridSize[0] << ", " << gpu_props.maxGridSize[1] << ", " << gpu_props.maxGridSize[2] << ")" << endl;
}
}
else
cout << "Sorry but no GPU available on your machine" << endl;
return EXIT_SUCCESS;
}
|
a764bf7e3b533070af0607cd7fbb85954789cb3e.hip | // !!! This is a file automatically generated by hipify!!!
/*%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% by: Alireza Ahmadi %
% University of Bonn- MSc Robotics & Geodetic Engineering%
% Alireza.Ahmadi@uni-bonn.de %
% AlirezaAhmadi.xyz %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%*/
// based on https://rosettacode.org/wiki/K-d_tree
#include "kdtree.h"
namespace DynaMap{
__device__ __host__
kdTree::kdTree(){}
__device__ __host__
kdTree::~kdTree(){}
void kdTree::init(int querySize){
visited = 0;
hipMallocManaged(&kdDistnaces, sizeof(float));
hipMallocManaged(&kdFound, sizeof(struct kdNode));
hipMallocManaged(&kdRoot, sizeof(struct kdNode) * NODE_NUM);
hipMallocManaged(&kdQuery, sizeof(struct kdNode) * NODE_NUM);
hipMallocManaged(&VisitedNodes,sizeof(struct kdNode) * NODE_NUM);
hipDeviceSynchronize();
}
void kdTree::Free(void){
hipDeviceSynchronize();
hipFree(kdDistnaces);
hipFree(kdFound);
hipFree(kdRoot);
hipFree(kdQuery);
hipFree(VisitedNodes);
}
__device__ __host__
inline float kdTree::dist(struct kdNode *a, struct kdNode *b, int dim){
float t, d = 0;
while (dim--) {
t = a->x[dim] - b->x[dim];
d += t * t;
}
return d;
}
inline void kdTree::swap(struct kdNode *x, struct kdNode *y) {
#if defined(__CUDA_ARCH__)
struct kdNode *tmp;
hipMallocManaged(&tmp, sizeof(struct kdNode));
hipMemcpy(tmp, x, sizeof(struct kdNode), hipMemcpyDeviceToDevice);
hipDeviceSynchronize();
hipMemcpy(x, y, sizeof(struct kdNode), hipMemcpyDeviceToDevice);
hipDeviceSynchronize();
hipMemcpy(y, tmp, sizeof(struct kdNode), hipMemcpyDeviceToDevice);
hipDeviceSynchronize();
#else
float tmp[MAX_DIM];
int id;
memcpy(tmp, x->x, sizeof(tmp));
id = x->id;
memcpy(x->x, y->x, sizeof(tmp));
x->id= y->id;
memcpy(y->x, tmp, sizeof(tmp));
y->id = id;
#endif
}
struct kdNode* kdTree::findMedian(struct kdNode *start, struct kdNode *end, int idx){
if (end <= start) return NULL;
if (end == start + 1)
return start;
struct kdNode *p, *store, *md = start + (end - start) / 2;
float pivot;
while (1) {
pivot = md->x[idx];
swap(md, end - 1);
for (store = p = start; p < end; p++) {
if (p->x[idx] < pivot) {
if (p != store)
swap(p, store);
store++;
}
}
swap(store, end - 1);
/* median has duplicate values */
if (store->x[idx] == md->x[idx])
return md;
if (store > md) end = store;
else start = store;
}
}
struct kdNode* kdTree::buildTree(struct kdNode *t, int len, int i, int dim){
struct kdNode *n;
if (!len) return 0;
if ((n = findMedian(t, t + len, i))) {
i = (i + 1) % dim;
n->left = buildTree(t, n - t, i, dim);
n->right = buildTree(n + 1, t + len - (n + 1), i, dim);
}
return n;
}
__device__ __host__
void kdTree::findNearest(struct kdNode *root,
struct kdNode *nd,
int i,
int dim,
struct kdNode **best,
float *best_dist){
float d, dx, dx2;
if (!root) return;
d = dist(root, nd, dim);
dx = root->x[i] - nd->x[i];
dx2 = dx * dx;
visited ++;
// std::cout << "RootID: "<< root->id << ", ndID: "<< nd->id << ", d: " << d << std::endl;
if (!*best || d < *best_dist) {
*best_dist = d;
*best = root;
}
/* if chance of exact match is high */
if (!*best_dist) return;
if (++i >= dim) i = 0;
findNearest(dx > 0 ? root->left : root->right, nd, i, dim, best, best_dist);
if (dx2 >= *best_dist) return;
findNearest(dx > 0 ? root->right : root->left, nd, i, dim, best, best_dist);
}
__device__ __host__
void kdTree::findKNearest(struct kdNode *root,
struct kdNode *nd,
int i,
int dim,
struct kdNode **best,
float *best_dist,
struct kdNode *VisitedNodes){
float d, dx, dx2;
if (!root) return;
d = dist(root, nd, dim);
dx = root->x[i] - nd->x[i];
dx2 = dx * dx;
VisitedNodes[visited] = *root;
VisitedNodes[visited].distance = d;
visited ++;
// std::cout << "RootID: "<< root->id << ", ndID: "<< nd->id << ", d: " << d << std::endl;
if (!*best || d < *best_dist) {
*best_dist = d;
*best = root;
}
/* if chance of exact match is high */
if (!*best_dist) return;
if (++i >= dim) i = 0;
findKNearest(dx > 0 ? root->left : root->right, nd, i, dim, best, best_dist, VisitedNodes);
if (dx2 >= *best_dist) return;
findKNearest(dx > 0 ? root->right : root->left, nd, i, dim, best, best_dist, VisitedNodes);
}
__device__ __host__
void kdTree::findKNN(struct kdNode &targetNode){
this->visited = 0;
this->kdFound = 0;
this->findKNearest(this->kdRoot, &targetNode, 0, 3, &this->kdFound, this->kdDistnaces, VisitedNodes);
this->sortNodes(this->visited);
}
__device__ __host__
inline void kdTree::sortNodes(int visitedNum){
for (int idx = 0; idx < visitedNum; idx++){
// Shift values (and indexes) higher
struct kdNode tmpNodes;
int j = idx;
// Store current distance and associated nIdx
struct kdNode currNodes = VisitedNodes[j];
while (j > 0 && VisitedNodes[j-1].distance > currNodes.distance) {
tmpNodes = VisitedNodes[j-1];
VisitedNodes[j-1] = currNodes;
VisitedNodes[j] = tmpNodes;
--j;
}
}
}
}
| a764bf7e3b533070af0607cd7fbb85954789cb3e.cu | /*%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% by: Alireza Ahmadi %
% University of Bonn- MSc Robotics & Geodetic Engineering%
% Alireza.Ahmadi@uni-bonn.de %
% AlirezaAhmadi.xyz %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%*/
// based on https://rosettacode.org/wiki/K-d_tree
#include "kdtree.h"
namespace DynaMap{
__device__ __host__
kdTree::kdTree(){}
__device__ __host__
kdTree::~kdTree(){}
void kdTree::init(int querySize){
visited = 0;
cudaMallocManaged(&kdDistnaces, sizeof(float));
cudaMallocManaged(&kdFound, sizeof(struct kdNode));
cudaMallocManaged(&kdRoot, sizeof(struct kdNode) * NODE_NUM);
cudaMallocManaged(&kdQuery, sizeof(struct kdNode) * NODE_NUM);
cudaMallocManaged(&VisitedNodes,sizeof(struct kdNode) * NODE_NUM);
cudaDeviceSynchronize();
}
void kdTree::Free(void){
cudaDeviceSynchronize();
cudaFree(kdDistnaces);
cudaFree(kdFound);
cudaFree(kdRoot);
cudaFree(kdQuery);
cudaFree(VisitedNodes);
}
__device__ __host__
inline float kdTree::dist(struct kdNode *a, struct kdNode *b, int dim){
float t, d = 0;
while (dim--) {
t = a->x[dim] - b->x[dim];
d += t * t;
}
return d;
}
inline void kdTree::swap(struct kdNode *x, struct kdNode *y) {
#if defined(__CUDA_ARCH__)
struct kdNode *tmp;
cudaMallocManaged(&tmp, sizeof(struct kdNode));
cudaMemcpy(tmp, x, sizeof(struct kdNode), cudaMemcpyDeviceToDevice);
cudaDeviceSynchronize();
cudaMemcpy(x, y, sizeof(struct kdNode), cudaMemcpyDeviceToDevice);
cudaDeviceSynchronize();
cudaMemcpy(y, tmp, sizeof(struct kdNode), cudaMemcpyDeviceToDevice);
cudaDeviceSynchronize();
#else
float tmp[MAX_DIM];
int id;
memcpy(tmp, x->x, sizeof(tmp));
id = x->id;
memcpy(x->x, y->x, sizeof(tmp));
x->id= y->id;
memcpy(y->x, tmp, sizeof(tmp));
y->id = id;
#endif
}
struct kdNode* kdTree::findMedian(struct kdNode *start, struct kdNode *end, int idx){
if (end <= start) return NULL;
if (end == start + 1)
return start;
struct kdNode *p, *store, *md = start + (end - start) / 2;
float pivot;
while (1) {
pivot = md->x[idx];
swap(md, end - 1);
for (store = p = start; p < end; p++) {
if (p->x[idx] < pivot) {
if (p != store)
swap(p, store);
store++;
}
}
swap(store, end - 1);
/* median has duplicate values */
if (store->x[idx] == md->x[idx])
return md;
if (store > md) end = store;
else start = store;
}
}
struct kdNode* kdTree::buildTree(struct kdNode *t, int len, int i, int dim){
struct kdNode *n;
if (!len) return 0;
if ((n = findMedian(t, t + len, i))) {
i = (i + 1) % dim;
n->left = buildTree(t, n - t, i, dim);
n->right = buildTree(n + 1, t + len - (n + 1), i, dim);
}
return n;
}
__device__ __host__
void kdTree::findNearest(struct kdNode *root,
struct kdNode *nd,
int i,
int dim,
struct kdNode **best,
float *best_dist){
float d, dx, dx2;
if (!root) return;
d = dist(root, nd, dim);
dx = root->x[i] - nd->x[i];
dx2 = dx * dx;
visited ++;
// std::cout << "RootID: "<< root->id << ", ndID: "<< nd->id << ", d: " << d << std::endl;
if (!*best || d < *best_dist) {
*best_dist = d;
*best = root;
}
/* if chance of exact match is high */
if (!*best_dist) return;
if (++i >= dim) i = 0;
findNearest(dx > 0 ? root->left : root->right, nd, i, dim, best, best_dist);
if (dx2 >= *best_dist) return;
findNearest(dx > 0 ? root->right : root->left, nd, i, dim, best, best_dist);
}
__device__ __host__
void kdTree::findKNearest(struct kdNode *root,
struct kdNode *nd,
int i,
int dim,
struct kdNode **best,
float *best_dist,
struct kdNode *VisitedNodes){
float d, dx, dx2;
if (!root) return;
d = dist(root, nd, dim);
dx = root->x[i] - nd->x[i];
dx2 = dx * dx;
VisitedNodes[visited] = *root;
VisitedNodes[visited].distance = d;
visited ++;
// std::cout << "RootID: "<< root->id << ", ndID: "<< nd->id << ", d: " << d << std::endl;
if (!*best || d < *best_dist) {
*best_dist = d;
*best = root;
}
/* if chance of exact match is high */
if (!*best_dist) return;
if (++i >= dim) i = 0;
findKNearest(dx > 0 ? root->left : root->right, nd, i, dim, best, best_dist, VisitedNodes);
if (dx2 >= *best_dist) return;
findKNearest(dx > 0 ? root->right : root->left, nd, i, dim, best, best_dist, VisitedNodes);
}
__device__ __host__
void kdTree::findKNN(struct kdNode &targetNode){
this->visited = 0;
this->kdFound = 0;
this->findKNearest(this->kdRoot, &targetNode, 0, 3, &this->kdFound, this->kdDistnaces, VisitedNodes);
this->sortNodes(this->visited);
}
__device__ __host__
inline void kdTree::sortNodes(int visitedNum){
for (int idx = 0; idx < visitedNum; idx++){
// Shift values (and indexes) higher
struct kdNode tmpNodes;
int j = idx;
// Store current distance and associated nIdx
struct kdNode currNodes = VisitedNodes[j];
while (j > 0 && VisitedNodes[j-1].distance > currNodes.distance) {
tmpNodes = VisitedNodes[j-1];
VisitedNodes[j-1] = currNodes;
VisitedNodes[j] = tmpNodes;
--j;
}
}
}
}
|
91fb2a556e42dc81598ddee0d9bf7ebb89b00c75.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "malloc.h"
#include <stdio.h>
#include "Header.h"
#define TX 32
#define ATOMIC 1 // 0 for non-atomic addition
__global__ void addKernel(float *dot, float *a, float *b)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
const int s_idx = threadIdx.x;
__shared__ float s_prod[TX];
s_prod[s_idx] = a[i] * b[i];
__syncthreads();
if (s_idx == 0) {
float blockSum = 0.0;
for (int j = 0; j < blockDim.x; ++j){
blockSum += s_prod[j];
}
printf("Block_%d, blockSum = %f\n", blockIdx.x, blockSum);
if (ATOMIC) {
atomicAdd(dot, blockSum);
}
else {
*dot += blockSum;
}
}
}
__global__ void addKernelShared(float *dot, float *a, float *b)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (ATOMIC) {
atomicAdd(dot, a[i] * b[i]);
}
else {
*dot += a[i]*b[i];
}
}
// Helper function for using CUDA to add vectors in parallel.
void addWithCuda(float *dot, const float *a, const float *b, unsigned int size)
{
float *dev_a = 0;
float *dev_b = 0;
float *dev_c ;
hipEvent_t startKernel, stopKernel;
hipEventCreate(&startKernel);
hipEventCreate(&stopKernel);
// Allocate GPU buffers for three vectors (two input, one output) .
hipMalloc((void**)&dev_a, size * sizeof(float));
hipMalloc((void**)&dev_b, size * sizeof(float));
hipMalloc((void**)&dev_c, sizeof(float));
hipMemset(dev_c, 0, sizeof(float));
// Copy input vectors from host memory to GPU buffers.
hipMemcpy(dev_a, a, size * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(dev_b, b, size * sizeof(float), hipMemcpyHostToDevice);
hipEventRecord(startKernel);
hipLaunchKernelGGL(( addKernel) , dim3((size + TX - 1) / TX), dim3(TX) , 0, 0, dev_c, dev_a, dev_b);
hipEventRecord(stopKernel);
hipEventSynchronize(stopKernel);
float kernelTimeinMs = 0;
hipEventElapsedTime(&kernelTimeinMs, startKernel, stopKernel);
printf("Kernel time (ms): %f\n", kernelTimeinMs);
hipMemcpy(dot, dev_c, 1 * sizeof(float), hipMemcpyDeviceToHost);
hipFree(dev_c);
hipFree(dev_a);
hipFree(dev_b);
}
void addWithCudaShared(float *dot, const float *a, const float *b, unsigned int size)
{
float *dev_a = 0;
float *dev_b = 0;
float *dev_c;
hipEvent_t startKernel, stopKernel;
hipEventCreate(&startKernel);
hipEventCreate(&stopKernel);
// Allocate GPU buffers for three vectors (two input, one output) .
hipMalloc((void**)&dev_a, size * sizeof(float));
hipMalloc((void**)&dev_b, size * sizeof(float));
hipMalloc((void**)&dev_c, sizeof(float));
hipMemset(dev_c, 0, sizeof(float));
// Copy input vectors from host memory to GPU buffers.
hipMemcpy(dev_a, a, size * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(dev_b, b, size * sizeof(float), hipMemcpyHostToDevice);
hipEventRecord(startKernel);
addKernelShared << < (size + TX - 1) / TX, TX >> >(dev_c, dev_a, dev_b);
hipEventRecord(stopKernel);
hipEventSynchronize(stopKernel);
float kernelTimeinMs = 0;
hipEventElapsedTime(&kernelTimeinMs, startKernel, stopKernel);
printf("Kernel time with Shared Memory (ms): %f\n", kernelTimeinMs);
hipMemcpy(dot, dev_c, 1 * sizeof(float), hipMemcpyDeviceToHost);
hipFree(dev_c);
hipFree(dev_a);
hipFree(dev_b);
}
| 91fb2a556e42dc81598ddee0d9bf7ebb89b00c75.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "malloc.h"
#include <stdio.h>
#include "Header.h"
#define TX 32
#define ATOMIC 1 // 0 for non-atomic addition
__global__ void addKernel(float *dot, float *a, float *b)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
const int s_idx = threadIdx.x;
__shared__ float s_prod[TX];
s_prod[s_idx] = a[i] * b[i];
__syncthreads();
if (s_idx == 0) {
float blockSum = 0.0;
for (int j = 0; j < blockDim.x; ++j){
blockSum += s_prod[j];
}
printf("Block_%d, blockSum = %f\n", blockIdx.x, blockSum);
if (ATOMIC) {
atomicAdd(dot, blockSum);
}
else {
*dot += blockSum;
}
}
}
__global__ void addKernelShared(float *dot, float *a, float *b)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (ATOMIC) {
atomicAdd(dot, a[i] * b[i]);
}
else {
*dot += a[i]*b[i];
}
}
// Helper function for using CUDA to add vectors in parallel.
void addWithCuda(float *dot, const float *a, const float *b, unsigned int size)
{
float *dev_a = 0;
float *dev_b = 0;
float *dev_c ;
cudaEvent_t startKernel, stopKernel;
cudaEventCreate(&startKernel);
cudaEventCreate(&stopKernel);
// Allocate GPU buffers for three vectors (two input, one output) .
cudaMalloc((void**)&dev_a, size * sizeof(float));
cudaMalloc((void**)&dev_b, size * sizeof(float));
cudaMalloc((void**)&dev_c, sizeof(float));
cudaMemset(dev_c, 0, sizeof(float));
// Copy input vectors from host memory to GPU buffers.
cudaMemcpy(dev_a, a, size * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, b, size * sizeof(float), cudaMemcpyHostToDevice);
cudaEventRecord(startKernel);
addKernel <<< (size + TX - 1) / TX, TX >>>(dev_c, dev_a, dev_b);
cudaEventRecord(stopKernel);
cudaEventSynchronize(stopKernel);
float kernelTimeinMs = 0;
cudaEventElapsedTime(&kernelTimeinMs, startKernel, stopKernel);
printf("Kernel time (ms): %f\n", kernelTimeinMs);
cudaMemcpy(dot, dev_c, 1 * sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(dev_c);
cudaFree(dev_a);
cudaFree(dev_b);
}
void addWithCudaShared(float *dot, const float *a, const float *b, unsigned int size)
{
float *dev_a = 0;
float *dev_b = 0;
float *dev_c;
cudaEvent_t startKernel, stopKernel;
cudaEventCreate(&startKernel);
cudaEventCreate(&stopKernel);
// Allocate GPU buffers for three vectors (two input, one output) .
cudaMalloc((void**)&dev_a, size * sizeof(float));
cudaMalloc((void**)&dev_b, size * sizeof(float));
cudaMalloc((void**)&dev_c, sizeof(float));
cudaMemset(dev_c, 0, sizeof(float));
// Copy input vectors from host memory to GPU buffers.
cudaMemcpy(dev_a, a, size * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, b, size * sizeof(float), cudaMemcpyHostToDevice);
cudaEventRecord(startKernel);
addKernelShared << < (size + TX - 1) / TX, TX >> >(dev_c, dev_a, dev_b);
cudaEventRecord(stopKernel);
cudaEventSynchronize(stopKernel);
float kernelTimeinMs = 0;
cudaEventElapsedTime(&kernelTimeinMs, startKernel, stopKernel);
printf("Kernel time with Shared Memory (ms): %f\n", kernelTimeinMs);
cudaMemcpy(dot, dev_c, 1 * sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(dev_c);
cudaFree(dev_a);
cudaFree(dev_b);
}
|
ea034294943b34a98cec6bc60b3a794cb8bd4e46.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* -----------------------------------------------------------------------------
*
* Module : Ion Series
* Copyright : (c) [2009..2010] Trevor L. McDonell
* License : BSD
*
* ---------------------------------------------------------------------------*/
#include "utils.h"
#include "device.h"
#include "texture.h"
#include "ion_series.h"
#include "algorithms.h"
#include <stdint.h>
/*
* Scan a warp-sized chunk of data. Because warps execute instructions in SIMD
* fashion, there is no need to synchronise in order to share data. The most
* efficient algorithm is the step-efficient method of Hillis & Steele that
* takes log(N) steps, rather than the work-efficient tree-based algorithm
* described by Blelloch that takes 2 * log(N) steps.
*/
template <class T, bool inclusive>
static __device__ T
scan_warp(T val, volatile T* s_data)
{
const uint32_t idx = threadIdx.x;
const uint32_t lane = threadIdx.x & (WARP_SIZE-1);
/*
* If we double the size of the s_data array and pad the bottom half with
* zero, then we can avoid branching (although there is plenty already).
*
* In device emulation mode, the warp size is 1 and so sync-less operation
* does not work.
*/
s_data[idx] = val; __EMUSYNC;
#ifdef __DEVICE_EMULATION__
val = (lane >= 1) ? s_data[idx - 1] : 0; __EMUSYNC; s_data[idx] += val; __EMUSYNC;
val = (lane >= 2) ? s_data[idx - 2] : 0; __EMUSYNC; s_data[idx] += val; __EMUSYNC;
val = (lane >= 4) ? s_data[idx - 4] : 0; __EMUSYNC; s_data[idx] += val; __EMUSYNC;
val = (lane >= 8) ? s_data[idx - 8] : 0; __EMUSYNC; s_data[idx] += val; __EMUSYNC;
val = (lane >= 16) ? s_data[idx - 16] : 0; __EMUSYNC; s_data[idx] += val; __EMUSYNC;
#else
if (lane >= 1) s_data[idx] = val = val + s_data[idx - 1];
if (lane >= 2) s_data[idx] = val = val + s_data[idx - 2];
if (lane >= 4) s_data[idx] = val = val + s_data[idx - 4];
if (lane >= 8) s_data[idx] = val = val + s_data[idx - 8];
if (lane >= 16) s_data[idx] = val = val + s_data[idx - 16];
#endif
if (inclusive) return s_data[idx];
else return (lane > 0) ? s_data[idx - 1] : 0;
}
/*
* Reduce a warp-sized chunk of data in shared memory.
*/
template <class T>
static __device__ T
reduce_warp(T sum, volatile T* s_data)
{
s_data[threadIdx.x] = sum; __EMUSYNC;
s_data[threadIdx.x] = sum = sum + s_data[threadIdx.x + 16]; __EMUSYNC;
s_data[threadIdx.x] = sum = sum + s_data[threadIdx.x + 8]; __EMUSYNC;
s_data[threadIdx.x] = sum = sum + s_data[threadIdx.x + 4]; __EMUSYNC;
s_data[threadIdx.x] = sum = sum + s_data[threadIdx.x + 2]; __EMUSYNC;
s_data[threadIdx.x] = sum = sum + s_data[threadIdx.x + 1]; __EMUSYNC;
return sum;
}
/*
* Histogram binning
*/
__inline__ __device__ static float
ionMZ(const float m, const float c)
{
return __fdividef(m + MASS_H * c, c);
}
__inline__ __device__ static uint32_t
binMZ(const float mz)
{
return rintf(__fdividef(mz, BIN_WIDTH_MONO));
}
/*
* Dissociation products
*/
template <bool UseCache>
__inline__ __device__ static void
addIon(float &sum, const float *d_spec, const uint32_t N, const int32_t x, const float y)
{
if (0 <= x && x < N)
sum += y * fetch_x<UseCache>(x, d_spec);
}
template <uint32_t charge, bool UseCache>
__device__ void
addIonsAB(float &sum, const float *d_spec, const uint32_t N, const float mass)
{
float m;
int32_t x;
// A-ions
addIon<UseCache>(sum, d_spec, N, binMZ(ionMZ(mass - MASS_CO, charge)), 10.0f);
// B-ions
m = ionMZ(mass, charge);
x = binMZ(m);
addIon<UseCache>(sum, d_spec, N, x, 50.0f);
addIon<UseCache>(sum, d_spec, N, x+1, 25.0f); // technically, should be binMZ(m+1)
addIon<UseCache>(sum, d_spec, N, x-1, 25.0f);
addIon<UseCache>(sum, d_spec, N, binMZ(m - __fdividef(MASS_H2O, charge)), 10.0f);
addIon<UseCache>(sum, d_spec, N, binMZ(m - __fdividef(MASS_NH3, charge)), 10.0f);
}
template <uint32_t charge, bool UseCache>
__device__ void
addIonsY(float &sum, const float *d_spec, const uint32_t N, const float mass)
{
float m = ionMZ(mass + MASS_H2O, charge);
int32_t x = binMZ(m);
// Y-ions
addIon<UseCache>(sum, d_spec, N, x, 50.0f);
addIon<UseCache>(sum, d_spec, N, x+1, 25.0f);
addIon<UseCache>(sum, d_spec, N, x-1, 25.0f);
addIon<UseCache>(sum, d_spec, N, binMZ(m - __fdividef(MASS_NH3, charge)), 10.0f);
}
template <uint32_t charge, bool UseCache>
__device__ void
addIons_k(float &sum, const float *d_spec, const uint32_t N, const float b_mass, const float y_mass)
{
addIonsAB<charge,UseCache>(sum, d_spec, N, b_mass);
addIonsY <charge,UseCache>(sum, d_spec, N, y_mass);
}
/* -----------------------------------------------------------------------------
* Sequest theoretical spectrum and cross-correlation analysis
* -----------------------------------------------------------------------------
*
* Generate theoretical spectra for a collection of peptide fragments. The
* 'ions' array contains the individual amino-acid masses for the database
* entries. We are interested in the sequences generated between the terminal
* indices (tc,tn) of the locations specified in the 'idx' array.
*
* A warp of threads iterates between the (tc,tn) indices, generating the b- and
* y-ion mass ladders. These fragment locations, together with those
* corresponding to neutral losses of H2O and NH3, are the spectral peaks that
* will be combined with the pre-processed experimental spectrum to calculate
* the sequest correlation score.
*
* Optionally, the texture cache may be used for accessing the d_spec
* (experimental spectrum) vector. This generally shows good improvements.
*
*/
template <uint32_t BlockSize, uint32_t MaxCharge, bool UseCache>
__global__ static void
addIons_core
(
float *d_score, // output array of scores, length num_idx
const float *d_spec, // experimental spectrum
const float *d_residual, // peptide residual mass
const float *d_ions, // individual ion masses
const uint32_t *d_tc, // c-terminal indices
const uint32_t *d_tn, // n-terminal indices
const uint32_t *d_idx, // The indices of the sequences under consideration
const uint32_t num_idx,
const uint32_t len_spec
)
{
/*
* Require at least a full warp for each row. This could be relaxed by
* modifying the cooperative reduction step
*/
assert(BlockSize % WARP_SIZE == 0);
const uint32_t vectorsPerBlock = BlockSize / WARP_SIZE;
const uint32_t numVectors = vectorsPerBlock * gridDim.x;
const uint32_t thread_id = BlockSize * blockIdx.x + threadIdx.x;
const uint32_t vector_id = thread_id / WARP_SIZE;
const uint32_t thread_lane = threadIdx.x & (WARP_SIZE-1);
__shared__ volatile float s_data[BlockSize];
for (uint32_t row = vector_id; row < num_idx; row += numVectors)
{
const uint32_t idx = d_idx[row];
const uint32_t row_start = d_tc[idx];
const uint32_t row_end = d_tn[idx];
const float residual = d_residual[idx];
float sum;
float b_mass;
float y_mass;
s_data[threadIdx.x] = 0.0f;
/*
* Have all threads read in mass values for this segment, calculating
* dissociation products and partial dot-product sums to the
* pre-processed experimental spectrum.
*/
for (uint32_t j = row_start + thread_lane; j < row_end; j += WARP_SIZE)
{
/*
* Load the ion mass, and propagate the partial scan results
*/
b_mass = d_ions[j];
if (thread_lane == 0)
b_mass += s_data[threadIdx.x + (WARP_SIZE-1)];
/*
* Generate fragment mass ladder
*/
b_mass = scan_warp<float,true>(b_mass, s_data);
y_mass = residual - b_mass;
sum = 0.0f;
if (1 <= MaxCharge) addIons_k<1,UseCache>(sum, d_spec, len_spec, b_mass, y_mass);
if (2 <= MaxCharge) addIons_k<2,UseCache>(sum, d_spec, len_spec, b_mass, y_mass);
if (3 <= MaxCharge) addIons_k<3,UseCache>(sum, d_spec, len_spec, b_mass, y_mass);
if (4 <= MaxCharge) addIons_k<4,UseCache>(sum, d_spec, len_spec, b_mass, y_mass);
}
/*
* Reduce the partial dot-product results from all threads, and write
* the result for this sequence
*/
reduce_warp<float>(sum, s_data);
if (thread_lane == 0)
d_score[row] = s_data[threadIdx.x];
}
}
/*
* Select a number of threads and blocks. Each block will have at least one full
* warp, as required by the core kernel
*/
static void
addIons_control(uint32_t N, uint32_t &blocks, uint32_t &threads)
{
threads = (N < MAX_THREADS) ? max(WARP_SIZE, ceilPow2(N)) : MAX_THREADS;
blocks = (N + threads - 1) / threads;
blocks = min(blocks, MAX_BLOCKS);
}
template <uint32_t MaxCharge, bool UseCache>
static void
addIons_dispatch
(
float *d_score,
const float *d_spec,
const float *d_residual,
const float *d_ions,
const uint32_t *d_tc,
const uint32_t *d_tn,
const uint32_t *d_idx,
const uint32_t num_idx,
const uint32_t len_spec
)
{
uint32_t blocks;
uint32_t threads;
addIons_control(num_idx, blocks, threads);
switch (threads)
{
// case 512: addIons_core<512,MaxCharge,UseCache><<<blocks,threads>>>(d_score, d_spec, d_residual, d_ions, d_tc, d_tn, d_idx, num_idx, len_spec); break;
// case 256: addIons_core<256,MaxCharge,UseCache><<<blocks,threads>>>(d_score, d_spec, d_residual, d_ions, d_tc, d_tn, d_idx, num_idx, len_spec); break;
case 128:hipLaunchKernelGGL(( addIons_core<128,MaxCharge,UseCache>), dim3(blocks),dim3(threads), 0, 0, d_score, d_spec, d_residual, d_ions, d_tc, d_tn, d_idx, num_idx, len_spec); break;
case 64:hipLaunchKernelGGL(( addIons_core< 64,MaxCharge,UseCache>), dim3(blocks),dim3(threads), 0, 0, d_score, d_spec, d_residual, d_ions, d_tc, d_tn, d_idx, num_idx, len_spec); break;
case 32:hipLaunchKernelGGL(( addIons_core< 32,MaxCharge,UseCache>), dim3(blocks),dim3(threads), 0, 0, d_score, d_spec, d_residual, d_ions, d_tc, d_tn, d_idx, num_idx, len_spec); break;
default:
assert(!"Non-exhaustive patterns in match");
}
}
void
addIons_inplace
(
float *d_score,
const float *d_spec,
const float *d_residual,
const float *d_ions,
const uint32_t *d_tc,
const uint32_t *d_tn,
const uint32_t *d_idx,
const uint32_t num_idx,
const uint32_t max_charge,
const uint32_t len_spec
)
{
bind_x(d_spec);
switch (max_charge)
{
case 1: addIons_dispatch<1,true>(d_score, d_spec, d_residual, d_ions, d_tc, d_tn, d_idx, num_idx, len_spec); break;
case 2: addIons_dispatch<2,true>(d_score, d_spec, d_residual, d_ions, d_tc, d_tn, d_idx, num_idx, len_spec); break;
case 3: addIons_dispatch<3,true>(d_score, d_spec, d_residual, d_ions, d_tc, d_tn, d_idx, num_idx, len_spec); break;
case 4: addIons_dispatch<4,true>(d_score, d_spec, d_residual, d_ions, d_tc, d_tn, d_idx, num_idx, len_spec); break;
default:
assert(!"Non-exhaustive patterns in match");
}
unbind_x(d_spec);
}
| ea034294943b34a98cec6bc60b3a794cb8bd4e46.cu | /* -----------------------------------------------------------------------------
*
* Module : Ion Series
* Copyright : (c) [2009..2010] Trevor L. McDonell
* License : BSD
*
* ---------------------------------------------------------------------------*/
#include "utils.h"
#include "device.h"
#include "texture.h"
#include "ion_series.h"
#include "algorithms.h"
#include <stdint.h>
/*
* Scan a warp-sized chunk of data. Because warps execute instructions in SIMD
* fashion, there is no need to synchronise in order to share data. The most
* efficient algorithm is the step-efficient method of Hillis & Steele that
* takes log(N) steps, rather than the work-efficient tree-based algorithm
* described by Blelloch that takes 2 * log(N) steps.
*/
template <class T, bool inclusive>
static __device__ T
scan_warp(T val, volatile T* s_data)
{
const uint32_t idx = threadIdx.x;
const uint32_t lane = threadIdx.x & (WARP_SIZE-1);
/*
* If we double the size of the s_data array and pad the bottom half with
* zero, then we can avoid branching (although there is plenty already).
*
* In device emulation mode, the warp size is 1 and so sync-less operation
* does not work.
*/
s_data[idx] = val; __EMUSYNC;
#ifdef __DEVICE_EMULATION__
val = (lane >= 1) ? s_data[idx - 1] : 0; __EMUSYNC; s_data[idx] += val; __EMUSYNC;
val = (lane >= 2) ? s_data[idx - 2] : 0; __EMUSYNC; s_data[idx] += val; __EMUSYNC;
val = (lane >= 4) ? s_data[idx - 4] : 0; __EMUSYNC; s_data[idx] += val; __EMUSYNC;
val = (lane >= 8) ? s_data[idx - 8] : 0; __EMUSYNC; s_data[idx] += val; __EMUSYNC;
val = (lane >= 16) ? s_data[idx - 16] : 0; __EMUSYNC; s_data[idx] += val; __EMUSYNC;
#else
if (lane >= 1) s_data[idx] = val = val + s_data[idx - 1];
if (lane >= 2) s_data[idx] = val = val + s_data[idx - 2];
if (lane >= 4) s_data[idx] = val = val + s_data[idx - 4];
if (lane >= 8) s_data[idx] = val = val + s_data[idx - 8];
if (lane >= 16) s_data[idx] = val = val + s_data[idx - 16];
#endif
if (inclusive) return s_data[idx];
else return (lane > 0) ? s_data[idx - 1] : 0;
}
/*
* Reduce a warp-sized chunk of data in shared memory.
*/
template <class T>
static __device__ T
reduce_warp(T sum, volatile T* s_data)
{
s_data[threadIdx.x] = sum; __EMUSYNC;
s_data[threadIdx.x] = sum = sum + s_data[threadIdx.x + 16]; __EMUSYNC;
s_data[threadIdx.x] = sum = sum + s_data[threadIdx.x + 8]; __EMUSYNC;
s_data[threadIdx.x] = sum = sum + s_data[threadIdx.x + 4]; __EMUSYNC;
s_data[threadIdx.x] = sum = sum + s_data[threadIdx.x + 2]; __EMUSYNC;
s_data[threadIdx.x] = sum = sum + s_data[threadIdx.x + 1]; __EMUSYNC;
return sum;
}
/*
* Histogram binning
*/
__inline__ __device__ static float
ionMZ(const float m, const float c)
{
return __fdividef(m + MASS_H * c, c);
}
__inline__ __device__ static uint32_t
binMZ(const float mz)
{
return rintf(__fdividef(mz, BIN_WIDTH_MONO));
}
/*
* Dissociation products
*/
template <bool UseCache>
__inline__ __device__ static void
addIon(float &sum, const float *d_spec, const uint32_t N, const int32_t x, const float y)
{
if (0 <= x && x < N)
sum += y * fetch_x<UseCache>(x, d_spec);
}
template <uint32_t charge, bool UseCache>
__device__ void
addIonsAB(float &sum, const float *d_spec, const uint32_t N, const float mass)
{
float m;
int32_t x;
// A-ions
addIon<UseCache>(sum, d_spec, N, binMZ(ionMZ(mass - MASS_CO, charge)), 10.0f);
// B-ions
m = ionMZ(mass, charge);
x = binMZ(m);
addIon<UseCache>(sum, d_spec, N, x, 50.0f);
addIon<UseCache>(sum, d_spec, N, x+1, 25.0f); // technically, should be binMZ(m+1)
addIon<UseCache>(sum, d_spec, N, x-1, 25.0f);
addIon<UseCache>(sum, d_spec, N, binMZ(m - __fdividef(MASS_H2O, charge)), 10.0f);
addIon<UseCache>(sum, d_spec, N, binMZ(m - __fdividef(MASS_NH3, charge)), 10.0f);
}
template <uint32_t charge, bool UseCache>
__device__ void
addIonsY(float &sum, const float *d_spec, const uint32_t N, const float mass)
{
float m = ionMZ(mass + MASS_H2O, charge);
int32_t x = binMZ(m);
// Y-ions
addIon<UseCache>(sum, d_spec, N, x, 50.0f);
addIon<UseCache>(sum, d_spec, N, x+1, 25.0f);
addIon<UseCache>(sum, d_spec, N, x-1, 25.0f);
addIon<UseCache>(sum, d_spec, N, binMZ(m - __fdividef(MASS_NH3, charge)), 10.0f);
}
template <uint32_t charge, bool UseCache>
__device__ void
addIons_k(float &sum, const float *d_spec, const uint32_t N, const float b_mass, const float y_mass)
{
addIonsAB<charge,UseCache>(sum, d_spec, N, b_mass);
addIonsY <charge,UseCache>(sum, d_spec, N, y_mass);
}
/* -----------------------------------------------------------------------------
* Sequest theoretical spectrum and cross-correlation analysis
* -----------------------------------------------------------------------------
*
* Generate theoretical spectra for a collection of peptide fragments. The
* 'ions' array contains the individual amino-acid masses for the database
* entries. We are interested in the sequences generated between the terminal
* indices (tc,tn) of the locations specified in the 'idx' array.
*
* A warp of threads iterates between the (tc,tn) indices, generating the b- and
* y-ion mass ladders. These fragment locations, together with those
* corresponding to neutral losses of H2O and NH3, are the spectral peaks that
* will be combined with the pre-processed experimental spectrum to calculate
* the sequest correlation score.
*
* Optionally, the texture cache may be used for accessing the d_spec
* (experimental spectrum) vector. This generally shows good improvements.
*
*/
template <uint32_t BlockSize, uint32_t MaxCharge, bool UseCache>
__global__ static void
addIons_core
(
float *d_score, // output array of scores, length num_idx
const float *d_spec, // experimental spectrum
const float *d_residual, // peptide residual mass
const float *d_ions, // individual ion masses
const uint32_t *d_tc, // c-terminal indices
const uint32_t *d_tn, // n-terminal indices
const uint32_t *d_idx, // The indices of the sequences under consideration
const uint32_t num_idx,
const uint32_t len_spec
)
{
/*
* Require at least a full warp for each row. This could be relaxed by
* modifying the cooperative reduction step
*/
assert(BlockSize % WARP_SIZE == 0);
const uint32_t vectorsPerBlock = BlockSize / WARP_SIZE;
const uint32_t numVectors = vectorsPerBlock * gridDim.x;
const uint32_t thread_id = BlockSize * blockIdx.x + threadIdx.x;
const uint32_t vector_id = thread_id / WARP_SIZE;
const uint32_t thread_lane = threadIdx.x & (WARP_SIZE-1);
__shared__ volatile float s_data[BlockSize];
for (uint32_t row = vector_id; row < num_idx; row += numVectors)
{
const uint32_t idx = d_idx[row];
const uint32_t row_start = d_tc[idx];
const uint32_t row_end = d_tn[idx];
const float residual = d_residual[idx];
float sum;
float b_mass;
float y_mass;
s_data[threadIdx.x] = 0.0f;
/*
* Have all threads read in mass values for this segment, calculating
* dissociation products and partial dot-product sums to the
* pre-processed experimental spectrum.
*/
for (uint32_t j = row_start + thread_lane; j < row_end; j += WARP_SIZE)
{
/*
* Load the ion mass, and propagate the partial scan results
*/
b_mass = d_ions[j];
if (thread_lane == 0)
b_mass += s_data[threadIdx.x + (WARP_SIZE-1)];
/*
* Generate fragment mass ladder
*/
b_mass = scan_warp<float,true>(b_mass, s_data);
y_mass = residual - b_mass;
sum = 0.0f;
if (1 <= MaxCharge) addIons_k<1,UseCache>(sum, d_spec, len_spec, b_mass, y_mass);
if (2 <= MaxCharge) addIons_k<2,UseCache>(sum, d_spec, len_spec, b_mass, y_mass);
if (3 <= MaxCharge) addIons_k<3,UseCache>(sum, d_spec, len_spec, b_mass, y_mass);
if (4 <= MaxCharge) addIons_k<4,UseCache>(sum, d_spec, len_spec, b_mass, y_mass);
}
/*
* Reduce the partial dot-product results from all threads, and write
* the result for this sequence
*/
reduce_warp<float>(sum, s_data);
if (thread_lane == 0)
d_score[row] = s_data[threadIdx.x];
}
}
/*
* Select a number of threads and blocks. Each block will have at least one full
* warp, as required by the core kernel
*/
static void
addIons_control(uint32_t N, uint32_t &blocks, uint32_t &threads)
{
threads = (N < MAX_THREADS) ? max(WARP_SIZE, ceilPow2(N)) : MAX_THREADS;
blocks = (N + threads - 1) / threads;
blocks = min(blocks, MAX_BLOCKS);
}
template <uint32_t MaxCharge, bool UseCache>
static void
addIons_dispatch
(
float *d_score,
const float *d_spec,
const float *d_residual,
const float *d_ions,
const uint32_t *d_tc,
const uint32_t *d_tn,
const uint32_t *d_idx,
const uint32_t num_idx,
const uint32_t len_spec
)
{
uint32_t blocks;
uint32_t threads;
addIons_control(num_idx, blocks, threads);
switch (threads)
{
// case 512: addIons_core<512,MaxCharge,UseCache><<<blocks,threads>>>(d_score, d_spec, d_residual, d_ions, d_tc, d_tn, d_idx, num_idx, len_spec); break;
// case 256: addIons_core<256,MaxCharge,UseCache><<<blocks,threads>>>(d_score, d_spec, d_residual, d_ions, d_tc, d_tn, d_idx, num_idx, len_spec); break;
case 128: addIons_core<128,MaxCharge,UseCache><<<blocks,threads>>>(d_score, d_spec, d_residual, d_ions, d_tc, d_tn, d_idx, num_idx, len_spec); break;
case 64: addIons_core< 64,MaxCharge,UseCache><<<blocks,threads>>>(d_score, d_spec, d_residual, d_ions, d_tc, d_tn, d_idx, num_idx, len_spec); break;
case 32: addIons_core< 32,MaxCharge,UseCache><<<blocks,threads>>>(d_score, d_spec, d_residual, d_ions, d_tc, d_tn, d_idx, num_idx, len_spec); break;
default:
assert(!"Non-exhaustive patterns in match");
}
}
void
addIons_inplace
(
float *d_score,
const float *d_spec,
const float *d_residual,
const float *d_ions,
const uint32_t *d_tc,
const uint32_t *d_tn,
const uint32_t *d_idx,
const uint32_t num_idx,
const uint32_t max_charge,
const uint32_t len_spec
)
{
bind_x(d_spec);
switch (max_charge)
{
case 1: addIons_dispatch<1,true>(d_score, d_spec, d_residual, d_ions, d_tc, d_tn, d_idx, num_idx, len_spec); break;
case 2: addIons_dispatch<2,true>(d_score, d_spec, d_residual, d_ions, d_tc, d_tn, d_idx, num_idx, len_spec); break;
case 3: addIons_dispatch<3,true>(d_score, d_spec, d_residual, d_ions, d_tc, d_tn, d_idx, num_idx, len_spec); break;
case 4: addIons_dispatch<4,true>(d_score, d_spec, d_residual, d_ions, d_tc, d_tn, d_idx, num_idx, len_spec); break;
default:
assert(!"Non-exhaustive patterns in match");
}
unbind_x(d_spec);
}
|
3d79b9c231a63e61a2cc6f1cd3d0dd55d629c940.hip | // !!! This is a file automatically generated by hipify!!!
/*
* allocation example - with timing
* * pinned memory
* * host memory
* * device memory
* * host memory registration (pinned)
*/
#include <stdio.h>
#include <hip/hip_runtime.h>
#include "../colours.h"
typedef enum {
PINNED,
DEVICE,
HOST_REG
} AllocType;
float profileMemory(AllocType alloc, size_t size, unsigned int flags) {
hipEvent_t start, stop;
hipError_t err = hipSuccess;
void* devPtr = NULL;
void* hostPtr = malloc(size);
float ms = 0;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
switch(alloc) {
case PINNED:
err = hipHostMalloc(&devPtr, size, flags);
break;
case DEVICE:
err = hipMalloc(&devPtr, size);
break;
case HOST_REG:
err = hipHostRegister(hostPtr, size, flags);
break;
default:
fprintf(stderr, " [!] unknown value\n");
}
if (err != hipSuccess) {
fprintf(stderr, "[!] Error: %s\n", hipGetErrorString(err));
return 0.0f;
}
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&ms, start, stop);
hipEventDestroy(start);
hipEventDestroy(stop);
if (devPtr != NULL) {
hipFree(devPtr);
}
if (alloc == HOST_REG) {
hipHostUnregister(hostPtr);
}
if (hostPtr != NULL) {
free(hostPtr);
}
return ms;
}
void printResults(float ms, const char* colour) {
printf("> %s%f%s ms\n\n", colour, ms, WHITE);
}
/**
* Host main routine
*/
int
main(void)
{
size_t samples[] = {512, 1024*1024, 1024*1024*200, 1024*1024*500};
size_t num_samples = sizeof(samples) / sizeof(samples[0]);
float ms = 0;
for(int i=0; i < num_samples; i++) {
size_t size = samples[i];
printf("[*] using size %d bytes ( %s%.2f%s MB)\n", size, CYAN, (float)size/1024/1024, WHITE);
// test device memory
printf("> profile device memory\n");
ms = profileMemory(DEVICE, size, 0);
printResults(ms, RED);
// test pinned memory
printf("> profile default pinned memory\n");
ms = profileMemory(PINNED, size, 0);
printResults(ms, RED);
printf("> profile portable pinned memory\n");
ms = profileMemory(PINNED, size, hipHostMallocPortable);
printResults(ms, RED);
printf("> profile mapped pinned memory\n");
ms = profileMemory(PINNED, size, hipHostMallocMapped);
printResults(ms, RED);
printf("> profile wc pinned memory\n");
ms = profileMemory(PINNED, size, hipHostMallocWriteCombined);
printResults(ms, RED);
printf("> profile registered host memory\n");
ms = profileMemory(HOST_REG, size, hipHostRegisterDefault);
printResults(ms, RED);
}
return 0;
}
| 3d79b9c231a63e61a2cc6f1cd3d0dd55d629c940.cu | /*
* allocation example - with timing
* * pinned memory
* * host memory
* * device memory
* * host memory registration (pinned)
*/
#include <stdio.h>
#include <cuda_runtime.h>
#include "../colours.h"
typedef enum {
PINNED,
DEVICE,
HOST_REG
} AllocType;
float profileMemory(AllocType alloc, size_t size, unsigned int flags) {
cudaEvent_t start, stop;
cudaError_t err = cudaSuccess;
void* devPtr = NULL;
void* hostPtr = malloc(size);
float ms = 0;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
switch(alloc) {
case PINNED:
err = cudaHostAlloc(&devPtr, size, flags);
break;
case DEVICE:
err = cudaMalloc(&devPtr, size);
break;
case HOST_REG:
err = cudaHostRegister(hostPtr, size, flags);
break;
default:
fprintf(stderr, " [!] unknown value\n");
}
if (err != cudaSuccess) {
fprintf(stderr, "[!] Error: %s\n", cudaGetErrorString(err));
return 0.0f;
}
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&ms, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
if (devPtr != NULL) {
cudaFree(devPtr);
}
if (alloc == HOST_REG) {
cudaHostUnregister(hostPtr);
}
if (hostPtr != NULL) {
free(hostPtr);
}
return ms;
}
void printResults(float ms, const char* colour) {
printf("> %s%f%s ms\n\n", colour, ms, WHITE);
}
/**
* Host main routine
*/
int
main(void)
{
size_t samples[] = {512, 1024*1024, 1024*1024*200, 1024*1024*500};
size_t num_samples = sizeof(samples) / sizeof(samples[0]);
float ms = 0;
for(int i=0; i < num_samples; i++) {
size_t size = samples[i];
printf("[*] using size %d bytes ( %s%.2f%s MB)\n", size, CYAN, (float)size/1024/1024, WHITE);
// test device memory
printf("> profile device memory\n");
ms = profileMemory(DEVICE, size, 0);
printResults(ms, RED);
// test pinned memory
printf("> profile default pinned memory\n");
ms = profileMemory(PINNED, size, 0);
printResults(ms, RED);
printf("> profile portable pinned memory\n");
ms = profileMemory(PINNED, size, cudaHostAllocPortable);
printResults(ms, RED);
printf("> profile mapped pinned memory\n");
ms = profileMemory(PINNED, size, cudaHostAllocMapped);
printResults(ms, RED);
printf("> profile wc pinned memory\n");
ms = profileMemory(PINNED, size, cudaHostAllocWriteCombined);
printResults(ms, RED);
printf("> profile registered host memory\n");
ms = profileMemory(HOST_REG, size, cudaHostRegisterDefault);
printResults(ms, RED);
}
return 0;
}
|
0d52bcedaef9ec2f6388bb0b467ec75845a9d42b.hip | // !!! This is a file automatically generated by hipify!!!
#include "SequenceVisitor.cuh"
#include "WeakTracksAdder.cuh"
DEFINE_EMPTY_SET_ARGUMENTS_SIZE(velo_weak_tracks_adder_t)
template<>
void SequenceVisitor::visit<velo_weak_tracks_adder_t>(
velo_weak_tracks_adder_t& state,
const velo_weak_tracks_adder_t::arguments_t& arguments,
const RuntimeOptions& runtime_options,
const Constants& constants,
HostBuffers& host_buffers,
hipStream_t& cuda_stream,
hipEvent_t& cuda_generic_event)
{
// Setup opts and arguments
state.set_opts(dim3(host_buffers.host_number_of_selected_events[0]), dim3(256), cuda_stream);
state.set_arguments(
arguments.offset<dev_velo_cluster_container>(),
arguments.offset<dev_estimated_input_size>(),
arguments.offset<dev_tracks>(),
arguments.offset<dev_weak_tracks>(),
arguments.offset<dev_hit_used>(),
arguments.offset<dev_atomics_velo>());
state.invoke();
}
| 0d52bcedaef9ec2f6388bb0b467ec75845a9d42b.cu | #include "SequenceVisitor.cuh"
#include "WeakTracksAdder.cuh"
DEFINE_EMPTY_SET_ARGUMENTS_SIZE(velo_weak_tracks_adder_t)
template<>
void SequenceVisitor::visit<velo_weak_tracks_adder_t>(
velo_weak_tracks_adder_t& state,
const velo_weak_tracks_adder_t::arguments_t& arguments,
const RuntimeOptions& runtime_options,
const Constants& constants,
HostBuffers& host_buffers,
cudaStream_t& cuda_stream,
cudaEvent_t& cuda_generic_event)
{
// Setup opts and arguments
state.set_opts(dim3(host_buffers.host_number_of_selected_events[0]), dim3(256), cuda_stream);
state.set_arguments(
arguments.offset<dev_velo_cluster_container>(),
arguments.offset<dev_estimated_input_size>(),
arguments.offset<dev_tracks>(),
arguments.offset<dev_weak_tracks>(),
arguments.offset<dev_hit_used>(),
arguments.offset<dev_atomics_velo>());
state.invoke();
}
|
4afb97c8897f8f05a772684f86b82ef87422b150.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/**
* Matrix multiplication: C = A * B.
* Host code.
*
* This sample implements matrix multiplication as described in Chapter 3
* of the programming guide.
* It has been written for clarity of exposition to illustrate various CUDA
* programming principles, not with the goal of providing the most
* performant generic kernel for matrix multiplication.
*
* See also:
* V. Volkov and J. Demmel, "Benchmarking GPUs to tune dense linear algebra,"
* in Proc. 2008 ACM/IEEE Conf. on Supercomputing (SC '08),
* Piscataway, NJ: IEEE Press, 2008, pp. Art. 31:1-11.
*/
// System includes
#include <stdio.h>
#include <assert.h>
// CUDA runtime
#include <hip/hip_runtime.h>
#include <rocblas.h>
// Helper functions and utilities to work with CUDA
#include <helper_functions.h>
#include <helper_cuda.h>
#include <helper_image.h>
#include "common.h"
#define mFetchSmem(ia, ib, ir){ \
a2[0][ir] = smem[ia + 0]; \
a2[1][ir] = smem[ia + 8]; \
b2[0][ir] = smem[ib + 0]; \
b2[1][ir] = smem[ib + 8]; \
}
#define mRank8x8(ir){ \
c[0*8+0] += a2[0][ir].x * b2[0][ir].x; \
c[1*8+0] += a2[0][ir].y * b2[0][ir].x; \
c[2*8+0] += a2[0][ir].z * b2[0][ir].x; \
c[3*8+0] += a2[0][ir].w * b2[0][ir].x; \
c[4*8+0] += a2[1][ir].x * b2[0][ir].x; \
c[5*8+0] += a2[1][ir].y * b2[0][ir].x; \
c[6*8+0] += a2[1][ir].z * b2[0][ir].x; \
c[7*8+0] += a2[1][ir].w * b2[0][ir].x; \
c[0*8+1] += a2[0][ir].x * b2[0][ir].y; \
c[1*8+1] += a2[0][ir].y * b2[0][ir].y; \
c[2*8+1] += a2[0][ir].z * b2[0][ir].y; \
c[3*8+1] += a2[0][ir].w * b2[0][ir].y; \
c[4*8+1] += a2[1][ir].x * b2[0][ir].y; \
c[5*8+1] += a2[1][ir].y * b2[0][ir].y; \
c[6*8+1] += a2[1][ir].z * b2[0][ir].y; \
c[7*8+1] += a2[1][ir].w * b2[0][ir].y; \
c[0*8+2] += a2[0][ir].x * b2[0][ir].z; \
c[1*8+2] += a2[0][ir].y * b2[0][ir].z; \
c[2*8+2] += a2[0][ir].z * b2[0][ir].z; \
c[3*8+2] += a2[0][ir].w * b2[0][ir].z; \
c[4*8+2] += a2[1][ir].x * b2[0][ir].z; \
c[5*8+2] += a2[1][ir].y * b2[0][ir].z; \
c[6*8+2] += a2[1][ir].z * b2[0][ir].z; \
c[7*8+2] += a2[1][ir].w * b2[0][ir].z; \
c[0*8+3] += a2[0][ir].x * b2[0][ir].w; \
c[1*8+3] += a2[0][ir].y * b2[0][ir].w; \
c[2*8+3] += a2[0][ir].z * b2[0][ir].w; \
c[3*8+3] += a2[0][ir].w * b2[0][ir].w; \
c[4*8+3] += a2[1][ir].x * b2[0][ir].w; \
c[5*8+3] += a2[1][ir].y * b2[0][ir].w; \
c[6*8+3] += a2[1][ir].z * b2[0][ir].w; \
c[7*8+3] += a2[1][ir].w * b2[0][ir].w; \
c[0*8+4] += a2[0][ir].x * b2[1][ir].x; \
c[1*8+4] += a2[0][ir].y * b2[1][ir].x; \
c[2*8+4] += a2[0][ir].z * b2[1][ir].x; \
c[3*8+4] += a2[0][ir].w * b2[1][ir].x; \
c[4*8+4] += a2[1][ir].x * b2[1][ir].x; \
c[5*8+4] += a2[1][ir].y * b2[1][ir].x; \
c[6*8+4] += a2[1][ir].z * b2[1][ir].x; \
c[7*8+4] += a2[1][ir].w * b2[1][ir].x; \
c[0*8+5] += a2[0][ir].x * b2[1][ir].y; \
c[1*8+5] += a2[0][ir].y * b2[1][ir].y; \
c[2*8+5] += a2[0][ir].z * b2[1][ir].y; \
c[3*8+5] += a2[0][ir].w * b2[1][ir].y; \
c[4*8+5] += a2[1][ir].x * b2[1][ir].y; \
c[5*8+5] += a2[1][ir].y * b2[1][ir].y; \
c[6*8+5] += a2[1][ir].z * b2[1][ir].y; \
c[7*8+5] += a2[1][ir].w * b2[1][ir].y; \
c[0*8+6] += a2[0][ir].x * b2[1][ir].z; \
c[1*8+6] += a2[0][ir].y * b2[1][ir].z; \
c[2*8+6] += a2[0][ir].z * b2[1][ir].z; \
c[3*8+6] += a2[0][ir].w * b2[1][ir].z; \
c[4*8+6] += a2[1][ir].x * b2[1][ir].z; \
c[5*8+6] += a2[1][ir].y * b2[1][ir].z; \
c[6*8+6] += a2[1][ir].z * b2[1][ir].z; \
c[7*8+6] += a2[1][ir].w * b2[1][ir].z; \
c[0*8+7] += a2[0][ir].x * b2[1][ir].w; \
c[1*8+7] += a2[0][ir].y * b2[1][ir].w; \
c[2*8+7] += a2[0][ir].z * b2[1][ir].w; \
c[3*8+7] += a2[0][ir].w * b2[1][ir].w; \
c[4*8+7] += a2[1][ir].x * b2[1][ir].w; \
c[5*8+7] += a2[1][ir].y * b2[1][ir].w; \
c[6*8+7] += a2[1][ir].z * b2[1][ir].w; \
c[7*8+7] += a2[1][ir].w * b2[1][ir].w; \
}
void
matrixMulCPU(float *C, const float *A, const float *B, unsigned int hA, unsigned int wA, unsigned int wB)
{
for (unsigned int i = 0; i < hA; ++i)
{
#pragma omp parallel for num_threads(16)
for (unsigned int j = 0; j < wB; ++j)
{
double sum = 0;
for (unsigned int k = 0; k < wA; ++k)
{
double a = A[i * wA + k];
double b = B[k * wB + j];
sum += a * b;
}
C[i * wB + j] = (float)sum;
}
}
}
void randomInit(float *data, int size)
{
for (int i = 0; i < size; ++i)
data[i] = rand() / (float)RAND_MAX;
}
__device__ __forceinline__ void d_rank8x8(float *C, const float *A, const float *B)
{
float b;
#pragma unroll
for (int i = 0; i < 8; i++)
{
b = B[i];
C[0*8+i] += A[0]*b;
C[1*8+i] += A[1]*b;
C[2*8+i] += A[2]*b;
C[3*8+i] += A[3]*b;
C[4*8+i] += A[4]*b;
C[5*8+i] += A[5]*b;
C[6*8+i] += A[6]*b;
C[7*8+i] += A[7]*b;
}
}
/**
* Matrix multiplication (CUDA Kernel) on the device: C = A * B
* wA is A's width and wB is B's width
*/
//__launch_bounds__(64, 8) //MAX_THREADS_PER_BLOCK, MIN_BLOCKS_PER_MP
extern "C"
__global__ void
sgemm_kernel_64(float *C, float *A, float *B, int hA, int wA, int wB)
{
__shared__ float4 smem[2*64 * 2 * 2];
float c[64] = {0.0f};//thread register initialized zero
float4 a1[2], b1[2]; // registers for 1st prefetch from global memory
//float2 a1[4], b1[4];
float4 a2[2][2], b2[2][2]; // registers for 2nd prefetch from shared memory
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tid = threadIdx.x;
//int tid_x = tid & 0x7;
//int tid_y = tid / 8;
int tid15 = (tid & 15);
int tid4 = (tid >> 4);
int aBegin = 64 * by;
//int aEnd = aBegin + hA*(wA - 1);
int aStep = 8 * hA;
int bBegin = 64 * bx;
int bStep = 8 * wB;
int wA8 = wA - 8;
A += aBegin + tid4*hA + (tid15<<2);
B += bBegin + tid4*wB + (tid15<<2);
//AA = A + hA * 4;
//BB = B + wB * 4;
// 1st prefetch from global memory
a1[0] = ld_gbl_cs((const float4 *)(A ));
//A += hA * 4;
a1[1] = ld_gbl_cs((const float4 *)(A + hA*4));
b1[0] = ld_gbl_cs((const float4 *)(B ));
//B += wB * 4;
b1[1] = ld_gbl_cs((const float4 *)(B + wB*4));
// shared offsets
int sh_offs = (tid4<<4) + tid15;
int sh_a = ((tid4<<1) | (tid&1));//tid_y;
int sh_b = ((tid>>1) & 7) + 256;//tid_x + 256;
// shared memory double buffer
smem[sh_offs ] = a1[0];
smem[sh_offs + 64 ] = a1[1];
smem[sh_offs + 256] = b1[0];
smem[sh_offs + 64 + 256] = b1[1];
__syncthreads();
// 2nd prefetch from shared memory
mFetchSmem(sh_a+0*16, sh_b+0*16, 0);//shared memory-->register, memory access
// main loop
for (int k = 0; k < wA; k += 8)
//for (int a = aBegin + aStep, b = bBegin + bStep; a <= aEnd; a += aStep, b += bStep)
{
A += aStep;
B += bStep;
sh_offs ^= 128;
// 1st prefetch from global memory
if (k < wA8)
{
a1[0] = ld_gbl_cs((const float4 *)(A ));
a1[1] = ld_gbl_cs((const float4 *)(A + hA*4));
b1[0] = ld_gbl_cs((const float4 *)(B ));
b1[1] = ld_gbl_cs((const float4 *)(B + wB*4));
}
// compute sub matrix
mFetchSmem(sh_a+1*16, sh_b+1*16, 1);//shared memory-->register, memory access
mRank8x8(0); //register->register, compute
mFetchSmem(sh_a+2*16, sh_b+2*16, 0);//shared memory-->register, memory access
mRank8x8(1); //register->register, compute
mFetchSmem(sh_a+3*16, sh_b+3*16, 1);//shared memory-->register, memory access
mRank8x8(0); //register->register, compute
mFetchSmem(sh_a+4*16, sh_b+4*16, 0);//shared memory-->register, memory access
mRank8x8(1); //register->register, compute
mFetchSmem(sh_a+5*16, sh_b+5*16, 1);//shared memory-->register, memory access
mRank8x8(0); //register->register, compute
mFetchSmem(sh_a+6*16, sh_b+6*16, 0);//shared memory-->register, memory access
mRank8x8(1); //register->register, compute
mFetchSmem(sh_a+7*16, sh_b+7*16, 1);//shared memory-->register, memory access
// shift read index
sh_a ^= 128;
sh_b ^= 128;
// compute the last sub matrix
mRank8x8(0); //register->register, compute
// shared memory double buffer
if (k < wA8)
{
smem[sh_offs ] = a1[0];
smem[sh_offs + 64 ] = a1[1];
smem[sh_offs + 256] = b1[0];
smem[sh_offs + 64 + 256] = b1[1];
}
mRank8x8(1); //register->register, compute
__syncthreads();
// 2nd prefetch from shared memory
if (k < wA8)
{
mFetchSmem(sh_a+0*16, sh_b+0*16, 0);//shared memory-->register, memory access
}
}
//__syncthreads();
//if (tid_x == 0 && tid_y == 2 && bx == 0 && by == 0)
//{
// printf("c[0] : %f\n", c[0]);
//}
// store the 8*8 result to shared
#if 1
int C_index = wB*64*by + 64*bx + tid4*8*wB + tid15*4;
int tid31 = tid & 31;
int cs = ((tid31/2)&7) + (tid31&1)*16 + tid4*32;
#pragma unroll
for (int i = 0; i < 8; i++)
{
// reform C to shared memory
smem[cs + 0] = make_float4(c[i*8+0], c[i*8+1], c[i*8+2], c[i*8+3]);
smem[cs + 8] = make_float4(c[i*8+4], c[i*8+5], c[i*8+6], c[i*8+7]);
//if (i == 0 && tid_x == 0 && tid_y == 2 && bx == 0 && by == 0)
// printf("%d: %f\n", cs, smem[cs + 0].x);
//if (C_index + (i + (i/4)*32 + 4)*wB >= 1024*1024 - 1)
// printf("%d %d %d %d\n", bx, by, tid, (i + (i/4)*32 + 4)*wB);
// coalesing access
st_gbl_cs((const float4 *)(C + C_index + (i + (i/4)*28 + 0)*wB), smem[tid15 + (tid4<<5)+ 0] );
st_gbl_cs((const float4 *)(C + C_index + (i + (i/4)*28 + 4)*wB), smem[tid15 + (tid4<<5)+16] );
}
#else
int C_index = wB * 64 * by + tid_y * 8 * wB + 64 * bx + tid_x * 8;
#pragma unroll
for (int i = 0; i < 8; i++)
{
C[C_index + i * wB + 0] = c[i * 8 + 0];
C[C_index + i * wB + 1] = c[i * 8 + 1];
C[C_index + i * wB + 2] = c[i * 8 + 2];
C[C_index + i * wB + 3] = c[i * 8 + 3];
C[C_index + i * wB + 4] = c[i * 8 + 4];
C[C_index + i * wB + 5] = c[i * 8 + 5];
C[C_index + i * wB + 6] = c[i * 8 + 6];
C[C_index + i * wB + 7] = c[i * 8 + 7];
}
#endif
}
void constantInit(float *data, int size, float val)
{
for (int i = 0; i < size; ++i)
{
data[i] = val;
}
}
/**
* Program main
*/
int main(int argc, char **argv)
{
printf("[Matrix Multiply Using CUDA] - Starting...\n");
int devID = 0;
hipSetDevice(devID);
int N = 1024;
int NITER = 10000;
if (argc == 2)
{
N = atoi(argv[1]);
}
else if (argc == 3)
{
N = atoi(argv[1]);
NITER = atoi(argv[2]);
}
dim3 dimsA(N, N, 1);
dim3 dimsB(N, N, 1);
//if (dimsA.x != dimsB.y)
//{
// printf("Error: outer matrix dimensions must be equal. (%d != %d)\n",
// dimsA.x, dimsB.y);
// exit(EXIT_FAILURE);
//}
printf("MatrixA(%d,%d), MatrixB(%d,%d)\n", dimsA.x, dimsA.y, dimsB.x, dimsB.y);
// set seed for rand()
srand(2006);
// Allocate host memory for matrices A and B
unsigned int size_A = dimsA.x * dimsA.y;
unsigned int mem_size_A = sizeof(float) * size_A;
float *h_A = (float *)malloc(mem_size_A);
unsigned int size_B = dimsB.x * dimsB.y;
unsigned int mem_size_B = sizeof(float) * size_B;
float *h_B = (float *)malloc(mem_size_B);
// Initialize host memory
randomInit(h_A, size_A);
randomInit(h_B, size_B);
// Allocate device memory
float *d_A, *d_B, *d_C;
//checkCudaErrors(hipDeviceSetCacheConfig(hipFuncCachePreferL1));
// Allocate host matrix C
dim3 dimsC(dimsB.x, dimsA.y, 1);
unsigned int mem_size_C = dimsC.x * dimsC.y * sizeof(float);
float *h_C = (float *) malloc(mem_size_C);
checkCudaErrors(hipMalloc((void **)&d_A, mem_size_A));
checkCudaErrors(hipMalloc((void **)&d_B, mem_size_B));
checkCudaErrors(hipMalloc((void **)&d_C, mem_size_C));
// copy host memory to device
checkCudaErrors(hipMemcpy(d_A, h_A, mem_size_A, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_B, h_B, mem_size_B, hipMemcpyHostToDevice));
// Setup execution parameters
dim3 threads(64);//32x32
dim3 grid(dimsB.x/64, dimsA.y/64);
//Performs warmup operation using matrixMul CUDA kernel
hipLaunchKernelGGL(( sgemm_kernel_64), dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B, dimsA.y, dimsA.x, dimsB.x);
printf("done\n");
hipDeviceSynchronize();
// Allocate CUDA events that we'll use for timing
hipEvent_t start, stop;
checkCudaErrors(hipEventCreate(&start));
checkCudaErrors(hipEventCreate(&stop));
// Record the start event
checkCudaErrors(hipEventRecord(start, NULL));
// Execute the kernel
int nIter = NITER;
for (int j = 0; j < nIter; j++)
{
hipLaunchKernelGGL(( sgemm_kernel_64), dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B, dimsA.y, dimsA.x, dimsB.x);
}
// Record the stop event
checkCudaErrors(hipEventRecord(stop, NULL));
// Wait for the stop event to complete
checkCudaErrors(hipEventSynchronize(stop));
float msecTotal = 0.0f;
checkCudaErrors(hipEventElapsedTime(&msecTotal, start, stop));
// Compute and print the performance
float msecPerMatrixMul = msecTotal / nIter;
double flopsPerMatrixMul = 2.0 * (double)dimsA.x * (double)dimsA.y * (double)dimsB.x;
double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (msecPerMatrixMul / 1000.0f);
printf(
"Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops, WorkgroupSize= %u threads/block\n",
gigaFlops,
msecPerMatrixMul,
flopsPerMatrixMul,
threads.x * threads.y);
// Copy result from device to host
checkCudaErrors(hipMemcpy(h_C, d_C, mem_size_C, hipMemcpyDeviceToHost));
printf("Computing result using host CPU...\n");
float *A_T = (float *)malloc(mem_size_A);
for (int i = 0; i < dimsA.x; i++){
for (int j = 0; j < dimsA.y; j++){
A_T[i*dimsA.y + j] = h_A[j*dimsA.x+i];
}
}
float *reference = (float *)malloc(mem_size_C);
matrixMulCPU(reference, A_T, h_B, (unsigned int)dimsA.y, (unsigned int)dimsA.x, (unsigned int)dimsB.x);
//float c08 = 0;
//for (int i = 0; i < dimsB.x; i++)
//{
// c08 += A_T[i] * h_B[i * dimsB.x + 32];
//}
//printf("c08: %f\n", c08);
//bool resCUBLAS = sdkCompareL2fe(reference, h_C, mem_size_C, 1.0e-4f);
//if (resCUBLAS != true)
//{
// printf("COMPARE ERROR!\n");
//}
for (int i = 0; i < dimsA.y; i++)
{
for (int j = 0; j < dimsB.x; j++)
{
if (fabs(reference[i * dimsB.x + j] - h_C[i * dimsB.x + j]) > 1e-3)
{
printf("index (%d, %d)---- reference %f, gpu_data: %f\n", i, j, reference[i * dimsB.x + j], h_C[i * dimsB.x + j]);
return 0; //exit(1);
}
}
}
//printf("done.\n");
//for (int i = 0; i < 100; i++)
// printf("h_C is %f \t", h_C[i]);
//printf("\n");
//printf("Checking computed result for correctness: ");
//bool correct = true;
// test relative error by the formula
// |<x, y>_cpu - <x,y>_gpu|/<|x|, |y|> < eps
//double eps = 1.e-6 ; // machine zero
//for (int i = 0; i < (int)(dimsC.x * dimsC.y); i++)
//{
// double abs_err = fabs(h_C[i] - (dimsA.x * valB));
// double dot_length = dimsA.x;
// double abs_val = fabs(h_C[i]);
// double rel_err = abs_err/abs_val/dot_length ;
// if (rel_err > eps)
// {
// printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n", i, h_C[i], dimsA.x*valB, eps);
// correct = false;
// }
//}
//printf("%s\n", correct ? "Result = PASS" : "Result = FAIL");
// Clean up memory
free(h_A);
free(h_B);
free(h_C);
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
return 0;
}
| 4afb97c8897f8f05a772684f86b82ef87422b150.cu | /**
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/**
* Matrix multiplication: C = A * B.
* Host code.
*
* This sample implements matrix multiplication as described in Chapter 3
* of the programming guide.
* It has been written for clarity of exposition to illustrate various CUDA
* programming principles, not with the goal of providing the most
* performant generic kernel for matrix multiplication.
*
* See also:
* V. Volkov and J. Demmel, "Benchmarking GPUs to tune dense linear algebra,"
* in Proc. 2008 ACM/IEEE Conf. on Supercomputing (SC '08),
* Piscataway, NJ: IEEE Press, 2008, pp. Art. 31:1-11.
*/
// System includes
#include <stdio.h>
#include <assert.h>
// CUDA runtime
#include <cuda_runtime.h>
#include <cublas_v2.h>
// Helper functions and utilities to work with CUDA
#include <helper_functions.h>
#include <helper_cuda.h>
#include <helper_image.h>
#include "common.h"
// Copy one 8-float4-wide slice of the A tile and the B tile from shared
// memory into register double-buffer slot `ir` (0 or 1). `ia`/`ib` are
// float4 indices into smem; the "+ 8" entries fetch the second half of the
// 64-element tile row. Used by sgemm_kernel_64 below.
#define mFetchSmem(ia, ib, ir){ \
a2[0][ir] = smem[ia + 0]; \
a2[1][ir] = smem[ia + 8]; \
b2[0][ir] = smem[ib + 0]; \
b2[1][ir] = smem[ib + 8]; \
}
// 8x8 outer-product accumulation: c[row*8+col] += a[row] * b[col], fully
// unrolled, reading register double-buffer slot `ir`. Rows 0-3 come from
// a2[0], rows 4-7 from a2[1]; columns 0-3 from b2[0], columns 4-7 from b2[1].
#define mRank8x8(ir){ \
c[0*8+0] += a2[0][ir].x * b2[0][ir].x; \
c[1*8+0] += a2[0][ir].y * b2[0][ir].x; \
c[2*8+0] += a2[0][ir].z * b2[0][ir].x; \
c[3*8+0] += a2[0][ir].w * b2[0][ir].x; \
c[4*8+0] += a2[1][ir].x * b2[0][ir].x; \
c[5*8+0] += a2[1][ir].y * b2[0][ir].x; \
c[6*8+0] += a2[1][ir].z * b2[0][ir].x; \
c[7*8+0] += a2[1][ir].w * b2[0][ir].x; \
c[0*8+1] += a2[0][ir].x * b2[0][ir].y; \
c[1*8+1] += a2[0][ir].y * b2[0][ir].y; \
c[2*8+1] += a2[0][ir].z * b2[0][ir].y; \
c[3*8+1] += a2[0][ir].w * b2[0][ir].y; \
c[4*8+1] += a2[1][ir].x * b2[0][ir].y; \
c[5*8+1] += a2[1][ir].y * b2[0][ir].y; \
c[6*8+1] += a2[1][ir].z * b2[0][ir].y; \
c[7*8+1] += a2[1][ir].w * b2[0][ir].y; \
c[0*8+2] += a2[0][ir].x * b2[0][ir].z; \
c[1*8+2] += a2[0][ir].y * b2[0][ir].z; \
c[2*8+2] += a2[0][ir].z * b2[0][ir].z; \
c[3*8+2] += a2[0][ir].w * b2[0][ir].z; \
c[4*8+2] += a2[1][ir].x * b2[0][ir].z; \
c[5*8+2] += a2[1][ir].y * b2[0][ir].z; \
c[6*8+2] += a2[1][ir].z * b2[0][ir].z; \
c[7*8+2] += a2[1][ir].w * b2[0][ir].z; \
c[0*8+3] += a2[0][ir].x * b2[0][ir].w; \
c[1*8+3] += a2[0][ir].y * b2[0][ir].w; \
c[2*8+3] += a2[0][ir].z * b2[0][ir].w; \
c[3*8+3] += a2[0][ir].w * b2[0][ir].w; \
c[4*8+3] += a2[1][ir].x * b2[0][ir].w; \
c[5*8+3] += a2[1][ir].y * b2[0][ir].w; \
c[6*8+3] += a2[1][ir].z * b2[0][ir].w; \
c[7*8+3] += a2[1][ir].w * b2[0][ir].w; \
c[0*8+4] += a2[0][ir].x * b2[1][ir].x; \
c[1*8+4] += a2[0][ir].y * b2[1][ir].x; \
c[2*8+4] += a2[0][ir].z * b2[1][ir].x; \
c[3*8+4] += a2[0][ir].w * b2[1][ir].x; \
c[4*8+4] += a2[1][ir].x * b2[1][ir].x; \
c[5*8+4] += a2[1][ir].y * b2[1][ir].x; \
c[6*8+4] += a2[1][ir].z * b2[1][ir].x; \
c[7*8+4] += a2[1][ir].w * b2[1][ir].x; \
c[0*8+5] += a2[0][ir].x * b2[1][ir].y; \
c[1*8+5] += a2[0][ir].y * b2[1][ir].y; \
c[2*8+5] += a2[0][ir].z * b2[1][ir].y; \
c[3*8+5] += a2[0][ir].w * b2[1][ir].y; \
c[4*8+5] += a2[1][ir].x * b2[1][ir].y; \
c[5*8+5] += a2[1][ir].y * b2[1][ir].y; \
c[6*8+5] += a2[1][ir].z * b2[1][ir].y; \
c[7*8+5] += a2[1][ir].w * b2[1][ir].y; \
c[0*8+6] += a2[0][ir].x * b2[1][ir].z; \
c[1*8+6] += a2[0][ir].y * b2[1][ir].z; \
c[2*8+6] += a2[0][ir].z * b2[1][ir].z; \
c[3*8+6] += a2[0][ir].w * b2[1][ir].z; \
c[4*8+6] += a2[1][ir].x * b2[1][ir].z; \
c[5*8+6] += a2[1][ir].y * b2[1][ir].z; \
c[6*8+6] += a2[1][ir].z * b2[1][ir].z; \
c[7*8+6] += a2[1][ir].w * b2[1][ir].z; \
c[0*8+7] += a2[0][ir].x * b2[1][ir].w; \
c[1*8+7] += a2[0][ir].y * b2[1][ir].w; \
c[2*8+7] += a2[0][ir].z * b2[1][ir].w; \
c[3*8+7] += a2[0][ir].w * b2[1][ir].w; \
c[4*8+7] += a2[1][ir].x * b2[1][ir].w; \
c[5*8+7] += a2[1][ir].y * b2[1][ir].w; \
c[6*8+7] += a2[1][ir].z * b2[1][ir].w; \
c[7*8+7] += a2[1][ir].w * b2[1][ir].w; \
}
/**
 * CPU reference matrix multiply: C = A * B.
 * A is hA x wA, B is wA x wB, C is hA x wB, all dense row-major.
 * Accumulates each dot product in double to reduce round-off versus the
 * single-precision GPU result it is compared against.
 */
void
matrixMulCPU(float *C, const float *A, const float *B, unsigned int hA, unsigned int wA, unsigned int wB)
{
    for (unsigned int row = 0; row < hA; ++row)
    {
        const float *a_row = A + row * wA;   // row of A is reused for every column of B
#pragma omp parallel for num_threads(16)
        for (unsigned int col = 0; col < wB; ++col)
        {
            double acc = 0;
            for (unsigned int k = 0; k < wA; ++k)
                acc += (double)a_row[k] * (double)B[k * wB + col];
            C[row * wB + col] = (float)acc;
        }
    }
}
// Fill `data` with `size` pseudo-random floats in [0, 1] drawn from rand().
// Call srand() beforehand for a reproducible sequence; size <= 0 is a no-op.
void randomInit(float *data, int size)
{
    int k = 0;
    while (k < size)
    {
        data[k] = rand() / (float)RAND_MAX;
        ++k;
    }
}
// Rank-1 update of an 8x8 accumulator tile held in registers:
// C[row*8 + col] += A[row] * B[col] for all row, col in [0, 8).
// C is the 64-float accumulator, A/B are 8-float fragments.
__device__ __forceinline__ void d_rank8x8(float *C, const float *A, const float *B)
{
    #pragma unroll
    for (int col = 0; col < 8; ++col)
    {
        const float bv = B[col];
        #pragma unroll
        for (int row = 0; row < 8; ++row)
            C[row * 8 + col] += A[row] * bv;
    }
}
/**
* Matrix multiplication (CUDA Kernel) on the device: C = A * B
* wA is A's width and wB is B's width
*/
//__launch_bounds__(64, 8) //MAX_THREADS_PER_BLOCK, MIN_BLOCKS_PER_MP
// 64x64-tile SGEMM: C = A * B with A stored column-major (leading dim hA)
// and B/C row-major (leading dim wB) -- the host builds A^T for the CPU
// reference, which matches this layout. Launched with blockDim = 64 threads,
// gridDim = (wB/64, hA/64); each thread accumulates an 8x8 register tile.
// Uses a double-buffered pipeline: float4 global loads (a1/b1) overlap with
// shared-memory fetches (a2/b2, via mFetchSmem) and 8x8 FMA bursts (mRank8x8).
// ld_gbl_cs / st_gbl_cs come from common.h -- presumably cache-streaming
// global load/store wrappers; TODO confirm their exact semantics.
// Preconditions (from the indexing below): hA, wA, wB all multiples of 8;
// the 64-tile launch in main() further assumes multiples of 64.
extern "C"
__global__ void
sgemm_kernel_64(float *C, float *A, float *B, int hA, int wA, int wB)
{
// Two 64-float4 tiles for A and two for B, each double-buffered (x2).
__shared__ float4 smem[2*64 * 2 * 2];
float c[64] = {0.0f};//thread register initialized zero
float4 a1[2], b1[2]; // registers for 1st prefetch from global memory
//float2 a1[4], b1[4];
float4 a2[2][2], b2[2][2]; // registers for 2nd prefetch from shared memory
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tid = threadIdx.x;
//int tid_x = tid & 0x7;
//int tid_y = tid / 8;
// Decompose tid into a 16x4 load pattern: tid15 picks the float4 column,
// tid4 picks the row within the 8-row panel.
int tid15 = (tid & 15);
int tid4 = (tid >> 4);
int aBegin = 64 * by;
//int aEnd = aBegin + hA*(wA - 1);
int aStep = 8 * hA;
int bBegin = 64 * bx;
int bStep = 8 * wB;
int wA8 = wA - 8;
A += aBegin + tid4*hA + (tid15<<2);
B += bBegin + tid4*wB + (tid15<<2);
//AA = A + hA * 4;
//BB = B + wB * 4;
// 1st prefetch from global memory
a1[0] = ld_gbl_cs((const float4 *)(A ));
//A += hA * 4;
a1[1] = ld_gbl_cs((const float4 *)(A + hA*4));
b1[0] = ld_gbl_cs((const float4 *)(B ));
//B += wB * 4;
b1[1] = ld_gbl_cs((const float4 *)(B + wB*4));
// shared offsets; the ^= 128 below flips between the two smem buffers
int sh_offs = (tid4<<4) + tid15;
int sh_a = ((tid4<<1) | (tid&1));//tid_y;
int sh_b = ((tid>>1) & 7) + 256;//tid_x + 256;
// shared memory double buffer
smem[sh_offs ] = a1[0];
smem[sh_offs + 64 ] = a1[1];
smem[sh_offs + 256] = b1[0];
smem[sh_offs + 64 + 256] = b1[1];
__syncthreads();
// 2nd prefetch from shared memory
mFetchSmem(sh_a+0*16, sh_b+0*16, 0);//shared memory-->register, memory access
// main loop: one iteration per 8-wide K panel; the panel loaded by the
// previous iteration is consumed while the next one streams in.
for (int k = 0; k < wA; k += 8)
//for (int a = aBegin + aStep, b = bBegin + bStep; a <= aEnd; a += aStep, b += bStep)
{
A += aStep;
B += bStep;
sh_offs ^= 128;
// 1st prefetch from global memory (skipped on the last panel)
if (k < wA8)
{
a1[0] = ld_gbl_cs((const float4 *)(A ));
a1[1] = ld_gbl_cs((const float4 *)(A + hA*4));
b1[0] = ld_gbl_cs((const float4 *)(B ));
b1[1] = ld_gbl_cs((const float4 *)(B + wB*4));
}
// compute sub matrix: fetch slice i+1 while multiplying slice i
mFetchSmem(sh_a+1*16, sh_b+1*16, 1);//shared memory-->register, memory access
mRank8x8(0); //register->register, compute
mFetchSmem(sh_a+2*16, sh_b+2*16, 0);//shared memory-->register, memory access
mRank8x8(1); //register->register, compute
mFetchSmem(sh_a+3*16, sh_b+3*16, 1);//shared memory-->register, memory access
mRank8x8(0); //register->register, compute
mFetchSmem(sh_a+4*16, sh_b+4*16, 0);//shared memory-->register, memory access
mRank8x8(1); //register->register, compute
mFetchSmem(sh_a+5*16, sh_b+5*16, 1);//shared memory-->register, memory access
mRank8x8(0); //register->register, compute
mFetchSmem(sh_a+6*16, sh_b+6*16, 0);//shared memory-->register, memory access
mRank8x8(1); //register->register, compute
mFetchSmem(sh_a+7*16, sh_b+7*16, 1);//shared memory-->register, memory access
// shift read index to the other smem buffer
sh_a ^= 128;
sh_b ^= 128;
// compute the last sub matrix
mRank8x8(0); //register->register, compute
// shared memory double buffer: publish the freshly loaded panel
if (k < wA8)
{
smem[sh_offs ] = a1[0];
smem[sh_offs + 64 ] = a1[1];
smem[sh_offs + 256] = b1[0];
smem[sh_offs + 64 + 256] = b1[1];
}
mRank8x8(1); //register->register, compute
__syncthreads();
// 2nd prefetch from shared memory
if (k < wA8)
{
mFetchSmem(sh_a+0*16, sh_b+0*16, 0);//shared memory-->register, memory access
}
}
//__syncthreads();
//if (tid_x == 0 && tid_y == 2 && bx == 0 && by == 0)
//{
// printf("c[0] : %f\n", c[0]);
//}
// store the 8*8 result to shared
#if 1
int C_index = wB*64*by + 64*bx + tid4*8*wB + tid15*4;
int tid31 = tid & 31;
int cs = ((tid31/2)&7) + (tid31&1)*16 + tid4*32;
// NOTE(review): the smem stores below are read back by *other lanes of the
// same warp* with no __syncwarp() in between -- this relies on pre-Volta
// implicit warp-synchronous execution; verify on Volta+ (independent thread
// scheduling) or insert __syncwarp() between the stores and the loads.
#pragma unroll
for (int i = 0; i < 8; i++)
{
// reform C to shared memory
smem[cs + 0] = make_float4(c[i*8+0], c[i*8+1], c[i*8+2], c[i*8+3]);
smem[cs + 8] = make_float4(c[i*8+4], c[i*8+5], c[i*8+6], c[i*8+7]);
//if (i == 0 && tid_x == 0 && tid_y == 2 && bx == 0 && by == 0)
// printf("%d: %f\n", cs, smem[cs + 0].x);
//if (C_index + (i + (i/4)*32 + 4)*wB >= 1024*1024 - 1)
// printf("%d %d %d %d\n", bx, by, tid, (i + (i/4)*32 + 4)*wB);
// coalesing access
st_gbl_cs((const float4 *)(C + C_index + (i + (i/4)*28 + 0)*wB), smem[tid15 + (tid4<<5)+ 0] );
st_gbl_cs((const float4 *)(C + C_index + (i + (i/4)*28 + 4)*wB), smem[tid15 + (tid4<<5)+16] );
}
#else
// Straightforward (non-coalesced) store path, kept for reference.
int C_index = wB * 64 * by + tid_y * 8 * wB + 64 * bx + tid_x * 8;
#pragma unroll
for (int i = 0; i < 8; i++)
{
C[C_index + i * wB + 0] = c[i * 8 + 0];
C[C_index + i * wB + 1] = c[i * 8 + 1];
C[C_index + i * wB + 2] = c[i * 8 + 2];
C[C_index + i * wB + 3] = c[i * 8 + 3];
C[C_index + i * wB + 4] = c[i * 8 + 4];
C[C_index + i * wB + 5] = c[i * 8 + 5];
C[C_index + i * wB + 6] = c[i * 8 + 6];
C[C_index + i * wB + 7] = c[i * 8 + 7];
}
#endif
}
// Fill `data` with `size` copies of `val`; size <= 0 is a no-op.
void constantInit(float *data, int size, float val)
{
    int idx = 0;
    while (idx < size)
        data[idx++] = val;
}
/**
* Program main
*/
/**
 * Program main: benchmarks sgemm_kernel_64 (C = A * B) against a double-
 * precision CPU reference and reports GFLOP/s.
 *
 * Usage: prog [N] [NITER]
 *   N     -- square matrix dimension (default 1024); must be a multiple of 64
 *            since the kernel works on 64x64 tiles.
 *   NITER -- timed kernel launches (default 10000).
 *
 * Fixes vs. the previous revision: A_T and reference were leaked, the CUDA
 * events were never destroyed, a result mismatch returned without any
 * cleanup, and launch errors were never checked.
 */
int main(int argc, char **argv)
{
    printf("[Matrix Multiply Using CUDA] - Starting...\n");
    int devID = 0;
    cudaSetDevice(devID);

    // Problem size and iteration count from the command line.
    int N = 1024;
    int NITER = 10000;
    if (argc == 2)
    {
        N = atoi(argv[1]);
    }
    else if (argc == 3)
    {
        N = atoi(argv[1]);
        NITER = atoi(argv[2]);
    }
    dim3 dimsA(N, N, 1);
    dim3 dimsB(N, N, 1);
    printf("MatrixA(%d,%d), MatrixB(%d,%d)\n", dimsA.x, dimsA.y, dimsB.x, dimsB.y);

    // set seed for rand()
    srand(2006);

    // Allocate and initialize host matrices A and B.
    unsigned int size_A = dimsA.x * dimsA.y;
    unsigned int mem_size_A = sizeof(float) * size_A;
    float *h_A = (float *)malloc(mem_size_A);
    unsigned int size_B = dimsB.x * dimsB.y;
    unsigned int mem_size_B = sizeof(float) * size_B;
    float *h_B = (float *)malloc(mem_size_B);
    randomInit(h_A, size_A);
    randomInit(h_B, size_B);

    // Allocate device matrices and the host result buffer.
    float *d_A, *d_B, *d_C;
    dim3 dimsC(dimsB.x, dimsA.y, 1);
    unsigned int mem_size_C = dimsC.x * dimsC.y * sizeof(float);
    float *h_C = (float *) malloc(mem_size_C);
    checkCudaErrors(cudaMalloc((void **)&d_A, mem_size_A));
    checkCudaErrors(cudaMalloc((void **)&d_B, mem_size_B));
    checkCudaErrors(cudaMalloc((void **)&d_C, mem_size_C));

    // copy host memory to device
    checkCudaErrors(cudaMemcpy(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice));
    checkCudaErrors(cudaMemcpy(d_B, h_B, mem_size_B, cudaMemcpyHostToDevice));

    // Launch configuration: one 64-thread block per 64x64 output tile.
    dim3 threads(64);
    dim3 grid(dimsB.x/64, dimsA.y/64);

    // Warmup launch; also surfaces any launch-configuration error up front.
    sgemm_kernel_64<<< grid, threads >>>(d_C, d_A, d_B, dimsA.y, dimsA.x, dimsB.x);
    checkCudaErrors(cudaGetLastError());
    printf("done\n");
    checkCudaErrors(cudaDeviceSynchronize());

    // Time NITER back-to-back launches with CUDA events.
    cudaEvent_t start, stop;
    checkCudaErrors(cudaEventCreate(&start));
    checkCudaErrors(cudaEventCreate(&stop));
    checkCudaErrors(cudaEventRecord(start, NULL));
    int nIter = NITER;
    for (int j = 0; j < nIter; j++)
    {
        sgemm_kernel_64<<< grid, threads >>>(d_C, d_A, d_B, dimsA.y, dimsA.x, dimsB.x);
    }
    checkCudaErrors(cudaEventRecord(stop, NULL));
    checkCudaErrors(cudaEventSynchronize(stop));
    float msecTotal = 0.0f;
    checkCudaErrors(cudaEventElapsedTime(&msecTotal, start, stop));

    // Compute and print the performance
    float msecPerMatrixMul = msecTotal / nIter;
    double flopsPerMatrixMul = 2.0 * (double)dimsA.x * (double)dimsA.y * (double)dimsB.x;
    double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (msecPerMatrixMul / 1000.0f);
    printf(
        "Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops, WorkgroupSize= %u threads/block\n",
        gigaFlops,
        msecPerMatrixMul,
        flopsPerMatrixMul,
        threads.x * threads.y);

    // Copy result from device to host
    checkCudaErrors(cudaMemcpy(h_C, d_C, mem_size_C, cudaMemcpyDeviceToHost));

    // CPU reference: the kernel reads A with leading dimension hA
    // (column-major), so transpose A before the row-major CPU multiply.
    printf("Computing result using host CPU...\n");
    float *A_T = (float *)malloc(mem_size_A);
    for (int i = 0; i < dimsA.x; i++){
        for (int j = 0; j < dimsA.y; j++){
            A_T[i*dimsA.y + j] = h_A[j*dimsA.x+i];
        }
    }
    float *reference = (float *)malloc(mem_size_C);
    matrixMulCPU(reference, A_T, h_B, (unsigned int)dimsA.y, (unsigned int)dimsA.x, (unsigned int)dimsB.x);

    // Element-wise comparison (absolute tolerance 1e-3); report only the
    // first mismatch, then fall through to cleanup.
    bool mismatch = false;
    for (int i = 0; i < dimsA.y && !mismatch; i++)
    {
        for (int j = 0; j < dimsB.x; j++)
        {
            if (fabs(reference[i * dimsB.x + j] - h_C[i * dimsB.x + j]) > 1e-3)
            {
                printf("index (%d, %d)---- reference %f, gpu_data: %f\n", i, j, reference[i * dimsB.x + j], h_C[i * dimsB.x + j]);
                mismatch = true;
                break;
            }
        }
    }

    // Clean up memory (including buffers/events previously leaked)
    free(A_T);
    free(reference);
    free(h_A);
    free(h_B);
    free(h_C);
    checkCudaErrors(cudaEventDestroy(start));
    checkCudaErrors(cudaEventDestroy(stop));
    checkCudaErrors(cudaFree(d_A));
    checkCudaErrors(cudaFree(d_B));
    checkCudaErrors(cudaFree(d_C));
    return 0;
}
|
cde620121bb8699b32075e32f1f6717a6ff41cb9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 2011-2017 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "batch_norm_layer_updater_cuda.h"
#include "neural_network_cudnn_exception.h"
#include "util_cuda.h"
#include "cudnn_util.h"
#include "../batch_norm_layer.h"
namespace nnforge
{
namespace cuda
{
// Accumulates the gradient pulling the layer's stored statistics towards the
// per-batch statistics produced by cuDNN:
//   gradient += mult * (target - current)
// for both the mean and the inverse stddev; one thread per feature map.
__global__ void batch_norm_update_mean_invsigma_gradient_upd_kernel(
float * __restrict gradient_mean,
const float * __restrict target_mean,
const float * __restrict current_mean,
float * __restrict gradient_invstddev,
const float * __restrict target_invstddev,
const float * __restrict current_invstddev,
float mult,
int elem_count)
{
int elem_id = blockDim.x * blockIdx.x + threadIdx.x;
if (elem_id < elem_count)
{
gradient_mean[elem_id] += mult * (target_mean[elem_id] - current_mean[elem_id]);
gradient_invstddev[elem_id] += mult * (target_invstddev[elem_id] - current_invstddev[elem_id]);
}
}
const float batch_norm_layer_updater_cuda::mean_and_variance_gradient_slope = 1.0F; // As if it were MSE/2
// Create the cuDNN tensor descriptors reused by every propagation call.
batch_norm_layer_updater_cuda::batch_norm_layer_updater_cuda()
{
cudnn_safe_call(cudnnCreateTensorDescriptor(&weights_desc));
cudnn_safe_call(cudnnCreateTensorDescriptor(&data_desc));
}
// Destructor deliberately ignores the destroy return codes (no-throw cleanup).
batch_norm_layer_updater_cuda::~batch_norm_layer_updater_cuda()
{
cudnnDestroyTensorDescriptor(weights_desc);
cudnnDestroyTensorDescriptor(data_desc);
}
// Forward pass via cuDNN batch-norm in training mode. The batch mean and
// inverse stddev are saved into temporary_fixed_buffer (mean first, then
// inv-stddev, feature_map_count floats each -- see
// get_temporary_fixed_buffer_size) for reuse in the backward pass.
// Running-statistics outputs are not requested (null pointers below).
void batch_norm_layer_updater_cuda::enqueue_forward_propagation(
hipStream_t stream_id,
cuda_linear_buffer_device::ptr output_buffer,
const std::vector<cuda_linear_buffer_device::const_ptr>& schema_data,
const std::vector<cuda_linear_buffer_device::const_ptr>& data,
const std::vector<cuda_linear_buffer_device::const_ptr>& data_custom,
const std::vector<cuda_linear_buffer_device::const_ptr>& input_buffers,
const std::vector<cuda_linear_buffer_device::const_ptr>& persistent_working_data,
cuda_linear_buffer_device::ptr temporary_working_fixed_buffer,
cuda_linear_buffer_device::ptr temporary_working_per_entry_buffer,
cuda_linear_buffer_device::ptr temporary_fixed_buffer,
cuda_linear_buffer_device::ptr temporary_per_entry_buffer,
unsigned int entry_count)
{
cudnn_safe_call(cudnnSetStream(cuda_config->get_cudnn_handle(), stream_id));
{
cudnn_util::set_tensor_descriptor(
data_desc,
output_configuration_specific,
entry_count);
float alpha = 1.0F;
float beta = 0.0F;
#if CUDNN_MAJOR < 7
const cudnnBatchNormMode_t mode = CUDNN_BATCHNORM_SPATIAL;
#else
const cudnnBatchNormMode_t mode = CUDNN_BATCHNORM_SPATIAL_PERSISTENT;
#endif
cudnn_safe_call(cudnnBatchNormalizationForwardTraining(
cuda_config->get_cudnn_handle(),
mode,
&alpha,
&beta,
data_desc,
*input_buffers[0],
data_desc,
*output_buffer,
weights_desc,
*data[0],
*data[1],
1.0,
0,
0,
epsilon,
((float *)*temporary_fixed_buffer),
((float *)*temporary_fixed_buffer) + output_configuration_specific.feature_map_count));
}
}
// Backward pass: cuDNN produces the input deltas and accumulates the
// gamma/beta gradients (beta_weights = 1), then a small kernel adds the
// mean/inv-stddev gradients scaled by slope * entry_count, reusing the
// statistics saved by the forward pass.
void batch_norm_layer_updater_cuda::enqueue_backward_data_and_weights_propagation(
hipStream_t stream_id,
const std::vector<cuda_linear_buffer_device::ptr> input_errors_buffers,
cuda_linear_buffer_device::const_ptr output_errors_buffer,
const std::vector<cuda_linear_buffer_device::const_ptr>& schema_data,
const std::vector<cuda_linear_buffer_device::ptr>& gradient,
const std::vector<cuda_linear_buffer_device::const_ptr>& data,
const std::vector<cuda_linear_buffer_device::const_ptr>& data_custom,
const std::vector<cuda_linear_buffer_device::const_ptr>& input_neurons_buffers,
cuda_linear_buffer_device::const_ptr output_neurons_buffer,
const std::vector<cuda_linear_buffer_device::const_ptr>& persistent_working_data,
cuda_linear_buffer_device::ptr temporary_working_fixed_buffer,
cuda_linear_buffer_device::ptr temporary_working_per_entry_buffer,
cuda_linear_buffer_device::const_ptr temporary_fixed_buffer,
cuda_linear_buffer_device::const_ptr temporary_per_entry_buffer,
bool add_update_to_destination,
unsigned int entry_count)
{
cudnn_safe_call(cudnnSetStream(cuda_config->get_cudnn_handle(), stream_id));
{
cudnn_util::set_tensor_descriptor(
data_desc,
output_configuration_specific,
entry_count);
float alpha_data = 1.0F;
float beta_data = add_update_to_destination ? 1.0F : 0.0F;
float alpha_weights = 1.0F;
float beta_weights = 1.0F;
#if CUDNN_MAJOR < 7
const cudnnBatchNormMode_t mode = CUDNN_BATCHNORM_SPATIAL;
#else
const cudnnBatchNormMode_t mode = CUDNN_BATCHNORM_SPATIAL_PERSISTENT;
#endif
cudnn_safe_call(cudnnBatchNormalizationBackward(
cuda_config->get_cudnn_handle(),
mode,
&alpha_data,
&beta_data,
&alpha_weights,
&beta_weights,
data_desc,
*input_neurons_buffers[0],
data_desc,
*output_errors_buffer,
data_desc,
*input_errors_buffers[0],
weights_desc,
*data[0],
*gradient[0],
*gradient[1],
epsilon,
((const float *)*temporary_fixed_buffer),
((const float *)*temporary_fixed_buffer) + output_configuration_specific.feature_map_count));
}
{
float mult = mean_and_variance_gradient_slope * static_cast<float>(entry_count);
int elem_count = output_configuration_specific.feature_map_count;
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
elem_count);
hipLaunchKernelGGL(( batch_norm_update_mean_invsigma_gradient_upd_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id,
*gradient[2],
((const float *)*temporary_fixed_buffer),
*data[2],
*gradient[3],
((const float *)*temporary_fixed_buffer) + output_configuration_specific.feature_map_count,
*data[3],
mult,
elem_count);
}
}
// Backward needs the forward input (passed to cudnnBatchNormalizationBackward).
bool batch_norm_layer_updater_cuda::is_backward_data_and_weights_dependent_on_input_buffer(unsigned int data_input_index) const
{
return true;
}
// The forward output is not consumed by the backward pass.
bool batch_norm_layer_updater_cuda::is_backward_data_and_weights_dependent_on_output_buffer() const
{
return false;
}
// Read epsilon from the schema and size the per-feature-map weights
// descriptor; cuDNN rejects epsilons below CUDNN_BN_MIN_EPSILON.
void batch_norm_layer_updater_cuda::updater_configured()
{
std::shared_ptr<const batch_norm_layer> layer_derived = std::dynamic_pointer_cast<const batch_norm_layer>(layer_schema);
epsilon = layer_derived->epsilon;
if (epsilon < CUDNN_BN_MIN_EPSILON)
throw neural_network_exception((boost::format("Too small epsilon specified: %1%, cuDNN requires at least %2%") % epsilon % CUDNN_BN_MIN_EPSILON).str());
cudnn_util::set_tensor_bn_weights_descriptor(
weights_desc,
output_configuration_specific.feature_map_count,
static_cast<unsigned int>(output_configuration_specific.dimension_sizes.size()));
}
// Room for the saved mean plus the saved inverse stddev (one float of each
// per feature map) shared between forward and backward.
size_t batch_norm_layer_updater_cuda::get_temporary_fixed_buffer_size() const
{
return output_configuration_specific.feature_map_count * 2 * sizeof(float);
}
// The backward pass may write its input deltas in place over input 0.
int batch_norm_layer_updater_cuda::get_input_index_layer_can_write(const layer_action& action) const
{
if (action.get_action_type() == layer_action::backward_data_and_weights)
return 0;
else
return -1;
}
}
}
| cde620121bb8699b32075e32f1f6717a6ff41cb9.cu | /*
* Copyright 2011-2017 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "batch_norm_layer_updater_cuda.h"
#include "neural_network_cudnn_exception.h"
#include "util_cuda.h"
#include "cudnn_util.h"
#include "../batch_norm_layer.h"
namespace nnforge
{
namespace cuda
{
// Accumulates the gradient pulling the layer's stored statistics towards the
// per-batch statistics produced by cuDNN:
//   gradient += mult * (target - current)
// for both the mean and the inverse stddev; one thread per feature map.
__global__ void batch_norm_update_mean_invsigma_gradient_upd_kernel(
float * __restrict gradient_mean,
const float * __restrict target_mean,
const float * __restrict current_mean,
float * __restrict gradient_invstddev,
const float * __restrict target_invstddev,
const float * __restrict current_invstddev,
float mult,
int elem_count)
{
int elem_id = blockDim.x * blockIdx.x + threadIdx.x;
if (elem_id < elem_count)
{
gradient_mean[elem_id] += mult * (target_mean[elem_id] - current_mean[elem_id]);
gradient_invstddev[elem_id] += mult * (target_invstddev[elem_id] - current_invstddev[elem_id]);
}
}
const float batch_norm_layer_updater_cuda::mean_and_variance_gradient_slope = 1.0F; // As if it were MSE/2
// Create the cuDNN tensor descriptors reused by every propagation call.
batch_norm_layer_updater_cuda::batch_norm_layer_updater_cuda()
{
cudnn_safe_call(cudnnCreateTensorDescriptor(&weights_desc));
cudnn_safe_call(cudnnCreateTensorDescriptor(&data_desc));
}
// Destructor deliberately ignores the destroy return codes (no-throw cleanup).
batch_norm_layer_updater_cuda::~batch_norm_layer_updater_cuda()
{
cudnnDestroyTensorDescriptor(weights_desc);
cudnnDestroyTensorDescriptor(data_desc);
}
// Forward pass via cuDNN batch-norm in training mode. The batch mean and
// inverse stddev are saved into temporary_fixed_buffer (mean first, then
// inv-stddev, feature_map_count floats each -- see
// get_temporary_fixed_buffer_size) for reuse in the backward pass.
// Running-statistics outputs are not requested (null pointers below).
void batch_norm_layer_updater_cuda::enqueue_forward_propagation(
cudaStream_t stream_id,
cuda_linear_buffer_device::ptr output_buffer,
const std::vector<cuda_linear_buffer_device::const_ptr>& schema_data,
const std::vector<cuda_linear_buffer_device::const_ptr>& data,
const std::vector<cuda_linear_buffer_device::const_ptr>& data_custom,
const std::vector<cuda_linear_buffer_device::const_ptr>& input_buffers,
const std::vector<cuda_linear_buffer_device::const_ptr>& persistent_working_data,
cuda_linear_buffer_device::ptr temporary_working_fixed_buffer,
cuda_linear_buffer_device::ptr temporary_working_per_entry_buffer,
cuda_linear_buffer_device::ptr temporary_fixed_buffer,
cuda_linear_buffer_device::ptr temporary_per_entry_buffer,
unsigned int entry_count)
{
cudnn_safe_call(cudnnSetStream(cuda_config->get_cudnn_handle(), stream_id));
{
cudnn_util::set_tensor_descriptor(
data_desc,
output_configuration_specific,
entry_count);
float alpha = 1.0F;
float beta = 0.0F;
#if CUDNN_MAJOR < 7
const cudnnBatchNormMode_t mode = CUDNN_BATCHNORM_SPATIAL;
#else
const cudnnBatchNormMode_t mode = CUDNN_BATCHNORM_SPATIAL_PERSISTENT;
#endif
cudnn_safe_call(cudnnBatchNormalizationForwardTraining(
cuda_config->get_cudnn_handle(),
mode,
&alpha,
&beta,
data_desc,
*input_buffers[0],
data_desc,
*output_buffer,
weights_desc,
*data[0],
*data[1],
1.0,
0,
0,
epsilon,
((float *)*temporary_fixed_buffer),
((float *)*temporary_fixed_buffer) + output_configuration_specific.feature_map_count));
}
}
// Backward pass: cuDNN produces the input deltas and accumulates the
// gamma/beta gradients (beta_weights = 1), then a small kernel adds the
// mean/inv-stddev gradients scaled by slope * entry_count, reusing the
// statistics saved by the forward pass.
void batch_norm_layer_updater_cuda::enqueue_backward_data_and_weights_propagation(
cudaStream_t stream_id,
const std::vector<cuda_linear_buffer_device::ptr> input_errors_buffers,
cuda_linear_buffer_device::const_ptr output_errors_buffer,
const std::vector<cuda_linear_buffer_device::const_ptr>& schema_data,
const std::vector<cuda_linear_buffer_device::ptr>& gradient,
const std::vector<cuda_linear_buffer_device::const_ptr>& data,
const std::vector<cuda_linear_buffer_device::const_ptr>& data_custom,
const std::vector<cuda_linear_buffer_device::const_ptr>& input_neurons_buffers,
cuda_linear_buffer_device::const_ptr output_neurons_buffer,
const std::vector<cuda_linear_buffer_device::const_ptr>& persistent_working_data,
cuda_linear_buffer_device::ptr temporary_working_fixed_buffer,
cuda_linear_buffer_device::ptr temporary_working_per_entry_buffer,
cuda_linear_buffer_device::const_ptr temporary_fixed_buffer,
cuda_linear_buffer_device::const_ptr temporary_per_entry_buffer,
bool add_update_to_destination,
unsigned int entry_count)
{
cudnn_safe_call(cudnnSetStream(cuda_config->get_cudnn_handle(), stream_id));
{
cudnn_util::set_tensor_descriptor(
data_desc,
output_configuration_specific,
entry_count);
float alpha_data = 1.0F;
float beta_data = add_update_to_destination ? 1.0F : 0.0F;
float alpha_weights = 1.0F;
float beta_weights = 1.0F;
#if CUDNN_MAJOR < 7
const cudnnBatchNormMode_t mode = CUDNN_BATCHNORM_SPATIAL;
#else
const cudnnBatchNormMode_t mode = CUDNN_BATCHNORM_SPATIAL_PERSISTENT;
#endif
cudnn_safe_call(cudnnBatchNormalizationBackward(
cuda_config->get_cudnn_handle(),
mode,
&alpha_data,
&beta_data,
&alpha_weights,
&beta_weights,
data_desc,
*input_neurons_buffers[0],
data_desc,
*output_errors_buffer,
data_desc,
*input_errors_buffers[0],
weights_desc,
*data[0],
*gradient[0],
*gradient[1],
epsilon,
((const float *)*temporary_fixed_buffer),
((const float *)*temporary_fixed_buffer) + output_configuration_specific.feature_map_count));
}
{
float mult = mean_and_variance_gradient_slope * static_cast<float>(entry_count);
int elem_count = output_configuration_specific.feature_map_count;
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
elem_count);
batch_norm_update_mean_invsigma_gradient_upd_kernel<<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>(
*gradient[2],
((const float *)*temporary_fixed_buffer),
*data[2],
*gradient[3],
((const float *)*temporary_fixed_buffer) + output_configuration_specific.feature_map_count,
*data[3],
mult,
elem_count);
}
}
// Backward needs the forward input (passed to cudnnBatchNormalizationBackward).
bool batch_norm_layer_updater_cuda::is_backward_data_and_weights_dependent_on_input_buffer(unsigned int data_input_index) const
{
return true;
}
// The forward output is not consumed by the backward pass.
bool batch_norm_layer_updater_cuda::is_backward_data_and_weights_dependent_on_output_buffer() const
{
return false;
}
// Read epsilon from the schema and size the per-feature-map weights
// descriptor; cuDNN rejects epsilons below CUDNN_BN_MIN_EPSILON.
void batch_norm_layer_updater_cuda::updater_configured()
{
std::shared_ptr<const batch_norm_layer> layer_derived = std::dynamic_pointer_cast<const batch_norm_layer>(layer_schema);
epsilon = layer_derived->epsilon;
if (epsilon < CUDNN_BN_MIN_EPSILON)
throw neural_network_exception((boost::format("Too small epsilon specified: %1%, cuDNN requires at least %2%") % epsilon % CUDNN_BN_MIN_EPSILON).str());
cudnn_util::set_tensor_bn_weights_descriptor(
weights_desc,
output_configuration_specific.feature_map_count,
static_cast<unsigned int>(output_configuration_specific.dimension_sizes.size()));
}
// Room for the saved mean plus the saved inverse stddev (one float of each
// per feature map) shared between forward and backward.
size_t batch_norm_layer_updater_cuda::get_temporary_fixed_buffer_size() const
{
return output_configuration_specific.feature_map_count * 2 * sizeof(float);
}
// The backward pass may write its input deltas in place over input 0.
int batch_norm_layer_updater_cuda::get_input_index_layer_can_write(const layer_action& action) const
{
if (action.get_action_type() == layer_action::backward_data_and_weights)
return 0;
else
return -1;
}
}
}
|
fa052172357e078db14f102f420e6b932c7220a4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@generated from magmablas/zlascl.cu, normal z -> d, Wed Jan 2 14:18:51 2019
@author Mark Gates
*/
#include "magma_internal.h"
#define NB 64
// each thread block does one NB x n block row of A.
// each thread does one row, starting from left edge and moving right.
// Scale every entry of the m x n column-major matrix A by `mul`.
// One thread per row; launched with NB-thread 1-D blocks.
__global__ void
dlascl_full(
    int m, int n, double mul,
    double* A, int lda)
{
    const int row = blockIdx.x * NB + threadIdx.x;
    if (row >= m)
        return;
    double* elem = A + row;
    for (int col = 0; col < n; ++col, elem += lda)
        *elem *= mul;
}
// each thread block does one NB x n block row of A.
// each thread does one row, starting from left edge and moving right to diagonal.
// Scale the lower-triangular part (columns 0..row, clipped to n-1) of the
// m x n column-major matrix A by `mul`. One thread per row.
__global__ void
dlascl_lower(
    int m, int n, double mul,
    double* A, int lda)
{
    const int row = blockIdx.x * NB + threadIdx.x;
    if (row >= m)
        return;
    const int last_col = (row < n) ? row : n - 1;
    double* elem = A + row;
    for (int col = 0; col <= last_col; ++col, elem += lda)
        *elem *= mul;
}
// each thread block does one NB x n block row of A.
// each thread does one row, starting from right edge and moving left to diagonal.
// Scale the upper-triangular part (columns row..n-1) of the m x n
// column-major matrix A by `mul`. One thread per row, sweeping right to left.
__global__ void
dlascl_upper(
    int m, int n, double mul,
    double* A, int lda)
{
    const int row = blockIdx.x * NB + threadIdx.x;
    if (row >= m)
        return;
    for (int col = n - 1; col >= row; --col)
        A[row + col * lda] *= mul;
}
/***************************************************************************//**
Purpose
-------
DLASCL multiplies the M by N real matrix A by the real scalar
CTO/CFROM. This is done without over/underflow as long as the final
result CTO*A(I,J)/CFROM does not over/underflow. TYPE specifies that
A may be full, upper triangular, lower triangular.
Arguments
---------
@param[in]
type magma_type_t
TYPE indices the storage type of the input matrix A.
= MagmaFull: full matrix.
= MagmaLower: lower triangular matrix.
= MagmaUpper: upper triangular matrix.
Other formats that LAPACK supports, MAGMA does not currently support.
@param[in]
kl INTEGER
Unused, for LAPACK compatability.
@param[in]
ku KU is INTEGER
Unused, for LAPACK compatability.
@param[in]
cfrom DOUBLE PRECISION
@param[in]
cto DOUBLE PRECISION
\n
The matrix A is multiplied by CTO/CFROM. A(I,J) is computed
without over/underflow if the final result CTO*A(I,J)/CFROM
can be represented without over/underflow.
CFROM must be nonzero. CFROM and CTO must not be NAN.
@param[in]
m INTEGER
The number of rows of the matrix A. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix A. N >= 0.
@param[in,out]
dA DOUBLE PRECISION array, dimension (LDDA,N)
The matrix to be multiplied by CTO/CFROM. See TYPE for the
storage type.
@param[in]
ldda INTEGER
The leading dimension of the array A. LDDA >= max(1,M).
@param[out]
info INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_lascl
*******************************************************************************/
extern "C" void
magmablas_dlascl(
    magma_type_t type, magma_int_t kl, magma_int_t ku,
    double cfrom, double cto,
    magma_int_t m, magma_int_t n,
    magmaDouble_ptr dA, magma_int_t ldda,
    magma_queue_t queue,
    magma_int_t *info )
{
    // Argument checks. Per the LAPACK/MAGMA convention documented above,
    // *info = -i means the i-th argument was illegal.
    *info = 0;
    if ( type != MagmaLower && type != MagmaUpper && type != MagmaFull )
        *info = -1;
    else if ( cfrom == 0 || isnan(cfrom) )
        *info = -4;
    else if ( isnan(cto) )
        *info = -5;
    else if ( m < 0 )
        *info = -6;
    else if ( n < 0 )
        *info = -7;   // fixed: n is the 7th argument (was -3)
    else if ( ldda < max(1,m) )
        *info = -9;   // fixed: ldda is the 9th argument (was -7)
    
    if (*info != 0) {
        magma_xerbla( __func__, -(*info) );
        return; //info;
    }
    
    // Quick return for empty matrices; also avoids launching a zero-block grid.
    if ( m == 0 || n == 0 )
        return;
    
    dim3 threads( NB );
    dim3 grid( magma_ceildiv( m, NB ) );
    
    double smlnum, bignum, cfromc, ctoc, cto1, cfrom1, mul;
    magma_int_t done = false;
    
    // Uses over/underflow procedure from LAPACK dlascl:
    // apply cto/cfrom as a sequence of safe partial factors so that no
    // intermediate product over/underflows.
    smlnum = lapackf77_dlamch("s");
    bignum = 1 / smlnum;
    
    cfromc = cfrom;
    ctoc   = cto;
    while( ! done ) {
        cfrom1 = cfromc*smlnum;
        if ( cfrom1 == cfromc ) {
            // cfromc is an inf. Multiply by a correctly signed zero for
            // finite ctoc, or a nan if ctoc is infinite.
            mul  = ctoc / cfromc;
            done = true;
            cto1 = ctoc;
        }
        else {
            cto1 = ctoc / bignum;
            if ( cto1 == ctoc ) {
                // ctoc is either 0 or an inf. In both cases, ctoc itself
                // serves as the correct multiplication factor.
                mul    = ctoc;
                done   = true;
                cfromc = 1;
            }
            else if ( fabs(cfrom1) > fabs(ctoc) && ctoc != 0 ) {
                mul    = smlnum;
                done   = false;
                cfromc = cfrom1;
            }
            else if ( fabs(cto1) > fabs(cfromc) ) {
                mul  = bignum;
                done = false;
                ctoc = cto1;
            }
            else {
                mul  = ctoc / cfromc;
                done = true;
            }
        }
        
        // Apply this partial factor to the requested part of the matrix.
        if (type == MagmaLower) {
            hipLaunchKernelGGL(( dlascl_lower) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, mul, dA, ldda);
        }
        else if (type == MagmaUpper) {
            hipLaunchKernelGGL(( dlascl_upper) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, mul, dA, ldda);
        }
        else if (type == MagmaFull) {
            hipLaunchKernelGGL(( dlascl_full) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, mul, dA, ldda);
        }
    }
}
| fa052172357e078db14f102f420e6b932c7220a4.cu | /*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@generated from magmablas/zlascl.cu, normal z -> d, Wed Jan 2 14:18:51 2019
@author Mark Gates
*/
#include "magma_internal.h"
#define NB 64
// each thread block does one NB x n block row of A.
// each thread does one row, starting from left edge and moving right.
__global__ void
dlascl_full(
int m, int n, double mul,
double* A, int lda)
{
int ind = blockIdx.x * NB + threadIdx.x;
A += ind;
if (ind < m) {
for (int j=0; j < n; j++ )
A[j*lda] *= mul;
}
}
// each thread block does one NB x n block row of A.
// each thread does one row, starting from left edge and moving right to diagonal.
__global__ void
dlascl_lower(
int m, int n, double mul,
double* A, int lda)
{
int ind = blockIdx.x * NB + threadIdx.x;
int break_d = (ind < n) ? ind : n-1;
A += ind;
if (ind < m) {
for (int j=0; j <= break_d; j++ )
A[j*lda] *= mul;
}
}
// each thread block does one NB x n block row of A.
// each thread does one row, starting from right edge and moving left to diagonal.
__global__ void
dlascl_upper(
int m, int n, double mul,
double* A, int lda)
{
int ind = blockIdx.x * NB + threadIdx.x;
A += ind;
if (ind < m) {
for (int j=n-1; j >= ind; j--)
A[j*lda] *= mul;
}
}
/***************************************************************************//**
Purpose
-------
DLASCL multiplies the M by N real matrix A by the real scalar
CTO/CFROM. This is done without over/underflow as long as the final
result CTO*A(I,J)/CFROM does not over/underflow. TYPE specifies that
A may be full, upper triangular, lower triangular.
Arguments
---------
@param[in]
type magma_type_t
TYPE indices the storage type of the input matrix A.
= MagmaFull: full matrix.
= MagmaLower: lower triangular matrix.
= MagmaUpper: upper triangular matrix.
Other formats that LAPACK supports, MAGMA does not currently support.
@param[in]
kl INTEGER
Unused, for LAPACK compatability.
@param[in]
ku KU is INTEGER
Unused, for LAPACK compatability.
@param[in]
cfrom DOUBLE PRECISION
@param[in]
cto DOUBLE PRECISION
\n
The matrix A is multiplied by CTO/CFROM. A(I,J) is computed
without over/underflow if the final result CTO*A(I,J)/CFROM
can be represented without over/underflow.
CFROM must be nonzero. CFROM and CTO must not be NAN.
@param[in]
m INTEGER
The number of rows of the matrix A. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix A. N >= 0.
@param[in,out]
dA DOUBLE PRECISION array, dimension (LDDA,N)
The matrix to be multiplied by CTO/CFROM. See TYPE for the
storage type.
@param[in]
ldda INTEGER
The leading dimension of the array A. LDDA >= max(1,M).
@param[out]
info INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_lascl
*******************************************************************************/
extern "C" void
magmablas_dlascl(
magma_type_t type, magma_int_t kl, magma_int_t ku,
double cfrom, double cto,
magma_int_t m, magma_int_t n,
magmaDouble_ptr dA, magma_int_t ldda,
magma_queue_t queue,
magma_int_t *info )
{
*info = 0;
if ( type != MagmaLower && type != MagmaUpper && type != MagmaFull )
*info = -1;
else if ( cfrom == 0 || isnan(cfrom) )
*info = -4;
else if ( isnan(cto) )
*info = -5;
else if ( m < 0 )
*info = -6;
else if ( n < 0 )
*info = -3;
else if ( ldda < max(1,m) )
*info = -7;
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
return; //info;
}
dim3 threads( NB );
dim3 grid( magma_ceildiv( m, NB ) );
double smlnum, bignum, cfromc, ctoc, cto1, cfrom1, mul;
magma_int_t done = false;
// Uses over/underflow procedure from LAPACK dlascl
// Get machine parameters
smlnum = lapackf77_dlamch("s");
bignum = 1 / smlnum;
cfromc = cfrom;
ctoc = cto;
int cnt = 0;
while( ! done ) {
cfrom1 = cfromc*smlnum;
if ( cfrom1 == cfromc ) {
// cfromc is an inf. Multiply by a correctly signed zero for
// finite ctoc, or a nan if ctoc is infinite.
mul = ctoc / cfromc;
done = true;
cto1 = ctoc;
}
else {
cto1 = ctoc / bignum;
if ( cto1 == ctoc ) {
// ctoc is either 0 or an inf. In both cases, ctoc itself
// serves as the correct multiplication factor.
mul = ctoc;
done = true;
cfromc = 1;
}
else if ( fabs(cfrom1) > fabs(ctoc) && ctoc != 0 ) {
mul = smlnum;
done = false;
cfromc = cfrom1;
}
else if ( fabs(cto1) > fabs(cfromc) ) {
mul = bignum;
done = false;
ctoc = cto1;
}
else {
mul = ctoc / cfromc;
done = true;
}
}
if (type == MagmaLower) {
dlascl_lower <<< grid, threads, 0, queue->cuda_stream() >>> (m, n, mul, dA, ldda);
}
else if (type == MagmaUpper) {
dlascl_upper <<< grid, threads, 0, queue->cuda_stream() >>> (m, n, mul, dA, ldda);
}
else if (type == MagmaFull) {
dlascl_full <<< grid, threads, 0, queue->cuda_stream() >>> (m, n, mul, dA, ldda);
}
cnt += 1;
}
}
|
be1c65422e5e45cf073baedea73ee3dfa328593b.hip | // !!! This is a file automatically generated by hipify!!!
/* ************************************************************************** */
/* */
/* ::: :::::::: */
/* cuda_check_kernel_errors.cu :+: :+: :+: */
/* +:+ +:+ +:+ */
/* By: talemari <talemari@student.42.fr> +#+ +:+ +#+ */
/* +#+#+#+#+#+ +#+ */
/* Created: 2017/06/03 14:13:47 by talemari #+# #+# */
/* Updated: 2017/06/08 20:32:50 by talemari ### ########.fr */
/* */
/* ************************************************************************** */
#include "../../inc/rt.cuh"
#include "../inc/cuda_call.cuh"
/*
** An error checking function to be used agfter every kernel launch.
*/
void cuda_check_kernel_errors(void)
{
hipError_t errSync;
hipError_t errAsync;
const char *s;
int l;
errSync = hipGetLastError();
errAsync = hipDeviceSynchronize();
if (errSync != hipSuccess)
{
write(2, "\e[1;93mSync kernel error: ", 26);
s = hipGetErrorString(errSync);
l = strlen(s);
write(2, s, l);
write(2, "\e[0m\n", 5);
}
if (errAsync != hipSuccess)
{
write(2, "\e[1;93mAsync kernel error: ", 27);
s = hipGetErrorString(errAsync);
l = strlen(s);
write(2, s, l);
write(2, "\e[0m\n", 5);
}
if (errSync != hipSuccess || errAsync != hipSuccess)
exit(-1);
}
| be1c65422e5e45cf073baedea73ee3dfa328593b.cu | /* ************************************************************************** */
/* */
/* ::: :::::::: */
/* cuda_check_kernel_errors.cu :+: :+: :+: */
/* +:+ +:+ +:+ */
/* By: talemari <talemari@student.42.fr> +#+ +:+ +#+ */
/* +#+#+#+#+#+ +#+ */
/* Created: 2017/06/03 14:13:47 by talemari #+# #+# */
/* Updated: 2017/06/08 20:32:50 by talemari ### ########.fr */
/* */
/* ************************************************************************** */
#include "../../inc/rt.cuh"
#include "../inc/cuda_call.cuh"
/*
** An error checking function to be used agfter every kernel launch.
*/
void cuda_check_kernel_errors(void)
{
cudaError_t errSync;
cudaError_t errAsync;
const char *s;
int l;
errSync = cudaGetLastError();
errAsync = cudaDeviceSynchronize();
if (errSync != cudaSuccess)
{
write(2, "\e[1;93mSync kernel error: ", 26);
s = cudaGetErrorString(errSync);
l = strlen(s);
write(2, s, l);
write(2, "\e[0m\n", 5);
}
if (errAsync != cudaSuccess)
{
write(2, "\e[1;93mAsync kernel error: ", 27);
s = cudaGetErrorString(errAsync);
l = strlen(s);
write(2, s, l);
write(2, "\e[0m\n", 5);
}
if (errSync != cudaSuccess || errAsync != cudaSuccess)
exit(-1);
}
|
0b92983d6354de04a1cd5c18c573bbee855739e9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void naive_backward_cross_entropy(float *in, int *one_hot_classes, float batches, int size, float *out)
{
int bid = blockIdx.x * blockDim.x + threadIdx.x;
if (!(bid < size)) return;
out[bid] = (in[bid] - one_hot_classes[bid]) / batches;
} | 0b92983d6354de04a1cd5c18c573bbee855739e9.cu | #include "includes.h"
__global__ void naive_backward_cross_entropy(float *in, int *one_hot_classes, float batches, int size, float *out)
{
int bid = blockIdx.x * blockDim.x + threadIdx.x;
if (!(bid < size)) return;
out[bid] = (in[bid] - one_hot_classes[bid]) / batches;
} |
4787a65daf10a45ac03acc3318f0ead024a584c4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <iostream>
#include <ctime>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
// Waris Vorathumdusadee Section 3 6088128
// Sirichoke Yooyen Section 3 6088232
// Phummarat Yosamornsuntorn Section 3 608823
// this is the kernel for change RGB to grey color
__global__ void ChangeToGrey(unsigned char *rgb, unsigned char *grey, int Row, int Column) {
//find the global ID of each row
int ColIDThread = threadIdx.x + blockIdx.x * blockDim.x;
//find the global ID of each columns
int RowIDThread = threadIdx.y + blockIdx.y * blockDim.y;
int NumberOffset = RowIDThread * Column + ColIDThread;
//multiply 3 because 3 color which is r g b
int rgb_offset = NumberOffset * 3;
//
float weight[3] = {0.22,0.72,0.07};
// this is the weight that to compute the gray scale
float sum=0; //sum for compute the value that of all RGB color
//this loop for find the value of Gray color
for(int i=0;i<3 ;i++)
{
sum += rgb[rgb_offset+i]*weight[i];
}
grey[NumberOffset] = sum;
}
// this is the kernel for find the sobel of X and Y
__global__ void ChangeToSobel(unsigned char *grayData,
unsigned char *ValueSobelX,
unsigned char *ValueSobelY,
int Row,
int Column) {
//find the global ID of each columns
int ColIDThread = threadIdx.x + blockIdx.x * blockDim.x;
//find the global ID of each row
int RowIDThread = threadIdx.y + blockIdx.y * blockDim.y;
// calculate Global threadID
int offset = RowIDThread * Column + ColIDThread;
int ValueOfMargin[3][3]; //it is used for save the value of margin of that pixel
// set the matrix value for find the sobel of horizontal
int Horizon[3][3] = {{1,0,-1},{2,0,-2},{1,0,-1}};
// set the matrix value for find the sobel of vertical
int Vertic[3][3] = {{1,2,1},{0,0,0},{-1,-2,-1}};
int step = -1; //step for find the neighbor
// this loop for find the neighbor of that pixel
for(int i=0;i<3;i++)
{
for(int j=0;j<3;j++)
{
ValueOfMargin[i][j] = (RowIDThread+(i-1))*Column + (ColIDThread+step);
//set the value of margin of that pixel
step++;
// increase because use for go to the pixel
}
step=-1;
}
int sumHorizon = 0;// set the matrix value for find the sobel of horizontal
int sumVertical = 0;// set the matrix value for find the sobel of vertical
// this loop for find the sobel in the vertical and horizontal
for(int i = 0; i < 3; i ++){
for(int j = 0; j < 3; j++){
int TempX = sumHorizon;
// TempX for save the previous value of horizontal
int TempY = sumVertical;
// TempY for save the previous value of vertical
sumVertical = (grayData[ValueOfMargin[i][j]] * Horizon[i][j]);
sumVertical=sumVertical+TempY;
// set the new value sum of vertical
sumHorizon = (grayData[ValueOfMargin[i][j]] * Vertic[i][j]);
sumHorizon = TempX+sumHorizon;
// set the new value sum of horizontal
}
}
// set the value sum of horizontal if it more than 255 set it to 255
// because the value cannot exceed 255
if(sumHorizon >= 255){
sumHorizon = 255;
}
// set the value sum of horizontal if it less than 0 set it to 0
// because the value cannot less than 0
if(sumHorizon <= 0){
sumHorizon = 0;
}
// set the value sum of vertical if it more than 255 set it to 255
// because the value cannot exceed 255
if(sumVertical >= 255){
sumVertical = 255;
}
// set the value sum of vertical if it less than 0 set it to 0
// because the value cannot less than 0
if(sumVertical <= 0){
sumVertical = 0;
}
//set the value of sobelX
ValueSobelX[offset] = sumHorizon;
//set the value of sobelY
ValueSobelY[offset] = sumVertical;
}
int main(int argc, char **argv) {
clock_t begin = clock();
std::string input_file;
std::string OutputFilename[3];
// OutputFilename[0] for gray image output
// OutputFilename[1] for sobelX of image output
// OutputFilename[2] for sobelY of image output
//Check for the input file that user input or not
if(argc==1)
{
std::cerr << "Please input your image file in the directory first.\n";
std::cerr << "And type ./sobelProject filename.jpg\n";
exit(1);
}
// set the value of output file use for write the output to the user
input_file = std::string(argv[1]);
OutputFilename[0] = "Gray_Of_"+std::string(argv[1]);
OutputFilename[1] = "OutSobelX_Of_"+std::string(argv[1]);
OutputFilename[2] = "OutSobelY_Of_"+std::string(argv[1]);
unsigned char *DataForCopy[4];
// That use for cuda to do in the device
// DataForCopy[0] for array for storing rgb data
//DataForCopy[1] for data of grey
//DataForCopy[2] for data of sobelX
//DataForCopy[3] for data of sobelY
unsigned char *Data[3];
// That use for host
//Data[0] for data of grey
//Data[1] for data of sobelX
//Data[2] for data of sobelY
int RowOfPix; //number of rows of pixels
int ColumnfPix; //number of columns of pixels
//read image data into DataImg Mat object
cv::Mat DataImg = cv::imread(input_file, cv::IMREAD_ANYCOLOR | cv::IMREAD_ANYDEPTH);
if (DataImg.empty()) {
std::cerr << "Please check your " << input_file << std::endl;
std::cerr << "It is not in the same directory of .cu "<< std::endl;
}
// set the value of RowOfPix equal the row of image
RowOfPix = DataImg.rows;
// set the value of ColumnfPix equal the colums of image
ColumnfPix = DataImg.cols;
// find the total size of image
const size_t SizeofAllPixel = DataImg.rows * DataImg.cols;
// find the value of all pixel that contains RGB color
const size_t SizeofAllRGB = 3*SizeofAllPixel;
//this loop for allocate the memory
for(int i=0;i<4;i++)
{
if(i==0)
{
//allocate for
//DataForCopy[0] for array for storing rgb data
hipMalloc(&DataForCopy[i], sizeof(unsigned char)* SizeofAllRGB);
}
else
{
//allocate for
//DataForCopy[1] for data of greydata
//DataForCopy[2] for data of sobelX
//DataForCopy[3] for data of sobelY
//Data[0] for data of grey
//Data[1] for data of sobelX
//Data[2] for data of sobelY
Data[i-1] = (unsigned char *)malloc(sizeof(unsigned char*)* SizeofAllPixel);
hipMalloc(&DataForCopy[i], sizeof(unsigned char) * SizeofAllPixel);
}
}
hipMemset(DataForCopy[1], 0, sizeof(unsigned char) * SizeofAllPixel);
//use hipMemcpy for copy data from host to device
hipMemcpy(DataForCopy[0], (unsigned char*)DataImg.data, sizeof(unsigned char) *SizeofAllRGB, hipMemcpyHostToDevice);
hipMemcpy(DataForCopy[2], Data[1], sizeof(unsigned char) * SizeofAllPixel, hipMemcpyHostToDevice);
hipMemcpy(DataForCopy[3], Data[2], sizeof(unsigned char) * SizeofAllPixel, hipMemcpyHostToDevice);
int Thread = 16; //maximum thread
const dim3 dimBlock(Thread, Thread); //16*16 thread per blocks
int x=ColumnfPix%Thread;
int y=RowOfPix%Thread;
// below condition for n divide by number of thread and remainder equal 0 or not
if(x==0 && y==0)
{
//this the cuda kernal and we set the dimGrid and dimBlock
const dim3 dimGrid((ColumnfPix)/Thread, (RowOfPix)/Thread); //number of block
hipLaunchKernelGGL(( ChangeToGrey), dim3(dimGrid), dim3(dimBlock), 0, 0, DataForCopy[0], DataForCopy[1], RowOfPix, ColumnfPix);
hipLaunchKernelGGL(( ChangeToSobel), dim3(dimGrid), dim3(dimBlock), 0, 0, DataForCopy[1],DataForCopy[2], DataForCopy[3], RowOfPix, ColumnfPix);
}
if(x!=0 && y!=0)
{
//this the cuda kernal and we set the dimGrid and dimBlock
const dim3 dimGrid((int)ceil((ColumnfPix+Thread-1)/Thread), (RowOfPix+Thread-1)/Thread);//number of block
hipLaunchKernelGGL(( ChangeToGrey), dim3(dimGrid), dim3(dimBlock), 0, 0, DataForCopy[0], DataForCopy[1], RowOfPix, ColumnfPix);
hipLaunchKernelGGL(( ChangeToSobel), dim3(dimGrid), dim3(dimBlock), 0, 0, DataForCopy[1],DataForCopy[2], DataForCopy[3], RowOfPix, ColumnfPix);
}
if(x!=0 && y==0)
{
//this the cuda kernal and we set the dimGrid and dimBlock
const dim3 dimGrid((ColumnfPix+Thread-1)/Thread, (RowOfPix)/Thread);//number of block
hipLaunchKernelGGL(( ChangeToGrey), dim3(dimGrid), dim3(dimBlock), 0, 0, DataForCopy[0], DataForCopy[1], RowOfPix, ColumnfPix);
hipLaunchKernelGGL(( ChangeToSobel), dim3(dimGrid), dim3(dimBlock), 0, 0, DataForCopy[1],DataForCopy[2], DataForCopy[3], RowOfPix, ColumnfPix);
}
if(x!=0 && y!=0)
{
//this the cuda kernal and we set the dimGrid and dimBlock
const dim3 dimGrid((ColumnfPix)/Thread, (RowOfPix+Thread-1)/Thread);//number of block
hipLaunchKernelGGL(( ChangeToGrey), dim3(dimGrid), dim3(dimBlock), 0, 0, DataForCopy[0], DataForCopy[1], RowOfPix, ColumnfPix);
hipLaunchKernelGGL(( ChangeToSobel), dim3(dimGrid), dim3(dimBlock), 0, 0, DataForCopy[1],DataForCopy[2], DataForCopy[3], RowOfPix, ColumnfPix);
}
//copy computed gray data array from device to host
for(int i=0;i<3;i++)
{
//copy DataForCopy to Data by using hipMemcpyDeviceToHost
hipMemcpy(Data[i], DataForCopy[i+1], sizeof(unsigned char) * SizeofAllPixel, hipMemcpyDeviceToHost);
//set the Mat to save the value of data
cv::Mat outputData(RowOfPix, ColumnfPix, CV_8UC1,(void *) Data[i]);
// write the file by using above Mat
cv::imwrite(OutputFilename[i].c_str(), outputData);
// print to the user that it write file already
std::cout <<"Write file " << OutputFilename[i] << std::endl;
}
// this loop for clear memory in that store in the array
for(int i=0;i<4;i++)
{
hipFree(DataForCopy[i]);
}
clock_t finish = clock();
double timeuse = double(finish - begin);
fflush(stdout);
//print the time that use to execute
std::cout <<"Time used: " << timeuse << std::endl;
return 0;
}
| 4787a65daf10a45ac03acc3318f0ead024a584c4.cu | #include <stdio.h>
#include <iostream>
#include <ctime>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
// Waris Vorathumdusadee Section 3 6088128
// Sirichoke Yooyen Section 3 6088232
// Phummarat Yosamornsuntorn Section 3 608823
// this is the kernel for change RGB to grey color
__global__ void ChangeToGrey(unsigned char *rgb, unsigned char *grey, int Row, int Column) {
//find the global ID of each row
int ColIDThread = threadIdx.x + blockIdx.x * blockDim.x;
//find the global ID of each columns
int RowIDThread = threadIdx.y + blockIdx.y * blockDim.y;
int NumberOffset = RowIDThread * Column + ColIDThread;
//multiply 3 because 3 color which is r g b
int rgb_offset = NumberOffset * 3;
//
float weight[3] = {0.22,0.72,0.07};
// this is the weight that to compute the gray scale
float sum=0; //sum for compute the value that of all RGB color
//this loop for find the value of Gray color
for(int i=0;i<3 ;i++)
{
sum += rgb[rgb_offset+i]*weight[i];
}
grey[NumberOffset] = sum;
}
// this is the kernel for find the sobel of X and Y
__global__ void ChangeToSobel(unsigned char *grayData,
unsigned char *ValueSobelX,
unsigned char *ValueSobelY,
int Row,
int Column) {
//find the global ID of each columns
int ColIDThread = threadIdx.x + blockIdx.x * blockDim.x;
//find the global ID of each row
int RowIDThread = threadIdx.y + blockIdx.y * blockDim.y;
// calculate Global threadID
int offset = RowIDThread * Column + ColIDThread;
int ValueOfMargin[3][3]; //it is used for save the value of margin of that pixel
// set the matrix value for find the sobel of horizontal
int Horizon[3][3] = {{1,0,-1},{2,0,-2},{1,0,-1}};
// set the matrix value for find the sobel of vertical
int Vertic[3][3] = {{1,2,1},{0,0,0},{-1,-2,-1}};
int step = -1; //step for find the neighbor
// this loop for find the neighbor of that pixel
for(int i=0;i<3;i++)
{
for(int j=0;j<3;j++)
{
ValueOfMargin[i][j] = (RowIDThread+(i-1))*Column + (ColIDThread+step);
//set the value of margin of that pixel
step++;
// increase because use for go to the pixel
}
step=-1;
}
int sumHorizon = 0;// set the matrix value for find the sobel of horizontal
int sumVertical = 0;// set the matrix value for find the sobel of vertical
// this loop for find the sobel in the vertical and horizontal
for(int i = 0; i < 3; i ++){
for(int j = 0; j < 3; j++){
int TempX = sumHorizon;
// TempX for save the previous value of horizontal
int TempY = sumVertical;
// TempY for save the previous value of vertical
sumVertical = (grayData[ValueOfMargin[i][j]] * Horizon[i][j]);
sumVertical=sumVertical+TempY;
// set the new value sum of vertical
sumHorizon = (grayData[ValueOfMargin[i][j]] * Vertic[i][j]);
sumHorizon = TempX+sumHorizon;
// set the new value sum of horizontal
}
}
// set the value sum of horizontal if it more than 255 set it to 255
// because the value cannot exceed 255
if(sumHorizon >= 255){
sumHorizon = 255;
}
// set the value sum of horizontal if it less than 0 set it to 0
// because the value cannot less than 0
if(sumHorizon <= 0){
sumHorizon = 0;
}
// set the value sum of vertical if it more than 255 set it to 255
// because the value cannot exceed 255
if(sumVertical >= 255){
sumVertical = 255;
}
// set the value sum of vertical if it less than 0 set it to 0
// because the value cannot less than 0
if(sumVertical <= 0){
sumVertical = 0;
}
//set the value of sobelX
ValueSobelX[offset] = sumHorizon;
//set the value of sobelY
ValueSobelY[offset] = sumVertical;
}
int main(int argc, char **argv) {
clock_t begin = clock();
std::string input_file;
std::string OutputFilename[3];
// OutputFilename[0] for gray image output
// OutputFilename[1] for sobelX of image output
// OutputFilename[2] for sobelY of image output
//Check for the input file that user input or not
if(argc==1)
{
std::cerr << "Please input your image file in the directory first.\n";
std::cerr << "And type ./sobelProject filename.jpg\n";
exit(1);
}
// set the value of output file use for write the output to the user
input_file = std::string(argv[1]);
OutputFilename[0] = "Gray_Of_"+std::string(argv[1]);
OutputFilename[1] = "OutSobelX_Of_"+std::string(argv[1]);
OutputFilename[2] = "OutSobelY_Of_"+std::string(argv[1]);
unsigned char *DataForCopy[4];
// That use for cuda to do in the device
// DataForCopy[0] for array for storing rgb data
//DataForCopy[1] for data of grey
//DataForCopy[2] for data of sobelX
//DataForCopy[3] for data of sobelY
unsigned char *Data[3];
// That use for host
//Data[0] for data of grey
//Data[1] for data of sobelX
//Data[2] for data of sobelY
int RowOfPix; //number of rows of pixels
int ColumnfPix; //number of columns of pixels
//read image data into DataImg Mat object
cv::Mat DataImg = cv::imread(input_file, cv::IMREAD_ANYCOLOR | cv::IMREAD_ANYDEPTH);
if (DataImg.empty()) {
std::cerr << "Please check your " << input_file << std::endl;
std::cerr << "It is not in the same directory of .cu "<< std::endl;
}
// set the value of RowOfPix equal the row of image
RowOfPix = DataImg.rows;
// set the value of ColumnfPix equal the colums of image
ColumnfPix = DataImg.cols;
// find the total size of image
const size_t SizeofAllPixel = DataImg.rows * DataImg.cols;
// find the value of all pixel that contains RGB color
const size_t SizeofAllRGB = 3*SizeofAllPixel;
//this loop for allocate the memory
for(int i=0;i<4;i++)
{
if(i==0)
{
//allocate for
//DataForCopy[0] for array for storing rgb data
cudaMalloc(&DataForCopy[i], sizeof(unsigned char)* SizeofAllRGB);
}
else
{
//allocate for
//DataForCopy[1] for data of greydata
//DataForCopy[2] for data of sobelX
//DataForCopy[3] for data of sobelY
//Data[0] for data of grey
//Data[1] for data of sobelX
//Data[2] for data of sobelY
Data[i-1] = (unsigned char *)malloc(sizeof(unsigned char*)* SizeofAllPixel);
cudaMalloc(&DataForCopy[i], sizeof(unsigned char) * SizeofAllPixel);
}
}
cudaMemset(DataForCopy[1], 0, sizeof(unsigned char) * SizeofAllPixel);
//use cudaMemcpy for copy data from host to device
cudaMemcpy(DataForCopy[0], (unsigned char*)DataImg.data, sizeof(unsigned char) *SizeofAllRGB, cudaMemcpyHostToDevice);
cudaMemcpy(DataForCopy[2], Data[1], sizeof(unsigned char) * SizeofAllPixel, cudaMemcpyHostToDevice);
cudaMemcpy(DataForCopy[3], Data[2], sizeof(unsigned char) * SizeofAllPixel, cudaMemcpyHostToDevice);
int Thread = 16; //maximum thread
const dim3 dimBlock(Thread, Thread); //16*16 thread per blocks
int x=ColumnfPix%Thread;
int y=RowOfPix%Thread;
// below condition for n divide by number of thread and remainder equal 0 or not
if(x==0 && y==0)
{
//this the cuda kernal and we set the dimGrid and dimBlock
const dim3 dimGrid((ColumnfPix)/Thread, (RowOfPix)/Thread); //number of block
ChangeToGrey<<<dimGrid, dimBlock>>>(DataForCopy[0], DataForCopy[1], RowOfPix, ColumnfPix);
ChangeToSobel<<<dimGrid, dimBlock>>>(DataForCopy[1],DataForCopy[2], DataForCopy[3], RowOfPix, ColumnfPix);
}
if(x!=0 && y!=0)
{
//this the cuda kernal and we set the dimGrid and dimBlock
const dim3 dimGrid((int)ceil((ColumnfPix+Thread-1)/Thread), (RowOfPix+Thread-1)/Thread);//number of block
ChangeToGrey<<<dimGrid, dimBlock>>>(DataForCopy[0], DataForCopy[1], RowOfPix, ColumnfPix);
ChangeToSobel<<<dimGrid, dimBlock>>>(DataForCopy[1],DataForCopy[2], DataForCopy[3], RowOfPix, ColumnfPix);
}
if(x!=0 && y==0)
{
//this the cuda kernal and we set the dimGrid and dimBlock
const dim3 dimGrid((ColumnfPix+Thread-1)/Thread, (RowOfPix)/Thread);//number of block
ChangeToGrey<<<dimGrid, dimBlock>>>(DataForCopy[0], DataForCopy[1], RowOfPix, ColumnfPix);
ChangeToSobel<<<dimGrid, dimBlock>>>(DataForCopy[1],DataForCopy[2], DataForCopy[3], RowOfPix, ColumnfPix);
}
if(x!=0 && y!=0)
{
//this the cuda kernal and we set the dimGrid and dimBlock
const dim3 dimGrid((ColumnfPix)/Thread, (RowOfPix+Thread-1)/Thread);//number of block
ChangeToGrey<<<dimGrid, dimBlock>>>(DataForCopy[0], DataForCopy[1], RowOfPix, ColumnfPix);
ChangeToSobel<<<dimGrid, dimBlock>>>(DataForCopy[1],DataForCopy[2], DataForCopy[3], RowOfPix, ColumnfPix);
}
//copy computed gray data array from device to host
for(int i=0;i<3;i++)
{
//copy DataForCopy to Data by using cudaMemcpyDeviceToHost
cudaMemcpy(Data[i], DataForCopy[i+1], sizeof(unsigned char) * SizeofAllPixel, cudaMemcpyDeviceToHost);
//set the Mat to save the value of data
cv::Mat outputData(RowOfPix, ColumnfPix, CV_8UC1,(void *) Data[i]);
// write the file by using above Mat
cv::imwrite(OutputFilename[i].c_str(), outputData);
// print to the user that it write file already
std::cout <<"Write file " << OutputFilename[i] << std::endl;
}
// this loop for clear memory in that store in the array
for(int i=0;i<4;i++)
{
cudaFree(DataForCopy[i]);
}
clock_t finish = clock();
double timeuse = double(finish - begin);
fflush(stdout);
//print the time that use to execute
std::cout <<"Time used: " << timeuse << std::endl;
return 0;
}
|
553ae442cdb9281b0861ac0cfb4d587a046a5198.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdio>
#include <cstdlib>
#include <hip/hip_runtime.h>
#include "Vector3D_f.h"
// from CUDA Toolkit samples
#include <helper_cuda.h>
__device__ Vector3D::Vector3D() : Coordinate( 0, 0, 0 ) {
}
__device__ Vector3D::Vector3D( const Vector3D& v ) : Coordinate( v.x, v.y, v.z ) {
}
__device__ Vector3D::Vector3D( float x, float y, float z ) : Coordinate( x, y, z ) {
}
__device__ Vector3D& Vector3D::add( const Vector3D& v ) {
x += v.x ;
y += v.y ;
z += v.z ;
return *this ;
}
__device__ Vector3D& Vector3D::sub( const Vector3D& v ) {
x -= v.x ;
y -= v.y ;
z -= v.z ;
return *this ;
}
__device__ Vector3D& Vector3D::mul( float a ) {
x *= a ;
y *= a ;
z *= a ;
return *this ;
}
__device__ float Vector3D::dot( const Vector3D& v ) {
return x*v.x+y*v.y+z*v.z ;
}
__device__ Vector3D& Vector3D::cross( const Vector3D& v ) {
float x, y, z ;
x = this->x ;
y = this->y ;
z = this->z ;
this->x = y*v.z-z*v.y ;
this->y = z*v.x-x*v.z ;
this->z = x*v.y-y*v.x ;
return *this ;
}
__device__ Vector3D& Vector3D::apply( const Vector3D& matcol0, const Vector3D& matcol1, const Vector3D& matcol2 ) {
return apply(
matcol0.x, matcol1.x, matcol2.x,
matcol0.y, matcol1.y, matcol2.y,
matcol0.z, matcol1.z, matcol2.z ) ;
}
__device__ Vector3D& Vector3D::apply(
const float m00, const float m01, const float m02,
const float m10, const float m11, const float m12,
const float m20, const float m21, const float m22 ) {
float _x = x, _y = y, _z = z ;
x = _x*m00+_y*m01+_z*m02 ;
y = _x*m10+_y*m11+_z*m12 ;
z = _x*m20+_y*m21+_z*m22 ;
return *this ;
}
#ifdef VECTOR3D_MAIN
// kernel
__global__ void vector3d( float* buf ) {
Vector3D a, b ;
int i = threadIdx.x ;
a.set( i, i+.123, i+.234 ) ;
b.set( i+.234, i+.123, i ) ;
buf[i] = a.cross( b ).dot( a ) ;
}
#define NUM_BLOCKS 1
#define NUM_THREADS 360
int main( int argc, char** argv ) {
// host buffer
float buf[NUM_THREADS] ;
// device buffer
float* dbuf = NULL ;
hipDeviceProp_t devProp ;
int devID ;
// find device and output compute capability on stderr
devID = gpuGetMaxGflopsDeviceId() ;
checkCudaErrors( hipSetDevice( devID ) ) ;
checkCudaErrors( hipGetDeviceProperties( &devProp, devID ) ) ;
fprintf( stderr, "%d%d\n", devProp.major, devProp.minor ) ;
// allocate device buffer memory
checkCudaErrors( hipMalloc( (void**) &dbuf, sizeof( float )*NUM_THREADS ) ) ;
// run kernel
hipLaunchKernelGGL(( vector3d), dim3(NUM_BLOCKS), dim3(NUM_THREADS), 0, 0, dbuf ) ;
// copy kernel results from device buffer to host
checkCudaErrors( hipMemcpy( buf, dbuf, sizeof( float )*NUM_THREADS, hipMemcpyDeviceToHost ) ) ;
checkCudaErrors( hipFree( dbuf ) ) ;
// output result on stdout
for ( int i=0 ; NUM_THREADS>i ; i++ )
printf( "%.6f\n", buf[i] ) ;
return EXIT_SUCCESS ;
}
#endif // VECTOR3D_MAIN
| 553ae442cdb9281b0861ac0cfb4d587a046a5198.cu | #include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>
#include "Vector3D_f.h"
// from CUDA Toolkit samples
#include <helper_cuda.h>
__device__ Vector3D::Vector3D() : Coordinate( 0, 0, 0 ) {
}
__device__ Vector3D::Vector3D( const Vector3D& v ) : Coordinate( v.x, v.y, v.z ) {
}
__device__ Vector3D::Vector3D( float x, float y, float z ) : Coordinate( x, y, z ) {
}
__device__ Vector3D& Vector3D::add( const Vector3D& v ) {
x += v.x ;
y += v.y ;
z += v.z ;
return *this ;
}
__device__ Vector3D& Vector3D::sub( const Vector3D& v ) {
x -= v.x ;
y -= v.y ;
z -= v.z ;
return *this ;
}
__device__ Vector3D& Vector3D::mul( float a ) {
x *= a ;
y *= a ;
z *= a ;
return *this ;
}
__device__ float Vector3D::dot( const Vector3D& v ) {
return x*v.x+y*v.y+z*v.z ;
}
__device__ Vector3D& Vector3D::cross( const Vector3D& v ) {
float x, y, z ;
x = this->x ;
y = this->y ;
z = this->z ;
this->x = y*v.z-z*v.y ;
this->y = z*v.x-x*v.z ;
this->z = x*v.y-y*v.x ;
return *this ;
}
__device__ Vector3D& Vector3D::apply( const Vector3D& matcol0, const Vector3D& matcol1, const Vector3D& matcol2 ) {
return apply(
matcol0.x, matcol1.x, matcol2.x,
matcol0.y, matcol1.y, matcol2.y,
matcol0.z, matcol1.z, matcol2.z ) ;
}
__device__ Vector3D& Vector3D::apply(
const float m00, const float m01, const float m02,
const float m10, const float m11, const float m12,
const float m20, const float m21, const float m22 ) {
float _x = x, _y = y, _z = z ;
x = _x*m00+_y*m01+_z*m02 ;
y = _x*m10+_y*m11+_z*m12 ;
z = _x*m20+_y*m21+_z*m22 ;
return *this ;
}
#ifdef VECTOR3D_MAIN
// kernel
__global__ void vector3d( float* buf ) {
Vector3D a, b ;
int i = threadIdx.x ;
a.set( i, i+.123, i+.234 ) ;
b.set( i+.234, i+.123, i ) ;
buf[i] = a.cross( b ).dot( a ) ;
}
#define NUM_BLOCKS 1
#define NUM_THREADS 360
// Self-test driver (compiled only under VECTOR3D_MAIN): runs the vector3d
// kernel on one block of 360 threads, then prints the compute capability
// on stderr and the 360 results on stdout.
int main( int argc, char** argv ) {
    // host buffer
    float buf[NUM_THREADS] ;
    // device buffer
    float* dbuf = NULL ;
    cudaDeviceProp devProp ;
    int devID ;
    // find device and output compute capability on stderr
    devID = gpuGetMaxGflopsDeviceId() ;
    checkCudaErrors( cudaSetDevice( devID ) ) ;
    checkCudaErrors( cudaGetDeviceProperties( &devProp, devID ) ) ;
    fprintf( stderr, "%d%d\n", devProp.major, devProp.minor ) ;
    // allocate device buffer memory
    checkCudaErrors( cudaMalloc( (void**) &dbuf, sizeof( float )*NUM_THREADS ) ) ;
    // run kernel
    // NOTE(review): no cudaGetLastError() after the launch; a bad launch
    // configuration would only surface at the cudaMemcpy below.
    vector3d<<<NUM_BLOCKS, NUM_THREADS>>>( dbuf ) ;
    // copy kernel results from device buffer to host (this blocking copy
    // also synchronizes with the kernel)
    checkCudaErrors( cudaMemcpy( buf, dbuf, sizeof( float )*NUM_THREADS, cudaMemcpyDeviceToHost ) ) ;
    checkCudaErrors( cudaFree( dbuf ) ) ;
    // output result on stdout
    for ( int i=0 ; NUM_THREADS>i ; i++ )
        printf( "%.6f\n", buf[i] ) ;
    return EXIT_SUCCESS ;
}
#endif // VECTOR3D_MAIN
|
ae3113feaf246bc6a70e9fb529005bd826f6a343.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <string>
#include <stdlib.h>
#include<Windows.h>
#include "../HighPerformanceTimer/HighPerformanceTimer.h"
#include <omp.h>
typedef int array_type_t;
// Allocates three `size`-element arrays into *a, *b and *c.
// Returns true only when ALL three allocations succeed.
// BUG FIX: the original set its result to true whenever ANY of the three
// mallocs succeeded, so a partial failure (e.g. b == nullptr) still
// reported success and let the caller run on null buffers. On failure the
// caller is expected to release partial allocations via clean_up().
bool allocMemory(array_type_t** a, array_type_t**b, int**c, int size) {
    *a = (array_type_t*)malloc(sizeof(array_type_t) * size);
    *b = (array_type_t*)malloc(sizeof(array_type_t) * size);
    *c = (int*)malloc(sizeof(int) * size);
    return (*a != nullptr) && (*b != nullptr) && (*c != nullptr);
}
// Releases the three host buffers allocated by allocMemory().
// Null entries are skipped; pointers are not reset afterwards.
void clean_up(array_type_t **a, array_type_t**b, array_type_t**c){
    if (*a != nullptr) {
        free(*a);
    }
    if (*b != nullptr) {
        free(*b);
    }
    if (*c != nullptr) {
        free(*c);
    }
}
// Fills a and b with identical pseudo-random values in [1, 100] and
// zeroes c. Because a[i] and b[i] share the same draw, the later sum
// c[i] = a[i] + b[i] is always 2 * a[i].
// Returns true only when all three buffers are non-null.
// BUG FIX: the original returned `!(a == nullptr) || (b == nullptr)`,
// a precedence slip that evaluates to true whenever a is non-null (even
// with b null), and it dereferenced the pointers before any check.
bool fill_array(array_type_t *a, array_type_t*b, array_type_t*c, int size) {
    if (a == nullptr || b == nullptr || c == nullptr) {
        return false;
    }
    for (int i = 0; i < size; i++) {
        const array_type_t random_number = (rand() % 100) + 1;
        a[i] = random_number;
        b[i] = random_number;
        c[i] = 0;
    }
    return true;
}
// Prints `size` elements separated by spaces, inserting a newline after
// every element whose index is a non-zero multiple of 5, then ends the
// line and flushes.
void print_arrays(array_type_t *my_array, int size) {
    for (int idx = 0; idx < size; ++idx) {
        std::cout << my_array[idx] << " ";
        const bool break_line = (idx != 0) && (idx % 5 == 0);
        if (break_line) {
            std::cout << "\n";
        }
    }
    std::cout << std::endl;
}
// Serial element-wise sum on the CPU: c[i] = a[i] + b[i] for i < size.
// Returns whether c is non-null (matching the original contract).
bool addVecSerialCPU(array_type_t *a, array_type_t*b, array_type_t*c, int size) {
    int idx = 0;
    while (idx < size) {
        c[idx] = a[idx] + b[idx];
        ++idx;
    }
    return c != nullptr;
}
// Benchmark driver: fills two arrays with random ints, then times the
// serial CPU vector addition averaged over 100 runs.
// Usage: program <array_size>   (falls back to size 100 when omitted).
int main(int argc, char*argv[]) {
    HighPrecisionTime h;
    int size = 100;
    if (argc > 1) {
        size = std::stoi(argv[1]);
        std::cout << "Size of array: " << size << std::endl;
    }
    else {
        // NOTE: a usage message is printed, but execution continues with
        // the default size of 100.
        std::cout << "ERROR: Usage: nameofprogram sizeofarray" << std::endl;
    }
    // The three host arrays (a + b -> c).
    array_type_t *a = nullptr;
    array_type_t *b = nullptr;
    array_type_t *c = nullptr;
    try {
        bool malloc_retval = allocMemory(&a, &b, &c, size);
        if (!malloc_retval)
            throw "ERROR: allocating memory for the arrays.";
        // Seed the RNG from the Windows tick counter.
        srand(GetTickCount());
        std::cout << "Filling Arrays with random numbers:" << std::endl;
        if (!fill_array(a, b, c, size)) {
            throw "ERROR: filling arrays with random numbers.";
        }
        // Time AVERAGE_TIMES runs of the addition and report the mean.
        const int AVERAGE_TIMES = 100;
        double function_performance = 0;
        h.TimeSinceLastCall();
        for (int i = 0; i < AVERAGE_TIMES; i++) {
            addVecSerialCPU(a, b, c, size);
            function_performance += h.TimeSinceLastCall();
        }
        function_performance = function_performance / AVERAGE_TIMES;
        std::cout <<"Runtime of addingVectors:: " << std::fixed << function_performance << std::endl;
    }
    // BUG FIX: a string-literal throw has type const char*, which a
    // `catch (char*)` handler does not match in standard C++; on
    // conforming compilers the error would previously have escaped as an
    // unhandled exception.
    catch (const char* err_message) {
        std::cout << err_message << std::endl;
    }
    clean_up(&a, &b, &c);
    system("pause");
    return 0;
}
| ae3113feaf246bc6a70e9fb529005bd826f6a343.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <string>
#include <stdlib.h>
#include<Windows.h>
#include "../HighPerformanceTimer/HighPerformanceTimer.h"
#include <omp.h>
typedef int array_type_t;
// Allocates three `size`-element arrays into *a, *b and *c.
// Returns true only when ALL three allocations succeed.
// BUG FIX: the original set its result to true whenever ANY of the three
// mallocs succeeded, so a partial failure (e.g. b == nullptr) still
// reported success and let the caller run on null buffers. On failure the
// caller is expected to release partial allocations via clean_up().
bool allocMemory(array_type_t** a, array_type_t**b, int**c, int size) {
    *a = (array_type_t*)malloc(sizeof(array_type_t) * size);
    *b = (array_type_t*)malloc(sizeof(array_type_t) * size);
    *c = (int*)malloc(sizeof(int) * size);
    return (*a != nullptr) && (*b != nullptr) && (*c != nullptr);
}
// Releases the three host buffers allocated by allocMemory().
// Null entries are skipped; pointers are not reset afterwards.
void clean_up(array_type_t **a, array_type_t**b, array_type_t**c){
    if (*a != nullptr) {
        free(*a);
    }
    if (*b != nullptr) {
        free(*b);
    }
    if (*c != nullptr) {
        free(*c);
    }
}
// Fills a and b with identical pseudo-random values in [1, 100] and
// zeroes c. Because a[i] and b[i] share the same draw, the later sum
// c[i] = a[i] + b[i] is always 2 * a[i].
// Returns true only when all three buffers are non-null.
// BUG FIX: the original returned `!(a == nullptr) || (b == nullptr)`,
// a precedence slip that evaluates to true whenever a is non-null (even
// with b null), and it dereferenced the pointers before any check.
bool fill_array(array_type_t *a, array_type_t*b, array_type_t*c, int size) {
    if (a == nullptr || b == nullptr || c == nullptr) {
        return false;
    }
    for (int i = 0; i < size; i++) {
        const array_type_t random_number = (rand() % 100) + 1;
        a[i] = random_number;
        b[i] = random_number;
        c[i] = 0;
    }
    return true;
}
// Prints `size` elements separated by spaces, inserting a newline after
// every element whose index is a non-zero multiple of 5, then ends the
// line and flushes.
void print_arrays(array_type_t *my_array, int size) {
    for (int idx = 0; idx < size; ++idx) {
        std::cout << my_array[idx] << " ";
        const bool break_line = (idx != 0) && (idx % 5 == 0);
        if (break_line) {
            std::cout << "\n";
        }
    }
    std::cout << std::endl;
}
// Serial element-wise sum on the CPU: c[i] = a[i] + b[i] for i < size.
// Returns whether c is non-null (matching the original contract).
bool addVecSerialCPU(array_type_t *a, array_type_t*b, array_type_t*c, int size) {
    int idx = 0;
    while (idx < size) {
        c[idx] = a[idx] + b[idx];
        ++idx;
    }
    return c != nullptr;
}
// Benchmark driver: fills two arrays with random ints, then times the
// serial CPU vector addition averaged over 100 runs.
// Usage: program <array_size>   (falls back to size 100 when omitted).
int main(int argc, char*argv[]) {
    HighPrecisionTime h;
    int size = 100;
    if (argc > 1) {
        size = std::stoi(argv[1]);
        std::cout << "Size of array: " << size << std::endl;
    }
    else {
        // NOTE: a usage message is printed, but execution continues with
        // the default size of 100.
        std::cout << "ERROR: Usage: nameofprogram sizeofarray" << std::endl;
    }
    // The three host arrays (a + b -> c).
    array_type_t *a = nullptr;
    array_type_t *b = nullptr;
    array_type_t *c = nullptr;
    try {
        bool malloc_retval = allocMemory(&a, &b, &c, size);
        if (!malloc_retval)
            throw "ERROR: allocating memory for the arrays.";
        // Seed the RNG from the Windows tick counter.
        srand(GetTickCount());
        std::cout << "Filling Arrays with random numbers:" << std::endl;
        if (!fill_array(a, b, c, size)) {
            throw "ERROR: filling arrays with random numbers.";
        }
        // Time AVERAGE_TIMES runs of the addition and report the mean.
        const int AVERAGE_TIMES = 100;
        double function_performance = 0;
        h.TimeSinceLastCall();
        for (int i = 0; i < AVERAGE_TIMES; i++) {
            addVecSerialCPU(a, b, c, size);
            function_performance += h.TimeSinceLastCall();
        }
        function_performance = function_performance / AVERAGE_TIMES;
        std::cout <<"Runtime of addingVectors:: " << std::fixed << function_performance << std::endl;
    }
    // BUG FIX: a string-literal throw has type const char*, which a
    // `catch (char*)` handler does not match in standard C++; on
    // conforming compilers the error would previously have escaped as an
    // unhandled exception.
    catch (const char* err_message) {
        std::cout << err_message << std::endl;
    }
    clean_up(&a, &b, &c);
    system("pause");
    return 0;
}
|
011c7f57ba84e209d276896970d651d3c774e0df.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <iostream>
using namespace std;
// Element-wise 2x2 int matrix SUBTRACTION: c = a - b.
// NOTE(review): despite the name "arrayadd", this kernel subtracts.
// Data is row-major with a hard-coded row width of 2 (c[2*row+col]).
// The host launches it with a single 1-D block of 4 threads, so
// threadIdx.y is always 0 and the kernel effectively computes
// c[col] = a[col] - b[col] for col = 0..3.
__global__ void arrayadd(int *a,int *b,int *c){
    int row=threadIdx.y;
    int col=threadIdx.x;
    c[2*row+col]=a[2*row+col]-b[2*row+col];
}
// Demo driver: computes the element-wise difference C = A - B of two
// 2x2 int matrices on the GPU and prints A, B and C.
int main()
{
    // Compile-time constant so the arrays below are standard C++ (the
    // original used a runtime `int size`, making them VLAs -- a compiler
    // extension, not standard C++).
    const int size = 4;
    int a[size], b[size], c[size];
    // Device buffers (renamed d_* -- the original h_* prefix wrongly
    // suggested host memory).
    int *d_a, *d_b, *d_c;
    // A[i] = 8i, B[i] = 5i, C zeroed.
    for (int i = 0; i < size; i++)
    {
        a[i] = i * 8;
        b[i] = i * 5;
        c[i] = 0;
    }
    const int gpu_size = sizeof(int) * size;
    hipMalloc((void**)&d_a, gpu_size);
    hipMalloc((void**)&d_b, gpu_size);
    hipMalloc((void**)&d_c, gpu_size);
    hipMemcpy(d_a, a, gpu_size, hipMemcpyHostToDevice);
    hipMemcpy(d_b, b, gpu_size, hipMemcpyHostToDevice);
    // One 1-D block of 4 threads; the kernel indexes row-major, width 2.
    hipLaunchKernelGGL(arrayadd, dim3(1), dim3(4), 0, 0, d_a, d_b, d_c);
    // The blocking device-to-host copy synchronizes with the kernel.
    hipMemcpy(c, d_c, gpu_size, hipMemcpyDeviceToHost);
    cout << "Matrix A\n";
    for (int i = 1; i <= size; i++)
    {
        cout << a[i - 1] << "\t";
        if (i % 2 == 0)
            cout << "\n";
    }
    cout << "Matrix B\n";
    for (int i = 1; i <= size; i++)
    {
        cout << b[i - 1] << "\t";
        if (i % 2 == 0)
            cout << "\n";
    }
    cout << "Substraction is Matrix C\n";
    for (int i = 1; i <= size; i++)
    {
        cout << c[i - 1] << "\t";
        if (i % 2 == 0)
            cout << "\n";
    }
    // Release device memory (the original leaked all three buffers).
    hipFree(d_a);
    hipFree(d_b);
    hipFree(d_c);
    return 0;
}
| 011c7f57ba84e209d276896970d651d3c774e0df.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include <iostream>
using namespace std;
// Element-wise 2x2 int matrix SUBTRACTION: c = a - b.
// NOTE(review): despite the name "arrayadd", this kernel subtracts.
// Data is row-major with a hard-coded row width of 2 (c[2*row+col]).
// The host launches it with a single 1-D block of 4 threads, so
// threadIdx.y is always 0 and the kernel effectively computes
// c[col] = a[col] - b[col] for col = 0..3.
__global__ void arrayadd(int *a,int *b,int *c){
    int row=threadIdx.y;
    int col=threadIdx.x;
    c[2*row+col]=a[2*row+col]-b[2*row+col];
}
// Demo driver: computes the element-wise difference C = A - B of two
// 2x2 int matrices on the GPU and prints A, B and C.
int main()
{
    // Compile-time constant so the arrays below are standard C++ (the
    // original used a runtime `int size`, making them VLAs -- a compiler
    // extension, not standard C++).
    const int size = 4;
    int a[size], b[size], c[size];
    // Device buffers (renamed d_* -- the original h_* prefix wrongly
    // suggested host memory).
    int *d_a, *d_b, *d_c;
    // A[i] = 8i, B[i] = 5i, C zeroed.
    for (int i = 0; i < size; i++)
    {
        a[i] = i * 8;
        b[i] = i * 5;
        c[i] = 0;
    }
    const int gpu_size = sizeof(int) * size;
    cudaMalloc((void**)&d_a, gpu_size);
    cudaMalloc((void**)&d_b, gpu_size);
    cudaMalloc((void**)&d_c, gpu_size);
    cudaMemcpy(d_a, a, gpu_size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, gpu_size, cudaMemcpyHostToDevice);
    // One 1-D block of 4 threads; the kernel indexes row-major, width 2.
    arrayadd<<<1, 4>>>(d_a, d_b, d_c);
    // The blocking device-to-host copy synchronizes with the kernel.
    cudaMemcpy(c, d_c, gpu_size, cudaMemcpyDeviceToHost);
    cout << "Matrix A\n";
    for (int i = 1; i <= size; i++)
    {
        cout << a[i - 1] << "\t";
        if (i % 2 == 0)
            cout << "\n";
    }
    cout << "Matrix B\n";
    for (int i = 1; i <= size; i++)
    {
        cout << b[i - 1] << "\t";
        if (i % 2 == 0)
            cout << "\n";
    }
    cout << "Substraction is Matrix C\n";
    for (int i = 1; i <= size; i++)
    {
        cout << c[i - 1] << "\t";
        if (i % 2 == 0)
            cout << "\n";
    }
    // Release device memory (the original leaked all three buffers).
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    return 0;
}
|
ecba8cd439910405e090e0eda2890e43f45ef6aa.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
unsigned char *pdata; // pointer to data content
// 3x3 convolution filter over a 512x512 interleaved RGB image, in place.
// Launch layout assumed by the indexing: one block per image row and one
// thread per column (the row width 512 is hard-coded in the
// gid*3 +/- 512*3 offsets and in the shared-array sizes).
// filter[0..8] is the 3x3 kernel; filter[9] is its normalization divisor.
// Border pixels (first/last row or column) are passed through unchanged.
// NOTE(review): the filter runs in place on Da while neighboring rows are
// processed by other blocks -- a block may read rows another block has
// already overwritten, so results can depend on block scheduling. Confirm
// whether a separate output buffer was intended.
__global__ void processData(unsigned char *Da, int* filter)
{
    int tx = threadIdx.x; // thread x-index == image column
    int bx = blockIdx.x; // block x-index == image row
    int bn = blockDim.x;
    int gid = bx * bn + tx; // linear pixel index
    __shared__ int sfilter[3][3];
    __shared__ int sR[3][512]; // the three rows (above/current/below) this block reads
    __shared__ int sG[3][512];
    __shared__ int sB[3][512];
    __shared__ int sRsum[512]; // per-block results for the 512 pixels of this row
    __shared__ int sGsum[512];
    __shared__ int sBsum[512];
    if (tx < 9) // first nine threads stage the 3x3 filter into shared memory
    {
        sfilter[tx / 3][tx % 3] = filter[tx];
    }
    __syncthreads();
    if (bx == 0 || bx == 511 || tx == 0 || tx == 511)
    {
        // border handling --> keep the original pixel value unchanged
        sRsum[tx] = Da[gid * 3];
        sGsum[tx] = Da[gid * 3 + 1];
        sBsum[tx] = Da[gid * 3 + 2];
    }
    // interior rows only (first and last image rows are skipped; bx is
    // uniform within a block, so the barrier below is not divergent)
    if (bx != 0 && bx != 511)
    {
        // stage the three R, G, B rows this block needs into shared memory
        sR[0][tx] = Da[gid * 3 - 512 * 3];
        sR[1][tx] = Da[gid * 3];
        sR[2][tx] = Da[gid * 3 + 512 * 3];
        sG[0][tx] = Da[gid * 3 - 512 * 3 + 1];
        sG[1][tx] = Da[gid * 3 + 1];
        sG[2][tx] = Da[gid * 3 + 512 * 3 + 1];
        sB[0][tx] = Da[gid * 3 - 512 * 3 + 2];
        sB[1][tx] = Da[gid * 3 + 2];
        sB[2][tx] = Da[gid * 3 + 512 * 3 + 2];
        __syncthreads();
        // interior columns only (first/last column of each row skipped)
        if (tx != 0 && tx != 511)
        {
            // R channel: 3x3 weighted sum
            sRsum[tx] = sR[0][tx - 1] * sfilter[0][0];
            sRsum[tx] += sR[0][tx] * sfilter[0][1];
            sRsum[tx] += sR[0][tx + 1] * sfilter[0][2];
            sRsum[tx] += sR[1][tx - 1] * sfilter[1][0];
            sRsum[tx] += sR[1][tx] * sfilter[1][1];
            sRsum[tx] += sR[1][tx + 1] * sfilter[1][2];
            sRsum[tx] += sR[2][tx - 1] * sfilter[2][0];
            sRsum[tx] += sR[2][tx] * sfilter[2][1];
            sRsum[tx] += sR[2][tx + 1] * sfilter[2][2];
            // G channel
            sGsum[tx] = sG[0][tx - 1] * sfilter[0][0];
            sGsum[tx] += sG[0][tx] * sfilter[0][1];
            sGsum[tx] += sG[0][tx + 1] * sfilter[0][2];
            sGsum[tx] += sG[1][tx - 1] * sfilter[1][0];
            sGsum[tx] += sG[1][tx] * sfilter[1][1];
            sGsum[tx] += sG[1][tx + 1] * sfilter[1][2];
            sGsum[tx] += sG[2][tx - 1] * sfilter[2][0];
            sGsum[tx] += sG[2][tx] * sfilter[2][1];
            sGsum[tx] += sG[2][tx + 1] * sfilter[2][2];
            // B channel
            sBsum[tx] = sB[0][tx - 1] * sfilter[0][0];
            sBsum[tx] += sB[0][tx] * sfilter[0][1];
            sBsum[tx] += sB[0][tx + 1] * sfilter[0][2];
            sBsum[tx] += sB[1][tx - 1] * sfilter[1][0];
            sBsum[tx] += sB[1][tx] * sfilter[1][1];
            sBsum[tx] += sB[1][tx + 1] * sfilter[1][2];
            sBsum[tx] += sB[2][tx - 1] * sfilter[2][0];
            sBsum[tx] += sB[2][tx] * sfilter[2][1];
            sBsum[tx] += sB[2][tx + 1] * sfilter[2][2];
            // normalize by the divisor stored in filter[9]
            sRsum[tx] /= filter[9];
            sGsum[tx] /= filter[9];
            sBsum[tx] /= filter[9];
            // clamp each channel to the valid byte range [0, 255]
            if (sRsum[tx] > 255)
                sRsum[tx] = 255;
            else if (sRsum[tx] < 0)
                sRsum[tx] = 0;
            if (sGsum[tx] > 255)
                sGsum[tx] = 255;
            else if (sGsum[tx] < 0)
                sGsum[tx] = 0;
            if (sBsum[tx] > 255)
                sBsum[tx] = 255;
            else if (sBsum[tx] < 0)
                sBsum[tx] = 0;
        }
    }
    __syncthreads();
    // write the filtered R, G, B values back into the interleaved array
    Da[gid * 3] = sRsum[tx];
    Da[gid * 3 + 1] = sGsum[tx];
    Da[gid * 3 + 2] = sBsum[tx];
} | ecba8cd439910405e090e0eda2890e43f45ef6aa.cu | #include "includes.h"
unsigned char *pdata; // pointer to data content
// 3x3 convolution filter over a 512x512 interleaved RGB image, in place.
// (Comments translated from the original Chinese.)
// Launch layout assumed by the indexing: one block per image row and one
// thread per column (the row width 512 is hard-coded in the
// gid*3 +/- 512*3 offsets and in the shared-array sizes).
// filter[0..8] is the 3x3 kernel; filter[9] is its normalization divisor.
// Border pixels (first/last row or column) are passed through unchanged.
// NOTE(review): the filter runs in place on Da while neighboring rows are
// processed by other blocks -- a block may read rows another block has
// already overwritten, so results can depend on block scheduling. Confirm
// whether a separate output buffer was intended.
__global__ void processData(unsigned char *Da, int* filter)
{
    int tx = threadIdx.x; // thread x-index == image column
    int bx = blockIdx.x; // block x-index == image row
    int bn = blockDim.x;
    int gid = bx * bn + tx; // linear pixel index
    __shared__ int sfilter[3][3];
    __shared__ int sR[3][512]; // the three rows (above/current/below) this block reads
    __shared__ int sG[3][512];
    __shared__ int sB[3][512];
    __shared__ int sRsum[512]; // per-block results for the 512 pixels of this row
    __shared__ int sGsum[512];
    __shared__ int sBsum[512];
    if (tx < 9) // first nine threads stage the 3x3 filter into shared memory
    {
        sfilter[tx / 3][tx % 3] = filter[tx];
    }
    __syncthreads();
    if (bx == 0 || bx == 511 || tx == 0 || tx == 511)
    {
        // border handling --> keep the original pixel value unchanged
        sRsum[tx] = Da[gid * 3];
        sGsum[tx] = Da[gid * 3 + 1];
        sBsum[tx] = Da[gid * 3 + 2];
    }
    // interior rows only (first and last image rows are skipped; bx is
    // uniform within a block, so the barrier below is not divergent)
    if (bx != 0 && bx != 511)
    {
        // stage the three R, G, B rows this block needs into shared memory
        sR[0][tx] = Da[gid * 3 - 512 * 3];
        sR[1][tx] = Da[gid * 3];
        sR[2][tx] = Da[gid * 3 + 512 * 3];
        sG[0][tx] = Da[gid * 3 - 512 * 3 + 1];
        sG[1][tx] = Da[gid * 3 + 1];
        sG[2][tx] = Da[gid * 3 + 512 * 3 + 1];
        sB[0][tx] = Da[gid * 3 - 512 * 3 + 2];
        sB[1][tx] = Da[gid * 3 + 2];
        sB[2][tx] = Da[gid * 3 + 512 * 3 + 2];
        __syncthreads();
        // interior columns only (first/last column of each row skipped)
        if (tx != 0 && tx != 511)
        {
            // R channel: 3x3 weighted sum
            sRsum[tx] = sR[0][tx - 1] * sfilter[0][0];
            sRsum[tx] += sR[0][tx] * sfilter[0][1];
            sRsum[tx] += sR[0][tx + 1] * sfilter[0][2];
            sRsum[tx] += sR[1][tx - 1] * sfilter[1][0];
            sRsum[tx] += sR[1][tx] * sfilter[1][1];
            sRsum[tx] += sR[1][tx + 1] * sfilter[1][2];
            sRsum[tx] += sR[2][tx - 1] * sfilter[2][0];
            sRsum[tx] += sR[2][tx] * sfilter[2][1];
            sRsum[tx] += sR[2][tx + 1] * sfilter[2][2];
            // G channel
            sGsum[tx] = sG[0][tx - 1] * sfilter[0][0];
            sGsum[tx] += sG[0][tx] * sfilter[0][1];
            sGsum[tx] += sG[0][tx + 1] * sfilter[0][2];
            sGsum[tx] += sG[1][tx - 1] * sfilter[1][0];
            sGsum[tx] += sG[1][tx] * sfilter[1][1];
            sGsum[tx] += sG[1][tx + 1] * sfilter[1][2];
            sGsum[tx] += sG[2][tx - 1] * sfilter[2][0];
            sGsum[tx] += sG[2][tx] * sfilter[2][1];
            sGsum[tx] += sG[2][tx + 1] * sfilter[2][2];
            // B channel
            sBsum[tx] = sB[0][tx - 1] * sfilter[0][0];
            sBsum[tx] += sB[0][tx] * sfilter[0][1];
            sBsum[tx] += sB[0][tx + 1] * sfilter[0][2];
            sBsum[tx] += sB[1][tx - 1] * sfilter[1][0];
            sBsum[tx] += sB[1][tx] * sfilter[1][1];
            sBsum[tx] += sB[1][tx + 1] * sfilter[1][2];
            sBsum[tx] += sB[2][tx - 1] * sfilter[2][0];
            sBsum[tx] += sB[2][tx] * sfilter[2][1];
            sBsum[tx] += sB[2][tx + 1] * sfilter[2][2];
            // normalize by the divisor stored in filter[9]
            sRsum[tx] /= filter[9];
            sGsum[tx] /= filter[9];
            sBsum[tx] /= filter[9];
            // clamp each channel to the valid byte range [0, 255]
            if (sRsum[tx] > 255)
                sRsum[tx] = 255;
            else if (sRsum[tx] < 0)
                sRsum[tx] = 0;
            if (sGsum[tx] > 255)
                sGsum[tx] = 255;
            else if (sGsum[tx] < 0)
                sGsum[tx] = 0;
            if (sBsum[tx] > 255)
                sBsum[tx] = 255;
            else if (sBsum[tx] < 0)
                sBsum[tx] = 0;
        }
    }
    __syncthreads();
    // merge the R, G, B results back into the interleaved 1-D array so
    // they can be written to the output file
    Da[gid * 3] = sRsum[tx];
    Da[gid * 3 + 1] = sGsum[tx];
    Da[gid * 3 + 2] = sBsum[tx];
} |
6abd381e5c139e19718a463dcefc892bc9293dcc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "hip/device_functions.h"
#include <stdio.h>
#include <stdlib.h>
const int thread_per_block = 256;
// Block-level dot-product partial reduction.
// Each thread accumulates a grid-strided partial sum of a[i]*b[i] in a
// register, the block tree-reduces the per-thread partials in shared
// memory, and thread 0 writes the block's partial result to c[blockIdx.x].
// Requires blockDim.x == thread_per_block and thread_per_block to be a
// power of two (256 here) so the halving reduction is exact.
__global__ void Dot(float* a, float* b, float* c, int size)
{
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    __shared__ float sh[thread_per_block];
    // Grid-stride accumulation so any grid size covers all `size` elements.
    float temp = 0;
    while (tid < size)
    {
        temp += a[tid] * b[tid];
        tid += gridDim.x * blockDim.x;
    }
    sh[threadIdx.x] = temp;
    __syncthreads();
    // Tree reduction. BUG FIX: the stride must start at blockDim.x / 2 and
    // be identical for every thread; the original used threadIdx.x / 2,
    // which gave each thread a different trip count, produced a wrong sum,
    // and executed __syncthreads() divergently (undefined behavior).
    int i = blockDim.x / 2;
    while (i > 0)
    {
        if (threadIdx.x < i)
        {
            sh[threadIdx.x] += sh[threadIdx.x + i];
        }
        __syncthreads();
        i /= 2;
    }
    if (threadIdx.x == 0)
    {
        c[blockIdx.x] = sh[0];
    }
}
// Host wrapper: computes a dot product of two size-element vectors on the
// GPU and returns the scalar sum of the per-block partial results.
// NOTE(review): before copying to the device this function OVERWRITES the
// caller's a and b with a[i] = i and b[i] = 2*i, so the incoming contents
// of the arguments are ignored -- confirm this test scaffolding is
// intentional before reusing the function.
// NOTE(review): cudaStatus is assigned but never inspected, and none of
// the allocation/copy/launch calls are error-checked.
extern "C"
float CUDADot(float* a, float* b, int size)
{
    hipError_t cudaStatus;
    // One partial sum per block of thread_per_block (256) threads.
    int block_per_grid = (size + thread_per_block - 1) / thread_per_block;
    float* partial_c = (float*)(malloc(block_per_grid * sizeof(float)));
    float* dev_a, * dev_b, * dev_c, c;
    hipMalloc((void**)&dev_a, size * sizeof(float));
    hipMalloc((void**)&dev_b, size * sizeof(float));
    hipMalloc((void**)&dev_c, block_per_grid * sizeof(float));
    for (int i = 0; i < size; ++i)
    {
        a[i] = i;
        b[i] = i * 2;
    }
    hipMemcpy(dev_a, a, size * sizeof(float), hipMemcpyHostToDevice);
    hipMemcpy(dev_b, b, size * sizeof(float), hipMemcpyHostToDevice);
    Dot << <block_per_grid, thread_per_block >> > (dev_a, dev_b, dev_c, size);
    // This blocking D2H copy also synchronizes with the kernel above.
    cudaStatus = hipMemcpy(partial_c, dev_c, block_per_grid * sizeof(float), hipMemcpyDeviceToHost);
    // Final reduction of the per-block partials on the host.
    c = 0;
    for (int i = 0; i < block_per_grid; ++i)
    {
        c += partial_c[i];
    }
    hipFree(dev_a);
    hipFree(dev_b);
    hipFree(dev_c);
    free(partial_c);
    return c;
} | 6abd381e5c139e19718a463dcefc892bc9293dcc.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "device_functions.h"
#include <stdio.h>
#include <stdlib.h>
const int thread_per_block = 256;
// Block-level dot-product partial reduction.
// Each thread accumulates a grid-strided partial sum of a[i]*b[i] in a
// register, the block tree-reduces the per-thread partials in shared
// memory, and thread 0 writes the block's partial result to c[blockIdx.x].
// Requires blockDim.x == thread_per_block and thread_per_block to be a
// power of two (256 here) so the halving reduction is exact.
__global__ void Dot(float* a, float* b, float* c, int size)
{
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    __shared__ float sh[thread_per_block];
    // Grid-stride accumulation so any grid size covers all `size` elements.
    float temp = 0;
    while (tid < size)
    {
        temp += a[tid] * b[tid];
        tid += gridDim.x * blockDim.x;
    }
    sh[threadIdx.x] = temp;
    __syncthreads();
    // Tree reduction. BUG FIX: the stride must start at blockDim.x / 2 and
    // be identical for every thread; the original used threadIdx.x / 2,
    // which gave each thread a different trip count, produced a wrong sum,
    // and executed __syncthreads() divergently (undefined behavior).
    int i = blockDim.x / 2;
    while (i > 0)
    {
        if (threadIdx.x < i)
        {
            sh[threadIdx.x] += sh[threadIdx.x + i];
        }
        __syncthreads();
        i /= 2;
    }
    if (threadIdx.x == 0)
    {
        c[blockIdx.x] = sh[0];
    }
}
// Host wrapper: computes a dot product of two size-element vectors on the
// GPU and returns the scalar sum of the per-block partial results.
// NOTE(review): before copying to the device this function OVERWRITES the
// caller's a and b with a[i] = i and b[i] = 2*i, so the incoming contents
// of the arguments are ignored -- confirm this test scaffolding is
// intentional before reusing the function.
// NOTE(review): cudaStatus is assigned but never inspected, and none of
// the allocation/copy/launch calls are error-checked.
extern "C"
float CUDADot(float* a, float* b, int size)
{
    cudaError_t cudaStatus;
    // One partial sum per block of thread_per_block (256) threads.
    int block_per_grid = (size + thread_per_block - 1) / thread_per_block;
    float* partial_c = (float*)(malloc(block_per_grid * sizeof(float)));
    float* dev_a, * dev_b, * dev_c, c;
    cudaMalloc((void**)&dev_a, size * sizeof(float));
    cudaMalloc((void**)&dev_b, size * sizeof(float));
    cudaMalloc((void**)&dev_c, block_per_grid * sizeof(float));
    for (int i = 0; i < size; ++i)
    {
        a[i] = i;
        b[i] = i * 2;
    }
    cudaMemcpy(dev_a, a, size * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, size * sizeof(float), cudaMemcpyHostToDevice);
    Dot << <block_per_grid, thread_per_block >> > (dev_a, dev_b, dev_c, size);
    // This blocking D2H copy also synchronizes with the kernel above.
    cudaStatus = cudaMemcpy(partial_c, dev_c, block_per_grid * sizeof(float), cudaMemcpyDeviceToHost);
    // Final reduction of the per-block partials on the host.
    c = 0;
    for (int i = 0; i < block_per_grid; ++i)
    {
        c += partial_c[i];
    }
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    free(partial_c);
    return c;
} |
63f9c1ab5ca5b521f232a07ada7f2f8f34f2215a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <cmath>
#include <cassert>
#include <cstdlib>
#include <omp.h>
#ifdef WITH_CUDA5
# include <helper_cuda.h>
# define CUDA_SAFE_CALL checkCudaErrors
#else
# include <cutil.h>
#endif
#include "cuda_pointer.h"
#define NTHREAD 64 // 64 or 128
// #define NJBLOCK 14 // for GTX 470
#define NJBLOCK 30 // for GTX660Ti
#define NIBLOCK 32 // 16 or 32
#define NIMAX (NTHREAD * NIBLOCK) // 2048
#define NXREDUCE 32 // must be >NJBLOCK
#define NYREDUCE 8
#define NNB_PER_BLOCK 256 // NNB per block, must be power of 2
#define NB_BUF_SIZE (1<<21)
// #define NNB_MAX 384 // total NNB at reduced
#define MAX_CPU 16
#define MAX_GPU 4
// for clearity, for myself
#define __out
#define PROFILE
#define NAN_CHECK(val) assert((val) == (val));
typedef unsigned short uint16;
struct Jparticle{
float3 pos;
float mass;
float3 vel;
float pad;
Jparticle() {}
Jparticle(double mj, double xj[3], double vj[3]){
pos.x = xj[0];
pos.y = xj[1];
pos.z = xj[2];
mass = mj;
vel.x = vj[0];
vel.y = vj[1];
vel.z = vj[2];
NAN_CHECK(xj[0]);
NAN_CHECK(xj[1]);
NAN_CHECK(xj[2]);
NAN_CHECK(mj);
NAN_CHECK(vj[0]);
NAN_CHECK(vj[1]);
NAN_CHECK(vj[2]);
}
__device__
Jparticle(const float4 *buf){
float4 tmp1 = buf[0];
float4 tmp2 = buf[1];
pos.x = tmp1.x;
pos.y = tmp1.y;
pos.z = tmp1.z;
mass = tmp1.w;
vel.x = tmp2.x;
vel.y = tmp2.y;
vel.z = tmp2.z;
}
};
struct Iparticle{
float3 pos;
float h2;
float3 vel;
float dtr;
Iparticle() {}
Iparticle(double h2i, double dtri, double xi[3], double vi[3]){
pos.x = xi[0];
pos.y = xi[1];
pos.z = xi[2];
h2 = h2i;
vel.x = vi[0];
vel.y = vi[1];
vel.z = vi[2];
dtr = dtri;
NAN_CHECK(xi[0]);
NAN_CHECK(xi[1]);
NAN_CHECK(xi[2]);
NAN_CHECK(h2i);
NAN_CHECK(vi[0]);
NAN_CHECK(vi[1]);
NAN_CHECK(vi[2]);
}
};
struct Force{
float3 acc;
float pot;
float3 jrk;
int nnb; // 8 words
__device__ void clear(){
acc.x = acc.y = acc.z = 0.f;
jrk.x = jrk.y = jrk.z = 0.f;
pot = 0.f;
nnb = 0;
}
__device__ void operator+=(const Force &rhs){
acc.x += rhs.acc.x;
acc.y += rhs.acc.y;
acc.z += rhs.acc.z;
pot += rhs.pot;
jrk.x += rhs.jrk.x;
jrk.y += rhs.jrk.y;
jrk.z += rhs.jrk.z;
if(nnb>=0 && rhs.nnb>=0){
nnb += rhs.nnb;
}else{
nnb = -1;
}
}
#if __CUDA_ARCH__ >= 300
__device__ void reduce_with(const int mask){
acc.x += __shfl_xor(acc.x, mask);
acc.y += __shfl_xor(acc.y, mask);
acc.z += __shfl_xor(acc.z, mask);
pot += __shfl_xor(pot , mask);
jrk.x += __shfl_xor(jrk.x, mask);
jrk.y += __shfl_xor(jrk.y, mask);
jrk.z += __shfl_xor(jrk.z, mask);
int ntmp = __shfl_xor(nnb, mask);
if(nnb>=0 && ntmp>=0){
nnb += ntmp;
}else{
nnb = -1;
}
}
#endif
};
// Accumulates the Newtonian acceleration, jerk (and optionally potential)
// of one j-particle on one i-particle into the running accumulator `fo`.
//   jidx   : j index relative to this j-block, stored in the neighbor list
//   ip     : i-particle (position, velocity, neighbor radius h2, step dtr)
//   jp     : j-particle (position, velocity, mass)
//   nblist : per-(i, j-block) neighbor buffer of NNB_PER_BLOCK slots
// Pairs inside the mass-weighted neighbor criterion are recorded in
// nblist and EXCLUDED from the regular force by zeroing 1/r.
__device__ void dev_gravity(
    const int jidx,
    const Iparticle &ip,
    const Jparticle &jp,
    __out Force &fo,
    __out uint16 nblist[]){
    // relative position and velocity (j - i)
    float dx = jp.pos.x - ip.pos.x;
    float dy = jp.pos.y - ip.pos.y;
    float dz = jp.pos.z - ip.pos.z;
    float dvx = jp.vel.x - ip.vel.x;
    float dvy = jp.vel.y - ip.vel.y;
    float dvz = jp.vel.z - ip.vel.z;
    float r2 = dx*dx + dy*dy + dz*dz;
#if 1
    // separation predicted ip.dtr ahead: a pair counts as a neighbor if
    // it is close now OR will be close after the regular step.
    float dxp = dx + ip.dtr * dvx;
    float dyp = dy + ip.dtr * dvy;
    float dzp = dz + ip.dtr * dvz;
    float r2p = dxp*dxp + dyp*dyp + dzp*dzp;
#else
    float r2p = r2;
#endif
    float rv = dx*dvx + dy*dvy + dz*dvz;
    float rinv1 = rsqrtf(r2);
    // neighbor test, radius scaled by the j-mass
    if(min(r2, r2p) < jp.mass * ip.h2){
        // fo.neib[fo.nnb++ % NBMAX] = j;
        // ring-buffer write: only the low NNB_PER_BLOCK entries are kept;
        // overflow is detected later from the total count (nnb -> -1).
        nblist[fo.nnb & (NNB_PER_BLOCK-1)] = (uint16)jidx;
        fo.nnb++;
        rinv1 = 0.f;
    }
    float rinv2 = rinv1 * rinv1;
    float mrinv1 = jp.mass * rinv1;
    float mrinv3 = mrinv1 * rinv2;
    // rv becomes the -3 (r.v)/r^2 coefficient of the jerk formula
    rv *= -3.f * rinv2;
#ifdef POTENTIAL
    fo.pot += mrinv1;
#endif
    // acceleration: m/r^3 * dr
    fo.acc.x += mrinv3 * dx;
    fo.acc.y += mrinv3 * dy;
    fo.acc.z += mrinv3 * dz;
    // fo.acc.z += 1.0;
    // jerk: m/r^3 * (dv - 3 (r.v)/r^2 * dr)
    fo.jrk.x += mrinv3 * (dvx + rv * dx);
    fo.jrk.y += mrinv3 * (dvy + rv * dy);
    fo.jrk.z += mrinv3 * (dvz + rv * dz);
}
__global__ void gravity_kernel(
const int nbody,
const Iparticle ipbuf[],
const Jparticle jpbuf[],
__out Force fobuf[][NJBLOCK],
__out uint16 nbbuf[][NJBLOCK][NNB_PER_BLOCK]){
int ibid = blockIdx.x;
int jbid = blockIdx.y;
int tid = threadIdx.x;
int iaddr = tid + blockDim.x * ibid;
int jstart = (nbody * (jbid )) / NJBLOCK;
int jend = (nbody * (jbid+1)) / NJBLOCK;
Iparticle ip = ipbuf[iaddr];
Force fo;
fo.clear();
uint16 *nblist = nbbuf[iaddr][jbid];
#if __CUDA_ARCH__ >= 300 // just some trial
for(int j=jstart; j<jend; j+=32){
__shared__ Jparticle jpshare[32];
__syncthreads();
float4 *src = (float4 *)&jpbuf[j];
float4 *dst = (float4 *)jpshare;
dst[tid] = src[tid];
__syncthreads();
if(jend-j < 32){
#pragma unroll 4
for(int jj=0; jj<jend-j; jj++){
const Jparticle jp = jpshare[jj];
// const Jparticle jp( (float4 *)jpshare + 2*jj);
dev_gravity(j-jstart+jj, ip, jp, fo, nblist);
}
}else{
#pragma unroll 8
for(int jj=0; jj<32; jj++){
const Jparticle jp = jpshare[jj];
// const Jparticle jp( (float4 *)jpshare + 2*jj);
dev_gravity(j-jstart+jj, ip, jp, fo, nblist);
}
}
}
#else
for(int j=jstart; j<jend; j+=NTHREAD){
__shared__ Jparticle jpshare[NTHREAD];
__syncthreads();
float4 *src = (float4 *)&jpbuf[j];
float4 *dst = (float4 *)jpshare;
dst[ tid] = src[ tid];
dst[NTHREAD+tid] = src[NTHREAD+tid];
__syncthreads();
if(jend-j < NTHREAD){
#pragma unroll 4
for(int jj=0; jj<jend-j; jj++){
Jparticle jp = jpshare[jj];
dev_gravity(j-jstart+jj, ip, jp, fo, nblist);
}
}else{
#pragma unroll 8
for(int jj=0; jj<NTHREAD; jj++){
Jparticle jp = jpshare[jj];
dev_gravity(j-jstart+jj, ip, jp, fo, nblist);
}
}
}
#endif
if(fo.nnb > NNB_PER_BLOCK) fo.nnb = -1;
fobuf[iaddr][jbid] = fo;
}
#if __CUDA_ARCH__ >= 300
__device__ void warp_reduce_int(int inp, int *out){
inp += __shfl_xor(inp, 1);
inp += __shfl_xor(inp, 2);
inp += __shfl_xor(inp, 4);
inp += __shfl_xor(inp, 8);
# if NXREDUCE==32
inp += __shfl_xor(inp, 16);
# endif
*out = inp;
}
__device__ void warp_reduce_float8(float4 inp1, float4 inp2, float *out){
const int tid = threadIdx.x;
float4 tmp4L = (4&tid) ? inp2 : inp1;
float4 tmp4R = (4&tid) ? inp1 : inp2;
tmp4L.x += __shfl_xor(tmp4R.x, 4);
tmp4L.y += __shfl_xor(tmp4R.y, 4);
tmp4L.z += __shfl_xor(tmp4R.z, 4);
tmp4L.w += __shfl_xor(tmp4R.w, 4);
float4 tmp4;
tmp4.x = (2&tid) ? tmp4L.z : tmp4L.x;
tmp4.y = (2&tid) ? tmp4L.w : tmp4L.y;
tmp4.z = (2&tid) ? tmp4L.x : tmp4L.z;
tmp4.w = (2&tid) ? tmp4L.y : tmp4L.w;
tmp4.x += __shfl_xor(tmp4.z, 2);
tmp4.y += __shfl_xor(tmp4.w, 2);
float2 tmp2;
tmp2.x = (1&tid) ? tmp4.y : tmp4.x;
tmp2.y = (1&tid) ? tmp4.x : tmp4.y;
tmp2.x += __shfl_xor(tmp2.y, 1);
tmp2.x += __shfl_xor(tmp2.x, 8);
# if NXREDUCE==32
tmp2.x += __shfl_xor(tmp2.x, 16);
# endif
if(tid < 8){
out[tid] = tmp2.x;
}
}
#endif
__global__ void force_reduce_kernel(
const int ni,
const Force fpart[][NJBLOCK],
__out Force ftot []){
const int xid = threadIdx.x;
const int yid = threadIdx.y;
const int bid = blockIdx.x;
const int iaddr = yid + blockDim.y * bid;
#if __CUDA_ARCH__ >= 300
Force f;
if(xid < NJBLOCK){
f = fpart[iaddr][xid];
}else{
f.clear();
}
# if 0
# pragma unroll
for(int mask=1; mask<NXREDUCE; mask*=2){
f.reduce_with(mask);
}
if(iaddr < ni && xid == 0){
ftot[iaddr] = f;
}
# else
if(iaddr < ni){
const float4 tmp1 = make_float4(f.acc.x, f.acc.y, f.acc.z, f.pot);
const float4 tmp2 = make_float4(f.jrk.x, f.jrk.y, f.jrk.z, 0.0f);
const int itmp = f.nnb;
float *dst = (float *)(ftot + iaddr);
int *idst = (int *)(dst + 7);
warp_reduce_float8(tmp1, tmp2, dst);
warp_reduce_int(itmp, idst);
}
# endif
#else
__shared__ Force fshare[NYREDUCE][NXREDUCE];
if(xid < NJBLOCK){
fshare[yid][xid] = fpart[iaddr][xid];
}else{
fshare[yid][xid].clear();
}
Force *fs = fshare[yid];
#if NXREDUCE==32
if(xid < 16) fs[xid] += fs[xid + 16];
#endif
if(xid < 8) fs[xid] += fs[xid + 8];
if(xid < 4) fs[xid] += fs[xid + 4];
if(xid < 2) fs[xid] += fs[xid + 2];
if(xid < 1) fs[xid] += fs[xid + 1];
if(iaddr < ni){
ftot[iaddr] = fs[0];
}
#endif
}
__global__ void gather_nb_kernel(
const int ni,
const int nj,
const int joff,
const Force fpart[][NJBLOCK],
const Force ftot [],
const int nboff[],
const uint16 nbpart[][NJBLOCK][NNB_PER_BLOCK],
__out int nblist[])
{
const int xid = threadIdx.x;
const int yid = threadIdx.y;
const int bid = blockIdx.x;
const int iaddr = yid + blockDim.y * bid;
if(iaddr >= ni) return;
if(ftot[iaddr].nnb < 0) return;
const int mynnb = (xid < NJBLOCK) ? fpart[iaddr][xid].nnb
: 0;
// now performe prefix sum
#if __CUDA_ARCH__ >= 300
int ix = mynnb;
#pragma unroll
for(int ioff=1; ioff<NXREDUCE; ioff*=2){
int iy = __shfl_up(ix, ioff);
if(xid>=ioff) ix += iy;
}
int iz = __shfl_up(ix, 1);
const int off = (xid == 0) ? 0 : iz;
#else
__shared__ int ishare[NYREDUCE][NXREDUCE];
ishare[yid][xid] = mynnb;
volatile int *ish = ishare[yid];
if(xid>=1) ish[xid] += ish[xid-1];
if(xid>=2) ish[xid] += ish[xid-2];
if(xid>=4) ish[xid] += ish[xid-4];
if(xid>=8) ish[xid] += ish[xid-8];
#if NXREDUCE==32
if(xid>=16) ish[xid] += ish[xid-16];
#endif
const int off = (xid == 0) ? 0
: ish[xid-1];
#endif
int *nbdst = nblist + nboff[iaddr] + off;
const int jstart = (nj * xid) / NJBLOCK;
if(xid < NJBLOCK){
for(int k=0; k<mynnb; k++){
const int nbid = (joff + jstart) + int(nbpart[iaddr][xid][k]);
// const int nbid = iaddr * 1000 + k;
nbdst[k] = nbid;
}
}
}
// Host Part
#ifdef PROFILE
#include <sys/time.h>
// Wall-clock time in seconds (microsecond resolution), used to accumulate
// the time_send/time_grav/time_reduce profiling counters.
static double get_wtime(){
    struct timeval tv;
    gettimeofday(&tv, NULL);
    return tv.tv_sec + 1.e-6 * tv.tv_usec;
}
#else
// Profiling disabled: all timing arithmetic collapses to zero.
static double get_wtime(){
    return 0.0;
}
#endif
static double time_send, time_grav, time_reduce;
static long long numInter;
static cudaPointer <Jparticle> jpbuf[MAX_GPU];
static cudaPointer <Iparticle> ipbuf[MAX_GPU];
static cudaPointer <Force[NJBLOCK]> fpart[MAX_GPU];
static cudaPointer <Force> ftot [MAX_GPU];
static cudaPointer <uint16[NJBLOCK][NNB_PER_BLOCK]> nbpart[MAX_GPU];
static cudaPointer <int> nblist [MAX_GPU];
static cudaPointer <int> nboff [MAX_GPU];
static int numCPU, numGPU;
static int joff[MAX_GPU + 1];
static int nbody, nbodymax;
static int devid[MAX_GPU];
static bool is_open = false;
static bool devinit = false;
void GPUNB_devinit(){
if(devinit) return;
assert(NXREDUCE >= NJBLOCK);
assert(NXREDUCE <= 32);
hipGetDeviceCount(&numGPU);
assert(numGPU <= MAX_GPU);
char *gpu_list = getenv("GPU_LIST");
if(gpu_list){
// get GPU list from environment variable
numGPU = 0;
char *p = strtok(gpu_list, " ");
while(p){
devid[numGPU++] = atoi(p);
p = strtok(NULL, " ");
assert(numGPU <= MAX_GPU);
}
}else{
// use all GPUs
for(int i=0; i<numGPU; i++){
devid[i] = i;
}
}
// numGPU = 1;
#pragma omp parallel
{
int tid = omp_get_thread_num();
if(tid == 0) numCPU = omp_get_num_threads();
}
assert(numCPU <= MAX_CPU);
assert(numGPU <= numCPU);
#pragma omp parallel
{
int tid = omp_get_thread_num();
if(tid < numGPU){
hipSetDevice(devid[tid]);
}
}
#ifdef PROFILE
fprintf(stderr, "***********************\n");
fprintf(stderr, "Initializing NBODY6/GPU library\n");
fprintf(stderr, "#CPU %d, #GPU %d\n", numCPU, numGPU);
fprintf(stderr, " device:");
for(int i=0; i<numGPU; i++){
fprintf(stderr, " %d", devid[i]);
}
fprintf(stderr, "\n");
#if 1
for(int i=0; i<numGPU; i++){
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, devid[i]);
fprintf(stderr, " device %d: %s\n", devid[i], prop.name);
}
#endif
fprintf(stderr, "***********************\n");
#endif
devinit = true;
}
void GPUNB_open(int nbmax){
time_send = time_grav = time_reduce = 0.0;
numInter = 0;
nbodymax = nbmax;
GPUNB_devinit();
if(is_open){
fprintf(stderr, "gpunb: it is already open\n");
return;
}
is_open = true;
for(int id=0; id<numGPU + 1; id++){
joff[id] = (id * nbmax) / numGPU;
}
// omp_set_num_threads(numGPU);
#pragma omp parallel
{
int tid = omp_get_thread_num();
if(tid < numGPU){
hipSetDevice(devid[tid]);
int nj = joff[tid+1] - joff[tid];
jpbuf [tid].allocate(nj + NTHREAD);
ipbuf [tid].allocate(NIMAX);
fpart [tid].allocate(NIMAX);
ftot [tid].allocate(NIMAX);
nbpart[tid].allocate(NIMAX);
nblist[tid].allocate(NB_BUF_SIZE); // total ganged nblist
nboff [tid].allocate(NIMAX+1);
}
}
#ifdef PROFILE
fprintf(stderr, "***********************\n");
fprintf(stderr, "Opened NBODY6/GPU library\n");
fprintf(stderr, "#CPU %d, #GPU %d\n", numCPU, numGPU);
fprintf(stderr, " device:");
for(int i=0; i<numGPU; i++){
fprintf(stderr, " %d", devid[i]);
}
fprintf(stderr, "\n");
for(int i=0; i<numGPU+1; i++){
fprintf(stderr, " %d", joff[i]);
}
fprintf(stderr, "\n");
fprintf(stderr, "nbmax = %d\n", nbmax);
fprintf(stderr, "***********************\n");
#endif
}
void GPUNB_close(){
if(!is_open){
fprintf(stderr, "gpunb: it is already close\n");
return;
}
is_open = false;
// omp_set_num_threads(numGPU);
#pragma omp parallel
{
int tid = omp_get_thread_num();
if(tid < numGPU){
jpbuf [tid].free();
ipbuf [tid].free();
fpart [tid].free();
ftot [tid].free();
nbpart[tid].free();
nblist[tid].free();
nboff [tid].free();
}
}
// omp_set_num_threads(numCPU);
nbodymax = 0;
#ifdef PROFILE
fprintf(stderr, "***********************\n");
fprintf(stderr, "Closed NBODY6/GPU library\n");
fprintf(stderr, "time send : %f sec\n", time_send);
fprintf(stderr, "time grav : %f sec\n", time_grav);
fprintf(stderr, "time reduce : %f sec\n", time_reduce);
fprintf(stderr, "time regtot : %f sec\n", time_send + time_grav + time_reduce);
fprintf(stderr, "%f Gflops (gravity part only)\n", 60.e-9 * numInter / time_grav);
fprintf(stderr, "***********************\n");
#endif
}
// Uploads the full j-particle system to the GPUs.
// The nbody particles are partitioned so that GPU `id` owns the slice
// [joff[id], joff[id+1]); each OpenMP thread mapped to a GPU packs its
// slice from double precision into the float Jparticle layout and copies
// it host-to-device. Elapsed time is accumulated into time_send.
void GPUNB_send(
    int _nbody,     // number of j-particles
    double mj[],    // masses
    double xj[][3], // positions
    double vj[][3]){ // velocities
    assert(is_open);
    nbody = _nbody;
    assert(nbody <= nbodymax);
    time_send -= get_wtime();
    // recompute the per-GPU partition boundaries for the current nbody
    for(int id=0; id<numGPU + 1; id++){
        joff[id] = (id * nbody) / numGPU;
    }
#pragma omp parallel
    {
        int tid = omp_get_thread_num();
        if(tid < numGPU){
            int nj = joff[tid+1] - joff[tid];
            for(int j=0; j<nj; j++){
                int jj = j + joff[tid];
                jpbuf[tid][j] = Jparticle(mj[jj], xj[jj], vj[jj]);
            }
            // copy this GPU's slice host-to-device
            jpbuf[tid].htod(nj);
        }
    }
    time_send += get_wtime();
}
// Compute regular forces, jerks, potentials and neighbour lists for ni
// i-particles against all previously sent j-particles.
// Pipeline per GPU (one OpenMP thread each): upload i-particles, run the
// pairwise gravity kernel (NJBLOCK partial results per i-particle), reduce
// the partials, build a prefix sum of neighbour counts, then gather the
// per-block neighbour indices into one packed list and download it.
// Finally the per-GPU results are summed on the CPU and neighbour lists are
// merged into listbase (row i starts at listbase + lmax*i; slot 0 = count,
// negative count flags overflow).
void GPUNB_regf(
    int ni,           // number of i-particles (0 < ni <= NIMAX)
    double h2[],      // neighbour-criterion parameter per i-particle
    double dtr[],     // prediction interval per i-particle
    double xi[][3],   // positions
    double vi[][3],   // velocities
    double acc[][3],  // out: accelerations
    double jrk[][3],  // out: jerks
    double pot[],     // out: potentials
    int lmax,         // stride of listbase rows
    int nnbmax,       // maximum allowed neighbours per i-particle
    int *listbase){   // out: neighbour lists, list[i][lmax]
    assert(is_open);
    time_grav -= get_wtime();
    // NOTE(review): ni * nbody is evaluated in 32-bit int before being added
    // to numInter (a long long in the companion CUDA source) — may overflow
    // for large nbody; consider (long long)ni * nbody. TODO confirm.
    numInter += ni * nbody;
    assert(0 < ni && ni <= NIMAX);
    // omp_set_num_threads(numGPU);
#pragma omp parallel
    {
        int tid = omp_get_thread_num();
        if(tid < numGPU){
            // hipSetDevice(device_id[tid]);
            for(int i=0; i<ni; i++){
                ipbuf[tid][i] = Iparticle(h2[i], dtr[i], xi[i], vi[i]);
            }
            // set i-particles
            ipbuf[tid].htod(ni);
            // gravity kernel
            int niblock = 1 + (ni-1) / NTHREAD; // ceil(ni / NTHREAD)
            dim3 grid(niblock, NJBLOCK, 1);
            dim3 threads(NTHREAD, 1, 1);
            int nj = joff[tid+1] - joff[tid]; // this device's j-slice size
            hipLaunchKernelGGL(( gravity_kernel) , dim3(grid), dim3(threads) , 0, 0,
                nj, ipbuf[tid], jpbuf[tid], fpart[tid], nbpart[tid]);
            // CUDA_SAFE_THREAD_SYNC();
#if 0
            dim3 rgrid(niblock, 1, 1);
            hipLaunchKernelGGL(( reduce_kernel) , dim3(rgrid), dim3(threads) , 0, 0,
                nj, joff[tid], fpart[tid], nbpart[tid], ftot[tid], nbtot[tid]);
#else
            // One NXREDUCE-lane row per i-particle, NYREDUCE rows per block.
            const int ni8 = 1 + (ni-1) / NYREDUCE;
            dim3 rgrid (ni8, 1, 1);
            dim3 rthreads(NXREDUCE, NYREDUCE, 1);
            hipLaunchKernelGGL(( force_reduce_kernel) , dim3(rgrid), dim3(rthreads) , 0, 0,
                ni, fpart[tid], ftot[tid]);
#endif
            // CUDA_SAFE_THREAD_SYNC();
            ftot [tid].dtoh(ni);
            // now make prefix sum of neighbour counts (exclusive scan);
            // overflowed entries (nnb < 0) contribute nothing.
            int nbsum = 0;
            for(int i=0; i<ni; i++){
                nboff[tid][i] = nbsum;
                const int nnb = ftot[tid][i].nnb;
                // assert(nnb >= 0);
                if(nnb >= 0) nbsum += nnb;
            }
            assert(nbsum <= NB_BUF_SIZE);
            nboff[tid].htod(ni);
            // debugging
            // for(int k=0; k<nbsum; k++) nblist[tid][k] = -1;
            // nblist[tid].htod(nbsum);
            hipLaunchKernelGGL(( gather_nb_kernel) , dim3(rgrid), dim3(rthreads), 0, 0,
                ni, nj, joff[tid], fpart[tid], ftot[tid],
                nboff[tid], nbpart[tid], nblist[tid]);
            // CUDA_SAFE_THREAD_SYNC();
            nblist[tid].dtoh(nbsum);
        }
    }
    const double wt = get_wtime();
    time_grav += wt;
    time_reduce -= wt;
    // reduction phase: sum the per-GPU partial forces on the CPU.
    // omp_set_num_threads(numCPU);
#pragma omp parallel for
    for(int i=0; i<ni; i++){
        double ax=0.0, ay=0.0, az=0.0;
        double jx=0.0, jy=0.0, jz=0.0;
        double po=0.0;
        for(int id=0; id<numGPU; id++){
            Force &fo = ftot[id][i];
            ax += fo.acc.x;
            ay += fo.acc.y;
            az += fo.acc.z;
            jx += fo.jrk.x;
            jy += fo.jrk.y;
            jz += fo.jrk.z;
            po += fo.pot;
        }
        acc[i][0] = ax;
        acc[i][1] = ay;
        acc[i][2] = az;
        jrk[i][0] = jx;
        jrk[i][1] = jy;
        jrk[i][2] = jz;
        pot[i] = po;
    }
    // Merge the per-GPU neighbour lists into the caller's list rows.
#pragma omp parallel for
    for(int i=0; i<ni; i++){
        bool overflow = false;
        int *nnbp = listbase + lmax * i; // slot 0: neighbour count
        int *nblistp = nnbp + 1;         // slots 1..: neighbour indices
        int nnb = 0;
        for(int id=0; id<numGPU; id++){
            const int nnb_part = ftot[id][i].nnb;
            if(nnb_part < 0){ // per-block list overflowed on the GPU
                overflow = true;
                fprintf(stderr, "!!!overflow : i=%d, id=%d, nnb_part=%d\n", i, id, nnb_part);
            }
            // assert(!overflow);
            nnb += nnb_part;
            if(nnb > nnbmax){ // caller-supplied limit exceeded
                overflow = true;
                fprintf(stderr, "!!!overflow : i=%d, id=%d, nnb_tot =%d, nnbmax=%d\n", i, id, nnb, nnbmax);
            }
            // assert(!overflow);
            if(!overflow){
                const int off = nboff[id][i];
                for(int k=0; k<nnb_part; k++){
                    *nblistp++ = nblist[id][off + k];
                }
            }
        }
        if(overflow){
            // report overflow via a negative count (never 0, hence -9999)
            // *nnbp = -1;
            *nnbp = nnb ? -abs(nnb) : -9999;
        }else{
            *nnbp = nnb;
        }
    }
    time_reduce += get_wtime();
}
// Fortran-callable wrappers: trailing underscore, all scalars by pointer.
extern "C" {
    // Initialize the device list (idempotent).
    void gpunb_devinit_(){
        GPUNB_devinit();
    }
    // Allocate per-GPU buffers for up to *nbmax particles.
    void gpunb_open_(int *nbmax){
        GPUNB_open(*nbmax);
    }
    // Free all per-GPU buffers and print profiling info.
    void gpunb_close_(){
        GPUNB_close();
    }
    // Upload *nj j-particles.
    void gpunb_send_(
        int *nj,
        double mj[],
        double xj[][3],
        double vj[][3]){
        GPUNB_send(*nj, mj, xj, vj);
    }
    // Compute regular forces and neighbour lists for *ni i-particles.
    void gpunb_regf_(
        int *ni,
        double h2[],
        double dtr[],
        double xi[][3],
        double vi[][3],
        double acc[][3],
        double jrk[][3],
        double pot[],
        int *lmax,
        int *nbmax,
        int *list){ // list[][lmax]
        GPUNB_regf(*ni, h2, dtr, xi, vi, acc, jrk, pot, *lmax, *nbmax, list);
    }
}
| 63f9c1ab5ca5b521f232a07ada7f2f8f34f2215a.cu | #include <cstdio>
#include <cmath>
#include <cassert>
#include <cstdlib>
#include <omp.h>
#ifdef WITH_CUDA5
# include <helper_cuda.h>
# define CUDA_SAFE_CALL checkCudaErrors
#else
# include <cutil.h>
#endif
#include "cuda_pointer.h"
#define NTHREAD 64 // 64 or 128
// #define NJBLOCK 14 // for GTX 470
#define NJBLOCK 30 // for GTX660Ti
#define NIBLOCK 32 // 16 or 32
#define NIMAX (NTHREAD * NIBLOCK) // 2048
#define NXREDUCE 32 // must be >NJBLOCK
#define NYREDUCE 8
#define NNB_PER_BLOCK 256 // NNB per block, must be power of 2
#define NB_BUF_SIZE (1<<21)
// #define NNB_MAX 384 // total NNB at reduced
#define MAX_CPU 16
#define MAX_GPU 4
// for clearity, for myself
#define __out
#define PROFILE
#define NAN_CHECK(val) assert((val) == (val));
typedef unsigned short uint16;
// Source ("j") particle: mass, position and velocity packed into 32 bytes
// (two float4 slots; 'pad' fills the second) so kernels can stage it into
// shared memory with float4 loads.
struct Jparticle{
    float3 pos;  // position (narrowed to float)
    float mass;  // mass
    float3 vel;  // velocity
    float pad;   // padding up to 2 * sizeof(float4)
    Jparticle() {}
    // Host-side conversion from the caller's double arrays.
    // NAN_CHECK asserts on corrupt input before it reaches the GPU.
    Jparticle(double mj, double xj[3], double vj[3]){
        pos.x = xj[0];
        pos.y = xj[1];
        pos.z = xj[2];
        mass  = mj;
        vel.x = vj[0];
        vel.y = vj[1];
        vel.z = vj[2];
        NAN_CHECK(xj[0]);
        NAN_CHECK(xj[1]);
        NAN_CHECK(xj[2]);
        NAN_CHECK(mj);
        NAN_CHECK(vj[0]);
        NAN_CHECK(vj[1]);
        NAN_CHECK(vj[2]);
    }
    // Device-side reconstruction from two consecutive float4 words
    // (used with the float4-staged shared-memory buffer).
    __device__
    Jparticle(const float4 *buf){
        float4 tmp1 = buf[0];
        float4 tmp2 = buf[1];
        pos.x = tmp1.x;
        pos.y = tmp1.y;
        pos.z = tmp1.z;
        mass  = tmp1.w;
        vel.x = tmp2.x;
        vel.y = tmp2.y;
        vel.z = tmp2.z;
    }
};
// Sink ("i") particle: position, velocity, a neighbour-criterion parameter
// h2 (compared against separation^2 scaled by the j-mass, see dev_gravity)
// and a prediction interval dtr used to extrapolate the separation.
struct Iparticle{
    float3 pos; // position
    float h2;   // neighbour-criterion parameter
    float3 vel; // velocity
    float dtr;  // prediction interval for the neighbour test
    Iparticle() {}
    // Host-side conversion from the caller's double values; NaNs trip asserts.
    Iparticle(double h2i, double dtri, double xi[3], double vi[3]){
        pos.x = xi[0];
        pos.y = xi[1];
        pos.z = xi[2];
        h2    = h2i;
        vel.x = vi[0];
        vel.y = vi[1];
        vel.z = vi[2];
        dtr   = dtri;
        NAN_CHECK(xi[0]);
        NAN_CHECK(xi[1]);
        NAN_CHECK(xi[2]);
        NAN_CHECK(h2i);
        NAN_CHECK(vi[0]);
        NAN_CHECK(vi[1]);
        NAN_CHECK(vi[2]);
    }
};
// Accumulated force on one i-particle: acceleration, potential, jerk and
// neighbour count. nnb < 0 flags a neighbour-list overflow and is kept
// "sticky" through the += and shuffle reductions.
struct Force{
    float3 acc; // acceleration
    float pot;  // potential
    float3 jrk; // jerk (time derivative of acceleration)
    int nnb;    // neighbour count, or < 0 on overflow   // 8 words
    __device__ void clear(){
        acc.x = acc.y = acc.z = 0.f;
        jrk.x = jrk.y = jrk.z = 0.f;
        pot = 0.f;
        nnb = 0;
    }
    // Pairwise accumulation; overflow (negative nnb) on either side
    // poisons the combined count.
    __device__ void operator+=(const Force &rhs){
        acc.x += rhs.acc.x;
        acc.y += rhs.acc.y;
        acc.z += rhs.acc.z;
        pot   += rhs.pot;
        jrk.x += rhs.jrk.x;
        jrk.y += rhs.jrk.y;
        jrk.z += rhs.jrk.z;
        if(nnb>=0 && rhs.nnb>=0){
            nnb += rhs.nnb;
        }else{
            nnb = -1;
        }
    }
#if __CUDA_ARCH__ >= 300
    // Butterfly-reduce this Force across warp lanes using XOR shuffles.
    // NOTE(review): uses legacy mask-less __shfl_xor, which assumes the
    // pre-Volta implicit warp-synchronous model — verify on SM70+.
    __device__ void reduce_with(const int mask){
        acc.x += __shfl_xor(acc.x, mask);
        acc.y += __shfl_xor(acc.y, mask);
        acc.z += __shfl_xor(acc.z, mask);
        pot   += __shfl_xor(pot , mask);
        jrk.x += __shfl_xor(jrk.x, mask);
        jrk.y += __shfl_xor(jrk.y, mask);
        jrk.z += __shfl_xor(jrk.z, mask);
        int ntmp = __shfl_xor(nnb, mask);
        if(nnb>=0 && ntmp>=0){
            nnb += ntmp;
        }else{
            nnb = -1;
        }
    }
#endif
};
// Accumulate the contribution of one j-particle onto one i-particle.
// If the current or the dtr-extrapolated separation^2 falls below
// jp.mass * ip.h2, the pair counts as a neighbour: jidx is recorded in
// nblist (a ring buffer of NNB_PER_BLOCK slots) and the direct force is
// suppressed by zeroing 1/r.
__device__ void dev_gravity(
    const int jidx,        // j index local to this j-block
    const Iparticle &ip,
    const Jparticle &jp,
    __out Force &fo,       // accumulated force on ip
    __out uint16 nblist[]){// per-block neighbour index ring buffer
    float dx = jp.pos.x - ip.pos.x;
    float dy = jp.pos.y - ip.pos.y;
    float dz = jp.pos.z - ip.pos.z;
    float dvx = jp.vel.x - ip.vel.x;
    float dvy = jp.vel.y - ip.vel.y;
    float dvz = jp.vel.z - ip.vel.z;
    float r2 = dx*dx + dy*dy + dz*dz;
#if 1
    // predicted separation after dtr (linear extrapolation)
    float dxp = dx + ip.dtr * dvx;
    float dyp = dy + ip.dtr * dvy;
    float dzp = dz + ip.dtr * dvz;
    float r2p = dxp*dxp + dyp*dyp + dzp*dzp;
#else
    float r2p = r2;
#endif
    float rv = dx*dvx + dy*dvy + dz*dvz;
    float rinv1 = rsqrtf(r2);
    if(min(r2, r2p) < jp.mass * ip.h2){
        // fo.neib[fo.nnb++ % NBMAX] = j;
        nblist[fo.nnb & (NNB_PER_BLOCK-1)] = (uint16)jidx;
        fo.nnb++;
        rinv1 = 0.f; // neighbour: exclude from the regular force sum
    }
    float rinv2 = rinv1 * rinv1;
    float mrinv1 = jp.mass * rinv1;
    float mrinv3 = mrinv1 * rinv2;
    rv *= -3.f * rinv2; // jerk correction factor -3 (r.v)/r^2
#ifdef POTENTIAL
    fo.pot += mrinv1;
#endif
    fo.acc.x += mrinv3 * dx;
    fo.acc.y += mrinv3 * dy;
    fo.acc.z += mrinv3 * dz;
    // fo.acc.z += 1.0;
    fo.jrk.x += mrinv3 * (dvx + rv * dx);
    fo.jrk.y += mrinv3 * (dvy + rv * dy);
    fo.jrk.z += mrinv3 * (dvz + rv * dz);
}
// Pairwise gravity. Grid layout: x = i-blocks of NTHREAD particles,
// y = NJBLOCK j-slices. Each (i, jblock) pair produces one partial Force
// in fobuf[i][jblock] and its local neighbour indices in nbbuf.
// j-particles are staged through shared memory as float4 tiles.
__global__ void gravity_kernel(
    const int nbody,                 // j-particles on this device
    const Iparticle ipbuf[],
    const Jparticle jpbuf[],
    __out Force fobuf[][NJBLOCK],    // partial force per (i, jblock)
    __out uint16 nbbuf[][NJBLOCK][NNB_PER_BLOCK]){ // local neighbour ids
    int ibid = blockIdx.x;
    int jbid = blockIdx.y;
    int tid = threadIdx.x;
    int iaddr = tid + blockDim.x * ibid;
    int jstart = (nbody * (jbid  )) / NJBLOCK;
    int jend   = (nbody * (jbid+1)) / NJBLOCK;
    Iparticle ip = ipbuf[iaddr];
    Force fo;
    fo.clear();
    uint16 *nblist = nbbuf[iaddr][jbid];
#if __CUDA_ARCH__ >= 300 // just some trial
    // 32-particle tiles: 64 float4 words copied by the NTHREAD(=64) threads.
    for(int j=jstart; j<jend; j+=32){
        __shared__ Jparticle jpshare[32];
        __syncthreads();
        float4 *src = (float4 *)&jpbuf[j];
        float4 *dst = (float4 *)jpshare;
        dst[tid] = src[tid];
        __syncthreads();
        if(jend-j < 32){ // tail tile
#pragma unroll 4
            for(int jj=0; jj<jend-j; jj++){
                const Jparticle jp = jpshare[jj];
                // const Jparticle jp( (float4 *)jpshare + 2*jj);
                dev_gravity(j-jstart+jj, ip, jp, fo, nblist);
            }
        }else{
#pragma unroll 8
            for(int jj=0; jj<32; jj++){
                const Jparticle jp = jpshare[jj];
                // const Jparticle jp( (float4 *)jpshare + 2*jj);
                dev_gravity(j-jstart+jj, ip, jp, fo, nblist);
            }
        }
    }
#else
    // NTHREAD-particle tiles: each thread copies two float4 words.
    for(int j=jstart; j<jend; j+=NTHREAD){
        __shared__ Jparticle jpshare[NTHREAD];
        __syncthreads();
        float4 *src = (float4 *)&jpbuf[j];
        float4 *dst = (float4 *)jpshare;
        dst[        tid] = src[        tid];
        dst[NTHREAD+tid] = src[NTHREAD+tid];
        __syncthreads();
        if(jend-j < NTHREAD){ // tail tile
#pragma unroll 4
            for(int jj=0; jj<jend-j; jj++){
                Jparticle jp = jpshare[jj];
                dev_gravity(j-jstart+jj, ip, jp, fo, nblist);
            }
        }else{
#pragma unroll 8
            for(int jj=0; jj<NTHREAD; jj++){
                Jparticle jp = jpshare[jj];
                dev_gravity(j-jstart+jj, ip, jp, fo, nblist);
            }
        }
    }
#endif
    // The nblist ring buffer wrapped: flag overflow with a negative count.
    if(fo.nnb > NNB_PER_BLOCK) fo.nnb = -1;
    fobuf[iaddr][jbid] = fo;
}
#if __CUDA_ARCH__ >= 300
// Sum 'inp' across the NXREDUCE lanes of the warp via XOR-butterfly
// shuffles; every participating lane receives the total in *out.
// NOTE(review): legacy mask-less __shfl_xor — assumes pre-Volta implicit
// warp synchrony; verify on SM70+.
__device__ void warp_reduce_int(int inp, int *out){
    inp += __shfl_xor(inp, 1);
    inp += __shfl_xor(inp, 2);
    inp += __shfl_xor(inp, 4);
    inp += __shfl_xor(inp, 8);
# if NXREDUCE==32
    inp += __shfl_xor(inp, 16);
# endif
    *out = inp;
}
// Reduce 8 floats (two float4) across the warp at once. Lanes swap
// halves/pairs so that after the butterfly, lanes 0..7 each hold one of
// the 8 totals, which they store to out[0..7].
__device__ void warp_reduce_float8(float4 inp1, float4 inp2, float *out){
    const int tid = threadIdx.x;
    float4 tmp4L = (4&tid) ? inp2 : inp1;
    float4 tmp4R = (4&tid) ? inp1 : inp2;
    tmp4L.x += __shfl_xor(tmp4R.x, 4);
    tmp4L.y += __shfl_xor(tmp4R.y, 4);
    tmp4L.z += __shfl_xor(tmp4R.z, 4);
    tmp4L.w += __shfl_xor(tmp4R.w, 4);
    float4 tmp4;
    tmp4.x = (2&tid) ? tmp4L.z : tmp4L.x;
    tmp4.y = (2&tid) ? tmp4L.w : tmp4L.y;
    tmp4.z = (2&tid) ? tmp4L.x : tmp4L.z;
    tmp4.w = (2&tid) ? tmp4L.y : tmp4L.w;
    tmp4.x += __shfl_xor(tmp4.z, 2);
    tmp4.y += __shfl_xor(tmp4.w, 2);
    float2 tmp2;
    tmp2.x = (1&tid) ? tmp4.y : tmp4.x;
    tmp2.y = (1&tid) ? tmp4.x : tmp4.y;
    tmp2.x += __shfl_xor(tmp2.y, 1);
    tmp2.x += __shfl_xor(tmp2.x, 8);
# if NXREDUCE==32
    tmp2.x += __shfl_xor(tmp2.x, 16);
# endif
    if(tid < 8){
        out[tid] = tmp2.x;
    }
}
#endif
// Reduce the NJBLOCK partial Forces of each i-particle into ftot[i].
// Thread layout: x = NXREDUCE lanes (one per j-block, padded), y = NYREDUCE
// i-particles per block. SM30+ uses the warp-shuffle float8/int reducers
// to write the 7 floats + nnb of the output struct directly; older GPUs
// fall back to a shared-memory tree reduction.
__global__ void force_reduce_kernel(
    const int ni,
    const Force fpart[][NJBLOCK], // partial forces from gravity_kernel
    __out Force ftot []){         // summed force per i-particle
    const int xid = threadIdx.x;
    const int yid = threadIdx.y;
    const int bid = blockIdx.x;
    const int iaddr = yid + blockDim.y * bid;
#if __CUDA_ARCH__ >= 300
    Force f;
    if(xid < NJBLOCK){
        f = fpart[iaddr][xid];
    }else{
        f.clear(); // padding lanes contribute identity
    }
# if 0
#  pragma unroll
    for(int mask=1; mask<NXREDUCE; mask*=2){
        f.reduce_with(mask);
    }
    if(iaddr < ni && xid == 0){
        ftot[iaddr] = f;
    }
# else
    if(iaddr < ni){
        // Pack the Force into two float4 + int and reduce field-wise;
        // lanes 0..7 store the floats, the int reducer stores nnb.
        const float4 tmp1 = make_float4(f.acc.x, f.acc.y, f.acc.z, f.pot);
        const float4 tmp2 = make_float4(f.jrk.x, f.jrk.y, f.jrk.z, 0.0f);
        const int itmp = f.nnb;
        float *dst = (float *)(ftot + iaddr);
        int  *idst = (int *)(dst + 7); // nnb is the 8th word of Force
        warp_reduce_float8(tmp1, tmp2, dst);
        warp_reduce_int(itmp, idst);
    }
# endif
#else
    // Pre-SM30 path: shared-memory pairwise tree reduction per y-row.
    __shared__ Force fshare[NYREDUCE][NXREDUCE];
    if(xid < NJBLOCK){
        fshare[yid][xid] = fpart[iaddr][xid];
    }else{
        fshare[yid][xid].clear();
    }
    Force *fs = fshare[yid];
#if NXREDUCE==32
    if(xid < 16) fs[xid] += fs[xid + 16];
#endif
    if(xid < 8) fs[xid] += fs[xid + 8];
    if(xid < 4) fs[xid] += fs[xid + 4];
    if(xid < 2) fs[xid] += fs[xid + 2];
    if(xid < 1) fs[xid] += fs[xid + 1];
    if(iaddr < ni){
        ftot[iaddr] = fs[0];
    }
#endif
}
// Concatenate each i-particle's NJBLOCK per-block neighbour index lists
// into one packed list. Lane xid owns j-block xid: an exclusive prefix sum
// over the per-block counts gives each lane its write offset, then the
// local (per-block) indices are converted to global j indices.
// Particles whose total count already overflowed (nnb < 0) are skipped.
__global__ void gather_nb_kernel(
    const int ni,
    const int nj,        // j-particles on this device
    const int joff,      // global offset of this device's j-slice
    const Force fpart[][NJBLOCK],
    const Force ftot [],
    const int nboff[],   // per-i offset into nblist (host prefix sum)
    const uint16 nbpart[][NJBLOCK][NNB_PER_BLOCK],
    __out int nblist[])
{
    const int xid = threadIdx.x;
    const int yid = threadIdx.y;
    const int bid = blockIdx.x;
    const int iaddr = yid + blockDim.y * bid;
    if(iaddr >= ni) return;
    if(ftot[iaddr].nnb < 0) return; // overflowed: no list gathered
    const int mynnb = (xid < NJBLOCK) ? fpart[iaddr][xid].nnb
                                      : 0;
    // now performe prefix sum
#if __CUDA_ARCH__ >= 300
    // Inclusive scan via __shfl_up, then shift down one lane for exclusive.
    int ix = mynnb;
#pragma unroll
    for(int ioff=1; ioff<NXREDUCE; ioff*=2){
        int iy = __shfl_up(ix, ioff);
        if(xid>=ioff) ix += iy;
    }
    int iz = __shfl_up(ix, 1);
    const int off = (xid == 0) ? 0 : iz;
#else
    // Shared-memory Hillis-Steele scan (volatile: warp-synchronous update).
    __shared__ int ishare[NYREDUCE][NXREDUCE];
    ishare[yid][xid] = mynnb;
    volatile int *ish = ishare[yid];
    if(xid>=1)  ish[xid] += ish[xid-1];
    if(xid>=2)  ish[xid] += ish[xid-2];
    if(xid>=4)  ish[xid] += ish[xid-4];
    if(xid>=8)  ish[xid] += ish[xid-8];
#if NXREDUCE==32
    if(xid>=16) ish[xid] += ish[xid-16];
#endif
    const int off = (xid == 0) ? 0
                               : ish[xid-1];
#endif
    int *nbdst = nblist + nboff[iaddr] + off;
    const int jstart = (nj * xid) / NJBLOCK; // this j-block's base index
    if(xid < NJBLOCK){
        for(int k=0; k<mynnb; k++){
            // local block index -> global j index
            const int nbid = (joff + jstart) + int(nbpart[iaddr][xid][k]);
            // const int nbid = iaddr * 1000 + k;
            nbdst[k] = nbid;
        }
    }
}
// Host Part
#ifdef PROFILE
#include <sys/time.h>
// Wall-clock time in seconds (microsecond resolution) for profiling.
static double get_wtime(){
    struct timeval tv;
    gettimeofday(&tv, NULL);
    return tv.tv_sec + 1.e-6 * tv.tv_usec;
}
#else
// Profiling disabled: timers collapse to zero.
static double get_wtime(){
    return 0.0;
}
#endif
// Host-side library state; the cudaPointer buffers are per-GPU.
static double time_send, time_grav, time_reduce; // profiling accumulators (sec)
static long long numInter;                       // pairwise interaction counter
static cudaPointer <Jparticle> jpbuf[MAX_GPU];   // j-particles (sources)
static cudaPointer <Iparticle> ipbuf[MAX_GPU];   // i-particles (sinks)
static cudaPointer <Force[NJBLOCK]> fpart[MAX_GPU]; // per-j-block partial forces
static cudaPointer <Force> ftot [MAX_GPU];          // reduced forces
static cudaPointer <uint16[NJBLOCK][NNB_PER_BLOCK]> nbpart[MAX_GPU]; // local nb ids
static cudaPointer <int> nblist [MAX_GPU];       // packed global neighbour lists
static cudaPointer <int> nboff [MAX_GPU];        // per-i offsets into nblist
static int numCPU, numGPU;        // OpenMP threads / devices in use
static int joff[MAX_GPU + 1];     // j-particle partition boundaries
static int nbody, nbodymax;       // current / maximum j-particle count
static int devid[MAX_GPU];        // CUDA device ids
static bool is_open = false;      // buffers allocated
static bool devinit = false;      // device list initialized
// Discover GPUs and bind one OpenMP thread to each (idempotent).
// The GPU_LIST environment variable ("0 2 3") overrides the default of
// using every visible device.
void GPUNB_devinit(){
    if(devinit) return;
    // the reduction kernels require NJBLOCK <= NXREDUCE <= warp size
    assert(NXREDUCE >= NJBLOCK);
    assert(NXREDUCE <= 32);
    cudaGetDeviceCount(&numGPU);
    assert(numGPU <= MAX_GPU);
    char *gpu_list = getenv("GPU_LIST");
    if(gpu_list){
        // get GPU list from environment variable
        numGPU = 0;
        char *p = strtok(gpu_list, " ");
        while(p){
            devid[numGPU++] = atoi(p);
            p = strtok(NULL, " ");
            assert(numGPU <= MAX_GPU);
        }
    }else{
        // use all GPUs
        for(int i=0; i<numGPU; i++){
            devid[i] = i;
        }
    }
    // numGPU = 1;
#pragma omp parallel
    {
        int tid = omp_get_thread_num();
        if(tid == 0) numCPU = omp_get_num_threads();
    }
    assert(numCPU <= MAX_CPU);
    assert(numGPU <= numCPU); // need one host thread per device
#pragma omp parallel
    {
        int tid = omp_get_thread_num();
        if(tid < numGPU){
            cudaSetDevice(devid[tid]); // pin device to this thread
        }
    }
#ifdef PROFILE
    fprintf(stderr, "***********************\n");
    fprintf(stderr, "Initializing NBODY6/GPU library\n");
    fprintf(stderr, "#CPU %d, #GPU %d\n", numCPU, numGPU);
    fprintf(stderr, " device:");
    for(int i=0; i<numGPU; i++){
        fprintf(stderr, " %d", devid[i]);
    }
    fprintf(stderr, "\n");
#if 1
    for(int i=0; i<numGPU; i++){
        cudaDeviceProp prop;
        cudaGetDeviceProperties(&prop, devid[i]);
        fprintf(stderr, " device %d: %s\n", devid[i], prop.name);
    }
#endif
    fprintf(stderr, "***********************\n");
#endif
    devinit = true;
}
// Allocate per-GPU device buffers sized for up to nbmax j-particles and
// reset the profiling counters. Safe to call once; a second call without
// GPUNB_close() only prints a warning.
void GPUNB_open(int nbmax){
    time_send = time_grav = time_reduce = 0.0;
    numInter = 0;
    nbodymax = nbmax;
    GPUNB_devinit();
    if(is_open){
        fprintf(stderr, "gpunb: it is already open\n");
        return;
    }
    is_open = true;
    // Even partition of the j-particles over the devices.
    for(int id=0; id<numGPU + 1; id++){
        joff[id] = (id * nbmax) / numGPU;
    }
    // omp_set_num_threads(numGPU);
#pragma omp parallel
    {
        int tid = omp_get_thread_num();
        if(tid < numGPU){
            cudaSetDevice(devid[tid]);
            int nj = joff[tid+1] - joff[tid];
            jpbuf [tid].allocate(nj + NTHREAD); // padded for tile loads
            ipbuf [tid].allocate(NIMAX);
            fpart [tid].allocate(NIMAX);
            ftot  [tid].allocate(NIMAX);
            nbpart[tid].allocate(NIMAX);
            nblist[tid].allocate(NB_BUF_SIZE); // total ganged nblist
            nboff [tid].allocate(NIMAX+1);
        }
    }
#ifdef PROFILE
    fprintf(stderr, "***********************\n");
    fprintf(stderr, "Opened NBODY6/GPU library\n");
    fprintf(stderr, "#CPU %d, #GPU %d\n", numCPU, numGPU);
    fprintf(stderr, " device:");
    for(int i=0; i<numGPU; i++){
        fprintf(stderr, " %d", devid[i]);
    }
    fprintf(stderr, "\n");
    for(int i=0; i<numGPU+1; i++){
        fprintf(stderr, " %d", joff[i]);
    }
    fprintf(stderr, "\n");
    fprintf(stderr, "nbmax = %d\n", nbmax);
    fprintf(stderr, "***********************\n");
#endif
}
// Free all per-GPU buffers and print the accumulated profile.
// Calling it while already closed only prints a warning.
void GPUNB_close(){
    if(!is_open){
        fprintf(stderr, "gpunb: it is already close\n");
        return;
    }
    is_open = false;
    // omp_set_num_threads(numGPU);
#pragma omp parallel
    {
        int tid = omp_get_thread_num();
        if(tid < numGPU){
            jpbuf [tid].free();
            ipbuf [tid].free();
            fpart [tid].free();
            ftot  [tid].free();
            nbpart[tid].free();
            nblist[tid].free();
            nboff [tid].free();
        }
    }
    // omp_set_num_threads(numCPU);
    nbodymax = 0;
#ifdef PROFILE
    fprintf(stderr, "***********************\n");
    fprintf(stderr, "Closed NBODY6/GPU library\n");
    fprintf(stderr, "time send   : %f sec\n", time_send);
    fprintf(stderr, "time grav   : %f sec\n", time_grav);
    fprintf(stderr, "time reduce : %f sec\n", time_reduce);
    fprintf(stderr, "time regtot : %f sec\n", time_send + time_grav + time_reduce);
    // 60 flops assumed per pairwise interaction
    fprintf(stderr, "%f Gflops (gravity part only)\n", 60.e-9 * numInter / time_grav);
    fprintf(stderr, "***********************\n");
#endif
}
// Upload all j-particles (field sources) to the GPUs.
// The nbody particles are split evenly across numGPU devices via joff[];
// each OpenMP thread packs its slice into float Jparticle records and
// copies it host-to-device. Requires a prior GPUNB_open() (is_open).
void GPUNB_send(
    int _nbody,      // number of j-particles
    double mj[],     // masses
    double xj[][3],  // positions
    double vj[][3]){ // velocities
    assert(is_open);
    nbody = _nbody;
    assert(nbody <= nbodymax);
    time_send -= get_wtime();
    // Recompute the per-device partition for the actual particle count.
    for(int id=0; id<numGPU + 1; id++){
        joff[id] = (id * nbody) / numGPU;
    }
#pragma omp parallel
    {
        int tid = omp_get_thread_num();
        if(tid < numGPU){
            int nj = joff[tid+1] - joff[tid];
            // Narrow the caller's double arrays to the packed float layout.
            for(int j=0; j<nj; j++){
                int jj = j + joff[tid];
                jpbuf[tid][j] = Jparticle(mj[jj], xj[jj], vj[jj]);
            }
            jpbuf[tid].htod(nj); // host-to-device copy of this device's slice
        }
    }
    time_send += get_wtime();
}
// Compute regular forces, jerks, potentials and neighbour lists for ni
// i-particles against all previously sent j-particles.
// Pipeline per GPU (one OpenMP thread each): upload i-particles, run the
// pairwise gravity kernel (NJBLOCK partial results per i-particle), reduce
// the partials, build a prefix sum of neighbour counts, then gather the
// per-block neighbour indices into one packed list and download it.
// Finally the per-GPU results are summed on the CPU and neighbour lists
// merged into listbase (row i starts at listbase + lmax*i; slot 0 = count,
// negative count flags overflow).
void GPUNB_regf(
    int ni,           // number of i-particles (0 < ni <= NIMAX)
    double h2[],      // neighbour-criterion parameter per i-particle
    double dtr[],     // prediction interval per i-particle
    double xi[][3],   // positions
    double vi[][3],   // velocities
    double acc[][3],  // out: accelerations
    double jrk[][3],  // out: jerks
    double pot[],     // out: potentials
    int lmax,         // stride of listbase rows
    int nnbmax,       // maximum allowed neighbours per i-particle
    int *listbase){   // out: neighbour lists, list[i][lmax]
    assert(is_open);
    time_grav -= get_wtime();
    // Fix: compute the interaction count in 64 bits. The original
    // "numInter += ni * nbody" multiplied in int, which overflows for
    // ni * nbody > 2^31 (e.g. ni = 2048, nbody > ~1M) and corrupted the
    // Gflops statistic printed by GPUNB_close().
    numInter += (long long)ni * (long long)nbody;
    assert(0 < ni && ni <= NIMAX);
    // omp_set_num_threads(numGPU);
#pragma omp parallel
    {
        int tid = omp_get_thread_num();
        if(tid < numGPU){
            // cudaSetDevice(device_id[tid]);
            for(int i=0; i<ni; i++){
                ipbuf[tid][i] = Iparticle(h2[i], dtr[i], xi[i], vi[i]);
            }
            // set i-particles
            ipbuf[tid].htod(ni);
            // gravity kernel
            int niblock = 1 + (ni-1) / NTHREAD; // ceil(ni / NTHREAD)
            dim3 grid(niblock, NJBLOCK, 1);
            dim3 threads(NTHREAD, 1, 1);
            int nj = joff[tid+1] - joff[tid]; // this device's j-slice size
            gravity_kernel <<< grid, threads >>>
                (nj, ipbuf[tid], jpbuf[tid], fpart[tid], nbpart[tid]);
            // CUDA_SAFE_THREAD_SYNC();
#if 0
            dim3 rgrid(niblock, 1, 1);
            reduce_kernel <<< rgrid, threads >>>
                (nj, joff[tid], fpart[tid], nbpart[tid], ftot[tid], nbtot[tid]);
#else
            // One NXREDUCE-lane row per i-particle, NYREDUCE rows per block.
            const int ni8 = 1 + (ni-1) / NYREDUCE;
            dim3 rgrid   (ni8, 1, 1);
            dim3 rthreads(NXREDUCE, NYREDUCE, 1);
            force_reduce_kernel <<< rgrid, rthreads >>>
                (ni, fpart[tid], ftot[tid]);
#endif
            // CUDA_SAFE_THREAD_SYNC();
            ftot [tid].dtoh(ni);
            // now make prefix sum of neighbour counts (exclusive scan);
            // overflowed entries (nnb < 0) contribute nothing.
            int nbsum = 0;
            for(int i=0; i<ni; i++){
                nboff[tid][i] = nbsum;
                const int nnb = ftot[tid][i].nnb;
                // assert(nnb >= 0);
                if(nnb >= 0) nbsum += nnb;
            }
            assert(nbsum <= NB_BUF_SIZE);
            nboff[tid].htod(ni);
            // debugging
            // for(int k=0; k<nbsum; k++) nblist[tid][k] = -1;
            // nblist[tid].htod(nbsum);
            gather_nb_kernel <<< rgrid, rthreads>>>
                (ni, nj, joff[tid], fpart[tid], ftot[tid],
                 nboff[tid], nbpart[tid], nblist[tid]);
            // CUDA_SAFE_THREAD_SYNC();
            nblist[tid].dtoh(nbsum);
        }
    }
    const double wt = get_wtime();
    time_grav += wt;
    time_reduce -= wt;
    // reduction phase: sum the per-GPU partial forces on the CPU.
    // omp_set_num_threads(numCPU);
#pragma omp parallel for
    for(int i=0; i<ni; i++){
        double ax=0.0, ay=0.0, az=0.0;
        double jx=0.0, jy=0.0, jz=0.0;
        double po=0.0;
        for(int id=0; id<numGPU; id++){
            Force &fo = ftot[id][i];
            ax += fo.acc.x;
            ay += fo.acc.y;
            az += fo.acc.z;
            jx += fo.jrk.x;
            jy += fo.jrk.y;
            jz += fo.jrk.z;
            po += fo.pot;
        }
        acc[i][0] = ax;
        acc[i][1] = ay;
        acc[i][2] = az;
        jrk[i][0] = jx;
        jrk[i][1] = jy;
        jrk[i][2] = jz;
        pot[i] = po;
    }
    // Merge the per-GPU neighbour lists into the caller's list rows.
#pragma omp parallel for
    for(int i=0; i<ni; i++){
        bool overflow = false;
        int *nnbp = listbase + lmax * i; // slot 0: neighbour count
        int *nblistp = nnbp + 1;         // slots 1..: neighbour indices
        int nnb = 0;
        for(int id=0; id<numGPU; id++){
            const int nnb_part = ftot[id][i].nnb;
            if(nnb_part < 0){ // per-block list overflowed on the GPU
                overflow = true;
                fprintf(stderr, "!!!overflow : i=%d, id=%d, nnb_part=%d\n", i, id, nnb_part);
            }
            // assert(!overflow);
            nnb += nnb_part;
            if(nnb > nnbmax){ // caller-supplied limit exceeded
                overflow = true;
                fprintf(stderr, "!!!overflow : i=%d, id=%d, nnb_tot =%d, nnbmax=%d\n", i, id, nnb, nnbmax);
            }
            // assert(!overflow);
            if(!overflow){
                const int off = nboff[id][i];
                for(int k=0; k<nnb_part; k++){
                    *nblistp++ = nblist[id][off + k];
                }
            }
        }
        if(overflow){
            // report overflow via a negative count (never 0, hence -9999)
            // *nnbp = -1;
            *nnbp = nnb ? -abs(nnb) : -9999;
        }else{
            *nnbp = nnb;
        }
    }
    time_reduce += get_wtime();
}
// Fortran-callable wrappers: trailing underscore, all scalars by pointer.
extern "C" {
    // Initialize the device list (idempotent).
    void gpunb_devinit_(){
        GPUNB_devinit();
    }
    // Allocate per-GPU buffers for up to *nbmax particles.
    void gpunb_open_(int *nbmax){
        GPUNB_open(*nbmax);
    }
    // Free all per-GPU buffers and print profiling info.
    void gpunb_close_(){
        GPUNB_close();
    }
    // Upload *nj j-particles.
    void gpunb_send_(
        int *nj,
        double mj[],
        double xj[][3],
        double vj[][3]){
        GPUNB_send(*nj, mj, xj, vj);
    }
    // Compute regular forces and neighbour lists for *ni i-particles.
    void gpunb_regf_(
        int *ni,
        double h2[],
        double dtr[],
        double xi[][3],
        double vi[][3],
        double acc[][3],
        double jrk[][3],
        double pot[],
        int *lmax,
        int *nbmax,
        int *list){ // list[][lmax]
        GPUNB_regf(*ni, h2, dtr, xi, vi, acc, jrk, pot, *lmax, *nbmax, list);
    }
}
|
745c176b512ba88782e968fe804ead5b8607d1e1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "util.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#define H(a) (-a * log2f(a))
#define H2(a1, a2, p) (H(((float)(a1) + (p)) / ((float)(a1 + a2) + 1.0f)) + \
H(((float)(a2) + (1.0f - p)) / ((float)(a1 + a2) + 1.0f)))
/* Macros for summing over a 2 x 3 x 3 table */
#define SUM_N3(a, n1, n2) (a[n1][n2][0] + a[n1][n2][1] + a[n1][n2][2])
#define SUM_N2(a, n1, n3) (a[n1][0][n3] + a[n1][1][n3] + a[n1][2][n3])
#define SUM_N1(a, n2, n3) (a[0][n2][n3] + a[1][n2][n3])
#define SUM_N2_N3(a, n1) (SUM_N3(a, n1, 0) + SUM_N3(a, n1, 1) + SUM_N3(a, n1, 2))
#define SUM_N1_N3(a, n2) (SUM_N3(a, 0, n2) + SUM_N3(a, 1, n2))
#define SUM_N1_N2(a, n3) (SUM_N2(a, 0, n3) + SUM_N2(a, 1, n3))
/* Input format:
 * - *v1s: values of the first descriptive variable, one variable, all objects
 * - *v2s: values of the second descriptive variable, one variable, all objects
 * - *ds : decision-variable values
 * - num_objects: number of objects
 * Values are bit-packed: 1 bit per object in ds, 2 bits per object in v1s/v2s.
 * Builds the 2x3x3 contingency table count[decision][v1][v2], then returns
 * GIG = IG(v1 u v2) - max(IG(v1), IG(v2)), where the entropies (H2) are
 * smoothed with the a-priori probability p as a pseudo-count.
 */
__device__ float compute_gig_1_2(char *v1s, char *v2s, char *ds, int num_objects, float p)
{
    int count[2][3][3] = { 0 };
    // Unpack each object's decision bit and the two 2-bit variable values.
    for (int i = 0; i < num_objects; ++i) {
        char d  = (ds[i / 8]  >> (i % 8)) & 1;
        char v1 = (v1s[i / 4] >> ((i % 4) * 2)) & 3;
        char v2 = (v2s[i / 4] >> ((i % 4) * 2)) & 3;
        count[d][v1][v2]++;
    }
    float ig1, ig2, ig12, h_p;
    // Entropy of the decision variable alone.
    h_p = H2(SUM_N2_N3(count, 0), SUM_N2_N3(count, 1), p);
    // Information gain of v1, v2, and the pair (v1, v2).
    ig1 = h_p - SUM_N1_N3(count, 0) * H2(SUM_N3(count, 0, 0), SUM_N3(count, 1, 0), p) -
          SUM_N1_N3(count, 1) * H2(SUM_N3(count, 0, 1), SUM_N3(count, 1, 1), p) -
          SUM_N1_N3(count, 2) * H2(SUM_N3(count, 0, 2), SUM_N3(count, 1, 2), p);
    ig2 = h_p - SUM_N1_N2(count, 0) * H2(SUM_N2(count, 0, 0), SUM_N2(count, 1, 0), p) -
          SUM_N1_N2(count, 1) * H2(SUM_N2(count, 0, 1), SUM_N2(count, 1, 1), p) -
          SUM_N1_N2(count, 2) * H2(SUM_N2(count, 0, 2), SUM_N2(count, 1, 2), p);
    ig12 = h_p - SUM_N1(count, 0, 0) * H2(count[0][0][0], count[1][0][0], p) -
           SUM_N1(count, 1, 0) * H2(count[0][1][0], count[1][1][0], p) -
           SUM_N1(count, 2, 0) * H2(count[0][2][0], count[1][2][0], p) -
           SUM_N1(count, 0, 1) * H2(count[0][0][1], count[1][0][1], p) -
           SUM_N1(count, 1, 1) * H2(count[0][1][1], count[1][1][1], p) -
           SUM_N1(count, 2, 1) * H2(count[0][2][1], count[1][2][1], p) -
           SUM_N1(count, 0, 2) * H2(count[0][0][2], count[1][0][2], p) -
           SUM_N1(count, 1, 2) * H2(count[0][1][2], count[1][1][2], p) -
           SUM_N1(count, 2, 2) * H2(count[0][2][2], count[1][2][2], p);
    //printf("	IG(v1) = %f\n", ig1);
    //printf("	IG(v2) = %f\n", ig2);
    //printf("	IG(v1 u v2) = %f\n", ig12);
    return ig12 - ((ig1 > ig2) ? ig1 : ig2);
}
/* Input format:
 * - *vars: matrix of descriptive-variable values, one row per variable
 * - *ds  : decision-variable vector
 * - num_objects: number of objects
 * - num_vars: number of variables
 * - r_gig: output GIG matrix; only the strict upper triangle
 *          (v1_p < v2_p) is written, one thread per variable pair
 */
__global__ void compute_gig_kernel(char *vars, char *ds, int num_objects, int num_vars, float *r_gig, float p)
{
    int v1_p = blockIdx.x * blockDim.x + threadIdx.x;
    int v2_p = blockIdx.y * blockDim.y + threadIdx.y;
    // keep only the strict upper triangle and clip the padded grid
    if (v1_p >= v2_p) return;
    if (v1_p >= num_vars) return;
    if (v2_p >= num_vars) return;
    //printf("compute_gig(%d, %d) %d\n", v1_p, v2_p, blockIdx.y);
    // row stride of the bit-packed variable matrix (4 values per byte)
    const int num_o_padded = (num_objects - 1) / 4 + 1;
    r_gig[v1_p * num_vars + v2_p] = compute_gig_1_2(&vars[v1_p * num_o_padded], &vars[v2_p * num_o_padded], ds, num_objects, p);
    //printf("	GIG = %f\n", r_gig[v1_p * num_vars + v2_p]);
}
// One above-threshold result: the GIG value and the pair of variable indices.
struct GigStruct {
    float gig;  // gain of information gain for the pair (v1, v2)
    int v1, v2; // indices of the two descriptive variables
};
// Thresholded variant of compute_gig_kernel: instead of the full matrix,
// pairs with GIG >= threshold are appended to r_gig via an atomic counter.
// *num_gig_structs keeps counting past max_num_gig_structs even though the
// extra results are dropped, so the caller can detect truncation.
__global__ void compute_gig_wt_kernel(char *vars, char *ds, int num_objects, int num_vars,
                                      struct GigStruct *r_gig, int max_num_gig_structs, int* num_gig_structs,
                                      float p, float threshold)
{
    int v1_p = blockIdx.x * blockDim.x + threadIdx.x;
    int v2_p = blockIdx.y * blockDim.y + threadIdx.y;
    // strict upper triangle only; clip the padded grid
    if (v1_p >= v2_p) return;
    if (v1_p >= num_vars) return;
    if (v2_p >= num_vars) return;
    //printf("compute_gig(%d, %d) %d\n", v1_p, v2_p, blockIdx.y);
    // row stride of the bit-packed variable matrix (4 values per byte)
    const int num_o_padded = (num_objects - 1) / 4 + 1;
    float gig = compute_gig_1_2(&vars[v1_p * num_o_padded], &vars[v2_p * num_o_padded], ds, num_objects, p);
    if (gig < threshold) return;
    /* atomicInc() wraps around to 0 */
    int num = atomicAdd(num_gig_structs, 1); // reserve an output slot
    if (num < max_num_gig_structs) {
        r_gig[num].gig = gig;
        r_gig[num].v1 = v1_p;
        r_gig[num].v2 = v2_p;
    }
    //printf("	GIG = %f\n", r_gig[v1_p * num_vars + v2_p]);
}
/* Komparatory do sortowania _malejco_ */
int compare_gig(const void *a, const void *b)
{
if (((struct GigStruct*)a)->gig > ((struct GigStruct*)b)->gig) return -1;
else if (((struct GigStruct*)a)->gig == ((struct GigStruct*)b)->gig) return 0;
else return 1;
}
/* qsort comparator placing larger floats first (descending order). */
int compare_float(const void *a, const void *b)
{
    const float lhs = *(const float *)a;
    const float rhs = *(const float *)b;
    if (lhs > rhs) return -1;
    if (lhs == rhs) return 0;
    return 1;
}
// Driver: reads the data set from stdin, estimates a GIG threshold from a
// randomized trial on a subset of variables, then computes all pairs above
// the threshold on the GPU, sorts them and prints them (ascending on stdout;
// timing/statistics on stderr).
int main()
{
    int num_objects, num_vars, result_size, real_result_size;
    float a_priori, threshold;
    // phase timers (filled by Timer::lap())
    float input, copy, random_trial_kernel, random_trial_copy, random_trial_process, main_kernel, main_copy, main_process, all;
    Timer timer;
    timer.start();
    scanf("%d %d %d %f", &num_objects, &num_vars, &result_size, &a_priori);
    Sync2BitArray2D vars(num_vars, num_objects); // 2 bits per value
    SyncBitArray ds(num_objects);                // 1 bit per decision
    /* Read the input data */
    {
        for (int i = 0; i < num_objects; ++i) {
            int a; scanf("%d", &a); a &= 1;
            ds.setHost(i, a);
            for (int j = 0; j < num_vars; ++j) {
                int b; scanf("%d", &b); b &= 3;
                vars.setHost(j, i, b);
            }
        }
        input = timer.lap();
    }
    /* Copy the data to the device */
    {
        vars.syncToDevice();
        ds.syncToDevice();
        copy = timer.lap();
    }
    /* Run a randomized trial on the first 10% of the variables */
    {
        int random_trial_size = num_vars / 10;
        /* Allocating memory for the resulting GIG fails when it exceeds
           roughly 400 MB. XXX: the gig array would not have to be square. */
        if (random_trial_size > 8192)
            random_trial_size = 8192;
        float percent = (float)random_trial_size / (float)num_vars ;
        SyncArray2D<float> gig(random_trial_size, random_trial_size);
        dim3 block_size(16, 16);
        dim3 grid_size(padToMultipleOf(random_trial_size, block_size.x) / block_size.x,
                       padToMultipleOf(random_trial_size, block_size.y) / block_size.y);
        hipLaunchKernelGGL(( compute_gig_kernel), dim3(grid_size), dim3(block_size), 0, 0, (char*)vars.getDevice(), (char*)ds.getDevice(),
            num_objects, random_trial_size, (float*)gig.getDevice(), a_priori);
        CUDA_CALL(hipGetLastError());
        hipDeviceSynchronize();
        random_trial_kernel = timer.lap();
        gig.syncToHost();
        random_trial_copy = timer.lap();
        /* Copy the computed GIGs into a contiguous buffer, sort them and
           pick the appropriate element as the threshold */
        {
            int num_gig = 0;
            float *gig_sorted = (float*)malloc(sizeof(float) * random_trial_size * random_trial_size);
            for (int v1_p = 0; v1_p < random_trial_size; ++v1_p)
                for (int v2_p = v1_p + 1; v2_p < random_trial_size; ++v2_p)
                    gig_sorted[num_gig++] = gig.getHostEl(v1_p, v2_p);
            qsort(gig_sorted, num_gig, sizeof(float), compare_float);
            /* gig_sorted is sorted in descending order; scale the wanted
               rank by the sampled fraction squared (pairs scale as n^2) */
            threshold = gig_sorted[(int)((float)result_size * percent * percent)];
            free(gig_sorted);
        }
        random_trial_process = timer.lap();
    }
    /* Run the real computation on all variables with the kernel that
       records only values above the threshold */
    {
        const int max_num_structs = result_size * 2;
        SyncArray<struct GigStruct> gig_structs(max_num_structs);
        SyncVar<int> num_structs;
        dim3 block_size(16, 16);
        dim3 grid_size(padToMultipleOf(num_vars, block_size.x) / block_size.x,
                       padToMultipleOf(num_vars, block_size.y) / block_size.y);
        hipLaunchKernelGGL(( compute_gig_wt_kernel), dim3(grid_size), dim3(block_size), 0, 0, (char*)vars.getDevice(), (char*)ds.getDevice(),
            num_objects, num_vars, (struct GigStruct*)gig_structs.getDevice(),
            max_num_structs, num_structs.getDevice(), a_priori, threshold);
        CUDA_CALL(hipGetLastError());
        hipDeviceSynchronize();
        main_kernel = timer.lap();
        num_structs.syncToHost();
        gig_structs.syncToHost();
        main_copy = timer.lap();
        real_result_size = *num_structs.getHost();
        // NOTE(review): *num_structs may exceed max_num_structs when the
        // kernel found more hits than fit — qsort/print would then read past
        // the buffer; consider clamping to max_num_structs. TODO confirm.
        qsort(gig_structs.getHost(), *num_structs.getHost(), sizeof(struct GigStruct), compare_gig);
        for (int i = *num_structs.getHost() - 1; i >= 0; --i)
            printf("%f %d %d\n", gig_structs.getHostEl(i).gig, gig_structs.getHostEl(i).v1, gig_structs.getHostEl(i).v2);
        main_process = timer.lap();
    }
    all = input + copy + random_trial_kernel + random_trial_copy + random_trial_process + main_kernel + main_copy + main_process;
    fprintf(stderr, "data: variables, objects, result_size, true result size, threshold\n");
    fprintf(stderr, "%d, %d, %d, %d, %f\n", num_vars, num_objects, result_size, real_result_size, threshold);
    fprintf(stderr, "times: input, copy, random_trial_kernel, random_trial_copy, random_trial_process, main_kernel, main_copy, main_process, all\n");
    fprintf(stderr, "%.1f, %.1f, %.1f, %.1f, %.1f, %.1f, %.1f, %.1f, %.1f\n", input, copy, random_trial_kernel,
            random_trial_copy, random_trial_process, main_kernel, main_copy, main_process, all);
    fprintf(stderr, "%.1f, %.1f, %.1f, %.1f, %.1f, %.1f, %.1f, %.1f\n", input / all * 100.0f, copy / all * 100.0f,
            random_trial_kernel / all * 100.0f, random_trial_copy / all * 100.0f, random_trial_process / all * 100.0f,
            main_kernel / all * 100.0f, main_copy / all * 100.0f, main_process / all * 100.0f);
    return 0;
}
| 745c176b512ba88782e968fe804ead5b8607d1e1.cu | #include "util.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#define H(a) (-a * log2f(a))
#define H2(a1, a2, p) (H(((float)(a1) + (p)) / ((float)(a1 + a2) + 1.0f)) + \
H(((float)(a2) + (1.0f - p)) / ((float)(a1 + a2) + 1.0f)))
/* Macros for summing over a 2 x 3 x 3 table */
#define SUM_N3(a, n1, n2) (a[n1][n2][0] + a[n1][n2][1] + a[n1][n2][2])
#define SUM_N2(a, n1, n3) (a[n1][0][n3] + a[n1][1][n3] + a[n1][2][n3])
#define SUM_N1(a, n2, n3) (a[0][n2][n3] + a[1][n2][n3])
#define SUM_N2_N3(a, n1) (SUM_N3(a, n1, 0) + SUM_N3(a, n1, 1) + SUM_N3(a, n1, 2))
#define SUM_N1_N3(a, n2) (SUM_N3(a, 0, n2) + SUM_N3(a, 1, n2))
#define SUM_N1_N2(a, n3) (SUM_N2(a, 0, n3) + SUM_N2(a, 1, n3))
/* Input format:
 * - *v1s: values of the first descriptive variable, one variable, all objects
 * - *v2s: values of the second descriptive variable, one variable, all objects
 * - *ds : decision-variable values
 * - num_objects: number of objects
 * Values are bit-packed: 1 bit per object in ds, 2 bits per object in v1s/v2s.
 * Builds the 2x3x3 contingency table count[decision][v1][v2], then returns
 * GIG = IG(v1 u v2) - max(IG(v1), IG(v2)), where the entropies (H2) are
 * smoothed with the a-priori probability p as a pseudo-count.
 */
__device__ float compute_gig_1_2(char *v1s, char *v2s, char *ds, int num_objects, float p)
{
    int count[2][3][3] = { 0 };
    // Unpack each object's decision bit and the two 2-bit variable values.
    for (int i = 0; i < num_objects; ++i) {
        char d  = (ds[i / 8]  >> (i % 8)) & 1;
        char v1 = (v1s[i / 4] >> ((i % 4) * 2)) & 3;
        char v2 = (v2s[i / 4] >> ((i % 4) * 2)) & 3;
        count[d][v1][v2]++;
    }
    float ig1, ig2, ig12, h_p;
    // Entropy of the decision variable alone.
    h_p = H2(SUM_N2_N3(count, 0), SUM_N2_N3(count, 1), p);
    // Information gain of v1, v2, and the pair (v1, v2).
    ig1 = h_p - SUM_N1_N3(count, 0) * H2(SUM_N3(count, 0, 0), SUM_N3(count, 1, 0), p) -
          SUM_N1_N3(count, 1) * H2(SUM_N3(count, 0, 1), SUM_N3(count, 1, 1), p) -
          SUM_N1_N3(count, 2) * H2(SUM_N3(count, 0, 2), SUM_N3(count, 1, 2), p);
    ig2 = h_p - SUM_N1_N2(count, 0) * H2(SUM_N2(count, 0, 0), SUM_N2(count, 1, 0), p) -
          SUM_N1_N2(count, 1) * H2(SUM_N2(count, 0, 1), SUM_N2(count, 1, 1), p) -
          SUM_N1_N2(count, 2) * H2(SUM_N2(count, 0, 2), SUM_N2(count, 1, 2), p);
    ig12 = h_p - SUM_N1(count, 0, 0) * H2(count[0][0][0], count[1][0][0], p) -
           SUM_N1(count, 1, 0) * H2(count[0][1][0], count[1][1][0], p) -
           SUM_N1(count, 2, 0) * H2(count[0][2][0], count[1][2][0], p) -
           SUM_N1(count, 0, 1) * H2(count[0][0][1], count[1][0][1], p) -
           SUM_N1(count, 1, 1) * H2(count[0][1][1], count[1][1][1], p) -
           SUM_N1(count, 2, 1) * H2(count[0][2][1], count[1][2][1], p) -
           SUM_N1(count, 0, 2) * H2(count[0][0][2], count[1][0][2], p) -
           SUM_N1(count, 1, 2) * H2(count[0][1][2], count[1][1][2], p) -
           SUM_N1(count, 2, 2) * H2(count[0][2][2], count[1][2][2], p);
    //printf("	IG(v1) = %f\n", ig1);
    //printf("	IG(v2) = %f\n", ig2);
    //printf("	IG(v1 u v2) = %f\n", ig12);
    return ig12 - ((ig1 > ig2) ? ig1 : ig2);
}
/* Input format:
 * - *vars: matrix of descriptive-variable values, one row per variable
 * - *ds  : decision-variable vector
 * - num_objects: number of objects
 * - num_vars: number of variables
 * - r_gig: output GIG matrix; only the strict upper triangle
 *          (v1_p < v2_p) is written, one thread per variable pair
 */
__global__ void compute_gig_kernel(char *vars, char *ds, int num_objects, int num_vars, float *r_gig, float p)
{
    int v1_p = blockIdx.x * blockDim.x + threadIdx.x;
    int v2_p = blockIdx.y * blockDim.y + threadIdx.y;
    // keep only the strict upper triangle and clip the padded grid
    if (v1_p >= v2_p) return;
    if (v1_p >= num_vars) return;
    if (v2_p >= num_vars) return;
    //printf("compute_gig(%d, %d) %d\n", v1_p, v2_p, blockIdx.y);
    // row stride of the bit-packed variable matrix (4 values per byte)
    const int num_o_padded = (num_objects - 1) / 4 + 1;
    r_gig[v1_p * num_vars + v2_p] = compute_gig_1_2(&vars[v1_p * num_o_padded], &vars[v2_p * num_o_padded], ds, num_objects, p);
    //printf("	GIG = %f\n", r_gig[v1_p * num_vars + v2_p]);
}
/* A scored variable pair: its GIG value plus the two variable indices. */
struct GigStruct {
	float gig;
	int v1, v2;
};
/*
 * Thresholded all-pairs GIG: one thread per pair (v1_p < v2_p). A pair whose
 * score reaches `threshold` is appended to r_gig at an index reserved with
 * atomicAdd. NOTE: *num_gig_structs counts EVERY qualifying pair, including
 * those dropped once r_gig (max_num_gig_structs entries) is full, so the
 * host must clamp the counter to max_num_gig_structs before reading r_gig.
 */
__global__ void compute_gig_wt_kernel(char *vars, char *ds, int num_objects, int num_vars,
	struct GigStruct *r_gig, int max_num_gig_structs, int* num_gig_structs,
	float p, float threshold)
{
	int v1_p = blockIdx.x * blockDim.x + threadIdx.x;
	int v2_p = blockIdx.y * blockDim.y + threadIdx.y;
	/* Only the strict upper triangle is computed; out-of-range threads exit. */
	if (v1_p >= v2_p) return;
	if (v1_p >= num_vars) return;
	if (v2_p >= num_vars) return;
	//printf("compute_gig(%d, %d) %d\n", v1_p, v2_p, blockIdx.y);
	const int num_o_padded = (num_objects - 1) / 4 + 1;
	float gig = compute_gig_1_2(&vars[v1_p * num_o_padded], &vars[v2_p * num_o_padded], ds, num_objects, p);
	if (gig < threshold) return;
	/* atomicAdd rather than atomicInc, since atomicInc() wraps around to 0 */
	int num = atomicAdd(num_gig_structs, 1);
	if (num < max_num_gig_structs) {
		r_gig[num].gig = gig;
		r_gig[num].v1 = v1_p;
		r_gig[num].v2 = v2_p;
	}
	//printf("  GIG = %f\n", r_gig[v1_p * num_vars + v2_p]);
}
/* Comparators for sorting in _descending_ order */
/* qsort comparator: orders GigStruct entries by descending gig value. */
int compare_gig(const void *a, const void *b)
{
	const struct GigStruct *lhs = (const struct GigStruct *)a;
	const struct GigStruct *rhs = (const struct GigStruct *)b;
	if (lhs->gig > rhs->gig) return -1;
	else if (lhs->gig == rhs->gig) return 0;
	else return 1;
}
/* qsort comparator: orders floats in descending order. */
int compare_float(const void *a, const void *b)
{
	float lhs = *(const float *)a;
	float rhs = *(const float *)b;
	if (lhs > rhs) return -1;
	else if (lhs == rhs) return 0;
	else return 1;
}
/*
 * Reads (num_objects, num_vars, result_size, a_priori) plus the decision
 * vector and variable matrix from stdin, estimates a GIG threshold on a
 * sample of the first ~10% of variables, then scores all variable pairs on
 * the GPU keeping only pairs above the threshold. Kept (gig, v1, v2)
 * triples go to stdout in ascending order; timing stats go to stderr.
 */
int main()
{
	int num_objects, num_vars, result_size, real_result_size;
	float a_priori, threshold;
	float input, copy, random_trial_kernel, random_trial_copy, random_trial_process, main_kernel, main_copy, main_process, all;
	Timer timer;
	timer.start();
	/* BUG FIX: reject a malformed header instead of using garbage sizes. */
	if (scanf("%d %d %d %f", &num_objects, &num_vars, &result_size, &a_priori) != 4) {
		fprintf(stderr, "failed to parse header (num_objects num_vars result_size a_priori)\n");
		return 1;
	}
	Sync2BitArray2D vars(num_vars, num_objects);
	SyncBitArray ds(num_objects);
	/* Read the input data. */
	{
		for (int i = 0; i < num_objects; ++i) {
			int a; scanf("%d", &a); a &= 1;
			ds.setHost(i, a);
			for (int j = 0; j < num_vars; ++j) {
				int b; scanf("%d", &b); b &= 3;
				vars.setHost(j, i, b);
			}
		}
		input = timer.lap();
	}
	/* Copy the data to the device. */
	{
		vars.syncToDevice();
		ds.syncToDevice();
		copy = timer.lap();
	}
	/* Randomized trial on the first ~10% of variables to pick a threshold. */
	{
		int random_trial_size = num_vars / 10;
		/* Allocating the result GIG matrix fails when it exceeds ~400MB.
		   XXX: the gig array would not have to be square. */
		if (random_trial_size > 8192)
			random_trial_size = 8192;
		float percent = (float)random_trial_size / (float)num_vars;
		SyncArray2D<float> gig(random_trial_size, random_trial_size);
		dim3 block_size(16, 16);
		dim3 grid_size(padToMultipleOf(random_trial_size, block_size.x) / block_size.x,
				padToMultipleOf(random_trial_size, block_size.y) / block_size.y);
		compute_gig_kernel<<<grid_size, block_size>>>((char*)vars.getDevice(), (char*)ds.getDevice(),
				num_objects, random_trial_size, (float*)gig.getDevice(), a_priori);
		CUDA_CALL(cudaGetLastError());
		cudaDeviceSynchronize();
		random_trial_kernel = timer.lap();
		gig.syncToHost();
		random_trial_copy = timer.lap();
		/* Gather the upper-triangle GIGs into one contiguous buffer, sort
		   them (descending) and pick the element corresponding to
		   result_size scaled by the sampled fraction. */
		{
			int num_gig = 0;
			float *gig_sorted = (float*)malloc(sizeof(float) * random_trial_size * random_trial_size);
			for (int v1_p = 0; v1_p < random_trial_size; ++v1_p)
				for (int v2_p = v1_p + 1; v2_p < random_trial_size; ++v2_p)
					gig_sorted[num_gig++] = gig.getHostEl(v1_p, v2_p);
			if (num_gig > 0) {
				qsort(gig_sorted, num_gig, sizeof(float), compare_float);
				/* gig_sorted is sorted in descending order.
				   BUG FIX: clamp the index -- a large result_size could
				   otherwise read past the end of gig_sorted. */
				int t_idx = (int)((float)result_size * percent * percent);
				if (t_idx >= num_gig) t_idx = num_gig - 1;
				if (t_idx < 0) t_idx = 0;
				threshold = gig_sorted[t_idx];
			} else {
				/* Sample too small (fewer than 2 variables): keep all pairs. */
				threshold = 0.0f;
			}
			free(gig_sorted);
		}
		random_trial_process = timer.lap();
	}
	/* Full pass over all variables with the kernel that only records
	   values above the threshold. */
	{
		const int max_num_structs = result_size * 2;
		SyncArray<struct GigStruct> gig_structs(max_num_structs);
		SyncVar<int> num_structs;
		dim3 block_size(16, 16);
		dim3 grid_size(padToMultipleOf(num_vars, block_size.x) / block_size.x,
				padToMultipleOf(num_vars, block_size.y) / block_size.y);
		compute_gig_wt_kernel<<<grid_size, block_size>>>((char*)vars.getDevice(), (char*)ds.getDevice(),
				num_objects, num_vars, (struct GigStruct*)gig_structs.getDevice(),
				max_num_structs, num_structs.getDevice(), a_priori, threshold);
		CUDA_CALL(cudaGetLastError());
		cudaDeviceSynchronize();
		main_kernel = timer.lap();
		num_structs.syncToHost();
		gig_structs.syncToHost();
		main_copy = timer.lap();
		real_result_size = *num_structs.getHost();
		/* BUG FIX: the kernel counts every qualifying pair but only stores
		   the first max_num_structs of them, so clamp before sorting and
		   printing to avoid reading past the end of gig_structs. */
		int num_stored = real_result_size;
		if (num_stored > max_num_structs)
			num_stored = max_num_structs;
		qsort(gig_structs.getHost(), num_stored, sizeof(struct GigStruct), compare_gig);
		for (int i = num_stored - 1; i >= 0; --i)
			printf("%f %d %d\n", gig_structs.getHostEl(i).gig, gig_structs.getHostEl(i).v1, gig_structs.getHostEl(i).v2);
		main_process = timer.lap();
	}
	all = input + copy + random_trial_kernel + random_trial_copy + random_trial_process + main_kernel + main_copy + main_process;
	fprintf(stderr, "data: variables, objects, result_size, true result size, threshold\n");
	fprintf(stderr, "%d, %d, %d, %d, %f\n", num_vars, num_objects, result_size, real_result_size, threshold);
	fprintf(stderr, "times: input, copy, random_trial_kernel, random_trial_copy, random_trial_process, main_kernel, main_copy, main_process, all\n");
	fprintf(stderr, "%.1f, %.1f, %.1f, %.1f, %.1f, %.1f, %.1f, %.1f, %.1f\n", input, copy, random_trial_kernel,
			random_trial_copy, random_trial_process, main_kernel, main_copy, main_process, all);
	fprintf(stderr, "%.1f, %.1f, %.1f, %.1f, %.1f, %.1f, %.1f, %.1f\n", input / all * 100.0f, copy / all * 100.0f,
			random_trial_kernel / all * 100.0f, random_trial_copy / all * 100.0f, random_trial_process / all * 100.0f,
			main_kernel / all * 100.0f, main_copy / all * 100.0f, main_process / all * 100.0f);
	return 0;
}
|
413cbd6d7e048eebaedd3bfb58ed68dc9f7f65ba.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "lib/SyncedMemory.h"
#include "lib/pgm.h"
#include "lib/BEMD.h"
#include <string>
using namespace std;
/*
 * Extracts two BEMD components from a fixed input PGM: each pass runs BEMD
 * on the GPU, writes the component to figures/test_<i>.pgm, and subtracts it
 * from the working data before the next pass.
 * (hipMalloc return codes are intentionally left unchecked, matching the
 * file's existing style.)
 */
int main(int argc, char **argv)
{
	auto input_path = "figures/2D_sin_0.pgm";
	string output_name = "figures/test_";
	bool success;
	int width, height, channel;
	auto img = ReadNetpbm(width, height, channel, success, input_path);
	if (!success) {
		puts("Something wrong with reading the input image files.");
		abort();
	}
	float** output_2D;
	const int SIZE = width*height;
	MemoryBuffer<float> output(SIZE);
	auto output_s = output.CreateSync(SIZE);
	/* The returned pointer was unused; the call itself is kept because
	   get_cpu_wo() may move the synced buffer's ownership to the CPU side
	   -- TODO confirm against SyncedMemory.h. */
	output_s.get_cpu_wo();
	float * data = (float*) malloc(SIZE*sizeof(float));
	copy(img.get(), img.get() + SIZE, data);
	minus_value(data, 128.0, SIZE);
	hipMalloc(&output_2D, sizeof(float*) * height);
	hipLaunchKernelGGL(( set_2D), dim3(1),dim3(1), 0, 0, output_2D, output_s.get_gpu_rw(), width, height);
	float **max_map_2D, **min_map_2D;
	float *max_map, *min_map;
	hipMalloc(&max_map, sizeof(float) * SIZE);
	hipMalloc(&min_map, sizeof(float) * SIZE);
	hipMalloc(&max_map_2D, sizeof(float *) * height);
	hipMalloc(&min_map_2D, sizeof(float *) * height);
	hipLaunchKernelGGL(( set_2D), dim3(1),dim3(1), 0, 0, max_map_2D, max_map, width, height);
	hipLaunchKernelGGL(( set_2D), dim3(1),dim3(1), 0, 0, min_map_2D, min_map, width, height);
	int* extrema_array;
	hipMalloc(&extrema_array, 6 * EXTREMA_SIZE * sizeof(int));
	for (int i = 0; i < 2; i++) {
		copy(data, data + SIZE, output_s.get_cpu_wo());
		output_s.get_gpu_ro();
		BEMD(output_2D, max_map_2D, min_map_2D, extrema_array, width, height);
		unique_ptr<uint8_t[]> o(new uint8_t[SIZE]);
		const float *o_cpu = output_s.get_cpu_sr();
		/* Re-bias by +128 and clamp to the 8-bit range. */
		transform(o_cpu, o_cpu + SIZE, o.get(), [](float f) -> uint8_t { return max(min(int(f + 0.5f + 128), 255), 0); });
		/* BUG FIX: itoa() is non-standard (fails to link on non-Windows
		   toolchains); std::to_string is the portable equivalent. */
		string output_path = output_name + to_string(i) + ".pgm";
		WritePGM(o.get(), width, height, output_path.data());
		minus_data(data, o_cpu, SIZE);
	}
	system("pause"); /* Windows-only pause; harmless no-op elsewhere. */
	free(data);
	hipFree(output_2D);
	hipFree(max_map_2D);
	hipFree(min_map_2D);
	hipFree(max_map);
	hipFree(min_map);
	hipFree(extrema_array); /* BUG FIX: this buffer was leaked. */
	return 0;
}
| 413cbd6d7e048eebaedd3bfb58ed68dc9f7f65ba.cu | #include "lib/SyncedMemory.h"
#include "lib/pgm.h"
#include "lib/BEMD.h"
#include <string>
using namespace std;
/*
 * Extracts two BEMD components from a fixed input PGM: each pass runs BEMD
 * on the GPU, writes the component to figures/test_<i>.pgm, and subtracts it
 * from the working data before the next pass.
 * (cudaMalloc return codes are intentionally left unchecked, matching the
 * file's existing style.)
 */
int main(int argc, char **argv)
{
	auto input_path = "figures/2D_sin_0.pgm";
	string output_name = "figures/test_";
	bool success;
	int width, height, channel;
	auto img = ReadNetpbm(width, height, channel, success, input_path);
	if (!success) {
		puts("Something wrong with reading the input image files.");
		abort();
	}
	float** output_2D;
	const int SIZE = width*height;
	MemoryBuffer<float> output(SIZE);
	auto output_s = output.CreateSync(SIZE);
	/* The returned pointer was unused; the call itself is kept because
	   get_cpu_wo() may move the synced buffer's ownership to the CPU side
	   -- TODO confirm against SyncedMemory.h. */
	output_s.get_cpu_wo();
	float * data = (float*) malloc(SIZE*sizeof(float));
	copy(img.get(), img.get() + SIZE, data);
	minus_value(data, 128.0, SIZE);
	cudaMalloc(&output_2D, sizeof(float*) * height);
	set_2D<<<1,1>>> (output_2D, output_s.get_gpu_rw(), width, height);
	float **max_map_2D, **min_map_2D;
	float *max_map, *min_map;
	cudaMalloc(&max_map, sizeof(float) * SIZE);
	cudaMalloc(&min_map, sizeof(float) * SIZE);
	cudaMalloc(&max_map_2D, sizeof(float *) * height);
	cudaMalloc(&min_map_2D, sizeof(float *) * height);
	set_2D<<<1,1>>> (max_map_2D, max_map, width, height);
	set_2D<<<1,1>>>(min_map_2D, min_map, width, height);
	int* extrema_array;
	cudaMalloc(&extrema_array, 6 * EXTREMA_SIZE * sizeof(int));
	for (int i = 0; i < 2; i++) {
		copy(data, data + SIZE, output_s.get_cpu_wo());
		output_s.get_gpu_ro();
		BEMD(output_2D, max_map_2D, min_map_2D, extrema_array, width, height);
		unique_ptr<uint8_t[]> o(new uint8_t[SIZE]);
		const float *o_cpu = output_s.get_cpu_sr();
		/* Re-bias by +128 and clamp to the 8-bit range. */
		transform(o_cpu, o_cpu + SIZE, o.get(), [](float f) -> uint8_t { return max(min(int(f + 0.5f + 128), 255), 0); });
		/* BUG FIX: itoa() is non-standard (fails to link on non-Windows
		   toolchains); std::to_string is the portable equivalent. */
		string output_path = output_name + to_string(i) + ".pgm";
		WritePGM(o.get(), width, height, output_path.data());
		minus_data(data, o_cpu, SIZE);
	}
	system("pause"); /* Windows-only pause; harmless no-op elsewhere. */
	free(data);
	cudaFree(output_2D);
	cudaFree(max_map_2D);
	cudaFree(min_map_2D);
	cudaFree(max_map);
	cudaFree(min_map);
	cudaFree(extrema_array); /* BUG FIX: this buffer was leaked. */
	return 0;
}
|
f730fd6dac5b0e7c8fcfc56c1cd339369c4ddbb5.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <hip/hip_runtime_api.h>
#include <device_launch_parameters.h>
#define INDEX_NUM 3
#define INDEX_SUM 0
#define INDEX_MAX 1
#define INDEX_MIN 2
#define NUM_MAX 1024
#define ITEMS_NUM (1024 * 1024)
#define BLOCK_SIZE 256
using namespace std;
// TODO-1 => ./task_no_atomic
// 1 thread does all compute, no atomic/sync
// thread.0 of block.0 computes everything
// Serial baseline: each launched thread walks ALL ITEMS_NUM elements with
// plain (non-atomic) read-modify-writes on results[]. Correct only for the
// <<<1, 1>>> launch used in main(); with more threads these unguarded
// updates race. The original single-thread guard is left commented out.
__global__ void kernel_no_atomics(int *data, int *results)
{
	//if (threadIdx.x == 0) {
	for (int i = 0; i < ITEMS_NUM; i++) {
		results[INDEX_SUM] += data[i];
		results[INDEX_MAX] = (data[i] > results[INDEX_MAX]) ?
			data[i] : results[INDEX_MAX];
		results[INDEX_MIN] = (data[i] < results[INDEX_MIN]) ?
			data[i] : results[INDEX_MIN];
	}
	//}
}
// TODO-2 => ./task_partial_atomic
// ITEMS_NUM / 256 threads, ITEMS_NUM / 256 * 3 atomic calls
// thread.0 of each block does partial compute, than uses atomics to compute
// Per-block partial processing: only thread 0 of each block walks its
// BLOCK_SIZE-element chunk, issuing 3 atomics per element. Assumes the grid
// covers ITEMS_NUM exactly (gridDim.x * BLOCK_SIZE == ITEMS_NUM) -- there is
// no bounds check on data[].
__global__ void kernel_partial_atomics(int *data, int *results)
{
	if (threadIdx.x == 0) {
		int j = blockIdx.x * BLOCK_SIZE;
		for (int i = 0; i < BLOCK_SIZE; i++) {
			atomicAdd(&results[INDEX_SUM], data[i + j]);
			atomicMax(&results[INDEX_MAX], data[i + j]);
			atomicMin(&results[INDEX_MIN], data[i + j]);
		}
	}
}
// TODO-3 => ./task_full_atomic
// ITEMS_NUM threads do compute, ITEMS_NUM * 3 atomic calls
// all threads to atomics to compute
// Fully parallel version: one thread per element, bounds-checked, three
// atomics per thread (maximum contention on the three result slots).
__global__ void kernel_full_atomics(int *data, int *results)
{
	int idx = blockDim.x * blockIdx.x + threadIdx.x;
	if (idx < ITEMS_NUM) {
		atomicAdd(&results[INDEX_SUM], data[idx]);
		atomicMax(&results[INDEX_MAX], data[idx]);
		atomicMin(&results[INDEX_MIN], data[idx]);
	}
}
/*
 * Generates ITEMS_NUM random ints in managed memory, computes the expected
 * sum/max/min on the host, then runs the kernel selected at compile time
 * (NO_ATOMIC / PARTIAL_ATOMIC / FULL_ATOMIC) ten times and checks the device
 * results against the host reference.
 */
int main(void)
{
	int expResults[INDEX_NUM];
	int *data = NULL;
	hipMallocManaged(reinterpret_cast<void **>(&data),
			ITEMS_NUM * sizeof(int));
	if (data == 0) {
		cout << "[HOST] Couldn't allocate memory\n";
		return 1;
	}
	// generate data and expected result
	expResults[INDEX_SUM] = 0;
	expResults[INDEX_MAX] = 0;
	expResults[INDEX_MIN] = NUM_MAX;
	for (int i = 0; i < ITEMS_NUM; i++) {
		// each generated number is lower than NUM_MAX as value
		data[i] = rand() % NUM_MAX;
		expResults[INDEX_SUM] += data[i];
		expResults[INDEX_MAX] = (data[i] > expResults[INDEX_MAX]) ?
			data[i] : expResults[INDEX_MAX];
		expResults[INDEX_MIN] = (data[i] < expResults[INDEX_MIN]) ?
			data[i] : expResults[INDEX_MIN];
	}
	int *results = NULL;
	hipMallocManaged(reinterpret_cast<void **>(&results),
			INDEX_NUM * sizeof(int));
	if (results == 0) {
		cout << "[HOST] Couldn't allocate memory\n";
		hipFree(data); /* BUG FIX: don't leak data on this error path */
		return 1;
	}
	// compute 10 times the results
	for (int i = 0; i < 10; i++) {
		// re-init accumulators before each run
		results[INDEX_SUM] = 0;
		results[INDEX_MAX] = 0;
		results[INDEX_MIN] = NUM_MAX;
#ifdef NO_ATOMIC
		hipLaunchKernelGGL(( kernel_no_atomics), dim3(1) , dim3(1) , 0, 0, data, results);
		hipDeviceSynchronize();
#endif
#ifdef PARTIAL_ATOMIC
		hipLaunchKernelGGL(( kernel_partial_atomics), dim3(ITEMS_NUM / 256) , dim3(1) , 0, 0, data, results);
		hipDeviceSynchronize();
#endif
#ifdef FULL_ATOMIC
		hipLaunchKernelGGL(( kernel_full_atomics), dim3(ITEMS_NUM / 256) , dim3(256) , 0, 0, data, results);
		hipDeviceSynchronize();
#endif
	}
	cout << "SUM: " << results[INDEX_SUM] << endl;
	if (results[INDEX_SUM] != expResults[INDEX_SUM]) {
		cout << "Failed, SUM should be " << expResults[INDEX_SUM]
			<< endl;
	}
	cout << "MAX: " << results[INDEX_MAX] << endl;
	if (results[INDEX_MAX] != expResults[INDEX_MAX]) {
		cout << "Failed, MAX should be " << expResults[INDEX_MAX]
			<< endl;
	}
	cout << "MIN: " << results[INDEX_MIN] << endl;
	if (results[INDEX_MIN] != expResults[INDEX_MIN]) {
		cout << "Failed, MIN should be " << expResults[INDEX_MIN]
			<< endl;
	}
	hipFree(results);
	hipFree(data); /* BUG FIX: data was never freed */
	return 0;
}
| f730fd6dac5b0e7c8fcfc56c1cd339369c4ddbb5.cu | #include <iostream>
#include <cuda_runtime_api.h>
#include <device_launch_parameters.h>
#define INDEX_NUM 3
#define INDEX_SUM 0
#define INDEX_MAX 1
#define INDEX_MIN 2
#define NUM_MAX 1024
#define ITEMS_NUM (1024 * 1024)
#define BLOCK_SIZE 256
using namespace std;
// TODO-1 => ./task_no_atomic
// 1 thread does all compute, no atomic/sync
// thread.0 of block.0 computes everything
// Serial baseline: each launched thread walks ALL ITEMS_NUM elements with
// plain (non-atomic) read-modify-writes on results[]. Correct only for the
// <<<1, 1>>> launch used in main(); with more threads these unguarded
// updates race. The original single-thread guard is left commented out.
__global__ void kernel_no_atomics(int *data, int *results)
{
	//if (threadIdx.x == 0) {
	for (int i = 0; i < ITEMS_NUM; i++) {
		results[INDEX_SUM] += data[i];
		results[INDEX_MAX] = (data[i] > results[INDEX_MAX]) ?
			data[i] : results[INDEX_MAX];
		results[INDEX_MIN] = (data[i] < results[INDEX_MIN]) ?
			data[i] : results[INDEX_MIN];
	}
	//}
}
// TODO-2 => ./task_partial_atomic
// ITEMS_NUM / 256 threads, ITEMS_NUM / 256 * 3 atomic calls
// thread.0 of each block does partial compute, than uses atomics to compute
// Per-block partial processing: only thread 0 of each block walks its
// BLOCK_SIZE-element chunk, issuing 3 atomics per element. Assumes the grid
// covers ITEMS_NUM exactly (gridDim.x * BLOCK_SIZE == ITEMS_NUM) -- there is
// no bounds check on data[].
__global__ void kernel_partial_atomics(int *data, int *results)
{
	if (threadIdx.x == 0) {
		int j = blockIdx.x * BLOCK_SIZE;
		for (int i = 0; i < BLOCK_SIZE; i++) {
			atomicAdd(&results[INDEX_SUM], data[i + j]);
			atomicMax(&results[INDEX_MAX], data[i + j]);
			atomicMin(&results[INDEX_MIN], data[i + j]);
		}
	}
}
// TODO-3 => ./task_full_atomic
// ITEMS_NUM threads do compute, ITEMS_NUM * 3 atomic calls
// all threads to atomics to compute
// Fully parallel version: one thread per element, bounds-checked, three
// atomics per thread (maximum contention on the three result slots).
__global__ void kernel_full_atomics(int *data, int *results)
{
	int idx = blockDim.x * blockIdx.x + threadIdx.x;
	if (idx < ITEMS_NUM) {
		atomicAdd(&results[INDEX_SUM], data[idx]);
		atomicMax(&results[INDEX_MAX], data[idx]);
		atomicMin(&results[INDEX_MIN], data[idx]);
	}
}
/*
 * Generates ITEMS_NUM random ints in managed memory, computes the expected
 * sum/max/min on the host, then runs the kernel selected at compile time
 * (NO_ATOMIC / PARTIAL_ATOMIC / FULL_ATOMIC) ten times and checks the device
 * results against the host reference.
 */
int main(void)
{
	int expResults[INDEX_NUM];
	int *data = NULL;
	cudaMallocManaged(reinterpret_cast<void **>(&data),
			ITEMS_NUM * sizeof(int));
	if (data == 0) {
		cout << "[HOST] Couldn't allocate memory\n";
		return 1;
	}
	// generate data and expected result
	expResults[INDEX_SUM] = 0;
	expResults[INDEX_MAX] = 0;
	expResults[INDEX_MIN] = NUM_MAX;
	for (int i = 0; i < ITEMS_NUM; i++) {
		// each generated number is lower than NUM_MAX as value
		data[i] = rand() % NUM_MAX;
		expResults[INDEX_SUM] += data[i];
		expResults[INDEX_MAX] = (data[i] > expResults[INDEX_MAX]) ?
			data[i] : expResults[INDEX_MAX];
		expResults[INDEX_MIN] = (data[i] < expResults[INDEX_MIN]) ?
			data[i] : expResults[INDEX_MIN];
	}
	int *results = NULL;
	cudaMallocManaged(reinterpret_cast<void **>(&results),
			INDEX_NUM * sizeof(int));
	if (results == 0) {
		cout << "[HOST] Couldn't allocate memory\n";
		cudaFree(data); /* BUG FIX: don't leak data on this error path */
		return 1;
	}
	// compute 10 times the results
	for (int i = 0; i < 10; i++) {
		// re-init accumulators before each run
		results[INDEX_SUM] = 0;
		results[INDEX_MAX] = 0;
		results[INDEX_MIN] = NUM_MAX;
#ifdef NO_ATOMIC
		kernel_no_atomics<<< 1 , 1 >>> (data, results);
		cudaDeviceSynchronize();
#endif
#ifdef PARTIAL_ATOMIC
		kernel_partial_atomics<<< ITEMS_NUM / 256 , 1 >>> (data, results);
		cudaDeviceSynchronize();
#endif
#ifdef FULL_ATOMIC
		kernel_full_atomics<<< ITEMS_NUM / 256 , 256 >>> (data, results);
		cudaDeviceSynchronize();
#endif
	}
	cout << "SUM: " << results[INDEX_SUM] << endl;
	if (results[INDEX_SUM] != expResults[INDEX_SUM]) {
		cout << "Failed, SUM should be " << expResults[INDEX_SUM]
			<< endl;
	}
	cout << "MAX: " << results[INDEX_MAX] << endl;
	if (results[INDEX_MAX] != expResults[INDEX_MAX]) {
		cout << "Failed, MAX should be " << expResults[INDEX_MAX]
			<< endl;
	}
	cout << "MIN: " << results[INDEX_MIN] << endl;
	if (results[INDEX_MIN] != expResults[INDEX_MIN]) {
		cout << "Failed, MIN should be " << expResults[INDEX_MIN]
			<< endl;
	}
	cudaFree(results);
	cudaFree(data); /* BUG FIX: data was never freed */
	return 0;
}
|
db7445fff664b20af351fa231a38e94250094e95.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <hip/hip_runtime.h>
#define BLOCK_SIZE 4
/*
 * Naive GEMM: one thread per output cell.
 * pMatrizResultante[row][col] = dot(row of A, column of B), with
 * pFilasMatrizB terms per dot product and pColumnasMatrizB columns in B/C.
 * NOTE(review): the `row` guard compares against pColumnasMatrizA
 * ("columns of A"), which is used throughout as the ROW count of A and of
 * the result; this only works because main() uses square 4x4 matrices --
 * confirm the parameter naming before using non-square shapes.
 */
__global__ void matrixGPU(int *pMatrizA,int *pMatrizB, int *pMatrizResultante, int pColumnasMatrizA, int pFilasMatrizB, int pColumnasMatrizB)
{
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int sum = 0;
    if( col < pColumnasMatrizB && row < pColumnasMatrizA)
    {
        for(int i = 0; i < pFilasMatrizB; i++)
        {
            sum += pMatrizA[row * pFilasMatrizB + i] * pMatrizB[i * pColumnasMatrizB + col];
        }
        pMatrizResultante[row * pColumnasMatrizB + col] = sum;
    }
}
/*
 * CPU reference GEMM: for each output cell, accumulates the dot product of a
 * row of A with a column of B. pColumnasMatrizA acts as the row count of the
 * result, pFilasMatrizB as the shared inner dimension, pColumnasMatrizB as
 * the column count of B and the result.
 */
void matrixCPU(int *pMatrizA, int *pMatrizB, int *pMatrizResultante, int pColumnasMatrizA, int pFilasMatrizB, int pColumnasMatrizB) {
    for (int fila = 0; fila < pColumnasMatrizA; ++fila) {
        for (int columna = 0; columna < pColumnasMatrizB; ++columna) {
            int acumulado = 0;
            for (int k = 0; k < pFilasMatrizB; ++k)
                acumulado += pMatrizA[fila * pFilasMatrizB + k] * pMatrizB[k * pColumnasMatrizB + columna];
            pMatrizResultante[fila * pColumnasMatrizB + columna] = acumulado;
        }
    }
}
/*
 * Multiplies two random 4x4 int matrices on the GPU and on the CPU and
 * reports the elapsed time of each (results are not cross-checked: h_c holds
 * the GPU product, h_cc the CPU product).
 * Note: the printed values are clock() seconds even though the message says
 * "ms".
 */
int main(int argc, char const *argv[])
{
    int columnasMatrizA = 4, filasMatrizB = 4, columnasMatrizB = 4;
    /* Fixed seed for illustration */
    srand(123456987);
    // allocate memory in host RAM, h_cc is used to store CPU result
    int *h_a, *h_b, *h_c, *h_cc;
    /* BUG FIX: these buffers are written and read on the HOST. The original
       allocated them with hipMalloc (device memory), which faults on the
       first host access; pinned host memory also matches the hipHostFree
       calls below. */
    hipHostMalloc((void **) &h_a, sizeof(int)*columnasMatrizA*filasMatrizB);
    hipHostMalloc((void **) &h_b, sizeof(int)*filasMatrizB*columnasMatrizB);
    hipHostMalloc((void **) &h_c, sizeof(int)*columnasMatrizA*columnasMatrizB);
    hipHostMalloc((void **) &h_cc, sizeof(int)*columnasMatrizA*columnasMatrizB);
    // Fill matrices A and B with random values
    for (int i = 0; i < columnasMatrizA; ++i) {
        for (int j = 0; j < filasMatrizB; ++j) {
            h_a[i* filasMatrizB+ j] = rand() % 1024;
            h_b[i* filasMatrizB+ j] = rand() % 1024;
        }
    }
    double tiempoGPU, tiempoCPU;
    int *d_a, *d_b, *d_c;
    hipMalloc((void **) &d_a, sizeof(int)*columnasMatrizA*filasMatrizB);
    hipMalloc((void **) &d_b, sizeof(int)*filasMatrizB*columnasMatrizB);
    hipMalloc((void **) &d_c, sizeof(int)*columnasMatrizA*columnasMatrizB);
    hipMemcpy(d_a, h_a, sizeof(int)*columnasMatrizA*filasMatrizB, hipMemcpyHostToDevice);
    hipMemcpy(d_b, h_b, sizeof(int)*filasMatrizB*columnasMatrizB, hipMemcpyHostToDevice);
    unsigned int grid_rows = (columnasMatrizA + BLOCK_SIZE - 1) / BLOCK_SIZE;
    unsigned int grid_cols = (columnasMatrizB + BLOCK_SIZE - 1) / BLOCK_SIZE;
    dim3 dimGrid(grid_cols, grid_rows);
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    clock_t start_d=clock();
    hipLaunchKernelGGL(( matrixGPU), dim3(dimGrid), dim3(dimBlock), 0, 0, d_a, d_b, d_c, columnasMatrizA, filasMatrizB, columnasMatrizB);
    hipDeviceSynchronize();
    clock_t end_d = clock();
    hipMemcpy(h_c, d_c, sizeof(int)*columnasMatrizA*columnasMatrizB, hipMemcpyDeviceToHost);
    // GPU elapsed time
    tiempoGPU = (double)(end_d-start_d)/CLOCKS_PER_SEC;
    printf("GPU time: %f ms.\n\n", tiempoGPU);
    // CPU elapsed time
    clock_t start_h=clock();
    matrixCPU(h_a, h_b, h_cc, columnasMatrizA, filasMatrizB,columnasMatrizB);
    clock_t end_h = clock();
    tiempoCPU = (double)(end_h-start_h)/CLOCKS_PER_SEC;
    printf("CPU time: %f ms.\n\n", tiempoCPU);
    // free memory
    hipFree(d_a);
    hipFree(d_b);
    hipFree(d_c);
    hipHostFree(h_a);
    hipHostFree(h_b);
    hipHostFree(h_c);
    hipHostFree(h_cc);
    return 0;
}
| db7445fff664b20af351fa231a38e94250094e95.cu | #include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <cuda.h>
#define BLOCK_SIZE 4
/*
 * Naive GEMM: one thread per output cell.
 * pMatrizResultante[row][col] = dot(row of A, column of B), with
 * pFilasMatrizB terms per dot product and pColumnasMatrizB columns in B/C.
 * NOTE(review): the `row` guard compares against pColumnasMatrizA
 * ("columns of A"), which is used throughout as the ROW count of A and of
 * the result; this only works because main() uses square 4x4 matrices --
 * confirm the parameter naming before using non-square shapes.
 */
__global__ void matrixGPU(int *pMatrizA,int *pMatrizB, int *pMatrizResultante, int pColumnasMatrizA, int pFilasMatrizB, int pColumnasMatrizB)
{
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int sum = 0;
    if( col < pColumnasMatrizB && row < pColumnasMatrizA)
    {
        for(int i = 0; i < pFilasMatrizB; i++)
        {
            sum += pMatrizA[row * pFilasMatrizB + i] * pMatrizB[i * pColumnasMatrizB + col];
        }
        pMatrizResultante[row * pColumnasMatrizB + col] = sum;
    }
}
/*
 * CPU reference GEMM: for each output cell, accumulates the dot product of a
 * row of A with a column of B. pColumnasMatrizA acts as the row count of the
 * result, pFilasMatrizB as the shared inner dimension, pColumnasMatrizB as
 * the column count of B and the result.
 */
void matrixCPU(int *pMatrizA, int *pMatrizB, int *pMatrizResultante, int pColumnasMatrizA, int pFilasMatrizB, int pColumnasMatrizB) {
    for (int fila = 0; fila < pColumnasMatrizA; ++fila) {
        for (int columna = 0; columna < pColumnasMatrizB; ++columna) {
            int acumulado = 0;
            for (int k = 0; k < pFilasMatrizB; ++k)
                acumulado += pMatrizA[fila * pFilasMatrizB + k] * pMatrizB[k * pColumnasMatrizB + columna];
            pMatrizResultante[fila * pColumnasMatrizB + columna] = acumulado;
        }
    }
}
/*
 * Multiplies two random 4x4 int matrices on the GPU and on the CPU and
 * reports the elapsed time of each (results are not cross-checked: h_c holds
 * the GPU product, h_cc the CPU product).
 * Note: the printed values are clock() seconds even though the message says
 * "ms".
 */
int main(int argc, char const *argv[])
{
    int columnasMatrizA = 4, filasMatrizB = 4, columnasMatrizB = 4;
    /* Fixed seed for illustration */
    srand(123456987);
    // allocate memory in host RAM, h_cc is used to store CPU result
    int *h_a, *h_b, *h_c, *h_cc;
    /* BUG FIX: these buffers are written and read on the HOST. The original
       allocated them with cudaMalloc (device memory), which faults on the
       first host access; pinned host memory also matches the cudaFreeHost
       calls below. */
    cudaMallocHost((void **) &h_a, sizeof(int)*columnasMatrizA*filasMatrizB);
    cudaMallocHost((void **) &h_b, sizeof(int)*filasMatrizB*columnasMatrizB);
    cudaMallocHost((void **) &h_c, sizeof(int)*columnasMatrizA*columnasMatrizB);
    cudaMallocHost((void **) &h_cc, sizeof(int)*columnasMatrizA*columnasMatrizB);
    // Fill matrices A and B with random values
    for (int i = 0; i < columnasMatrizA; ++i) {
        for (int j = 0; j < filasMatrizB; ++j) {
            h_a[i* filasMatrizB+ j] = rand() % 1024;
            h_b[i* filasMatrizB+ j] = rand() % 1024;
        }
    }
    double tiempoGPU, tiempoCPU;
    int *d_a, *d_b, *d_c;
    cudaMalloc((void **) &d_a, sizeof(int)*columnasMatrizA*filasMatrizB);
    cudaMalloc((void **) &d_b, sizeof(int)*filasMatrizB*columnasMatrizB);
    cudaMalloc((void **) &d_c, sizeof(int)*columnasMatrizA*columnasMatrizB);
    cudaMemcpy(d_a, h_a, sizeof(int)*columnasMatrizA*filasMatrizB, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, sizeof(int)*filasMatrizB*columnasMatrizB, cudaMemcpyHostToDevice);
    unsigned int grid_rows = (columnasMatrizA + BLOCK_SIZE - 1) / BLOCK_SIZE;
    unsigned int grid_cols = (columnasMatrizB + BLOCK_SIZE - 1) / BLOCK_SIZE;
    dim3 dimGrid(grid_cols, grid_rows);
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    clock_t start_d=clock();
    matrixGPU<<<dimGrid, dimBlock>>>(d_a, d_b, d_c, columnasMatrizA, filasMatrizB, columnasMatrizB);
    /* BUG FIX: cudaThreadSynchronize() is deprecated; use the replacement. */
    cudaDeviceSynchronize();
    clock_t end_d = clock();
    cudaMemcpy(h_c, d_c, sizeof(int)*columnasMatrizA*columnasMatrizB, cudaMemcpyDeviceToHost);
    // GPU elapsed time
    tiempoGPU = (double)(end_d-start_d)/CLOCKS_PER_SEC;
    printf("GPU time: %f ms.\n\n", tiempoGPU);
    // CPU elapsed time
    clock_t start_h=clock();
    matrixCPU(h_a, h_b, h_cc, columnasMatrizA, filasMatrizB,columnasMatrizB);
    clock_t end_h = clock();
    tiempoCPU = (double)(end_h-start_h)/CLOCKS_PER_SEC;
    printf("CPU time: %f ms.\n\n", tiempoCPU);
    // free memory
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    cudaFreeHost(h_a);
    cudaFreeHost(h_b);
    cudaFreeHost(h_c);
    cudaFreeHost(h_cc);
    return 0;
}
|
52a7f07f57f8d63e307ba0cb01b3aae517449a20.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) Microsoft Corporation. All rights reserved.
// Copyright NVIDIA/apex
// This file is adapted from NVIDIA/apex, commit 3ff1a10f72ec07067c4e44759442329804ac5162
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/Exceptions.h>
#include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h>
// Another possibility:
// #include <torch/all.h>
#include <assert.h>
#include "type_shim.h"
#include "multi_tensor_apply.cuh"
#define BLOCK_SIZE 512
#define ILP 4
// True iff p is suitably aligned for an ILP-element vector access of T
// (i.e. for the aligned_storage type used by load_store below).
template <typename T>
__device__ __forceinline__ bool is_aligned(T* p) {
  return ((uint64_t)p) % (ILP * sizeof(T)) == 0;
}
// Copies ILP consecutive T's in a single vector-width load/store; offsets
// are counted in ILP-sized groups. Both locations must satisfy is_aligned.
template <typename T>
__device__ __forceinline__ void load_store(T* dst, T* src, int dst_offset, int src_offset) {
  typedef typename std::aligned_storage<ILP * sizeof(T), ILP * alignof(T)>::type LT;
  ((LT*)dst)[dst_offset] = ((LT*)src)[src_offset];
}
// multi_tensor_apply functor: accumulates the sum of squares of one chunk of
// one tensor. Each block's partial is added to output[blockIdx.x] and, when
// per_tensor is set, also stored at
// output_per_tensor[tensor * max_chunks_per_tensor + chunk]; a later
// `cleanup` launch reduces these and applies sqrt. A non-finite partial sets
// *noop_gmem = 1 (the racing writes are intentional -- any write of 1 wins).
template <typename x_t>
struct L2NormFunctor {
  __device__ __forceinline__ void operator()(int chunk_size, volatile int* noop_gmem, TensorListMetadata<1>& tl,
                                             float* output, float* output_per_tensor, bool per_tensor,
                                             int max_chunks_per_tensor) {
    // I'd like this kernel to propagate infs/nans.
    // if(*noop_gmem == 1)
    //   return;
    int tensor_loc = tl.block_to_tensor[blockIdx.x];
    int chunk_idx = tl.block_to_chunk[blockIdx.x];
    int n = tl.sizes[tensor_loc];
    x_t* x = (x_t*)tl.addresses[0][tensor_loc];
    x += chunk_idx * chunk_size;  // advance to this block's chunk
    n -= chunk_idx * chunk_size;  // elements remaining from the chunk start
    __shared__ float s_vals[512];
    float vals[ILP];  // = {0}; // this probably works too but I want to be sure...
    x_t r_x[ILP];
    for (int i = 0; i < ILP; i++) {
      vals[i] = 0.f;
      r_x[i] = 0;
    }
    // to make things simple, we put aligned case in a different code path
    if (n % ILP == 0 && chunk_size % ILP == 0 && is_aligned(x)) {
      // Fast path: ILP-wide vectorized loads through load_store.
      for (int i_start = threadIdx.x; i_start * ILP < n && i_start * ILP < chunk_size; i_start += blockDim.x) {
        // load
        load_store(r_x, x, 0, i_start);
#pragma unroll
        for (int ii = 0; ii < ILP; ii++) {
          float next = static_cast<float>(r_x[ii]);
          vals[ii] += next * next;
        }
      }
    } else {
      // Fallback: scalar block-strided loads with per-element bounds checks.
      for (int i_start = 0; i_start < n && i_start < chunk_size; i_start += blockDim.x * ILP) {
#pragma unroll
        for (int ii = 0; ii < ILP; ii++) {
          int i = i_start + threadIdx.x + ii * blockDim.x;
          if (i < n && i < chunk_size) {
            float next = static_cast<float>(x[i]);
            vals[ii] += next * next;
          }
        }
      }
    }
    float val = 0.f;
    for (int i = 0; i < ILP; i++) val += vals[i];
    // reduce_block_into_lanes: block-wide reduction helper defined elsewhere.
    float final = reduce_block_into_lanes(s_vals, val);
    if (threadIdx.x == 0) {
      if (!isfinite(final)) *noop_gmem = 1;  // Blindly fire off a write. These will race but that's ok.
      output[blockIdx.x] += final;
      if (per_tensor)
        output_per_tensor[(tl.start_tensor_this_launch + tensor_loc) * max_chunks_per_tensor + chunk_idx] = final;
    }
  }
};
// Second-stage reduction for multi_tensor_l2norm.
// Block 0 sums the 320 per-block partials produced by L2NormFunctor and
// writes *ret = sqrt(sum). When per_tensor is set, block b additionally
// reduces tensor b's max_chunks_per_tensor chunk partials into
// ret_per_tensor[b]. Assumes blockDim.x == 512 (shared scratch size) and
// that the first pass accumulated into an `output` buffer of exactly 320
// floats.
__global__ void cleanup(float* output, float* output_per_tensor, float* ret, float* ret_per_tensor, bool per_tensor,
                        int max_chunks_per_tensor) {
  __shared__ float vals[512];
  if (blockIdx.x == 0) {
    float val = 0;
    if (threadIdx.x < 320) val = output[threadIdx.x];
    float final = reduce_block_into_lanes(vals, val);
    if (threadIdx.x == 0) *ret = sqrt(final);
  }
  if (per_tensor) {
    float* output_this_tensor = output_per_tensor + blockIdx.x * max_chunks_per_tensor;
    float val = 0;
    for (int i = threadIdx.x; i < max_chunks_per_tensor; i += blockDim.x) val += output_this_tensor[i];
    float final = reduce_block_into_lanes(vals, val);
    if (threadIdx.x == 0) ret_per_tensor[blockIdx.x] = sqrt(final);
  }
}
// Host entry point: computes the global L2 norm across all tensors in
// tensor_lists[0] and, when per_tensor_python is true, each tensor's own
// norm. Two passes: L2NormFunctor fills 320 per-block partial sums of
// squares (plus optional per-tensor/per-chunk partials), then one `cleanup`
// launch reduces them and applies sqrt.
// Returns (1-element total-norm tensor, per-tensor norms -- an empty tensor
// when per_tensor is false).
std::tuple<at::Tensor, at::Tensor> multi_tensor_l2norm_cuda(int chunk_size, at::Tensor noop_flag,
                                                            std::vector<std::vector<at::Tensor>> tensor_lists,
                                                            at::optional<bool> per_tensor_python) {
  bool per_tensor = per_tensor_python.has_value() ? per_tensor_python.value() : false;
  auto float_options = tensor_lists[0][0].options().dtype(at::kFloat);
  // 320 partial slots -- must match the hard-coded width in cleanup().
  auto output = at::zeros({320}, float_options);
  at::Tensor output_per_tensor;
  at::Tensor ret_per_tensor;
  int ntensors = tensor_lists[0].size();
  int max_chunks_per_tensor = -1;
  if (per_tensor) {
    // Size the per-tensor buffer by the largest chunk count among tensors.
    for (int t = 0; t < ntensors; t++) {
      int max_chunks_this_tensor = (tensor_lists[0][t].numel() + chunk_size - 1) / chunk_size;
      if (max_chunks_this_tensor > max_chunks_per_tensor) max_chunks_per_tensor = max_chunks_this_tensor;
    }
    output_per_tensor = at::zeros({ntensors * max_chunks_per_tensor}, float_options);
    ret_per_tensor = at::empty({ntensors}, float_options);
  } else {
    ret_per_tensor = at::empty({0}, float_options);
  }
  DISPATCH_DOUBLE_FLOAT_AND_HALF(
      tensor_lists[0][0].scalar_type(), 0, "multi_tensor_l2norm_cuda",
      multi_tensor_apply<1>(BLOCK_SIZE, chunk_size, noop_flag, tensor_lists, L2NormFunctor<scalar_t_0>(),
                            output.data_ptr<float>(), per_tensor ? output_per_tensor.data_ptr<float>() : nullptr,
                            per_tensor, max_chunks_per_tensor);)
  AT_CUDA_CHECK(hipGetLastError());
  // AT_CUDA_CHECK(hipDeviceSynchronize());
  // This involves one more small kernel launches, but will be negligible end to end.
  // I could get rid of these by hacking the functor + multi tensor harness with persistence
  // logic, but keeping it simple for now
  auto ret = at::empty({1}, output.options());
  const at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard(device_of(output));
  auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
  hipLaunchKernelGGL(( cleanup), dim3(per_tensor ? ntensors : 1), dim3(512), 0, stream,
      output.data_ptr<float>(), per_tensor ? output_per_tensor.data_ptr<float>() : nullptr, ret.data_ptr<float>(),
      per_tensor ? ret_per_tensor.data_ptr<float>() : nullptr, per_tensor, max_chunks_per_tensor);
  return std::tuple<at::Tensor, at::Tensor>(ret, ret_per_tensor);
}
| 52a7f07f57f8d63e307ba0cb01b3aae517449a20.cu | // Copyright (c) Microsoft Corporation. All rights reserved.
// Copyright NVIDIA/apex
// This file is adapted from NVIDIA/apex, commit 3ff1a10f72ec07067c4e44759442329804ac5162
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/Exceptions.h>
#include <c10/cuda/CUDAGuard.h>
// Another possibility:
// #include <torch/all.h>
#include <assert.h>
#include "type_shim.h"
#include "multi_tensor_apply.cuh"
#define BLOCK_SIZE 512
#define ILP 4
// True iff p is suitably aligned for an ILP-element vector access of T
// (i.e. for the aligned_storage type used by load_store below).
template <typename T>
__device__ __forceinline__ bool is_aligned(T* p) {
  return ((uint64_t)p) % (ILP * sizeof(T)) == 0;
}
// Copies ILP consecutive T's in a single vector-width load/store; offsets
// are counted in ILP-sized groups. Both locations must satisfy is_aligned.
template <typename T>
__device__ __forceinline__ void load_store(T* dst, T* src, int dst_offset, int src_offset) {
  typedef typename std::aligned_storage<ILP * sizeof(T), ILP * alignof(T)>::type LT;
  ((LT*)dst)[dst_offset] = ((LT*)src)[src_offset];
}
// multi_tensor_apply functor: accumulates the sum of squares of one chunk of
// one tensor. Each block's partial is added to output[blockIdx.x] and, when
// per_tensor is set, also stored at
// output_per_tensor[tensor * max_chunks_per_tensor + chunk]; a later
// `cleanup` launch reduces these and applies sqrt. A non-finite partial sets
// *noop_gmem = 1 (the racing writes are intentional -- any write of 1 wins).
template <typename x_t>
struct L2NormFunctor {
  __device__ __forceinline__ void operator()(int chunk_size, volatile int* noop_gmem, TensorListMetadata<1>& tl,
                                             float* output, float* output_per_tensor, bool per_tensor,
                                             int max_chunks_per_tensor) {
    // I'd like this kernel to propagate infs/nans.
    // if(*noop_gmem == 1)
    //   return;
    int tensor_loc = tl.block_to_tensor[blockIdx.x];
    int chunk_idx = tl.block_to_chunk[blockIdx.x];
    int n = tl.sizes[tensor_loc];
    x_t* x = (x_t*)tl.addresses[0][tensor_loc];
    x += chunk_idx * chunk_size;  // advance to this block's chunk
    n -= chunk_idx * chunk_size;  // elements remaining from the chunk start
    __shared__ float s_vals[512];
    float vals[ILP];  // = {0}; // this probably works too but I want to be sure...
    x_t r_x[ILP];
    for (int i = 0; i < ILP; i++) {
      vals[i] = 0.f;
      r_x[i] = 0;
    }
    // to make things simple, we put aligned case in a different code path
    if (n % ILP == 0 && chunk_size % ILP == 0 && is_aligned(x)) {
      // Fast path: ILP-wide vectorized loads through load_store.
      for (int i_start = threadIdx.x; i_start * ILP < n && i_start * ILP < chunk_size; i_start += blockDim.x) {
        // load
        load_store(r_x, x, 0, i_start);
#pragma unroll
        for (int ii = 0; ii < ILP; ii++) {
          float next = static_cast<float>(r_x[ii]);
          vals[ii] += next * next;
        }
      }
    } else {
      // Fallback: scalar block-strided loads with per-element bounds checks.
      for (int i_start = 0; i_start < n && i_start < chunk_size; i_start += blockDim.x * ILP) {
#pragma unroll
        for (int ii = 0; ii < ILP; ii++) {
          int i = i_start + threadIdx.x + ii * blockDim.x;
          if (i < n && i < chunk_size) {
            float next = static_cast<float>(x[i]);
            vals[ii] += next * next;
          }
        }
      }
    }
    float val = 0.f;
    for (int i = 0; i < ILP; i++) val += vals[i];
    // reduce_block_into_lanes: block-wide reduction helper defined elsewhere.
    float final = reduce_block_into_lanes(s_vals, val);
    if (threadIdx.x == 0) {
      if (!isfinite(final)) *noop_gmem = 1;  // Blindly fire off a write. These will race but that's ok.
      output[blockIdx.x] += final;
      if (per_tensor)
        output_per_tensor[(tl.start_tensor_this_launch + tensor_loc) * max_chunks_per_tensor + chunk_idx] = final;
    }
  }
};
__global__ void cleanup(float* output, float* output_per_tensor, float* ret, float* ret_per_tensor, bool per_tensor,
int max_chunks_per_tensor) {
__shared__ float vals[512];
if (blockIdx.x == 0) {
float val = 0;
if (threadIdx.x < 320) val = output[threadIdx.x];
float final = reduce_block_into_lanes(vals, val);
if (threadIdx.x == 0) *ret = sqrt(final);
}
if (per_tensor) {
float* output_this_tensor = output_per_tensor + blockIdx.x * max_chunks_per_tensor;
float val = 0;
for (int i = threadIdx.x; i < max_chunks_per_tensor; i += blockDim.x) val += output_this_tensor[i];
float final = reduce_block_into_lanes(vals, val);
if (threadIdx.x == 0) ret_per_tensor[blockIdx.x] = sqrt(final);
}
}
std::tuple<at::Tensor, at::Tensor> multi_tensor_l2norm_cuda(int chunk_size, at::Tensor noop_flag,
std::vector<std::vector<at::Tensor>> tensor_lists,
at::optional<bool> per_tensor_python) {
bool per_tensor = per_tensor_python.has_value() ? per_tensor_python.value() : false;
auto float_options = tensor_lists[0][0].options().dtype(at::kFloat);
auto output = at::zeros({320}, float_options);
at::Tensor output_per_tensor;
at::Tensor ret_per_tensor;
int ntensors = tensor_lists[0].size();
int max_chunks_per_tensor = -1;
if (per_tensor) {
for (int t = 0; t < ntensors; t++) {
int max_chunks_this_tensor = (tensor_lists[0][t].numel() + chunk_size - 1) / chunk_size;
if (max_chunks_this_tensor > max_chunks_per_tensor) max_chunks_per_tensor = max_chunks_this_tensor;
}
output_per_tensor = at::zeros({ntensors * max_chunks_per_tensor}, float_options);
ret_per_tensor = at::empty({ntensors}, float_options);
} else {
ret_per_tensor = at::empty({0}, float_options);
}
DISPATCH_DOUBLE_FLOAT_AND_HALF(
tensor_lists[0][0].scalar_type(), 0, "multi_tensor_l2norm_cuda",
multi_tensor_apply<1>(BLOCK_SIZE, chunk_size, noop_flag, tensor_lists, L2NormFunctor<scalar_t_0>(),
output.data_ptr<float>(), per_tensor ? output_per_tensor.data_ptr<float>() : nullptr,
per_tensor, max_chunks_per_tensor);)
AT_CUDA_CHECK(cudaGetLastError());
// AT_CUDA_CHECK(cudaDeviceSynchronize());
// This involves one more small kernel launches, but will be negligible end to end.
// I could get rid of these by hacking the functor + multi tensor harness with persistence
// logic, but keeping it simple for now
auto ret = at::empty({1}, output.options());
const at::cuda::OptionalCUDAGuard device_guard(device_of(output));
auto stream = at::cuda::getCurrentCUDAStream();
cleanup<<<per_tensor ? ntensors : 1, 512, 0, stream>>>(
output.data_ptr<float>(), per_tensor ? output_per_tensor.data_ptr<float>() : nullptr, ret.data_ptr<float>(),
per_tensor ? ret_per_tensor.data_ptr<float>() : nullptr, per_tensor, max_chunks_per_tensor);
return std::tuple<at::Tensor, at::Tensor>(ret, ret_per_tensor);
}
|
9e04d0f911d3ebb1120a740f18e1fb44006aff55.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@author Azzam Haidar
@author Tingxing Dong
@generated from magmablas/zpotf2_kernels_old.cu, normal z -> c, Mon Jun 25 18:24:16 2018
*/
#include "magma_internal.h"
#include "batched_kernel_param.h"
#include "magma_templates.h"
#define COMPLEX
#define A(i, j) (A + (i) + (j)*lda) // A(i, j) means at i row, j column
// dynamically allocated shared memory, set to size number of threads when the kernel is launched.
// See CUDA Guide B.2.3
extern __shared__ magmaFloatComplex shared_data[];
// dynamically allocated shared memory, set to size number of threads when the kernel is launched.
// See CUDA Guide B.2.3
extern __shared__ float dble_shared_data[];
/******************************************************************************/
__global__ void cdotc_kernel_batched(
int n, magmaFloatComplex **x_array, int incx, int offset,
magma_int_t *info_array, int gbstep)
{
int tx = threadIdx.x;
magmaFloatComplex *x = x_array[blockIdx.z]+offset;
float *sdata = dble_shared_data;
magmaFloatComplex res = MAGMA_C_ZERO;
if (tx < n) {
res = x[tx*incx];
}
sdata[tx] = MAGMA_C_REAL(res * MAGMA_C_CONJ(res));
__syncthreads();
for (int s = blockDim.x/2; s > 32; s >>= 1 ) {
if (tx < s) {
sdata[tx] += sdata[tx+s];
}
__syncthreads();
}
if (tx < 32) {
volatile float* smem = sdata;
smem[tx] += smem[tx+32];
smem[tx] += smem[tx+16];
smem[tx] += smem[tx+8];
smem[tx] += smem[tx+4];
smem[tx] += smem[tx+2];
smem[tx] += smem[tx+1];
}
if (tx == 0) {
float xreal = MAGMA_C_REAL(x[n*incx]);
x[n*incx] = MAGMA_C_MAKE(sqrt(xreal - sdata[0]), 0);
if (xreal <= MAGMA_D_ZERO) {
info_array[blockIdx.z] = offset + gbstep + 1;
}
}
}
/******************************************************************************/
void magma_cpotf2_cdotc_batched(magma_int_t n, magmaFloatComplex **x_array, magma_int_t incx, magma_int_t offset, magma_int_t *info_array, magma_int_t gbstep, magma_int_t batchCount, magma_queue_t queue)
{
/*
Specialized Cdotc
1) performs cdotc sum = x[0:n-1]*conj(x[0:n-1])
2) updates x[n] = sqrt(x[n]-sum);
*/
if (n > MAX_NTHREADS) {
fprintf( stderr, "%s: n = %lld > %lld is not supported\n",
__func__, (long long) n, (long long) MAX_NTHREADS );
}
int threadSize;
if (n <= 1024 && n > 512) {
threadSize = 1024;
}
else if (n <= 512 && n > 256 ) {
threadSize = 512;
}
else if (n <= 256 && n > 128) {
threadSize = 256;
}
else if (n <= 128 && n > 64) {
threadSize = 128;
}
else {
threadSize = 64;
}
dim3 grid(1, 1, batchCount);
size_t shmem = threadSize * sizeof(float);
hipLaunchKernelGGL(( cdotc_kernel_batched)
, dim3(grid), dim3(threadSize), shmem, queue->cuda_stream() ,
n, x_array, incx, offset, info_array, gbstep);
}
/******************************************************************************/
__global__ void csscal_kernel_batched(
int n, magmaFloatComplex **x_array, int incx, int offset,
magma_int_t *info_array)
{
// checkinfo to avoid computation of the singular matrix
if (info_array[blockIdx.z] != 0 ) return;
int id = threadIdx.x;
magmaFloatComplex *x = x_array[blockIdx.z]+offset;
__shared__ magmaFloatComplex factor;
if (threadIdx.x == 0) {
factor = MAGMA_C_MAKE(1.0/MAGMA_C_REAL(x[0]), 0.0);
}
__syncthreads();
if ( id < n && id > 0) {
x[id*incx] = x[id*incx] * factor;
//printf("x=%f", x[id*incx]);
}
}
/******************************************************************************/
void magma_cpotf2_csscal_batched(
magma_int_t n, magmaFloatComplex **x_array, magma_int_t incx,
magma_int_t offset, magma_int_t *info_array,
magma_int_t batchCount, magma_queue_t queue)
{
/*
Specialized Csscal perform x[1:n-1]/x[0]
*/
dim3 grid(1, 1, batchCount);
dim3 threads(n, 1, 1);
hipLaunchKernelGGL(( csscal_kernel_batched)
, dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
n, x_array, incx, offset, info_array);
}
/******************************************************************************/
__global__ void clacgv_kernel_batched(int n, magmaFloatComplex **x_array, int incx, int offset)
{
int id = threadIdx.x;
magmaFloatComplex *x = x_array[blockIdx.z]+offset;
if ( id < n ) {
x[id*incx] = MAGMA_C_CONJ(x[id*incx]);
}
}
/***************************************************************************//**
Purpose
-------
CLACGV conjugates a complex vector of length N.
Arguments
---------
N (input) INTEGER
The length of the vector X. N >= 0.
X (input/output) COMPLEX array, dimension
(1+(N-1)*abs(INCX))
On entry, the vector of length N to be conjugated.
On exit, X is overwritten with conjg(X).
INCX (input) INTEGER
The spacing between successive elements of X.
@ingroup magma_lacgv_batched
*******************************************************************************/
void magma_clacgv_batched(
magma_int_t n, magmaFloatComplex **x_array, magma_int_t incx,
magma_int_t offset, magma_int_t batchCount, magma_queue_t queue)
{
dim3 grid(1, 1, batchCount);
dim3 threads(n, 1, 1);
hipLaunchKernelGGL(( clacgv_kernel_batched)
, dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
n, x_array, incx, offset);
}
/******************************************************************************/
static __device__ void cpotf2_device(int m, int n,
magmaFloatComplex *A, int lda,
magmaFloatComplex alpha,
magmaFloatComplex beta, magma_int_t *info, int gbstep)
{
/*
Each thread block load entire A into shared memory
factorize it and copy back. n must be small enough to fit shared memory.
n is checked by a macro POTF2_TILE_SIZE before the kernel.
*/
// checkinfo to avoid computation of the singular matrix
if (*info != 0 ) return;
int tx = threadIdx.x;
magmaFloatComplex *sdata_A = shared_data;
__shared__ magmaFloatComplex factor;
__shared__ float sum[POTF2_TILE_SIZE];
// load A into sdata_A
if (tx < m)
{
for (int i=0; i < n; i++)
{
sdata_A[tx + i * m] = A[tx + i * lda];
}
}
__syncthreads();
for (int iter=0; iter < n; iter++)
{
float res = MAGMA_D_ZERO;
magmaFloatComplex res1 = MAGMA_C_ZERO;
//1) performs cdotc sum = A[iter, 0:iter-1]*conj(A[iter, 0:iter-1])
//2) updates A[iter,iter] = sqrt(A[iter,iter]-sum);
if (tx < iter)
{
res = MAGMA_C_REAL (sdata_A[iter + tx * m] * MAGMA_C_CONJ(sdata_A[iter + tx * m]));
sum[tx] = res;
}
else
{
sum[tx] = 0.0;
}
__syncthreads();
magma_sum_reduce<POTF2_TILE_SIZE>(tx, sum); //tried on K40: if m=32 n=32 the overall cpotf2_device routine time is 60ms n=16 time=25 n=8 time=20ms
//magma_sum_reduce_n(iter, tx, sum); //tried on K40: if m=32 n=32 the time went from 61ms to 70ms when switching to reduce_n. n=16 time=28.
//magma_sum_reduce_inlined(iter, tx, sum); //tried on K40: similar to magma_sum_reduce<POTF2_TILE_SIZE>(tx, sum);
__shared__ float xreal;
if (tx == 0) {
xreal = MAGMA_C_REAL(sdata_A[iter + iter * m]);
sdata_A[iter + iter * m] = MAGMA_C_MAKE(sqrt(xreal - sum[0]), 0);
if (xreal <= MAGMA_D_ZERO) {
*info = iter + gbstep + 1;
}
}
__syncthreads();
if (xreal <= MAGMA_D_ZERO) return;
__syncthreads();
//clacgv conjugates a complex vector of length iter. //TODO
#ifdef COMPLEX
if (tx < iter)
{
sdata_A[iter + tx * m] = MAGMA_C_CONJ(sdata_A[iter + tx * m]);
}
__syncthreads();
#endif
// cgemv
// Compute elements iter:n-1 of column iter = A(iter:n,0:iter-1) * A(iter-1,0:iter-1) (row).
if (tx < m && tx > iter)
{
for (int j=0; j < iter; j++)
{
res1 += sdata_A[tx + j * m] * sdata_A[iter + j * m]; // TODO move the clacgv conj to be done automatically here implicitly.
}
sdata_A [tx + iter * m] = alpha * res1 + sdata_A [tx + iter * m] * beta;
}
__syncthreads();
//clacgv conjugates a complex vector of length iter.
#ifdef COMPLEX
if (tx < iter)
{
sdata_A[iter + tx * m] = MAGMA_C_CONJ(sdata_A[iter + tx * m]);
}
__syncthreads();
#endif
// csscal perform A[iter:n-1, iter]/A[iter,iter];
if (tx == 0) {
factor = MAGMA_C_MAKE(1.0/MAGMA_C_REAL(sdata_A[iter + iter * m]), 0.0);
}
__syncthreads();
if ( tx < m && tx > iter) {
sdata_A[ tx + iter * m ] *= factor;
}
__syncthreads();
}// end of iter
//copy sdata_A to A
if (tx < m)
{
for (int i=0; i < n; i++)
{
A[tx + i * lda] = sdata_A[tx + i * m];
}
}
}
/******************************************************************************/
__global__ void cpotf2_kernel_batched(int m, int n,
magmaFloatComplex **dA_array, int lda,
magmaFloatComplex alpha,
magmaFloatComplex beta,
magma_int_t *info_array, int gbstep)
{
/*
Each thread block load entire dA_array[blockIdx.z] into shared memory
factorize it and copy back. n must be small enough to fit shared memory.
n is checked by a macro POTF2_TILE_SIZE before the kernel.
*/
int batchid = blockIdx.z;
cpotf2_device(m, n, dA_array[batchid], lda, alpha, beta, &(info_array[batchid]), gbstep);
}
/******************************************************************************/
__global__ void cpotf2_kernel(int m, int n,
magmaFloatComplex *dA, int lda,
magmaFloatComplex alpha,
magmaFloatComplex beta,
magma_int_t *info)
{
cpotf2_device(m, n, dA, lda, alpha, beta, info, 0);
}
/***************************************************************************//**
Purpose
-------
cpotf2 computes the Cholesky factorization of a real symmetric
positive definite matrix A.
The factorization has the form
A = U**H * U, if UPLO = MagmaUpper, or
A = L * L**H, if UPLO = MagmaLower,
where U is an upper triangular matrix and L is lower triangular.
This is the unblocked version of the algorithm, calling Level 2 BLAS.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies whether the upper or lower triangular part of the
symmetric matrix A is stored.
- = MagmaUpper: Upper triangular
- = MagmaLower: Lower triangular
@param[in]
m INTEGER
The number of rows of the matrix A.
@param[in]
n INTEGER
The order of the matrix A. N >= 0 and N <= 512.
@param[in,out]
dA_array Array of pointers, dimension (batchCount).
Each is a COMPLEX array A, dimension (lda,n)
On entry, the symmetric matrix A. If UPLO = MagmaUpper, the leading
n by n upper triangular part of A contains the upper
triangular part of the matrix A, and the strictly lower
triangular part of A is not referenced. If UPLO = MagmaLower, the
leading n by n lower triangular part of A contains the lower
triangular part of the matrix A, and the strictly upper
triangular part of A is not referenced.
\n
On exit, if INFO = 0, the factor U or L from the Cholesky
factorization A = U**H * U or A = L * L**H.
@param[in]
lda INTEGER
The leading dimension of the array A. LDDA >= max(1,N).
@param[out]
info_array INTEGER array, dimension (batchCount).
Each is the info parameter for the corresponding matrix A
- = 0: successful exit
- < 0: if INFO = -k, the k-th argument had an illegal value
- > 0: if INFO = k, the leading minor of order k is not
positive definite, and the factorization could not be
completed.
@param[in]
gbstep INTEGER
Internal use, global step.
@param[in]
batchCount INTEGER
The number of matrices to operate on.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_potf2_batched
*******************************************************************************/
extern "C" magma_int_t
magma_cpotf2_tile_batched(
magma_uplo_t uplo, magma_int_t m, magma_int_t n,
magmaFloatComplex **dA_array, magma_int_t lda,
magma_int_t *info_array, magma_int_t gbstep, magma_int_t batchCount, magma_queue_t queue)
{
magma_int_t arginfo = 0;
if ( uplo != MagmaUpper && uplo != MagmaLower) {
arginfo = -1;
} else if (m < 0 || n < 0 || m > POTF2_TILE_SIZE || n > POTF2_TILE_SIZE) {
arginfo = -2;
} else if (lda < max(1,m)) {
arginfo = -4;
} else if (m < n) {
arginfo = -10;
}
if (uplo == MagmaUpper) {
fprintf( stderr, "%s: uplo=upper is not yet implemented\n", __func__ );
arginfo = -1;
}
if (arginfo != 0) {
magma_xerbla( __func__, -(arginfo) );
return arginfo;
}
// Quick return if possible
if (m == 0 || n == 0) {
return arginfo;
}
magmaFloatComplex alpha = MAGMA_C_NEG_ONE;
magmaFloatComplex beta = MAGMA_C_ONE;
dim3 dimGrid(1, 1, batchCount);
dim3 threads(POTF2_TILE_SIZE, 1);
size_t shmem = sizeof(magmaFloatComplex)*m*n; // + sizeof(float)*(POTF2_TILE_SIZE+1);
hipLaunchKernelGGL(( cpotf2_kernel_batched)
, dim3(dimGrid), dim3(threads), shmem, queue->cuda_stream() ,
m, n, dA_array, lda, alpha, beta, info_array, gbstep);
return arginfo;
}
| 9e04d0f911d3ebb1120a740f18e1fb44006aff55.cu | /*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@author Azzam Haidar
@author Tingxing Dong
@generated from magmablas/zpotf2_kernels_old.cu, normal z -> c, Mon Jun 25 18:24:16 2018
*/
#include "magma_internal.h"
#include "batched_kernel_param.h"
#include "magma_templates.h"
#define COMPLEX
#define A(i, j) (A + (i) + (j)*lda) // A(i, j) means at i row, j column
// dynamically allocated shared memory, set to size number of threads when the kernel is launched.
// See CUDA Guide B.2.3
extern __shared__ magmaFloatComplex shared_data[];
// dynamically allocated shared memory, set to size number of threads when the kernel is launched.
// See CUDA Guide B.2.3
extern __shared__ float dble_shared_data[];
/******************************************************************************/
__global__ void cdotc_kernel_batched(
int n, magmaFloatComplex **x_array, int incx, int offset,
magma_int_t *info_array, int gbstep)
{
int tx = threadIdx.x;
magmaFloatComplex *x = x_array[blockIdx.z]+offset;
float *sdata = dble_shared_data;
magmaFloatComplex res = MAGMA_C_ZERO;
if (tx < n) {
res = x[tx*incx];
}
sdata[tx] = MAGMA_C_REAL(res * MAGMA_C_CONJ(res));
__syncthreads();
for (int s = blockDim.x/2; s > 32; s >>= 1 ) {
if (tx < s) {
sdata[tx] += sdata[tx+s];
}
__syncthreads();
}
if (tx < 32) {
volatile float* smem = sdata;
smem[tx] += smem[tx+32];
smem[tx] += smem[tx+16];
smem[tx] += smem[tx+8];
smem[tx] += smem[tx+4];
smem[tx] += smem[tx+2];
smem[tx] += smem[tx+1];
}
if (tx == 0) {
float xreal = MAGMA_C_REAL(x[n*incx]);
x[n*incx] = MAGMA_C_MAKE(sqrt(xreal - sdata[0]), 0);
if (xreal <= MAGMA_D_ZERO) {
info_array[blockIdx.z] = offset + gbstep + 1;
}
}
}
/******************************************************************************/
void magma_cpotf2_cdotc_batched(magma_int_t n, magmaFloatComplex **x_array, magma_int_t incx, magma_int_t offset, magma_int_t *info_array, magma_int_t gbstep, magma_int_t batchCount, magma_queue_t queue)
{
/*
Specialized Cdotc
1) performs cdotc sum = x[0:n-1]*conj(x[0:n-1])
2) updates x[n] = sqrt(x[n]-sum);
*/
if (n > MAX_NTHREADS) {
fprintf( stderr, "%s: n = %lld > %lld is not supported\n",
__func__, (long long) n, (long long) MAX_NTHREADS );
}
int threadSize;
if (n <= 1024 && n > 512) {
threadSize = 1024;
}
else if (n <= 512 && n > 256 ) {
threadSize = 512;
}
else if (n <= 256 && n > 128) {
threadSize = 256;
}
else if (n <= 128 && n > 64) {
threadSize = 128;
}
else {
threadSize = 64;
}
dim3 grid(1, 1, batchCount);
size_t shmem = threadSize * sizeof(float);
cdotc_kernel_batched
<<< grid, threadSize, shmem, queue->cuda_stream() >>>
(n, x_array, incx, offset, info_array, gbstep);
}
/******************************************************************************/
__global__ void csscal_kernel_batched(
int n, magmaFloatComplex **x_array, int incx, int offset,
magma_int_t *info_array)
{
// checkinfo to avoid computation of the singular matrix
if (info_array[blockIdx.z] != 0 ) return;
int id = threadIdx.x;
magmaFloatComplex *x = x_array[blockIdx.z]+offset;
__shared__ magmaFloatComplex factor;
if (threadIdx.x == 0) {
factor = MAGMA_C_MAKE(1.0/MAGMA_C_REAL(x[0]), 0.0);
}
__syncthreads();
if ( id < n && id > 0) {
x[id*incx] = x[id*incx] * factor;
//printf("x=%f", x[id*incx]);
}
}
/******************************************************************************/
void magma_cpotf2_csscal_batched(
magma_int_t n, magmaFloatComplex **x_array, magma_int_t incx,
magma_int_t offset, magma_int_t *info_array,
magma_int_t batchCount, magma_queue_t queue)
{
/*
Specialized Csscal perform x[1:n-1]/x[0]
*/
dim3 grid(1, 1, batchCount);
dim3 threads(n, 1, 1);
csscal_kernel_batched
<<< grid, threads, 0, queue->cuda_stream() >>>
(n, x_array, incx, offset, info_array);
}
/******************************************************************************/
__global__ void clacgv_kernel_batched(int n, magmaFloatComplex **x_array, int incx, int offset)
{
int id = threadIdx.x;
magmaFloatComplex *x = x_array[blockIdx.z]+offset;
if ( id < n ) {
x[id*incx] = MAGMA_C_CONJ(x[id*incx]);
}
}
/***************************************************************************//**
Purpose
-------
CLACGV conjugates a complex vector of length N.
Arguments
---------
N (input) INTEGER
The length of the vector X. N >= 0.
X (input/output) COMPLEX array, dimension
(1+(N-1)*abs(INCX))
On entry, the vector of length N to be conjugated.
On exit, X is overwritten with conjg(X).
INCX (input) INTEGER
The spacing between successive elements of X.
@ingroup magma_lacgv_batched
*******************************************************************************/
void magma_clacgv_batched(
magma_int_t n, magmaFloatComplex **x_array, magma_int_t incx,
magma_int_t offset, magma_int_t batchCount, magma_queue_t queue)
{
dim3 grid(1, 1, batchCount);
dim3 threads(n, 1, 1);
clacgv_kernel_batched
<<< grid, threads, 0, queue->cuda_stream() >>>
(n, x_array, incx, offset);
}
/******************************************************************************/
static __device__ void cpotf2_device(int m, int n,
magmaFloatComplex *A, int lda,
magmaFloatComplex alpha,
magmaFloatComplex beta, magma_int_t *info, int gbstep)
{
/*
Each thread block load entire A into shared memory
factorize it and copy back. n must be small enough to fit shared memory.
n is checked by a macro POTF2_TILE_SIZE before the kernel.
*/
// checkinfo to avoid computation of the singular matrix
if (*info != 0 ) return;
int tx = threadIdx.x;
magmaFloatComplex *sdata_A = shared_data;
__shared__ magmaFloatComplex factor;
__shared__ float sum[POTF2_TILE_SIZE];
// load A into sdata_A
if (tx < m)
{
for (int i=0; i < n; i++)
{
sdata_A[tx + i * m] = A[tx + i * lda];
}
}
__syncthreads();
for (int iter=0; iter < n; iter++)
{
float res = MAGMA_D_ZERO;
magmaFloatComplex res1 = MAGMA_C_ZERO;
//1) performs cdotc sum = A[iter, 0:iter-1]*conj(A[iter, 0:iter-1])
//2) updates A[iter,iter] = sqrt(A[iter,iter]-sum);
if (tx < iter)
{
res = MAGMA_C_REAL (sdata_A[iter + tx * m] * MAGMA_C_CONJ(sdata_A[iter + tx * m]));
sum[tx] = res;
}
else
{
sum[tx] = 0.0;
}
__syncthreads();
magma_sum_reduce<POTF2_TILE_SIZE>(tx, sum); //tried on K40: if m=32 n=32 the overall cpotf2_device routine time is 60ms n=16 time=25 n=8 time=20ms
//magma_sum_reduce_n(iter, tx, sum); //tried on K40: if m=32 n=32 the time went from 61ms to 70ms when switching to reduce_n. n=16 time=28.
//magma_sum_reduce_inlined(iter, tx, sum); //tried on K40: similar to magma_sum_reduce<POTF2_TILE_SIZE>(tx, sum);
__shared__ float xreal;
if (tx == 0) {
xreal = MAGMA_C_REAL(sdata_A[iter + iter * m]);
sdata_A[iter + iter * m] = MAGMA_C_MAKE(sqrt(xreal - sum[0]), 0);
if (xreal <= MAGMA_D_ZERO) {
*info = iter + gbstep + 1;
}
}
__syncthreads();
if (xreal <= MAGMA_D_ZERO) return;
__syncthreads();
//clacgv conjugates a complex vector of length iter. //TODO
#ifdef COMPLEX
if (tx < iter)
{
sdata_A[iter + tx * m] = MAGMA_C_CONJ(sdata_A[iter + tx * m]);
}
__syncthreads();
#endif
// cgemv
// Compute elements iter:n-1 of column iter = A(iter:n,0:iter-1) * A(iter-1,0:iter-1) (row).
if (tx < m && tx > iter)
{
for (int j=0; j < iter; j++)
{
res1 += sdata_A[tx + j * m] * sdata_A[iter + j * m]; // TODO move the clacgv conj to be done automatically here implicitly.
}
sdata_A [tx + iter * m] = alpha * res1 + sdata_A [tx + iter * m] * beta;
}
__syncthreads();
//clacgv conjugates a complex vector of length iter.
#ifdef COMPLEX
if (tx < iter)
{
sdata_A[iter + tx * m] = MAGMA_C_CONJ(sdata_A[iter + tx * m]);
}
__syncthreads();
#endif
// csscal perform A[iter:n-1, iter]/A[iter,iter];
if (tx == 0) {
factor = MAGMA_C_MAKE(1.0/MAGMA_C_REAL(sdata_A[iter + iter * m]), 0.0);
}
__syncthreads();
if ( tx < m && tx > iter) {
sdata_A[ tx + iter * m ] *= factor;
}
__syncthreads();
}// end of iter
//copy sdata_A to A
if (tx < m)
{
for (int i=0; i < n; i++)
{
A[tx + i * lda] = sdata_A[tx + i * m];
}
}
}
/******************************************************************************/
__global__ void cpotf2_kernel_batched(int m, int n,
magmaFloatComplex **dA_array, int lda,
magmaFloatComplex alpha,
magmaFloatComplex beta,
magma_int_t *info_array, int gbstep)
{
/*
Each thread block load entire dA_array[blockIdx.z] into shared memory
factorize it and copy back. n must be small enough to fit shared memory.
n is checked by a macro POTF2_TILE_SIZE before the kernel.
*/
int batchid = blockIdx.z;
cpotf2_device(m, n, dA_array[batchid], lda, alpha, beta, &(info_array[batchid]), gbstep);
}
/******************************************************************************/
__global__ void cpotf2_kernel(int m, int n,
magmaFloatComplex *dA, int lda,
magmaFloatComplex alpha,
magmaFloatComplex beta,
magma_int_t *info)
{
cpotf2_device(m, n, dA, lda, alpha, beta, info, 0);
}
/***************************************************************************//**
Purpose
-------
cpotf2 computes the Cholesky factorization of a real symmetric
positive definite matrix A.
The factorization has the form
A = U**H * U, if UPLO = MagmaUpper, or
A = L * L**H, if UPLO = MagmaLower,
where U is an upper triangular matrix and L is lower triangular.
This is the unblocked version of the algorithm, calling Level 2 BLAS.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies whether the upper or lower triangular part of the
symmetric matrix A is stored.
- = MagmaUpper: Upper triangular
- = MagmaLower: Lower triangular
@param[in]
m INTEGER
The number of rows of the matrix A.
@param[in]
n INTEGER
The order of the matrix A. N >= 0 and N <= 512.
@param[in,out]
dA_array Array of pointers, dimension (batchCount).
Each is a COMPLEX array A, dimension (lda,n)
On entry, the symmetric matrix A. If UPLO = MagmaUpper, the leading
n by n upper triangular part of A contains the upper
triangular part of the matrix A, and the strictly lower
triangular part of A is not referenced. If UPLO = MagmaLower, the
leading n by n lower triangular part of A contains the lower
triangular part of the matrix A, and the strictly upper
triangular part of A is not referenced.
\n
On exit, if INFO = 0, the factor U or L from the Cholesky
factorization A = U**H * U or A = L * L**H.
@param[in]
lda INTEGER
The leading dimension of the array A. LDDA >= max(1,N).
@param[out]
info_array INTEGER array, dimension (batchCount).
Each is the info parameter for the corresponding matrix A
- = 0: successful exit
- < 0: if INFO = -k, the k-th argument had an illegal value
- > 0: if INFO = k, the leading minor of order k is not
positive definite, and the factorization could not be
completed.
@param[in]
gbstep INTEGER
Internal use, global step.
@param[in]
batchCount INTEGER
The number of matrices to operate on.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_potf2_batched
*******************************************************************************/
extern "C" magma_int_t
magma_cpotf2_tile_batched(
magma_uplo_t uplo, magma_int_t m, magma_int_t n,
magmaFloatComplex **dA_array, magma_int_t lda,
magma_int_t *info_array, magma_int_t gbstep, magma_int_t batchCount, magma_queue_t queue)
{
magma_int_t arginfo = 0;
if ( uplo != MagmaUpper && uplo != MagmaLower) {
arginfo = -1;
} else if (m < 0 || n < 0 || m > POTF2_TILE_SIZE || n > POTF2_TILE_SIZE) {
arginfo = -2;
} else if (lda < max(1,m)) {
arginfo = -4;
} else if (m < n) {
arginfo = -10;
}
if (uplo == MagmaUpper) {
fprintf( stderr, "%s: uplo=upper is not yet implemented\n", __func__ );
arginfo = -1;
}
if (arginfo != 0) {
magma_xerbla( __func__, -(arginfo) );
return arginfo;
}
// Quick return if possible
if (m == 0 || n == 0) {
return arginfo;
}
magmaFloatComplex alpha = MAGMA_C_NEG_ONE;
magmaFloatComplex beta = MAGMA_C_ONE;
dim3 dimGrid(1, 1, batchCount);
dim3 threads(POTF2_TILE_SIZE, 1);
size_t shmem = sizeof(magmaFloatComplex)*m*n; // + sizeof(float)*(POTF2_TILE_SIZE+1);
cpotf2_kernel_batched
<<< dimGrid, threads, shmem, queue->cuda_stream() >>>
(m, n, dA_array, lda, alpha, beta, info_array, gbstep);
return arginfo;
}
|
24d9e71de216a3b27ea6299f913d41098a82b5ae.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
#include <ops/declarable/helpers/compare_elem.h>
namespace sd {
namespace ops {
namespace helpers {
template <typename T>
static _CUDA_G void comparator(void *vx, const Nd4jLong *xShapeInfo, Nd4jLong length, const bool isStrict, void *reductionBuffer, bool *z) {
auto x = reinterpret_cast<T*>(vx);
auto reduction = reinterpret_cast<uint32_t*>(reductionBuffer);
extern __shared__ uint32_t shared[];
auto tid = threadIdx.x + blockIdx.x * blockDim.x;
shared[threadIdx.x] = 0;
// each thread will compare 2 elements: E and E+1
for (int e = tid; e < length - 1; e += blockDim.x * gridDim.x) {
auto val0 = x[shape::getIndexOffset(e, xShapeInfo)];
auto val1 = x[shape::getIndexOffset(e+1, xShapeInfo)];
bool v = false;
if (isStrict)
v = val1 > val0;
else
v = val1 >= val0;
// store comparison result in shared memory
shared[threadIdx.x] += v ? 0 : 1;
}
__syncthreads();
// aggregate sums in shared memory
for (uint activeThreads = blockDim.x / 2; activeThreads > 0; activeThreads /= 2) {
if (threadIdx.x < activeThreads)
shared[threadIdx.x] += shared[threadIdx.x + activeThreads];
__syncthreads();
}
// store over the grid if we have more than 1 block
if (gridDim.x > 1) {
auto tc = reinterpret_cast<unsigned int *>(reductionBuffer);
__shared__ bool amLast;
tid = threadIdx.x;
if (threadIdx.x == 0)
reduction[blockIdx.x] = shared[0];
__threadfence();
__syncthreads();
if (threadIdx.x == 0) {
unsigned int ticket = atomicInc(&tc[16384], gridDim.x);
amLast = (ticket == gridDim.x - 1);
}
__syncthreads();
if (amLast) {
tc[16384] = 0;
shared[threadIdx.x] = 0;
for (int i = threadIdx.x; i < gridDim.x; i += blockDim.x)
shared[threadIdx.x] += reduction[i];
__syncthreads();
for (uint activeThreads = blockDim.x / 2; activeThreads > 0; activeThreads /= 2) {
if (threadIdx.x < activeThreads)
shared[threadIdx.x] += shared[threadIdx.x + activeThreads];
__syncthreads();
}
__syncthreads();
if (threadIdx.x == 0) {
z[0] = shared[0] == 0;
}
}
}
else {
// if we have only 1 block, we just store results right away
if (threadIdx.x == 0) {
auto tc = reinterpret_cast<unsigned int*>(reductionBuffer);
tc[16384] = 0;
z[0] = shared[0] == 0;
}
}
}
template<typename T>
static void _compare_elem(sd::LaunchContext * context, NDArray *input, bool isStrictlyIncreasing, bool& output) {
auto z = NDArrayFactory::create<bool>(false, context);
const int numThreads = 256;
const int numBlocks = sd::math::nd4j_min<int>(128, sd::math::nd4j_max<int>(1, input->lengthOf() / numThreads));
hipLaunchKernelGGL(( comparator<T>), dim3(numBlocks), dim3(numThreads), numThreads * 4 + 1024, *context->getCudaStream(), input->specialBuffer(), input->specialShapeInfo(), input->lengthOf(), isStrictlyIncreasing, context->getReductionPointer(), reinterpret_cast<bool *>(z.specialBuffer()));
z.tickWriteDevice();
sd::DebugHelper::checkErrorCode(context->getCudaStream(), "is_strictly_increasing");
output = z.e<bool>(0);
}
ND4J_LOCAL void compare_elem(sd::LaunchContext * context, NDArray *input, bool isStrictlyIncreasing, bool& output) {
auto xType = input->dataType();
input->syncToDevice();
BUILD_SINGLE_SELECTOR(xType, _compare_elem, (context, input, isStrictlyIncreasing, output), LIBND4J_TYPES);
}
BUILD_SINGLE_TEMPLATE(template ND4J_LOCAL void _compare_elem, (sd::LaunchContext * context, NDArray *A, bool isStrictlyIncreasing, bool& output);, LIBND4J_TYPES);
}
}
}
| 24d9e71de216a3b27ea6299f913d41098a82b5ae.cu | /* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
#include <ops/declarable/helpers/compare_elem.h>
namespace sd {
namespace ops {
namespace helpers {
// Grid-stride kernel that counts ordering violations in x (element e+1 must be
// >  element e when isStrict, >= otherwise).  Each block reduces its
// per-thread tallies in dynamic shared memory; with multiple blocks, partial
// sums are published to reductionBuffer and the last block to finish (ticket
// counter at tc[16384]) performs the final reduction.  z[0] is set to true iff
// no violation was found anywhere.
// Assumes blockDim.x is a power of two (the halving reduction loops below)
// and that reductionBuffer has room for gridDim.x partials plus the ticket
// slot at index 16384 — TODO confirm against the allocator of the buffer.
template <typename T>
static _CUDA_G void comparator(void *vx, const Nd4jLong *xShapeInfo, Nd4jLong length, const bool isStrict, void *reductionBuffer, bool *z) {
auto x = reinterpret_cast<T*>(vx);
auto reduction = reinterpret_cast<uint32_t*>(reductionBuffer);
// Dynamic shared memory sized by the launcher: one uint32_t tally per thread.
extern __shared__ uint32_t shared[];
auto tid = threadIdx.x + blockIdx.x * blockDim.x;
// Zero the tally before any thread might skip the loop below.
shared[threadIdx.x] = 0;
// each thread will compare 2 elements: E and E+1
for (int e = tid; e < length - 1; e += blockDim.x * gridDim.x) {
auto val0 = x[shape::getIndexOffset(e, xShapeInfo)];
auto val1 = x[shape::getIndexOffset(e+1, xShapeInfo)];
bool v = false;
if (isStrict)
v = val1 > val0;
else
v = val1 >= val0;
// store comparison result in shared memory
// (tally counts VIOLATIONS: +1 when the pair is out of order)
shared[threadIdx.x] += v ? 0 : 1;
}
__syncthreads();
// aggregate sums in shared memory
for (uint activeThreads = blockDim.x / 2; activeThreads > 0; activeThreads /= 2) {
if (threadIdx.x < activeThreads)
shared[threadIdx.x] += shared[threadIdx.x + activeThreads];
__syncthreads();
}
// store over the grid if we have more than 1 block
if (gridDim.x > 1) {
auto tc = reinterpret_cast<unsigned int *>(reductionBuffer);
__shared__ bool amLast;
tid = threadIdx.x;
// Publish this block's violation count, then fence so other blocks see it
// before the ticket counter is bumped.
if (threadIdx.x == 0)
reduction[blockIdx.x] = shared[0];
__threadfence();
__syncthreads();
// atomicInc hands out tickets 0..gridDim.x-1; the holder of the last
// ticket knows every other block has already published its partial.
if (threadIdx.x == 0) {
unsigned int ticket = atomicInc(&tc[16384], gridDim.x);
amLast = (ticket == gridDim.x - 1);
}
__syncthreads();
if (amLast) {
// Reset the ticket slot for the next launch, then reduce the partials.
tc[16384] = 0;
shared[threadIdx.x] = 0;
for (int i = threadIdx.x; i < gridDim.x; i += blockDim.x)
shared[threadIdx.x] += reduction[i];
__syncthreads();
for (uint activeThreads = blockDim.x / 2; activeThreads > 0; activeThreads /= 2) {
if (threadIdx.x < activeThreads)
shared[threadIdx.x] += shared[threadIdx.x + activeThreads];
__syncthreads();
}
__syncthreads();
// Monotonic iff zero violations were counted across the whole grid.
if (threadIdx.x == 0) {
z[0] = shared[0] == 0;
}
}
}
else {
// if we have only 1 block, we just store results right away
if (threadIdx.x == 0) {
auto tc = reinterpret_cast<unsigned int*>(reductionBuffer);
tc[16384] = 0;
z[0] = shared[0] == 0;
}
}
}
// Launches the comparator<T> kernel to decide whether `input` is monotonically
// increasing (strictly so when isStrictlyIncreasing), writing the verdict into
// `output`.
template<typename T>
static void _compare_elem(sd::LaunchContext * context, NDArray *input, bool isStrictlyIncreasing, bool& output) {
// Single device-side bool the kernel writes its result into.
auto z = NDArrayFactory::create<bool>(false, context);
const int numThreads = 256;
// Cap at 128 blocks, but always launch at least one block even when the
// input is shorter than numThreads.
const int numBlocks = sd::math::nd4j_min<int>(128, sd::math::nd4j_max<int>(1, input->lengthOf() / numThreads));
// Dynamic shared memory: numThreads * sizeof(uint32_t) for the per-thread
// violation tallies, plus 1024 bytes of slack.
comparator<T><<<numBlocks, numThreads, numThreads * 4 + 1024, *context->getCudaStream()>>>(input->specialBuffer(), input->specialShapeInfo(), input->lengthOf(), isStrictlyIncreasing, context->getReductionPointer(), reinterpret_cast<bool *>(z.specialBuffer()));
z.tickWriteDevice();
sd::DebugHelper::checkErrorCode(context->getCudaStream(), "is_strictly_increasing");
// Read element 0 of z back on the host.
output = z.e<bool>(0);
}
// Type-dispatching entry point: selects the _compare_elem<T> instantiation
// matching the input's runtime data type.
ND4J_LOCAL void compare_elem(sd::LaunchContext * context, NDArray *input, bool isStrictlyIncreasing, bool& output) {
auto xType = input->dataType();
// Ensure the device copy of the data is current before the kernel reads it.
input->syncToDevice();
BUILD_SINGLE_SELECTOR(xType, _compare_elem, (context, input, isStrictlyIncreasing, output), LIBND4J_TYPES);
}
// Explicit instantiations of _compare_elem for every supported element type.
BUILD_SINGLE_TEMPLATE(template ND4J_LOCAL void _compare_elem, (sd::LaunchContext * context, NDArray *A, bool isStrictlyIncreasing, bool& output);, LIBND4J_TYPES);
}
}
}
|
ff7d66fa1d8200799f7e454064e464b082ebfcdf.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
const float eps = 0.0001f;
const float dt = 0.01f;
const int block_size = 1024;
const int N = 128 * block_size;
#define coord float4
// One N-body integration step: accumulate the softened inverse-square force on
// each body from all n bodies, then advance velocity and position by dt.
//
// Grid layout: 1-D grid, blockDim.x == block_size, one thread per body.
// Requires n % block_size == 0 so every shared-memory tile load below is in
// bounds (see the assert note on the tile loop).
//
// new_p/new_v: output position/velocity arrays.
// p/v:         input position/velocity arrays.
// n:           number of bodies.
// dt:          integration time step.
__global__ void integrate(coord *new_p, coord *new_v,
                          coord *p, coord *v, int n, float dt)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= n)
        return;
    coord body_pos = p[index];
    coord body_vel = v[index];
    coord f;
    f.x = 0;
    f.y = 0;
    f.z = 0;
    // Stage positions through shared memory one tile at a time so each
    // position is loaded from global memory once per block, not once per
    // thread.
    __shared__ coord sp[block_size];
    // Assert: n % block_size == 0
    for (int ind = 0; ind < n; ind += block_size) {
        sp[threadIdx.x] = p[ind + threadIdx.x];
        __syncthreads();
        for (int i = 0; i < block_size; i++) {
            // Vector from this body to sp[i]
            coord r;
            r.x = sp[i].x - body_pos.x;
            r.y = sp[i].y - body_pos.y;
            r.z = sp[i].z - body_pos.z;
            // 1.0f, not 1.0: the double literal in the original promoted this
            // division (and the cubing via invDist) to double precision on
            // every inner-loop iteration.  eps*eps softens the singularity at
            // r == 0, which also covers the body's interaction with itself.
            float invDist = 1.0f / sqrtf(r.x * r.x + r.y * r.y + r.z * r.z + eps * eps);
            float s = invDist * invDist * invDist;
            // Add force of body i (no per-body mass factor in this model)
            f.x += r.x * s;
            f.y += r.y * s;
            f.z += r.z * s;
        }
        __syncthreads();
    }
    // Advance velocity first, then position using the updated velocity.
    body_vel.x += f.x * dt;
    body_vel.y += f.y * dt;
    body_vel.z += f.z * dt;
    body_pos.x += body_vel.x * dt;
    body_pos.y += body_vel.y * dt;
    body_pos.z += body_vel.z * dt;
    new_p[index] = body_pos;
    new_v[index] = body_vel;
}
/* Wall-clock time in seconds with microsecond resolution, used to time the
 * GPU and memory phases in main(). */
double wtime()
{
    struct timeval now;
    gettimeofday(&now, NULL);
    double seconds = (double)now.tv_sec;
    double micros = (double)now.tv_usec;
    return seconds + micros * 1E-6;
}
/* Fill the x/y/z components of each of the n coords with uniform
 * pseudo-random values in [-0.5, 0.5]; the w component is left untouched. */
void init_rand(coord *v, int n)
{
    for (coord *b = v; b != v + n; ++b) {
        b->x = rand() / (float)RAND_MAX - 0.5f;
        b->y = rand() / (float)RAND_MAX - 0.5f;
        b->z = rand() / (float)RAND_MAX - 0.5f;
    }
}
// Driver: builds N random bodies, runs two integration steps on the GPU using
// ping-pong device buffers, and reports kernel vs. memory-transfer time.
// NOTE(review): hipMalloc/hipMemcpy/kernel-launch return codes are never
// checked, and malloc results are not NULL-checked — failures go unnoticed.
int main()
{
double tgpu = 0, tmem = 0;
size_t size = sizeof(coord) * N;
coord *p = (coord *)malloc(size);
coord *v = (coord *)malloc(size);
// Two device buffers per array: the kernel reads [index] and writes
// [index ^ 1] (ping-pong).
coord *d_p[2] = {NULL, NULL};
coord *d_v[2] = {NULL, NULL};
init_rand(p, N);
init_rand(v, N);
// tmem accumulates time spent in allocation + host<->device copies.
tmem = -wtime();
hipMalloc((void **)&d_p[0], size);
hipMalloc((void **)&d_p[1], size);
hipMalloc((void **)&d_v[0], size);
hipMalloc((void **)&d_v[1], size);
hipMemcpy(d_p[0], p, size, hipMemcpyHostToDevice);
hipMemcpy(d_v[0], v, size, hipMemcpyHostToDevice);
tmem += wtime();
tgpu = -wtime();
dim3 block(block_size);
dim3 grid((N + block_size - 1) / block_size);
int index = 0;
// Two steps; index toggles after each launch, so after the loop d_p[index]
// holds the newest positions.
for (int i = 0; i < 2; i++, index ^= 1) {
hipLaunchKernelGGL(( integrate), dim3(grid), dim3(block), 0, 0, d_p[index ^ 1], d_v[index ^ 1], d_p[index], d_v[index], N, dt);
}
// Launches are asynchronous: synchronize before stopping the GPU timer.
hipDeviceSynchronize();
tgpu += wtime();
tmem -= wtime();
hipMemcpy(p, d_p[index], size, hipMemcpyDeviceToHost);
hipMemcpy(v, d_v[index], size, hipMemcpyDeviceToHost);
tmem += wtime();
/*
for (int i = 0; i < N; i++) {
printf("%4d: %f %f %f %f %f %f\n", i, p[i].x, p[i].y, p[i].z, v[i].x, v[i].y, v[i].z);
}
*/
printf("GPU version (sec.): %.6f\n", tgpu);
printf("Memory ops. (sec.): %.6f\n", tmem);
printf(" Total time (sec.): %.6f\n", tgpu + tmem);
hipFree(d_p[0]);
hipFree(d_p[1]);
hipFree(d_v[0]);
hipFree(d_v[1]);
free(p);
free(v);
hipDeviceReset();
return 0;
}
| ff7d66fa1d8200799f7e454064e464b082ebfcdf.cu | #include <stdio.h>
#include <sys/time.h>
#include <cuda_runtime.h>
const float eps = 0.0001f;
const float dt = 0.01f;
const int block_size = 1024;
const int N = 128 * block_size;
#define coord float4
// One N-body integration step: accumulate the softened inverse-square force on
// each body from all n bodies, then advance velocity and position by dt.
//
// Grid layout: 1-D grid, blockDim.x == block_size, one thread per body.
// Requires n % block_size == 0 so every shared-memory tile load below is in
// bounds (see the assert note on the tile loop).
//
// new_p/new_v: output position/velocity arrays.
// p/v:         input position/velocity arrays.
// n:           number of bodies.
// dt:          integration time step.
__global__ void integrate(coord *new_p, coord *new_v,
                          coord *p, coord *v, int n, float dt)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= n)
        return;
    coord body_pos = p[index];
    coord body_vel = v[index];
    coord f;
    f.x = 0;
    f.y = 0;
    f.z = 0;
    // Stage positions through shared memory one tile at a time so each
    // position is loaded from global memory once per block, not once per
    // thread.
    __shared__ coord sp[block_size];
    // Assert: n % block_size == 0
    for (int ind = 0; ind < n; ind += block_size) {
        sp[threadIdx.x] = p[ind + threadIdx.x];
        __syncthreads();
        for (int i = 0; i < block_size; i++) {
            // Vector from this body to sp[i]
            coord r;
            r.x = sp[i].x - body_pos.x;
            r.y = sp[i].y - body_pos.y;
            r.z = sp[i].z - body_pos.z;
            // 1.0f, not 1.0: the double literal in the original promoted this
            // division (and the cubing via invDist) to double precision on
            // every inner-loop iteration.  eps*eps softens the singularity at
            // r == 0, which also covers the body's interaction with itself.
            float invDist = 1.0f / sqrtf(r.x * r.x + r.y * r.y + r.z * r.z + eps * eps);
            float s = invDist * invDist * invDist;
            // Add force of body i (no per-body mass factor in this model)
            f.x += r.x * s;
            f.y += r.y * s;
            f.z += r.z * s;
        }
        __syncthreads();
    }
    // Advance velocity first, then position using the updated velocity.
    body_vel.x += f.x * dt;
    body_vel.y += f.y * dt;
    body_vel.z += f.z * dt;
    body_pos.x += body_vel.x * dt;
    body_pos.y += body_vel.y * dt;
    body_pos.z += body_vel.z * dt;
    new_p[index] = body_pos;
    new_v[index] = body_vel;
}
/* Wall-clock time in seconds with microsecond resolution, used to time the
 * GPU and memory phases in main(). */
double wtime()
{
    struct timeval now;
    gettimeofday(&now, NULL);
    double seconds = (double)now.tv_sec;
    double micros = (double)now.tv_usec;
    return seconds + micros * 1E-6;
}
/* Fill the x/y/z components of each of the n coords with uniform
 * pseudo-random values in [-0.5, 0.5]; the w component is left untouched. */
void init_rand(coord *v, int n)
{
    for (coord *b = v; b != v + n; ++b) {
        b->x = rand() / (float)RAND_MAX - 0.5f;
        b->y = rand() / (float)RAND_MAX - 0.5f;
        b->z = rand() / (float)RAND_MAX - 0.5f;
    }
}
// Driver: builds N random bodies, runs two integration steps on the GPU using
// ping-pong device buffers, and reports kernel vs. memory-transfer time.
// NOTE(review): cudaMalloc/cudaMemcpy/kernel-launch return codes are never
// checked, and malloc results are not NULL-checked — failures go unnoticed.
int main()
{
double tgpu = 0, tmem = 0;
size_t size = sizeof(coord) * N;
coord *p = (coord *)malloc(size);
coord *v = (coord *)malloc(size);
// Two device buffers per array: the kernel reads [index] and writes
// [index ^ 1] (ping-pong).
coord *d_p[2] = {NULL, NULL};
coord *d_v[2] = {NULL, NULL};
init_rand(p, N);
init_rand(v, N);
// tmem accumulates time spent in allocation + host<->device copies.
tmem = -wtime();
cudaMalloc((void **)&d_p[0], size);
cudaMalloc((void **)&d_p[1], size);
cudaMalloc((void **)&d_v[0], size);
cudaMalloc((void **)&d_v[1], size);
cudaMemcpy(d_p[0], p, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_v[0], v, size, cudaMemcpyHostToDevice);
tmem += wtime();
tgpu = -wtime();
dim3 block(block_size);
dim3 grid((N + block_size - 1) / block_size);
int index = 0;
// Two steps; index toggles after each launch, so after the loop d_p[index]
// holds the newest positions.
for (int i = 0; i < 2; i++, index ^= 1) {
integrate<<<grid, block>>>(d_p[index ^ 1], d_v[index ^ 1], d_p[index], d_v[index], N, dt);
}
// Launches are asynchronous: synchronize before stopping the GPU timer.
cudaDeviceSynchronize();
tgpu += wtime();
tmem -= wtime();
cudaMemcpy(p, d_p[index], size, cudaMemcpyDeviceToHost);
cudaMemcpy(v, d_v[index], size, cudaMemcpyDeviceToHost);
tmem += wtime();
/*
for (int i = 0; i < N; i++) {
printf("%4d: %f %f %f %f %f %f\n", i, p[i].x, p[i].y, p[i].z, v[i].x, v[i].y, v[i].z);
}
*/
printf("GPU version (sec.): %.6f\n", tgpu);
printf("Memory ops. (sec.): %.6f\n", tmem);
printf(" Total time (sec.): %.6f\n", tgpu + tmem);
cudaFree(d_p[0]);
cudaFree(d_p[1]);
cudaFree(d_v[0]);
cudaFree(d_v[1]);
free(p);
free(v);
cudaDeviceReset();
return 0;
}
|
38ab4b0a2ca9b66e3657b3cfa36037594d5ec086.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright 2012 Jeffrey Blanchard, Erik Opavsky, and Emircan Uysaler
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "quickMultiSelect.cpp"
#include "SMOSheader.cu"
#define NUMBEROFALGORITHMS 3
char* namesOfMultiselectTimingFunctions[NUMBEROFALGORITHMS] =
{"Sort and Choose Multiselect", "Bucket Multiselect", "CPU QuickMultiselect"};
using namespace std;
namespace CompareMultiselect {
/* This function compares bucketMultiselect with the other algorithms given in the
defined range of kVals and array size.
*/
/* Runs every enabled multiselect implementation `numTests` times on freshly
 * generated random vectors of `size` elements, times each run in a shuffled
 * order, verifies that all algorithms agree with algorithm 0 (sort-and-choose),
 * and appends per-test timings plus an error count to the CSV `fileNamecsv`.
 * When `data` is non-NULL it is used as the input vector instead of generating
 * one (note: the malloc'd h_vec then leaks — see NOTE below).
 * NOTE(review): totalTimesPerAlgorithm is a float array but is zeroed with
 * NUMBEROFALGORITHMS * sizeof(uint) bytes; correct only while
 * sizeof(uint) == sizeof(float). */
template<typename T>
void compareMultiselectAlgorithms(uint size, uint* kVals, uint numKs, uint numTests
, uint *algorithmsToTest, uint generateType, uint kGenerateType, char* fileNamecsv
, T* data = NULL) {
// allocate space for operations
T *h_vec, *h_vec_copy;
float timeArray[NUMBEROFALGORITHMS][numTests];
T * resultsArray[NUMBEROFALGORITHMS][numTests];
float totalTimesPerAlgorithm[NUMBEROFALGORITHMS];
uint winnerArray[numTests];
uint timesWon[NUMBEROFALGORITHMS];
uint i,j,m,x;
int runOrder[NUMBEROFALGORITHMS];
unsigned long long seed; //, seed2;
results_t<T> *temp;
ofstream fileCsv;
timeval t1; //, t2;
typedef results_t<T>* (*ptrToTimingFunction)(T*, uint, uint *, uint);
typedef void (*ptrToGeneratingFunction)(T*, uint, hiprandGenerator_t);
//these are the functions that can be called
ptrToTimingFunction arrayOfTimingFunctions[NUMBEROFALGORITHMS] =
{&timeSortAndChooseMultiselect<T>,
&timeBucketMultiselect<T>,
&timeQuickMultiselect<T>};
ptrToGeneratingFunction *arrayOfGenerators;
char** namesOfGeneratingFunctions;
// this is the array of names of functions that generate problems of this type,
// ie float, double, or uint
namesOfGeneratingFunctions = returnNamesOfGenerators<T>();
arrayOfGenerators = (ptrToGeneratingFunction *) returnGenFunctions<T>();
printf("Files will be written to %s\n", fileNamecsv);
fileCsv.open(fileNamecsv, ios_base::app);
//zero out the totals and times won
bzero(totalTimesPerAlgorithm, NUMBEROFALGORITHMS * sizeof(uint));
bzero(timesWon, NUMBEROFALGORITHMS * sizeof(uint));
//allocate space for h_vec, and h_vec_copy
h_vec = (T *) malloc(size * sizeof(T));
h_vec_copy = (T *) malloc(size * sizeof(T));
//create the random generators.
hiprandGenerator_t generator;
srand(unsigned(time(NULL)));
printf("The distribution is: %s\n", namesOfGeneratingFunctions[generateType]);
printf("The k distribution is: %s\n", namesOfKGenerators[kGenerateType]);
/***********************************************/
/*********** START RUNNING TESTS ************
/***********************************************/
for(i = 0; i < numTests; i++) {
//hipDeviceReset();
// Seed the GPU RNG from the wall clock so each test gets a fresh vector.
gettimeofday(&t1, NULL);
seed = t1.tv_usec * t1.tv_sec;
// Shuffle algorithm order each test to avoid systematic warm-up bias.
for(m = 0; m < NUMBEROFALGORITHMS;m++)
runOrder[m] = m;
std::random_shuffle(runOrder, runOrder + NUMBEROFALGORITHMS);
fileCsv << size << "," << numKs << "," <<
namesOfGeneratingFunctions[generateType] << "," <<
namesOfKGenerators[kGenerateType] << ",";
hiprandCreateGenerator(&generator, HIPRAND_RNG_PSEUDO_DEFAULT);
hiprandSetPseudoRandomGeneratorSeed(generator,seed);
printf("Running test %u of %u for size: %u and numK: %u\n", i + 1,
numTests, size, numKs);
//generate the random vector using the specified distribution
// NOTE(review): when data != NULL the earlier malloc'd h_vec leaks here.
if(data == NULL)
arrayOfGenerators[generateType](h_vec, size, generator);
else
h_vec = data;
//copy the vector to h_vec_copy, which will be used to restore it later
memcpy(h_vec_copy, h_vec, size * sizeof(T));
/*
***************************************************
****** In this file, the kDistribution is always set to UNIFORM (kGenerateType = 1)
****** so this regeneration of the order statistics is not needed.
****** It is saved here in case one wants to run these tests for a different kDistribution
***************************************************
// if the kdistribution is random, we need to generate new a kList for each new random problem instance.
if ( (kGenerateType != 1) && (i>0) ){
gettimeofday(&t2, NULL);
seed2 = t2.tv_usec * t2.tv_sec;
hiprandGenerator_t generator2;
srand(unsigned(time(NULL)));
hiprandCreateGenerator(&generator2, HIPRAND_RNG_PSEUDO_DEFAULT);
hiprandSetPseudoRandomGeneratorSeed(generator2,seed2);
arrayOfKDistributionGenerators[kGenerateType](kVals, numKs, size, generator2);
}
*/
winnerArray[i] = 0;
float currentWinningTime = INFINITY;
//run the various timing functions
for(x = 0; x < NUMBEROFALGORITHMS; x++){
j = runOrder[x];
if(algorithmsToTest[j]){
//run timing function j
printf("TESTING: %u\n", j);
temp = arrayOfTimingFunctions[j](h_vec_copy, size, kVals, numKs);
//record the time result
timeArray[j][i] = temp->time;
//record the value returned
// (temp->vals stays owned by resultsArray and is freed at the end)
resultsArray[j][i] = temp->vals;
//update the current "winner" if necessary
if(timeArray[j][i] < currentWinningTime){
currentWinningTime = temp->time;
winnerArray[i] = j;
}
//perform clean up
// Restore the (possibly sorted/permuted) working copy for the next algorithm.
free(temp);
memcpy(h_vec_copy, h_vec, size * sizeof(T));
}
}
hiprandDestroyGenerator(generator);
for(x = 0; x < NUMBEROFALGORITHMS; x++)
if(algorithmsToTest[x])
fileCsv << namesOfMultiselectTimingFunctions[x] << "," << timeArray[x][i] << ",";
// check for errors, and output information to recreate problem
// Algorithm 0 (sort-and-choose) is treated as ground truth.
uint flag = 0;
for(m = 1; m < NUMBEROFALGORITHMS;m++)
if(algorithmsToTest[m])
for (j = 0; j < numKs; j++) {
if(resultsArray[m][i][j] != resultsArray[0][i][j]) {
flag++;
fileCsv << "\nERROR ON TEST " << i << " of " << numTests << " tests!!!!!\n";
fileCsv << "vector size = " << size << "\nvector seed = " << seed << "\n";
fileCsv << "numKs = " << numKs << "\n";
fileCsv << "wrong k = " << kVals[j] << " kIndex = " << j <<
" wrong result = " << resultsArray[m][i][j] << " correct result = " <<
resultsArray[0][i][j] << "\n";
std::cout <<namesOfMultiselectTimingFunctions[m] <<
" did not return the correct answer on test " << i + 1 << " at k[" << j <<
"]. It got "<< resultsArray[m][i][j];
std::cout << " instead of " << resultsArray[0][i][j] << ".\n" ;
std::cout << "RESULT:\t";
PrintFunctions::printBinary(resultsArray[m][i][j]);
std::cout << "Right:\t";
PrintFunctions::printBinary(resultsArray[0][i][j]);
}
}
fileCsv << flag << "\n";
}
//calculate the total time each algorithm took
for(i = 0; i < numTests; i++)
for(j = 0; j < NUMBEROFALGORITHMS;j++)
if(algorithmsToTest[j])
totalTimesPerAlgorithm[j] += timeArray[j][i];
//count the number of times each algorithm won.
for(i = 0; i < numTests;i++)
timesWon[winnerArray[i]]++;
printf("\n\n");
//print out the average times
for(i = 0; i < NUMBEROFALGORITHMS; i++)
if(algorithmsToTest[i])
printf("%-20s averaged: %f ms\n", namesOfMultiselectTimingFunctions[i], totalTimesPerAlgorithm[i] / numTests);
for(i = 0; i < NUMBEROFALGORITHMS; i++)
if(algorithmsToTest[i])
printf("%s won %u times\n", namesOfMultiselectTimingFunctions[i], timesWon[i]);
// free results
for(i = 0; i < numTests; i++)
for(m = 0; m < NUMBEROFALGORITHMS; m++)
if(algorithmsToTest[m])
free(resultsArray[m][i]);
//free h_vec and h_vec_copy
if(data == NULL)
free(h_vec);
free(h_vec_copy);
//close the file
fileCsv.close();
}
/* This function generates the array of kVals to work on and acts as a wrapper for
comparison.
*/
/* For each power-of-two vector size in [2^startPower, 2^stopPower], generates
 * one fixed list of `stopK` order statistics with the requested k-distribution
 * and hands the problem to compareMultiselectAlgorithms<T>.
 * NOTE(review): kJump (and startK) are accepted for interface symmetry but
 * are not used in this body.
 * NOTE(review): hipDeviceReset() inside the loop while `generator` (created
 * before the loop) is still used afterwards looks suspect — confirm the
 * generator survives a device reset. */
template<typename T>
void runTests (uint generateType, char* fileName, uint startPower, uint stopPower
, uint timesToTestEachK, uint kDistribution, uint startK, uint stopK, uint kJump) {
uint algorithmsToRun[NUMBEROFALGORITHMS]= {1, 1, 1};
uint size;
// uint i;
// Variable-length array (compiler extension in C++): holds the k values.
uint arrayOfKs[stopK+1];
/*
*****************************
**** In this file, the kDistribution is not random.
**** The number of order statistics (numKs) is fixed at 101.
**** We only need to generate the kDistribuion one time for each size.
*****************************
*/
unsigned long long seed;
timeval t1;
gettimeofday(&t1, NULL);
seed = t1.tv_usec * t1.tv_sec;
hiprandGenerator_t generator;
srand(unsigned(time(NULL)));
hiprandCreateGenerator(&generator, HIPRAND_RNG_PSEUDO_DEFAULT);
hiprandSetPseudoRandomGeneratorSeed(generator,seed);
// double the array size to the next powers of 2
for(size = (1 << startPower); size <= (1 << stopPower); size *= 2) {
hipDeviceReset();
arrayOfKDistributionGenerators[kDistribution](arrayOfKs, stopK, size, generator);
compareMultiselectAlgorithms<T>(size, arrayOfKs, stopK, timesToTestEachK,
algorithmsToRun, generateType, kDistribution, fileName);
} // end for(size)
hiprandDestroyGenerator(generator);
} // end runTests
} // end namespace CompareMultiselect
// Driver: for four value distributions and three element types, builds a
// descriptive output filename and runs the full multiselect benchmark suite.
// NOTE(review): typeString is malloc'd (10 bytes) and then overwritten with
// string-literal pointers in each case below — the allocation leaks, and the
// literal-to-char* conversions are deprecated.  hostName and typeString are
// never freed.
int main (int argc, char *argv[]) {
using namespace CompareMultiselect;
char *fileName, *hostName, *typeString;
fileName = (char*) malloc(128 * sizeof(char));
typeString = (char*) malloc(10 * sizeof(char));
hostName = (char*) malloc(20 * sizeof(char));
gethostname(hostName, 20);
#ifdef GPUNUMBER
hipSetDevice(GPUNUMBER);
#endif
// Human-readable timestamp for the output filename; strip asctime's newline.
time_t rawtime;
struct tm * timeinfo;
time ( &rawtime );
timeinfo = localtime ( &rawtime );
char * humanTime = asctime(timeinfo);
humanTime[strlen(humanTime)-1] = '\0';
uint testCount, type,distributionType,startPower,stopPower,kDistribution,startK
,stopK,jumpK;
uint vecDistr[4];
vecDistr[0]=0; // Uniform
vecDistr[1]=1; // Normal
vecDistr[2]=3; // Half Normal
vecDistr[3]=9; // Cauchy
kDistribution=1; // Uniformly Spaced
startPower=10;
stopPower=24;
startK=101; // This gives the 0,1,2,...,98,99,100 percentiles
jumpK=1;
stopK=101;
testCount=25;
for(int j=0; j<4; j++){
distributionType = vecDistr[j];
// type 0 = float (all distributions), 1 = double (uniform/normal only),
// 2 = uint (uniform only).
for(type=0; type<3; type++){
switch(type){
case 0:
typeString = "float";
snprintf(fileName, 128,
"CPUQuickMultiselect %s %s k-dist:%s 2^%d to 2^%d (%d:%d:%d) %d-tests on %s at %s",
typeString, getDistributionOptions(type, distributionType),
getKDistributionOptions(kDistribution), startPower, stopPower,
startK, jumpK, stopK, testCount, hostName, humanTime);
printf("File Name: %s \n", fileName);
runTests<float>(distributionType,fileName,startPower,stopPower,testCount,
kDistribution,startK,stopK,jumpK);
break;
case 1:
typeString = "double";
if (distributionType<2){
snprintf(fileName, 128,
"CPUQuickMultiselect %s %s k-dist:%s 2^%d to 2^%d (%d:%d:%d) %d-tests on %s at %s",
typeString, getDistributionOptions(type, distributionType),
getKDistributionOptions(kDistribution), startPower, stopPower,
startK, jumpK, stopK, testCount, hostName, humanTime);
printf("File Name: %s \n", fileName);
runTests<double>(distributionType,fileName,startPower,stopPower,testCount,
kDistribution,startK,stopK,jumpK);
} // end if(distributionType)
break;
case 2:
typeString = "uint";
if (distributionType<1){
snprintf(fileName, 128,
"CPUQuickMultiselect %s %s k-dist:%s 2^%d to 2^%d (%d:%d:%d) %d-tests on %s at %s",
typeString, getDistributionOptions(type, distributionType),
getKDistributionOptions(kDistribution), startPower, stopPower,
startK, jumpK, stopK, testCount, hostName, humanTime);
printf("File Name: %s \n", fileName);
runTests<uint>(distributionType,fileName,startPower,stopPower,testCount,
kDistribution,startK,stopK,jumpK);
} // end if(distributionType)
break;
default:
printf("You entered and invalid option, now exiting\n");
break;
} // end switch(type)
} // end for(type)
} // end for (int j)
free (fileName);
return 0;
}
| 38ab4b0a2ca9b66e3657b3cfa36037594d5ec086.cu | /* Copyright 2012 Jeffrey Blanchard, Erik Opavsky, and Emircan Uysaler
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "quickMultiSelect.cpp"
#include "SMOSheader.cu"
#define NUMBEROFALGORITHMS 3
char* namesOfMultiselectTimingFunctions[NUMBEROFALGORITHMS] =
{"Sort and Choose Multiselect", "Bucket Multiselect", "CPU QuickMultiselect"};
using namespace std;
namespace CompareMultiselect {
/* This function compares bucketMultiselect with the other algorithms given in the
defined range of kVals and array size.
*/
/* Runs every enabled multiselect implementation `numTests` times on freshly
 * generated random vectors of `size` elements, times each run in a shuffled
 * order, verifies that all algorithms agree with algorithm 0 (sort-and-choose),
 * and appends per-test timings plus an error count to the CSV `fileNamecsv`.
 * When `data` is non-NULL it is used as the input vector instead of generating
 * one (note: the malloc'd h_vec then leaks — see NOTE below).
 * NOTE(review): totalTimesPerAlgorithm is a float array but is zeroed with
 * NUMBEROFALGORITHMS * sizeof(uint) bytes; correct only while
 * sizeof(uint) == sizeof(float). */
template<typename T>
void compareMultiselectAlgorithms(uint size, uint* kVals, uint numKs, uint numTests
, uint *algorithmsToTest, uint generateType, uint kGenerateType, char* fileNamecsv
, T* data = NULL) {
// allocate space for operations
T *h_vec, *h_vec_copy;
float timeArray[NUMBEROFALGORITHMS][numTests];
T * resultsArray[NUMBEROFALGORITHMS][numTests];
float totalTimesPerAlgorithm[NUMBEROFALGORITHMS];
uint winnerArray[numTests];
uint timesWon[NUMBEROFALGORITHMS];
uint i,j,m,x;
int runOrder[NUMBEROFALGORITHMS];
unsigned long long seed; //, seed2;
results_t<T> *temp;
ofstream fileCsv;
timeval t1; //, t2;
typedef results_t<T>* (*ptrToTimingFunction)(T*, uint, uint *, uint);
typedef void (*ptrToGeneratingFunction)(T*, uint, curandGenerator_t);
//these are the functions that can be called
ptrToTimingFunction arrayOfTimingFunctions[NUMBEROFALGORITHMS] =
{&timeSortAndChooseMultiselect<T>,
&timeBucketMultiselect<T>,
&timeQuickMultiselect<T>};
ptrToGeneratingFunction *arrayOfGenerators;
char** namesOfGeneratingFunctions;
// this is the array of names of functions that generate problems of this type,
// ie float, double, or uint
namesOfGeneratingFunctions = returnNamesOfGenerators<T>();
arrayOfGenerators = (ptrToGeneratingFunction *) returnGenFunctions<T>();
printf("Files will be written to %s\n", fileNamecsv);
fileCsv.open(fileNamecsv, ios_base::app);
//zero out the totals and times won
bzero(totalTimesPerAlgorithm, NUMBEROFALGORITHMS * sizeof(uint));
bzero(timesWon, NUMBEROFALGORITHMS * sizeof(uint));
//allocate space for h_vec, and h_vec_copy
h_vec = (T *) malloc(size * sizeof(T));
h_vec_copy = (T *) malloc(size * sizeof(T));
//create the random generators.
curandGenerator_t generator;
srand(unsigned(time(NULL)));
printf("The distribution is: %s\n", namesOfGeneratingFunctions[generateType]);
printf("The k distribution is: %s\n", namesOfKGenerators[kGenerateType]);
/***********************************************/
/*********** START RUNNING TESTS ************
/***********************************************/
for(i = 0; i < numTests; i++) {
//cudaDeviceReset();
// Seed the GPU RNG from the wall clock so each test gets a fresh vector.
gettimeofday(&t1, NULL);
seed = t1.tv_usec * t1.tv_sec;
// Shuffle algorithm order each test to avoid systematic warm-up bias.
for(m = 0; m < NUMBEROFALGORITHMS;m++)
runOrder[m] = m;
std::random_shuffle(runOrder, runOrder + NUMBEROFALGORITHMS);
fileCsv << size << "," << numKs << "," <<
namesOfGeneratingFunctions[generateType] << "," <<
namesOfKGenerators[kGenerateType] << ",";
curandCreateGenerator(&generator, CURAND_RNG_PSEUDO_DEFAULT);
curandSetPseudoRandomGeneratorSeed(generator,seed);
printf("Running test %u of %u for size: %u and numK: %u\n", i + 1,
numTests, size, numKs);
//generate the random vector using the specified distribution
// NOTE(review): when data != NULL the earlier malloc'd h_vec leaks here.
if(data == NULL)
arrayOfGenerators[generateType](h_vec, size, generator);
else
h_vec = data;
//copy the vector to h_vec_copy, which will be used to restore it later
memcpy(h_vec_copy, h_vec, size * sizeof(T));
/*
***************************************************
****** In this file, the kDistribution is always set to UNIFORM (kGenerateType = 1)
****** so this regeneration of the order statistics is not needed.
****** It is saved here in case one wants to run these tests for a different kDistribution
***************************************************
// if the kdistribution is random, we need to generate new a kList for each new random problem instance.
if ( (kGenerateType != 1) && (i>0) ){
gettimeofday(&t2, NULL);
seed2 = t2.tv_usec * t2.tv_sec;
curandGenerator_t generator2;
srand(unsigned(time(NULL)));
curandCreateGenerator(&generator2, CURAND_RNG_PSEUDO_DEFAULT);
curandSetPseudoRandomGeneratorSeed(generator2,seed2);
arrayOfKDistributionGenerators[kGenerateType](kVals, numKs, size, generator2);
}
*/
winnerArray[i] = 0;
float currentWinningTime = INFINITY;
//run the various timing functions
for(x = 0; x < NUMBEROFALGORITHMS; x++){
j = runOrder[x];
if(algorithmsToTest[j]){
//run timing function j
printf("TESTING: %u\n", j);
temp = arrayOfTimingFunctions[j](h_vec_copy, size, kVals, numKs);
//record the time result
timeArray[j][i] = temp->time;
//record the value returned
// (temp->vals stays owned by resultsArray and is freed at the end)
resultsArray[j][i] = temp->vals;
//update the current "winner" if necessary
if(timeArray[j][i] < currentWinningTime){
currentWinningTime = temp->time;
winnerArray[i] = j;
}
//perform clean up
// Restore the (possibly sorted/permuted) working copy for the next algorithm.
free(temp);
memcpy(h_vec_copy, h_vec, size * sizeof(T));
}
}
curandDestroyGenerator(generator);
for(x = 0; x < NUMBEROFALGORITHMS; x++)
if(algorithmsToTest[x])
fileCsv << namesOfMultiselectTimingFunctions[x] << "," << timeArray[x][i] << ",";
// check for errors, and output information to recreate problem
// Algorithm 0 (sort-and-choose) is treated as ground truth.
uint flag = 0;
for(m = 1; m < NUMBEROFALGORITHMS;m++)
if(algorithmsToTest[m])
for (j = 0; j < numKs; j++) {
if(resultsArray[m][i][j] != resultsArray[0][i][j]) {
flag++;
fileCsv << "\nERROR ON TEST " << i << " of " << numTests << " tests!!!!!\n";
fileCsv << "vector size = " << size << "\nvector seed = " << seed << "\n";
fileCsv << "numKs = " << numKs << "\n";
fileCsv << "wrong k = " << kVals[j] << " kIndex = " << j <<
" wrong result = " << resultsArray[m][i][j] << " correct result = " <<
resultsArray[0][i][j] << "\n";
std::cout <<namesOfMultiselectTimingFunctions[m] <<
" did not return the correct answer on test " << i + 1 << " at k[" << j <<
"]. It got "<< resultsArray[m][i][j];
std::cout << " instead of " << resultsArray[0][i][j] << ".\n" ;
std::cout << "RESULT:\t";
PrintFunctions::printBinary(resultsArray[m][i][j]);
std::cout << "Right:\t";
PrintFunctions::printBinary(resultsArray[0][i][j]);
}
}
fileCsv << flag << "\n";
}
//calculate the total time each algorithm took
for(i = 0; i < numTests; i++)
for(j = 0; j < NUMBEROFALGORITHMS;j++)
if(algorithmsToTest[j])
totalTimesPerAlgorithm[j] += timeArray[j][i];
//count the number of times each algorithm won.
for(i = 0; i < numTests;i++)
timesWon[winnerArray[i]]++;
printf("\n\n");
//print out the average times
for(i = 0; i < NUMBEROFALGORITHMS; i++)
if(algorithmsToTest[i])
printf("%-20s averaged: %f ms\n", namesOfMultiselectTimingFunctions[i], totalTimesPerAlgorithm[i] / numTests);
for(i = 0; i < NUMBEROFALGORITHMS; i++)
if(algorithmsToTest[i])
printf("%s won %u times\n", namesOfMultiselectTimingFunctions[i], timesWon[i]);
// free results
for(i = 0; i < numTests; i++)
for(m = 0; m < NUMBEROFALGORITHMS; m++)
if(algorithmsToTest[m])
free(resultsArray[m][i]);
//free h_vec and h_vec_copy
if(data == NULL)
free(h_vec);
free(h_vec_copy);
//close the file
fileCsv.close();
}
/* This function generates the array of kVals to work on and acts as a wrapper for
comparison.
*/
/* For each power-of-two vector size in [2^startPower, 2^stopPower], generates
 * one fixed list of `stopK` order statistics with the requested k-distribution
 * and hands the problem to compareMultiselectAlgorithms<T>.
 * NOTE(review): kJump (and startK) are accepted for interface symmetry but
 * are not used in this body.
 * NOTE(review): cudaDeviceReset() inside the loop while `generator` (created
 * before the loop) is still used afterwards looks suspect — confirm the
 * generator survives a device reset. */
template<typename T>
void runTests (uint generateType, char* fileName, uint startPower, uint stopPower
, uint timesToTestEachK, uint kDistribution, uint startK, uint stopK, uint kJump) {
uint algorithmsToRun[NUMBEROFALGORITHMS]= {1, 1, 1};
uint size;
// uint i;
// Variable-length array (compiler extension in C++): holds the k values.
uint arrayOfKs[stopK+1];
/*
*****************************
**** In this file, the kDistribution is not random.
**** The number of order statistics (numKs) is fixed at 101.
**** We only need to generate the kDistribuion one time for each size.
*****************************
*/
unsigned long long seed;
timeval t1;
gettimeofday(&t1, NULL);
seed = t1.tv_usec * t1.tv_sec;
curandGenerator_t generator;
srand(unsigned(time(NULL)));
curandCreateGenerator(&generator, CURAND_RNG_PSEUDO_DEFAULT);
curandSetPseudoRandomGeneratorSeed(generator,seed);
// double the array size to the next powers of 2
for(size = (1 << startPower); size <= (1 << stopPower); size *= 2) {
cudaDeviceReset();
arrayOfKDistributionGenerators[kDistribution](arrayOfKs, stopK, size, generator);
compareMultiselectAlgorithms<T>(size, arrayOfKs, stopK, timesToTestEachK,
algorithmsToRun, generateType, kDistribution, fileName);
} // end for(size)
curandDestroyGenerator(generator);
} // end runTests
} // end namespace CompareMultiselect
// Driver: for four value distributions and three element types, builds a
// descriptive output filename and runs the full multiselect benchmark suite.
// NOTE(review): typeString is malloc'd (10 bytes) and then overwritten with
// string-literal pointers in each case below — the allocation leaks, and the
// literal-to-char* conversions are deprecated.  hostName and typeString are
// never freed.
int main (int argc, char *argv[]) {
using namespace CompareMultiselect;
char *fileName, *hostName, *typeString;
fileName = (char*) malloc(128 * sizeof(char));
typeString = (char*) malloc(10 * sizeof(char));
hostName = (char*) malloc(20 * sizeof(char));
gethostname(hostName, 20);
#ifdef GPUNUMBER
cudaSetDevice(GPUNUMBER);
#endif
// Human-readable timestamp for the output filename; strip asctime's newline.
time_t rawtime;
struct tm * timeinfo;
time ( &rawtime );
timeinfo = localtime ( &rawtime );
char * humanTime = asctime(timeinfo);
humanTime[strlen(humanTime)-1] = '\0';
uint testCount, type,distributionType,startPower,stopPower,kDistribution,startK
,stopK,jumpK;
uint vecDistr[4];
vecDistr[0]=0; // Uniform
vecDistr[1]=1; // Normal
vecDistr[2]=3; // Half Normal
vecDistr[3]=9; // Cauchy
kDistribution=1; // Uniformly Spaced
startPower=10;
stopPower=24;
startK=101; // This gives the 0,1,2,...,98,99,100 percentiles
jumpK=1;
stopK=101;
testCount=25;
for(int j=0; j<4; j++){
distributionType = vecDistr[j];
// type 0 = float (all distributions), 1 = double (uniform/normal only),
// 2 = uint (uniform only).
for(type=0; type<3; type++){
switch(type){
case 0:
typeString = "float";
snprintf(fileName, 128,
"CPUQuickMultiselect %s %s k-dist:%s 2^%d to 2^%d (%d:%d:%d) %d-tests on %s at %s",
typeString, getDistributionOptions(type, distributionType),
getKDistributionOptions(kDistribution), startPower, stopPower,
startK, jumpK, stopK, testCount, hostName, humanTime);
printf("File Name: %s \n", fileName);
runTests<float>(distributionType,fileName,startPower,stopPower,testCount,
kDistribution,startK,stopK,jumpK);
break;
case 1:
typeString = "double";
if (distributionType<2){
snprintf(fileName, 128,
"CPUQuickMultiselect %s %s k-dist:%s 2^%d to 2^%d (%d:%d:%d) %d-tests on %s at %s",
typeString, getDistributionOptions(type, distributionType),
getKDistributionOptions(kDistribution), startPower, stopPower,
startK, jumpK, stopK, testCount, hostName, humanTime);
printf("File Name: %s \n", fileName);
runTests<double>(distributionType,fileName,startPower,stopPower,testCount,
kDistribution,startK,stopK,jumpK);
} // end if(distributionType)
break;
case 2:
typeString = "uint";
if (distributionType<1){
snprintf(fileName, 128,
"CPUQuickMultiselect %s %s k-dist:%s 2^%d to 2^%d (%d:%d:%d) %d-tests on %s at %s",
typeString, getDistributionOptions(type, distributionType),
getKDistributionOptions(kDistribution), startPower, stopPower,
startK, jumpK, stopK, testCount, hostName, humanTime);
printf("File Name: %s \n", fileName);
runTests<uint>(distributionType,fileName,startPower,stopPower,testCount,
kDistribution,startK,stopK,jumpK);
} // end if(distributionType)
break;
default:
printf("You entered and invalid option, now exiting\n");
break;
} // end switch(type)
} // end for(type)
} // end for (int j)
free (fileName);
return 0;
}
|
4bb5b63cf87deb95bf4f2bf9c6a5ef74a55af9a9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* _CT_CUDA_IMPL_CU_
*
* Copyright (C) 2017-2021 Tactical Computing Laboratories, LLC
* All Rights Reserved
* contact@tactcomplabs.com
*
* See LICENSE in the top level directory for licensing details
*/
#include "CT_CUDA.cuh"
__global__ void RAND_ADD(uint64_t* __restrict__ ARRAY, uint64_t* __restrict__ IDX, uint64_t iters) {
uint64_t i, ret;
uint64_t num_threads = (uint64_t) blockDim.x;
uint64_t iters_per_thread = (uint64_t) ((threadIdx.x == num_threads - 1) ?
(iters / num_threads) + (iters % num_threads) :
(iters / num_threads));
uint64_t start = (uint64_t) (blockIdx.x * iters) + (threadIdx.x * (iters / num_threads));
for(i = start; i < (start + iters_per_thread); i++) {
ret = atomicAdd((unsigned long long int *) &ARRAY[IDX[i]], (unsigned long long int) 0x1);
}
}
__global__ void RAND_CAS(uint64_t* __restrict__ ARRAY, uint64_t* IDX, uint64_t iters) {
uint64_t i, ret;
uint64_t num_threads = (uint64_t) blockDim.x;
uint64_t iters_per_thread = (uint64_t) ((threadIdx.x == num_threads - 1) ?
(iters / num_threads) + (iters % num_threads) :
(iters / num_threads));
uint64_t start = (uint64_t) (blockIdx.x * iters) + (threadIdx.x * (iters / num_threads));
for(i = start; i < (start + iters_per_thread); i++) {
ret = atomicCAS((unsigned long long int *) &ARRAY[IDX[i]], (unsigned long long int) ARRAY[IDX[i]], (unsigned long long int) ARRAY[IDX[i]]);
}
}
__global__ void STRIDE1_ADD(uint64_t* __restrict__ ARRAY, uint64_t iters) {
uint64_t i, ret;
uint64_t num_threads = (uint64_t) blockDim.x;
uint64_t iters_per_thread = (uint64_t) ((threadIdx.x == num_threads - 1) ?
(iters / num_threads) + (iters % num_threads) :
(iters / num_threads));
uint64_t start = (uint64_t) (blockIdx.x * iters) + (threadIdx.x * (iters / num_threads));
for(i = start; i < (start + iters_per_thread); i++) {
ret = atomicAdd((unsigned long long int *) &ARRAY[i], (unsigned long long int) 0x1);
}
}
__global__ void STRIDE1_CAS(uint64_t* __restrict__ ARRAY, uint64_t iters) {
uint64_t i, ret;
uint64_t num_threads = (uint64_t) blockDim.x;
uint64_t iters_per_thread = (uint64_t) ((threadIdx.x == num_threads - 1) ?
(iters / num_threads) + (iters % num_threads) :
(iters / num_threads));
uint64_t start = (uint64_t) (blockIdx.x * iters) + (threadIdx.x * (iters / num_threads));
for(i = start; i < (start + iters_per_thread); i++) {
ret = atomicCAS((unsigned long long int *) &ARRAY[i], (unsigned long long int) ARRAY[i], (unsigned long long int) ARRAY[i]);
}
}
__global__ void STRIDEN_ADD(uint64_t* __restrict__ ARRAY, uint64_t iters, uint64_t stride) {
uint64_t i, ret;
uint64_t num_threads = (uint64_t) blockDim.x;
uint64_t iters_per_thread = (uint64_t) ((threadIdx.x == num_threads - 1) ?
(iters / num_threads) + (iters % num_threads) :
(iters / num_threads));
uint64_t start = (uint64_t) ((blockIdx.x * iters) + (threadIdx.x * (iters / num_threads))) * stride;
for(i = start; i < (start + (iters_per_thread * stride)); i += stride) {
ret = atomicAdd((unsigned long long int *) &ARRAY[i], (unsigned long long int) 0x1);
}
}
__global__ void STRIDEN_CAS(uint64_t* __restrict__ ARRAY, uint64_t iters, uint64_t stride) {
uint64_t i, ret;
uint64_t num_threads = (uint64_t) blockDim.x;
uint64_t iters_per_thread = (uint64_t) ((threadIdx.x == num_threads - 1) ?
(iters / num_threads) + (iters % num_threads) :
(iters / num_threads));
uint64_t start = (uint64_t) ((blockIdx.x * iters) + (threadIdx.x * (iters / num_threads))) * stride;
for(i = start; i < (start + (iters_per_thread * stride)); i += stride) {
ret = atomicCAS((unsigned long long int *) &ARRAY[i], (unsigned long long int) ARRAY[i], (unsigned long long int) ARRAY[i]);
}
}
__global__ void CENTRAL_ADD(uint64_t* __restrict__ ARRAY, uint64_t iters) {
uint64_t i, ret;
uint64_t num_threads = (uint64_t) blockDim.x;
uint64_t iters_per_thread = (uint64_t) ((threadIdx.x == num_threads - 1) ?
(iters / num_threads) + (iters % num_threads) :
(iters / num_threads));
for(i = 0; i < iters_per_thread; i++) {
ret = atomicAdd((unsigned long long int *) &ARRAY[0], (unsigned long long int) 0x1);
}
}
__global__ void CENTRAL_CAS(uint64_t* __restrict__ ARRAY, uint64_t iters) {
uint64_t i, ret;
uint64_t num_threads = (uint64_t) blockDim.x;
uint64_t iters_per_thread = (uint64_t) ((threadIdx.x == num_threads - 1) ?
(iters / num_threads) + (iters % num_threads) :
(iters / num_threads));
for(i = 0; i < iters_per_thread; i++) {
ret = atomicCAS((unsigned long long int *) &ARRAY[0], (unsigned long long int) ARRAY[0], (unsigned long long int) ARRAY[0]);
}
}
/* Note that the PTRCHASE kernels utilize only a single thread per thread block. As the *
* iterations for a given thread block are not independent, utilizing multiple threads *
* per block would destroy the semantics of a pointer chasing operation. */
__global__ void PTRCHASE_ADD(uint64_t* __restrict__ IDX, uint64_t iters) {
uint64_t i;
uint64_t start = (uint64_t) (blockIdx.x * iters);
for(i = 0; i < iters; i++) {
start = atomicAdd((unsigned long long int *) &IDX[start], (unsigned long long int) 0x0);
}
}
/* Note that the PTRCHASE kernels utilize only a single thread per thread block. As the *
* iterations for a given thread block are not independent, utilizing multiple threads *
* per block would destroy the semantics of a pointer chasing operation. */
__global__ void PTRCHASE_CAS(uint64_t* __restrict__ IDX, uint64_t iters) {
uint64_t i;
uint64_t start = (uint64_t) (blockIdx.x * iters);
for(i = 0; i < iters; i++) {
start = atomicCAS((unsigned long long int *) &IDX[start], (unsigned long long int) IDX[start], (unsigned long long int) IDX[start]);
}
}
__global__ void SG_ADD(uint64_t* __restrict__ ARRAY, uint64_t* __restrict__ IDX, uint64_t iters) {
uint64_t i, ret;
uint64_t src, dest, val;
uint64_t num_threads = (uint64_t) blockDim.x;
uint64_t iters_per_thread = (uint64_t) ((threadIdx.x == num_threads - 1) ?
(iters / num_threads) + (iters % num_threads) :
(iters / num_threads));
uint64_t start = (uint64_t) (blockIdx.x * iters) + (threadIdx.x * (iters / num_threads));
src = 0x0;
dest = 0x0;
val = 0x0;
for(i = start; i < (start + iters_per_thread); i++) {
src = atomicAdd((unsigned long long int *) &IDX[i], (unsigned long long int) 0x0);
dest = atomicAdd((unsigned long long int *) &IDX[i+1], (unsigned long long int) 0x0);
val = atomicAdd((unsigned long long int *) &ARRAY[src], (unsigned long long int) 0x1);
ret = atomicAdd((unsigned long long int *) &ARRAY[dest], (unsigned long long int) val);
}
}
__global__ void SG_CAS(uint64_t* __restrict__ ARRAY, uint64_t* __restrict__ IDX, uint64_t iters) {
uint64_t i, ret;
uint64_t src, dest, val;
uint64_t num_threads = (uint64_t) blockDim.x;
uint64_t iters_per_thread = (uint64_t) ((threadIdx.x == num_threads - 1) ?
(iters / num_threads) + (iters % num_threads) :
(iters / num_threads));
uint64_t start = (uint64_t) (blockIdx.x * iters) + (threadIdx.x * (iters / num_threads));
src = 0x0;
dest = 0x0;
val = 0x0;
for(i = start; i < (start + iters_per_thread); i++) {
src = atomicCAS((unsigned long long int *) &IDX[i], (unsigned long long int) IDX[i], (unsigned long long int) IDX[i]);
dest = atomicCAS((unsigned long long int *) &IDX[i+1], (unsigned long long int) IDX[i+1], (unsigned long long int) IDX[i+1]);
val = atomicCAS((unsigned long long int *) &ARRAY[src], (unsigned long long int) ARRAY[src], (unsigned long long int) ARRAY[src]);
ret = atomicCAS((unsigned long long int *) &ARRAY[dest], (unsigned long long int) ARRAY[dest], (unsigned long long int) val);
}
}
__global__ void SCATTER_ADD(uint64_t* __restrict__ ARRAY, uint64_t* __restrict__ IDX, uint64_t iters) {
uint64_t i, ret;
uint64_t dest, val;
uint64_t num_threads = (uint64_t) blockDim.x;
uint64_t iters_per_thread = (uint64_t) ((threadIdx.x == num_threads - 1) ?
(iters / num_threads) + (iters % num_threads) :
(iters / num_threads));
uint64_t start = (uint64_t) (blockIdx.x * iters) + (threadIdx.x * (iters / num_threads));
dest = 0x0;
val = 0x0;
for(i = start; i < (start + iters_per_thread); i++) {
dest = atomicAdd((unsigned long long int *) &IDX[i+1], (unsigned long long int) 0x0);
val = atomicAdd((unsigned long long int *) &ARRAY[i], (unsigned long long int) 0x1);
ret = atomicAdd((unsigned long long int *) &ARRAY[dest], (unsigned long long int) val);
}
}
__global__ void SCATTER_CAS(uint64_t* __restrict__ ARRAY, uint64_t* __restrict__ IDX, uint64_t iters) {
uint64_t i, ret;
uint64_t dest, val;
uint64_t num_threads = (uint64_t) blockDim.x;
uint64_t iters_per_thread = (uint64_t) ((threadIdx.x == num_threads - 1) ?
(iters / num_threads) + (iters % num_threads) :
(iters / num_threads));
uint64_t start = (uint64_t) (blockIdx.x * iters) + (threadIdx.x * (iters / num_threads));
dest = 0x0;
val = 0x0;
for(i = start; i < (start + iters_per_thread); i++) {
dest = atomicCAS((unsigned long long int *) &IDX[i+1], (unsigned long long int) IDX[i+1], (unsigned long long int) IDX[i+1]);
val = atomicCAS((unsigned long long int *) &ARRAY[i], (unsigned long long int) ARRAY[i], (unsigned long long int) ARRAY[i]);
ret = atomicCAS((unsigned long long int *) &ARRAY[dest], (unsigned long long int) ARRAY[dest], (unsigned long long int) val);
}
}
__global__ void GATHER_ADD(uint64_t* __restrict__ ARRAY, uint64_t* __restrict__ IDX, uint64_t iters) {
uint64_t i, ret;
uint64_t dest, val;
uint64_t num_threads = (uint64_t) blockDim.x;
uint64_t iters_per_thread = (uint64_t) ((threadIdx.x == num_threads - 1) ?
(iters / num_threads) + (iters % num_threads) :
(iters / num_threads));
uint64_t start = (uint64_t) (blockIdx.x * iters) + (threadIdx.x * (iters / num_threads));
dest = 0x0;
val = 0x0;
for(i = start; i < (start + iters_per_thread); i++) {
dest = atomicAdd((unsigned long long int *) &IDX[i+1], (unsigned long long int) 0x0);
val = atomicAdd((unsigned long long int *) &ARRAY[dest], (unsigned long long int) 0x1);
ret = atomicAdd((unsigned long long int *) &ARRAY[i], (unsigned long long int) val);
}
}
__global__ void GATHER_CAS(uint64_t* __restrict__ ARRAY, uint64_t* __restrict__ IDX, uint64_t iters) {
uint64_t i, ret;
uint64_t dest, val;
uint64_t num_threads = (uint64_t) blockDim.x;
uint64_t iters_per_thread = (uint64_t) ((threadIdx.x == num_threads - 1) ?
(iters / num_threads) + (iters % num_threads) :
(iters / num_threads));
uint64_t start = (uint64_t) (blockIdx.x * iters) + (threadIdx.x * (iters / num_threads));
dest = 0x0;
val = 0x0;
for(i = start; i < (start + iters_per_thread); i++) {
dest = atomicCAS((unsigned long long int *) &IDX[i+1], (unsigned long long int) IDX[i+1], (unsigned long long int) IDX[i+1]);
val = atomicCAS((unsigned long long int *) &ARRAY[dest], (unsigned long long int) ARRAY[dest], (unsigned long long int) ARRAY[dest]);
ret = atomicCAS((unsigned long long int *) &ARRAY[i], (unsigned long long int) ARRAY[i], (unsigned long long int) val);
}
} | 4bb5b63cf87deb95bf4f2bf9c6a5ef74a55af9a9.cu | /*
* _CT_CUDA_IMPL_CU_
*
* Copyright (C) 2017-2021 Tactical Computing Laboratories, LLC
* All Rights Reserved
* contact@tactcomplabs.com
*
* See LICENSE in the top level directory for licensing details
*/
#include "CT_CUDA.cuh"
__global__ void RAND_ADD(uint64_t* __restrict__ ARRAY, uint64_t* __restrict__ IDX, uint64_t iters) {
uint64_t i, ret;
uint64_t num_threads = (uint64_t) blockDim.x;
uint64_t iters_per_thread = (uint64_t) ((threadIdx.x == num_threads - 1) ?
(iters / num_threads) + (iters % num_threads) :
(iters / num_threads));
uint64_t start = (uint64_t) (blockIdx.x * iters) + (threadIdx.x * (iters / num_threads));
for(i = start; i < (start + iters_per_thread); i++) {
ret = atomicAdd((unsigned long long int *) &ARRAY[IDX[i]], (unsigned long long int) 0x1);
}
}
__global__ void RAND_CAS(uint64_t* __restrict__ ARRAY, uint64_t* IDX, uint64_t iters) {
uint64_t i, ret;
uint64_t num_threads = (uint64_t) blockDim.x;
uint64_t iters_per_thread = (uint64_t) ((threadIdx.x == num_threads - 1) ?
(iters / num_threads) + (iters % num_threads) :
(iters / num_threads));
uint64_t start = (uint64_t) (blockIdx.x * iters) + (threadIdx.x * (iters / num_threads));
for(i = start; i < (start + iters_per_thread); i++) {
ret = atomicCAS((unsigned long long int *) &ARRAY[IDX[i]], (unsigned long long int) ARRAY[IDX[i]], (unsigned long long int) ARRAY[IDX[i]]);
}
}
__global__ void STRIDE1_ADD(uint64_t* __restrict__ ARRAY, uint64_t iters) {
uint64_t i, ret;
uint64_t num_threads = (uint64_t) blockDim.x;
uint64_t iters_per_thread = (uint64_t) ((threadIdx.x == num_threads - 1) ?
(iters / num_threads) + (iters % num_threads) :
(iters / num_threads));
uint64_t start = (uint64_t) (blockIdx.x * iters) + (threadIdx.x * (iters / num_threads));
for(i = start; i < (start + iters_per_thread); i++) {
ret = atomicAdd((unsigned long long int *) &ARRAY[i], (unsigned long long int) 0x1);
}
}
__global__ void STRIDE1_CAS(uint64_t* __restrict__ ARRAY, uint64_t iters) {
uint64_t i, ret;
uint64_t num_threads = (uint64_t) blockDim.x;
uint64_t iters_per_thread = (uint64_t) ((threadIdx.x == num_threads - 1) ?
(iters / num_threads) + (iters % num_threads) :
(iters / num_threads));
uint64_t start = (uint64_t) (blockIdx.x * iters) + (threadIdx.x * (iters / num_threads));
for(i = start; i < (start + iters_per_thread); i++) {
ret = atomicCAS((unsigned long long int *) &ARRAY[i], (unsigned long long int) ARRAY[i], (unsigned long long int) ARRAY[i]);
}
}
__global__ void STRIDEN_ADD(uint64_t* __restrict__ ARRAY, uint64_t iters, uint64_t stride) {
uint64_t i, ret;
uint64_t num_threads = (uint64_t) blockDim.x;
uint64_t iters_per_thread = (uint64_t) ((threadIdx.x == num_threads - 1) ?
(iters / num_threads) + (iters % num_threads) :
(iters / num_threads));
uint64_t start = (uint64_t) ((blockIdx.x * iters) + (threadIdx.x * (iters / num_threads))) * stride;
for(i = start; i < (start + (iters_per_thread * stride)); i += stride) {
ret = atomicAdd((unsigned long long int *) &ARRAY[i], (unsigned long long int) 0x1);
}
}
__global__ void STRIDEN_CAS(uint64_t* __restrict__ ARRAY, uint64_t iters, uint64_t stride) {
uint64_t i, ret;
uint64_t num_threads = (uint64_t) blockDim.x;
uint64_t iters_per_thread = (uint64_t) ((threadIdx.x == num_threads - 1) ?
(iters / num_threads) + (iters % num_threads) :
(iters / num_threads));
uint64_t start = (uint64_t) ((blockIdx.x * iters) + (threadIdx.x * (iters / num_threads))) * stride;
for(i = start; i < (start + (iters_per_thread * stride)); i += stride) {
ret = atomicCAS((unsigned long long int *) &ARRAY[i], (unsigned long long int) ARRAY[i], (unsigned long long int) ARRAY[i]);
}
}
__global__ void CENTRAL_ADD(uint64_t* __restrict__ ARRAY, uint64_t iters) {
uint64_t i, ret;
uint64_t num_threads = (uint64_t) blockDim.x;
uint64_t iters_per_thread = (uint64_t) ((threadIdx.x == num_threads - 1) ?
(iters / num_threads) + (iters % num_threads) :
(iters / num_threads));
for(i = 0; i < iters_per_thread; i++) {
ret = atomicAdd((unsigned long long int *) &ARRAY[0], (unsigned long long int) 0x1);
}
}
__global__ void CENTRAL_CAS(uint64_t* __restrict__ ARRAY, uint64_t iters) {
uint64_t i, ret;
uint64_t num_threads = (uint64_t) blockDim.x;
uint64_t iters_per_thread = (uint64_t) ((threadIdx.x == num_threads - 1) ?
(iters / num_threads) + (iters % num_threads) :
(iters / num_threads));
for(i = 0; i < iters_per_thread; i++) {
ret = atomicCAS((unsigned long long int *) &ARRAY[0], (unsigned long long int) ARRAY[0], (unsigned long long int) ARRAY[0]);
}
}
/* Note that the PTRCHASE kernels utilize only a single thread per thread block. As the *
* iterations for a given thread block are not independent, utilizing multiple threads *
* per block would destroy the semantics of a pointer chasing operation. */
__global__ void PTRCHASE_ADD(uint64_t* __restrict__ IDX, uint64_t iters) {
uint64_t i;
uint64_t start = (uint64_t) (blockIdx.x * iters);
for(i = 0; i < iters; i++) {
start = atomicAdd((unsigned long long int *) &IDX[start], (unsigned long long int) 0x0);
}
}
/* Note that the PTRCHASE kernels utilize only a single thread per thread block. As the *
* iterations for a given thread block are not independent, utilizing multiple threads *
* per block would destroy the semantics of a pointer chasing operation. */
__global__ void PTRCHASE_CAS(uint64_t* __restrict__ IDX, uint64_t iters) {
uint64_t i;
uint64_t start = (uint64_t) (blockIdx.x * iters);
for(i = 0; i < iters; i++) {
start = atomicCAS((unsigned long long int *) &IDX[start], (unsigned long long int) IDX[start], (unsigned long long int) IDX[start]);
}
}
__global__ void SG_ADD(uint64_t* __restrict__ ARRAY, uint64_t* __restrict__ IDX, uint64_t iters) {
uint64_t i, ret;
uint64_t src, dest, val;
uint64_t num_threads = (uint64_t) blockDim.x;
uint64_t iters_per_thread = (uint64_t) ((threadIdx.x == num_threads - 1) ?
(iters / num_threads) + (iters % num_threads) :
(iters / num_threads));
uint64_t start = (uint64_t) (blockIdx.x * iters) + (threadIdx.x * (iters / num_threads));
src = 0x0;
dest = 0x0;
val = 0x0;
for(i = start; i < (start + iters_per_thread); i++) {
src = atomicAdd((unsigned long long int *) &IDX[i], (unsigned long long int) 0x0);
dest = atomicAdd((unsigned long long int *) &IDX[i+1], (unsigned long long int) 0x0);
val = atomicAdd((unsigned long long int *) &ARRAY[src], (unsigned long long int) 0x1);
ret = atomicAdd((unsigned long long int *) &ARRAY[dest], (unsigned long long int) val);
}
}
__global__ void SG_CAS(uint64_t* __restrict__ ARRAY, uint64_t* __restrict__ IDX, uint64_t iters) {
uint64_t i, ret;
uint64_t src, dest, val;
uint64_t num_threads = (uint64_t) blockDim.x;
uint64_t iters_per_thread = (uint64_t) ((threadIdx.x == num_threads - 1) ?
(iters / num_threads) + (iters % num_threads) :
(iters / num_threads));
uint64_t start = (uint64_t) (blockIdx.x * iters) + (threadIdx.x * (iters / num_threads));
src = 0x0;
dest = 0x0;
val = 0x0;
for(i = start; i < (start + iters_per_thread); i++) {
src = atomicCAS((unsigned long long int *) &IDX[i], (unsigned long long int) IDX[i], (unsigned long long int) IDX[i]);
dest = atomicCAS((unsigned long long int *) &IDX[i+1], (unsigned long long int) IDX[i+1], (unsigned long long int) IDX[i+1]);
val = atomicCAS((unsigned long long int *) &ARRAY[src], (unsigned long long int) ARRAY[src], (unsigned long long int) ARRAY[src]);
ret = atomicCAS((unsigned long long int *) &ARRAY[dest], (unsigned long long int) ARRAY[dest], (unsigned long long int) val);
}
}
__global__ void SCATTER_ADD(uint64_t* __restrict__ ARRAY, uint64_t* __restrict__ IDX, uint64_t iters) {
uint64_t i, ret;
uint64_t dest, val;
uint64_t num_threads = (uint64_t) blockDim.x;
uint64_t iters_per_thread = (uint64_t) ((threadIdx.x == num_threads - 1) ?
(iters / num_threads) + (iters % num_threads) :
(iters / num_threads));
uint64_t start = (uint64_t) (blockIdx.x * iters) + (threadIdx.x * (iters / num_threads));
dest = 0x0;
val = 0x0;
for(i = start; i < (start + iters_per_thread); i++) {
dest = atomicAdd((unsigned long long int *) &IDX[i+1], (unsigned long long int) 0x0);
val = atomicAdd((unsigned long long int *) &ARRAY[i], (unsigned long long int) 0x1);
ret = atomicAdd((unsigned long long int *) &ARRAY[dest], (unsigned long long int) val);
}
}
__global__ void SCATTER_CAS(uint64_t* __restrict__ ARRAY, uint64_t* __restrict__ IDX, uint64_t iters) {
uint64_t i, ret;
uint64_t dest, val;
uint64_t num_threads = (uint64_t) blockDim.x;
uint64_t iters_per_thread = (uint64_t) ((threadIdx.x == num_threads - 1) ?
(iters / num_threads) + (iters % num_threads) :
(iters / num_threads));
uint64_t start = (uint64_t) (blockIdx.x * iters) + (threadIdx.x * (iters / num_threads));
dest = 0x0;
val = 0x0;
for(i = start; i < (start + iters_per_thread); i++) {
dest = atomicCAS((unsigned long long int *) &IDX[i+1], (unsigned long long int) IDX[i+1], (unsigned long long int) IDX[i+1]);
val = atomicCAS((unsigned long long int *) &ARRAY[i], (unsigned long long int) ARRAY[i], (unsigned long long int) ARRAY[i]);
ret = atomicCAS((unsigned long long int *) &ARRAY[dest], (unsigned long long int) ARRAY[dest], (unsigned long long int) val);
}
}
__global__ void GATHER_ADD(uint64_t* __restrict__ ARRAY, uint64_t* __restrict__ IDX, uint64_t iters) {
uint64_t i, ret;
uint64_t dest, val;
uint64_t num_threads = (uint64_t) blockDim.x;
uint64_t iters_per_thread = (uint64_t) ((threadIdx.x == num_threads - 1) ?
(iters / num_threads) + (iters % num_threads) :
(iters / num_threads));
uint64_t start = (uint64_t) (blockIdx.x * iters) + (threadIdx.x * (iters / num_threads));
dest = 0x0;
val = 0x0;
for(i = start; i < (start + iters_per_thread); i++) {
dest = atomicAdd((unsigned long long int *) &IDX[i+1], (unsigned long long int) 0x0);
val = atomicAdd((unsigned long long int *) &ARRAY[dest], (unsigned long long int) 0x1);
ret = atomicAdd((unsigned long long int *) &ARRAY[i], (unsigned long long int) val);
}
}
__global__ void GATHER_CAS(uint64_t* __restrict__ ARRAY, uint64_t* __restrict__ IDX, uint64_t iters) {
uint64_t i, ret;
uint64_t dest, val;
uint64_t num_threads = (uint64_t) blockDim.x;
uint64_t iters_per_thread = (uint64_t) ((threadIdx.x == num_threads - 1) ?
(iters / num_threads) + (iters % num_threads) :
(iters / num_threads));
uint64_t start = (uint64_t) (blockIdx.x * iters) + (threadIdx.x * (iters / num_threads));
dest = 0x0;
val = 0x0;
for(i = start; i < (start + iters_per_thread); i++) {
dest = atomicCAS((unsigned long long int *) &IDX[i+1], (unsigned long long int) IDX[i+1], (unsigned long long int) IDX[i+1]);
val = atomicCAS((unsigned long long int *) &ARRAY[dest], (unsigned long long int) ARRAY[dest], (unsigned long long int) ARRAY[dest]);
ret = atomicCAS((unsigned long long int *) &ARRAY[i], (unsigned long long int) ARRAY[i], (unsigned long long int) val);
}
} |
b698d933fd41873ddb54cf830eb914af229a9ee1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <vector>
#include "caffe/layers/shuffle_channel_layer.hpp"
namespace caffe {
template <typename Dtype>
__global__ void ShuffleChannelKernel(const int nthreads, const int feature_map_size,
Dtype *output, const Dtype *input, int group_row, int group_column, int len) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / group_row / group_column / len;
const int i = (index / group_column / len) % group_row;
const int j = index / len % group_column;
const int k = index - (n * feature_map_size + (i * group_column + j) * len);
Dtype* p_o = output + n * feature_map_size + (j * group_row + i) * len;
p_o[k] = input[index];
}
}
template <typename Dtype>
void ShuffleChannelLayer<Dtype>::Resize_gpu(Dtype *output, const Dtype *input, int group_row, int group_column, int len)
{
for (int i = 0; i < group_row; ++i) // 2
{
for(int j = 0; j < group_column ; ++j) // 3
{
const Dtype* p_i = input + (i * group_column + j ) * len;
Dtype* p_o = output + (j * group_row + i ) * len;
caffe_copy(len, p_i, p_o);
}
}
}
template <typename Dtype>
void ShuffleChannelLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int num = bottom[0]->num();
const int feature_map_size = bottom[0]->count(1);
const int sp_sz = bottom[0]->count(2);
const int chs = bottom[0]->channels();
int group_row = group_;
int group_column = int(chs / group_row);
CHECK_EQ(chs, (group_column * group_row)) << "Wrong group size.";
int count = num * group_column * group_row * sp_sz;
ShuffleChannelKernel<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >(
count, feature_map_size, top_data, bottom_data, group_row, group_column, sp_sz);
//Dtype* temp_data = temp_blob_.mutable_gpu_data();
//for(int n = 0; n < num; ++n)
//{
// Resize_gpu(top_data + n*feature_map_size, bottom_data + n*feature_map_size, group_row, group_column, sp_sz);
//}
//caffe_copy(bottom[0]->count(), temp_blob_.gpu_data(), top_data);
}
template <typename Dtype>
void ShuffleChannelLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int num = bottom[0]->num();
const int feature_map_size = bottom[0]->count(1);
const int sp_sz = bottom[0]->count(2);
const int chs = bottom[0]->channels();
int group_row = int(chs / group_);
int group_column = group_;
int count = num * group_column * group_row * sp_sz;
ShuffleChannelKernel<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >(
count, feature_map_size, bottom_diff, top_diff, group_row, group_column, sp_sz);
//Dtype* temp_diff = temp_blob_.mutable_gpu_diff();
// for(int n = 0; n < num; ++n)
// {
//Resize_gpu(bottom_diff + n * feature_map_size, top_diff + n*feature_map_size, group_row, group_column, sp_sz);
// }
//caffe_copy(top[0]->count(), temp_blob_.gpu_diff(), bottom_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(ShuffleChannelLayer);
} // namespace caffe
| b698d933fd41873ddb54cf830eb914af229a9ee1.cu | #include <algorithm>
#include <vector>
#include "caffe/layers/shuffle_channel_layer.hpp"
namespace caffe {
template <typename Dtype>
__global__ void ShuffleChannelKernel(const int nthreads, const int feature_map_size,
Dtype *output, const Dtype *input, int group_row, int group_column, int len) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / group_row / group_column / len;
const int i = (index / group_column / len) % group_row;
const int j = index / len % group_column;
const int k = index - (n * feature_map_size + (i * group_column + j) * len);
Dtype* p_o = output + n * feature_map_size + (j * group_row + i) * len;
p_o[k] = input[index];
}
}
template <typename Dtype>
void ShuffleChannelLayer<Dtype>::Resize_gpu(Dtype *output, const Dtype *input, int group_row, int group_column, int len)
{
for (int i = 0; i < group_row; ++i) // 2
{
for(int j = 0; j < group_column ; ++j) // 3
{
const Dtype* p_i = input + (i * group_column + j ) * len;
Dtype* p_o = output + (j * group_row + i ) * len;
caffe_copy(len, p_i, p_o);
}
}
}
template <typename Dtype>
void ShuffleChannelLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int num = bottom[0]->num();
const int feature_map_size = bottom[0]->count(1);
const int sp_sz = bottom[0]->count(2);
const int chs = bottom[0]->channels();
int group_row = group_;
int group_column = int(chs / group_row);
CHECK_EQ(chs, (group_column * group_row)) << "Wrong group size.";
int count = num * group_column * group_row * sp_sz;
ShuffleChannelKernel<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >(
count, feature_map_size, top_data, bottom_data, group_row, group_column, sp_sz);
//Dtype* temp_data = temp_blob_.mutable_gpu_data();
//for(int n = 0; n < num; ++n)
//{
// Resize_gpu(top_data + n*feature_map_size, bottom_data + n*feature_map_size, group_row, group_column, sp_sz);
//}
//caffe_copy(bottom[0]->count(), temp_blob_.gpu_data(), top_data);
}
template <typename Dtype>
void ShuffleChannelLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int num = bottom[0]->num();
const int feature_map_size = bottom[0]->count(1);
const int sp_sz = bottom[0]->count(2);
const int chs = bottom[0]->channels();
int group_row = int(chs / group_);
int group_column = group_;
int count = num * group_column * group_row * sp_sz;
ShuffleChannelKernel<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >(
count, feature_map_size, bottom_diff, top_diff, group_row, group_column, sp_sz);
//Dtype* temp_diff = temp_blob_.mutable_gpu_diff();
// for(int n = 0; n < num; ++n)
// {
//Resize_gpu(bottom_diff + n * feature_map_size, top_diff + n*feature_map_size, group_row, group_column, sp_sz);
// }
//caffe_copy(top[0]->count(), temp_blob_.gpu_diff(), bottom_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(ShuffleChannelLayer);
} // namespace caffe
|
44fe0064d5cedbe3ba622c78da746a321d4eba40.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void kernelMultMatTiled(float *d_M, float *d_N, float *d_P, int m,int n , int y){
// se define la memoria compartida de los tiles de tamao TILE_WIDTH
__shared__ float Mds[TILE_WIDTH][TILE_WIDTH];
__shared__ float Nds[TILE_WIDTH][TILE_WIDTH];
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int row = by * TILE_WIDTH + ty;
int col = bx * TILE_WIDTH + tx;
float Pvalue = 0;
for(int i = 0; i < n / TILE_WIDTH; i++){
/* primeramente se revisa que el elemento se encuentre en la matriz d_M ,
si no es as se establecen como cero
*/
if((i*TILE_WIDTH + tx) < n && row < m){
Mds[ty][tx]=d_M[row*n + (i*TILE_WIDTH + tx)];
}else{
Mds[ty][tx]=0.0;
}
/* despues se revisa que el elemento se encuentre en la matriz d_N ,
si no es as se establecen como cero
*/
if((i*TILE_WIDTH + ty) < n && col < y){
Nds[ty][tx]= d_N[(i*TILE_WIDTH + ty)*y + col];
}else{
Nds[ty][tx]=0.0;
}
__syncthreads();
/*Se realiza la multiplicacion de elementos que estn dentro del TILE
y se va guardando en Pvalue*/
for(int k = 0; k < TILE_WIDTH; ++k){
Pvalue += Mds[ty][k] * Nds[k][tx];
}
__syncthreads();
}
//se asigna el resultado de Pvalue en las posiciones de d_P
if(row<m && col < y)
d_P[(row*y)+ col] = Pvalue;
} | 44fe0064d5cedbe3ba622c78da746a321d4eba40.cu | #include "includes.h"
__global__ void kernelMultMatTiled(float *d_M, float *d_N, float *d_P, int m,int n , int y){
// se define la memoria compartida de los tiles de tamaño TILE_WIDTH
__shared__ float Mds[TILE_WIDTH][TILE_WIDTH];
__shared__ float Nds[TILE_WIDTH][TILE_WIDTH];
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int row = by * TILE_WIDTH + ty;
int col = bx * TILE_WIDTH + tx;
float Pvalue = 0;
for(int i = 0; i < n / TILE_WIDTH; i++){
/* primeramente se revisa que el elemento se encuentre en la matriz d_M ,
si no es así se establecen como cero
*/
if((i*TILE_WIDTH + tx) < n && row < m){
Mds[ty][tx]=d_M[row*n + (i*TILE_WIDTH + tx)];
}else{
Mds[ty][tx]=0.0;
}
/* despues se revisa que el elemento se encuentre en la matriz d_N ,
si no es así se establecen como cero
*/
if((i*TILE_WIDTH + ty) < n && col < y){
Nds[ty][tx]= d_N[(i*TILE_WIDTH + ty)*y + col];
}else{
Nds[ty][tx]=0.0;
}
__syncthreads();
/*Se realiza la multiplicacion de elementos que están dentro del TILE
y se va guardando en Pvalue*/
for(int k = 0; k < TILE_WIDTH; ++k){
Pvalue += Mds[ty][k] * Nds[k][tx];
}
__syncthreads();
}
//se asigna el resultado de Pvalue en las posiciones de d_P
if(row<m && col < y)
d_P[(row*y)+ col] = Pvalue;
} |
70cb1fd3fb7f7bbe46da3ec849da780fc9136c78.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright (c) 2017 Darius Rckert
* Licensed under the MIT License.
* See LICENSE file for more information.
*/
#include "saiga/cuda/device_helper.h"
#include "saiga/cuda/imageProcessing/imageProcessing.h"
#include "saiga/cuda/shfl_helper.h"
namespace Saiga
{
namespace CUDA
{
//todo maybe change
static __constant__ float d_Kernel[SAIGA_MAX_KERNEL_SIZE];
// Separable 2-D convolution, "outer linear" variant.
// Each block computes a BLOCK_W x (BLOCK_H * Y_ELEMENTS) output tile:
//  1) the tile plus a RADIUS-wide halo is staged into shared memory,
//     clamping out-of-image reads to the nearest edge pixel,
//  2) a vertical pass with the constant-memory filter fills `buffer2`,
//  3) a horizontal pass writes the final pixels (bounds-checked).
// Precondition: the host has uploaded 2*RADIUS+1 taps into d_Kernel.
// NOTE(review): `T* kernel = d_Kernel` only compiles for T == float — confirm
// the template is never instantiated for other element types.
template <typename T, int RADIUS, unsigned int BLOCK_W, unsigned int BLOCK_H, unsigned int Y_ELEMENTS>
__global__ static void d_convolveOuterLinear(ImageView<T> src, ImageView<T> dst)
{
    // Each thread produces Y_ELEMENTS vertically-strided output rows.
    const unsigned BLOCK_H2 = BLOCK_H * Y_ELEMENTS;
    // for radius = 4: elements = (32+8) * (16+8) = 960 = 3840
    __shared__ T buffer[BLOCK_H2 + 2 * RADIUS][BLOCK_W + 2 * RADIUS];
    // for radius = 4: elements = (32+8) * (16) = 640 = 2560
    __shared__ T buffer2[BLOCK_H2][BLOCK_W + 2 * RADIUS];
    // total s mem per block = 6400
    // with 512 threads per block smem per sm: 25600 -> 100% occ
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    int t  = tx + ty * BLOCK_W;           // linear thread id within the block
    int xp = blockIdx.x * BLOCK_W + tx;   // global output column
    int yp = blockIdx.y * BLOCK_H2 + ty;  // global output row (first of Y_ELEMENTS)
    int blockStartX = blockIdx.x * BLOCK_W - RADIUS;   // top-left of tile incl. halo
    int blockStartY = blockIdx.y * BLOCK_H2 - RADIUS;
    const int blockSizeX = BLOCK_W + 2 * RADIUS;
    const int blockSizeY = BLOCK_H2 + 2 * RADIUS;
    // fill buffer: all threads cooperatively load tile + halo
    for (int i = t; i < blockSizeX * blockSizeY; i += (BLOCK_W * BLOCK_H))
    {
        int x  = i % blockSizeX;
        int y  = i / blockSizeX;
        int gx = x + blockStartX;
        int gy = y + blockStartY;
        src.clampToEdge(gy, gx);
        buffer[y][x] = src(gy, gx);
    }
    __syncthreads();
    T* kernel = d_Kernel;
    // vertical pass: convolve columns of `buffer` into `buffer2`
    for (int i = t; i < blockSizeX * BLOCK_H2; i += (BLOCK_W * BLOCK_H))
    {
        int x = i % blockSizeX;
        int y = i / blockSizeX;
        T sum = 0;
#pragma unroll
        for (int j = -RADIUS; j <= RADIUS; j++)
        {
            int kernelIndex = j + RADIUS;
            sum += buffer[y + RADIUS + j][x] * kernel[kernelIndex];
        }
        buffer2[y][x] = sum;
    }
    __syncthreads();
    // horizontal pass: each thread writes its Y_ELEMENTS output pixels
    for (int i = 0; i < Y_ELEMENTS; ++i)
    {
        T sum = 0;
#pragma unroll
        for (int j = -RADIUS; j <= RADIUS; j++)
        {
            int kernelIndex = j + RADIUS;
            sum += buffer2[ty][tx + RADIUS + j] * kernel[kernelIndex];
        }
        if (dst.inImage(yp, xp)) dst(yp, xp) = sum;
        yp += BLOCK_H;
        ty += BLOCK_H;
    }
}
// Host-side launcher for d_convolveOuterLinear.
// Each block covers a BLOCK_W x (BLOCK_H * Y_ELEMENTS) output tile; the grid
// is sized with ceil-division so partial edge tiles are handled by the
// kernel's own bounds check.
template <typename T, int RADIUS>
inline void convolveOuterLinear(ImageView<T> src, ImageView<T> dst)
{
    constexpr int BLOCK_W    = 32;
    constexpr int BLOCK_H    = 16;
    constexpr int Y_ELEMENTS = 2;
    const dim3 threads(BLOCK_W, BLOCK_H);
    const dim3 blocks(Saiga::iDivUp(src.width, BLOCK_W), Saiga::iDivUp(src.height, BLOCK_H * Y_ELEMENTS), 1);
    hipLaunchKernelGGL((d_convolveOuterLinear<T, RADIUS, BLOCK_W, BLOCK_H, Y_ELEMENTS>), dim3(blocks), dim3(threads),
                       0, 0, src, dst);
}
// Separable 2-D convolution, "outer halo" variant.
// Same two-pass scheme as d_convolveOuterLinear, but the halo is loaded by
// dedicated warps instead of a flat cooperative loop:
//   warps 0-3  : top and bottom halo rows,
//   warps 4-9  : left halo columns,
//   warps 10-15: right halo columns.
// Requires exactly 16 warps per block (enforced by the static_assert).
// NOTE(review): the top/bottom halo loop covers 32 columns via lane_id, so
// this assumes BLOCK_W == 32 — confirm for other instantiations.
template <typename T, int RADIUS, unsigned int BLOCK_W, unsigned int BLOCK_H, unsigned int Y_ELEMENTS>
__global__ static void d_convolveOuterHalo(ImageView<T> src, ImageView<T> dst)
{
    const unsigned int BLOCK_H2        = BLOCK_H * Y_ELEMENTS;
    const unsigned int WARPS_PER_BLOCK = BLOCK_W * BLOCK_H / 32;  // 16
    static_assert(WARPS_PER_BLOCK == 16, "warps per block wrong");
    // for radius = 4: elements = (32+8) * (16+8) = 960 = 3840
    __shared__ T buffer[BLOCK_H2 + 2 * RADIUS][BLOCK_W + 2 * RADIUS];
    // for radius = 4: elements = (32+8) * (16) = 640 = 2560
    __shared__ T buffer2[BLOCK_H2][BLOCK_W + 2 * RADIUS];
    // total s mem per block = 6400
    // with 512 threads per block smem per sm: 25600 -> 100% occ
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    int t  = tx + ty * BLOCK_W;            // linear thread id
    const unsigned int warp_lane = t / 32; // warp index within the block
    const unsigned int lane_id   = t & 31; // lane index within the warp
    int xp = blockIdx.x * BLOCK_W + tx;
    int yp = blockIdx.y * BLOCK_H2 + ty;
    int x  = xp;
    int y  = yp;
    const unsigned int x_tile = blockIdx.x * BLOCK_W;
    // const unsigned int y_tile = blockIdx.y * BLOCK_H2;
    int blockStartX = blockIdx.x * BLOCK_W - RADIUS;
    int blockStartY = blockIdx.y * BLOCK_H2 - RADIUS;
    const int blockSizeX = BLOCK_W + 2 * RADIUS;
    // const int blockSizeY = BLOCK_H2 + 2*RADIUS;
    // copy main data (the non-halo center of the shared tile)
    for (int i = 0; i < Y_ELEMENTS; ++i)
    {
        buffer[ty + i * BLOCK_H + RADIUS][tx + RADIUS] = src.clampedRead(y + i * BLOCK_H, x);
    }
    // top and bottom halo
    if (warp_lane < 4)
    {
        const unsigned int num_warps = 4;
        for (int i = warp_lane; i < RADIUS; i += num_warps)
        {
            buffer[i][lane_id + RADIUS] = src.clampedRead(blockStartY + i, x_tile + lane_id);
            buffer[BLOCK_H2 + RADIUS + i][lane_id + RADIUS] =
                src.clampedRead(blockStartY + BLOCK_H2 + RADIUS + i, x_tile + lane_id);
        }
    }
    // Each warp fills side-halo pixels in groups of RADIUS columns per row.
    const unsigned int side_halo_rows_per_warp = 32 / RADIUS;
    int local_warp_id = lane_id / RADIUS;  // row within the warp's batch
    int local_lane_id = lane_id % RADIUS;  // column within the halo strip
    // left halo
    if (warp_lane >= 4 && warp_lane < 10)
    {
        const unsigned int num_warps = 6;
        int wid  = warp_lane - 4;
        int rows = BLOCK_H2 + 2 * RADIUS;
        for (int i = wid * side_halo_rows_per_warp + local_warp_id; i < rows; i += num_warps * side_halo_rows_per_warp)
        {
            if (local_warp_id < side_halo_rows_per_warp)
            {
                buffer[i][local_lane_id] = src.clampedRead(blockStartY + i, blockStartX + local_lane_id);
            }
        }
    }
    // right halo
    if (warp_lane >= 10 && warp_lane < 16)
    {
        const unsigned int num_warps = 6;
        int wid  = warp_lane - 10;
        int rows = BLOCK_H2 + 2 * RADIUS;
        for (int i = wid * side_halo_rows_per_warp + local_warp_id; i < rows; i += num_warps * side_halo_rows_per_warp)
        {
            if (local_warp_id < side_halo_rows_per_warp)
            {
                buffer[i][local_lane_id + RADIUS + BLOCK_W] =
                    src.clampedRead(blockStartY + i, blockStartX + local_lane_id + RADIUS + BLOCK_W);
            }
        }
    }
    __syncthreads();
    T* kernel = d_Kernel;
    // vertical pass into buffer2 (same as the "outer linear" kernel)
    for (int i = t; i < blockSizeX * BLOCK_H2; i += (BLOCK_W * BLOCK_H))
    {
        int x = i % blockSizeX;
        int y = i / blockSizeX;
        T sum = 0;
#pragma unroll
        for (int j = -RADIUS; j <= RADIUS; j++)
        {
            int kernelIndex = j + RADIUS;
            sum += buffer[y + RADIUS + j][x] * kernel[kernelIndex];
        }
        buffer2[y][x] = sum;
    }
    __syncthreads();
    // horizontal pass: write the final Y_ELEMENTS pixels per thread
    for (int i = 0; i < Y_ELEMENTS; ++i)
    {
        T sum = 0;
#pragma unroll
        for (int j = -RADIUS; j <= RADIUS; j++)
        {
            int kernelIndex = j + RADIUS;
            sum += buffer2[ty][tx + RADIUS + j] * kernel[kernelIndex];
        }
        if (dst.inImage(yp, xp)) dst(yp, xp) = sum;
        yp += BLOCK_H;
        ty += BLOCK_H;
    }
}
// Host-side launcher for d_convolveOuterHalo.
// The 32x16 block shape is mandatory: the kernel statically asserts
// 16 warps per block and dedicates specific warps to halo loading.
template <typename T, int RADIUS>
inline void convolveOuterHalo(ImageView<T> src, ImageView<T> dst)
{
    constexpr int BLOCK_W    = 32;
    constexpr int BLOCK_H    = 16;
    constexpr int Y_ELEMENTS = 2;
    const dim3 threads(BLOCK_W, BLOCK_H);
    const dim3 blocks(Saiga::iDivUp(src.width, BLOCK_W), Saiga::iDivUp(src.height, BLOCK_H * Y_ELEMENTS), 1);
    hipLaunchKernelGGL((d_convolveOuterHalo<T, RADIUS, BLOCK_W, BLOCK_H, Y_ELEMENTS>), dim3(blocks), dim3(threads), 0,
                       0, src, dst);
}
// Separable 2-D convolution, "inner" variant.
// Tiles OVERLAP by 2*RADIUS in both directions: every thread loads one pixel
// per row into shared memory, but only threads whose local coordinates are at
// least RADIUS away from the tile border produce output. No separate halo
// load is needed; the overlap provides it.
// Precondition: the host has uploaded 2*RADIUS+1 taps into d_Kernel.
template <typename T, int RADIUS, unsigned int BLOCK_W, unsigned int BLOCK_H, unsigned int Y_ELEMENTS>
__global__ static void d_convolveInner(ImageView<T> src, ImageView<T> dst)
{
    const unsigned int TILE_H  = BLOCK_H;
    const unsigned int TILE_W  = BLOCK_W;
    const unsigned int TILE_H2 = TILE_H * Y_ELEMENTS;
    const unsigned int tx      = threadIdx.x;
    const unsigned int ty      = threadIdx.y;
    // int t = tx + ty * BLOCK_W;
    // Tiles advance by (tile size - 2*RADIUS) so neighboring tiles overlap.
    int x_tile = blockIdx.x * (TILE_W - 2 * RADIUS) - RADIUS;
    int y_tile = blockIdx.y * (TILE_H2 - 2 * RADIUS) - RADIUS;
    int x      = x_tile + tx;
    int y      = y_tile + ty;
    __shared__ T buffer[TILE_H2][TILE_W];
    __shared__ T buffer2[TILE_H2 - RADIUS * 2][TILE_W];
    // copy main data
    for (int i = 0; i < Y_ELEMENTS; ++i) buffer[ty + i * TILE_H][tx] = src.clampedRead(y + i * TILE_H, x);
    __syncthreads();
    T* kernel = d_Kernel;
    // convolve along y axis
    // if(ty > RADIUS && ty < TILE_H2 - RADIUS)
    // {
    // int oy = ty - RADIUS;
    for (int i = 0; i < Y_ELEMENTS; ++i)
    {
        // int gx = x;
        // int gy = y + i * TILE_H;
        int lx = tx;
        int ly = ty + i * TILE_H;
        // vertical halo rows produce no intermediate result
        if (ly < RADIUS || ly >= TILE_H2 - RADIUS) continue;
        T sum = 0;
#pragma unroll
        for (int j = -RADIUS; j <= RADIUS; j++)
        {
            int kernelIndex = j + RADIUS;
            sum += buffer[ly + j][lx] * kernel[kernelIndex];
        }
        buffer2[ly - RADIUS][lx] = sum;
    }
    __syncthreads();
    // horizontal pass: only fully interior threads write output
    for (int i = 0; i < Y_ELEMENTS; ++i)
    {
        int gx = x;
        int gy = y + i * TILE_H;
        int lx = tx;
        int ly = ty + i * TILE_H;
        if (ly < RADIUS || ly >= TILE_H2 - RADIUS) continue;
        if (lx < RADIUS || lx >= TILE_W - RADIUS) continue;
        T sum = 0;
#pragma unroll
        for (int j = -RADIUS; j <= RADIUS; j++)
        {
            int kernelIndex = j + RADIUS;
            sum += buffer2[ly - RADIUS][lx + j] * kernel[kernelIndex];
        }
        // if(dst.inImage(gx,gy))
        // dst(g,yp) = sum;
        dst.clampedWrite(gy, gx, sum);
    }
#if 0
    for(int i =0; i < Y_ELEMENTS; ++i){
        T sum = 0;
#    pragma unroll
        for (int j=-RADIUS;j<=RADIUS;j++){
            int kernelIndex = j + RADIUS;
            sum += buffer2[ty][tx + RADIUS + j] * kernel[kernelIndex];
        }
        if(dst.inImage(xp,yp))
            dst(xp,yp) = sum;
        yp += BLOCK_H;
        ty += BLOCK_H;
    }
#endif
}
// Host-side launcher for d_convolveInner. Grid cells overlap by 2*RADIUS
// because the kernel only emits "inner" pixels of each tile. LOW_OCC picks a
// wider, flatter block doing more work per thread at lower occupancy.
template <typename T, int RADIUS, bool LOW_OCC = false>
inline void convolveInner(ImageView<T> src, ImageView<T> dst)
{
    constexpr int BLOCK_W    = LOW_OCC ? 64 : 32;
    constexpr int BLOCK_H    = LOW_OCC ? 8 : 16;
    constexpr int Y_ELEMENTS = LOW_OCC ? 4 : 2;
    const int stepX = BLOCK_W - 2 * RADIUS;               // useful tile width
    const int stepY = BLOCK_H * Y_ELEMENTS - 2 * RADIUS;  // useful tile height
    const dim3 threads(BLOCK_W, BLOCK_H);
    const dim3 blocks(Saiga::iDivUp(src.width, stepX), Saiga::iDivUp(src.height, stepY), 1);
    hipLaunchKernelGGL((d_convolveInner<T, RADIUS, BLOCK_W, BLOCK_H, Y_ELEMENTS>), dim3(blocks), dim3(threads), 0, 0,
                       src, dst);
}
// Separable 2-D convolution, warp-shuffle variant.
// The horizontal pass runs entirely in registers: each thread holds one pixel
// per row and fetches its neighbors from other lanes via shfl(), so only the
// vertical pass needs shared memory (buffer2). Tiles overlap by 2*RADIUS as
// in d_convolveInner.
// NOTE(review): lane exchange along x assumes BLOCK_W == 32 (one warp per
// row); out-of-range lane indices (lane_id + j can be negative or > 31) rely
// on the behavior of the project's shfl() helper — TODO confirm.
template <typename T, int RADIUS, unsigned int BLOCK_W, unsigned int BLOCK_H, unsigned int Y_ELEMENTS>
__global__ static void d_convolveInnerShuffle(ImageView<T> src, ImageView<T> dst)
{
    const unsigned int TILE_H  = BLOCK_H;
    const unsigned int TILE_W  = BLOCK_W;
    const unsigned int TILE_H2 = TILE_H * Y_ELEMENTS;
    const unsigned int tx      = threadIdx.x;
    const unsigned int ty      = threadIdx.y;
    // int t = tx + ty * BLOCK_W;
    unsigned int lane_id = threadIdx.x % 32;
    int x_tile           = blockIdx.x * (TILE_W - 2 * RADIUS) - RADIUS;
    int y_tile           = blockIdx.y * (TILE_H2 - 2 * RADIUS) - RADIUS;
    int x                = x_tile + tx;
    int y                = y_tile + ty;
    // __shared__ T buffer[TILE_H2][TILE_W];
    // __shared__ T buffer2[TILE_H2][TILE_W - RADIUS * 2 + 1];
    __shared__ T buffer2[TILE_H2][TILE_W - RADIUS * 2];
    // __shared__ T buffer2[TILE_W - RADIUS * 2][TILE_H2];
    // one register-resident pixel per output row
    T localElements[Y_ELEMENTS];
    for (int i = 0; i < Y_ELEMENTS; ++i)
    {
        localElements[i] = src.clampedRead(y + i * TILE_H, x);
    }
    // conv row: horizontal pass via warp shuffles, result into buffer2
    T* kernel = d_Kernel;
    for (int i = 0; i < Y_ELEMENTS; ++i)
    {
        int lx = tx;
        int ly = ty + i * TILE_H;
        T sum  = 0;
#pragma unroll
        for (int j = -RADIUS; j <= RADIUS; j++)
        {
            int kernelIndex = j + RADIUS;
            auto value      = shfl(localElements[i], lane_id + j);
            sum += value * kernel[kernelIndex];
        }
        // horizontal halo lanes only feed shuffles; they store nothing
        if (lx < RADIUS || lx >= TILE_W - RADIUS) continue;
        buffer2[ly][lx - RADIUS] = sum;
        // buffer2[lx- RADIUS][ly] = sum;
    }
    __syncthreads();
    // conv col: vertical pass over buffer2, interior threads write output
    for (int i = 0; i < Y_ELEMENTS; ++i)
    {
        int gx = x;
        int gy = y + i * TILE_H;
        int lx = tx;
        int ly = ty + i * TILE_H;
        if (ly < RADIUS || ly >= TILE_H2 - RADIUS) continue;
        if (lx < RADIUS || lx >= TILE_W - RADIUS) continue;
        T sum = 0;
#if 1
#    pragma unroll
        for (int j = -RADIUS; j <= RADIUS; j++)
        {
            int kernelIndex = j + RADIUS;
            auto value      = buffer2[ly + j][lx - RADIUS];
            // auto value = buffer2[lx - RADIUS][ly + j];
            sum += value * kernel[kernelIndex];
        }
#endif
        dst.clampedWrite(gy, gx, sum);
    }
}
// | ---- BLOCK_W * X_ELEMENTS * vectorSize ---- |
// [ x x x x x x x x x x x x x x x x x x x x x x ]
//
// Vectorized single-pass separable convolution.
// Each thread loads one VectorType (X_ELEMENTS pixels of T) per row, and the
// left/right halo is obtained purely via warp shuffles, so every pixel is
// read from global memory exactly once. The horizontal pass runs in
// registers, results are staged in shared memory, and the vertical pass
// writes a full VectorType per thread back to dst.
// Requirements (not statically checked): RADIUS divisible by X_ELEMENTS;
// src/dst rows aligned to sizeof(VectorType) (asserted at runtime);
// BLOCK_W equal to the warp size so lane arithmetic matches x-indices.
// Fix vs. original: removed a leftover no-op debug block
// (`if (rowId <= RADIUS && colId == 508) { ... empty loop ... }`) that ran
// for every thread and row but only contained commented-out printfs.
template <typename T, int RADIUS, unsigned int BLOCK_W, unsigned int BLOCK_H, unsigned int X_ELEMENTS,
          unsigned int Y_ELEMENTS, typename VectorType = int2>
//__launch_bounds__(BLOCK_W* BLOCK_H, 3)
__global__ static void d_convolveInnerShuffle2(ImageView<T> src, ImageView<T> dst)
{
    const unsigned int TILE_W  = BLOCK_W;
    const unsigned int TILE_H  = BLOCK_H;
    const unsigned int TILE_W2 = TILE_W * X_ELEMENTS;
    const unsigned int TILE_H2 = TILE_H * Y_ELEMENTS;
    const unsigned int tx      = threadIdx.x;
    const unsigned int ty      = threadIdx.y;
    // static_assert( sizeof(VectorType) / sizeof(T) == X_ELEMENTS);
    unsigned int lane_id = threadIdx.x % 32;
    // start position of tile (tiles overlap by 2*RADIUS)
    int x_tile = blockIdx.x * (TILE_W2 - 2 * RADIUS) - RADIUS;
    int y_tile = blockIdx.y * (TILE_H2 - 2 * RADIUS) - RADIUS;
    // global position of thread
    int x     = x_tile + tx * X_ELEMENTS;
    int y     = y_tile + ty;
    T* kernel = d_Kernel;
    // for vec4 radius 8:
    // (16 * Y_ELEMENTS) * (32 - 4) * 16
    // Y 3 -> 21504 100 occ
    // Y 4 -> 28672 75 occ
    // Y 5 -> 35840 50 occ
    // Y 6 -> 43008 50 occ
    // Y 8 -> 57344 failed
    __shared__ VectorType buffer2[TILE_H2][TILE_W - 2 * RADIUS / X_ELEMENTS];
    // own element + left and right radius
    VectorType localElements[Y_ELEMENTS][1 + 2 * RADIUS / X_ELEMENTS];
    // without this unroll we get a strange compile error
#pragma unroll
    for (int i = 0; i < Y_ELEMENTS; ++i)
    {
        // clamp the read coordinate into the (vector-aligned) image
        int rowId = y + i * TILE_H;
        rowId     = ::min(rowId, src.height - 1);
        rowId     = ::max(0, rowId);
        int colId = ::max(0, x);
        int xb    = Saiga::iAlignUp(src.width, X_ELEMENTS) - X_ELEMENTS;
        colId     = ::min(colId, xb);
        T* row    = src.rowPtr(rowId);
        CUDA_ASSERT(size_t(row) % sizeof(VectorType) == 0);
        T* elem           = row + colId;
        T* localElementsT = reinterpret_cast<T*>(localElements[i]);
        // center of localElements; left and right filled by shuffles below
        VectorType& myValue = localElements[i][RADIUS / X_ELEMENTS];
        // load own value from global memory (the only global memory read)
        CUDA_ASSERT(size_t(elem) % sizeof(VectorType) == 0);
        myValue = reinterpret_cast<VectorType*>(elem)[0];
        // replicate the edge pixel for threads left/right of the image
        if (x < 0)
        {
            for (int k = 0; k < X_ELEMENTS; ++k)
            {
                localElementsT[RADIUS + k] = localElementsT[RADIUS];
            }
        }
        if (x >= src.width)
        {
            for (int k = 0; k < X_ELEMENTS; ++k)
            {
                localElementsT[RADIUS + k] = localElementsT[RADIUS + X_ELEMENTS - 1];
            }
        }
        // shuffle left
        for (int j = 0; j < RADIUS / X_ELEMENTS; ++j)
        {
            localElements[i][j] = shfl(myValue, lane_id + j - RADIUS / X_ELEMENTS);
        }
        // shuffle right
        for (int j = 0; j < RADIUS / X_ELEMENTS; ++j)
        {
            localElements[i][j + RADIUS / X_ELEMENTS + 1] = shfl(myValue, lane_id + j + 1);
        }
        // horizontal convolution, fully in registers
        T sum[X_ELEMENTS];
#pragma unroll
        for (int j = 0; j < X_ELEMENTS; ++j)
        {
            sum[j] = 0;
        }
#pragma unroll
        for (int j = -RADIUS; j <= RADIUS; j++)
        {
            T kernelValue = kernel[j + RADIUS];
#pragma unroll
            for (int k = 0; k < X_ELEMENTS; ++k)
            {
                sum[k] += localElementsT[RADIUS + j + k] * kernelValue;
            }
        }
        // write to shared memory if this thread is 'inner' (not in the halo)
        int lx = tx;
        int ly = ty + i * TILE_H;
        if (lx < RADIUS / X_ELEMENTS || lx >= TILE_W - RADIUS / X_ELEMENTS) continue;
        if (x >= src.width) continue;
        buffer2[ly][lx - RADIUS / X_ELEMENTS] = reinterpret_cast<VectorType*>(sum)[0];
    }
    // the only sync in this kernel
    __syncthreads();
    // vertical pass: interior threads convolve columns of buffer2 and write
    // a full VectorType per row to dst
#pragma unroll
    for (int i = 0; i < Y_ELEMENTS; ++i)
    {
        int rowId = y + i * TILE_H;
        rowId     = ::min(rowId, src.height - 1);
        rowId     = ::max(0, rowId);
        int colId = ::max(0, x);
        int xb    = Saiga::iAlignUp(src.width, X_ELEMENTS) - X_ELEMENTS;
        colId     = ::min(colId, xb);
        // continue if this thread is not a 'inner thread'
        int lx = tx;
        int ly = ty + i * TILE_H;
        if (lx < RADIUS / X_ELEMENTS || lx >= TILE_W - RADIUS / X_ELEMENTS) continue;
        if (ly < RADIUS || ly >= TILE_H2 - RADIUS) continue;
        // continue if this thread is not in image
        if (x >= src.width || y + i * TILE_H >= src.height) continue;
        T* row  = dst.rowPtr(rowId);
        T* elem = row + colId;
        T sum[X_ELEMENTS];
        for (int j = 0; j < X_ELEMENTS; ++j)
        {
            sum[j] = 0;
        }
        // simple column convolution in shared memory
        for (int j = -RADIUS; j <= RADIUS; j++)
        {
            T kernelValue     = kernel[j + RADIUS];
            VectorType valueV = buffer2[ly + j][lx - RADIUS / X_ELEMENTS];
            for (int k = 0; k < X_ELEMENTS; ++k)
            {
                auto v = reinterpret_cast<T*>(&valueV)[k];
                sum[k] += v * kernelValue;
            }
        }
        reinterpret_cast<VectorType*>(elem)[0] = reinterpret_cast<VectorType*>(sum)[0];
    }
}
// Host-side launcher for the vectorized d_convolveInnerShuffle2 kernel.
// VectorType determines how many T pixels each thread processes per load
// (X_ELEMENTS = sizeof(VectorType)/sizeof(T)); the shared-memory bank size
// is configured to match the vector width before launch.
// NOTE(review): the return value of hipFuncSetSharedMemConfig is ignored;
// any launch/runtime error is surfaced by CUDA_SYNC_CHECK_ERROR() at the end.
template <typename T, int RADIUS, typename VectorType = int>
inline void convolveInnerShuffle(ImageView<T> src, ImageView<T> dst)
{
    int w = src.width;
    int h = src.height;
    // int p = src.pitchBytes;
    const int BLOCK_W    = 32;
    const int BLOCK_H    = 16;
    const int X_ELEMENTS = sizeof(VectorType) / sizeof(T);
    const int Y_ELEMENTS = 4;
    // overlap tiles by 2*RADIUS in both directions (inner-pixel scheme)
    dim3 blocks(Saiga::iDivUp(w, BLOCK_W * X_ELEMENTS - 2 * RADIUS),
                Saiga::iDivUp(h, BLOCK_H * Y_ELEMENTS - 2 * RADIUS), 1);
    dim3 threads(BLOCK_W, BLOCK_H);
    if (sizeof(VectorType) >= 8)
        hipFuncSetSharedMemConfig(
            d_convolveInnerShuffle2<T, RADIUS, BLOCK_W, BLOCK_H, X_ELEMENTS, Y_ELEMENTS, VectorType>,
            hipSharedMemBankSizeEightByte);
    else
        hipFuncSetSharedMemConfig(
            d_convolveInnerShuffle2<T, RADIUS, BLOCK_W, BLOCK_H, X_ELEMENTS, Y_ELEMENTS, VectorType>,
            hipSharedMemBankSizeFourByte);
    hipLaunchKernelGGL(( d_convolveInnerShuffle2<T, RADIUS, BLOCK_W, BLOCK_H, X_ELEMENTS, Y_ELEMENTS, VectorType>)
        , dim3(blocks), dim3(threads), 0, 0, src, dst);
    // d_convolveInnerShuffle3<T,12,BLOCK_W,BLOCK_H,4,Y_ELEMENTS,int4> <<<blocks, threads>>>(src,dst);
    CUDA_SYNC_CHECK_ERROR();
}
// Uploads the filter taps into constant memory (device-to-device copy) and
// dispatches to the radius-templated "outer linear" kernel. Radii outside
// 1..9 match no case and are silently ignored — no kernel is launched.
void convolveSinglePassSeparateOuterLinear(ImageView<float> src, ImageView<float> dst, Saiga::ArrayView<float> kernel,
                                           int radius)
{
    CHECK_CUDA_ERROR(
        hipMemcpyToSymbol(d_Kernel, kernel.data(), kernel.size() * sizeof(float), 0, hipMemcpyDeviceToDevice));
    switch (radius)
    {
        case 1: CUDA::convolveOuterLinear<float, 1>(src, dst); break;
        case 2: CUDA::convolveOuterLinear<float, 2>(src, dst); break;
        case 3: CUDA::convolveOuterLinear<float, 3>(src, dst); break;
        case 4: CUDA::convolveOuterLinear<float, 4>(src, dst); break;
        case 5: CUDA::convolveOuterLinear<float, 5>(src, dst); break;
        case 6: CUDA::convolveOuterLinear<float, 6>(src, dst); break;
        case 7: CUDA::convolveOuterLinear<float, 7>(src, dst); break;
        case 8: CUDA::convolveOuterLinear<float, 8>(src, dst); break;
        case 9: CUDA::convolveOuterLinear<float, 9>(src, dst); break;
    }
}
// Uploads the filter taps into constant memory and dispatches to the
// radius-templated "outer halo" kernel. Radii outside 1..8 are silently
// ignored — no kernel is launched.
void convolveSinglePassSeparateOuterHalo(ImageView<float> src, ImageView<float> dst, Saiga::ArrayView<float> kernel,
                                         int radius)
{
    CHECK_CUDA_ERROR(
        hipMemcpyToSymbol(d_Kernel, kernel.data(), kernel.size() * sizeof(float), 0, hipMemcpyDeviceToDevice));
    switch (radius)
    {
        case 1: CUDA::convolveOuterHalo<float, 1>(src, dst); break;
        case 2: CUDA::convolveOuterHalo<float, 2>(src, dst); break;
        case 3: CUDA::convolveOuterHalo<float, 3>(src, dst); break;
        case 4: CUDA::convolveOuterHalo<float, 4>(src, dst); break;
        case 5: CUDA::convolveOuterHalo<float, 5>(src, dst); break;
        case 6: CUDA::convolveOuterHalo<float, 6>(src, dst); break;
        case 7: CUDA::convolveOuterHalo<float, 7>(src, dst); break;
        case 8: CUDA::convolveOuterHalo<float, 8>(src, dst); break;
    }
}
// Uploads the filter taps into constant memory and dispatches to the
// radius-templated "inner" kernel (default occupancy configuration).
// Radii outside 1..8 are silently ignored — no kernel is launched.
void convolveSinglePassSeparateInner(ImageView<float> src, ImageView<float> dst, Saiga::ArrayView<float> kernel,
                                     int radius)
{
    CHECK_CUDA_ERROR(
        hipMemcpyToSymbol(d_Kernel, kernel.data(), kernel.size() * sizeof(float), 0, hipMemcpyDeviceToDevice));
    switch (radius)
    {
        case 1: CUDA::convolveInner<float, 1>(src, dst); break;
        case 2: CUDA::convolveInner<float, 2>(src, dst); break;
        case 3: CUDA::convolveInner<float, 3>(src, dst); break;
        case 4: CUDA::convolveInner<float, 4>(src, dst); break;
        case 5: CUDA::convolveInner<float, 5>(src, dst); break;
        case 6: CUDA::convolveInner<float, 6>(src, dst); break;
        case 7: CUDA::convolveInner<float, 7>(src, dst); break;
        case 8: CUDA::convolveInner<float, 8>(src, dst); break;
    }
}
// Same as convolveSinglePassSeparateInner, but selects the LOW_OCC=true
// configuration of the inner kernel (64x8 blocks, 4 rows per thread).
// Radii outside 1..8 are silently ignored — no kernel is launched.
void convolveSinglePassSeparateInner75(ImageView<float> src, ImageView<float> dst, Saiga::ArrayView<float> kernel,
                                       int radius)
{
    CHECK_CUDA_ERROR(
        hipMemcpyToSymbol(d_Kernel, kernel.data(), kernel.size() * sizeof(float), 0, hipMemcpyDeviceToDevice));
    switch (radius)
    {
        case 1: CUDA::convolveInner<float, 1, true>(src, dst); break;
        case 2: CUDA::convolveInner<float, 2, true>(src, dst); break;
        case 3: CUDA::convolveInner<float, 3, true>(src, dst); break;
        case 4: CUDA::convolveInner<float, 4, true>(src, dst); break;
        case 5: CUDA::convolveInner<float, 5, true>(src, dst); break;
        case 6: CUDA::convolveInner<float, 6, true>(src, dst); break;
        case 7: CUDA::convolveInner<float, 7, true>(src, dst); break;
        case 8: CUDA::convolveInner<float, 8, true>(src, dst); break;
    }
}
// Uploads the filter taps into constant memory and dispatches to the
// shuffle-based kernel, choosing the widest vector type the radius allows
// (int4 when radius % 4 == 0, int2 when radius % 2 == 0, otherwise int).
// Radii with no matching case (17-19, 21-23, > 24) are silently ignored.
void convolveSinglePassSeparateInnerShuffle(ImageView<float> src, ImageView<float> dst, Saiga::ArrayView<float> kernel,
                                            int radius)
{
    CHECK_CUDA_ERROR(
        hipMemcpyToSymbol(d_Kernel, kernel.data(), kernel.size() * sizeof(float), 0, hipMemcpyDeviceToDevice));
    switch (radius)
    {
        case 0: CUDA::convolveInnerShuffle<float, 0, int>(src, dst); break;
        case 1: CUDA::convolveInnerShuffle<float, 1, int>(src, dst); break;
        case 2: CUDA::convolveInnerShuffle<float, 2, int2>(src, dst); break;
        case 3: CUDA::convolveInnerShuffle<float, 3, int>(src, dst); break;
        case 4: CUDA::convolveInnerShuffle<float, 4, int4>(src, dst); break;
        case 5: CUDA::convolveInnerShuffle<float, 5, int>(src, dst); break;
        case 6: CUDA::convolveInnerShuffle<float, 6, int2>(src, dst); break;
        case 7: CUDA::convolveInnerShuffle<float, 7, int>(src, dst); break;
        case 8: CUDA::convolveInnerShuffle<float, 8, int4>(src, dst); break;
        case 9: CUDA::convolveInnerShuffle<float, 9, int>(src, dst); break;
        case 10: CUDA::convolveInnerShuffle<float, 10, int2>(src, dst); break;
        case 11: CUDA::convolveInnerShuffle<float, 11, int>(src, dst); break;
        case 12: CUDA::convolveInnerShuffle<float, 12, int2>(src, dst); break;
        case 13: CUDA::convolveInnerShuffle<float, 13, int>(src, dst); break;
        case 14: CUDA::convolveInnerShuffle<float, 14, int2>(src, dst); break;
        case 15: CUDA::convolveInnerShuffle<float, 15, int>(src, dst); break;
        case 16: CUDA::convolveInnerShuffle<float, 16, int4>(src, dst); break;
        // case 17: CUDA::convolveInnerShuffle<float,17,int>(src,dst); break;
        // case 18: CUDA::convolveInnerShuffle<float,18,int2>(src,dst); break;
        // case 19: CUDA::convolveInnerShuffle<float,19,int>(src,dst); break;
        case 20: CUDA::convolveInnerShuffle<float, 20, int4>(src, dst); break;
        // case 21: CUDA::convolveInnerShuffle<float,21,int>(src,dst); break;
        // case 22: CUDA::convolveInnerShuffle<float,22,int2>(src,dst); break;
        // case 23: CUDA::convolveInnerShuffle<float,23,int>(src,dst); break;
        case 24: CUDA::convolveInnerShuffle<float, 24, int4>(src, dst); break;
    }
}
} // namespace CUDA
} // namespace Saiga
| 70cb1fd3fb7f7bbe46da3ec849da780fc9136c78.cu | /**
* Copyright (c) 2017 Darius Rückert
* Licensed under the MIT License.
* See LICENSE file for more information.
*/
#include "saiga/cuda/device_helper.h"
#include "saiga/cuda/imageProcessing/imageProcessing.h"
#include "saiga/cuda/shfl_helper.h"
namespace Saiga
{
namespace CUDA
{
//todo maybe change
static __constant__ float d_Kernel[SAIGA_MAX_KERNEL_SIZE];
// Separable 2-D convolution, "outer linear" variant.
// Each block computes a BLOCK_W x (BLOCK_H * Y_ELEMENTS) output tile:
//  1) the tile plus a RADIUS-wide halo is staged into shared memory,
//     clamping out-of-image reads to the nearest edge pixel,
//  2) a vertical pass with the constant-memory filter fills `buffer2`,
//  3) a horizontal pass writes the final pixels (bounds-checked).
// Precondition: the host has uploaded 2*RADIUS+1 taps into d_Kernel.
// NOTE(review): `T* kernel = d_Kernel` only compiles for T == float — confirm
// the template is never instantiated for other element types.
template <typename T, int RADIUS, unsigned int BLOCK_W, unsigned int BLOCK_H, unsigned int Y_ELEMENTS>
__global__ static void d_convolveOuterLinear(ImageView<T> src, ImageView<T> dst)
{
    // Each thread produces Y_ELEMENTS vertically-strided output rows.
    const unsigned BLOCK_H2 = BLOCK_H * Y_ELEMENTS;
    // for radius = 4: elements = (32+8) * (16+8) = 960 = 3840
    __shared__ T buffer[BLOCK_H2 + 2 * RADIUS][BLOCK_W + 2 * RADIUS];
    // for radius = 4: elements = (32+8) * (16) = 640 = 2560
    __shared__ T buffer2[BLOCK_H2][BLOCK_W + 2 * RADIUS];
    // total s mem per block = 6400
    // with 512 threads per block smem per sm: 25600 -> 100% occ
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    int t  = tx + ty * BLOCK_W;           // linear thread id within the block
    int xp = blockIdx.x * BLOCK_W + tx;   // global output column
    int yp = blockIdx.y * BLOCK_H2 + ty;  // global output row (first of Y_ELEMENTS)
    int blockStartX = blockIdx.x * BLOCK_W - RADIUS;   // top-left of tile incl. halo
    int blockStartY = blockIdx.y * BLOCK_H2 - RADIUS;
    const int blockSizeX = BLOCK_W + 2 * RADIUS;
    const int blockSizeY = BLOCK_H2 + 2 * RADIUS;
    // fill buffer: all threads cooperatively load tile + halo
    for (int i = t; i < blockSizeX * blockSizeY; i += (BLOCK_W * BLOCK_H))
    {
        int x  = i % blockSizeX;
        int y  = i / blockSizeX;
        int gx = x + blockStartX;
        int gy = y + blockStartY;
        src.clampToEdge(gy, gx);
        buffer[y][x] = src(gy, gx);
    }
    __syncthreads();
    T* kernel = d_Kernel;
    // vertical pass: convolve columns of `buffer` into `buffer2`
    for (int i = t; i < blockSizeX * BLOCK_H2; i += (BLOCK_W * BLOCK_H))
    {
        int x = i % blockSizeX;
        int y = i / blockSizeX;
        T sum = 0;
#pragma unroll
        for (int j = -RADIUS; j <= RADIUS; j++)
        {
            int kernelIndex = j + RADIUS;
            sum += buffer[y + RADIUS + j][x] * kernel[kernelIndex];
        }
        buffer2[y][x] = sum;
    }
    __syncthreads();
    // horizontal pass: each thread writes its Y_ELEMENTS output pixels
    for (int i = 0; i < Y_ELEMENTS; ++i)
    {
        T sum = 0;
#pragma unroll
        for (int j = -RADIUS; j <= RADIUS; j++)
        {
            int kernelIndex = j + RADIUS;
            sum += buffer2[ty][tx + RADIUS + j] * kernel[kernelIndex];
        }
        if (dst.inImage(yp, xp)) dst(yp, xp) = sum;
        yp += BLOCK_H;
        ty += BLOCK_H;
    }
}
// Host-side launcher for d_convolveOuterLinear.
// Each block covers a BLOCK_W x (BLOCK_H * Y_ELEMENTS) output tile; the grid
// is sized with ceil-division so partial edge tiles are handled by the
// kernel's own bounds check.
template <typename T, int RADIUS>
inline void convolveOuterLinear(ImageView<T> src, ImageView<T> dst)
{
    constexpr int BLOCK_W    = 32;
    constexpr int BLOCK_H    = 16;
    constexpr int Y_ELEMENTS = 2;
    const dim3 threads(BLOCK_W, BLOCK_H);
    const dim3 blocks(Saiga::iDivUp(src.width, BLOCK_W), Saiga::iDivUp(src.height, BLOCK_H * Y_ELEMENTS), 1);
    d_convolveOuterLinear<T, RADIUS, BLOCK_W, BLOCK_H, Y_ELEMENTS><<<blocks, threads>>>(src, dst);
}
// Separable 2-D convolution, "outer halo" variant.
// Same two-pass scheme as d_convolveOuterLinear, but the halo is loaded by
// dedicated warps instead of a flat cooperative loop:
//   warps 0-3  : top and bottom halo rows,
//   warps 4-9  : left halo columns,
//   warps 10-15: right halo columns.
// Requires exactly 16 warps per block (enforced by the static_assert).
// NOTE(review): the top/bottom halo loop covers 32 columns via lane_id, so
// this assumes BLOCK_W == 32 — confirm for other instantiations.
template <typename T, int RADIUS, unsigned int BLOCK_W, unsigned int BLOCK_H, unsigned int Y_ELEMENTS>
__global__ static void d_convolveOuterHalo(ImageView<T> src, ImageView<T> dst)
{
    const unsigned int BLOCK_H2        = BLOCK_H * Y_ELEMENTS;
    const unsigned int WARPS_PER_BLOCK = BLOCK_W * BLOCK_H / 32;  // 16
    static_assert(WARPS_PER_BLOCK == 16, "warps per block wrong");
    // for radius = 4: elements = (32+8) * (16+8) = 960 = 3840
    __shared__ T buffer[BLOCK_H2 + 2 * RADIUS][BLOCK_W + 2 * RADIUS];
    // for radius = 4: elements = (32+8) * (16) = 640 = 2560
    __shared__ T buffer2[BLOCK_H2][BLOCK_W + 2 * RADIUS];
    // total s mem per block = 6400
    // with 512 threads per block smem per sm: 25600 -> 100% occ
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    int t  = tx + ty * BLOCK_W;            // linear thread id
    const unsigned int warp_lane = t / 32; // warp index within the block
    const unsigned int lane_id   = t & 31; // lane index within the warp
    int xp = blockIdx.x * BLOCK_W + tx;
    int yp = blockIdx.y * BLOCK_H2 + ty;
    int x  = xp;
    int y  = yp;
    const unsigned int x_tile = blockIdx.x * BLOCK_W;
    // const unsigned int y_tile = blockIdx.y * BLOCK_H2;
    int blockStartX = blockIdx.x * BLOCK_W - RADIUS;
    int blockStartY = blockIdx.y * BLOCK_H2 - RADIUS;
    const int blockSizeX = BLOCK_W + 2 * RADIUS;
    // const int blockSizeY = BLOCK_H2 + 2*RADIUS;
    // copy main data (the non-halo center of the shared tile)
    for (int i = 0; i < Y_ELEMENTS; ++i)
    {
        buffer[ty + i * BLOCK_H + RADIUS][tx + RADIUS] = src.clampedRead(y + i * BLOCK_H, x);
    }
    // top and bottom halo
    if (warp_lane < 4)
    {
        const unsigned int num_warps = 4;
        for (int i = warp_lane; i < RADIUS; i += num_warps)
        {
            buffer[i][lane_id + RADIUS] = src.clampedRead(blockStartY + i, x_tile + lane_id);
            buffer[BLOCK_H2 + RADIUS + i][lane_id + RADIUS] =
                src.clampedRead(blockStartY + BLOCK_H2 + RADIUS + i, x_tile + lane_id);
        }
    }
    // Each warp fills side-halo pixels in groups of RADIUS columns per row.
    const unsigned int side_halo_rows_per_warp = 32 / RADIUS;
    int local_warp_id = lane_id / RADIUS;  // row within the warp's batch
    int local_lane_id = lane_id % RADIUS;  // column within the halo strip
    // left halo
    if (warp_lane >= 4 && warp_lane < 10)
    {
        const unsigned int num_warps = 6;
        int wid  = warp_lane - 4;
        int rows = BLOCK_H2 + 2 * RADIUS;
        for (int i = wid * side_halo_rows_per_warp + local_warp_id; i < rows; i += num_warps * side_halo_rows_per_warp)
        {
            if (local_warp_id < side_halo_rows_per_warp)
            {
                buffer[i][local_lane_id] = src.clampedRead(blockStartY + i, blockStartX + local_lane_id);
            }
        }
    }
    // right halo
    if (warp_lane >= 10 && warp_lane < 16)
    {
        const unsigned int num_warps = 6;
        int wid  = warp_lane - 10;
        int rows = BLOCK_H2 + 2 * RADIUS;
        for (int i = wid * side_halo_rows_per_warp + local_warp_id; i < rows; i += num_warps * side_halo_rows_per_warp)
        {
            if (local_warp_id < side_halo_rows_per_warp)
            {
                buffer[i][local_lane_id + RADIUS + BLOCK_W] =
                    src.clampedRead(blockStartY + i, blockStartX + local_lane_id + RADIUS + BLOCK_W);
            }
        }
    }
    __syncthreads();
    T* kernel = d_Kernel;
    // vertical pass into buffer2 (same as the "outer linear" kernel)
    for (int i = t; i < blockSizeX * BLOCK_H2; i += (BLOCK_W * BLOCK_H))
    {
        int x = i % blockSizeX;
        int y = i / blockSizeX;
        T sum = 0;
#pragma unroll
        for (int j = -RADIUS; j <= RADIUS; j++)
        {
            int kernelIndex = j + RADIUS;
            sum += buffer[y + RADIUS + j][x] * kernel[kernelIndex];
        }
        buffer2[y][x] = sum;
    }
    __syncthreads();
    // horizontal pass: write the final Y_ELEMENTS pixels per thread
    for (int i = 0; i < Y_ELEMENTS; ++i)
    {
        T sum = 0;
#pragma unroll
        for (int j = -RADIUS; j <= RADIUS; j++)
        {
            int kernelIndex = j + RADIUS;
            sum += buffer2[ty][tx + RADIUS + j] * kernel[kernelIndex];
        }
        if (dst.inImage(yp, xp)) dst(yp, xp) = sum;
        yp += BLOCK_H;
        ty += BLOCK_H;
    }
}
// Host-side launcher for d_convolveOuterHalo.
// The 32x16 block shape is mandatory: the kernel statically asserts
// 16 warps per block and dedicates specific warps to halo loading.
template <typename T, int RADIUS>
inline void convolveOuterHalo(ImageView<T> src, ImageView<T> dst)
{
    constexpr int BLOCK_W    = 32;
    constexpr int BLOCK_H    = 16;
    constexpr int Y_ELEMENTS = 2;
    const dim3 threads(BLOCK_W, BLOCK_H);
    const dim3 blocks(Saiga::iDivUp(src.width, BLOCK_W), Saiga::iDivUp(src.height, BLOCK_H * Y_ELEMENTS), 1);
    d_convolveOuterHalo<T, RADIUS, BLOCK_W, BLOCK_H, Y_ELEMENTS><<<blocks, threads>>>(src, dst);
}
// Separable 2-D convolution, "inner" variant.
// Tiles OVERLAP by 2*RADIUS in both directions: every thread loads one pixel
// per row into shared memory, but only threads whose local coordinates are at
// least RADIUS away from the tile border produce output. No separate halo
// load is needed; the overlap provides it.
// Precondition: the host has uploaded 2*RADIUS+1 taps into d_Kernel.
template <typename T, int RADIUS, unsigned int BLOCK_W, unsigned int BLOCK_H, unsigned int Y_ELEMENTS>
__global__ static void d_convolveInner(ImageView<T> src, ImageView<T> dst)
{
    const unsigned int TILE_H  = BLOCK_H;
    const unsigned int TILE_W  = BLOCK_W;
    const unsigned int TILE_H2 = TILE_H * Y_ELEMENTS;
    const unsigned int tx      = threadIdx.x;
    const unsigned int ty      = threadIdx.y;
    // int t = tx + ty * BLOCK_W;
    // Tiles advance by (tile size - 2*RADIUS) so neighboring tiles overlap.
    int x_tile = blockIdx.x * (TILE_W - 2 * RADIUS) - RADIUS;
    int y_tile = blockIdx.y * (TILE_H2 - 2 * RADIUS) - RADIUS;
    int x      = x_tile + tx;
    int y      = y_tile + ty;
    __shared__ T buffer[TILE_H2][TILE_W];
    __shared__ T buffer2[TILE_H2 - RADIUS * 2][TILE_W];
    // copy main data
    for (int i = 0; i < Y_ELEMENTS; ++i) buffer[ty + i * TILE_H][tx] = src.clampedRead(y + i * TILE_H, x);
    __syncthreads();
    T* kernel = d_Kernel;
    // convolve along y axis
    // if(ty > RADIUS && ty < TILE_H2 - RADIUS)
    // {
    // int oy = ty - RADIUS;
    for (int i = 0; i < Y_ELEMENTS; ++i)
    {
        // int gx = x;
        // int gy = y + i * TILE_H;
        int lx = tx;
        int ly = ty + i * TILE_H;
        // vertical halo rows produce no intermediate result
        if (ly < RADIUS || ly >= TILE_H2 - RADIUS) continue;
        T sum = 0;
#pragma unroll
        for (int j = -RADIUS; j <= RADIUS; j++)
        {
            int kernelIndex = j + RADIUS;
            sum += buffer[ly + j][lx] * kernel[kernelIndex];
        }
        buffer2[ly - RADIUS][lx] = sum;
    }
    __syncthreads();
    // horizontal pass: only fully interior threads write output
    for (int i = 0; i < Y_ELEMENTS; ++i)
    {
        int gx = x;
        int gy = y + i * TILE_H;
        int lx = tx;
        int ly = ty + i * TILE_H;
        if (ly < RADIUS || ly >= TILE_H2 - RADIUS) continue;
        if (lx < RADIUS || lx >= TILE_W - RADIUS) continue;
        T sum = 0;
#pragma unroll
        for (int j = -RADIUS; j <= RADIUS; j++)
        {
            int kernelIndex = j + RADIUS;
            sum += buffer2[ly - RADIUS][lx + j] * kernel[kernelIndex];
        }
        // if(dst.inImage(gx,gy))
        // dst(g,yp) = sum;
        dst.clampedWrite(gy, gx, sum);
    }
#if 0
    for(int i =0; i < Y_ELEMENTS; ++i){
        T sum = 0;
#    pragma unroll
        for (int j=-RADIUS;j<=RADIUS;j++){
            int kernelIndex = j + RADIUS;
            sum += buffer2[ty][tx + RADIUS + j] * kernel[kernelIndex];
        }
        if(dst.inImage(xp,yp))
            dst(xp,yp) = sum;
        yp += BLOCK_H;
        ty += BLOCK_H;
    }
#endif
}
// Host-side launcher for d_convolveInner. Grid cells overlap by 2*RADIUS
// because the kernel only emits "inner" pixels of each tile. LOW_OCC picks a
// wider, flatter block doing more work per thread at lower occupancy.
template <typename T, int RADIUS, bool LOW_OCC = false>
inline void convolveInner(ImageView<T> src, ImageView<T> dst)
{
    constexpr int BLOCK_W    = LOW_OCC ? 64 : 32;
    constexpr int BLOCK_H    = LOW_OCC ? 8 : 16;
    constexpr int Y_ELEMENTS = LOW_OCC ? 4 : 2;
    const int stepX = BLOCK_W - 2 * RADIUS;               // useful tile width
    const int stepY = BLOCK_H * Y_ELEMENTS - 2 * RADIUS;  // useful tile height
    const dim3 threads(BLOCK_W, BLOCK_H);
    const dim3 blocks(Saiga::iDivUp(src.width, stepX), Saiga::iDivUp(src.height, stepY), 1);
    d_convolveInner<T, RADIUS, BLOCK_W, BLOCK_H, Y_ELEMENTS><<<blocks, threads>>>(src, dst);
}
// Separable 2-D convolution, warp-shuffle variant.
// The horizontal pass runs entirely in registers: each thread holds one pixel
// per row and fetches its neighbors from other lanes via shfl(), so only the
// vertical pass needs shared memory (buffer2). Tiles overlap by 2*RADIUS as
// in d_convolveInner.
// NOTE(review): lane exchange along x assumes BLOCK_W == 32 (one warp per
// row); out-of-range lane indices (lane_id + j can be negative or > 31) rely
// on the behavior of the project's shfl() helper — TODO confirm.
template <typename T, int RADIUS, unsigned int BLOCK_W, unsigned int BLOCK_H, unsigned int Y_ELEMENTS>
__global__ static void d_convolveInnerShuffle(ImageView<T> src, ImageView<T> dst)
{
    const unsigned int TILE_H  = BLOCK_H;
    const unsigned int TILE_W  = BLOCK_W;
    const unsigned int TILE_H2 = TILE_H * Y_ELEMENTS;
    const unsigned int tx      = threadIdx.x;
    const unsigned int ty      = threadIdx.y;
    // int t = tx + ty * BLOCK_W;
    unsigned int lane_id = threadIdx.x % 32;
    int x_tile           = blockIdx.x * (TILE_W - 2 * RADIUS) - RADIUS;
    int y_tile           = blockIdx.y * (TILE_H2 - 2 * RADIUS) - RADIUS;
    int x                = x_tile + tx;
    int y                = y_tile + ty;
    // __shared__ T buffer[TILE_H2][TILE_W];
    // __shared__ T buffer2[TILE_H2][TILE_W - RADIUS * 2 + 1];
    __shared__ T buffer2[TILE_H2][TILE_W - RADIUS * 2];
    // __shared__ T buffer2[TILE_W - RADIUS * 2][TILE_H2];
    // one register-resident pixel per output row
    T localElements[Y_ELEMENTS];
    for (int i = 0; i < Y_ELEMENTS; ++i)
    {
        localElements[i] = src.clampedRead(y + i * TILE_H, x);
    }
    // conv row: horizontal pass via warp shuffles, result into buffer2
    T* kernel = d_Kernel;
    for (int i = 0; i < Y_ELEMENTS; ++i)
    {
        int lx = tx;
        int ly = ty + i * TILE_H;
        T sum  = 0;
#pragma unroll
        for (int j = -RADIUS; j <= RADIUS; j++)
        {
            int kernelIndex = j + RADIUS;
            auto value      = shfl(localElements[i], lane_id + j);
            sum += value * kernel[kernelIndex];
        }
        // horizontal halo lanes only feed shuffles; they store nothing
        if (lx < RADIUS || lx >= TILE_W - RADIUS) continue;
        buffer2[ly][lx - RADIUS] = sum;
        // buffer2[lx- RADIUS][ly] = sum;
    }
    __syncthreads();
    // conv col: vertical pass over buffer2, interior threads write output
    for (int i = 0; i < Y_ELEMENTS; ++i)
    {
        int gx = x;
        int gy = y + i * TILE_H;
        int lx = tx;
        int ly = ty + i * TILE_H;
        if (ly < RADIUS || ly >= TILE_H2 - RADIUS) continue;
        if (lx < RADIUS || lx >= TILE_W - RADIUS) continue;
        T sum = 0;
#if 1
#    pragma unroll
        for (int j = -RADIUS; j <= RADIUS; j++)
        {
            int kernelIndex = j + RADIUS;
            auto value      = buffer2[ly + j][lx - RADIUS];
            // auto value = buffer2[lx - RADIUS][ly + j];
            sum += value * kernel[kernelIndex];
        }
#endif
        dst.clampedWrite(gy, gx, sum);
    }
}
// | ---- BLOCK_W * X_ELEMENTS * vectorSize ---- |
// [ x x x x x x x x x x x x x x x x x x x x x x ]
//
// Vectorized variant of d_convolveInnerShuffle: each thread loads one
// VectorType (X_ELEMENTS pixels of type T) per covered row, performs the
// horizontal convolution with warp shuffles, stores the vector result to
// shared memory, then the vertical pass writes VectorType results to dst.
// Requires sizeof(VectorType) == X_ELEMENTS * sizeof(T) (see the disabled
// static_assert below) and vector-aligned image rows (CUDA_ASSERTs below).
// NOTE(review): contains leftover debug scaffolding (the 'colId == 508'
// branch and the empty inner loop); it is behaviorally dead apart from the
// comparisons -- candidates for removal.
template <typename T, int RADIUS, unsigned int BLOCK_W, unsigned int BLOCK_H, unsigned int X_ELEMENTS,
unsigned int Y_ELEMENTS, typename VectorType = int2>
//__launch_bounds__(BLOCK_W* BLOCK_H, 3)
__global__ static void d_convolveInnerShuffle2(ImageView<T> src, ImageView<T> dst)
{
const unsigned int TILE_W = BLOCK_W;
const unsigned int TILE_H = BLOCK_H;
const unsigned int TILE_W2 = TILE_W * X_ELEMENTS;
const unsigned int TILE_H2 = TILE_H * Y_ELEMENTS;
const unsigned int tx = threadIdx.x;
const unsigned int ty = threadIdx.y;
// int t = tx + ty * BLOCK_W;
// static_assert( sizeof(VectorType) / sizeof(T) == X_ELEMENTS);
unsigned int lane_id = threadIdx.x % 32;
// start position of tile (tiles overlap by the 2*RADIUS halo)
int x_tile = blockIdx.x * (TILE_W2 - 2 * RADIUS) - RADIUS;
int y_tile = blockIdx.y * (TILE_H2 - 2 * RADIUS) - RADIUS;
// global position of thread
int x = x_tile + tx * X_ELEMENTS;
int y = y_tile + ty;
T* kernel = d_Kernel;
// Shared-memory footprint / occupancy measurements, for vec4 radius 8:
// (16 * Y_ELEMENTS) * (32 - 4) * 16
// Y 3 -> 21504 100 occ
// Y 4 -> 28672 75 occ
// Y 5 -> 35840 50 occ
// Y 6 -> 43008 50 occ
// Y 8 -> 57344 failed
__shared__ VectorType buffer2[TILE_H2][TILE_W - 2 * RADIUS / X_ELEMENTS];
// own element + left and right radius
VectorType localElements[Y_ELEMENTS][1 + 2 * RADIUS / X_ELEMENTS]; // 5
// without this unroll we get a strange compile error
#pragma unroll
for (int i = 0; i < Y_ELEMENTS; ++i)
{
// Clamp the source row/column so border threads re-read edge pixels.
int rowId = y + i * TILE_H;
rowId = std::min(rowId, src.height - 1);
rowId = std::max(0, rowId);
int colId = std::max(0, x);
int xb = Saiga::iAlignUp(src.width, X_ELEMENTS) - X_ELEMENTS;
colId = std::min(colId, xb);
T* row = src.rowPtr(rowId);
CUDA_ASSERT(size_t(row) % sizeof(VectorType) == 0);
T* elem = row + colId;
// if(rowId == 0)
// printf("%d \n",colId);
T* localElementsT = reinterpret_cast<T*>(localElements[i]);
// center of localElements
// the left and right of the center will be filled by shuffles
VectorType& myValue = localElements[i][RADIUS / X_ELEMENTS]; //[i][2]
// load own value from global memory (note: this is the only global memory read)
CUDA_ASSERT(size_t(elem) % sizeof(VectorType) == 0);
myValue = reinterpret_cast<VectorType*>(elem)[0];
// Replicate the clamped edge pixel across the vector for out-of-image loads.
if (x < 0)
{
for (int k = 0; k < X_ELEMENTS; ++k)
{
localElementsT[RADIUS + k] = localElementsT[RADIUS];
}
}
if (x >= src.width)
{
for (int k = 0; k < X_ELEMENTS; ++k)
{
localElementsT[RADIUS + k] = localElementsT[RADIUS + X_ELEMENTS - 1];
}
}
// shuffle left: fill the halo vectors from lower lanes
for (int j = 0; j < RADIUS / X_ELEMENTS; ++j)
{
localElements[i][j] = shfl(myValue, lane_id + j - RADIUS / X_ELEMENTS);
}
// shuffle right: fill the halo vectors from higher lanes
for (int j = 0; j < RADIUS / X_ELEMENTS; ++j)
{
localElements[i][j + RADIUS / X_ELEMENTS + 1] = shfl(myValue, lane_id + j + 1);
}
T sum[X_ELEMENTS];
#pragma unroll
for (int j = 0; j < X_ELEMENTS; ++j)
{
sum[j] = 0;
}
// Horizontal convolution over the register-resident window.
#pragma unroll
for (int j = -RADIUS; j <= RADIUS; j++)
{
T kernelValue = kernel[j + RADIUS];
#pragma unroll
for (int k = 0; k < X_ELEMENTS; ++k)
{
sum[k] += localElementsT[RADIUS + j + k] * kernelValue;
}
}
// write to shared memory if this thread is 'inner' (not in the halo)
int lx = tx;
int ly = ty + i * TILE_H;
// continue if this thread is not a 'inner thread'
if (lx < RADIUS / X_ELEMENTS || lx >= TILE_W - RADIUS / X_ELEMENTS) continue;
if (x >= src.width) continue;
// if(lx >= RADIUS / X_ELEMENTS && lx < TILE_W - RADIUS / X_ELEMENTS)
{
// NOTE(review): dead debug block (prints are commented out).
if (rowId <= RADIUS && colId == 508)
{
// printf("sum row %d %d %f \n",x,y,sum[0]);
if (y == 1)
for (int k = 0; k < X_ELEMENTS + 2 * RADIUS; ++k)
{
// printf("localElementsT %d %f \n",k,localElementsT[k]);
}
}
buffer2[ly][lx - RADIUS / X_ELEMENTS] = reinterpret_cast<VectorType*>(sum)[0];
}
// return;
}
// the only sync in this kernel
__syncthreads();
// Vertical pass over the shared-memory row results.
#pragma unroll
for (int i = 0; i < Y_ELEMENTS; ++i)
{
int rowId = y + i * TILE_H;
rowId = std::min(rowId, src.height - 1);
rowId = std::max(0, rowId);
int colId = std::max(0, x);
int xb = Saiga::iAlignUp(src.width, X_ELEMENTS) - X_ELEMENTS;
colId = std::min(colId, xb);
// colId = std::min(colId,src.width - X_ELEMENTS);
// continue if this thread is not a 'inner thread'
int lx = tx;
int ly = ty + i * TILE_H;
if (lx < RADIUS / X_ELEMENTS || lx >= TILE_W - RADIUS / X_ELEMENTS) continue;
if (ly < RADIUS || ly >= TILE_H2 - RADIUS) continue;
// continue if this thread is not in image
if (x >= src.width || y + i * TILE_H >= src.height) continue;
T* row = dst.rowPtr(rowId);
T* elem = row + colId;
T sum[X_ELEMENTS];
for (int j = 0; j < X_ELEMENTS; ++j)
{
sum[j] = 0;
}
// simple row convolution in shared memory
for (int j = -RADIUS; j <= RADIUS; j++)
{
T kernelValue = kernel[j + RADIUS];
VectorType valueV = buffer2[ly + j][lx - RADIUS / X_ELEMENTS];
for (int k = 0; k < X_ELEMENTS; ++k)
{
auto v = reinterpret_cast<T*>(&valueV)[k];
sum[k] += v * kernelValue;
}
}
// Single vectorized store of X_ELEMENTS output pixels.
reinterpret_cast<VectorType*>(elem)[0] = reinterpret_cast<VectorType*>(sum)[0];
}
}
// Host launcher for the vectorized shuffle convolution kernel
// d_convolveInnerShuffle2.
//
// @param src  source image (device memory, rows must be VectorType-aligned)
// @param dst  destination image, same extent as src
//
// Fix: this hipified file still called cudaFuncSetSharedMemConfig with
// cudaSharedMemBankSize* constants; converted to the HIP equivalents so the
// file builds under the HIP runtime.
template <typename T, int RADIUS, typename VectorType = int>
inline void convolveInnerShuffle(ImageView<T> src, ImageView<T> dst)
{
int w = src.width;
int h = src.height;
// int p = src.pitchBytes;
const int BLOCK_W = 32;
const int BLOCK_H = 16;
// Number of T pixels packed into one VectorType load/store.
const int X_ELEMENTS = sizeof(VectorType) / sizeof(T);
const int Y_ELEMENTS = 4;
// Tiles overlap by the 2*RADIUS halo, hence the reduced effective stride.
dim3 blocks(Saiga::iDivUp(w, BLOCK_W * X_ELEMENTS - 2 * RADIUS),
Saiga::iDivUp(h, BLOCK_H * Y_ELEMENTS - 2 * RADIUS), 1);
dim3 threads(BLOCK_W, BLOCK_H);
// Match the shared-memory bank width to the vector size to reduce bank
// conflicts when buffer2 holds 8/16-byte vectors.
if (sizeof(VectorType) >= 8)
hipFuncSetSharedMemConfig(
reinterpret_cast<const void*>(
&d_convolveInnerShuffle2<T, RADIUS, BLOCK_W, BLOCK_H, X_ELEMENTS, Y_ELEMENTS, VectorType>),
hipSharedMemBankSizeEightByte);
else
hipFuncSetSharedMemConfig(
reinterpret_cast<const void*>(
&d_convolveInnerShuffle2<T, RADIUS, BLOCK_W, BLOCK_H, X_ELEMENTS, Y_ELEMENTS, VectorType>),
hipSharedMemBankSizeFourByte);
d_convolveInnerShuffle2<T, RADIUS, BLOCK_W, BLOCK_H, X_ELEMENTS, Y_ELEMENTS, VectorType>
<<<blocks, threads>>>(src, dst);
// d_convolveInnerShuffle3<T,12,BLOCK_W,BLOCK_H,4,Y_ELEMENTS,int4> <<<blocks, threads>>>(src,dst);
CUDA_SYNC_CHECK_ERROR();
}
// Uploads the filter taps into the constant symbol d_Kernel and dispatches
// to the compile-time-radius convolveOuterLinear instantiation.
// kernel.data() is a device pointer (hence the device-to-device copy kind).
// Fix: this hipified file still used cudaMemcpyToSymbol and
// cudaMemcpyDeviceToDevice; converted to the HIP equivalents.
// NOTE(review): radii outside [1,9] are silently ignored -- TODO confirm
// this is intended.
void convolveSinglePassSeparateOuterLinear(ImageView<float> src, ImageView<float> dst, Saiga::ArrayView<float> kernel,
int radius)
{
CHECK_CUDA_ERROR(
hipMemcpyToSymbol(HIP_SYMBOL(d_Kernel), kernel.data(), kernel.size() * sizeof(float), 0, hipMemcpyDeviceToDevice));
switch (radius)
{
case 1:
CUDA::convolveOuterLinear<float, 1>(src, dst);
break;
case 2:
CUDA::convolveOuterLinear<float, 2>(src, dst);
break;
case 3:
CUDA::convolveOuterLinear<float, 3>(src, dst);
break;
case 4:
CUDA::convolveOuterLinear<float, 4>(src, dst);
break;
case 5:
CUDA::convolveOuterLinear<float, 5>(src, dst);
break;
case 6:
CUDA::convolveOuterLinear<float, 6>(src, dst);
break;
case 7:
CUDA::convolveOuterLinear<float, 7>(src, dst);
break;
case 8:
CUDA::convolveOuterLinear<float, 8>(src, dst);
break;
case 9:
CUDA::convolveOuterLinear<float, 9>(src, dst);
break;
}
}
// Uploads the filter taps into the constant symbol d_Kernel and dispatches
// to the compile-time-radius convolveOuterHalo instantiation.
// kernel.data() is a device pointer (hence the device-to-device copy kind).
// Fix: this hipified file still used cudaMemcpyToSymbol and
// cudaMemcpyDeviceToDevice; converted to the HIP equivalents.
// NOTE(review): radii outside [1,8] are silently ignored -- TODO confirm
// this is intended.
void convolveSinglePassSeparateOuterHalo(ImageView<float> src, ImageView<float> dst, Saiga::ArrayView<float> kernel,
int radius)
{
CHECK_CUDA_ERROR(
hipMemcpyToSymbol(HIP_SYMBOL(d_Kernel), kernel.data(), kernel.size() * sizeof(float), 0, hipMemcpyDeviceToDevice));
switch (radius)
{
case 1:
CUDA::convolveOuterHalo<float, 1>(src, dst);
break;
case 2:
CUDA::convolveOuterHalo<float, 2>(src, dst);
break;
case 3:
CUDA::convolveOuterHalo<float, 3>(src, dst);
break;
case 4:
CUDA::convolveOuterHalo<float, 4>(src, dst);
break;
case 5:
CUDA::convolveOuterHalo<float, 5>(src, dst);
break;
case 6:
CUDA::convolveOuterHalo<float, 6>(src, dst);
break;
case 7:
CUDA::convolveOuterHalo<float, 7>(src, dst);
break;
case 8:
CUDA::convolveOuterHalo<float, 8>(src, dst);
break;
}
}
// Uploads the filter taps into the constant symbol d_Kernel and dispatches
// to the compile-time-radius convolveInner instantiation.
// kernel.data() is a device pointer (hence the device-to-device copy kind).
// Fix: this hipified file still used cudaMemcpyToSymbol and
// cudaMemcpyDeviceToDevice; converted to the HIP equivalents.
// NOTE(review): radii outside [1,8] are silently ignored -- TODO confirm
// this is intended.
void convolveSinglePassSeparateInner(ImageView<float> src, ImageView<float> dst, Saiga::ArrayView<float> kernel,
int radius)
{
CHECK_CUDA_ERROR(
hipMemcpyToSymbol(HIP_SYMBOL(d_Kernel), kernel.data(), kernel.size() * sizeof(float), 0, hipMemcpyDeviceToDevice));
switch (radius)
{
case 1:
CUDA::convolveInner<float, 1>(src, dst);
break;
case 2:
CUDA::convolveInner<float, 2>(src, dst);
break;
case 3:
CUDA::convolveInner<float, 3>(src, dst);
break;
case 4:
CUDA::convolveInner<float, 4>(src, dst);
break;
case 5:
CUDA::convolveInner<float, 5>(src, dst);
break;
case 6:
CUDA::convolveInner<float, 6>(src, dst);
break;
case 7:
CUDA::convolveInner<float, 7>(src, dst);
break;
case 8:
CUDA::convolveInner<float, 8>(src, dst);
break;
}
}
// Same as convolveSinglePassSeparateInner but instantiates convolveInner
// with its third template argument set to true (the "75" variant).
// kernel.data() is a device pointer (hence the device-to-device copy kind).
// Fix: this hipified file still used cudaMemcpyToSymbol and
// cudaMemcpyDeviceToDevice; converted to the HIP equivalents.
// NOTE(review): radii outside [1,8] are silently ignored -- TODO confirm
// this is intended.
void convolveSinglePassSeparateInner75(ImageView<float> src, ImageView<float> dst, Saiga::ArrayView<float> kernel,
int radius)
{
CHECK_CUDA_ERROR(
hipMemcpyToSymbol(HIP_SYMBOL(d_Kernel), kernel.data(), kernel.size() * sizeof(float), 0, hipMemcpyDeviceToDevice));
switch (radius)
{
case 1:
CUDA::convolveInner<float, 1, true>(src, dst);
break;
case 2:
CUDA::convolveInner<float, 2, true>(src, dst);
break;
case 3:
CUDA::convolveInner<float, 3, true>(src, dst);
break;
case 4:
CUDA::convolveInner<float, 4, true>(src, dst);
break;
case 5:
CUDA::convolveInner<float, 5, true>(src, dst);
break;
case 6:
CUDA::convolveInner<float, 6, true>(src, dst);
break;
case 7:
CUDA::convolveInner<float, 7, true>(src, dst);
break;
case 8:
CUDA::convolveInner<float, 8, true>(src, dst);
break;
}
}
// Uploads the filter taps into the constant symbol d_Kernel and dispatches
// to the compile-time-radius convolveInnerShuffle instantiation. The vector
// type per radius (int/int2/int4) selects how many pixels one thread loads
// per vectorized access.
// kernel.data() is a device pointer (hence the device-to-device copy kind).
// Fix: this hipified file still used cudaMemcpyToSymbol and
// cudaMemcpyDeviceToDevice; converted to the HIP equivalents.
// NOTE(review): unsupported radii fall through silently -- TODO confirm.
void convolveSinglePassSeparateInnerShuffle(ImageView<float> src, ImageView<float> dst, Saiga::ArrayView<float> kernel,
int radius)
{
CHECK_CUDA_ERROR(
hipMemcpyToSymbol(HIP_SYMBOL(d_Kernel), kernel.data(), kernel.size() * sizeof(float), 0, hipMemcpyDeviceToDevice));
switch (radius)
{
case 0:
CUDA::convolveInnerShuffle<float, 0, int>(src, dst);
break;
case 1:
CUDA::convolveInnerShuffle<float, 1, int>(src, dst);
break;
case 2:
CUDA::convolveInnerShuffle<float, 2, int2>(src, dst);
break;
case 3:
CUDA::convolveInnerShuffle<float, 3, int>(src, dst);
break;
case 4:
CUDA::convolveInnerShuffle<float, 4, int4>(src, dst);
break;
case 5:
CUDA::convolveInnerShuffle<float, 5, int>(src, dst);
break;
case 6:
CUDA::convolveInnerShuffle<float, 6, int2>(src, dst);
break;
case 7:
CUDA::convolveInnerShuffle<float, 7, int>(src, dst);
break;
case 8:
CUDA::convolveInnerShuffle<float, 8, int4>(src, dst);
break;
case 9:
CUDA::convolveInnerShuffle<float, 9, int>(src, dst);
break;
case 10:
CUDA::convolveInnerShuffle<float, 10, int2>(src, dst);
break;
case 11:
CUDA::convolveInnerShuffle<float, 11, int>(src, dst);
break;
case 12:
CUDA::convolveInnerShuffle<float, 12, int2>(src, dst);
break;
case 13:
CUDA::convolveInnerShuffle<float, 13, int>(src, dst);
break;
case 14:
CUDA::convolveInnerShuffle<float, 14, int2>(src, dst);
break;
case 15:
CUDA::convolveInnerShuffle<float, 15, int>(src, dst);
break;
case 16:
CUDA::convolveInnerShuffle<float, 16, int4>(src, dst);
break;
// case 17: CUDA::convolveInnerShuffle<float,17,int>(src,dst); break;
// case 18: CUDA::convolveInnerShuffle<float,18,int2>(src,dst); break;
// case 19: CUDA::convolveInnerShuffle<float,19,int>(src,dst); break;
case 20:
CUDA::convolveInnerShuffle<float, 20, int4>(src, dst);
break;
// case 21: CUDA::convolveInnerShuffle<float,21,int>(src,dst); break;
// case 22: CUDA::convolveInnerShuffle<float,22,int2>(src,dst); break;
// case 23: CUDA::convolveInnerShuffle<float,23,int>(src,dst); break;
case 24:
CUDA::convolveInnerShuffle<float, 24, int4>(src, dst);
break;
}
}
} // namespace CUDA
} // namespace Saiga
|
588c31141acdd82937c958c99f755aca25454a3e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.7.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2015
@generated from zgeadd_batched.cu normal z -> s, Fri Sep 11 18:29:22 2015
@author Mark Gates
*/
#include "common_magma.h"
#define NB 64
/* =====================================================================
Batches slacpy of multiple arrays;
y-dimension of grid is different arrays,
x-dimension of grid is blocks for each array.
Matrix is m x n, and is divided into block rows, each NB x n.
Each CUDA block has NB threads to handle one block row.
Each thread adds one row, iterating across all columns.
The bottom block of rows may be partially outside the matrix;
if so, rows outside the matrix (i >= m) are disabled.
TODO. Block in both directions, for large matrices.
E.g., each block does 64x64 tile, instead of 64xN tile.
*/
// Batched matrix add: for the matrix pair selected by blockIdx.y, thread
// i = blockIdx.x*blockDim.x + threadIdx.x owns row i and walks across all n
// columns, computing dB(i,j) = alpha*dA(i,j) + dB(i,j).
__global__ void
sgeadd_batched_kernel(
int m, int n,
float alpha,
const float * const *dAarray, int ldda,
float **dBarray, int lddb )
{
// dA and dB iterate across row i
const float *dA = dAarray[ blockIdx.y ];
float *dB = dBarray[ blockIdx.y ];
int i = blockIdx.x*blockDim.x + threadIdx.x;
if ( i < m ) {
dA += i;
dB += i;
// Walk column-by-column: advancing by the leading dimension moves to
// the same row of the next column.
const float *dAend = dA + n*ldda;
while( dA < dAend ) {
*dB = alpha*(*dA) + (*dB);
dA += ldda;
dB += lddb;
}
}
}
/* ===================================================================== */
/**
Purpose
-------
ZGEADD adds two sets of matrices, dAarray[i] = alpha*dAarray[i] + dBarray[i],
for i = 0, ..., batchCount-1.
Arguments
---------
@param[in]
m INTEGER
The number of rows of each matrix dAarray[i]. M >= 0.
@param[in]
n INTEGER
The number of columns of each matrix dAarray[i]. N >= 0.
@param[in]
alpha REAL
The scalar alpha.
@param[in]
dAarray array on GPU, dimension(batchCount), of pointers to arrays,
with each array a REAL array, dimension (LDDA,N)
The m by n matrices dAarray[i].
@param[in]
ldda INTEGER
The leading dimension of each array dAarray[i]. LDDA >= max(1,M).
@param[in,out]
dBarray array on GPU, dimension(batchCount), of pointers to arrays,
with each array a REAL array, dimension (LDDB,N)
The m by n matrices dBarray[i].
@param[in]
lddb INTEGER
The leading dimension of each array dBarray[i]. LDDB >= max(1,M).
@param[in]
batchCount INTEGER
The number of matrices to add; length of dAarray and dBarray.
batchCount >= 0.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_saux2
********************************************************************/
// Queued entry point; full contract is documented in the doxygen block
// above. Validates arguments, then launches one thread block row-range per
// matrix (grid.y indexes the batch).
extern "C" void
magmablas_sgeadd_batched_q(
magma_int_t m, magma_int_t n,
float alpha,
magmaFloat_const_ptr const dAarray[], magma_int_t ldda,
magmaFloat_ptr dBarray[], magma_int_t lddb,
magma_int_t batchCount,
magma_queue_t queue )
{
// MAGMA convention: info holds the negated index of the first bad argument.
magma_int_t info = 0;
if ( m < 0 )
info = -1;
else if ( n < 0 )
info = -2;
else if ( ldda < max(1,m))
info = -5;
else if ( lddb < max(1,m))
info = -7;
else if ( batchCount < 0 )
info = -8;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return;
}
// Nothing to do for empty matrices or an empty batch.
if ( m == 0 || n == 0 || batchCount == 0 )
return;
dim3 threads( NB );
dim3 grid( magma_ceildiv( m, NB ), batchCount );
hipLaunchKernelGGL(( sgeadd_batched_kernel), dim3(grid), dim3(threads), 0, queue ,
m, n, alpha, dAarray, ldda, dBarray, lddb );
}
/**
@see magmablas_sgeadd_batched_q
@ingroup magma_saux2
********************************************************************/
// Convenience overload: forwards to the queued version using the global
// magma_stream.
extern "C" void
magmablas_sgeadd_batched(
magma_int_t m, magma_int_t n,
float alpha,
magmaFloat_const_ptr const dAarray[], magma_int_t ldda,
magmaFloat_ptr dBarray[], magma_int_t lddb,
magma_int_t batchCount )
{
magmablas_sgeadd_batched_q(
m, n, alpha, dAarray, ldda, dBarray, lddb, batchCount, magma_stream );
}
| 588c31141acdd82937c958c99f755aca25454a3e.cu | /*
-- MAGMA (version 1.7.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2015
@generated from zgeadd_batched.cu normal z -> s, Fri Sep 11 18:29:22 2015
@author Mark Gates
*/
#include "common_magma.h"
#define NB 64
/* =====================================================================
Batches slacpy of multiple arrays;
y-dimension of grid is different arrays,
x-dimension of grid is blocks for each array.
Matrix is m x n, and is divided into block rows, each NB x n.
Each CUDA block has NB threads to handle one block row.
Each thread adds one row, iterating across all columns.
The bottom block of rows may be partially outside the matrix;
if so, rows outside the matrix (i >= m) are disabled.
TODO. Block in both directions, for large matrices.
E.g., each block does 64x64 tile, instead of 64xN tile.
*/
// Batched matrix add: for the matrix pair selected by blockIdx.y, the thread
// owning row 'row' sweeps over all n columns and computes
// dB(row,j) = alpha*dA(row,j) + dB(row,j).
__global__ void
sgeadd_batched_kernel(
int m, int n,
float alpha,
const float * const *dAarray, int ldda,
float **dBarray, int lddb )
{
const int row = blockIdx.x*blockDim.x + threadIdx.x;
if ( row >= m )
return;
// Select this batch entry's matrices and offset to our row; stepping by
// the leading dimension moves to the same row of the next column.
const float *srcA = dAarray[ blockIdx.y ] + row;
float *dstB = dBarray[ blockIdx.y ] + row;
for ( int j = 0; j < n; ++j ) {
dstB[ j*lddb ] = alpha*srcA[ j*ldda ] + dstB[ j*lddb ];
}
}
/* ===================================================================== */
/**
Purpose
-------
ZGEADD adds two sets of matrices, dAarray[i] = alpha*dAarray[i] + dBarray[i],
for i = 0, ..., batchCount-1.
Arguments
---------
@param[in]
m INTEGER
The number of rows of each matrix dAarray[i]. M >= 0.
@param[in]
n INTEGER
The number of columns of each matrix dAarray[i]. N >= 0.
@param[in]
alpha REAL
The scalar alpha.
@param[in]
dAarray array on GPU, dimension(batchCount), of pointers to arrays,
with each array a REAL array, dimension (LDDA,N)
The m by n matrices dAarray[i].
@param[in]
ldda INTEGER
The leading dimension of each array dAarray[i]. LDDA >= max(1,M).
@param[in,out]
dBarray array on GPU, dimension(batchCount), of pointers to arrays,
with each array a REAL array, dimension (LDDB,N)
The m by n matrices dBarray[i].
@param[in]
lddb INTEGER
The leading dimension of each array dBarray[i]. LDDB >= max(1,M).
@param[in]
batchCount INTEGER
The number of matrices to add; length of dAarray and dBarray.
batchCount >= 0.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_saux2
********************************************************************/
// Queued entry point; full contract is documented in the doxygen block
// above. Validates arguments, then launches one thread block row-range per
// matrix (grid.y indexes the batch).
extern "C" void
magmablas_sgeadd_batched_q(
magma_int_t m, magma_int_t n,
float alpha,
magmaFloat_const_ptr const dAarray[], magma_int_t ldda,
magmaFloat_ptr dBarray[], magma_int_t lddb,
magma_int_t batchCount,
magma_queue_t queue )
{
// MAGMA convention: info holds the negated index of the first bad argument.
magma_int_t info = 0;
if ( m < 0 )
info = -1;
else if ( n < 0 )
info = -2;
else if ( ldda < max(1,m))
info = -5;
else if ( lddb < max(1,m))
info = -7;
else if ( batchCount < 0 )
info = -8;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return;
}
// Nothing to do for empty matrices or an empty batch.
if ( m == 0 || n == 0 || batchCount == 0 )
return;
dim3 threads( NB );
dim3 grid( magma_ceildiv( m, NB ), batchCount );
sgeadd_batched_kernel<<< grid, threads, 0, queue >>>(
m, n, alpha, dAarray, ldda, dBarray, lddb );
}
/**
@see magmablas_sgeadd_batched_q
@ingroup magma_saux2
********************************************************************/
// Convenience overload: forwards to the queued version using the global
// magma_stream.
extern "C" void
magmablas_sgeadd_batched(
magma_int_t m, magma_int_t n,
float alpha,
magmaFloat_const_ptr const dAarray[], magma_int_t ldda,
magmaFloat_ptr dBarray[], magma_int_t lddb,
magma_int_t batchCount )
{
magmablas_sgeadd_batched_q(
m, n, alpha, dAarray, ldda, dBarray, lddb, batchCount, magma_stream );
}
|
e3eb85665e966ae27faff325bf96939b998632ae.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<math.h>
#include<limits.h>
#define numKernels 512
#define kernelBatchSize 45
#define numThreads 256
#define hashTableWidth 2048
// Assigns each point (d_x[i], d_y[i], d_z[i]) -- coordinates may be negative
// -- to a linear cell index in a numDivisions^3 partitioning with cubic
// cells of edge 'windowlen'. The cell id is written to d_quad[i]. One thread
// per point; threads with index >= count do nothing.
// NOTE(review): the first printf is leftover debug output and its arguments
// ('x' before 'index') do not match the "d_x[%d]=%d" format, so index and
// value print swapped.
__global__
void getQuadrant(int count,int *d_x,int *d_y,int *d_z,int *d_quad,int windowlen,int numDivisions,int nx,int ny,int nz)
{
int index=blockIdx.x*blockDim.x+threadIdx.x;
int x,y,z;
if (index<count)
{
x=d_x[index];
y=d_y[index];
z=d_z[index];
printf("count= %d d_x[%d]=%d d_y=%d d_z=%d windowlen=%d numDivisions=%d\n",count,x,index,y,z,windowlen,numDivisions);
// Shift coordinates to be non-negative, ceil-divide per axis, then
// flatten (z-major) to a single cell index.
//d_quad[index]=(4*nx*ny*(nz+z)+2*nx*(ny+y)+(nx+x)+(boxvolume-1))/(boxvolume);
d_quad[index]=numDivisions*numDivisions*((nz+z+windowlen-1)/windowlen)+numDivisions*((ny+y+windowlen-1)/windowlen)+((nx+x+windowlen-1)/windowlen);
printf("%d %d\n",index,d_quad[index]);
}
}
// Builds a per-quadrant hash table in dynamic shared memory: each cell id
// maps to a row of (x, y, z, original index) entries, with the entry count
// in the row's first slot, then (apparently) searches neighbouring cells.
// NOTE(review): this function does NOT compile as written and appears to be
// truncated work-in-progress: identifiers are misspelled (numDivsions,
// quadCoordianteMap, hastTableWidth, _syncthreads), the variables k, l, x,
// y, z and quad are never declared, the concurrent shared-memory updates in
// the fill loop are unsynchronized, and the braces opened below are never
// closed before the next top-level definition.
__device__ void buildSearchHash(int batchIndex,int count, int *d_x,int *d_y,int *d_z,int *d_quad, int numDivisions,int offsetSize)
{
extern __shared__ int quadCoordinateMap[];
int index, newEntryColumn,i,j;
// Mark all entry slots empty; column 0 of each row is the entry counter.
for(i=0;i<numDivisions;i++)
for(j=1;j<hashTableWidth/4;j++)
quadCoordinateMap[i*(hashTableWidth/4)+j]=INT_MIN;
for(i=0;i<numDivsions;i++)
quadCoordianteMap[i*(hastTableWidth/4)]=0;
index=batchIndex*offsetSize+blockIdx.x*blockDim.x;
// Append each point of this batch slice to its cell's row.
for(i=0;i<offsetSize;i++)
{
newEntryColumn=quadCoordinateMap[d_quad[index+i]*(hashTableWidth/4)];
quadCoordinateMap[d_quad[index+i]*(hashTableWidth/4)+newEntryColumn*4+1]=d_x[index+i];
quadCoordinateMap[d_quad[index+i]*(hashTableWidth/4)+newEntryColumn*4+2]=d_y[index+i];
quadCoordinateMap[d_quad[index+i]*(hashTableWidth/4)+newEntryColumn*4+3]=d_z[index+i];
quadCoordinateMap[d_quad[index+i]*(hashTableWidth/4)+newEntryColumn*4+4]=index+i;
quadCoordinateMap[d_quad[index+i]*(hashTableWidth/4)]++;
}
_syncthreads();
__shared__ int findCoordinates[42];
for(i=0;i<count;i++)
{
for(k=0;k<42;k++)
findCoordinates[k]=0;hipLaunchKernelGGL((
searchNeighbours), dim3(1),dim3(42), 0, 0, quadCoordinateMap,count,x,y,z,quad,numDivisions,findCoordinates);
for(k=0;k<6;k++)
{
for(l=0;l<7;l++)
if(findCoordinates[k*7+l]!=0 && findCoordinates[k*7+l]!=d_quad[i])
{
/*for(j=0;j<quadCoordinateMap[d_quad[i]*(hashTableWidth/4)];j++)
{
if((quadCoordinateMap[d_quad[i]*(hashTableWidth/4)+j*3+1]==d_x[i]) && (quadCoordinateMap[d_quad[i]*(hashTableWidth/4)+j*3+2]==d_y[i]) && (quadCoordinateMap[d_quad[i]*(hashTableWidth/4)+j*3+3]==d_z[i]))
{
*/
// Returns the larger of the two integers.
int maxnum(int num1,int num2)
{
return (num1 > num2) ? num1 : num2;
}
// Returns the largest of the three integers.
int maxdimension(int num1,int num2,int num3)
{
int pairMax = maxnum(num2, num3);
return maxnum(num1, pairMax);
}
// Reads a point set from data.txt (header "Nx= Ny= Nz=" followed by one
// "x y z" triple per line), uploads it to the device, and runs getQuadrant
// to bin every point into a cubic cell of a numKernels-way partitioning.
// NOTE(review): the feof()-controlled counting loop typically processes the
// last fscanf result twice, so 'count' may be one too large -- verify
// against the input format. malloc/hip* return values are unchecked, the
// second fopen is never fclosed, and the allocations are never freed.
int main()
{
long long count,i;
int nx,ny,nz,x,y,z,*h_x,*h_y,*h_z,*h_quad,*d_quad,*d_x,*d_y,*d_z,numDivisions,windowlen,temp;
double cubeRoot;
//long long sharedMemSize=1000;
FILE *fp;
// First pass: count the number of coordinate triples.
fp=fopen("data.txt","r");
count=0;
fscanf(fp,"Nx=%d Ny=%d Nz=%d",&nx,&ny,&nz);
printf("%d %d %d\n",nx,ny,nz);
while(feof(fp)==0)
{
fscanf(fp,"%d %d %d\n",&x,&y,&z);
printf("%d %d %d\n",x,y,z);
count++;
}
printf("%lld\n",count);
fclose(fp);
h_x=(int*)malloc(sizeof(int)*count);
h_y=(int*)malloc(sizeof(int)*count);
h_z=(int*)malloc(sizeof(int)*count);
h_quad=(int*)malloc(sizeof(int)*count);
hipMalloc(&d_x,count*sizeof(int));
hipMalloc(&d_y,count*sizeof(int));
hipMalloc(&d_z,count*sizeof(int));
hipMalloc(&d_quad,count*sizeof(int));
// Second pass: actually load the coordinates into host arrays.
fp=fopen("data.txt","r");
fscanf(fp,"Nx=%d Ny=%d Nz=%d",&nx,&ny,&nz);
printf("%d %d %d\n",nx,ny,nz);
for(i=0;i<count;i++)
fscanf(fp,"%d %d %d\n",&h_x[i],&h_y[i],&h_z[i]);
/*printf("yoo");
for(i=0;i<count;i++)
printf("yo %d %d %d\n",h_x[i],h_y[i],h_z[i]);
*/
/*boxx=(2*nx)/numKernels;
boxy=(2*ny)/numKernels;
boxz=(2*nz)/numKernels;
*/
// Partition space into numDivisions^3 cells (numDivisions = cbrt(numKernels)).
cubeRoot=cbrt(double(numKernels));
numDivisions=cubeRoot;
printf("cubeRoot %lf numKernels %d\n",cubeRoot,numKernels);
windowlen=(2*maxdimension(nx,ny,nz)+cubeRoot-1)/cubeRoot;
printf("numDivisions %d windowlen %d\n",numDivisions,windowlen);
hipMemcpy(d_x,h_x,count*sizeof(int),hipMemcpyHostToDevice);
hipMemcpy(d_y,h_y,count*sizeof(int),hipMemcpyHostToDevice);
hipMemcpy(d_z,h_z,count*sizeof(int),hipMemcpyHostToDevice);hipLaunchKernelGGL((
getQuadrant), dim3((count+numThreads-1)/numThreads),dim3(numThreads), 0, 0, count,d_x,d_y,d_z,d_quad,windowlen,numDivisions,nx,ny,nz);
hipMemcpy(h_quad,d_quad,count*sizeof(int),hipMemcpyDeviceToHost);
// Print the first few cell ids as a sanity check.
for(i=0;i<5;i++)
printf("yo4 %d\n",h_quad[i]);
}
| e3eb85665e966ae27faff325bf96939b998632ae.cu | #include<stdio.h>
#include<math.h>
#include<limits.h>
#define numKernels 512
#define kernelBatchSize 45
#define numThreads 256
#define hashTableWidth 2048
// Assigns each point (d_x[i], d_y[i], d_z[i]) -- coordinates may be negative
// -- to a linear cell index in a numDivisions^3 partitioning with cubic
// cells of edge 'windowlen'. The cell id is written to d_quad[i]. One thread
// per point; threads with index >= count do nothing.
// NOTE(review): the first printf is leftover debug output and its arguments
// ('x' before 'index') do not match the "d_x[%d]=%d" format, so index and
// value print swapped.
__global__
void getQuadrant(int count,int *d_x,int *d_y,int *d_z,int *d_quad,int windowlen,int numDivisions,int nx,int ny,int nz)
{
int index=blockIdx.x*blockDim.x+threadIdx.x;
int x,y,z;
if (index<count)
{
x=d_x[index];
y=d_y[index];
z=d_z[index];
printf("count= %d d_x[%d]=%d d_y=%d d_z=%d windowlen=%d numDivisions=%d\n",count,x,index,y,z,windowlen,numDivisions);
// Shift coordinates to be non-negative, ceil-divide per axis, then
// flatten (z-major) to a single cell index.
//d_quad[index]=(4*nx*ny*(nz+z)+2*nx*(ny+y)+(nx+x)+(boxvolume-1))/(boxvolume);
d_quad[index]=numDivisions*numDivisions*((nz+z+windowlen-1)/windowlen)+numDivisions*((ny+y+windowlen-1)/windowlen)+((nx+x+windowlen-1)/windowlen);
printf("%d %d\n",index,d_quad[index]);
}
}
// Builds a per-quadrant hash table in dynamic shared memory: each cell id
// maps to a row of (x, y, z, original index) entries, with the entry count
// in the row's first slot, then (apparently) searches neighbouring cells.
// NOTE(review): this function does NOT compile as written and appears to be
// truncated work-in-progress: identifiers are misspelled (numDivsions,
// quadCoordianteMap, hastTableWidth, _syncthreads), the variables k, l, x,
// y, z and quad are never declared, the concurrent shared-memory updates in
// the fill loop are unsynchronized, and the braces opened below are never
// closed before the next top-level definition.
__device__ void buildSearchHash(int batchIndex,int count, int *d_x,int *d_y,int *d_z,int *d_quad, int numDivisions,int offsetSize)
{
extern __shared__ int quadCoordinateMap[];
int index, newEntryColumn,i,j;
// Mark all entry slots empty; column 0 of each row is the entry counter.
for(i=0;i<numDivisions;i++)
for(j=1;j<hashTableWidth/4;j++)
quadCoordinateMap[i*(hashTableWidth/4)+j]=INT_MIN;
for(i=0;i<numDivsions;i++)
quadCoordianteMap[i*(hastTableWidth/4)]=0;
index=batchIndex*offsetSize+blockIdx.x*blockDim.x;
// Append each point of this batch slice to its cell's row.
for(i=0;i<offsetSize;i++)
{
newEntryColumn=quadCoordinateMap[d_quad[index+i]*(hashTableWidth/4)];
quadCoordinateMap[d_quad[index+i]*(hashTableWidth/4)+newEntryColumn*4+1]=d_x[index+i];
quadCoordinateMap[d_quad[index+i]*(hashTableWidth/4)+newEntryColumn*4+2]=d_y[index+i];
quadCoordinateMap[d_quad[index+i]*(hashTableWidth/4)+newEntryColumn*4+3]=d_z[index+i];
quadCoordinateMap[d_quad[index+i]*(hashTableWidth/4)+newEntryColumn*4+4]=index+i;
quadCoordinateMap[d_quad[index+i]*(hashTableWidth/4)]++;
}
_syncthreads();
__shared__ int findCoordinates[42];
for(i=0;i<count;i++)
{
for(k=0;k<42;k++)
findCoordinates[k]=0;
searchNeighbours<<<1,42>>>(quadCoordinateMap,count,x,y,z,quad,numDivisions,findCoordinates);
for(k=0;k<6;k++)
{
for(l=0;l<7;l++)
if(findCoordinates[k*7+l]!=0 && findCoordinates[k*7+l]!=d_quad[i])
{
/*for(j=0;j<quadCoordinateMap[d_quad[i]*(hashTableWidth/4)];j++)
{
if((quadCoordinateMap[d_quad[i]*(hashTableWidth/4)+j*3+1]==d_x[i]) && (quadCoordinateMap[d_quad[i]*(hashTableWidth/4)+j*3+2]==d_y[i]) && (quadCoordinateMap[d_quad[i]*(hashTableWidth/4)+j*3+3]==d_z[i]))
{
*/
// Returns the larger of the two integers.
int maxnum(int num1,int num2)
{
return (num1 > num2) ? num1 : num2;
}
// Returns the largest of the three integers.
int maxdimension(int num1,int num2,int num3)
{
int pairMax = maxnum(num2, num3);
return maxnum(num1, pairMax);
}
// Reads a point set from data.txt (header "Nx= Ny= Nz=" followed by one
// "x y z" triple per line), uploads it to the device, and runs getQuadrant
// to bin every point into a cubic cell of a numKernels-way partitioning.
// NOTE(review): the feof()-controlled counting loop typically processes the
// last fscanf result twice, so 'count' may be one too large -- verify
// against the input format. malloc/cuda* return values are unchecked, the
// second fopen is never fclosed, and the allocations are never freed.
int main()
{
long long count,i;
int nx,ny,nz,x,y,z,*h_x,*h_y,*h_z,*h_quad,*d_quad,*d_x,*d_y,*d_z,numDivisions,windowlen,temp;
double cubeRoot;
//long long sharedMemSize=1000;
FILE *fp;
// First pass: count the number of coordinate triples.
fp=fopen("data.txt","r");
count=0;
fscanf(fp,"Nx=%d Ny=%d Nz=%d",&nx,&ny,&nz);
printf("%d %d %d\n",nx,ny,nz);
while(feof(fp)==0)
{
fscanf(fp,"%d %d %d\n",&x,&y,&z);
printf("%d %d %d\n",x,y,z);
count++;
}
printf("%lld\n",count);
fclose(fp);
h_x=(int*)malloc(sizeof(int)*count);
h_y=(int*)malloc(sizeof(int)*count);
h_z=(int*)malloc(sizeof(int)*count);
h_quad=(int*)malloc(sizeof(int)*count);
cudaMalloc(&d_x,count*sizeof(int));
cudaMalloc(&d_y,count*sizeof(int));
cudaMalloc(&d_z,count*sizeof(int));
cudaMalloc(&d_quad,count*sizeof(int));
// Second pass: actually load the coordinates into host arrays.
fp=fopen("data.txt","r");
fscanf(fp,"Nx=%d Ny=%d Nz=%d",&nx,&ny,&nz);
printf("%d %d %d\n",nx,ny,nz);
for(i=0;i<count;i++)
fscanf(fp,"%d %d %d\n",&h_x[i],&h_y[i],&h_z[i]);
/*printf("yoo");
for(i=0;i<count;i++)
printf("yo %d %d %d\n",h_x[i],h_y[i],h_z[i]);
*/
/*boxx=(2*nx)/numKernels;
boxy=(2*ny)/numKernels;
boxz=(2*nz)/numKernels;
*/
// Partition space into numDivisions^3 cells (numDivisions = cbrt(numKernels)).
cubeRoot=cbrt(double(numKernels));
numDivisions=cubeRoot;
printf("cubeRoot %lf numKernels %d\n",cubeRoot,numKernels);
windowlen=(2*maxdimension(nx,ny,nz)+cubeRoot-1)/cubeRoot;
printf("numDivisions %d windowlen %d\n",numDivisions,windowlen);
cudaMemcpy(d_x,h_x,count*sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(d_y,h_y,count*sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(d_z,h_z,count*sizeof(int),cudaMemcpyHostToDevice);
getQuadrant<<<(count+numThreads-1)/numThreads,numThreads>>>(count,d_x,d_y,d_z,d_quad,windowlen,numDivisions,nx,ny,nz);
cudaMemcpy(h_quad,d_quad,count*sizeof(int),cudaMemcpyDeviceToHost);
// Print the first few cell ids as a sanity check.
for(i=0;i<5;i++)
printf("yo4 %d\n",h_quad[i]);
}
|
c6713d5c9a999f7478c22ab99b8b15b9bc7f9dc7.hip | // !!! This is a file automatically generated by hipify!!!
#include "CudaErrors.hh"
#include <iostream>
// Translates a hipfftResult status code into its symbolic name for error
// reporting; unrecognized codes map to "Unknown".
static const char* cufftGetErrorString(hipfftResult err) {
switch (err) {
case HIPFFT_SUCCESS:
return "HIPFFT_SUCCESS";
case HIPFFT_INVALID_PLAN:
return "HIPFFT_INVALID_PLAN";
case HIPFFT_ALLOC_FAILED:
return "HIPFFT_ALLOC_FAILED";
case HIPFFT_INVALID_TYPE:
return "HIPFFT_INVALID_TYPE";
case HIPFFT_INVALID_VALUE:
return "HIPFFT_INVALID_VALUE";
case HIPFFT_INTERNAL_ERROR:
return "HIPFFT_INTERNAL_ERROR";
case HIPFFT_EXEC_FAILED:
return "HIPFFT_EXEC_FAILED";
case HIPFFT_SETUP_FAILED:
return "HIPFFT_SETUP_FAILED";
case HIPFFT_INVALID_SIZE:
return "HIPFFT_INVALID_SIZE";
case HIPFFT_UNALIGNED_DATA:
return "HIPFFT_UNALIGNED_DATA";
case HIPFFT_INCOMPLETE_PARAMETER_LIST:
return "HIPFFT_INCOMPLETE_PARAMETER_LIST";
case HIPFFT_INVALID_DEVICE:
return "HIPFFT_INVALID_DEVICE";
case HIPFFT_PARSE_ERROR:
return "HIPFFT_PARSE_ERROR";
case HIPFFT_NO_WORKSPACE:
return "HIPFFT_NO_WORKSPACE";
case HIPFFT_NOT_IMPLEMENTED:
return "HIPFFT_NOT_IMPLEMENTED";
case HIPFFT_LICENSE_ERROR:
return "HIPFFT_LICENSE_ERROR";
case HIPFFT_NOT_SUPPORTED:
return "HIPFFT_NOT_SUPPORTED";
default:
return "Unknown";
}
}
// Prints a runtime-API error (hipError_t) to stderr with a human-readable
// description, prefixed by the caller-supplied message.
void fft::gpu::printCudaError(std::string msg, hipError_t err)
{
std::cerr << " *** " << msg << ": "
<< hipGetErrorString(err)
<< " ***" << std::endl;
}
// Overload for FFT-API errors (hipfftResult); uses the local translator
// above since there is no library-provided string conversion.
void fft::gpu::printCudaError(std::string msg, hipfftResult err)
{
std::cerr << " *** " << msg << ": "
<< cufftGetErrorString(err)
<< " ***" << std::endl;
}
| c6713d5c9a999f7478c22ab99b8b15b9bc7f9dc7.cu | #include "CudaErrors.hh"
#include <iostream>
// Translates a cufftResult status code into its symbolic name for error
// reporting; unrecognized codes map to "Unknown".
static const char* cufftGetErrorString(cufftResult err) {
switch (err) {
case CUFFT_SUCCESS:
return "CUFFT_SUCCESS";
case CUFFT_INVALID_PLAN:
return "CUFFT_INVALID_PLAN";
case CUFFT_ALLOC_FAILED:
return "CUFFT_ALLOC_FAILED";
case CUFFT_INVALID_TYPE:
return "CUFFT_INVALID_TYPE";
case CUFFT_INVALID_VALUE:
return "CUFFT_INVALID_VALUE";
case CUFFT_INTERNAL_ERROR:
return "CUFFT_INTERNAL_ERROR";
case CUFFT_EXEC_FAILED:
return "CUFFT_EXEC_FAILED";
case CUFFT_SETUP_FAILED:
return "CUFFT_SETUP_FAILED";
case CUFFT_INVALID_SIZE:
return "CUFFT_INVALID_SIZE";
case CUFFT_UNALIGNED_DATA:
return "CUFFT_UNALIGNED_DATA";
case CUFFT_INCOMPLETE_PARAMETER_LIST:
return "CUFFT_INCOMPLETE_PARAMETER_LIST";
case CUFFT_INVALID_DEVICE:
return "CUFFT_INVALID_DEVICE";
case CUFFT_PARSE_ERROR:
return "CUFFT_PARSE_ERROR";
case CUFFT_NO_WORKSPACE:
return "CUFFT_NO_WORKSPACE";
case CUFFT_NOT_IMPLEMENTED:
return "CUFFT_NOT_IMPLEMENTED";
case CUFFT_LICENSE_ERROR:
return "CUFFT_LICENSE_ERROR";
case CUFFT_NOT_SUPPORTED:
return "CUFFT_NOT_SUPPORTED";
default:
return "Unknown";
}
}
// Prints a runtime-API error (cudaError_t) to stderr with a human-readable
// description, prefixed by the caller-supplied message.
void fft::gpu::printCudaError(std::string msg, cudaError_t err)
{
std::cerr << " *** " << msg << ": "
<< cudaGetErrorString(err)
<< " ***" << std::endl;
}
// Overload for FFT-API errors (cufftResult); uses the local translator
// above since there is no library-provided string conversion.
void fft::gpu::printCudaError(std::string msg, cufftResult err)
{
std::cerr << " *** " << msg << ": "
<< cufftGetErrorString(err)
<< " ***" << std::endl;
}
|
f8794481e602be78f8093fc47c4d4658aa12133a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "reduce.h"
// Operator definitions plugged into the generic 'transform' reduction
// template from reduce.h, realizing a strided product reduction:
// merge/update combine partial results by multiplication, op maps each
// element to itself, and postProcess returns the reduction unchanged.
// Combines two partial reductions (product semantics).
__device__ float merge(float old,float opOutput,float *extraParams) {
return opOutput * old;
}
// Folds one new operator output into the running product.
__device__ float update(float old,float opOutput,float *extraParams) {
return opOutput * old;
}
// Per-element map: identity.
__device__ float op(float d1,float *extraParams) {
return d1;
}
// No post-processing for a plain product.
__device__ float postProcess(float reduction,int n,int xOffset,float *dx,int incx,float *extraParams,float *result) {
return reduction;
}
// Kernel entry point: delegates to the shared 'transform' driver.
extern "C"
__global__ void prod_strided_float(int n, int xOffset,float *dx,int incx,float *extraParams,float *result) {
transform(n,xOffset,dx,incx,extraParams,result);
}
| f8794481e602be78f8093fc47c4d4658aa12133a.cu | #include "reduce.h"
// Operator definitions plugged into the generic 'transform' reduction
// template from reduce.h, realizing a strided product reduction:
// merge/update combine partial results by multiplication, op maps each
// element to itself, and postProcess returns the reduction unchanged.
// Combines two partial reductions (product semantics).
__device__ float merge(float old,float opOutput,float *extraParams) {
return opOutput * old;
}
// Folds one new operator output into the running product.
__device__ float update(float old,float opOutput,float *extraParams) {
return opOutput * old;
}
// Per-element map: identity.
__device__ float op(float d1,float *extraParams) {
return d1;
}
// No post-processing for a plain product.
__device__ float postProcess(float reduction,int n,int xOffset,float *dx,int incx,float *extraParams,float *result) {
return reduction;
}
// Kernel entry point: delegates to the shared 'transform' driver.
extern "C"
__global__ void prod_strided_float(int n, int xOffset,float *dx,int incx,float *extraParams,float *result) {
transform(n,xOffset,dx,incx,extraParams,result);
}
|
0f35237f7f148347ec009a0f10ab9ad05cf59451.hip | // !!! This is a file automatically generated by hipify!!!
/*----------------------------------------------------------------------------------*
* Copyright (c) 2010-2018 Pauli Parkkinen, Eelis Solala, Wen-Hua Xu, *
* Sergio Losilla, Elias Toivanen, Jonas Juselius *
* *
* Permission is hereby granted, free of charge, to any person obtaining a copy *
* of this software and associated documentation files (the "Software"), to deal *
* in the Software without restriction, including without limitation the rights *
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell *
* copies of the Software, and to permit persons to whom the Software is *
* furnished to do so, subject to the following conditions: *
* *
* The above copyright notice and this permission notice shall be included in all*
* copies or substantial portions of the Software. *
* *
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR *
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, *
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE *
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER *
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, *
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE *
* SOFTWARE. *
*----------------------------------------------------------------------------------*/
/*! @file bubbles_cuda.cu
*! @brief CUDA implementation of the Bubbles.
*/
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <stdlib.h>
//#include <algorithm> *std::max_element(result_cube, result_cube + totalPointCount)
#include "bubbles_cuda.h"
#include "streamcontainer.h"
#include "grid.h"
#include "spherical_harmonics_cuda.h"
#include "cube.h"
#include "function3d_multiplier.h"
#include "memory_leak_operators.h"
#include "evaluators.h"
#define X_ 0
#define Y_ 1
#define Z_ 2
#define R_ 3
#if (__CUDA_ARCH__ > 350)
#define INJECT_BLOCK_SIZE 256
#else
#define INJECT_BLOCK_SIZE 128
#endif
#define NLIP 7
/** \brief Size of the CUDA blocks in the X dimension */
#define BLOCKDIMX 8
/** \brief Size of the CUDA blocks in the Y dimension */
#define BLOCKDIMY 4
/** \brief Size of the CUDA blocks in the Z dimension */
#define BLOCKDIMZ 4
#define FULL_MASK 0xffffffff
#define STR_HELPER(x) #x
#define STR(x) STR_HELPER(x)
// Status of the most recent HIP API call made by the host helpers below.
hipError_t cudastat;
// Grid/bubble parameters mirrored to constant memory for fast broadcast reads
// in kernels; presumably uploaded from the host (the upload site is outside
// this file chunk — confirm there).
__constant__ int shape_x_, shape_y_, shape_z_, ncell_, nlip_, lmax_, ilmmin_, lmin_, ilmmax_, first_term_, normalization_, ijk_max_;
__constant__ double charge_, r_max_;
// Lazily initialised stream table and bookkeeping flags.
hipStream_t **streams;
int streams_inited = 0;
int allocated = 0;
// Dynamically sized shared-memory window used by several kernels in this file.
extern __shared__ double shared_memory[];
/*
 * Debug helper: print the device's free and total memory at a call site.
 *
 * @param filename     Source file of the call site (pass __FILE__).
 * @param line_number  Line number of the call site (pass __LINE__).
 *
 * Fixes: the filename/line_number parameters were accepted but never used,
 * and size_t values were printed with %ld; %zu is the portable specifier.
 */
__host__ inline void check_memory(const char *filename, const int line_number) {
    size_t mem_total = 0;
    size_t mem_free = 0;
    hipMemGetInfo(&mem_free, &mem_total);
    printf("%s:%d: free memory: %zu, total: %zu\n ",
           filename, line_number, mem_free, mem_total);
}
/*
 * Load through the read-only data cache where supported (sm_35+), otherwise
 * fall back to a plain dereference.
 * NOTE(review): on AMD/HIP builds __CUDA_ARCH__ is typically undefined, so
 * they take the plain-load path — confirm this is intended.
 */
template<typename T>
__device__ __forceinline__ T ldg(const T* ptr) {
#if __CUDA_ARCH__ >= 350
    return __ldg(ptr);
#else
    return *ptr;
#endif
}
/*
 * Copy a pitched 3-D cube from device memory into a densely packed host array.
 *
 * @param hstPtr  host destination, width*height*depth doubles, contiguous
 * @param width   cube width in elements (doubles)
 * @param height  cube height in elements
 * @param depth   cube depth in elements
 * @param devPtr  device source (pitched allocation)
 * @param pitch   device row pitch in bytes
 */
void cube_download(double *hstPtr, int width, int height ,int depth,
        void *devPtr, size_t pitch) {
    // Define copy "from device to host" parameters
    hipMemcpy3DParms d2h={0};
    // source: pitched device pointer (pitch in bytes, width/height in elements)
    d2h.srcPtr = make_hipPitchedPtr(devPtr,
                    pitch,width,height);
    // destination: tightly packed host buffer, row pitch = width*sizeof(double)
    d2h.dstPtr = make_hipPitchedPtr((void *)hstPtr,
                    width*sizeof(double),width,height);
    // extent width is given in BYTES; height/depth in rows/slices
    d2h.extent = make_hipExtent(width * sizeof(double), height,
                    depth);
//    hipMemset3D( d2h.srcPtr, 999, d2h.extent);
    d2h.kind = hipMemcpyDeviceToHost;
    //  cudastat=hipMemset3D( d2h.srcPtr, 0, d2h.extent);
    // Copy to host and check the status (blocking copy)
    cudastat = hipMemcpy3D( &d2h );
    check_errors(__FILE__, __LINE__);
    return;
}
/*
 * Copy a densely packed host cube into a pitched 3-D device allocation.
 *
 * @param hstPtr  host source, (*width)*(*height)*(*depth) doubles, contiguous
 * @param width   pointer to cube width in elements (doubles)
 * @param height  pointer to cube height in elements
 * @param depth   pointer to cube depth in elements
 * @param devPtr  device destination (pitched allocation)
 * @param pitch   device row pitch in bytes
 *
 * Fix: record and check the hipMemcpy3D status, matching cube_download();
 * the original silently discarded the error code.
 */
void cube_upload(double *hstPtr, int *width ,int *height ,int *depth,
        void *devPtr, size_t pitch) {
    // Define copy "from host to device" parameters
    hipMemcpy3DParms h2d={0};
    // source: tightly packed host buffer, row pitch = *width*sizeof(double)
    h2d.srcPtr = make_hipPitchedPtr((void *)hstPtr,
                    *width*sizeof(double),*width,*height);
    // destination: pitched device pointer (pitch in bytes)
    h2d.dstPtr = make_hipPitchedPtr(devPtr,
                    pitch,*width,*height);
    // extent width is given in BYTES; height/depth in rows/slices
    h2d.extent = make_hipExtent(*width * sizeof(double), *height,
                    *depth);
    h2d.kind = hipMemcpyHostToDevice;
    // Copy to device and check the status, as cube_download() does
    cudastat = hipMemcpy3D( &h2d );
    check_errors(__FILE__, __LINE__);
    return;
}
/*
 * Binary search over the n+1 ascending cell boundaries in 'd': return the
 * index of the cell containing coordinate x, or -1 when x lies outside
 * [d[0], d[n]].
 */
__device__ int icell(double x, double *d, int n){
    // reject points outside the grid
    if ( x < d[0] || x > d[n] ) {
        return -1;
    }
    // bounds[0] = lower bound, bounds[1] = upper bound of the search window
    int bounds[2] = {0, n};
    int mid = n / 2;
    // log2(n)+1 halvings suffice to pin down the cell
    const int steps = log((float)n)/log(2.)+1;
    for (int step = 0; step < steps; step++){
        // branchless narrowing: shrink the upper bound when x < d[mid],
        // otherwise raise the lower bound
        bounds[ x < d[mid] ] = mid;
        mid = (bounds[0] + bounds[1]) / 2;
    }
    return mid;
}
/*
 * Compute the unit vector pointing from 'ref' to (x,y,z) in dist_vec and the
 * distance between the two points in *dist.
 * NOTE(review): divides by zero when the point coincides with ref — callers
 * must guarantee a nonzero separation.
 */
__device__ void calc_rc(double dist_vec[3], double *dist, double ref[3],double x,
                        double y, double z){
    // displacement from the reference point
    dist_vec[X_] = x - ref[X_];
    dist_vec[Y_] = y - ref[Y_];
    dist_vec[Z_] = z - ref[Z_];
    // Euclidean norm of the displacement
    const double norm = sqrt(dist_vec[X_]*dist_vec[X_] +
                             dist_vec[Y_]*dist_vec[Y_] +
                             dist_vec[Z_]*dist_vec[Z_]);
    *dist = norm;
    // scale the displacement down to a unit vector
    dist_vec[X_] /= norm;
    dist_vec[Y_] /= norm;
    dist_vec[Z_] /= norm;
}
/*
 * Lagrange interpolation: evaluate each of the n basis polynomials (their n
 * coefficients stored consecutively in 'lip', highest power first) at x via
 * Horner's rule, weight by the function value f[j], and sum.
 */
__device__ double eval_lip(int n, double *lip, double *f, double x){
    double total = 0.0;
    for (short j = 0; j < n; j++){
        // Horner evaluation of the j:th basis polynomial
        double poly = 0.0;
        for (short i = 0; i < n; i++){
            poly = poly * x + *(lip++);
        }
        total += poly * f[j];
    }
    return total;
}
/*
 * Horner evaluation of an n-coefficient polynomial stored highest power first.
 */
__device__ double eval_poly(int n, double *c, double x){
    double value = 0.0;
    for (int i = 0; i < n; i++) {
        value = value * x + c[i];
    }
    return value;
}
/*
* the following function precalculates some common values for the injection.
*
* NOTE: We are setting the cf-array to have 8 * (lmax+1) * (lmax+1) size
* This has several advantages (even if we are using more space and have
* blank spots in the array). 1) Every cell read is coalesced and we don't
* have overlapping requests! Additionally, we avoid divergence of the threads
* of one warp in the injection.
*/
/*
 * Kernel: precompute per-cell interpolation coefficients for a bubble.
 *
 * Each thread handles one (ilm, icell) pair: the nlip function values of that
 * cell are contracted with the Lagrange interpolation polynomials ('lip') to
 * produce 8 value coefficients ('cf'), and with the derivative polynomials
 * ('derivative_lip' / 'lower_derivative_lip') to produce 8 derivative
 * coefficients ('df', scaled by 1/h of the cell).
 *
 * NOTE(review): cf_results/df_results are indexed threadIdx.x * 8 into 8*64
 * double buffers, so the launch must use blockDim.x <= 64 and a 1-D block —
 * confirm against the host-side launch configuration.
 */
__global__ void calc_cf(Bubble *bub, int offset, int number_of_points, size_t device_f_pitch) {
    // get the index within this kernel call
    const int index = blockIdx.x * blockDim.x + threadIdx.x;
    // get the global index
    const int id= index + offset;
    // decompose the global index into (cell, lm) coordinates
    const int icell=id%bub->grid->ncell;
    const int ilm=id/bub->grid->ncell;
    const int nlip = bub->grid->nlip;
    __shared__ double shared_lip[49];
    __shared__ double derivative_lip[42];
    __shared__ double lower_derivative_lip[30];
    __shared__ double cf_results[8*64];
    __shared__ double df_results[8*64];
    double f_i;
    // load the Lagrange interpolation polynomials coefficients to
    // the shared memory
    if (threadIdx.x < (nlip) * (nlip)) {
        shared_lip[threadIdx.x] = bub->grid->lip[threadIdx.x];
    }
    if (threadIdx.x < (nlip) * (nlip-1)) {
        derivative_lip[threadIdx.x] = bub->grid->derivative_lip[threadIdx.x];
    }
    if (threadIdx.x < (nlip-2) * (nlip-1)) {
        lower_derivative_lip[threadIdx.x] = bub->grid->lower_derivative_lip[threadIdx.x];
    }
    // make the staged polynomial tables visible to every thread in the block
    __syncthreads();
    if ( index < number_of_points && ilm < ((bub->lmax+1)*(bub->lmax+1)) ) {
        // pointers into this thread's slice of the radial data:
        // f  = function values of the cell, cf/df = output coefficient slots
        double *f = bub->f + ilm * device_f_pitch / sizeof(double) + (icell * (bub->grid->nlip-1));
        double *cf = bub->cf + ( ilm * bub->grid->ncell + icell ) * 8;
        double *df = bub->df + ( ilm * bub->grid->ncell + icell ) * 8;
        short i,j;
        double one_per_cell_step = 1.0 / bub->grid->h[icell];
        double *lip=&shared_lip[0];
        double *dlip=&derivative_lip[0];
        double *ldlip=&lower_derivative_lip[0];
        // set the shared memory result array to zero
        for (i=0; i < 8; i++) {
            cf_results[threadIdx.x * 8 + i]=0.0;
            df_results[threadIdx.x * 8 + i]=0.0;
        }
        // evaluate the cf to shared memory; lip/dlip/ldlip are walked
        // sequentially across the i-loop (coefficients stored row-major)
        for (i=0; i < nlip; i++) {
            f_i = f[i];
            for (j=0; j < nlip ;j++){
                cf_results[threadIdx.x * 8 + j] += f_i* (*(lip++));
            }
            // I (lnw) cannot see any good reason for this special case that is, the
            // derivative at the centre of each bubble should be zero, but why does it have
            // to be enforced?
            const bool ignore_first = true;
            if(ignore_first){
                // handle the special case of the first cell, where the first
                // data item most likely is not valid
                if (icell == 0) {
                    if (i != 0) {
                        // use the lower-order derivative table that skips the
                        // (invalid) first data point
                        for (j = 1 ; j <= nlip-2; j++) {
                            df_results[threadIdx.x * 8 + j] += f_i* (*(ldlip++));
                        }
                    }
                    else {
                        // force the derivative at the bubble centre to zero
                        df_results[threadIdx.x * 8] = 0.0;
                    }
                }
                else {
                    for (j=0; j < nlip-1 ;j++) {
                        df_results[threadIdx.x * 8 + j] += f_i* (*(dlip++));
                    }
                }
            }
            else { // no special treatment
                for (j=0; j < nlip-1 ;j++) {
                    df_results[threadIdx.x * 8 + j] += f_i* (*(dlip++));
                }
            }
        }
        // copy the result to device memory (df is rescaled to world units)
        for (i=0; i < 8; i++) {
            cf[i] = cf_results[threadIdx.x * 8 + i];
            df[i] = one_per_cell_step * df_results[threadIdx.x * 8 + i];
        }
    }
    return;
}
/*
 * Horner evaluation of an n-coefficient polynomial stored highest power first.
 */
__device__ inline double evaluate_polynomials(int n, const double* __restrict__ c, const double x){
    double value = 0.0;
    for (int i = 0; i < n; i++) {
        value = value * x + c[i];
    }
    return value;
}
// __shfl* are defined from 3.x until including 6.x.
// they are replaced by __shfl*_sync
#if (__CUDA_ARCH__ >= 350) && (__CUDA_ARCH__ < 700)
/*
* Evaluates one granular polynomial for coefficients, and x
* NOTE: each thread is different value for coefficient, when entering the function
* NOTE: each x value must be the same for 8 consecutive threads
* NOTE: upon return each thread has the same value.
*/
/*
 * Evaluates one granular polynomial for coefficients, and x.
 * Each lane of an 8-lane group holds one coefficient; Horner's rule is run by
 * pulling the later coefficients from the higher lanes with __shfl_down.
 * NOTE: each x value must be the same for 8 consecutive threads, and only the
 * group's lane 0 accumulates the complete result (the caller stores only that
 * lane's value). Pre-Volta only: relies on legacy mask-less __shfl_down and
 * implicit warp synchrony (guarded by __CUDA_ARCH__ < 700 above).
 */
__inline__ __device__
double evaluate_polynomials_unit_shuffle(double coefficient, const double x) {
    double result = coefficient;
    for (int i = 1; i < 7; i++) {
        result *= x;
        result += __shfl_down(coefficient, i, 8);
    }
    return result;
}
/*
 * Horner evaluation of an nlip-coefficient polynomial held per-thread,
 * highest power first.
 */
__inline__ __device__
double evaluate_polynomials_unit_register(const double * __restrict__ coefficients, const double x, int nlip) {
    double value = 0.0;
    for (int i = 0; i < nlip; i++) {
        value = value * x + coefficients[i];
    }
    return value;
}
/*
 * Forward "horizontal" rotation used by transpose8: elements 1..7 of each
 * lane's 8-element array are exchanged across the 8-lane group via shuffles,
 * with a per-element offset derived from this lane's order_number (lane index
 * within the group). Element 0 is untouched.
 * Pre-Volta only: legacy mask-less __shfl (guarded by __CUDA_ARCH__ < 700).
 */
__device__ inline void horizontal_rotate_8f(double coefficients[8], unsigned int order_number) {
    coefficients[1] = __shfl(coefficients[1], (order_number+1)%8, 8);
    coefficients[2] = __shfl(coefficients[2], (order_number+2)%8, 8);
    coefficients[3] = __shfl(coefficients[3], (order_number+3)%8, 8);
    coefficients[4] = __shfl(coefficients[4], (order_number+4)%8, 8);
    coefficients[5] = __shfl(coefficients[5], (order_number+5)%8, 8);
    coefficients[6] = __shfl(coefficients[6], (order_number+6)%8, 8);
    coefficients[7] = __shfl(coefficients[7], (order_number+7)%8, 8);
}
/*
 * Backward "horizontal" rotation, the inverse companion of
 * horizontal_rotate_8f (offsets run +7..+1 instead of +1..+7).
 * Pre-Volta only: legacy mask-less __shfl (guarded by __CUDA_ARCH__ < 700).
 */
__device__ inline void horizontal_rotate_8b(double coefficients[8], unsigned int order_number) {
    coefficients[1] = __shfl(coefficients[1], (order_number+7)%8, 8);
    coefficients[2] = __shfl(coefficients[2], (order_number+6)%8, 8);
    coefficients[3] = __shfl(coefficients[3], (order_number+5)%8, 8);
    coefficients[4] = __shfl(coefficients[4], (order_number+4)%8, 8);
    coefficients[5] = __shfl(coefficients[5], (order_number+3)%8, 8);
    coefficients[6] = __shfl(coefficients[6], (order_number+2)%8, 8);
    coefficients[7] = __shfl(coefficients[7], (order_number+1)%8, 8);
}
/*
 * "Vertical" cyclic rotation of this lane's 8 values by order_number
 * positions, written with predicated selects so all lanes of a warp follow
 * the same instruction stream (no divergence). Companion of the horizontal
 * rotations in the lane-wise 8x8 transpose (transpose8).
 *
 * Fix: in the order_number == 3 section the final assignment tested
 * (order_number == 2), so order 2 wrote 'tmp' twice (src[2] and src[3])
 * while order 3 never received it.
 */
__device__ inline void vertical_rotate_8(double src[8], unsigned int order_number) {
    double tmp = src[0];
    src[0] = (order_number == 1) ? src[7] : src[0];
    src[7] = (order_number == 1) ? src[6] : src[7];
    src[6] = (order_number == 1) ? src[5] : src[6];
    src[5] = (order_number == 1) ? src[4] : src[5];
    src[4] = (order_number == 1) ? src[3] : src[4];
    src[3] = (order_number == 1) ? src[2] : src[3];
    src[2] = (order_number == 1) ? src[1] : src[2];
    src[1] = (order_number == 1) ? tmp    : src[1];
    src[1] = (order_number == 2) ? src[7] : src[1];
    src[0] = (order_number == 2) ? src[6] : src[0];
    src[7] = (order_number == 2) ? src[5] : src[7];
    src[6] = (order_number == 2) ? src[4] : src[6];
    src[5] = (order_number == 2) ? src[3] : src[5];
    src[4] = (order_number == 2) ? src[2] : src[4];
    src[3] = (order_number == 2) ? src[1] : src[3];
    src[2] = (order_number == 2) ? tmp    : src[2];
    src[2] = (order_number == 3) ? src[7] : src[2];
    src[1] = (order_number == 3) ? src[6] : src[1];
    src[0] = (order_number == 3) ? src[5] : src[0];
    src[7] = (order_number == 3) ? src[4] : src[7];
    src[6] = (order_number == 3) ? src[3] : src[6];
    src[5] = (order_number == 3) ? src[2] : src[5];
    src[4] = (order_number == 3) ? src[1] : src[4];
    // BUGFIX: was (order_number == 2)
    src[3] = (order_number == 3) ? tmp    : src[3];
    src[3] = (order_number == 4) ? src[7] : src[3];
    src[2] = (order_number == 4) ? src[6] : src[2];
    src[1] = (order_number == 4) ? src[5] : src[1];
    src[0] = (order_number == 4) ? src[4] : src[0];
    src[7] = (order_number == 4) ? src[3] : src[7];
    src[6] = (order_number == 4) ? src[2] : src[6];
    src[5] = (order_number == 4) ? src[1] : src[5];
    src[4] = (order_number == 4) ? tmp    : src[4];
    src[4] = (order_number == 5) ? src[7] : src[4];
    src[3] = (order_number == 5) ? src[6] : src[3];
    src[2] = (order_number == 5) ? src[5] : src[2];
    src[1] = (order_number == 5) ? src[4] : src[1];
    src[0] = (order_number == 5) ? src[3] : src[0];
    src[7] = (order_number == 5) ? src[2] : src[7];
    src[6] = (order_number == 5) ? src[1] : src[6];
    src[5] = (order_number == 5) ? tmp    : src[5];
    src[5] = (order_number == 6) ? src[7] : src[5];
    src[4] = (order_number == 6) ? src[6] : src[4];
    src[3] = (order_number == 6) ? src[5] : src[3];
    src[2] = (order_number == 6) ? src[4] : src[2];
    src[1] = (order_number == 6) ? src[3] : src[1];
    src[0] = (order_number == 6) ? src[2] : src[0];
    src[7] = (order_number == 6) ? src[1] : src[7];
    src[6] = (order_number == 6) ? tmp    : src[6];
    src[6] = (order_number == 7) ? src[7] : src[6];
    src[5] = (order_number == 7) ? src[6] : src[5];
    src[4] = (order_number == 7) ? src[5] : src[4];
    src[3] = (order_number == 7) ? src[4] : src[3];
    src[2] = (order_number == 7) ? src[3] : src[2];
    src[1] = (order_number == 7) ? src[2] : src[1];
    src[0] = (order_number == 7) ? src[1] : src[0];
    src[7] = (order_number == 7) ? tmp    : src[7];
}
/*
 * In-register transpose of an 8x8 double tile distributed over 8 lanes (each
 * lane holds one 8-element array), done as forward-horizontal, vertical, then
 * backward-horizontal rotations keyed on the lane's order_number.
 * NOTE(review): appears unused at present — the call site in
 * evaluate_polynomials_shuffle is commented out.
 */
__device__ inline void transpose8(double coefficients[8], int order_number) {
    //printf("Original coefficients %d: %f, %f, %f, %f, %f, %f, %f, %f\n", order_number, coefficients[0], coefficients[1], coefficients[2], coefficients[3], coefficients[4], coefficients[5], coefficients[6], coefficients[7]);
    horizontal_rotate_8f(coefficients, order_number);
    vertical_rotate_8(coefficients, order_number);
    horizontal_rotate_8b(coefficients, order_number);
    //printf("Transposed coefficients coefficients %d: %f, %f, %f, %f, %f, %f, %f, %f\n", order_number, coefficients[0], coefficients[1], coefficients[2], coefficients[3], coefficients[4], coefficients[5], coefficients[6], coefficients[7]);
}
/*
* Evaluates the polynomials using shuffle actions. This saves the shared_memory significantly and allows
* the increase of the occupancy of the devices.
*
* This function only needs blockDim.x * 8 bytes of shared memory. This allows the usage of any sized blocks
* that are practically useful.
*
* The number of arithmetic operations is larger than for the version using shared memory only, and thus
* the effect to the execution speed remains to be seen.
*/
/*
 * Evaluate one polynomial per 8-lane group using warp shuffles; needs only
 * blockDim.x * 8 bytes of shared memory (the per-thread result slots in
 * shared_memory). Each iteration i evaluates the polynomial belonging to
 * group member i, and that group's lane 0 stores the result; finally every
 * thread reads back its own slot.
 */
__device__ inline double evaluate_polynomials_shuffle(const int address,
                                                      const double * __restrict__ c,
                                                      const double x,
                                                      const int nlip) {
    double *result = &shared_memory[0];
    //double coefficients[8];
    //double res;
    int remainder = threadIdx.x%8;
    int base_address = 8*(threadIdx.x/8);
    double res;
    for (int i = 0; i < 8; i ++) {
        // evaluate the polynomials
        // NOTE: __shfl(address, i, width=8) gets the address needed by the thread i/8 in the thread group
        // NOTE: __shfl(x, i, width = 8) gets the coordinate x of the thread i/8 in the thread group
        // NOTE: the c access (global memory) is coalesced,
        // NOTE: shared memory bank conflict should not occur, as every thread in the 8 thread group accesses
        //       the same address, thus resulting in broadcast.
        //coefficients[i] = c[__shfl(address, i, 8) + remainder];
        res = evaluate_polynomials_unit_shuffle( c[__shfl(address, i, 8) + remainder], __shfl(x, i, 8));
        // only the group's lane 0 holds the fully accumulated Horner result
        if (remainder == 0) result[base_address + i] = res;
    }
    // swap the coefficients to be with their rightful owners
    //transpose8(coefficients, remainder);
    // NOTE(review): relies on pre-Volta implicit warp synchrony between the
    // lane-0 store and this read (guarded by __CUDA_ARCH__ < 700 above)
    return result[threadIdx.x];
    //return evaluate_polynomials_unit_register(coefficients, x, nlip);
}
#endif
/*
* Get the thread-id within block.
*/
/*
 * Linear index of this thread within its (up to 3-D) thread block,
 * x fastest-varying.
 */
__device__ inline int getThreadId() {
    int id = threadIdx.z;
    id = id * blockDim.y + threadIdx.y;
    id = id * blockDim.x + threadIdx.x;
    return id;
}
/*
* @param c, bubbles coefficients in the global memory
* @param x, the coordinate of the point in cell coordinates
*
* NOTE: The parameter 'c' must be pitched for this function to be useful
* NOTE: This function is made for NLIP:7, with other nlip values, the function must be remade
*/
/*
 * Stage the 7 coefficients at 'c[address]' for every thread of an 8-lane
 * group into shared memory (using the group's lanes cooperatively), then
 * evaluate the polynomial at x with an unrolled Horner scheme.
 * Three load paths: legacy __shfl (sm_35..sm_6x), __shfl_sync (sm_70+, with
 * every 8th lane masked out), and a shared-memory address exchange fallback.
 * Requires 7 doubles of shared memory per thread (plus the address array on
 * the fallback path).
 */
template<int nlip>
__device__ inline
double evaluate_polynomials_shared(const int address, const double* __restrict__ c, const double x) {
    double *coefficients = &shared_memory[0];
    //const float *fc = (const float *)c;
    int threadId = getThreadId();
    const int remainder = threadId%8;
    const int base_address = 8*(threadId/8);
    // each thread owns a 7-double slot in shared memory
    const int id = base_address * 7 + remainder;
#if (__CUDA_ARCH__ >= 350) && (__CUDA_ARCH__ < 700)
    // read the coefficients in the shared memory, 8 threads
    // neighbouring each other are reading the global memory
    // coefficients for one thread at the time, starting from 0
    // and going to 7
    int address_7 = __shfl(address, 7, 8);
    if (remainder < 7) { // every eighth lane is idle
        coefficients[id]     = ldg<double>(&c[__shfl(address, 0, 8) + remainder]);
        coefficients[id+7]   = ldg<double>(&c[__shfl(address, 1, 8) + remainder]);
        coefficients[id+7*2] = ldg<double>(&c[__shfl(address, 2, 8) + remainder]);
        coefficients[id+7*3] = ldg<double>(&c[__shfl(address, 3, 8) + remainder]);
        coefficients[id+7*4] = ldg<double>(&c[__shfl(address, 4, 8) + remainder]);
        coefficients[id+7*5] = ldg<double>(&c[__shfl(address, 5, 8) + remainder]);
        coefficients[id+7*6] = ldg<double>(&c[__shfl(address, 6, 8) + remainder]);
        coefficients[id+7*7] = ldg<double>(&c[address_7 + remainder]);
    }
    /*coefficients[id] = c[__shfl(address, 0, 8) + remainder];
    coefficients[id+8] = c[__shfl(address, 1, 8) + remainder];
    coefficients[id+16] = c[__shfl(address, 2, 8) + remainder];
    coefficients[id+24] = c[__shfl(address, 3, 8) + remainder];
    coefficients[id+32] = c[__shfl(address, 4, 8) + remainder];
    coefficients[id+40] = c[__shfl(address, 5, 8) + remainder];
    coefficients[id+48] = c[__shfl(address, 6, 8) + remainder];
    coefficients[id+56] = c[__shfl(address, 7, 8) + remainder];*/
    /*fcoefficients[id] = fc[__shfl(faddress, 0, 16) + remainder];
    fcoefficients[id+16] = fc[__shfl(faddress, 1, 16) + remainder];
    fcoefficients[id+32] = fc[__shfl(faddress, 2, 16) + remainder];
    fcoefficients[id+48] = fc[__shfl(faddress, 3, 16) + remainder];
    fcoefficients[id+64] = fc[__shfl(faddress, 4, 16) + remainder];
    fcoefficients[id+80] = fc[__shfl(faddress, 5, 16) + remainder];
    fcoefficients[id+96] = fc[__shfl(faddress, 6, 16) + remainder];
    fcoefficients[id+112] = fc[__shfl(faddress, 7, 16) + remainder];
    fcoefficients[id+128] = fc[__shfl(faddress, 8, 16) + remainder];
    fcoefficients[id+144] = fc[__shfl(faddress, 9, 16) + remainder];
    fcoefficients[id+160] = fc[__shfl(faddress, 10, 16) + remainder];
    fcoefficients[id+176] = fc[__shfl(faddress, 11, 16) + remainder];
    fcoefficients[id+192] = fc[__shfl(faddress, 12, 16) + remainder];
    fcoefficients[id+208] = fc[__shfl(faddress, 13, 16) + remainder];
    fcoefficients[id+224] = fc[__shfl(faddress, 14, 16) + remainder];
    fcoefficients[id+240] = fc[__shfl(faddress, 15, 16) + remainder];*/
#elif  __CUDA_ARCH__ >= 700
//     printf("activemask: %u\n", __activemask());
    int address_7 = __shfl_sync(FULL_MASK, address, 7, 8);
    if (remainder < 7) { // every eighth lane is idle and therefore removed from the mask
//         printf("activemask: %u\n", __activemask());
        coefficients[id]     = ldg<double>(&c[__shfl_sync(0b01111111011111110111111101111111, address, 0, 8) + remainder]);
        coefficients[id+7]   = ldg<double>(&c[__shfl_sync(0b01111111011111110111111101111111, address, 1, 8) + remainder]);
        coefficients[id+7*2] = ldg<double>(&c[__shfl_sync(0b01111111011111110111111101111111, address, 2, 8) + remainder]);
        coefficients[id+7*3] = ldg<double>(&c[__shfl_sync(0b01111111011111110111111101111111, address, 3, 8) + remainder]);
        coefficients[id+7*4] = ldg<double>(&c[__shfl_sync(0b01111111011111110111111101111111, address, 4, 8) + remainder]);
        coefficients[id+7*5] = ldg<double>(&c[__shfl_sync(0b01111111011111110111111101111111, address, 5, 8) + remainder]);
        coefficients[id+7*6] = ldg<double>(&c[__shfl_sync(0b01111111011111110111111101111111, address, 6, 8) + remainder]);
        coefficients[id+7*7] = ldg<double>(&c[ address_7 + remainder]);
    }
#else
    // store the addresses to the shared memory
    // NOTE(review): indexed with threadIdx.x but read back via base_address
    // derived from the full 3-D threadId, and there is no barrier between the
    // write and the cross-thread reads — this path appears to assume a 1-D,
    // warp-synchronous pre-sm_35 launch; confirm before relying on it.
    int *address_array = (int *) &shared_memory[8*blockDim.x * blockDim.y * blockDim.z];
    address_array[threadIdx.x] = address;
    coefficients[id] = c[address_array[base_address] + remainder];
    coefficients[id+8] = c[address_array[base_address +1] + remainder];
    coefficients[id+16] = c[address_array[base_address +2] + remainder];
    coefficients[id+24] = c[address_array[base_address +3] + remainder];
    coefficients[id+32] = c[address_array[base_address +4] + remainder];
    coefficients[id+40] = c[address_array[base_address +5] + remainder];
    coefficients[id+48] = c[address_array[base_address +6] + remainder];
    coefficients[id+56] = c[address_array[base_address +7] + remainder];
#endif
    // unrolled Horner evaluation over this thread's 7-coefficient slot;
    // the template parameter prunes unused steps at compile time
    double *coeff = &coefficients[threadId * 7];
    double result = coeff[0];
    if (nlip > 1) {
        result *= x;
        result += coeff[1];
    }
    if (nlip > 2) {
        result *= x;
        result += coeff[2];
    }
    if (nlip > 3) {
        result *= x;
        result += coeff[3];
    }
    if (nlip > 4) {
        result *= x;
        result += coeff[4];
    }
    if (nlip > 5) {
        result *= x;
        result += coeff[5];
    }
    if (nlip > 6) {
        result *= x;
        result += coeff[6];
    }
    return result;
}
/*
 * Binary search over the n+1 ascending cell boundaries in 'd': return the
 * index of the cell containing x, or -1 when x is outside [d[0], d[n]].
 * NOTE(review): duplicates icell() defined earlier in this file.
 */
__device__ inline int calculate_icell(double x, double *d, int n){
    if ( x < d[0] || x > d[n] ) {
        return -1;
    }
    int lo = 0;
    int hi = n;
    int mid = n / 2;
    // log2(n)+1 halvings suffice to pin down the cell
    const int iterations = log((float)n)/log(2.)+1;
    for (int it = 0; it < iterations; it++){
        if (x < d[mid]) {
            hi = mid;
        } else {
            lo = mid;
        }
        mid = (lo + hi) / 2;
    }
    return mid;
}
/*
 * Locate the radial cell and in-cell coordinate for distance x on the
 * non-uniform radial grid r(i) = c / (a/(i*dx) - 1), which is denser near
 * the nucleus for larger charges.
 *
 * @param x                 distance from the bubble centre
 * @param charge            nuclear charge defining the grid density
 * @param r_max             outermost grid radius
 * @param ncell             number of radial cells
 * @param nlip              number of Lagrange points per cell
 * @param icell             [out] index of the containing cell
 * @param in_cell_position  [out] position inside the cell, in grid-step units
 *                          measured from the cell midpoint
 *
 * Fix: the first-cell test compared the POINTER icell against 0 (always
 * false); it must dereference. The old code only produced x0 == 0.0 through
 * the IEEE c/inf fallout of dividing by *icell == 0.
 */
__device__ inline void calculate_icell_radial(const double x, const double charge, const double r_max,
                                   const int ncell, const int nlip,
                                   int *icell, double *in_cell_position) {
    const double dx = r_max/(double)ncell;
    const double c=8.0*rsqrt(charge)/charge;
    const double a = r_max + c;
    *icell = (int)(x * a / ((c + x)*dx));
    // cell boundaries in radial coordinates
    double x1 = c / (a/((*icell+1) * dx) - 1.0);
    double x0 = c / (a/(*icell * dx) - 1.0);
    // BUGFIX: was `icell == 0` (pointer comparison); the first cell starts
    // at the origin
    if (*icell == 0) {
        x0 = 0.0;
    }
    double grid_step = (x1-x0) / (nlip-1);
    double center = (x1+x0) / (2.0);
    *in_cell_position= (x - center)/grid_step;
}
/*
 * Writes the component-wise offset of (x,y,z) from the reference point into
 * dist_vec_* and the Euclidean length of that offset into dist.
 */
inline __device__ void calculate_distance(double &dist_vec_x, double &dist_vec_y, double &dist_vec_z, double &dist,
                                          const double reference_point_x, const double reference_point_y, const double reference_point_z,
                                          const double x, const double y, const double z){
    // displacement relative to the reference point
    const double dx = x - reference_point_x;
    const double dy = y - reference_point_y;
    const double dz = z - reference_point_z;
    dist_vec_x = dx;
    dist_vec_y = dy;
    dist_vec_z = dz;
    // Euclidean length of the displacement
    dist = sqrt(dx * dx + dy * dy + dz * dz);
}
/*
* Evaluates value of single bubble at a point. This is very similar with the
* SolidHarmonics simple evaluation, but the results are multiplied with the
* polynomial evaluations
*/
// Walks the (l,m) functions in the packed coefficient array (stride 8 per
// cell, functions ordered by increasing l and, within l, m = -l..l), building
// the real solid-harmonic factors with upward recursions in l while summing
// radial_value(l,m) * Y(l,m) for l in [lmin, lmax].
__device__ inline double Bubbles_evaluate_point_lmin(
                 // x-coordinate relative to the center of the bubble
                 const double &x,
                 // y-coordinate relative to the center of the bubble
                 const double &y,
                 // z-coordinate relative to the center of the bubble
                 const double &z,
                 // relative distance from the center of the bubble
                 const double &distance,
                 // minimum quantum number 'l'
                 const int &lmin,
                 // maximum quantum number 'l'
                 const int &lmax,
                 // number of cells
                 const int &ncell,
                 // number of lagrange integration polynomials per
                 // cell, i.e., the number of grid points per cell
                 const int &nlip,
                 // position inside the cell
                 const double &r,
                 // k value for the bubble
                 const int &k,
                 // the first address value in bubble for the selected cell
                 const int &address,
                 const double* __restrict__ cf
                                     ) {
    double result = 0.0;
    int lm_address = address, address2 = address;
    // NOTE: Here the nlip is replaced by 8 because this gives advantages in loading the stuff
    // also *cf this should be done
    const int ncell_nlip = ncell * 8;
    int l, m, l2;
    // prev1/prev2 carry the two previous l-values of the recursion in l;
    // top/bottom carry the m=+l / m=-l "diagonal" values
    double top = 0.0, bottom = 0.0, new_bottom = 0.0, prev1 = 0.0, prev2 = 0.0, current = 0.0;
    // multiplier accumulates the 1/r^l normalisation of each shell
    double multiplier = 0.0, multiplier2 = 0.0, one_per_r = 1.0 / distance;
    double r2 = x*x + y*y + z*z;
    l = 0;
    // set value for l=0, m=0
    if (lmin == 0) {
        //printf("x: %f, y: %f, z: %f, nlip: %d, ncell: %d, l: 0, address: %d, cf: %ld, r: %f\n", x, y, z, nlip, ncell, 0, lm_address, cf, r);
        //printf("shared_memory address: %ld\n");
        //printf("shared memory first value: %f", shared_memory[0]);
        result = evaluate_polynomials_shared<NLIP>(lm_address, cf, r);
    }
    if (lmax >= 1) {
        l = 1;
        multiplier = one_per_r;
        // set value for l=1, m=-1
        lm_address += ncell_nlip;
        if (lmin <= 1) {
            result += y * evaluate_polynomials_shared<NLIP>(lm_address, cf, r) * multiplier;
        }
        // set all values where m=-1
        m = -1;
        prev1 = y;
        // the starting address has 1 item before from the l=0, 3 from l=1, and 1 from l=2
        address2 = address + ncell_nlip * 5;
        multiplier2 = multiplier * one_per_r;
        for (l = 2; l <= lmax; l++) {
            current = ( 2.0*(double)l-1.0) * rsqrt( 1.0*(double)((l+m)*(l-m)) ) * z*prev1;
            if (l > 2) {
                current -= sqrt( (double)((l+m-1)*(l-m-1)) / (double)((l+m)*(l-m)) ) * r2 * prev2;
            }
            prev2 = prev1;
            prev1 = current;
            if (l >= lmin) {
                result += current * evaluate_polynomials_shared<NLIP>(address2, cf, r) * multiplier2;
            }
            // add the address2 to get to the next item with m=-1
            address2 += ncell_nlip * (2*l+2);
            multiplier2 *= one_per_r;
        }
        // set value for l=1, m=0
        lm_address += ncell_nlip;
        if (lmin <= 1) {
            result += z * evaluate_polynomials_shared<NLIP>(lm_address, cf, r) * multiplier;
        }
        // set all values where m=0
        prev1 = z;
        prev2 = 1.0;
        m = 0;
        // the starting address has 1 item before from the l=0, 3 from l=1, and 2 from l=2
        address2 = address + ncell_nlip * 6;
        multiplier2 = multiplier * one_per_r;
        for (l = 2; l <= lmax; l++) {
            current = ( 2.0*(double)l-1.0) * rsqrt( 1.0*(double)((l+m)*(l-m)) ) * z * prev1;
            current -= sqrt( (double)((l+m-1)*(l-m-1)) / (double)((l+m)*(l-m)) ) * r2 * prev2;
            prev2 = prev1;
            prev1 = current;
            if (l >= lmin) {
                result += current * evaluate_polynomials_shared<NLIP>(address2, cf, r) * multiplier2;
            }
            // add the address2 to get to the next item with m=0
            address2 += ncell_nlip * (2*l+2);
            multiplier2 *= one_per_r;
        }
        // set value for l=1, m=1
        lm_address += ncell_nlip;
        if (lmin <= 1) {
            result += x * evaluate_polynomials_shared<NLIP>(lm_address, cf, r) * multiplier;
        }
        // set all values where m=1
        prev1 = x;
        m = 1;
        // the starting address has 1 item before from the l=0, 3 from l=1, and 3 from l=2
        address2 = address + ncell_nlip * 7;
        multiplier2 = multiplier * one_per_r;
        for (l = 2; l <= lmax; l++) {
            current = ( 2.0*(double)l-1.0) * rsqrt( 1.0*(double)((l+m)*(l-m)) ) * z*prev1;
            if (l > 2) {
                current -= sqrt( (double)((l+m-1)*(l-m-1)) / (double)((l+m)*(l-m)) ) * r2 * prev2;
            }
            prev2 = prev1;
            prev1 = current;
            if (l >= lmin) {
                result += current * evaluate_polynomials_shared<NLIP>(address2, cf, r) * multiplier2;
            }
            // add the address2 to get to the next item with m=1
            address2 += ncell_nlip * (2*l+2);
            multiplier2 *= one_per_r;
        }
        // go through the rest of the stuff
        bottom = y; // bottom refers to solid harmonics value with l=l-1 and m=-(l-1)
        top = x;    // top refers to solid harmonics value with l=l-1 and m=l-1
        lm_address += ncell_nlip;
        multiplier *= one_per_r;
        for (l=2; l <= lmax; l++) {
            // diagonal recursion: raise l and lower m to its minimum (-l)
            new_bottom = sqrt((2.0*(double)l - 1.0) / (2.0*(double)l)) *
                            ( y*top + x*bottom);
            if (l >= lmin) {
                result += new_bottom * evaluate_polynomials_shared<NLIP>(lm_address, cf, r) * multiplier;
            }
            // set all values where m=-l
            m = -l;
            prev1 = new_bottom;
            address2 = lm_address + (2*l+2) * ncell_nlip;
            multiplier2 = multiplier * one_per_r;
            for (l2 = l+1; l2 <= lmax; l2++) {
                current = ( 2.0*(double)l2-1.0) * rsqrt( 1.0*(double)((l2+m)*(l2-m)) ) * z*prev1;
                if (l2 > l+1) {
                    current -= sqrt( (double)((l2+m-1)*(l2-m-1)) / (double)((l2+m)*(l2-m)) ) * r2 * prev2;
                }
                prev2 = prev1;
                prev1 = current;
                if (l2 >= lmin) {
                    result += current * evaluate_polynomials_shared<NLIP>(address2, cf, r) * multiplier2;
                }
                // add the address2 to get to the next item with m=l
                address2 += ncell_nlip * (2*l2+2);
                multiplier2 *= one_per_r;
            }
            // get value for l=l, m=l. The address is 2*l items away from l=l, m=-l
            lm_address += 2*l * ncell_nlip;
            top = sqrt((2.0*(double)l - 1.0) / (2.0*(double)l)) *
                     ( x*top-y*bottom );
            // set all values where m=l
            m = l;
            prev1 = top;
            address2 = lm_address + (2*l+2) * ncell_nlip;
            multiplier2 = multiplier * one_per_r;
            for (l2 = l+1; l2 <= lmax; l2++) {
                current = ( 2.0*(double)l2-1.0) * rsqrt( 1.0*(double)((l2+m)*(l2-m)) ) * z*prev1;
                if (l2 > l+1) {
                    current -= sqrt( (double)((l2+m-1)*(l2-m-1)) / (double)((l2+m)*(l2-m)) ) * r2 * prev2;
                }
                prev2 = prev1;
                prev1 = current;
                if (l2 >= lmin) {
                    result += current * evaluate_polynomials_shared<NLIP>(address2, cf, r) * multiplier2;
                }
                // add the address2 to get to the next item with m=l
                address2 += ncell_nlip * (2*l2+2);
                multiplier2 *= one_per_r;
            }
            // store the new bottom: l=l, m=-l (we need the old bottom in calculation of top)
            bottom = new_bottom;
            if (l >= lmin) {
                result += top * evaluate_polynomials_shared<NLIP>(lm_address, cf, r) * multiplier;
            }
            // get next address
            lm_address += ncell_nlip;
            multiplier *= one_per_r;
        }
    }
    // multiply the result with r^k, if k is not 0
    // the distance is not too close to 0.0 as this is checked
    // earlier in this function
    if (k != 0 && distance > 1e-12) {
        result *= pow(distance, (double)k);
    }
    // at the origin only the l=0 term survives; use its leading coefficient
    if (distance < 1e-8) {
        result = 1.0 * cf[0]; //evaluate_polynomials(nlip, &cf[address], r);
    }
    return result;
}
/*
* (int nlip, int ncell, int l, int address, double *c, const double x)
* Evaluates the value of gradient of a single bubble at a point. This is very similar with the
* SolidHarmonics simple evaluation, but the results are multiplied with the
* polynomial evaluations and summed together.
*/
template <bool evaluate_gradients_x, bool evaluate_gradients_y, bool evaluate_gradients_z >
__device__ inline void Bubbles_evaluate_gradient_point(
// x-coordinate relative to the center of the bubble
const double &x,
// y-coordinate relative to the center of the bubble
const double &y,
// z-coordinate relative to the center of the bubble
const double &z,
// relative distance from the center of the bubble
const double &distance,
// maximum quantum number 'l'
const int &lmax,
// number of cells
const int &ncell,
// number of lagrange integration polynomials per
// cell, i.e., the number of grid points per cell
const int &nlip,
// position inside the cell
const double &r,
// k value for the bubble
const int &k,
// the first address value in bubble for the selected cell
const int &address,
// constant pointer to a variable double array
const double* __restrict__ cf,
// constant pointer to a derivative variable double array
const double* __restrict__ df,
// if only the l = 0 is evaluated
const bool only_spherical,
// result
double result[3]
) {
int lm_address = address, address2;
// NOTE: Here the nlip is replaced by 8 because this gives advantages in loading the stuff
// also *cf this should be done
const int ncell_nlip = ncell * 8;
int l, l2;
double top, bottom, new_bottom, prev1, prev2, current, current_gradient[3], prev1_gradient[3], prev2_gradient[3], bottom_gradient[3], new_bottom_gradient, top_gradient[3];
double one_per_r = 1.0 / distance;
double one_per_r_gradient[3] = {(-x) * one_per_r * one_per_r,
(-y) * one_per_r * one_per_r,
(-z) * one_per_r * one_per_r};
l = 0;
// set value for l=0, m=0
double radial_value;
double radial_derivative = evaluate_polynomials_shared<NLIP-1>(lm_address, df, r);
if (evaluate_gradients_x) result[X_] = radial_derivative * x; // * one_per_r;
if (evaluate_gradients_y) result[Y_] = radial_derivative * y; // * one_per_r;
if (evaluate_gradients_z) result[Z_] = radial_derivative * z; // * one_per_r;
if (distance >= 0.0 && distance < 1e-12) {
one_per_r = 0.0;
if (evaluate_gradients_x) one_per_r_gradient[X_] = 0.0;
if (evaluate_gradients_y) one_per_r_gradient[Y_] = 0.0;
if (evaluate_gradients_z) one_per_r_gradient[Z_] = 0.0;
if (evaluate_gradients_x) result[X_] = 0.0; //radial_derivative;
if (evaluate_gradients_y) result[Y_] = 0.0; //radial_derivative;
if (evaluate_gradients_z) result[Z_] = 0.0;//radial_derivative;
}
/*if (only_spherical) {
one_per_r = 0.0;
if (evaluate_gradients_x) one_per_r_gradient[X_] = 0.0;
if (evaluate_gradients_y) one_per_r_gradient[Y_] = 0.0;
if (evaluate_gradients_z) one_per_r_gradient[Z_] = 0.0;
}*/
if (lmax >= 1) {
// set all values where m=-1
prev1 = y * one_per_r;
if (evaluate_gradients_x) prev1_gradient[X_] = one_per_r_gradient[X_] * y;
if (evaluate_gradients_y) prev1_gradient[Y_] = 1.0 + one_per_r_gradient[Y_] * y;
if (evaluate_gradients_z) prev1_gradient[Z_] = one_per_r_gradient[Z_] * y;
// set value for l=1, m=-1
radial_value = evaluate_polynomials_shared<NLIP>(address+ncell_nlip, cf, r);
radial_derivative = evaluate_polynomials_shared<NLIP-1>(address+ncell_nlip, df, r);
if (evaluate_gradients_x) result[X_] += radial_value * prev1_gradient[X_] + radial_derivative * prev1 * x;// * one_per_r;
if (evaluate_gradients_y) result[Y_] += radial_value * prev1_gradient[Y_] + radial_derivative * prev1 * y;// * one_per_r;
if (evaluate_gradients_z) result[Z_] += radial_value * prev1_gradient[Z_] + radial_derivative * prev1 * z;// * one_per_r;
//if (only_spherical) printf("radial_value: %e, radial_derivative: %e, prev1, i.e., y/r: %e\n", radial_value, radial_derivative, prev1);
//if (only_spherical && evaluate_gradients_x) printf("prev1-gradient-x: %e, x/r: %e\n", prev1_gradient[X_], x * one_per_r);
//if (only_spherical && evaluate_gradients_y) printf("prev1-gradient-y: %e, y/r: %e\n", prev1_gradient[Y_], y * one_per_r);
//if (only_spherical && evaluate_gradients_z) printf("prev1-gradient-z: %e, z/r: %e\n", prev1_gradient[Z_], z * one_per_r);
// the starting address has 1 item before from the l=0, 3 from l=1, and 1 from l=2
address2 = address + ncell_nlip * 5;
for (l = 2; l <= lmax; l++) {
double a = ( 2.0*(double)l-1.0) * rsqrt( 1.0*(double)((l-1)*(l+1)) );
current = a * z*prev1 * one_per_r;
if (evaluate_gradients_x) current_gradient[X_] = a *(z * prev1 * one_per_r_gradient[X_] + z * one_per_r * prev1_gradient[X_]);
if (evaluate_gradients_y) current_gradient[Y_] = a *(z * prev1 * one_per_r_gradient[Y_] + z * one_per_r * prev1_gradient[Y_]);
if (evaluate_gradients_z) current_gradient[Z_] = a *(z * prev1 * one_per_r_gradient[Z_] + prev1 + z * one_per_r * prev1_gradient[Z_]);
if (l > 2) {
double b = sqrt( (double)((l-2)*(l)) / (double)((l-1)*(l+1)) );
current -= b * prev2;
if (evaluate_gradients_x) current_gradient[X_] -= b * prev2_gradient[X_];
if (evaluate_gradients_y) current_gradient[Y_] -= b * prev2_gradient[Y_];
if (evaluate_gradients_z) current_gradient[Z_] -= b * prev2_gradient[Z_];
}
radial_value = evaluate_polynomials_shared<NLIP>(address2, cf, r);
radial_derivative = evaluate_polynomials_shared<NLIP-1>(address2, df, r);
if (evaluate_gradients_x) result[X_] += radial_value * current_gradient[X_] + radial_derivative * current * x;// * one_per_r;
if (evaluate_gradients_y) result[Y_] += radial_value * current_gradient[Y_] + radial_derivative * current * y;// * one_per_r;
if (evaluate_gradients_z) result[Z_] += radial_value * current_gradient[Z_] + radial_derivative * current * z;// * one_per_r;
prev2 = prev1;
if (evaluate_gradients_x) prev2_gradient[X_] = prev1_gradient[X_];
if (evaluate_gradients_y) prev2_gradient[Y_] = prev1_gradient[Y_];
if (evaluate_gradients_z) prev2_gradient[Z_] = prev1_gradient[Z_];
prev1 = current;
if (evaluate_gradients_x) prev1_gradient[X_] = current_gradient[X_];
if (evaluate_gradients_y) prev1_gradient[Y_] = current_gradient[Y_];
if (evaluate_gradients_z) prev1_gradient[Z_] = current_gradient[Z_];
// add the address2 to get to the next item with m=-1
address2 += ncell_nlip * (2*l+2);
}
prev2 = 1.0;
if (evaluate_gradients_x) prev2_gradient[X_] = 0.0;
if (evaluate_gradients_y) prev2_gradient[Y_] = 0.0;
if (evaluate_gradients_z) prev2_gradient[Z_] = 0.0;
// set all values where m=0
prev1 = z * one_per_r;
if (evaluate_gradients_x) prev1_gradient[X_] = one_per_r_gradient[X_] * z;
if (evaluate_gradients_y) prev1_gradient[Y_] = one_per_r_gradient[Y_] * z;
if (evaluate_gradients_z) prev1_gradient[Z_] = 1.0 + one_per_r_gradient[Z_] * z;
// set value for l=1, m=0
radial_value = evaluate_polynomials_shared<NLIP>(address+2*ncell_nlip, cf, r);
radial_derivative = evaluate_polynomials_shared<NLIP-1>(address+2*ncell_nlip, df, r);
if (evaluate_gradients_x) result[X_] += radial_value * prev1_gradient[X_] + radial_derivative * prev1 * x;// * one_per_r;
if (evaluate_gradients_y) result[Y_] += radial_value * prev1_gradient[Y_] + radial_derivative * prev1 * y;// * one_per_r;
if (evaluate_gradients_z) result[Z_] += radial_value * prev1_gradient[Z_] + radial_derivative * prev1 * z;// * one_per_r;
//if (only_spherical) printf("radial_value: %e, radial_derivative: %e, prev1, i.e., z/r: %e\n", radial_value, radial_derivative, prev1);
//if (only_spherical && evaluate_gradients_x) printf("prev1-gradient-x: %e, x/r: %e\n", prev1_gradient[X_], x * one_per_r);
//if (only_spherical && evaluate_gradients_y) printf("prev1-gradient-y: %e, y/r: %e\n", prev1_gradient[Y_], y * one_per_r);
//if (only_spherical && evaluate_gradients_z) printf("prev1-gradient-z: %e, z/r: %e\n", prev1_gradient[Z_], z * one_per_r);
// the starting address has 1 item before from the l=0, 3 from l=1, and 2 from l=2
address2 = address + ncell_nlip * 6;
for (l = 2; l <= lmax; l++) {
double a = ( 2.0*(double)l-1.0) * rsqrt( 1.0*(double)((l)*(l)) );
double b = sqrt( (double)((l-1)*(l-1)) / (double)((l)*(l)) );
current = a * z * prev1 * one_per_r;
if (evaluate_gradients_x) current_gradient[X_] = a *(z * prev1 * one_per_r_gradient[X_] + z * one_per_r * prev1_gradient[X_]);
if (evaluate_gradients_y) current_gradient[Y_] = a *(z * prev1 * one_per_r_gradient[Y_] + z * one_per_r * prev1_gradient[Y_]);
if (evaluate_gradients_z) current_gradient[Z_] = a *(z * prev1 * one_per_r_gradient[Z_] + prev1 + z * one_per_r * prev1_gradient[Z_]);
current -= b * prev2;
if (evaluate_gradients_x) current_gradient[X_] -= b * prev2_gradient[X_];
if (evaluate_gradients_y) current_gradient[Y_] -= b * prev2_gradient[Y_];
if (evaluate_gradients_z) current_gradient[Z_] -= b * prev2_gradient[Z_];
radial_value = evaluate_polynomials_shared<NLIP>(address2, cf, r);
radial_derivative = evaluate_polynomials_shared<NLIP-1>(address2, df, r);
if (evaluate_gradients_x) result[X_] += radial_value * current_gradient[X_] + radial_derivative * current * x;// * one_per_r;
if (evaluate_gradients_y) result[Y_] += radial_value * current_gradient[Y_] + radial_derivative * current * y;// * one_per_r;
if (evaluate_gradients_z) result[Z_] += radial_value * current_gradient[Z_] + radial_derivative * current * z;// * one_per_r;
prev2 = prev1;
if (evaluate_gradients_x) prev2_gradient[X_] = prev1_gradient[X_];
if (evaluate_gradients_y) prev2_gradient[Y_] = prev1_gradient[Y_];
if (evaluate_gradients_z) prev2_gradient[Z_] = prev1_gradient[Z_];
prev1 = current;
if (evaluate_gradients_x) prev1_gradient[X_] = current_gradient[X_];
if (evaluate_gradients_y) prev1_gradient[Y_] = current_gradient[Y_];
if (evaluate_gradients_z) prev1_gradient[Z_] = current_gradient[Z_];
// add the address2 to get to the next item with m=0
address2 += ncell_nlip * (2*l+2);
}
// set all values where m=1
prev1 = x * one_per_r;
if (evaluate_gradients_x) prev1_gradient[X_] = 1.0 + one_per_r_gradient[X_] * x;
if (evaluate_gradients_y) prev1_gradient[Y_] = one_per_r_gradient[Y_] * x;
if (evaluate_gradients_z) prev1_gradient[Z_] = one_per_r_gradient[Z_] * x;
// set value for l=1, m=1
radial_value = evaluate_polynomials_shared<NLIP>(address+3*ncell_nlip, cf, r);
radial_derivative = evaluate_polynomials_shared<NLIP-1>(address+3*ncell_nlip, df, r);
if (evaluate_gradients_x) result[X_] += radial_value * prev1_gradient[X_] + radial_derivative * prev1 * x;// * one_per_r;
if (evaluate_gradients_y) result[Y_] += radial_value * prev1_gradient[Y_] + radial_derivative * prev1 * y;// * one_per_r;
if (evaluate_gradients_z) result[Z_] += radial_value * prev1_gradient[Z_] + radial_derivative * prev1 * z;// * one_per_r;
//if (only_spherical) printf("radial_value: %e, radial_derivative: %e, prev1, i.e., x/r: %e\n", radial_value, radial_derivative, prev1);
//if (only_spherical && evaluate_gradients_x) printf("prev1-gradient-x: %e, x/r: %e\n", prev1_gradient[X_], x * one_per_r);
//if (only_spherical && evaluate_gradients_y) printf("prev1-gradient-y: %e, y/r: %e\n", prev1_gradient[Y_], y * one_per_r);
//if (only_spherical && evaluate_gradients_z) printf("prev1-gradient-z: %e, z/r: %e\n", prev1_gradient[Z_], z * one_per_r);
// the starting address has 1 item before from the l=0, 3 from l=1, and 3 from l=2
address2 = address + ncell_nlip * 7;
for (l = 2; l <= lmax; l++) {
double a = ( 2.0*(double)l-1.0) * rsqrt( 1.0*(double)((l+1)*(l-1)) );
current = a * z*prev1 * one_per_r;
if (evaluate_gradients_x) current_gradient[X_] = a *(z * prev1 * one_per_r_gradient[X_] + z * one_per_r * prev1_gradient[X_]);
if (evaluate_gradients_y) current_gradient[Y_] = a *(z * prev1 * one_per_r_gradient[Y_] + z * one_per_r * prev1_gradient[Y_]);
if (evaluate_gradients_z) current_gradient[Z_] = a *(z * prev1 * one_per_r_gradient[Z_] + prev1 + z * one_per_r * prev1_gradient[Z_]);
if (l > 2) {
double b = sqrt( (double)((l)*(l-2)) / (double)((l+1)*(l-1)) );
current -= b * prev2;
if (evaluate_gradients_x) current_gradient[X_] -= b * prev2_gradient[X_];
if (evaluate_gradients_y) current_gradient[Y_] -= b * prev2_gradient[Y_];
if (evaluate_gradients_z) current_gradient[Z_] -= b * prev2_gradient[Z_];
}
radial_value = evaluate_polynomials_shared<NLIP>(address2, cf, r);
radial_derivative = evaluate_polynomials_shared<NLIP-1>(address2, df, r);
if (evaluate_gradients_x) result[X_] += radial_value * current_gradient[X_] + radial_derivative * current * x;// * one_per_r;
if (evaluate_gradients_y) result[Y_] += radial_value * current_gradient[Y_] + radial_derivative * current * y;// * one_per_r;
if (evaluate_gradients_z) result[Z_] += radial_value * current_gradient[Z_] + radial_derivative * current * z;// * one_per_r;
prev2 = prev1;
if (evaluate_gradients_x) prev2_gradient[X_] = prev1_gradient[X_];
if (evaluate_gradients_y) prev2_gradient[Y_] = prev1_gradient[Y_];
if (evaluate_gradients_z) prev2_gradient[Z_] = prev1_gradient[Z_];
prev1 = current;
if (evaluate_gradients_x) prev1_gradient[X_] = current_gradient[X_];
if (evaluate_gradients_y) prev1_gradient[Y_] = current_gradient[Y_];
if (evaluate_gradients_z) prev1_gradient[Z_] = current_gradient[Z_];
// add the address2 to get to the next item with m=-1
address2 += ncell_nlip * (2*l+2);
}
// go through the rest of the stuff
bottom = y * one_per_r; // bottom refers to solid harmonics value with l=l-1 and m=-(l-1)
if (evaluate_gradients_x) bottom_gradient[X_] = one_per_r_gradient[X_] * y;
if (evaluate_gradients_y) bottom_gradient[Y_] = 1.0 + one_per_r_gradient[Y_] * y;
if (evaluate_gradients_z) bottom_gradient[Z_] = one_per_r_gradient[Z_] * y;
top = x * one_per_r; // top refers to solid harmonics value with l=l-1 and m=l-1
if (evaluate_gradients_x) top_gradient[X_] = 1.0 + one_per_r_gradient[X_] * x;
if (evaluate_gradients_y) top_gradient[Y_] = one_per_r_gradient[Y_] * x;
if (evaluate_gradients_z) top_gradient[Z_] = one_per_r_gradient[Z_] * x;
lm_address += 4 * ncell_nlip;
for (l=2; l <= lmax; l++) {
double c = sqrt((2.0*(double)l - 1.0) / (2.0*(double)l));
new_bottom = c * one_per_r * ( y*top + x*bottom);
// get the gradients to x direction
if (evaluate_gradients_x) new_bottom_gradient = c * (one_per_r_gradient[X_] * (y * top + x * bottom) +
one_per_r * (y * top_gradient[X_] + x * bottom_gradient[X_] + bottom)) ;
if (evaluate_gradients_x) top_gradient[X_] = c * (one_per_r_gradient[X_] * (x * top - y * bottom) +
one_per_r * (x * top_gradient[X_] + top - y * bottom_gradient[X_]));
if (evaluate_gradients_x) bottom_gradient[X_] = new_bottom_gradient;
// get the gradients to y direction
if (evaluate_gradients_y) new_bottom_gradient = c * (one_per_r_gradient[Y_] * (y * top + x * bottom) +
one_per_r * (y * top_gradient[Y_] + top + x * bottom_gradient[Y_]));
if (evaluate_gradients_y) top_gradient[Y_] = c * (one_per_r_gradient[Y_] * (x * top - y * bottom) +
one_per_r * (x * top_gradient[Y_] - y * bottom_gradient[Y_] - bottom));
if (evaluate_gradients_y) bottom_gradient[Y_] = new_bottom_gradient;
// get the gradients to z direction
if (evaluate_gradients_z) new_bottom_gradient = c * (one_per_r_gradient[Z_] * (y * top + x * bottom) +
one_per_r * (y * top_gradient[Z_] + x * bottom_gradient[Z_]));
if (evaluate_gradients_z) top_gradient[Z_] = c * (one_per_r_gradient[Z_] * (x * top - y * bottom) +
one_per_r * (x * top_gradient[Z_] - y * bottom_gradient[Z_]));
if (evaluate_gradients_z) bottom_gradient[Z_] = new_bottom_gradient;
top = c * one_per_r * ( x*top-y*bottom );
// store the new bottom: l=l, m=-l (we need the old bottom in calculation of top previously, so we
// have to sacrifice one register temporarily)
bottom = new_bottom;
radial_value = evaluate_polynomials_shared<NLIP>(lm_address, cf, r);
radial_derivative = evaluate_polynomials_shared<NLIP-1>(lm_address, df, r);
// get value for l=l, m=-l.
if (evaluate_gradients_x) result[X_] += radial_value * bottom_gradient[X_] + radial_derivative * bottom * x;// * one_per_r;
if (evaluate_gradients_y) result[Y_] += radial_value * bottom_gradient[Y_] + radial_derivative * bottom * y;// * one_per_r;
if (evaluate_gradients_z) result[Z_] += radial_value * bottom_gradient[Z_] + radial_derivative * bottom * z;// * one_per_r;
radial_value = evaluate_polynomials_shared<NLIP>(lm_address + 2*l * ncell_nlip, cf, r);
radial_derivative = evaluate_polynomials_shared<NLIP-1>(lm_address + 2*l * ncell_nlip, df, r);
// get value for l=l, m=l. The address is 2*l items away from l=l, m=-l
if (evaluate_gradients_x) result[X_] += radial_value * top_gradient[X_] + radial_derivative * top * x;// * one_per_r;
if (evaluate_gradients_y) result[Y_] += radial_value * top_gradient[Y_] + radial_derivative * top * y;// * one_per_r;
if (evaluate_gradients_z) result[Z_] += radial_value * top_gradient[Z_] + radial_derivative * top * z;// * one_per_r;
// set all values where m=-l
prev1 = bottom;
if (evaluate_gradients_x) prev1_gradient[X_] = bottom_gradient[X_];
if (evaluate_gradients_y) prev1_gradient[Y_] = bottom_gradient[Y_];
if (evaluate_gradients_z) prev1_gradient[Z_] = bottom_gradient[Z_];
address2 = lm_address + (2*l+2) * ncell_nlip;
for (l2 = l+1; l2 <= lmax; l2++) {
// evaluate spherical harmonics for l=l2, m=-l
double a = ( 2.0*(double)l2-1.0) * rsqrt( 1.0*(double)((l2-l)*(l2+l)) );
current = a * z*prev1 * one_per_r;
if (evaluate_gradients_x) current_gradient[X_] = a *(z * prev1 * one_per_r_gradient[X_] + z * one_per_r * prev1_gradient[X_]);
if (evaluate_gradients_y) current_gradient[Y_] = a *(z * prev1 * one_per_r_gradient[Y_] + z * one_per_r * prev1_gradient[Y_]);
if (evaluate_gradients_z) current_gradient[Z_] = a *(z * prev1 * one_per_r_gradient[Z_] + prev1 + z * one_per_r * prev1_gradient[Z_]);
if (l2 > l+1) {
double b = sqrt( (double)((l2-l-1)*(l2+l-1)) / (double)((l2-l)*(l2+l)) );
current -= b * prev2;
if (evaluate_gradients_x) current_gradient[X_] -= b * prev2_gradient[X_];
if (evaluate_gradients_y) current_gradient[Y_] -= b * prev2_gradient[Y_];
if (evaluate_gradients_z) current_gradient[Z_] -= b * prev2_gradient[Z_];
}
radial_value = evaluate_polynomials_shared<NLIP>(address2, cf, r);
radial_derivative = evaluate_polynomials_shared<NLIP-1>(address2, df, r);
if (evaluate_gradients_x) result[X_] += radial_value * current_gradient[X_] + radial_derivative * current * x;// * one_per_r;
if (evaluate_gradients_y) result[Y_] += radial_value * current_gradient[Y_] + radial_derivative * current * y;// * one_per_r;
if (evaluate_gradients_z) result[Z_] += radial_value * current_gradient[Z_] + radial_derivative * current * z;// * one_per_r;
prev2 = prev1;
if (evaluate_gradients_x) prev2_gradient[X_] = prev1_gradient[X_];
if (evaluate_gradients_y) prev2_gradient[Y_] = prev1_gradient[Y_];
if (evaluate_gradients_z) prev2_gradient[Z_] = prev1_gradient[Z_];
prev1 = current;
if (evaluate_gradients_x) prev1_gradient[X_] = current_gradient[X_];
if (evaluate_gradients_y) prev1_gradient[Y_] = current_gradient[Y_];
if (evaluate_gradients_z) prev1_gradient[Z_] = current_gradient[Z_];
// add the address2 to get to the next item with m=-1
address2 += ncell_nlip * (2*l2+2);
}
// set all values where m=l
lm_address += 2*l * ncell_nlip;
prev1 = top;
if (evaluate_gradients_x) prev1_gradient[X_] = top_gradient[X_];
if (evaluate_gradients_y) prev1_gradient[Y_] = top_gradient[Y_];
if (evaluate_gradients_z) prev1_gradient[Z_] = top_gradient[Z_];
address2 = lm_address + (2*l+2) * ncell_nlip;
for (l2 = l+1; l2 <= lmax; l2++) {
// evaluate spherical harmonics for l=l2, m=l
double a = ( 2.0*(double)l2-1.0) * rsqrt( 1.0*(double)((l2+l)*(l2-l)) );
current = a * z*prev1 * one_per_r;
if (evaluate_gradients_x) current_gradient[X_] = a *(z * prev1 * one_per_r_gradient[X_] + z * one_per_r * prev1_gradient[X_]);
if (evaluate_gradients_y) current_gradient[Y_] = a *(z * prev1 * one_per_r_gradient[Y_] + z * one_per_r * prev1_gradient[Y_]);
if (evaluate_gradients_z) current_gradient[Z_] = a *(z * prev1 * one_per_r_gradient[Z_] + prev1 + z * one_per_r * prev1_gradient[Z_]);
if (l2 > l+1) {
double b = sqrt( (double)((l2+l-1)*(l2-l-1)) / (double)((l2+l)*(l2-l)) );
current -= b * prev2;
if (evaluate_gradients_x) current_gradient[X_] -= b * prev2_gradient[X_];
if (evaluate_gradients_y) current_gradient[Y_] -= b * prev2_gradient[Y_];
if (evaluate_gradients_z) current_gradient[Z_] -= b * prev2_gradient[Z_];
}
radial_value = evaluate_polynomials_shared<NLIP>(address2, cf, r);
radial_derivative = evaluate_polynomials_shared<NLIP-1>(address2, df, r);
if (evaluate_gradients_x) result[X_] += radial_value * current_gradient[X_] + radial_derivative * current * x;// * one_per_r;
if (evaluate_gradients_y) result[Y_] += radial_value * current_gradient[Y_] + radial_derivative * current * y;// * one_per_r;
if (evaluate_gradients_z) result[Z_] += radial_value * current_gradient[Z_] + radial_derivative * current * z;// * one_per_r;
prev2 = prev1;
if (evaluate_gradients_x) prev2_gradient[X_] = prev1_gradient[X_];
if (evaluate_gradients_y) prev2_gradient[Y_] = prev1_gradient[Y_];
if (evaluate_gradients_z) prev2_gradient[Z_] = prev1_gradient[Z_];
prev1 = current;
if (evaluate_gradients_x) prev1_gradient[X_] = current_gradient[X_];
if (evaluate_gradients_y) prev1_gradient[Y_] = current_gradient[Y_];
if (evaluate_gradients_z) prev1_gradient[Z_] = current_gradient[Z_];
// add the address2 to get to the next item with m=-1
address2 += ncell_nlip * (2*l2+2);
}
// get next address
lm_address += ncell_nlip;
}
}
result[X_] *= one_per_r;
result[Y_] *= one_per_r;
result[Z_] *= one_per_r;
// multiply the result with r^k, if k is not 0
// the distance is not too close to 0.0 as this is checked
// earlier in this function, NOTE: should never happen, thus
// commented away
//if (k != 0 && distance > 1e-12) {
/*for (int i = 0; i < k; i ++) {
result *= distance;
}
for (int i = 0; i < -k; i ++) {
result *= one_per_r;
}*/
//}
if (distance < 1e-12) {
result[X_] = 0.0; // * evaluate_polynomials_shared<NLIP-1>(address, df, r);
result[Y_] = 0.0;
result[Z_] = 0.0;
}
}
/*
 * Evaluates the value of a single bubble at one point. This is very similar to the
 * SolidHarmonics simple evaluation, but each solid-harmonics term is multiplied
 * with the corresponding radial polynomial evaluated at 'r'.
 *
 * NOTE(review): this function is warp-cooperative. Each lane first computes the
 * recurrence coefficients 'a'/'b' for the degree equal to its own lane id
 * (threadIdx.x % 32), and the recurrence loops then fetch lane 'l' of those
 * registers via __shfl/__shfl_sync. This implicitly assumes lmax < 32 and that
 * all 32 lanes of the warp execute this function together -- TODO confirm at
 * the call sites (Bubbles_evaluate_grid calls it even for out-of-range threads,
 * which is consistent with this assumption).
 */
__device__ inline double Bubbles_evaluate_point(
// x-coordinate relative to the center of the bubble
const double &x,
// y-coordinate relative to the center of the bubble
const double &y,
// z-coordinate relative to the center of the bubble
const double &z,
// relative distance from the center of the bubble
const double &distance,
// maximum quantum number 'l'
const int &lmax,
// number of cells
const int &ncell,
// number of lagrange integration polynomials per
// cell, i.e., the number of grid points per cell
const int &nlip,
// position inside the cell
const double &r,
// k value for the bubble
const int &k,
// the first address value in bubble for the selected cell
const int &address,
// constant pointer to a variable double array
const double* __restrict__ cf
) {
double result = 0.0;
int lm_address = address, address2;
// NOTE: Here the nlip is replaced by 8 because this gives advantages in loading the stuff
// also *cf this should be done
const int ncell_nlip = ncell * 8;
int l, l2;
double top, bottom, new_bottom, prev1, prev2, current, a, b, a2;
// 1/distance; division by ~zero is tolerated here because the
// distance < 1e-14 fallback at the end replaces the result at the center.
const double one_per_r = 1.0 / distance;
l = 0;
// set value for l=0, m=0
// printf("x: %f, y: %f, z: %f, nlip: %d, ncell: %d, l: 0, lmax: %d, address: %d, cf: %ld, r: %f\n", x, y, z, nlip, ncell, 0, lmax, lm_address, cf, r);
// printf("shared_memory address: %ld\n", );
// printf("shared memory first value: %f", shared_memory[0]);
result = evaluate_polynomials_shared<NLIP>(lm_address, cf, r);
if (lmax >= 1) {
// set value for l=1, m=-1
result += y * evaluate_polynomials_shared<NLIP>(address+ncell_nlip, cf, r) * one_per_r;
// set value for l=1, m=0
result += z * evaluate_polynomials_shared<NLIP>(address+2*ncell_nlip, cf, r) * one_per_r;
// set value for l=1, m=1
result += x * evaluate_polynomials_shared<NLIP>(address+3*ncell_nlip, cf, r) * one_per_r;
// set all values where m=-1
prev2 = 0.0;
prev1 = y * one_per_r;
// the starting address has 1 item before from the l=0, 3 from l=1, and 1 from l=2
address2 = address + ncell_nlip * 5;
// Per-lane coefficient precomputation: lane 'l' of the warp holds the
// recurrence coefficients for degree l. Lanes 0 and 1 compute rsqrt of a
// non-positive argument (NaN/inf), but the loops below start at l=2 and
// never shuffle from those lanes.
l = threadIdx.x % 32;
a = ( 2.0*(double)l-1.0) * rsqrt( 1.0*(double)((l-1)*(l+1)) );
b = (l > 2) ? sqrt( (double)((l-2)*(l)) / (double)((l-1)*(l+1)) ) : 0.0;
for (l = 2; l <= lmax; l++) {
// NOTE(review): for __CUDA_ARCH__ < 350 neither branch is compiled and
// 'current' would be read uninitialized -- presumably such devices are
// not supported by this build.
#if (__CUDA_ARCH__ >= 350) && (__CUDA_ARCH__ < 700)
current = __shfl(a, l) * z*prev1 * one_per_r - __shfl(b, l) * prev2;
#elif (__CUDA_ARCH__ >= 700)
current = __shfl_sync(FULL_MASK, a, l) * z*prev1 * one_per_r - __shfl_sync(FULL_MASK, b, l) * prev2;
#endif
result += current * evaluate_polynomials_shared<NLIP>(address2, cf, r) ;
prev2 = prev1;
prev1 = current;
// add the address2 to get to the next item with m=-1
address2 += ncell_nlip * (2*l+2);
}
// set all values where m=0
prev1 = z * one_per_r;
prev2 = 1.0;
// the starting address has 1 item before from the l=0, 3 from l=1, and 2 from l=2
address2 = address + ncell_nlip * 6;
l = threadIdx.x % 32; // lane within warp
a = ( 2.0*(double)l-1.0) * rsqrt( 1.0*(double)((l)*(l)) );
b = sqrt( (double)((l-1)*(l-1)) / (double)((l)*(l)) );
// printf("l: %d, lmax:%d, a: %f, b: %f, z: %f, prev1: %f, one_per_r: %f, prev2: %f\n", l, lmax, a, b, z, prev1, one_per_r, prev2);
for (l = 2; l <= lmax; l++) {
#if (__CUDA_ARCH__ >= 350) && (__CUDA_ARCH__ < 700)
current = __shfl(a, l) * z * prev1 * one_per_r - __shfl(b, l) * prev2;
#elif __CUDA_ARCH__ >= 700
// printf("lane: %d, l: %d, a: %f, b:%f\n", threadIdx.x % 32, l, a, b);
current = __shfl_sync(FULL_MASK, a, l) * z * prev1 * one_per_r - __shfl_sync(FULL_MASK, b, l) * prev2;
#endif
result += current * evaluate_polynomials_shared<NLIP>(address2, cf, r);
prev2 = prev1;
prev1 = current;
// add the address2 to get to the next item with m=0
address2 += ncell_nlip * (2*l+2);
}
// set all values where m=1
prev1 = x * one_per_r;
// the starting address has 1 item before from the l=0, 3 from l=1, and 3 from l=2
address2 = address + ncell_nlip * 7;
l = threadIdx.x % 32;
a = ( 2.0*(double)l-1.0) * rsqrt( 1.0*(double)((l+1)*(l-1)) );
b = (l > 2) ? sqrt( (double)((l)*(l-2)) / (double)((l+1)*(l-1)) ) : 0.0;
// NOTE(review): prev2 here still carries the last value of the m=0 loop
// (or 1.0 when lmax < 2); for l == 2 its contribution is masked because
// lane 2 computed b == 0.0 -- so only the l == 2 term is safe by design.
for (l = 2; l <= lmax; l++) {
#if (__CUDA_ARCH__ >= 350) && (__CUDA_ARCH__ < 700)
current = __shfl(a, l) * z * prev1 * one_per_r - __shfl(b, l) * prev2;
#elif __CUDA_ARCH__ >= 700
current = __shfl_sync(FULL_MASK, a, l) * z * prev1 * one_per_r - __shfl_sync(FULL_MASK, b, l) * prev2;
#endif
result += current * evaluate_polynomials_shared<NLIP>(address2, cf, r);
prev2 = prev1;
prev1 = current;
// add the address2 to get to the next item with m=1
address2 += ncell_nlip * (2*l+2);
}
// go through the rest of the stuff
bottom = y * one_per_r; // bottom refers to spherical harmonics value with l=l-1 and m=-(l-1)
top = x * one_per_r; // top refers to spherical harmonics value with l=l-1 and m=l-1
lm_address += 4 * ncell_nlip;
l = threadIdx.x % 32;
a = sqrt((2.0*(double)l - 1.0) / (2.0*(double)l));
// Diagonal recursion: for each l, build the sectoral harmonics (m = -l and
// m = l) from the previous diagonal, then run the degree recursion for
// fixed |m| = l over l2 = l+1 .. lmax.
for (l=2; l <= lmax; l++) {
#if (__CUDA_ARCH__ >= 350) && (__CUDA_ARCH__ < 700)
new_bottom = __shfl(a, l) * one_per_r * ( y*top + x*bottom);
top = __shfl(a, l) * one_per_r * ( x*top - y*bottom );
#elif __CUDA_ARCH__ >= 700
new_bottom = __shfl_sync(FULL_MASK, a, l) * one_per_r * ( y*top + x*bottom);
top = __shfl_sync(FULL_MASK, a, l) * one_per_r * ( x*top - y*bottom );
#endif
// store the new bottom: l=l, m=-l (we need the old bottom in calculation of top previously, so we
// have to sacrifice one register temporarily)
bottom = new_bottom;
result += bottom * evaluate_polynomials_shared<NLIP>(lm_address, cf, r);
// get value for l=l, m=l. The address is 2*l items away from l=l, m=-l
result += top * evaluate_polynomials_shared<NLIP>(lm_address + 2*l * ncell_nlip, cf, r);
// set all values where m=-l
prev2 = 0.0;
prev1 = bottom;
address2 = lm_address + (2*l+2) * ncell_nlip;
// set all values where m=l
lm_address += 2*l * ncell_nlip;
l2 = threadIdx.x % 32;
a2 = ( 2.0*(double)l2-1.0) * rsqrt( 1.0*(double)((l2-l)*(l2+l)) );
b = (l2 > l+1) ? sqrt( (double)((l2-l-1)*(l2+l-1)) / (double)((l2-l)*(l2+l)) ) : 0.0;
for (l2 = l+1; l2 <= lmax; l2++) {
// evaluate spherical harmonics for l=l2, m=-l
#if (__CUDA_ARCH__ >= 350) && (__CUDA_ARCH__ < 700)
current = __shfl(a2, l2) * z * prev1 * one_per_r - __shfl(b, l2) * prev2;
#elif __CUDA_ARCH__ >= 700
current = __shfl_sync(FULL_MASK, a2, l2) * z * prev1 * one_per_r - __shfl_sync(FULL_MASK, b, l2) * prev2;
#endif
result += current * evaluate_polynomials_shared<NLIP>(address2, cf, r);
prev2 = prev1;
prev1 = current;
// add the address2 to get to the next item with m=-l
address2 += ncell_nlip * (2*l2+2);
}
prev2 = 0.0;
prev1 = top;
address2 = lm_address + (2*l+2) * ncell_nlip;
l2 = threadIdx.x % 32;
a2 = ( 2.0*(double)l2-1.0) * rsqrt( 1.0*(double)((l2+l)*(l2-l)) ) ;
b = (l2 > l+1) ? sqrt( (double)((l2+l-1)*(l2-l-1)) / (double)((l2+l)*(l2-l)) ) : 0.0;
for (l2 = l+1; l2 <= lmax; l2++) {
// evaluate spherical harmonics for l=l2, m=l
#if (__CUDA_ARCH__ >= 350) && (__CUDA_ARCH__ < 700)
current = __shfl(a2, l2) * z * prev1 * one_per_r - __shfl(b, l2) * prev2;
#elif __CUDA_ARCH__ >= 700
current = __shfl_sync(FULL_MASK, a2, l2) * z * prev1 * one_per_r - __shfl_sync(FULL_MASK, b, l2) * prev2;
#endif
// the latter term will go to zero, if l2 <= l+1
result += current * evaluate_polynomials_shared<NLIP>(address2, cf, r);
prev2 = prev1;
prev1 = current;
// add the address3 to get to the next item with m=l
address2 += ncell_nlip * (2*l2+2);
}
// get next address
lm_address += ncell_nlip;
}
}
// multiply the result with r^k, if k is not 0
// the distance is not too close to 0.0 as this is checked
// earlier in this function, NOTE: should never happen, thus
// commented away
//if (k != 0 && distance > 1e-12) {
// At the bubble center one_per_r diverges, so only the l=0, m=0 term is
// well defined: fall back to the plain l=0 radial polynomial value.
if (distance < 1e-14) {
result = 1.0 * evaluate_polynomials_shared<NLIP>(address, cf, r);
}
// apply the r^k scaling of the bubble (k may be negative)
for (int i = 0; i < k; i ++) {
result *= distance;
}
for (int i = 0; i < -k; i ++) {
result *= one_per_r;
}
//}
return result;
}
/*
 * Global linear thread index for a 1-D grid of 1-D blocks.
 */
__device__ int getGlobalIdx_1D_1D() {
    return blockIdx.x * blockDim.x + threadIdx.x;
}
/*
 * Global linear thread index for a 3-D grid of 3-D blocks.
 * Blocks are numbered x-fastest within the grid, and threads are
 * numbered x-fastest within each block.
 */
__device__ int getGlobalIdx_3D_3D() {
    const int threadsPerBlock = blockDim.x * blockDim.y * blockDim.z;
    const int blockId = blockIdx.x
                      + gridDim.x * (blockIdx.y + gridDim.y * blockIdx.z);
    const int threadInBlock = threadIdx.x
                            + blockDim.x * (threadIdx.y + blockDim.y * threadIdx.z);
    return blockId * threadsPerBlock + threadInBlock;
}
/*
 * Order the pair (*first, *second) so that *first <= *second afterwards.
 * A value of -1 is treated as "unset": an unset slot is first filled from
 * the partner slot, so a single valid value ends up in both positions.
 */
__device__ inline void minmax(int *first, int *second) {
    // Fill unset (-1) slots from the partner value before comparing.
    if (*first == -1) {
        *first = *second;
    }
    if (*second == -1) {
        *second = *first;
    }
    // Swap so that the smaller value lands in *first.
    if (*second < *first) {
        const int smaller = *second;
        *second = *first;
        *first = smaller;
    }
}
/*
 * Find the minimum and maximum in array that is as large as a block, and store them as the first
 * and last value of the input array. NOTE: The arrayLength must be a power of 2.
 * Entries with value -1 are treated as "unset" by minmax() and do not win
 * either the minimum or the maximum.
 * Must be called by all threads of the block (contains __syncthreads()).
 */
__device__ void calculateMinimumMaximum(int *array, int blockThreadId, int arrayLength) {
int division = arrayLength / 2;
// order so that the larger values of pairs are at the second part of the array
// and the smaller are at the end of the array
if (blockThreadId < division) {
// rearrange the values so that the larger is in the &array[blockThreadId + division]
// and smaller is in &array[blockThreadId]
minmax(&array[blockThreadId], &array[blockThreadId + division]);
}
__syncthreads();
division = arrayLength / 4;
// Repeatedly halve the active window: the minimum candidates funnel
// towards array[0] from the front and the maximum candidates towards
// array[arrayLength-1] from the back.
while (division >= 1) {
if (blockThreadId < division) {
minmax(&array[blockThreadId], &array[blockThreadId + division]);
}
else if (blockThreadId > arrayLength - division) {
minmax(&array[blockThreadId - division], &array[blockThreadId]);
}
division /= 2;
// safe barrier: the loop condition depends only on 'division', which is
// uniform across the block, so every thread reaches this __syncthreads()
__syncthreads();
}
}
/*
 * Evaluate Bubbles on a grid.
 *
 * Template flags select at compile time what is evaluated:
 *   lmin_zero            - true selects the fast lmin == 0 evaluators,
 *                          false selects Bubbles_evaluate_point_lmin
 *   evaluate_value       - accumulate bubble values into 'cube'
 *   evaluate_gradients_* - accumulate the x/y/z gradient components into
 *                          the corresponding gradient cubes
 *
 * NOTE(review): gradients are only computed in the lmin_zero branch; an
 * instantiation with lmin_zero == false and any gradient flag set would
 * add uninitialized 'gradient' values below -- presumably such a
 * combination is never instantiated; verify against the kernel wrappers.
 */
template <bool lmin_zero, bool evaluate_value, bool evaluate_gradients_x, bool evaluate_gradients_y, bool evaluate_gradients_z >
__device__ inline void
Bubbles_evaluate_grid(const Bubble* __restrict__ bubble,
double* __restrict__ cube,
double* __restrict__ gradient_cube_x,
double* __restrict__ gradient_cube_y,
double* __restrict__ gradient_cube_z,
const double* __restrict__ grid_points_x,
const double* __restrict__ grid_points_y,
const double* __restrict__ grid_points_z,
const int shape_x,
const int shape_y,
const int shape_z,
const double zero_point_x,
const double zero_point_y,
const double zero_point_z,
const int k,
const int slice_offset,
const size_t pitch,
const int memory_y_shape,
const int slice_count,
const int lmin,
const double multiplier) {
// The result array will be in fortran with indices l, x, y, z.
// This means that the x index will be the fastest to change.
int x, y, z;
getXYZ(&x, &y, &z);
// get the offset from the input cube pointer
const int id = getCubeOffset3D(x, y, z, pitch, memory_y_shape);
double value, gradient[3];
double in_cell_position = 0.0;
const int ncell = bubble->grid->ncell, nlip = bubble->grid->nlip;
int icell;
double relative_position_x, relative_position_y, relative_position_z, distance;
// printf("X: %f, cell_spacing: %f, ncell: %d", distance, bubble->cell_spacing, ncell);
// Check that the point is within the block
if (x < shape_x && y < shape_y && z+slice_offset < shape_z && z < slice_count) {
// calculate relative position to the zero-point and distance to it
calculate_distance(relative_position_x,
relative_position_y,
relative_position_z,
distance,
zero_point_x,
zero_point_y,
zero_point_z,
grid_points_x[x],
ldg<double>(&grid_points_y[y]),
ldg<double>(&grid_points_z[z+slice_offset]));
// get the order number of cell the point resides in
//icell = calculate_icell(distance, bubble->d, bubble->ncell);
calculate_icell_radial(distance, bubble->charge, bubble->grid->r_max, ncell, nlip, &icell, &in_cell_position);
//printf("x: %d, y: %d, z:%d, id:%d, vector_id: %d, vector_offset:%d, blockId: %d, blocks_per_vector: %d, %f, %f, %f, %d\n", x, y, z, id, vector_id, vector_offset, blockIdx.x, blocks_per_vector, grid_points_x[x], ldg(&grid_points_y[y]), ldg(&grid_points_z[z]), icell);
}
else {
// Out-of-range thread: assign harmless dummy values and still run the
// evaluators below -- presumably so that every lane keeps participating
// in the warp-wide __shfl operations inside the point evaluators; the
// result is discarded by the bounds check before the write-back.
icell = 1;
distance = 0.1;
}
if (lmin_zero) {
// calculate the bubble value for the point with lmin = 0
if (evaluate_value) {
value = Bubbles_evaluate_point( relative_position_x,
relative_position_y,
relative_position_z,
distance,
bubble->lmax,
ncell,
nlip,
in_cell_position,
k,
icell * 8,
bubble->cf);
}
// evaluate gradients if we are evaluating any
if (evaluate_gradients_x || evaluate_gradients_y || evaluate_gradients_z) {
Bubbles_evaluate_gradient_point
<evaluate_gradients_x, evaluate_gradients_y, evaluate_gradients_z>
(relative_position_x,
relative_position_y,
relative_position_z,
distance,
bubble->lmax,
ncell,
nlip,
in_cell_position,
k,
icell * 8,
bubble->cf,
bubble->df,
false,
gradient
);
}
}
else {
if (evaluate_value) {
// calculate the bubble value for the point with lmin > 0
value = Bubbles_evaluate_point_lmin( relative_position_x,
relative_position_y,
relative_position_z,
distance,
lmin,
bubble->lmax,
ncell,
nlip,
in_cell_position,
k,
icell * 8,
bubble->cf
);
}
}
// write back only for in-range points that landed inside the radial grid
// (icell < ncell); results are accumulated, scaled by 'multiplier'
if (x < shape_x && y < shape_y && z+slice_offset < shape_z && z < slice_count && icell < ncell) {
/*if (x == 0 && y == 0) {
printf("%d: [x, y, z], id : [%d, %d, %d], %d, icell: %d, in_cell_position:%f, first_bubble-value:%e, distance:%f, coord: [%f, %f, %f] old-value: %e, value: %e, multiplier: %f\n", slice_offset, x, y, z+slice_offset, id, icell, in_cell_position, bubble->cf[icell*8], distance, relative_position_x, relative_position_y, relative_position_z, cube[id], value, multiplier);
}*/
if (evaluate_value) cube[id] += multiplier * value;
if (evaluate_gradients_x) gradient_cube_x[id] += multiplier * gradient[X_];
if (evaluate_gradients_y) gradient_cube_y[id] += multiplier * gradient[Y_];
if (evaluate_gradients_z) gradient_cube_z[id] += multiplier * gradient[Z_];
}
return;
}
/*
 * Kernel: evaluate bubble values (no gradients) on a grid for bubbles
 * whose minimum angular momentum quantum number 'lmin' may be nonzero.
 */
__global__ void
#if (__CUDA_ARCH__ <= 350)
__launch_bounds__(128, 6)
#else
__launch_bounds__(256)
#endif
Bubbles_evaluate_grid_lmin(const Bubble* __restrict__ bubble,
                           double* __restrict__ cube,
                           const double* __restrict__ grid_points_x,
                           const double* __restrict__ grid_points_y,
                           const double* __restrict__ grid_points_z,
                           const int shape_x,
                           const int shape_y,
                           const int shape_z,
                           const double zero_point_x,
                           const double zero_point_y,
                           const double zero_point_z,
                           const int k,
                           const int slice_offset,
                           const size_t pitch,
                           const int memory_y_shape,
                           const int slice_count,
                           const int lmin,
                           const double multiplier) {
    // Value-only evaluation: gradient flags are disabled and all three
    // gradient cubes are passed as NULL (they are never dereferenced).
    Bubbles_evaluate_grid<false, true, false, false, false>(
        bubble, cube,
        /*gradient_cube_x=*/NULL,
        /*gradient_cube_y=*/NULL,
        /*gradient_cube_z=*/NULL,
        grid_points_x, grid_points_y, grid_points_z,
        shape_x, shape_y, shape_z,
        zero_point_x, zero_point_y, zero_point_z,
        k, slice_offset, pitch, memory_y_shape,
        slice_count, lmin, multiplier);
}
/*
 * Kernel: evaluate bubble values (no gradients) on a pitched grid,
 * using the fast lmin == 0 code path.
 */
__global__ void
#if (__CUDA_ARCH__ > 350)
__launch_bounds__(256)
#else
__launch_bounds__(128, 8)
#endif
Bubbles_evaluate_grid_pitched(const Bubble* __restrict__ bubble,
                              double* __restrict__ cube,
                              const double* __restrict__ grid_points_x,
                              const double* __restrict__ grid_points_y,
                              const double* __restrict__ grid_points_z,
                              const int shape_x,
                              const int shape_y,
                              const int shape_z,
                              const double zero_point_x,
                              const double zero_point_y,
                              const double zero_point_z,
                              const int k,
                              const int slice_offset,
                              const size_t pitch,
                              const int memory_y_shape,
                              const int slice_count,
                              const double multiplier) {
    // lmin is fixed to 0, which selects the lmin_zero template branch;
    // gradient evaluation is disabled and the gradient cubes stay NULL.
    Bubbles_evaluate_grid<true, true, false, false, false>(
        bubble, cube,
        /*gradient_cube_x=*/NULL,
        /*gradient_cube_y=*/NULL,
        /*gradient_cube_z=*/NULL,
        grid_points_x, grid_points_y, grid_points_z,
        shape_x, shape_y, shape_z,
        zero_point_x, zero_point_y, zero_point_z,
        k, slice_offset, pitch, memory_y_shape,
        slice_count, /*lmin=*/0, multiplier);
}
/*
 * Kernel entry point: evaluate bubble values and/or gradient components on a
 * grid slice, selected via the boolean template flags. Trampoline into the
 * templated Bubbles_evaluate_grid.
 *
 * NOTE(review): lmin is hard-coded to 0 here even though lmin_zero is still a
 * template parameter — presumably only lmin_zero=true instantiations are used
 * for gradients; confirm against the call sites.
 */
template <bool lmin_zero, bool evaluate_value, bool evaluate_gradients_x, bool evaluate_gradients_y, bool evaluate_gradients_z >
__global__ void
#if (__CUDA_ARCH__ > 350)
__launch_bounds__(256)
#else
// Kepler-class devices: smaller blocks, request 5 resident blocks per SM
__launch_bounds__(128, 5)
#endif
Bubbles_evaluate_grid_gradients(const Bubble* __restrict__ bubble,
                                double* __restrict__ cube,
                                double* __restrict__ gradient_cube_x,
                                double* __restrict__ gradient_cube_y,
                                double* __restrict__ gradient_cube_z,
                                const double* __restrict__ grid_points_x,
                                const double* __restrict__ grid_points_y,
                                const double* __restrict__ grid_points_z,
                                const int shape_x,
                                const int shape_y,
                                const int shape_z,
                                // bubble center coordinates
                                const double zero_point_x,
                                const double zero_point_y,
                                const double zero_point_z,
                                // k value of the bubble (r^k prefactor)
                                const int k,
                                // z-offset of this slice within the full cube
                                const int slice_offset,
                                const size_t pitch,
                                const int memory_y_shape,
                                // number of z-slices handled by this launch
                                const int slice_count,
                                const double multiplier) {
    Bubbles_evaluate_grid <lmin_zero, evaluate_value, evaluate_gradients_x, evaluate_gradients_y, evaluate_gradients_z> (
        bubble, cube, gradient_cube_x,
        gradient_cube_y, gradient_cube_z,
        grid_points_x, grid_points_y, grid_points_z,
        shape_x, shape_y, shape_z,
        zero_point_x, zero_point_y, zero_point_z,
        k, slice_offset, pitch, memory_y_shape,
        slice_count, /*lmin = */0, multiplier);
}
/*
* Evaluate Bubbles at points
*/
/*
 * Evaluate bubble values and/or gradients at a batch of arbitrary points.
 *
 * Template flags select the work done:
 *   lmin_zero            - true when lmin == 0; the false path evaluates only
 *                          values via Bubbles_evaluate_point_lmin
 *   evaluate_value       - accumulate the bubble value into result_array
 *   evaluate_gradients_* - accumulate the x/y/z gradient components into the
 *                          corresponding device_gradients arrays
 *
 * Results are ADDED (scaled by 'multiplier') into the output arrays; nothing
 * is overwritten. Writes happen only for in-range points that lie inside the
 * radial grid (distance < r_max && icell < ncell).
 */
template <bool lmin_zero, bool evaluate_value, bool evaluate_gradients_x, bool evaluate_gradients_y, bool evaluate_gradients_z>
__device__ inline void
Bubbles_evaluate_points(const Bubble* __restrict__ bubble,
                        double* __restrict__ result_array,
                        double* __restrict__ device_gradients_x,
                        double* __restrict__ device_gradients_y,
                        double* __restrict__ device_gradients_z,
                        // a 3d array, where the x coordinates are first,
                        // then y coordinates, and finally the z coordinates. This ordering
                        // is selected to get coalesced memory reads
                        const double* __restrict__ points,
                        // total number of points evaluated by this device
                        const int device_number_of_points,
                        // the zero point x-coordinate of bubbles
                        const double zero_point_x,
                        // the zero point y-coordinate of bubbles
                        const double zero_point_y,
                        // the zero point z-coordinate of bubbles
                        const double zero_point_z,
                        // the k value of the bubbles
                        const int k,
                        // the lmin value evaluated
                        const int lmin,
                        // number of points in this kernel call
                        const int point_count,
                        // device_point_offset
                        const int device_point_offset,
                        const double multiplier
                        ) {
    // Get the point order number within this kernel call
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    // 'value' / 'gradient' stay uninitialized on branches that do not compute
    // them; the store guard below ensures only computed results are written
    double value, gradient[3];
    double in_cell_position = 0.0;
    const int ncell = bubble->grid->ncell, nlip = bubble->grid->nlip;
    int icell = -1;
    double relative_position_x, relative_position_y, relative_position_z, distance, r_max = bubble->grid->r_max;
    //printf("X: %f, cell_spacing: %f, ncell: %d", distance, bubble->cell_spacing, ncell);
    // Check that the point is within the block
    if (id + device_point_offset < device_number_of_points && id < point_count ) {
        // calculate relative position to the zero-point and distance to it
        calculate_distance(relative_position_x,
                           relative_position_y,
                           relative_position_z,
                           distance,
                           zero_point_x,
                           zero_point_y,
                           zero_point_z,
                           points[id + device_point_offset],
                           points[id + device_point_offset + device_number_of_points],
                           points[id + device_point_offset + device_number_of_points*2]);
        // get the order number of cell the point resides in
        calculate_icell_radial(distance, bubble->charge, bubble->grid->r_max, ncell, nlip, &icell, &in_cell_position);
    }
    else {
        // out-of-range thread: give it a harmless dummy cell/distance so it
        // runs through the evaluation below without extra divergence; its
        // result is discarded by the guard before the writes
        icell = 1;
        distance = 0.1;
    }
    // calculate the bubble value for the point
    if (!lmin_zero) {
        // lmin > 0 path: values only (no gradient evaluation on this branch)
        if (evaluate_value) {
            value = Bubbles_evaluate_point_lmin( relative_position_x,
                                                 relative_position_y,
                                                 relative_position_z,
                                                 distance,
                                                 lmin,
                                                 bubble->lmax,
                                                 ncell,
                                                 nlip,
                                                 in_cell_position,
                                                 k,
                                                 icell * 8,
                                                 bubble->cf
                                               );
        }
    }
    else {
        if (evaluate_gradients_x || evaluate_gradients_y || evaluate_gradients_z) {
            Bubbles_evaluate_gradient_point
                <evaluate_gradients_x, evaluate_gradients_y, evaluate_gradients_z>
                (relative_position_x,
                 relative_position_y,
                 relative_position_z,
                 distance,
                 bubble->lmax,
                 ncell,
                 nlip,
                 in_cell_position,
                 k,
                 icell * 8,
                 bubble->cf,
                 bubble->df,
                 false, //(evaluate_gradients_x != evaluate_gradients_y || evaluate_gradients_x != evaluate_gradients_z) && icell == 0, //evaluate_gradients_x != evaluate_gradients_y || evaluate_gradients_x != evaluate_gradients_z,
                 gradient
                );
        }
        if (evaluate_value) {
            value = Bubbles_evaluate_point( relative_position_x,
                                            relative_position_y,
                                            relative_position_z,
                                            distance,
                                            bubble->lmax,
                                            ncell,
                                            nlip,
                                            in_cell_position,
                                            k,
                                            icell * 8,
                                            bubble->cf
                                          );
        }
    }
    // store the result to the result array; skip out-of-range threads and
    // points outside the radial grid
    if (id + device_point_offset < device_number_of_points && id < point_count && distance < r_max && icell < ncell ) {
        if (evaluate_value)       result_array[id+device_point_offset]       += multiplier * value;
        //if ((evaluate_gradients_x) && (id + device_point_offset <= 7)) printf("%%%%#%# X: %d: pos: %f, %f, %f, val: %e, remainder: %e\n", id + device_point_offset, relative_position_x, relative_position_y, relative_position_z, gradient[X_], device_gradients_x[id+device_point_offset]);
        //if ((evaluate_gradients_y) && (id + device_point_offset <= 7)) printf("%%%%#%# Y: %d: pos: %f, %f, %f, val: %e, remainder: %e\n", id + device_point_offset, relative_position_x, relative_position_y, relative_position_z, gradient[Y_], device_gradients_y[id+device_point_offset]);
        //if ((evaluate_gradients_z) && (id + device_point_offset <= 7)) printf("%%%%#%# Z: %d: pos: %f, %f, %f, val: %e, remainder: %e\n", id + device_point_offset, relative_position_x, relative_position_y, relative_position_z, gradient[Z_], device_gradients_z[id+device_point_offset]);
        // add also the gradient value, if we are evaluating them
        if (evaluate_gradients_x) device_gradients_x[id+device_point_offset] += multiplier * gradient[X_];
        if (evaluate_gradients_y) device_gradients_y[id+device_point_offset] += multiplier * gradient[Y_];
        if (evaluate_gradients_z) device_gradients_z[id+device_point_offset] += multiplier * gradient[Z_];
    }
    return;
}
/*
 * Damping factor 0.5 * erfc(r - 2/r): ~1 for small r, smoothly decaying to 0
 * for large r. The r <= 1e-12 guard avoids the 2/r singularity at the origin.
 */
__device__ inline double get_damping_factor(double r) {
    if (r <= 1e-12) {
        return 1.0;
    }
    return 0.5 * erfc(r - 2.0 / r);
}
/*
 * Kernel entry point: evaluate bubble values and/or gradients at a batch of
 * arbitrary points. Trampoline into the templated Bubbles_evaluate_points
 * with lmin fixed to 0.
 */
template <bool lmin_zero, bool evaluate_value, bool evaluate_gradients_x, bool evaluate_gradients_y, bool evaluate_gradients_z >
#if (__CUDA_ARCH__ <= 350)
// Kepler-class devices: smaller blocks, request 4 resident blocks per SM
__launch_bounds__(128, 4)
#else
__launch_bounds__(256)
#endif
__global__ void Bubbles_evaluate_gradient_points(
                const Bubble* __restrict__ bubble,
                double* __restrict__ result_array,
                double* __restrict__ device_gradients_x,
                double* __restrict__ device_gradients_y,
                double* __restrict__ device_gradients_z,
                // a 3d array, where the x coordinates are first,
                // then y coordinates, and finally the z coordinates. This ordering
                // is selected to get coalesced memory reads
                const double* __restrict__ points,
                // total number of points evaluated by this device
                const int device_number_of_points,
                // the zero point x-coordinate of bubbles
                const double zero_point_x,
                // the zero point y-coordinate of bubbles
                const double zero_point_y,
                // the zero point z-coordinate of bubbles
                const double zero_point_z,
                // the k value of the bubbles
                const int k,
                // number of points in this kernel call
                const int point_count,
                // device_point_offset
                const int device_point_offset,
                const double multiplier
                ) {
    Bubbles_evaluate_points<lmin_zero, evaluate_value, evaluate_gradients_x, evaluate_gradients_y, evaluate_gradients_z>(
        bubble,
        result_array,
        device_gradients_x,
        device_gradients_y,
        device_gradients_z,
        points,
        device_number_of_points,
        zero_point_x,
        zero_point_y,
        zero_point_z,
        k,
        0,
        point_count,
        device_point_offset,
        multiplier
    );
}
/*
 * Kernel entry point: evaluate bubble VALUES only (no gradients) at a batch
 * of arbitrary points, for the common lmin == 0 case. Trampoline into the
 * templated Bubbles_evaluate_points.
 */
#if (__CUDA_ARCH__ <= 350)
// Kepler-class devices: smaller blocks, request 7 resident blocks per SM
__launch_bounds__(128, 7)
#else
__launch_bounds__(256)
#endif
__global__ void Bubbles_evaluate_points_simple(
                const Bubble* __restrict__ bubble,
                double* __restrict__ result_array,
                // a 3d array, where the x coordinates are first,
                // then y coordinates, and finally the z coordinates. This ordering
                // is selected to get coalesced memory reads
                const double* __restrict__ points,
                // total number of points evaluated by this device
                const int device_number_of_points,
                // the zero point x-coordinate of bubbles
                const double zero_point_x,
                // the zero point y-coordinate of bubbles
                const double zero_point_y,
                // the zero point z-coordinate of bubbles
                const double zero_point_z,
                // the k value of the bubbles
                const int k,
                // number of points in this kernel call
                const int point_count,
                // device_point_offset
                const int device_point_offset,
                const double multiplier
                ) {
    Bubbles_evaluate_points<true, true, false, false, false>(
        bubble,
        result_array,
        /*device_gradients_x*/NULL,
        /*device_gradients_y*/NULL,
        /*device_gradients_z*/NULL,
        points,
        device_number_of_points,
        zero_point_x,
        zero_point_y,
        zero_point_z,
        k,
        0,
        point_count,
        device_point_offset,
        multiplier
    );
}
/*__global__ void Bubble_make_taylor_kernel(Bubble_t *result_bubble, int maximum_taylor_order, double *contaminants,
double *c2s_coefficients, int *c2s_lm_ids, int *c2s_term_starts, int offset) {
const int index=threadIdx.x + blockIdx.x * blockDim.x + offset;
extern __shared__ double shared_memory[];
double *one_per_kappa_factorial = &shared_memory[0];
double *shared_contaminants = &shared_memory[maximum_taylor_order];
int contaminants_size = (maximum_taylor_order+1)*(maximum_taylor_order+2)*(maximum_taylor_order+3)/6;
// calculate the 1/kappa! terms to the shared memory
if (threadIdx.x < maximum_taylor_order) {
int kappa = 1;
for (int i = 1; i <= threadIdx.x; i++) {
kappa *= i+1;
}
one_per_kappa_factorial[threadIdx.x] = 1.0 / ((double) kappa);
}
// load the contaminats to the shared memory
if (threadIdx.x < contaminants_size) {
int id = threadIdx.x;
while (id < contaminants_size) {
shared_contaminants[id] = contaminants[id];
id += blockDim.x;
}
}
__syncthreads();
// do the actual calculation
double r = result_bubble->gridpoints[index];
double prefactor;
double damping_factor = get_damping_factor(r);
int k = result_bubble->k, ncell= result_bubble->ncell, nlip = result_bubble->nlip;
int result_index = 0, counter = 0, term_counter = 0;
for (int x = 0; x <= maximum_taylor_order; x++) {
for (int y = 0; y <= maximum_taylor_order - x; y++) {
for (int z = 0; z <= maximum_taylor_order - x - y; z++) {
prefactor = one_per_kappa_factorial[x+y+z]// 1/[x+y+z]
* pow(r, (double)(x+y+z - k)) // r^x+y+z-k
* shared_contaminants[counter] // c
* damping_factor;
// go through all l,m terms which get contribution from x,y,z -term
while (term_counter < c2s_term_starts[counter+1]) {
// get the index in the result array, note: the -1 is because the indices are in
// fortran format, starting from 1
result_index = (c2s_lm_ids[term_counter]-1) * (ncell * (nlip-1) +1) + index;
// add the prefactor times the coefficient from cartesion to spherical conversion
result_bubble->f[result_index] += c2s_coefficients[term_counter] * prefactor;
// add the counter value used to follow the c2s conversion
term_counter++;
}
// add the conter value used to follow cartesian terms
counter ++;
}
}
}
} */
/*
* Kernel that sums the f-values of two bubble objects together. The summation happens
* pointwise so that each thread calculates all l,m values for each point. The result
* is stored to the bubble_f.
*/
/*
 * Adds bubble1_f pointwise into bubble_f. Each thread owns one radial point
 * and walks every (l,m) row of the pitched 2D f-arrays; device_f_pitch is
 * the row pitch in bytes of both arrays (assumed identical).
 */
__global__ void Bubble_sum_kernel(double* __restrict__ bubble_f, const double* __restrict__ bubble1_f, const int lmax, const int max_id, const size_t device_f_pitch) {
    const int point = threadIdx.x + blockIdx.x * blockDim.x;
    if (point >= max_id) return;
    const int lm_count = (lmax + 1) * (lmax + 1);
    // accumulate every l,m row for this radial point
    for (int ilm = 0; ilm < lm_count; ilm++) {
        bubble_f[ilm * device_f_pitch / sizeof(double) + point] += bubble1_f[ilm * device_f_pitch / sizeof(double) + point];
    }
}
/*
 * Decreases the k-value of a bubble by k_decrease. The operation happens
 * pointwise so that each thread calculates all l,m values for each point.
 * The result is stored to bubble_f.
 *
 * k_decrease is the number of k-steps by which the value is decreased.
 */
/*
 * Multiplies every (l,m) row of bubble_f by r[id]^k_decrease at each radial
 * point, lowering the bubble's effective k by k_decrease. One thread per
 * radial point; device_f_pitch is the row pitch in bytes of bubble_f.
 */
__global__ void Bubble_decrease_k_kernel(double* __restrict__ bubble_f, const double* __restrict__ r, const int k_decrease, const int lmax, const int max_id, const size_t device_f_pitch) {
    const int id = threadIdx.x + blockIdx.x * blockDim.x;
    if (id < max_id) {
        // r^k_decrease, computed once per point and reused for all l,m rows
        const double rpow = pow(r[id], (double) k_decrease);
        // go through all l, m values of input bubble 'bubble'
        for (int ilm = 0; ilm < (lmax+1)*(lmax+1); ilm++) {
            bubble_f[ilm * device_f_pitch / sizeof(double) + id] *= rpow;
        }
    }
}
/*
* Multiply cubes 1 and 2 and store it to cube1
*/
/*
 * Pointwise product: cube1[i] *= cube2[i] for flat indices in
 * [offset, cube_size). 'offset' lets successive launches cover successive
 * chunks of the cube.
 */
__global__ void multiply_cubes(double *cube1, double *cube2, const int cube_size, const int offset) {
    const int index = offset + threadIdx.x + blockIdx.x * blockDim.x;
    if (index >= cube_size) return;
    cube1[index] *= cube2[index];
}
/**************************************************************
* Bubble-implementation *
**************************************************************/
/*
* Evaluate the cf at ALL devices. This is a crucial preparation function for injection.
* For correct results, on call the Bubble must have all f-values present.
*
* NOTE: the function streaming is structured using number of l,m-pairs, like the uploadAll.
*/
/*
 * Evaluates the interpolation coefficients (cf) on every device; a required
 * preparation step for injection. All f-values must already be resident on
 * the devices. Work is streamed over (l,m)-pairs, like uploadAll().
 */
void Bubble::calculateCf() {
    const int n_lm = (this->lmax + 1) * (this->lmax + 1);
    const int threads_per_block = 64;
    check_errors(__FILE__, __LINE__);
    const int n_devices = this->streamContainer->getNumberOfDevices();
    const int n_streams = this->streamContainer->getStreamsPerDevice();
    for (int device = 0; device < n_devices; device++) {
        this->streamContainer->setDevice(device);
        int offset = 0;
        for (int stream = 0; stream < n_streams; stream++) {
            // split the (lmax+1)^2 rows as evenly as possible over the streams
            const int lm_this_stream = n_lm / n_streams + ((n_lm % n_streams) > stream);
            const int point_count = lm_this_stream * this->grid->ncell;
            // skip empty streams: e.g. with n_lm == 1 some streams get no
            // rows, and a zero-sized launch would raise a cuda error
            if (point_count > 0) {
                const int blocks = (point_count + threads_per_block - 1) / threads_per_block;
                hipLaunchKernelGGL(( calc_cf) , dim3(blocks), dim3(threads_per_block), 0, *this->streamContainer->getStream(device, stream) ,
                    this->device_copies[device], offset, point_count, this->device_f_pitch[device]);
                offset += point_count;
            }
            check_errors(__FILE__, __LINE__);
        }
    }
}
/*
 * Allocates the per-device arrays (f, cf, df) for this bubble and uploads a
 * device-resident copy of the Bubble struct to every device.
 *
 * Side effect: while copying the struct, this->f/cf/df/grid are temporarily
 * repointed at device memory; on return this->grid is restored but this->f
 * is left pointing at the LAST device's array — callers (the constructors)
 * set the host f afterwards.
 */
void Bubble::initDeviceMemory(int ibub, Grid1D *grid, double center[3], int lmax,
                              int k, double charge, StreamContainer *streamContainer) {
    //hipHostRegister(this, sizeof(Bubble), hipHostRegisterPortable);
    //check_errors(__FILE__, __LINE__);
    this->ibub = ibub;
    this->lmax = lmax;
    // remember the lmax the device arrays were sized for; this->lmax itself
    // may be changed later by upload()/uploadAll()
    this->device_memory_lmax = lmax;
    this->k = k;
    this->charge = charge;
    this->streamContainer = streamContainer;
    this->crd[X_] = center[X_];
    this->crd[Y_] = center[Y_];
    this->crd[Z_] = center[Z_];
    this->integrator = NULL;
    // per-device bookkeeping: upload events, struct copies, and array pointers
    this->uploaded_events = new hipEvent_t*[this->streamContainer->getNumberOfDevices()];
    this->device_copies = new Bubble * [this->streamContainer->getNumberOfDevices()];
    this->device_f = new double *[this->streamContainer->getNumberOfDevices()];
    this->device_f_pitch = new size_t [this->streamContainer->getNumberOfDevices()];
    this->device_cf = new double * [this->streamContainer->getNumberOfDevices()];
    this->device_df = new double * [this->streamContainer->getNumberOfDevices()];
    for (int device = 0; device < this->streamContainer->getNumberOfDevices(); device ++) {
        this->streamContainer->setDevice(device);
        // pitched 2D allocation: one row per (l,m)-pair, one column per radial point
        size_t sz=sizeof(double)*(grid->ncell*(grid->nlip-1)+1);
        hipMallocPitch((void**)&device_f[device], &device_f_pitch[device],
                       sz, (lmax+1)*(lmax+1));
        check_errors(__FILE__, __LINE__);
        hipMemset(device_f[device], 0, device_f_pitch[device]*(lmax+1)*(lmax+1));
        check_errors(__FILE__, __LINE__);
        // cf/df: 8 coefficients per cell per (l,m)-pair
        sz=sizeof(double)*grid->ncell*8*(lmax+1)*(lmax+1);
        hipMalloc(&this->device_cf[device], sz);
        hipMalloc(&this->device_df[device], sz);
        check_errors(__FILE__, __LINE__);
        // copy the bubble to the device, for which set the device pointers
        // to be the main-pointers
        this->f = this->device_f[device];
        this->cf = this->device_cf[device];
        this->df = this->device_df[device];
        this->grid = grid->device_copies[device];
        // allocate & copy the bubble to device
        hipMalloc(&this->device_copies[device], sizeof(Bubble));
        hipMemcpy(this->device_copies[device], this, sizeof(Bubble), hipMemcpyHostToDevice);
        check_errors(__FILE__, __LINE__);
    }
    // restore the host-side grid pointer
    this->grid = grid;
}
/*
 * Constructor with a host-side f-array 'bf'. Device memory is allocated and
 * zeroed by initDeviceMemory; the host f pointer is set AFTERWARDS because
 * initDeviceMemory leaves this->f pointing at device memory.
 */
Bubble::Bubble(int ibub, Grid1D *grid, double center[3], int lmax, int k, double *bf,
               double charge, StreamContainer *streamContainer) {
    this->initDeviceMemory(ibub, grid, center, lmax, k, charge, streamContainer);
    // set the host variables and register them for faster data transfer
    this->f = bf;
    /*hipHostRegister(this->f, sizeof(double)*(grid->ncell*(grid->nlip-1)+1)*(lmax+1)*(lmax+1), hipHostRegisterPortable);
    check_errors(__FILE__, __LINE__);*/
}
/*
 * Constructor without a host-side f-array. Device memory is allocated and
 * zeroed; note that initDeviceMemory leaves this->f pointing at the last
 * device's array until a later upload()/uploadAll() sets the host pointer.
 */
Bubble::Bubble(int ibub, Grid1D *grid, double center[3], int lmax, int k, double charge, StreamContainer *streamContainer) {
    this->initDeviceMemory(ibub, grid, center, lmax, k, charge, streamContainer);
}
/*
 * Clone-style constructor: allocates device memory shaped like old_bubble's
 * grid/center/charge but with the requested lmax and k.
 *
 * BUGFIX: previously old_bubble->k was passed and the 'k' parameter was
 * silently ignored (while 'lmax' WAS honored); now the requested k is used.
 */
Bubble::Bubble(Bubble *old_bubble, int lmax, int k) {
    this->initDeviceMemory(old_bubble->ibub, old_bubble->grid, old_bubble->crd, lmax, k, old_bubble->charge, old_bubble->streamContainer);
}
/*
* Uploads all bubble data to all devices (gpus) on all nodes. This kind of approach
* is needed when injecting bubbles to cuda. With bubble-multiplication - the upload
* -method is preferred.
*/
/*
 * Uploads the FULL f-array to all devices, streaming over (l,m)-pairs, then
 * triggers the cf computation (calculateCf) and records per-device upload
 * events that waitBubbleUploaded() can wait on.
 *
 * Side effect: this->f/cf/df/grid/lmax are temporarily repointed at device
 * memory while the struct copy is in flight, then restored.
 */
void Bubble::uploadAll(double *f, int lmax) {
    // set the host variables and register them for faster data transfer
    this->f = f;
    this->lmax = lmax;
    // host rows are tightly packed: one row per (l,m)-pair
    size_t host_pitch = (this->grid->ncell * (this->grid->nlip - 1) + 1) * sizeof(double);
    int ilmmax = (lmax+1)*(lmax+1);
    check_errors(__FILE__, __LINE__);
    Grid1D* host_grid = this->grid;
    // register the host array array
    //hipHostRegister(this->f, host_pitch * ilmmax, hipHostRegisterPortable);
    check_errors(__FILE__, __LINE__);
    double *device_f, *host_f;
    for (int device = 0; device < this->streamContainer->getNumberOfDevices(); device ++) {
        this->streamContainer->setDevice(device);
        // get the preallocated device pointers
        device_f = this->device_f[device];
        // NOTE: for all devices the first pointer points to the first value of each array
        host_f = this->f;
        for (int stream = 0; stream < this->streamContainer->getStreamsPerDevice(); stream++) {
            // split the (l,m)-rows as evenly as possible over the streams
            int ilm_per_stream = ilmmax / this->streamContainer->getStreamsPerDevice() +
                                 (( ilmmax % this->streamContainer->getStreamsPerDevice()) > stream);
            // upload the stream data to device
            hipMemcpy2DAsync((void *) device_f, this->device_f_pitch[device],
                             (void *) host_f, host_pitch,
                             host_pitch,
                             ilm_per_stream,
                             hipMemcpyHostToDevice,
                             *this->streamContainer->getStream(device, stream)
                            );
            check_errors(__FILE__, __LINE__);
            // add to the pointers
            device_f += ilm_per_stream * this->device_f_pitch[device] / sizeof(double);
            host_f   += ilm_per_stream * host_pitch / sizeof(double);
        }
        // copy the bubble to the device, for which set the device pointers
        // to be the main-pointers
        this->f = this->device_f[device];
        this->cf = this->device_cf[device];
        this->df = this->device_df[device];
        this->grid = host_grid->device_copies[device];
        this->lmax = lmax;
        // copy the bubble to device
        hipMemcpyAsync(this->device_copies[device], this, sizeof(Bubble), hipMemcpyHostToDevice,
                       *this->streamContainer->getStream(device, 0));
        check_errors(__FILE__, __LINE__);
        // restore the host-side pointers before moving to the next device
        this->f = f;
        this->grid = host_grid;
    }
    check_errors(__FILE__, __LINE__);
    this->streamContainer->synchronizeAllDevices();
    // calculate the cf
    this->calculateCf();
    // and synchronize the host with the device
    for (int device = 0; device < this->streamContainer->getNumberOfDevices(); device ++) {
        this->uploaded_events[device] = this->streamContainer->recordDeviceEvent(device);
    }
    // we are not in any case downloading the data back, so we can unregister the array
    //hipHostUnregister(this->f);
    check_errors(__FILE__, __LINE__);
}
/*
* Uploads part of a bubble to the device
*
* NOTE: in order to use this, the bubble uploaded (i.e., the f-array given as input)
* must have the same lmax value as the Bubble-object we are uploading to.
* NOTE: registers the input array but does not unregister it, thus after calling this
* the user must unregister the f elsewhere, for instance by calling the unregister function.
* NOTE: this function is designed to function together with the bubble multiplication
*/
/*
 * Uploads this MPI-node's share of the f-array to its devices, splitting the
 * radial points first by processor, then per device, then per stream.
 * Records per-device upload events that waitBubbleUploaded() can wait on.
 *
 * NOTE: the input f must have the same lmax as this Bubble; designed to work
 * together with bubble multiplication (see also download()).
 */
void Bubble::upload(double *f, int lmax, bool register_host) {
    // set the host variables and register them for faster data transfer
    this->f = f;
    check_errors(__FILE__, __LINE__);
    this->lmax = lmax;
    int ilmmax = (lmax + 1) * (lmax + 1);
    // calculate the total number of points in the bubbles each l,m -pair,
    int total_point_count = this->grid->ncell * (this->grid->nlip - 1) +1;
    Grid1D* host_grid = this->grid;
    // register the host array, if not explicitly telling not to
    /*if (register_host) {
        hipHostRegister(this->f, sizeof(double)*ilmmax*total_point_count, hipHostRegisterPortable);
        check_errors(__FILE__, __LINE__);
    }*/
    // store the processor variables to be used at downloading time
    // NOTE(review): the unqualified processor_order_number / number_of_processors
    // on the right-hand side appear to read file-scope variables; if they instead
    // resolve to the members themselves these are no-ops — confirm against the
    // surrounding translation unit
    this->processor_order_number = processor_order_number;
    this->number_of_processors = number_of_processors;
    size_t host_pitch = total_point_count * sizeof(double);
    // determine how many of the points belong to the current mpi-node
    int processor_point_count = total_point_count / this->number_of_processors
                  + ((total_point_count % number_of_processors) > processor_order_number);
    // get the offset to the f-array caused by other processors
    int remainder = total_point_count % this->number_of_processors;
    int offset = processor_order_number * total_point_count / number_of_processors +
                 ((remainder < processor_order_number) ? remainder : processor_order_number);
    double *device_f;
    double *host_f = &this->f[offset];
    for (int device = 0; device < this->streamContainer->getNumberOfDevices(); device ++) {
        this->streamContainer->setDevice(device);
        // get the preallocated device pointers,
        // NOTE: The memory of bubble is allocated for its entire
        //       length, thus we have to go to the part we want to upload
        device_f = this->device_f[device];
        device_f = &device_f[offset];
        // detemine how many of the mpi-nodes points belong to this device (gpu)
        int device_point_count = processor_point_count / this->streamContainer->getNumberOfDevices() +
                                 ((processor_point_count % this->streamContainer->getNumberOfDevices()) > device);
        for (int stream = 0; stream < this->streamContainer->getStreamsPerDevice(); stream ++) {
            // detemine the number of the points handled by this stream
            int stream_point_count = device_point_count / this->streamContainer->getStreamsPerDevice() +
                                     ((device_point_count % this->streamContainer->getStreamsPerDevice()) > stream);
            // upload the data to device, copy all ilmmax-rows for stream_point_count columns
            hipMemcpy2DAsync((void *) device_f, this->device_f_pitch[device],
                             (void *) host_f, host_pitch,
                             stream_point_count * sizeof(double),
                             ilmmax,
                             hipMemcpyHostToDevice,
                             *this->streamContainer->getStream(device, stream)
                            );
            check_errors(__FILE__, __LINE__);
            offset += stream_point_count;
            device_f += stream_point_count;
            host_f += stream_point_count;
        }
        // copy the bubble to the device, for which set the device pointers
        // to be the main-pointers
        this->f = this->device_f[device];
        this->cf = this->device_cf[device];
        this->df = this->device_df[device];
        this->grid = host_grid->device_copies[device];
        this->lmax = lmax;
        // copy the bubble to device
        hipMemcpyAsync(this->device_copies[device], this, sizeof(Bubble), hipMemcpyHostToDevice,
                       *this->streamContainer->getStream(device, 0));
        check_errors(__FILE__, __LINE__);
        // restore the host-side pointers before moving to the next device
        this->f = f;
        this->grid = host_grid;
    }
    // and synchronize the host with the device
    for (int device = 0; device < this->streamContainer->getNumberOfDevices(); device ++) {
        this->uploaded_events[device] = this->streamContainer->recordDeviceEvent(device);
    }
}
/*
 * Makes 'stream' (on 'device') wait until this bubble's upload — the event
 * recorded by upload()/uploadAll() — has completed.
 */
void Bubble::waitBubbleUploaded(int device, hipStream_t *stream) {
    hipStreamWaitEvent(*stream, *this->uploaded_events[device], 0);
}
/*
 * Makes the default stream wait until this bubble's upload on 'device'
 * has completed.
 */
void Bubble::waitBubbleUploaded(int device) {
    hipStreamWaitEvent(0, *this->uploaded_events[device], 0);
}
/*
 * Sets this bubble's device-side f-values to zero.
 *
 * NOTE: zeroes only the portion of the f-array owned by this MPI-node,
 * split across this node's devices and streams in the same way as
 * upload()/download().
 * NOTE: the memsets are asynchronous; synchronize the streams before
 * relying on the zeroed values.
 */
/*
 * Zeroes this MPI-node's portion of the device-side f-arrays using
 * asynchronous 2D memsets, one chunk per stream. The work is split the same
 * way as in upload()/download(): first by processor, then per device, then
 * per stream. The memsets are asynchronous; synchronize before relying on
 * the zeroed values.
 *
 * BUGFIX: removed the no-op self-assignment 'this->f = f;' (copy-paste
 * residue from upload()) and the misleading comments that came with it.
 */
void Bubble::setToZero() {
    check_errors(__FILE__, __LINE__);
    // zero all rows the device arrays were allocated for
    int ilmmax = (this->device_memory_lmax + 1) * (this->device_memory_lmax + 1);
    // calculate the total number of points in the bubbles each l,m -pair,
    int total_point_count = this->grid->ncell * (this->grid->nlip - 1) +1;
    // determine how many of the points belong to the current mpi-node
    int processor_point_count = total_point_count / this->number_of_processors
                  + ((total_point_count % this->number_of_processors) > this->processor_order_number);
    // get the offset to the f-array caused by other processors
    int remainder = total_point_count % this->number_of_processors;
    int offset = this->processor_order_number * total_point_count / this->number_of_processors +
                 ((remainder < this->processor_order_number) ? remainder : this->processor_order_number);
    double *device_f;
    for (int device = 0; device < this->streamContainer->getNumberOfDevices(); device ++) {
        this->streamContainer->setDevice(device);
        // get the preallocated device pointers,
        // NOTE: The memory of bubble is allocated for its entire
        //       length, thus we have to go to the part we want to zero
        device_f = this->device_f[device];
        device_f = &device_f[offset];
        // detemine how many of the mpi-nodes points belong to this device (gpu)
        int device_point_count = processor_point_count / this->streamContainer->getNumberOfDevices() +
                                 ((processor_point_count % this->streamContainer->getNumberOfDevices()) > device);
        for (int stream = 0; stream < this->streamContainer->getStreamsPerDevice(); stream ++) {
            // detemine the number of the points handled by this stream
            int stream_point_count = device_point_count / this->streamContainer->getStreamsPerDevice() +
                                     ((device_point_count % this->streamContainer->getStreamsPerDevice()) > stream);
            // zero stream_point_count columns across all ilmmax rows
            hipMemset2DAsync((void *) device_f, this->device_f_pitch[device],
                             0,
                             stream_point_count * sizeof(double),
                             ilmmax,
                             *this->streamContainer->getStream(device, stream)
                            );
            check_errors(__FILE__, __LINE__);
            offset += stream_point_count;
            device_f += stream_point_count;
        }
    }
}
/*
* Downloads part of a bubble from the device. Downloads to host exactly the same
* part as the upload function above uploads to device.
*
* NOTE: this function is designed to function together with the bubble multiplication &
* summation
*/
/*
 * Downloads from the devices exactly the same portion of the f-array that
 * upload() sent: this MPI-node's share, split per device and per stream.
 * The copies are asynchronous; synchronize before reading this->f.
 *
 * NOTE: this function is designed to function together with the bubble
 * multiplication & summation.
 */
void Bubble::download(int lmax) {
    // calculate the total number of points in the bubbles each l,m -pair,
    int total_point_count = this->grid->ncell * (this->grid->nlip - 1) +1;
    // host rows are tightly packed: one row per (l,m)-pair
    size_t host_pitch = total_point_count * sizeof(double);
    int ilmmax = (lmax + 1) * (lmax + 1);
    // determine how many of the points belong to the current mpi-node
    int processor_point_count = total_point_count / this->number_of_processors
                  + ((total_point_count % this->number_of_processors) > this->processor_order_number);
    // get the offset to the f-array caused by other processors
    int remainder = total_point_count % this->number_of_processors;
    int offset = this->processor_order_number * total_point_count / this->number_of_processors
                  + ((remainder < this->processor_order_number) ? remainder : this->processor_order_number);
    double *device_f;
    double *host_f = &this->f[offset];
    check_errors(__FILE__, __LINE__);
    for (int device = 0; device < this->streamContainer->getNumberOfDevices(); device ++) {
        this->streamContainer->setDevice(device);
        // get the preallocated device pointers,
        // NOTE: The memory of bubble is allocated for its entire
        //       length, thus we have to go to the part we want to download
        device_f = this->device_f[device];
        device_f = &device_f[offset];
        // detemine how many of the mpi-nodes points belong to this device (gpu)
        int device_point_count = processor_point_count / this->streamContainer->getNumberOfDevices() +
                                 ((processor_point_count % this->streamContainer->getNumberOfDevices()) > device);
        for (int stream = 0; stream < this->streamContainer->getStreamsPerDevice(); stream ++) {
            // detemine the number of the points handled by this stream
            int stream_point_count = device_point_count / this->streamContainer->getStreamsPerDevice() +
                                     ((device_point_count % this->streamContainer->getStreamsPerDevice()) > stream);
            // download the data from device, copy all ilmmax-rows for stream_point_count columns
            hipMemcpy2DAsync((void *) host_f, host_pitch,
                             (void *) device_f, this->device_f_pitch[device],
                             stream_point_count * sizeof(double),
                             ilmmax,
                             hipMemcpyDeviceToHost,
                             *this->streamContainer->getStream(device, stream)
                            );
            check_errors(__FILE__, __LINE__);
            offset += stream_point_count;
            device_f += stream_point_count;
            host_f += stream_point_count;
            check_errors(__FILE__, __LINE__);
        }
    }
}
/*
* Adds together the f-values of 'this' and input bubble 'bubble'
*
* NOTE: this function is designed to function together with the bubble multiplication
* NOTE: this function assumes that the bubbles have identical grids and with that,
* identical f_pitches
*/
/*
 * Adds 'bubble's f-values into this bubble on the devices, launching
 * Bubble_sum_kernel per stream over this MPI-node's share of the points.
 * Only the l,m-rows common to both bubbles (min of the two lmax values) are
 * summed. Assumes both bubbles have identical grids and therefore identical
 * f-pitches; waits for both bubbles' uploads to finish before launching.
 */
void Bubble::add(Bubble *bubble) {
    // make sure that the k-values of the input functions are the same
    // this is done by decreasing the larger k-value to be equal
    // with the smaller
    check_errors(__FILE__, __LINE__);
    if (this->k > bubble->k) {
        this->decreaseK(this->k - bubble->k);
    }
    else if (this->k < bubble->k) {
        bubble->decreaseK(bubble->k - this->k);
    }
    check_errors(__FILE__, __LINE__);
    // calculate the total number of points in the bubbles each l,m -pair,
    int total_point_count = this->grid->ncell * (this->grid->nlip - 1) +1;
    int smaller_lmax = min(this->lmax, bubble->lmax);
    // determine how many of the points belong to the current mpi-node
    int processor_point_count = total_point_count / this->number_of_processors
                  + ((total_point_count % this->number_of_processors) > this->processor_order_number);
    // get the offset to the f-array caused by other processors
    int remainder = total_point_count % this->number_of_processors;
    int offset = this->processor_order_number * total_point_count / this->number_of_processors
                  + ((remainder < this->processor_order_number) ? remainder : this->processor_order_number);
    double *device_f;
    double *device_f1;
    int block_size = 256;
    for (int device = 0; device < this->streamContainer->getNumberOfDevices(); device ++) {
        this->streamContainer->setDevice(device);
        // ensure both operands are fully uploaded before summing
        this->waitBubbleUploaded(device);
        bubble->waitBubbleUploaded(device);
        // get the preallocated device pointers,
        // NOTE: The memory of bubble is allocated for its entire
        //       length, thus we have to go to the part we want to handle
        device_f = this->device_f[device];
        device_f = &device_f[offset];
        device_f1 = bubble->device_f[device];
        device_f1 = &device_f1[offset];
        // detemine how many of the mpi-nodes points belong to this device (gpu)
        int device_point_count = processor_point_count / this->streamContainer->getNumberOfDevices() +
                                 ((processor_point_count % this->streamContainer->getNumberOfDevices()) > device);
        for (int stream = 0; stream < this->streamContainer->getStreamsPerDevice(); stream ++) {
            // detemine the number of the points handled by this stream
            int stream_point_count = device_point_count / this->streamContainer->getStreamsPerDevice() +
                                     ((device_point_count % this->streamContainer->getStreamsPerDevice()) > stream);
            int grid_size = (stream_point_count + block_size - 1) / block_size;
            // call the kernel
            hipLaunchKernelGGL(( Bubble_sum_kernel) , dim3(grid_size), dim3(block_size), 0, *this->streamContainer->getStream(device, stream),
                device_f, device_f1, smaller_lmax, stream_point_count, this->device_f_pitch[device]);
            check_errors(__FILE__, __LINE__);
            // add the device pointers and the offset
            offset += stream_point_count;
            device_f += stream_point_count;
            device_f1 += stream_point_count;
        }
    }
}
/*
* Decreases the k-value of a bubble by k_decrease
*
* NOTE: this function is designed to function together with the bubble multiplication
* NOTE: this function assumes that the bubbles have identical grids and with that,
* identical f_pitches
*/
void Bubble::decreaseK(int k_decrease) {
    // calculate the total number of points in each l,m -pair of the bubble
    int total_point_count = this->grid->ncell * (this->grid->nlip - 1) + 1;
    // determine how many of the points belong to the current mpi-node:
    // the first 'remainder' processors each handle one extra point
    int processor_point_count = total_point_count / this->number_of_processors
                  + ((total_point_count % this->number_of_processors) > this->processor_order_number);
    // get the offset to the f-array caused by the points of the preceding
    // processors.
    // NOTE: the quotient is parenthesized on purpose; the base offset must be
    // processor_order_number * floor(total_point_count / number_of_processors)
    // to stay consistent with the per-processor point counts above. Without
    // the parentheses the integer division is applied to the product, which
    // makes the processor regions overlap or leave gaps whenever the
    // division has a remainder.
    int remainder = total_point_count % this->number_of_processors;
    int offset = this->processor_order_number * (total_point_count / this->number_of_processors)
                  + ((remainder < this->processor_order_number) ? remainder : this->processor_order_number);
    double *device_f;
    double *device_r;
    int block_size = 256;
    for (int device = 0; device < this->streamContainer->getNumberOfDevices(); device ++) {
        this->streamContainer->setDevice(device);
        // make sure the bubble is fully uploaded before modifying it
        this->waitBubbleUploaded(device);
        // get the preallocated device pointers.
        // NOTE: the memory of a bubble is allocated for its entire
        // length, thus we have to offset to the part handled by this processor
        device_f = this->device_f[device];
        device_f = &device_f[offset];
        device_r = this->grid->device_gridpoints[device];
        device_r = &device_r[offset];
        // determine how many of the mpi-node's points belong to this device (gpu)
        int device_point_count = processor_point_count / this->streamContainer->getNumberOfDevices() +
                  ((processor_point_count % this->streamContainer->getNumberOfDevices()) > device);
        for (int stream = 0; stream < this->streamContainer->getStreamsPerDevice(); stream ++) {
            // determine the number of points handled by this stream
            int stream_point_count = device_point_count / this->streamContainer->getStreamsPerDevice() +
                  ((device_point_count % this->streamContainer->getStreamsPerDevice()) > stream);
            // skip empty streams: a zero-sized grid is an invalid launch configuration
            if (stream_point_count > 0) {
                int grid_size = (stream_point_count + block_size - 1) / block_size;
                // multiply the f-values of every l,m -pair by r^k_decrease
                hipLaunchKernelGGL(( Bubble_decrease_k_kernel) , dim3(grid_size), dim3(block_size), 0, *this->streamContainer->getStream(device, stream),
                    device_f, device_r, k_decrease, this->lmax, stream_point_count, this->device_f_pitch[device]);
                check_errors(__FILE__, __LINE__);
                // advance the device pointers and the offset
                offset += stream_point_count;
                device_f += stream_point_count;
                device_r += stream_point_count;
            }
        }
    }
}
/*
* Integrates over the bubble. We only need to integrate over the s-bubble.
*/
double Bubble::integrate() {
    // calculate the total number of points in each l,m -pair of the bubble
    int total_point_count = this->grid->getShape();
    // lazily construct the 1D integrator on first use
    if (!this->integrator) {
        this->integrator = new Integrator1D(this->streamContainer, this->grid, this->processor_order_number, this->number_of_processors);
    }
    // upload the l,m=0 radial function f to the integrator
    this->integrator->upload(this->f);
    check_errors(__FILE__, __LINE__);
    // determine how many of the points belong to the current mpi-node:
    // the first 'remainder' processors each handle one extra point
    int processor_point_count = total_point_count / this->number_of_processors
                  + ((total_point_count % this->number_of_processors) > this->processor_order_number);
    // get the offset to the f-array caused by the points of the preceding
    // processors.
    // NOTE: the quotient is parenthesized on purpose; the base offset must be
    // processor_order_number * floor(total_point_count / number_of_processors)
    // to stay consistent with the per-processor point counts above. Without
    // the parentheses the integer division is applied to the product, which
    // makes the processor regions overlap or leave gaps whenever the
    // division has a remainder.
    int remainder = total_point_count % this->number_of_processors;
    int offset = this->processor_order_number * (total_point_count / this->number_of_processors)
                  + ((remainder < this->processor_order_number) ? remainder : this->processor_order_number);
    // get the partial s-bubble device vectors residing now in the integrator's device memory
    double **device_vectors = this->integrator->getDeviceVectors();
    double *device_vector;
    double *device_r;
    // multiply the integration vector with r^(2+this->k), i.e., undo the r^k
    // prefactor of the stored values and apply the r^2 of the radial volume element.
    // NOTE: 2 + this->k must be larger than or equal to zero
    int k_change = 2 + this->k;
    if (k_change > 0) {
        int block_size = 256;
        for (int device = 0; device < this->streamContainer->getNumberOfDevices(); device ++) {
            this->streamContainer->setDevice(device);
            // get the preallocated device pointers.
            // NOTE: the gridpoints are allocated for the entire grid, thus we
            // offset to the part handled by this processor; the integrator only
            // has the memory it needs, so device_vector is not offset
            device_vector = device_vectors[device];
            device_r = this->grid->device_gridpoints[device];
            device_r = &device_r[offset];
            // determine how many of the mpi-node's points belong to this device (gpu)
            int device_point_count = processor_point_count / this->streamContainer->getNumberOfDevices() +
                      ((processor_point_count % this->streamContainer->getNumberOfDevices()) > device);
            for (int stream = 0; stream < this->streamContainer->getStreamsPerDevice(); stream ++) {
                // determine the number of points handled by this stream
                int stream_point_count = device_point_count / this->streamContainer->getStreamsPerDevice() +
                      ((device_point_count % this->streamContainer->getStreamsPerDevice()) > stream);
                // skip empty streams: a zero-sized grid is an invalid launch configuration
                if (stream_point_count > 0) {
                    int grid_size = (stream_point_count + block_size - 1) / block_size;
                    // call the decrease_k kernel with lmax = 0: only the s-bubble is integrated
                    hipLaunchKernelGGL(( Bubble_decrease_k_kernel) , dim3(grid_size), dim3(block_size), 0, *this->streamContainer->getStream(device, stream),
                        device_vector, device_r, k_change, 0, stream_point_count, 0);
                    check_errors(__FILE__, __LINE__);
                    // advance the device pointers and the offset
                    offset += stream_point_count;
                    device_vector += stream_point_count;
                    device_r += stream_point_count;
                    check_errors(__FILE__, __LINE__);
                }
            }
        }
    }
    else if (k_change < 0) {
        printf("Invalid k-value (%d) at bubble-integrate, must be larger or equal with -2. At file '%s', line number %d", this->k, __FILE__, __LINE__);
        exit(-1);
    }
    // NOTE(review): the 4*pi factor presumably comes from the angular part of
    // the s-function integral — confirm against the normalization convention
    return 4.0 * M_PI * this->integrator->integrate();
}
void Bubble::registerHost(double *f) {
    check_errors(__FILE__, __LINE__);
    // store the host-side pointer to the radial function values
    this->f = f;
    // page-locked registration of the host buffer is currently disabled:
    /*int ilmmax = (this->lmax + 1) * (this->lmax + 1);
    // calculate the total number of points in the bubbles each l,m -pair,
    int total_point_count = this->grid->ncell * (this->grid->nlip - 1) +1;
    hipHostRegister(this->f, sizeof(double)*ilmmax*total_point_count, hipHostRegisterPortable);*/
    check_errors(__FILE__, __LINE__);
}
void Bubble::destroy() {
    // the grid is not destroyed here; just drop the reference
    //this->grid->destroy();
    //check_errors(__FILE__, __LINE__);
    //delete this->grid;
    this->grid = NULL;
    // release the per-device buffers
    const int device_count = this->streamContainer->getNumberOfDevices();
    for (int device = 0; device < device_count; device++) {
        this->streamContainer->setDevice(device);
        hipFree(this->device_f[device]);
        check_errors(__FILE__, __LINE__);
        hipFree(this->device_cf[device]);
        check_errors(__FILE__, __LINE__);
        hipFree(this->device_df[device]);
        check_errors(__FILE__, __LINE__);
        hipFree(this->device_copies[device]);
        check_errors(__FILE__, __LINE__);
    }
    // free the host-side pointer/bookkeeping arrays
    delete[] this->device_copies;
    delete[] this->device_f;
    delete[] this->device_df;
    delete[] this->device_f_pitch;
    delete[] this->device_cf;
    delete[] this->uploaded_events;
    // tear down the integrator, if one was ever created
    if (this->integrator) {
        this->integrator->destroy();
        delete this->integrator;
        this->integrator = NULL;
    }
    check_errors(__FILE__, __LINE__);
    //hipHostUnregister(this);
}
/*
* Set MPI-configuration used by the bubble object.
*/
void Bubble::setProcessorConfiguration( int processor_order_number, int number_of_processors) {
    // store the MPI rank and the total number of MPI processes
    this->processor_order_number = processor_order_number;
    this->number_of_processors = number_of_processors;
}
/**************************************************************
* Bubbles-implementation *
**************************************************************/
int Bubbles::getBubbleCount() {
    // number of Bubble objects stored in this container
    return this->nbub;
}
Bubbles::Bubbles(int nbub) {
    // allocate an empty array of Bubble pointers; the bubbles themselves
    // are created later (e.g., via initBubble)
    this->is_sub_bubbles = false;
    this->nbub = nbub;
    this->bubbles = new Bubble*[nbub];
}
/*
* Init new Bubbles by making a copy of the old.
*
* NOTE: This makes a deep copy of the old bubbles, meaning that
* new memory places are allocated for the underlying Bubble objects.
*/
Bubbles::Bubbles(Bubbles *old_bubbles, int lmax, int k) {
    // deep copy: allocate a fresh Bubble for each bubble of the source
    this->is_sub_bubbles = false;
    this->nbub = old_bubbles->nbub;
    this->bubbles = new Bubble*[this->nbub];
    for (int ibub = 0; ibub < old_bubbles->getBubbleCount(); ibub++) {
        this->bubbles[ibub] = new Bubble(old_bubbles->bubbles[ibub], lmax, k);
    }
}
/*
* Get new bubbles object containing some of the original bubbles.
* The bubbles selected in the new objects are the ones with
* the ibub values matching to those in input parameter 'ibubs'.
* NOTE: this function makes a shallow copy of the input bubbles 'this',
* i.e., the underlying Bubble objects are copied as references only
*/
Bubbles *Bubbles::getSubBubbles(int *ibubs, int nbub) {
    // shallow copy: the returned container only references the existing
    // Bubble objects selected by the global order numbers in 'ibubs'
    Bubbles *result = new Bubbles(nbub);
    result->is_sub_bubbles = true;
    for (int i = 0; i < result->getBubbleCount(); i++) {
        result->bubbles[i] = this->getBubble(ibubs[i]);
    }
    return result;
}
/*
* Get the pointer to the Bubble with local order number 'i' equal to
* input parameter 'i'. If not found NULL is returned.
*
* @param i - The local order number of the bubble
*/
Bubble *Bubbles::getBubbleWithLocalOrderNumber(int i) {
    // valid local order numbers are 0 ... nbub-1; otherwise NULL
    return (i < this->nbub) ? this->bubbles[i] : NULL;
}
/*
* Get the pointer to the Bubble with global order number 'ibub' equal to
* input parameter 'ibub'. If not found NULL is returned.
*
* @param ibub - The global order number of the bubble
*/
Bubble *Bubbles::getBubble(int ibub) {
    // linear search for a bubble with a matching global order number;
    // NULL if none is found
    Bubble *found = NULL;
    for (int i = 0; i < this->getBubbleCount() && !found; i++) {
        if (this->bubbles[i]->ibub == ibub) {
            found = this->bubbles[i];
        }
    }
    return found;
}
/*
* Check if the Bubbles contains a Bubble with global order number 'ibub'.
*
* @param ibub - The global order number of the bubble
*/
bool Bubbles::containsBubble(int ibub) {
    // present iff a bubble with this global order number can be found
    return this->getBubble(ibub) != NULL;
}
/*
* Init a bubble with global order number 'ibub' to the 'i':th slot in the
* internal bubbles array. Contains also the values for the bubble.
*
* @param grid - The grid used in the bubble
* @param i - The internal order number of the bubble
* @param ibub - The global order number of the bubble
* @param center - The global center point of the bubble
* @param lmax - The maximum value of quantum number 'l' for the bubble
* @param k - The parameter k for the r^k multiplier of the values
* @param bf - The values of the bubble
* @param charge - The charge of the atom at the center of the bubble
 * @param streamContainer - The container holding the streams used in cuda evaluation of anything
* related to this object
*/
void Bubbles::initBubble(Grid1D *grid, int i, int ibub, double center[3], int lmax,
                         int k, double *bf, double charge, StreamContainer *streamContainer) {
    // construct the bubble in slot 'i', including its values 'bf'
    this->bubbles[i] = new Bubble(ibub, grid, center, lmax, k, bf, charge, streamContainer);
}
/*
* Init a bubble with global order number 'ibub' to the 'i':th slot in the
* internal bubbles array. Contains also the values for the bubble.
*
* @param grid - The grid used in the bubble
* @param i - The internal order number of the bubble
* @param ibub - The global order number of the bubble
* @param center - The global center point of the bubble
* @param lmax - The maximum value of quantum number 'l' for the bubble
* @param k - The parameter k for the r^k multiplier of the values
* @param charge - The charge of the atom at the center of the bubble
 * @param streamContainer - The container holding the streams used in cuda evaluation of anything
* related to this object
*/
void Bubbles::initBubble(Grid1D *grid, int i, int ibub, double center[3], int lmax,
                         int k, double charge, StreamContainer *streamContainer) {
    check_errors(__FILE__, __LINE__);
    // construct the bubble in slot 'i' without uploading any values
    this->bubbles[i] = new Bubble(ibub, grid, center, lmax, k, charge, streamContainer);
}
void Bubbles::unregister() {
    // Host-memory unregistration is currently disabled (no-op); the original
    // implementation is kept below for reference.
    /*for (int ibub = 0; ibub < this->getBubbleCount(); ibub ++) {
        hipHostUnregister(this->getBubble(ibub)->f);
        check_errors(__FILE__, __LINE__);
    }*/
}
void Bubbles::waitBubblesUploaded(int device) {
    // block until every contained bubble has finished uploading to 'device'
    const int count = this->getBubbleCount();
    for (int i = 0; i < count; i++) {
        this->bubbles[i]->waitBubbleUploaded(device);
    }
}
/*
* Set MPI-configuration used by the bubble object.
*/
void Bubbles::setProcessorConfiguration( int processor_order_number, int number_of_processors) {
    // forward the MPI configuration to each contained bubble
    const int count = this->getBubbleCount();
    for (int i = 0; i < count; i++) {
        this->bubbles[i]->setProcessorConfiguration(processor_order_number, number_of_processors);
    }
}
double Bubbles::integrate() {
    // total integral is the sum of the individual bubble integrals
    double total = 0.0;
    const int count = this->getBubbleCount();
    for (int i = 0; i < count; i++) {
        total += this->getBubbleWithLocalOrderNumber(i)->integrate();
    }
    return total;
}
void Bubbles::download() {
for (int i = 0; i < this->getBubbleCount(); i ++) {
this->bubbles[i]->download(this->bubbles[i]->lmax);
}
}
void Bubbles::add(Bubbles *bubbles) {
    // for every local slot, look up the bubble with the same global order
    // number in the input container and add its values in-place.
    // NOTE(review): the loop bound comes from the *input* container while
    // this->bubbles[i] is indexed with it — assumes equal bubble counts;
    // verify against the callers.
    for (int i = 0; i < bubbles->getBubbleCount(); i ++) {
        Bubble *other = bubbles->getBubble(this->bubbles[i]->ibub);
        // only add when the corresponding Bubble exists in both containers
        if (other) {
            this->bubbles[i]->add(other);
        }
    }
    check_errors(__FILE__, __LINE__);
}
void Bubbles::destroy() {
    // sub-bubbles only reference Bubble objects owned by another container,
    // so the individual bubbles are destroyed only when this container owns them
    if (!this->is_sub_bubbles) {
        for (int i = 0; i < this->getBubbleCount(); i++) {
            this->bubbles[i]->destroy();
            delete this->bubbles[i];
        }
    }
    // the pointer array itself is always owned by this container
    delete[] this->bubbles;
}
/*
 * Injects (sums) the values of all bubbles of this container into 'cube' and
 * optionally evaluates the x/y/z gradients into the corresponding gradient
 * cubes. The work is split slice-by-slice (Z-direction) over all devices and
 * streams of the cube's StreamContainer.
 *
 * @param grid3d                     - the 3D grid the cube lives on
 * @param cube                       - the destination cube for the bubble values
 * @param lmin                       - minimum l-value injected; lmin == 0 uses the
 *                                     specialized kernels, otherwise the lmin-kernel
 * @param gradients_cube_x/y/z       - destination cubes for the gradients
 * @param evaluate_value             - if true, the bubble values are injected to 'cube'
 * @param evaluate_gradients_x/y/z   - select which gradient directions are evaluated
 *
 * NOTE(review): when only some of the evaluate_* flags are set, the unused
 * dev_gradient_* pointers are passed to the kernels uninitialized; the
 * template flags presumably make the kernels ignore them — confirm.
 */
void Bubbles::inject(Grid3D *grid3d, CudaCube *cube, int lmin, CudaCube *gradients_cube_x,
                     CudaCube *gradients_cube_y, CudaCube *gradients_cube_z, bool evaluate_value,
                     bool evaluate_gradients_x, bool evaluate_gradients_y, bool evaluate_gradients_z) {
    check_errors(__FILE__, __LINE__);
    int total_slice_count = cube->getShape(Z_);
    // the minimum l is 0 always in the multiplication
    int device_slice_count;
    // get the pointer arrays from the cubes
    double **device_cubes = cube->getDeviceCubes();
    double **device_gradients_x, **device_gradients_y, **device_gradients_z;
    // get the device gradient result pointers (only for requested directions)
    if (evaluate_gradients_x) device_gradients_x = gradients_cube_x->getDeviceCubes();
    if (evaluate_gradients_y) device_gradients_y = gradients_cube_y->getDeviceCubes();
    if (evaluate_gradients_z) device_gradients_z = gradients_cube_z->getDeviceCubes();
    size_t *device_pitches = cube->getDevicePitches();
    int *device_memory_shape = cube->getDeviceMemoryShape();
    int slice_offset = 0;
    Bubble *bubble;
    StreamContainer *streamContainer = cube->getStreamContainer();
    // copy the cubes to the device & execute the kernels
    for (int device = 0; device < streamContainer->getNumberOfDevices(); device ++) {
        // set the used device (gpu)
        streamContainer->setDevice(device);
        double *dev_cube = device_cubes[device];
        double *dev_gradient_x, *dev_gradient_y, *dev_gradient_z;
        // get the gradient addresses for the device
        if (evaluate_gradients_x) dev_gradient_x = device_gradients_x[device];
        if (evaluate_gradients_y) dev_gradient_y = device_gradients_y[device];
        if (evaluate_gradients_z) dev_gradient_z = device_gradients_z[device];
        // calculate the number of slices this device handles
        device_slice_count = total_slice_count / streamContainer->getNumberOfDevices()
                             + ((total_slice_count % streamContainer->getNumberOfDevices()) > device);
        for (int stream = 0; stream < streamContainer->getStreamsPerDevice(); stream++) {
            // determine the count of slices handled by this stream
            int slice_count = device_slice_count / streamContainer->getStreamsPerDevice()
                              + ((device_slice_count % streamContainer->getStreamsPerDevice()) > stream);
            check_errors(__FILE__, __LINE__);
            // get the launch configuration for the f1-inject
            dim3 block, grid;
            cube->getLaunchConfiguration(&grid, &block, slice_count, INJECT_BLOCK_SIZE);
            if (slice_count > 0) {
                // inject every bubble of this container to the cube
                for (int i = 0; i < this->getBubbleCount(); i++) {
                    bubble = this->getBubbleWithLocalOrderNumber(i);
                    // wait that the bubble is uploaded to the device before starting
                    if (stream == 0) bubble->waitBubbleUploaded(device);
                    // dispatch on lmin and the evaluate_* flags; the boolean
                    // template parameters of Bubbles_evaluate_grid_gradients are
                    // <lmin0, value, grad_x, grad_y, grad_z>
                    if (lmin == 0) {
                        if (evaluate_gradients_x && evaluate_gradients_y && evaluate_gradients_z) {
                            if (evaluate_value) {
                                // value + all three gradient directions
                                hipLaunchKernelGGL(( Bubbles_evaluate_grid_gradients < true, true, true, true, true>)
                                    , dim3(grid), dim3(block), INJECT_BLOCK_SIZE * sizeof(double) * 8,
                                      *streamContainer->getStream(device, stream) ,
                                           bubble->device_copies[device],
                                           dev_cube,
                                           dev_gradient_x,
                                           dev_gradient_y,
                                           dev_gradient_z,
                                           grid3d->axis[X_]->device_gridpoints[device],
                                           grid3d->axis[Y_]->device_gridpoints[device],
                                           grid3d->axis[Z_]->device_gridpoints[device],
                                           grid3d->shape[X_],
                                           grid3d->shape[Y_],
                                           grid3d->shape[Z_],
                                           bubble->crd[X_],
                                           bubble->crd[Y_],
                                           bubble->crd[Z_],
                                           bubble->k,
                                           slice_offset,
                                           device_pitches[device],
                                           device_memory_shape[Y_],
                                           slice_count,
                                           1.0);
                            }
                            else {
                                // all three gradient directions, no value
                                hipLaunchKernelGGL(( Bubbles_evaluate_grid_gradients < true, false, true, true, true>)
                                    , dim3(grid), dim3(block), INJECT_BLOCK_SIZE * sizeof(double) * 8,
                                      *streamContainer->getStream(device, stream) ,
                                           bubble->device_copies[device],
                                           dev_cube,
                                           dev_gradient_x,
                                           dev_gradient_y,
                                           dev_gradient_z,
                                           grid3d->axis[X_]->device_gridpoints[device],
                                           grid3d->axis[Y_]->device_gridpoints[device],
                                           grid3d->axis[Z_]->device_gridpoints[device],
                                           grid3d->shape[X_],
                                           grid3d->shape[Y_],
                                           grid3d->shape[Z_],
                                           bubble->crd[X_],
                                           bubble->crd[Y_],
                                           bubble->crd[Z_],
                                           bubble->k,
                                           slice_offset,
                                           device_pitches[device],
                                           device_memory_shape[Y_],
                                           slice_count,
                                           1.0);
                            }
                        }
                        else if (evaluate_gradients_x) {
                            // x-gradient only
                            hipLaunchKernelGGL(( Bubbles_evaluate_grid_gradients < true, false, true, false, false>)
                                , dim3(grid), dim3(block), INJECT_BLOCK_SIZE * sizeof(double) * 8,
                                  *streamContainer->getStream(device, stream) ,
                                       bubble->device_copies[device],
                                       dev_cube,
                                       dev_gradient_x,
                                       dev_gradient_y,
                                       dev_gradient_z,
                                       grid3d->axis[X_]->device_gridpoints[device],
                                       grid3d->axis[Y_]->device_gridpoints[device],
                                       grid3d->axis[Z_]->device_gridpoints[device],
                                       grid3d->shape[X_],
                                       grid3d->shape[Y_],
                                       grid3d->shape[Z_],
                                       bubble->crd[X_],
                                       bubble->crd[Y_],
                                       bubble->crd[Z_],
                                       bubble->k,
                                       slice_offset,
                                       device_pitches[device],
                                       device_memory_shape[Y_],
                                       slice_count,
                                       1.0);
                        }
                        else if (evaluate_gradients_y) {
                            // y-gradient only
                            hipLaunchKernelGGL(( Bubbles_evaluate_grid_gradients < true, false, false, true, false>)
                                , dim3(grid), dim3(block), INJECT_BLOCK_SIZE * sizeof(double) * 8,
                                  *streamContainer->getStream(device, stream) ,
                                       bubble->device_copies[device],
                                       dev_cube,
                                       dev_gradient_x,
                                       dev_gradient_y,
                                       dev_gradient_z,
                                       grid3d->axis[X_]->device_gridpoints[device],
                                       grid3d->axis[Y_]->device_gridpoints[device],
                                       grid3d->axis[Z_]->device_gridpoints[device],
                                       grid3d->shape[X_],
                                       grid3d->shape[Y_],
                                       grid3d->shape[Z_],
                                       bubble->crd[X_],
                                       bubble->crd[Y_],
                                       bubble->crd[Z_],
                                       bubble->k,
                                       slice_offset,
                                       device_pitches[device],
                                       device_memory_shape[Y_],
                                       slice_count,
                                       1.0);
                        }
                        else if (evaluate_gradients_z) {
                            // z-gradient only
                            hipLaunchKernelGGL(( Bubbles_evaluate_grid_gradients < true, false, false, false, true>)
                                , dim3(grid), dim3(block), INJECT_BLOCK_SIZE * sizeof(double) * 8,
                                  *streamContainer->getStream(device, stream) ,
                                       bubble->device_copies[device],
                                       dev_cube,
                                       dev_gradient_x,
                                       dev_gradient_y,
                                       dev_gradient_z,
                                       grid3d->axis[X_]->device_gridpoints[device],
                                       grid3d->axis[Y_]->device_gridpoints[device],
                                       grid3d->axis[Z_]->device_gridpoints[device],
                                       grid3d->shape[X_],
                                       grid3d->shape[Y_],
                                       grid3d->shape[Z_],
                                       bubble->crd[X_],
                                       bubble->crd[Y_],
                                       bubble->crd[Z_],
                                       bubble->k,
                                       slice_offset,
                                       device_pitches[device],
                                       device_memory_shape[Y_],
                                       slice_count,
                                       1.0);
                        }
                        else if (evaluate_value) {
                            // value only (pitched memory layout)
                            hipLaunchKernelGGL(( Bubbles_evaluate_grid_pitched)
                                , dim3(grid), dim3(block), INJECT_BLOCK_SIZE * sizeof(double) * 8,
                                  *streamContainer->getStream(device, stream) ,
                                       bubble->device_copies[device],
                                       dev_cube,
                                       grid3d->axis[X_]->device_gridpoints[device],
                                       grid3d->axis[Y_]->device_gridpoints[device],
                                       grid3d->axis[Z_]->device_gridpoints[device],
                                       grid3d->shape[X_],
                                       grid3d->shape[Y_],
                                       grid3d->shape[Z_],
                                       bubble->crd[X_],
                                       bubble->crd[Y_],
                                       bubble->crd[Z_],
                                       bubble->k,
                                       slice_offset,
                                       device_pitches[device],
                                       device_memory_shape[Y_],
                                       slice_count,
                                       1.0);
                        }
                    }
                    else if (evaluate_value) {
                        // lmin > 0: value evaluation starting from l = lmin
                        hipLaunchKernelGGL(( Bubbles_evaluate_grid_lmin)
                            , dim3(grid), dim3(block), INJECT_BLOCK_SIZE * sizeof(double) * 8,
                              *streamContainer->getStream(device, stream) ,
                                   bubble->device_copies[device],
                                   dev_cube,
                                   grid3d->axis[X_]->device_gridpoints[device],
                                   grid3d->axis[Y_]->device_gridpoints[device],
                                   grid3d->axis[Z_]->device_gridpoints[device],
                                   grid3d->shape[X_],
                                   grid3d->shape[Y_],
                                   grid3d->shape[Z_],
                                   bubble->crd[X_],
                                   bubble->crd[Y_],
                                   bubble->crd[Z_],
                                   bubble->k,
                                   slice_offset,
                                   device_pitches[device],
                                   device_memory_shape[Y_],
                                   slice_count,
                                   lmin,
                                   1.0);
                    }
                    check_errors(__FILE__, __LINE__);
                }
            }
            // advance the device addresses by the number of slices handled by
            // this stream (pitch is in bytes, hence the division by sizeof(double))
            if (evaluate_value) dev_cube += slice_count * device_pitches[device] / sizeof(double) * device_memory_shape[Y_];
            if (evaluate_gradients_x) dev_gradient_x += slice_count * device_pitches[device] / sizeof(double) * device_memory_shape[Y_];
            if (evaluate_gradients_y) dev_gradient_y += slice_count * device_pitches[device] / sizeof(double) * device_memory_shape[Y_];
            if (evaluate_gradients_z) dev_gradient_z += slice_count * device_pitches[device] / sizeof(double) * device_memory_shape[Y_];
            slice_offset += slice_count;
        }
    }
}
/**************************************************************
* BubblesEvaluator function implementations *
**************************************************************/
/*
* Evaluate the bubbles at preset points. The results are stored in the device memory.
*
* @param gradient_direction - possible values X_ = 0, Y_ = 1, Z_ = 2, (X_, Y_, Z_) = 3 && this->evaluateGradients
* anything else: no gradients
*/
/*
 * Evaluates all bubbles of this->bubbles at the coordinates of
 * 'result_points'; results accumulate in device memory. The points are
 * distributed warp-aligned over all devices and streams.
 *
 * @param result_points      - coordinates to evaluate at and (for value
 *                             evaluation) the destination values
 * @param gradient_points_x/y/z - destinations for gradients when
 *                             gradient_direction == 3
 * @param gradient_direction - X_ (0), Y_ (1) or Z_ (2): evaluate that single
 *                             gradient component into result_points;
 *                             3: evaluate all three gradients into the
 *                             gradient_points_* objects; any other value:
 *                             evaluate the plain values only
 */
void BubblesEvaluator::evaluatePoints(Points *result_points, Points *gradient_points_x, Points *gradient_points_y, Points *gradient_points_z, int gradient_direction) {
    int warp_size = 32;
    // round the total point count up to full warps
    int total_warp_count = result_points->point_coordinates->number_of_points / warp_size + ((result_points->point_coordinates->number_of_points % warp_size) > 0);
    int point_offset = 0;
    for (int device = 0; device < this->streamContainer->getNumberOfDevices(); device ++) {
        this->streamContainer->setDevice(device);
        // warps (and thus points) handled by this device; counts stay
        // warp-aligned so each device starts on a warp boundary
        int device_warp_count = total_warp_count / this->streamContainer->getNumberOfDevices()
                                + ((total_warp_count % this->streamContainer->getNumberOfDevices()) > device);
        int device_point_count = device_warp_count * warp_size;
        int device_point_offset = 0;
        check_errors(__FILE__, __LINE__);
        // get the pointers to the device points & results
        double *device_points_ptr = result_points->point_coordinates->device_coordinates[device];
        double *device_results_ptr = result_points->device_values[device];
        double *device_gradients_x_ptr = NULL;
        double *device_gradients_y_ptr = NULL;
        double *device_gradients_z_ptr = NULL;
        if (gradient_direction == 3) {
            // all three gradients go to their own Points objects
            device_gradients_x_ptr = gradient_points_x->device_values[device];
            device_gradients_y_ptr = gradient_points_y->device_values[device];
            device_gradients_z_ptr = gradient_points_z->device_values[device];
        }
        else if (gradient_direction < 3 && gradient_direction >= 0) {
            // a single gradient component is written to the result values;
            // all three pointers alias the same buffer, the kernel template
            // flags select which one is actually used
            device_gradients_x_ptr = result_points->device_values[device];
            device_gradients_y_ptr = result_points->device_values[device];
            device_gradients_z_ptr = result_points->device_values[device];
        }
        for (int stream = 0; stream < this->streamContainer->getStreamsPerDevice(); stream ++) {
            // get the number of points that are in the responsibility of this stream
            int stream_warp_count = device_warp_count / this->streamContainer->getStreamsPerDevice()
                                    + ((device_warp_count % streamContainer->getStreamsPerDevice()) > stream);
            int stream_point_count = stream_warp_count * warp_size;
            // make sure that the last stream does not go over board
            // (the warp rounding above can overshoot the real point count)
            if (stream_point_count + point_offset > result_points->point_coordinates->number_of_points) {
                stream_point_count = result_points->point_coordinates->number_of_points - point_offset;
            }
            check_errors(__FILE__, __LINE__);
            if (stream_point_count > 0) {
                for (int i = 0; i < this->bubbles->getBubbleCount(); i++) {
                    Bubble *bubble = this->bubbles->getBubbleWithLocalOrderNumber(i);
                    // wait that the bubble is uploaded before calling the kernel
                    if (stream == 0) bubble->waitBubbleUploaded(device);
                    int grid_size = (stream_point_count + INJECT_BLOCK_SIZE - 1) / INJECT_BLOCK_SIZE;
                    //printf("ibub: %d, device: %d, stream: %d, grid_size: %d, block_size: %d, stream_point_count: %d, device_point_offset: %d, device_point_count: %d, point_count: %d\n",
                    //        ibub, device, stream, grid_size, INJECT_BLOCK_SIZE, stream_point_count, device_point_offset, device_point_count, this->point_count);
                    // dispatch on gradient_direction; the boolean template
                    // parameters of Bubbles_evaluate_gradient_points select
                    // <.., value, grad_x, grad_y, grad_z>
                    if (gradient_direction == X_) {
                        hipLaunchKernelGGL(( Bubbles_evaluate_gradient_points
                            <true, false, true, false, false>)
                                , dim3(grid_size), dim3(INJECT_BLOCK_SIZE), INJECT_BLOCK_SIZE * sizeof(double) * 7,
                                  *this->streamContainer->getStream(device, stream) ,
                                   bubble->device_copies[device],
                                   device_results_ptr,
                                   device_gradients_x_ptr,
                                   device_gradients_y_ptr,
                                   device_gradients_z_ptr,
                                   device_points_ptr,
                                   device_point_count,
                                   bubble->crd[X_],
                                   bubble->crd[Y_],
                                   bubble->crd[Z_],
                                   bubble->k,
                                   stream_point_count,
                                   device_point_offset,
                                   1.0
                              );
                    }
                    else if (gradient_direction == Y_) {
                        hipLaunchKernelGGL(( Bubbles_evaluate_gradient_points
                            <true, false, false, true, false>)
                                , dim3(grid_size), dim3(INJECT_BLOCK_SIZE), INJECT_BLOCK_SIZE * sizeof(double) * 7,
                                  *this->streamContainer->getStream(device, stream) ,
                                   bubble->device_copies[device],
                                   device_results_ptr,
                                   device_gradients_x_ptr,
                                   device_gradients_y_ptr,
                                   device_gradients_z_ptr,
                                   device_points_ptr,
                                   device_point_count,
                                   bubble->crd[X_],
                                   bubble->crd[Y_],
                                   bubble->crd[Z_],
                                   bubble->k,
                                   stream_point_count,
                                   device_point_offset,
                                   1.0
                              );
                    }
                    else if (gradient_direction == Z_) {
                        hipLaunchKernelGGL(( Bubbles_evaluate_gradient_points
                            <true, false, false, false, true>)
                                , dim3(grid_size), dim3(INJECT_BLOCK_SIZE), INJECT_BLOCK_SIZE * sizeof(double) * 7,
                                  *this->streamContainer->getStream(device, stream) ,
                                   bubble->device_copies[device],
                                   device_results_ptr,
                                   device_gradients_x_ptr,
                                   device_gradients_y_ptr,
                                   device_gradients_z_ptr,
                                   device_points_ptr,
                                   device_point_count,
                                   bubble->crd[X_],
                                   bubble->crd[Y_],
                                   bubble->crd[Z_],
                                   bubble->k,
                                   stream_point_count,
                                   device_point_offset,
                                   1.0
                              );
                    }
                    else if (gradient_direction == 3) {
                        hipLaunchKernelGGL(( Bubbles_evaluate_gradient_points
                            <true, true, true, true, true>)
                                , dim3(grid_size), dim3(INJECT_BLOCK_SIZE), INJECT_BLOCK_SIZE * sizeof(double) * 7,
                                  *this->streamContainer->getStream(device, stream) ,
                                   bubble->device_copies[device],
                                   device_results_ptr,
                                   device_gradients_x_ptr,
                                   device_gradients_y_ptr,
                                   device_gradients_z_ptr,
                                   device_points_ptr,
                                   device_point_count,
                                   bubble->crd[X_],
                                   bubble->crd[Y_],
                                   bubble->crd[Z_],
                                   bubble->k,
                                   stream_point_count,
                                   device_point_offset,
                                   1.0
                              );
                    }
                    else {
                        // no gradients: plain value evaluation
                        hipLaunchKernelGGL(( Bubbles_evaluate_points_simple)
                                , dim3(grid_size), dim3(INJECT_BLOCK_SIZE), INJECT_BLOCK_SIZE * sizeof(double) * 7,
                                  *this->streamContainer->getStream(device, stream) ,
                                   bubble->device_copies[device],
                                   device_results_ptr,
                                   device_points_ptr,
                                   device_point_count,
                                   bubble->crd[X_],
                                   bubble->crd[Y_],
                                   bubble->crd[Z_],
                                   bubble->k,
                                   stream_point_count,
                                   device_point_offset,
                                   1.0
                              );
                    }
                    check_errors(__FILE__, __LINE__);
                }
            }
            // advance the offsets by the points handled by this stream
            point_offset += stream_point_count;
            device_point_offset += stream_point_count;
        }
        check_errors(__FILE__, __LINE__);
    }
}
/**************************************************************
* Function3DMultiplier-implementation *
**************************************************************/
/*
* Injects the f1_bubbles to this->cube1 and f2_bubbles to this->cube2,
* multiplies this->cube1 with this->cube2 and de-injects the 'result_bubbles'
* from 'this->cube1'
*
* @param f1_bubbles
* @param f2_bubbles
* @param result_bubbles
*/
void Function3DMultiplier::multiply(Bubbles *f1_bubbles, Bubbles *f2_bubbles, Bubbles *result_bubbles) {
int total_slice_count = this->cube1->getShape(Z_);
// the minimum l is 0 always in the multiplication
int device_slice_count;
// get the pointer arrays from the cubes
double **f1_device_cubes = this->cube1->getDeviceCubes();
size_t *f1_device_pitches = this->cube1->getDevicePitches();
double **f2_device_cubes = this->cube2->getDeviceCubes();
size_t *f2_device_pitches = this->cube2->getDevicePitches();
int *f1_device_memory_shape = this->cube1->getDeviceMemoryShape();
int *f2_device_memory_shape = this->cube2->getDeviceMemoryShape();
int f1_shape[3];
f1_shape[X_] = this->cube1->getShape(X_);
f1_shape[Y_] = this->cube1->getShape(Y_);
f1_shape[Z_] = this->cube1->getShape(Z_);
int f2_shape[3];
f2_shape[X_] = this->cube2->getShape(X_);
f2_shape[Y_] = this->cube2->getShape(Y_);
f2_shape[Z_] = this->cube2->getShape(Z_);
int slice_offset = 0;
Bubble *bubble;
// copy the cubes to the device & execute the kernels
for (int device = 0; device < this->streamContainer->getNumberOfDevices(); device ++) {
// set the used device (gpu)
this->streamContainer->setDevice(device);
//hipDeviceSetSharedMemConfig(hipSharedMemBankSizeEightByte);
//int first_block = 0;
double *dev_f1_cube = f1_device_cubes[device];
double *dev_f2_cube = f2_device_cubes[device];
// calculate the number of vectors this device handles
device_slice_count = total_slice_count / this->streamContainer->getNumberOfDevices()
+ ((total_slice_count % this->streamContainer->getNumberOfDevices()) > device);
for (int stream = 0; stream < this->streamContainer->getStreamsPerDevice(); stream++) {
// determine the count of vectors handled by this stream
int slice_count = device_slice_count / this->streamContainer->getStreamsPerDevice()
+ ((device_slice_count % this->streamContainer->getStreamsPerDevice()) > stream);
if (slice_count > 0) {
// get the launch configuration for the f1-inject
dim3 block, grid;
this->cube1->getLaunchConfiguration(&grid, &block, slice_count, INJECT_BLOCK_SIZE);
check_errors(__FILE__, __LINE__);
// inject the f1 bubbles to the f1_cube (and sum)
for (int i = 0; i < f1_bubbles->getBubbleCount(); i++) {
bubble = f1_bubbles->getBubbleWithLocalOrderNumber(i);
// wait that the bubble is uploaded to the device before starting
if (stream == 0) bubble->waitBubbleUploaded(device);
hipLaunchKernelGGL(( Bubbles_evaluate_grid_pitched)
, dim3(grid), dim3(block), INJECT_BLOCK_SIZE * sizeof(double) * 7,
*this->streamContainer->getStream(device, stream) ,
bubble->device_copies[device],
dev_f1_cube,
this->grid->axis[X_]->device_gridpoints[device],
this->grid->axis[Y_]->device_gridpoints[device],
this->grid->axis[Z_]->device_gridpoints[device],
f1_shape[X_],
f1_shape[Y_],
f1_shape[Z_],
bubble->crd[X_],
bubble->crd[Y_],
bubble->crd[Z_],
bubble->k,
slice_offset,
f1_device_pitches[device],
f1_device_memory_shape[Y_],
slice_count,
1.0);
check_errors(__FILE__, __LINE__);
// printf("after offending kernel\n");
// fflush(stdout);
}
check_errors(__FILE__, __LINE__);
// get the launch configuration for the f2-inject
this->cube2->getLaunchConfiguration(&grid, &block, slice_count, INJECT_BLOCK_SIZE);
// inject the f2 bubbles to the f1_cube (and sum)
for (int i = 0; i < f2_bubbles->getBubbleCount(); i++) {
bubble = f2_bubbles->getBubbleWithLocalOrderNumber(i);
// wait that the bubble is uploaded to the device before starting
if (stream == 0) bubble->waitBubbleUploaded(device);
// call the kernel
hipLaunchKernelGGL(( Bubbles_evaluate_grid_pitched)
, dim3(grid), dim3(block), INJECT_BLOCK_SIZE * sizeof(double) * 7,
*this->streamContainer->getStream(device, stream) ,
bubble->device_copies[device],
dev_f2_cube,
this->grid->axis[X_]->device_gridpoints[device],
this->grid->axis[Y_]->device_gridpoints[device],
this->grid->axis[Z_]->device_gridpoints[device],
f2_shape[X_],
f2_shape[Y_],
f2_shape[Z_],
bubble->crd[X_],
bubble->crd[Y_],
bubble->crd[Z_],
bubble->k,
slice_offset,
f2_device_pitches[device],
f2_device_memory_shape[Y_],
slice_count,
1.0);
check_errors(__FILE__, __LINE__);
}
// get the launch configuration for the multiplication and result-inject
this->cube2->getLaunchConfiguration(&grid, &block, slice_count, INJECT_BLOCK_SIZE);
// multiply dev_f1_cube with dev_f2_cube and store the result to dev_f1_cube
multiply_3d_cubes(dev_f1_cube, f1_shape[X_], f1_shape[Y_], f1_device_memory_shape[Y_], f1_device_pitches[device],
dev_f2_cube, f2_shape[X_], f2_shape[Y_], f2_device_memory_shape[Y_], f2_device_pitches[device],
slice_count, &grid, &block, this->streamContainer->getStream(device, stream));
check_errors(__FILE__, __LINE__);
// de-inject (deduct) the result bubbles from the dev_f1_cube
for (int i = 0; i < result_bubbles->getBubbleCount(); i++) {
bubble = result_bubbles->getBubbleWithLocalOrderNumber(i);
// wait that the bubble is uploaded to the device before starting
if (stream == 0) bubble->waitBubbleUploaded(device);
// call the kernel
hipLaunchKernelGGL(( Bubbles_evaluate_grid_pitched)
, dim3(grid), dim3(block), INJECT_BLOCK_SIZE * sizeof(double) * 7,
*this->streamContainer->getStream(device, stream) ,
bubble->device_copies[device],
dev_f1_cube,
this->grid->axis[X_]->device_gridpoints[device],
this->grid->axis[Y_]->device_gridpoints[device],
this->grid->axis[Z_]->device_gridpoints[device],
f1_shape[X_],
f1_shape[Y_],
f1_shape[Z_],
bubble->crd[X_],
bubble->crd[Y_],
bubble->crd[Z_],
bubble->k,
slice_offset,
f1_device_pitches[device],
f1_device_memory_shape[Y_],
slice_count,
-1.0);
check_errors(__FILE__, __LINE__);
}
// increase the address by the number of vectors in this array
// something else
dev_f1_cube += slice_count * f1_device_pitches[device] / sizeof(double) * f1_device_memory_shape[Y_];
dev_f2_cube += slice_count * f2_device_pitches[device] / sizeof(double) * f2_device_memory_shape[Y_];
slice_offset += slice_count;
}
}
}
}
/********************************************
* Fortran interfaces *
********************************************/
/* Fortran-callable wrapper: add the contents of 'bubbles1' into 'bubbles'
 * (delegates entirely to Bubbles::add). */
extern "C" void bubbles_add_cuda(Bubbles *bubbles, Bubbles *bubbles1) {
    bubbles->add(bubbles1);
}
/* Fortran-callable wrapper: return a new Bubbles object consisting of the
 * bubbles whose identifiers are listed in 'ibubs' (length 'nbub'). */
extern "C" Bubbles* bubbles_get_sub_bubbles_cuda(Bubbles *bubbles, int *ibubs, int nbub) {
    return bubbles->getSubBubbles(ibubs, nbub);
}
/* Fortran-callable wrapper: allocate an empty Bubbles container sized for
 * 'nbub' bubbles; checks for pending CUDA errors after construction. */
extern "C" Bubbles *bubbles_init_cuda(int nbub) {
    Bubbles *new_bubbles = new Bubbles(nbub);
    check_errors(__FILE__, __LINE__);
    return new_bubbles;
}
/*
*
* @param id - local index of the bubble inited in Fortran format: first index is 1.
*/
/*
 * Fortran-callable wrapper: initialize one bubble of 'bubbles'.
 *
 * @param grid   - radial (1D) grid of the bubble
 * @param i      - local index of the bubble in Fortran format (first index is 1);
 *                 converted to 0-based before the call
 * @param ibub   - global order number of the bubble
 * @param center - bubble center coordinates
 * @param lmax   - maximum angular momentum quantum number 'l'
 * @param k      - the bubble's k parameter (r^k prefactor exponent — TODO confirm)
 * @param charge - charge associated with the bubble
 */
extern "C" void bubble_init_cuda(Bubbles *bubbles, Grid1D *grid, int i, int ibub, double center[3], int lmax,
                                 int k, double charge, StreamContainer *streamContainer) {
    bubbles->initBubble(grid, i-1, ibub, center, lmax, k, charge, streamContainer);
    check_errors(__FILE__, __LINE__);
}
/*
* Upload the content ('bf') of the Bubble with global order number 'ibub' to the device.
*
* @param ibub - tHe global order number of the bubble
*/
/*
 * Upload the content ('bf') of the Bubble with global order number 'ibub'
 * to the device, also setting its 'k' value.  A no-op if this process does
 * not hold the bubble.
 *
 * @param ibub - the global order number of the bubble
 * @param lmax - maximum 'l' present in the uploaded data
 * @param k    - the k value stored into the bubble before upload
 * @param bf   - host-side function values to upload
 */
extern "C" void bubble_upload_all_cuda(Bubbles *bubbles, int ibub, int lmax, int k, double *bf) {
    if (bubbles->containsBubble(ibub)) {
        bubbles->getBubble(ibub)->k = k;
        bubbles->getBubble(ibub)->uploadAll(bf, lmax);
        check_errors(__FILE__, __LINE__);
    }
}
/* Upload the content ('bf') of bubble 'ibub' (without touching its k value).
 * A no-op if this process does not hold the bubble. */
extern "C" void bubble_upload_cuda(Bubbles *bubbles, int ibub, int lmax, double *bf) {
    if (bubbles->containsBubble(ibub)) {
        bubbles->getBubble(ibub)->upload(bf, lmax);
        check_errors(__FILE__, __LINE__);
    }
}
/* Add the bubble 'ibub' of 'bubbles1' into the corresponding bubble of
 * 'bubbles'. */
extern "C" void bubble_add_cuda(Bubbles *bubbles, Bubbles *bubbles1, int ibub) {
    bubbles->getBubble(ibub)->add(bubbles1->getBubble(ibub));
    check_errors(__FILE__, __LINE__);
}
/* Release device resources held by 'bubbles' and delete the object.
 * Safe to call with a NULL pointer. */
extern "C" void bubbles_destroy_cuda(Bubbles* bubbles){
    if (bubbles) {
        bubbles->destroy();
        delete bubbles;
        check_errors(__FILE__, __LINE__);
    }
}
/* Integrate the bubbles over all space and return the result. */
extern "C" double bubbles_integrate_cuda(Bubbles *bubbles) {
    return bubbles->integrate();
}
/* Tell the bubbles which MPI-style processor slot this process occupies,
 * so work can be divided between processors. */
extern "C" void bubbles_set_processor_configuration_cuda(Bubbles *bubbles, int processor_order_number, int number_of_processors) {
    bubbles->setProcessorConfiguration(processor_order_number, number_of_processors);
}
/* Inject (sum) the bubbles, from quantum number 'lmin' upward, into the
 * device-resident cube. */
extern "C" void bubbles_inject_cuda(Bubbles *bubbles, Grid3D *grid, int lmin, CudaCube *cube) {
    bubbles->inject(grid, cube, lmin);
}
/* Bind the host array 'cube' (starting at 'offset', with the given host
 * shape) to 'cudaCube', upload it, then inject the bubbles into it. */
extern "C" void bubbles_inject_to_cuda(Bubbles *bubbles, Grid3D *grid, int lmin, CudaCube *cudaCube, double *cube, int offset, int cube_host_shape[3]) {
    cudaCube->initHost(&cube[offset], cube_host_shape, true);
    cudaCube->upload();
    bubbles->inject(grid, cudaCube, lmin);
}
/* Allocate a page-locked (pinned, portable) host buffer large enough for
 * (lmax+1)^2 * shape doubles; the caller owns the buffer and must release
 * it with bubbles_destroy_page_locked_f_cuda. */
extern "C" double *bubbles_init_page_locked_f_cuda(int lmax, int shape){
    //allocated += 1;
    double * result_f;
    check_errors(__FILE__, __LINE__);
    hipHostMalloc((void **)&result_f,
                  sizeof(double) * (lmax+1) * (lmax+1) * shape,
                  hipHostMallocPortable);
    check_errors(__FILE__, __LINE__);
    //printf("Allocated 1, Now allocated %d, address: %ld\n", allocated, result_f);
    return result_f;
}
/* Free a page-locked host buffer obtained from
 * bubbles_init_page_locked_f_cuda. */
extern "C" void bubbles_destroy_page_locked_f_cuda(double * f){
    //allocated -= 1;
    //printf("Deallocated 1, Now allocated %d, address: %ld\n", allocated, f);
    hipHostFree(f);
    check_errors(__FILE__, __LINE__);
}
/*----------------------------------------------------------------------------------*
* Copyright (c) 2010-2018 Pauli Parkkinen, Eelis Solala, Wen-Hua Xu, *
* Sergio Losilla, Elias Toivanen, Jonas Juselius *
* *
* Permission is hereby granted, free of charge, to any person obtaining a copy *
* of this software and associated documentation files (the "Software"), to deal *
* in the Software without restriction, including without limitation the rights *
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell *
* copies of the Software, and to permit persons to whom the Software is *
* furnished to do so, subject to the following conditions: *
* *
* The above copyright notice and this permission notice shall be included in all*
* copies or substantial portions of the Software. *
* *
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR *
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, *
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE *
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER *
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, *
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE *
* SOFTWARE. *
*----------------------------------------------------------------------------------*/
/*! @file bubbles_cuda.cu
*! @brief CUDA implementation of the Bubbles.
*/
#include <stdio.h>
#include <cuda.h>
#include <stdlib.h>
//#include <algorithm> *std::max_element(result_cube, result_cube + totalPointCount)
#include "bubbles_cuda.h"
#include "streamcontainer.h"
#include "grid.h"
#include "spherical_harmonics_cuda.h"
#include "cube.h"
#include "function3d_multiplier.h"
#include "memory_leak_operators.h"
#include "evaluators.h"
#define X_ 0
#define Y_ 1
#define Z_ 2
#define R_ 3
#if (__CUDA_ARCH__ > 350)
#define INJECT_BLOCK_SIZE 256
#else
#define INJECT_BLOCK_SIZE 128
#endif
#define NLIP 7
/** \brief Size of the CUDA blocks in the X dimension */
#define BLOCKDIMX 8
/** \brief Size of the CUDA blocks in the Y dimension */
#define BLOCKDIMY 4
/** \brief Size of the CUDA blocks in the Z dimension */
#define BLOCKDIMZ 4
#define FULL_MASK 0xffffffff
#define STR_HELPER(x) #x
#define STR(x) STR_HELPER(x)
cudaError_t cudastat;
__constant__ int shape_x_, shape_y_, shape_z_, ncell_, nlip_, lmax_, ilmmin_, lmin_, ilmmax_, first_term_, normalization_, ijk_max_;
__constant__ double charge_, r_max_;
cudaStream_t **streams;
int streams_inited = 0;
int allocated = 0;
extern __shared__ double shared_memory[];
/*
 * Debugging helper: print the free and total device memory together with
 * the call site.  The 'filename'/'line_number' parameters were previously
 * accepted but ignored; they are now included in the output so successive
 * calls can be told apart.
 */
__host__ inline void check_memory(const char *filename, const int line_number) {
    size_t mem_tot_0 = 0;
    size_t mem_free_0 = 0;
    cudaMemGetInfo(&mem_free_0, &mem_tot_0);
    // %zu is the portable format specifier for size_t (the previous "%ld"
    // only worked where long and size_t happen to have the same width)
    printf("%s:%d: free memory: %zu, total: %zu\n",
           filename, line_number, mem_free_0, mem_tot_0);
}
/*
 * Load a value through the read-only data cache where supported
 * (__ldg requires compute capability 3.5+); falls back to a plain
 * dereference on older architectures.
 */
template<typename T>
__device__ __forceinline__ T ldg(const T* ptr) {
#if __CUDA_ARCH__ >= 350
    return __ldg(ptr);
#else
    return *ptr;
#endif
}
/*
 * Copy a pitched 3D device array back to a densely-packed host array.
 *
 * @param hstPtr - destination host buffer (width*height*depth doubles,
 *                 row stride = width*sizeof(double))
 * @param width  - row length in elements
 * @param height - number of rows per slice
 * @param depth  - number of slices
 * @param devPtr - source device allocation (pitched)
 * @param pitch  - device row pitch in bytes (as returned by the pitched
 *                 allocation)
 *
 * The result status is stored into the global 'cudastat' and then
 * checked via check_errors.
 */
void cube_download(double *hstPtr, int width, int height ,int depth,
                   void *devPtr, size_t pitch) {
    // Define copy "from device to host" parameters
    cudaMemcpy3DParms d2h={0};
    d2h.srcPtr = make_cudaPitchedPtr(devPtr,
                                     pitch,width,height);
    d2h.dstPtr = make_cudaPitchedPtr((void *)hstPtr,
                                     width*sizeof(double),width,height);
    d2h.extent = make_cudaExtent(width * sizeof(double), height,
                                 depth);
    // cudaMemset3D( d2h.srcPtr, 999, d2h.extent);
    d2h.kind = cudaMemcpyDeviceToHost;
    // cudastat=cudaMemset3D( d2h.srcPtr, 0, d2h.extent);
    // Copy to host
    cudastat = cudaMemcpy3D( &d2h );
    check_errors(__FILE__, __LINE__);
    return;
}
/*
 * Copy a densely-packed host 3D array into a pitched device allocation.
 * Mirror operation of cube_download; dimensions are passed by pointer
 * (Fortran calling convention).
 *
 * NOTE(review): unlike cube_download, the cudaMemcpy3D return status is
 * discarded here and no check_errors call follows — errors surface only
 * at the next checked call.
 */
void cube_upload(double *hstPtr, int *width ,int *height ,int *depth,
                 void *devPtr, size_t pitch) {
    // Define copy "from host to device" parameters
    cudaMemcpy3DParms h2d={0};
    h2d.srcPtr = make_cudaPitchedPtr((void *)hstPtr,
                                     *width*sizeof(double),*width,*height);
    h2d.dstPtr = make_cudaPitchedPtr(devPtr,
                                     pitch,*width,*height);
    h2d.extent = make_cudaExtent(*width * sizeof(double), *height,
                                 *depth);
    h2d.kind = cudaMemcpyHostToDevice;
    // Copy to device
    cudaMemcpy3D( &h2d );
    return;
}
/*
 * Locate the cell of an ascending grid-point array that contains x.
 *
 * @param x - coordinate to locate
 * @param d - grid point array with n+1 points (n cells), ascending
 * @param n - number of cells
 * @return index of the cell containing x, or -1 if x is outside [d[0], d[n]]
 *
 * Implemented as a bisection with a fixed iteration count
 * (log2(n)+1 steps) so that all threads of a warp execute the same
 * number of loop iterations regardless of where x lands.
 * NOTE(review): duplicated below as calculate_icell — keep in sync.
 */
__device__ int icell(double x, double *d, int n){
    if ( ( x > d[n] ) || ( x < d[0] ) ) {
        return -1;
    }
    // i[0]/i[1] are the running lower/upper bracket of the bisection
    int i[2];
    i[0]=0;
    i[1]=n;
    int im=(i[0]+i[1])/2;
    int j;
    int max=log((float)n)/log(2.)+1;
    for(j=0;j<max;j++){
        // branch-free update: the boolean selects which bracket to move
        i[ x<d[im] ] = im;
        im=(i[0]+i[1])/2;
    }
    return im;
}
/*
 * Compute the distance and the unit direction vector from reference point
 * 'ref' to the point (x, y, z).
 *
 * @param dist_vec - out: normalized direction from ref to the point
 * @param dist     - out: distance between ref and the point
 *
 * NOTE(review): divides by *dist when normalizing, so a point exactly at
 * 'ref' yields non-finite components — callers presumably never evaluate
 * at the exact center; verify.
 */
__device__ void calc_rc(double dist_vec[3], double *dist, double ref[3],double x,
                        double y, double z){
    dist_vec[X_]=x-ref[X_];
    dist_vec[Y_]=y-ref[Y_];
    dist_vec[Z_]=z-ref[Z_];
    *dist=sqrt(dist_vec[X_]*dist_vec[X_]+
               dist_vec[Y_]*dist_vec[Y_]+
               dist_vec[Z_]*dist_vec[Z_]);
    dist_vec[X_]/=*dist;
    dist_vec[Y_]/=*dist;
    dist_vec[Z_]/=*dist;
    return;
}
/*
 * Evaluate an n-point Lagrange interpolation at cell coordinate x.
 *
 * @param n   - number of interpolation points (and of polynomials)
 * @param lip - n polynomials of n coefficients each, highest power first,
 *              stored consecutively
 * @param f   - the n function values at the interpolation points
 * @param x   - evaluation point in cell coordinates
 * @return sum over j of f[j] * L_j(x), each L_j evaluated by Horner's scheme
 */
__device__ double eval_lip(int n, double *lip, double *f, double x){
    short i,j;
    double out=0.0;
    for (j=0;j<n;j++){
        // Horner evaluation of the j-th Lagrange polynomial
        double tmp=0.0;
        for (i=0;i<n;i++){
            tmp*= x;
            tmp+= *(lip++);
        }
        out+=tmp*f[j];
    }
    return out;
}
/*
 * Evaluate a polynomial with 'n' coefficients at x using Horner's scheme.
 * Coefficients in 'c' are ordered from the highest power down to the
 * constant term.
 */
__device__ double eval_poly(int n, double *c, double x){
    double result = 0.0;
    for (int term = 0; term < n; term++) {
        result = result * x + c[term];
    }
    return result;
}
/*
* the following function precalculates some common values for the injection.
*
* NOTE: We are setting the cf-array to have 8 * (lmax+1) * (lmax+1) size
* This has several advantages (even if we are using more space and have
* blank spots in the array). 1) Every cell read is coalesced and we don't
* have overlapping requests! Additionally, we avoid divergence of the threads
* of one warp in the injection.
*/
/*
 * Kernel: precalculate, for every (cell, lm) pair of the bubble, the 7
 * interpolation-polynomial coefficients 'cf' and the coefficients 'df' of
 * the radial derivative, from the radial function values bub->f.
 *
 * One thread handles one (cell, lm) pair; results are accumulated in
 * shared memory (8 slots per thread, padded from nlip=7 so that global
 * writes stay aligned) and then written out.
 *
 * @param bub              - bubble whose f/cf/df arrays are used (device copy)
 * @param offset           - global index of the first pair handled by this launch
 * @param number_of_points - number of pairs handled by this launch
 * @param device_f_pitch   - row pitch (bytes) of the bub->f device array
 *
 * NOTE(review): assumes blockDim.x <= 64 (cf_results/df_results hold 8*64
 * doubles) and nlip == 7 — confirm against launch configuration.
 */
__global__ void calc_cf(Bubble *bub, int offset, int number_of_points, size_t device_f_pitch) {
    // get the index within this kernel call
    const int index = blockIdx.x * blockDim.x + threadIdx.x;
    // get the global index
    const int id= index + offset;
    const int icell=id%bub->grid->ncell;
    const int ilm=id/bub->grid->ncell;
    const int nlip = bub->grid->nlip;
    __shared__ double shared_lip[49];
    __shared__ double derivative_lip[42];
    __shared__ double lower_derivative_lip[30];
    __shared__ double cf_results[8*64];
    __shared__ double df_results[8*64];
    double f_i;
    // load the Lagrange interpolation polynomials coefficients to
    // the shared memory
    if (threadIdx.x < (nlip) * (nlip)) {
        shared_lip[threadIdx.x] = bub->grid->lip[threadIdx.x];
    }
    if (threadIdx.x < (nlip) * (nlip-1)) {
        derivative_lip[threadIdx.x] = bub->grid->derivative_lip[threadIdx.x];
    }
    if (threadIdx.x < (nlip-2) * (nlip-1)) {
        lower_derivative_lip[threadIdx.x] = bub->grid->lower_derivative_lip[threadIdx.x];
    }
    __syncthreads();
    if ( index < number_of_points && ilm < ((bub->lmax+1)*(bub->lmax+1)) ) {
        // f points at the first value of this cell; cells share boundary
        // points, hence the (nlip-1) stride
        double *f = bub->f + ilm * device_f_pitch / sizeof(double) + (icell * (bub->grid->nlip-1));
        double *cf = bub->cf + ( ilm * bub->grid->ncell + icell ) * 8;
        double *df = bub->df + ( ilm * bub->grid->ncell + icell ) * 8;
        short i,j;
        double one_per_cell_step = 1.0 / bub->grid->h[icell];
        double *lip=&shared_lip[0];
        double *dlip=&derivative_lip[0];
        double *ldlip=&lower_derivative_lip[0];
        // set the shared memory result array to zero
        for (i=0; i < 8; i++) {
            cf_results[threadIdx.x * 8 + i]=0.0;
            df_results[threadIdx.x * 8 + i]=0.0;
        }
        // evaluate the cf to shared memory: cf_j += f_i * lip[i][j]
        for (i=0; i < nlip; i++) {
            f_i = f[i];
            for (j=0; j < nlip ;j++){
                cf_results[threadIdx.x * 8 + j] += f_i* (*(lip++));
            }
            // NOTE(review, lnw): no good reason is apparent for forcing the
            // derivative at the centre of the bubble to zero, but this flag
            // enforces exactly that for cell 0 — confirm before changing.
            const bool ignore_first = true;
            if(ignore_first){
                // handle the special case of the first cell, where the first
                // data item most likely is not valid
                if (icell == 0) {
                    if (i != 0) {
                        // use the lower-order polynomials that skip point 0
                        for (j = 1 ; j <= nlip-2; j++) {
                            df_results[threadIdx.x * 8 + j] += f_i* (*(ldlip++));
                        }
                    }
                    else {
                        df_results[threadIdx.x * 8] = 0.0;
                    }
                }
                else {
                    for (j=0; j < nlip-1 ;j++) {
                        df_results[threadIdx.x * 8 + j] += f_i* (*(dlip++));
                    }
                }
            }
            else { // no special treatment
                for (j=0; j < nlip-1 ;j++) {
                    df_results[threadIdx.x * 8 + j] += f_i* (*(dlip++));
                }
            }
        }
        // copy the result to device memory; df is scaled from cell
        // coordinates back to real coordinates via 1/h
        for (i=0; i < 8; i++) {
            cf[i] = cf_results[threadIdx.x * 8 + i];
            df[i] = one_per_cell_step * df_results[threadIdx.x * 8 + i];
        }
    }
    return;
}
/*
 * Horner evaluation of a polynomial whose 'n' coefficients are stored from
 * the highest power down to the constant term.
 */
__device__ inline double evaluate_polynomials(int n, const double* __restrict__ c, const double x){
    double acc = 0.0;
    for (int k = 0; k < n; ++k) {
        acc = acc * x + c[k];
    }
    return acc;
}
// __shfl* are defined from 3.x until including 6.x.
// they are replaced by __shfl*_sync
#if (__CUDA_ARCH__ >= 350) && (__CUDA_ARCH__ < 700)
/*
 * Evaluates one granular polynomial for coefficients, and x
 * NOTE: each thread holds a different coefficient value when entering the function
 * NOTE: each x value must be the same for 8 consecutive threads
 * NOTE(review): the original note claimed that upon return each thread has
 *       the same value, but only lane 0 of each 8-lane group accumulates
 *       coefficients 0..6 via __shfl_down; the caller
 *       (evaluate_polynomials_shuffle) indeed stores only the lane-0
 *       result — confirm before relying on other lanes.
 */
__inline__ __device__
double evaluate_polynomials_unit_shuffle(double coefficient, const double x) {
    double result = coefficient;
    // Horner's scheme over 7 coefficients (nlip = 7): at step i, pull the
    // coefficient held i lanes up within the 8-lane group
    for (int i = 1; i < 7; i++) {
        result *= x;
        result += __shfl_down(coefficient, i, 8);
    }
    return result;
}
/*
 * Plain Horner evaluation of an nlip-coefficient polynomial held in
 * per-thread storage; coefficients ordered from highest power down to the
 * constant term.
 */
__inline__ __device__
double evaluate_polynomials_unit_register(const double * __restrict__ coefficients, const double x, int nlip) {
    double result = 0.0;
    while (nlip-- > 0) {
        result *= x;
        result += *(coefficients++);
    }
    return result;
}
/*
 * Forward horizontal rotation step of the 8x8 register transpose
 * (see transpose8): element i of each lane's array is fetched from lane
 * (order_number + i) % 8 of the 8-lane shuffle group; element 0 stays put.
 * Uses the legacy mask-less __shfl, hence compiled only for
 * 3.5 <= __CUDA_ARCH__ < 7.0 (see enclosing #if).
 */
__device__ inline void horizontal_rotate_8f(double coefficients[8], unsigned int order_number) {
    coefficients[1] = __shfl(coefficients[1], (order_number+1)%8, 8);
    coefficients[2] = __shfl(coefficients[2], (order_number+2)%8, 8);
    coefficients[3] = __shfl(coefficients[3], (order_number+3)%8, 8);
    coefficients[4] = __shfl(coefficients[4], (order_number+4)%8, 8);
    coefficients[5] = __shfl(coefficients[5], (order_number+5)%8, 8);
    coefficients[6] = __shfl(coefficients[6], (order_number+6)%8, 8);
    coefficients[7] = __shfl(coefficients[7], (order_number+7)%8, 8);
}
/*
 * Backward horizontal rotation — the inverse shuffle of
 * horizontal_rotate_8f: element i is fetched from lane
 * (order_number + (8 - i)) % 8 of the 8-lane group; element 0 stays put.
 * Legacy mask-less __shfl; compiled only for 3.5 <= __CUDA_ARCH__ < 7.0.
 */
__device__ inline void horizontal_rotate_8b(double coefficients[8], unsigned int order_number) {
    coefficients[1] = __shfl(coefficients[1], (order_number+7)%8, 8);
    coefficients[2] = __shfl(coefficients[2], (order_number+6)%8, 8);
    coefficients[3] = __shfl(coefficients[3], (order_number+5)%8, 8);
    coefficients[4] = __shfl(coefficients[4], (order_number+4)%8, 8);
    coefficients[5] = __shfl(coefficients[5], (order_number+3)%8, 8);
    coefficients[6] = __shfl(coefficients[6], (order_number+2)%8, 8);
    coefficients[7] = __shfl(coefficients[7], (order_number+1)%8, 8);
}
/*
 * Vertical rotation step of the 8x8 register transpose (see transpose8):
 * rotate the lane-local 8-element array downwards by 'order_number'
 * positions (src[i] receives the value previously in
 * src[(i - order_number) mod 8]).  Written as a cascade of predicated
 * selects, one group per possible order_number, instead of dynamic
 * indexing, so that 'src' stays in registers rather than spilling to
 * local memory.
 *
 * BUG FIX: in the order_number == 3 group, the final select (writing the
 * wrapped-around element into src[3]) previously tested
 * (order_number == 2), so a rotation by 3 never received its saved
 * element and a rotation by 2 overwrote src[3] incorrectly.
 */
__device__ inline void vertical_rotate_8(double src[8], unsigned int order_number) {
    double tmp = src[0];

    src[0] = (order_number == 1) ? src[7] : src[0];
    src[7] = (order_number == 1) ? src[6] : src[7];
    src[6] = (order_number == 1) ? src[5] : src[6];
    src[5] = (order_number == 1) ? src[4] : src[5];
    src[4] = (order_number == 1) ? src[3] : src[4];
    src[3] = (order_number == 1) ? src[2] : src[3];
    src[2] = (order_number == 1) ? src[1] : src[2];
    src[1] = (order_number == 1) ? tmp    : src[1];

    src[1] = (order_number == 2) ? src[7] : src[1];
    src[0] = (order_number == 2) ? src[6] : src[0];
    src[7] = (order_number == 2) ? src[5] : src[7];
    src[6] = (order_number == 2) ? src[4] : src[6];
    src[5] = (order_number == 2) ? src[3] : src[5];
    src[4] = (order_number == 2) ? src[2] : src[4];
    src[3] = (order_number == 2) ? src[1] : src[3];
    src[2] = (order_number == 2) ? tmp    : src[2];

    src[2] = (order_number == 3) ? src[7] : src[2];
    src[1] = (order_number == 3) ? src[6] : src[1];
    src[0] = (order_number == 3) ? src[5] : src[0];
    src[7] = (order_number == 3) ? src[4] : src[7];
    src[6] = (order_number == 3) ? src[3] : src[6];
    src[5] = (order_number == 3) ? src[2] : src[5];
    src[4] = (order_number == 3) ? src[1] : src[4];
    // was "(order_number == 2)" — see BUG FIX note above
    src[3] = (order_number == 3) ? tmp    : src[3];

    src[3] = (order_number == 4) ? src[7] : src[3];
    src[2] = (order_number == 4) ? src[6] : src[2];
    src[1] = (order_number == 4) ? src[5] : src[1];
    src[0] = (order_number == 4) ? src[4] : src[0];
    src[7] = (order_number == 4) ? src[3] : src[7];
    src[6] = (order_number == 4) ? src[2] : src[6];
    src[5] = (order_number == 4) ? src[1] : src[5];
    src[4] = (order_number == 4) ? tmp    : src[4];

    src[4] = (order_number == 5) ? src[7] : src[4];
    src[3] = (order_number == 5) ? src[6] : src[3];
    src[2] = (order_number == 5) ? src[5] : src[2];
    src[1] = (order_number == 5) ? src[4] : src[1];
    src[0] = (order_number == 5) ? src[3] : src[0];
    src[7] = (order_number == 5) ? src[2] : src[7];
    src[6] = (order_number == 5) ? src[1] : src[6];
    src[5] = (order_number == 5) ? tmp    : src[5];

    src[5] = (order_number == 6) ? src[7] : src[5];
    src[4] = (order_number == 6) ? src[6] : src[4];
    src[3] = (order_number == 6) ? src[5] : src[3];
    src[2] = (order_number == 6) ? src[4] : src[2];
    src[1] = (order_number == 6) ? src[3] : src[1];
    src[0] = (order_number == 6) ? src[2] : src[0];
    src[7] = (order_number == 6) ? src[1] : src[7];
    src[6] = (order_number == 6) ? tmp    : src[6];

    src[6] = (order_number == 7) ? src[7] : src[6];
    src[5] = (order_number == 7) ? src[6] : src[5];
    src[4] = (order_number == 7) ? src[5] : src[4];
    src[3] = (order_number == 7) ? src[4] : src[3];
    src[2] = (order_number == 7) ? src[3] : src[2];
    src[1] = (order_number == 7) ? src[2] : src[1];
    src[0] = (order_number == 7) ? src[1] : src[0];
    src[7] = (order_number == 7) ? tmp    : src[7];
}
/*
 * Transpose an 8x8 matrix that is distributed over an 8-lane shuffle group
 * (one 8-element register array per lane), using the classic
 * rotate / vertical-rotate / rotate-back trick.  'order_number' is the
 * lane's index (0..7) within the group.
 */
__device__ inline void transpose8(double coefficients[8], int order_number) {
    //printf("Original coefficients %d: %f, %f, %f, %f, %f, %f, %f, %f\n", order_number, coefficients[0], coefficients[1], coefficients[2], coefficients[3], coefficients[4], coefficients[5], coefficients[6], coefficients[7]);
    horizontal_rotate_8f(coefficients, order_number);
    vertical_rotate_8(coefficients, order_number);
    horizontal_rotate_8b(coefficients, order_number);
    //printf("Transposed coefficients coefficients %d: %f, %f, %f, %f, %f, %f, %f, %f\n", order_number, coefficients[0], coefficients[1], coefficients[2], coefficients[3], coefficients[4], coefficients[5], coefficients[6], coefficients[7]);
}
/*
* Evaluates the polynomials using shuffle actions. This saves the shared_memory significantly and allows
* the increase of the occupancy of the devices.
*
* This function only needs blockDim.x * 8 bytes of shared memory. This allows the usage of any sized blocks
* that are practically useful.
*
* The number of arithmetic operations is larger than for the version using shared memory only, and thus
* the effect to the execution speed remains to be seen.
*/
/*
 * Shuffle-based variant of the polynomial evaluation: each group of 8
 * consecutive threads cooperates to evaluate the 8 group members'
 * polynomials, needing only blockDim.x doubles of shared memory
 * (one result slot per thread).
 *
 * @param address - base offset of this thread's coefficients in 'c'
 * @param c       - coefficient array in global memory
 * @param x       - evaluation point (cell coordinates)
 * @param nlip    - unused here; evaluate_polynomials_unit_shuffle assumes 7
 */
__device__ inline double evaluate_polynomials_shuffle(const int address,
                                                      const double * __restrict__ c,
                                                      const double x,
                                                      const int nlip) {
    double *result = &shared_memory[0];
    //double coefficients[8];
    //double res;
    int remainder = threadIdx.x%8;
    int base_address = 8*(threadIdx.x/8);
    double res;
    for (int i = 0; i < 8; i ++) {
        // evaluate the polynomials
        // NOTE: __shfl(address, i, width=8) gets the address needed by the thread i/8 in the thread group
        // NOTE: __shfl(x, i, width = 8) gets the coordinate x of the thread i/8 in the thread group
        // NOTE: the c access (global memory is coalesced),
        // NOTE: shared memorybank conflict should not occur, as every thread in the 8 thread group access
        //       the same address, thus resulting in broadcast.
        //coefficients[i] = c[__shfl(address, i, 8) + remainder];
        res = evaluate_polynomials_unit_shuffle( c[__shfl(address, i, 8) + remainder], __shfl(x, i, 8));
        // only lane 0 of the group holds the complete Horner sum for
        // iteration i; it publishes it to thread (base_address + i)
        if (remainder == 0) result[base_address + i] = res;
    }
    // swap the coefficients to be with their rightful owners
    //transpose8(coefficients, remainder);
    return result[threadIdx.x];
    //return evaluate_polynomials_unit_register(coefficients, x, nlip);
}
#endif
/*
* Get the thread-id within block.
*/
/*
 * Linear index of the calling thread within its 3D thread block
 * (x varies fastest, then y, then z).
 */
__device__ inline int getThreadId() {
    int id = threadIdx.z;
    id = id * blockDim.y + threadIdx.y;
    id = id * blockDim.x + threadIdx.x;
    return id;
}
/*
* @param c, bubbles coefficients in the global memory
* @param x, the coordinate of the point in cell coordinates
*
* NOTE: The parameter 'c' must be pitched for this function to be useful
* NOTE: This function is made for NLIP:7, with other nlip values, the function must be remade
*/
/*
 * Evaluate this thread's nlip-term polynomial at x, staging the
 * coefficients through shared memory so that global reads are coalesced:
 * within each group of 8 threads, the 8 coefficient sets are fetched
 * cooperatively (7 coefficients per set; every eighth lane idles) and then
 * each thread runs Horner's scheme on its own 7 coefficients.
 *
 * Requires 7 doubles of shared_memory per thread (plus one int per thread
 * on the pre-3.5 fallback path).
 *
 * @param address - base offset of this thread's coefficients in 'c'
 * @param c       - coefficient array in global memory (8-aligned per set)
 * @param x       - evaluation point in cell coordinates
 */
template<int nlip>
__device__ inline
double evaluate_polynomials_shared(const int address, const double* __restrict__ c, const double x) {
    double *coefficients = &shared_memory[0];
    //const float *fc = (const float *)c;
    int threadId = getThreadId();
    const int remainder = threadId%8;
    const int base_address = 8*(threadId/8);
    // each thread owns 7 consecutive shared doubles starting at threadId*7
    const int id = base_address * 7 + remainder;
#if (__CUDA_ARCH__ >= 350) && (__CUDA_ARCH__ < 700)
    // read the coefficients in the shared memory, 8 threads
    // neighbouring each other are reading the global memory
    // coefficients for one thread at the time, starting from 0
    // and going to 7
    int address_7 = __shfl(address, 7, 8);
    if (remainder < 7) { // every eighth lane is idle
        coefficients[id] = ldg<double>(&c[__shfl(address, 0, 8) + remainder]);
        coefficients[id+7] = ldg<double>(&c[__shfl(address, 1, 8) + remainder]);
        coefficients[id+7*2] = ldg<double>(&c[__shfl(address, 2, 8) + remainder]);
        coefficients[id+7*3] = ldg<double>(&c[__shfl(address, 3, 8) + remainder]);
        coefficients[id+7*4] = ldg<double>(&c[__shfl(address, 4, 8) + remainder]);
        coefficients[id+7*5] = ldg<double>(&c[__shfl(address, 5, 8) + remainder]);
        coefficients[id+7*6] = ldg<double>(&c[__shfl(address, 6, 8) + remainder]);
        coefficients[id+7*7] = ldg<double>(&c[address_7 + remainder]);
    }
    /*coefficients[id] = c[__shfl(address, 0, 8) + remainder];
    coefficients[id+8] = c[__shfl(address, 1, 8) + remainder];
    coefficients[id+16] = c[__shfl(address, 2, 8) + remainder];
    coefficients[id+24] = c[__shfl(address, 3, 8) + remainder];
    coefficients[id+32] = c[__shfl(address, 4, 8) + remainder];
    coefficients[id+40] = c[__shfl(address, 5, 8) + remainder];
    coefficients[id+48] = c[__shfl(address, 6, 8) + remainder];
    coefficients[id+56] = c[__shfl(address, 7, 8) + remainder];*/
    /*fcoefficients[id] = fc[__shfl(faddress, 0, 16) + remainder];
    fcoefficients[id+16] = fc[__shfl(faddress, 1, 16) + remainder];
    fcoefficients[id+32] = fc[__shfl(faddress, 2, 16) + remainder];
    fcoefficients[id+48] = fc[__shfl(faddress, 3, 16) + remainder];
    fcoefficients[id+64] = fc[__shfl(faddress, 4, 16) + remainder];
    fcoefficients[id+80] = fc[__shfl(faddress, 5, 16) + remainder];
    fcoefficients[id+96] = fc[__shfl(faddress, 6, 16) + remainder];
    fcoefficients[id+112] = fc[__shfl(faddress, 7, 16) + remainder];
    fcoefficients[id+128] = fc[__shfl(faddress, 8, 16) + remainder];
    fcoefficients[id+144] = fc[__shfl(faddress, 9, 16) + remainder];
    fcoefficients[id+160] = fc[__shfl(faddress, 10, 16) + remainder];
    fcoefficients[id+176] = fc[__shfl(faddress, 11, 16) + remainder];
    fcoefficients[id+192] = fc[__shfl(faddress, 12, 16) + remainder];
    fcoefficients[id+208] = fc[__shfl(faddress, 13, 16) + remainder];
    fcoefficients[id+224] = fc[__shfl(faddress, 14, 16) + remainder];
    fcoefficients[id+240] = fc[__shfl(faddress, 15, 16) + remainder];*/
#elif __CUDA_ARCH__ >= 700
    // Volta+: legacy __shfl is gone; use __shfl_sync with a mask that
    // excludes the idle eighth lanes
    // printf("activemask: %u\n", __activemask());
    int address_7 = __shfl_sync(FULL_MASK, address, 7, 8);
    if (remainder < 7) { // every eighth lane is idle and therefore removed from the mask
        // printf("activemask: %u\n", __activemask());
        coefficients[id] = ldg<double>(&c[__shfl_sync(0b01111111011111110111111101111111, address, 0, 8) + remainder]);
        coefficients[id+7] = ldg<double>(&c[__shfl_sync(0b01111111011111110111111101111111, address, 1, 8) + remainder]);
        coefficients[id+7*2] = ldg<double>(&c[__shfl_sync(0b01111111011111110111111101111111, address, 2, 8) + remainder]);
        coefficients[id+7*3] = ldg<double>(&c[__shfl_sync(0b01111111011111110111111101111111, address, 3, 8) + remainder]);
        coefficients[id+7*4] = ldg<double>(&c[__shfl_sync(0b01111111011111110111111101111111, address, 4, 8) + remainder]);
        coefficients[id+7*5] = ldg<double>(&c[__shfl_sync(0b01111111011111110111111101111111, address, 5, 8) + remainder]);
        coefficients[id+7*6] = ldg<double>(&c[__shfl_sync(0b01111111011111110111111101111111, address, 6, 8) + remainder]);
        coefficients[id+7*7] = ldg<double>(&c[ address_7 + remainder]);
    }
#else
    // pre-3.5 fallback: no shuffle, so broadcast the addresses via shared memory
    // store the addresses to the shared memory
    int *address_array = (int *) &shared_memory[8*blockDim.x * blockDim.y * blockDim.z];
    address_array[threadIdx.x] = address;
    // NOTE(review): this path uses a stride of 8 between coefficient sets
    // while 'id' and the read below use a stride of 7 — looks inconsistent
    // with the shuffle paths; confirm before enabling for old architectures.
    coefficients[id] = c[address_array[base_address] + remainder];
    coefficients[id+8] = c[address_array[base_address +1] + remainder];
    coefficients[id+16] = c[address_array[base_address +2] + remainder];
    coefficients[id+24] = c[address_array[base_address +3] + remainder];
    coefficients[id+32] = c[address_array[base_address +4] + remainder];
    coefficients[id+40] = c[address_array[base_address +5] + remainder];
    coefficients[id+48] = c[address_array[base_address +6] + remainder];
    coefficients[id+56] = c[address_array[base_address +7] + remainder];
#endif
    // Horner's scheme over this thread's own coefficients, unrolled up to
    // the compile-time nlip
    double *coeff = &coefficients[threadId * 7];
    double result = coeff[0];
    if (nlip > 1) {
        result *= x;
        result += coeff[1];
    }
    if (nlip > 2) {
        result *= x;
        result += coeff[2];
    }
    if (nlip > 3) {
        result *= x;
        result += coeff[3];
    }
    if (nlip > 4) {
        result *= x;
        result += coeff[4];
    }
    if (nlip > 5) {
        result *= x;
        result += coeff[5];
    }
    if (nlip > 6) {
        result *= x;
        result += coeff[6];
    }
    return result;
}
/*
 * Locate the cell of an ascending grid-point array 'd' (n cells, n+1
 * points) containing x; returns -1 if x is outside [d[0], d[n]].
 * Fixed-iteration, branch-free bisection (log2(n)+1 steps).
 * NOTE(review): byte-for-byte duplicate of icell() above — keep in sync.
 */
__device__ inline int calculate_icell(double x, double *d, int n){
    if ( ( x > d[n] ) || ( x < d[0] ) ) {
        return -1;
    }
    // i[0]/i[1]: running lower/upper bracket of the bisection
    int i[2];
    i[0]=0;
    i[1]=n;
    int im=(i[0]+i[1])/2;
    int j;
    int max=log((float)n)/log(2.)+1;
    for(j=0;j<max;j++){
        // the comparison result (0 or 1) selects which bracket moves
        i[ x<d[im] ] = im;
        im=(i[0]+i[1])/2;
    }
    return im;
}
/*
 * Map a radial coordinate 'x' onto the analytically-defined non-uniform
 * radial grid: compute the index of the containing cell and the position
 * inside that cell in Lagrange-interpolation convention (cell center = 0,
 * in units of the cell's grid step).
 *
 * The cell boundaries follow x(i) = c / (a/(i*dx) - 1) with
 * c = 8/(charge*sqrt(charge)), a = r_max + c, dx = r_max/ncell,
 * which concentrates cells near the origin for larger charges.
 *
 * @param x                - radial coordinate to locate
 * @param charge           - charge defining the grid density
 * @param r_max            - outermost grid radius
 * @param ncell            - number of radial cells
 * @param nlip             - number of interpolation points per cell
 * @param icell            - out: index of the cell containing x
 * @param in_cell_position - out: position of x relative to the cell center,
 *                           scaled by the cell's grid step
 */
__device__ inline void calculate_icell_radial(const double x, const double charge, const double r_max,
                                              const int ncell, const int nlip,
                                              int *icell, double *in_cell_position) {
    const double dx = r_max/(double)ncell;
    const double c=8.0*rsqrt(charge)/charge;
    const double a = r_max + c;
    *icell = (int)(x * a / ((c + x)*dx));
    double x1 = c / (a/((*icell+1) * dx) - 1.0);
    double x0 = c / (a/(*icell * dx) - 1.0);
    // BUG FIX: the first cell starts at the origin.  This condition used to
    // read "if (icell == 0)", comparing the output *pointer* against NULL
    // (never true for a valid argument); the intent is to test the cell
    // index.  (With IEEE arithmetic the division above already gives
    // x0 == c/inf == 0.0 for cell 0, but the explicit branch makes the
    // intent robust against non-IEEE fast-math compilation.)
    if (*icell == 0) {
        x0 = 0.0;
    }
    double grid_step = (x1-x0) / (nlip-1);
    double center = (x1+x0) / (2.0);
    *in_cell_position= (x - center)/grid_step;
}
/*
 * Compute the displacement vector from the reference point to (x, y, z)
 * and its Euclidean length.
 *
 * @param dist_vec_x/y/z - out: components of the (non-normalized)
 *                         displacement vector
 * @param dist           - out: distance between the two points
 */
inline __device__ void calculate_distance(double &dist_vec_x, double &dist_vec_y, double &dist_vec_z, double &dist,
                                          const double reference_point_x, const double reference_point_y, const double reference_point_z,
                                          const double x, const double y, const double z){
    // calculate the vector relative to reference_point
    dist_vec_x=x-reference_point_x;
    dist_vec_y=y-reference_point_y;
    dist_vec_z=z-reference_point_z;
    // evaluate the length of the dist_vector, i.e., the distance between dist_vec and reference_point
    dist=sqrt(dist_vec_x * dist_vec_x +
              dist_vec_y * dist_vec_y +
              dist_vec_z * dist_vec_z);
    return;
}
/*
* Evaluates value of single bubble at a point. This is very similar with the
* SolidHarmonics simple evaluation, but the results are multiplied with the
* polynomial evaluations
*/
__device__ inline double Bubbles_evaluate_point_lmin(
// x-coordinate relative to the center of the bubble
const double &x,
// y-coordinate relative to the center of the bubble
const double &y,
// z-coordinate relative to the center of the bubble
const double &z,
// relative distance from the center of the bubble
const double &distance,
// minimum quantum number 'l'
const int &lmin,
// maximum quantum number 'l'
const int &lmax,
// number of cells
const int &ncell,
// number of lagrange integration polynomials per
// cell, i.e., the number of grid points per cell
const int &nlip,
// position inside the cell
const double &r,
// k value for the bubble
const int &k,
// the first address value in bubble for the selected cell
const int &address,
const double* __restrict__ cf
) {
double result = 0.0;
int lm_address = address, address2 = address;
// NOTE: Here the nlip is replaced by 8 because this gives advantages in loading the stuff
// also *cf this should be done
const int ncell_nlip = ncell * 8;
int l, m, l2;
double top = 0.0, bottom = 0.0, new_bottom = 0.0, prev1 = 0.0, prev2 = 0.0, current = 0.0;
double multiplier = 0.0, multiplier2 = 0.0, one_per_r = 1.0 / distance;
double r2 = x*x + y*y + z*z;
l = 0;
// set value for l=0, m=0
if (lmin == 0) {
//printf("x: %f, y: %f, z: %f, nlip: %d, ncell: %d, l: 0, address: %d, cf: %ld, r: %f\n", x, y, z, nlip, ncell, 0, lm_address, cf, r);
//printf("shared_memory address: %ld\n");
//printf("shared memory first value: %f", shared_memory[0]);
result = evaluate_polynomials_shared<NLIP>(lm_address, cf, r);
}
if (lmax >= 1) {
l = 1;
multiplier = one_per_r;
// set value for l=1, m=-1
lm_address += ncell_nlip;
if (lmin <= 1) {
result += y * evaluate_polynomials_shared<NLIP>(lm_address, cf, r) * multiplier;
}
// set all values where m=-1
m = -1;
prev1 = y;
// the starting address has 1 item before from the l=0, 3 from l=1, and 1 from l=2
address2 = address + ncell_nlip * 5;
multiplier2 = multiplier * one_per_r;
for (l = 2; l <= lmax; l++) {
current = ( 2.0*(double)l-1.0) * rsqrt( 1.0*(double)((l+m)*(l-m)) ) * z*prev1;
if (l > 2) {
current -= sqrt( (double)((l+m-1)*(l-m-1)) / (double)((l+m)*(l-m)) ) * r2 * prev2;
}
prev2 = prev1;
prev1 = current;
if (l >= lmin) {
result += current * evaluate_polynomials_shared<NLIP>(address2, cf, r) * multiplier2;
}
// add the address2 to get to the next item with m=-1
address2 += ncell_nlip * (2*l+2);
multiplier2 *= one_per_r;
}
// set value for l=1, m=0
lm_address += ncell_nlip;
if (lmin <= 1) {
result += z * evaluate_polynomials_shared<NLIP>(lm_address, cf, r) * multiplier;
}
// set all values where m=0
prev1 = z;
prev2 = 1.0;
m = 0;
// the starting address has 1 item before from the l=0, 3 from l=1, and 2 from l=2
address2 = address + ncell_nlip * 6;
multiplier2 = multiplier * one_per_r;
for (l = 2; l <= lmax; l++) {
current = ( 2.0*(double)l-1.0) * rsqrt( 1.0*(double)((l+m)*(l-m)) ) * z * prev1;
current -= sqrt( (double)((l+m-1)*(l-m-1)) / (double)((l+m)*(l-m)) ) * r2 * prev2;
prev2 = prev1;
prev1 = current;
if (l >= lmin) {
result += current * evaluate_polynomials_shared<NLIP>(address2, cf, r) * multiplier2;
}
// add the address2 to get to the next item with m=0
address2 += ncell_nlip * (2*l+2);
multiplier2 *= one_per_r;
}
// set value for l=1, m=1
lm_address += ncell_nlip;
if (lmin <= 1) {
result += x * evaluate_polynomials_shared<NLIP>(lm_address, cf, r) * multiplier;
}
// set all values where m=1
prev1 = x;
m = 1;
// the starting address has 1 item before from the l=0, 3 from l=1, and 3 from l=2
address2 = address + ncell_nlip * 7;
multiplier2 = multiplier * one_per_r;
for (l = 2; l <= lmax; l++) {
current = ( 2.0*(double)l-1.0) * rsqrt( 1.0*(double)((l+m)*(l-m)) ) * z*prev1;
if (l > 2) {
current -= sqrt( (double)((l+m-1)*(l-m-1)) / (double)((l+m)*(l-m)) ) * r2 * prev2;
}
prev2 = prev1;
prev1 = current;
if (l >= lmin) {
result += current * evaluate_polynomials_shared<NLIP>(address2, cf, r) * multiplier2;
}
// add the address2 to get to the next item with m=1
address2 += ncell_nlip * (2*l+2);
multiplier2 *= one_per_r;
}
// go through the rest of the stuff
bottom = y; // bottom refers to solid harmonics value with l=l-1 and m=-(l-1)
top = x; // top refers to solid harmonics value with l=l-1 and m=l-1
lm_address += ncell_nlip;
multiplier *= one_per_r;
for (l=2; l <= lmax; l++) {
new_bottom = sqrt((2.0*(double)l - 1.0) / (2.0*(double)l)) *
( y*top + x*bottom);
if (l >= lmin) {
result += new_bottom * evaluate_polynomials_shared<NLIP>(lm_address, cf, r) * multiplier;
}
// set all values where m=-l
m = -l;
prev1 = new_bottom;
address2 = lm_address + (2*l+2) * ncell_nlip;
multiplier2 = multiplier * one_per_r;
for (l2 = l+1; l2 <= lmax; l2++) {
current = ( 2.0*(double)l2-1.0) * rsqrt( 1.0*(double)((l2+m)*(l2-m)) ) * z*prev1;
if (l2 > l+1) {
current -= sqrt( (double)((l2+m-1)*(l2-m-1)) / (double)((l2+m)*(l2-m)) ) * r2 * prev2;
}
prev2 = prev1;
prev1 = current;
if (l2 >= lmin) {
result += current * evaluate_polynomials_shared<NLIP>(address2, cf, r) * multiplier2;
}
// add the address2 to get to the next item with m=l
address2 += ncell_nlip * (2*l2+2);
multiplier2 *= one_per_r;
}
// get value for l=l, m=l. The address is 2*l items away from l=l, m=-l
lm_address += 2*l * ncell_nlip;
top = sqrt((2.0*(double)l - 1.0) / (2.0*(double)l)) *
( x*top-y*bottom );
// set all values where m=l
m = l;
prev1 = top;
address2 = lm_address + (2*l+2) * ncell_nlip;
multiplier2 = multiplier * one_per_r;
for (l2 = l+1; l2 <= lmax; l2++) {
current = ( 2.0*(double)l2-1.0) * rsqrt( 1.0*(double)((l2+m)*(l2-m)) ) * z*prev1;
if (l2 > l+1) {
current -= sqrt( (double)((l2+m-1)*(l2-m-1)) / (double)((l2+m)*(l2-m)) ) * r2 * prev2;
}
prev2 = prev1;
prev1 = current;
if (l2 >= lmin) {
result += current * evaluate_polynomials_shared<NLIP>(address2, cf, r) * multiplier2;
}
// add the address2 to get to the next item with m=l
address2 += ncell_nlip * (2*l2+2);
multiplier2 *= one_per_r;
}
// store the new bottom: l=l, m=-l (we need the old bottom in calculation of top)
bottom = new_bottom;
if (l >= lmin) {
result += top * evaluate_polynomials_shared<NLIP>(lm_address, cf, r) * multiplier;
}
// get next address
lm_address += ncell_nlip;
multiplier *= one_per_r;
}
}
// multiply the result with r^k, if k is not 0
// the distance is not too close to 0.0 as this is checked
// earlier in this function
if (k != 0 && distance > 1e-12) {
result *= pow(distance, (double)k);
}
if (distance < 1e-8) {
result = 1.0 * cf[0]; //evaluate_polynomials(nlip, &cf[address], r);
}
return result;
}
/*
 * Evaluates the gradient of a single bubble at a point. This is very similar to the
 * SolidHarmonics simple evaluation, but the results are multiplied with the
 * polynomial evaluations and summed together.
 */
// Evaluates the selected Cartesian components (chosen via the template flags) of the
// gradient of a single bubble at one point; the components are accumulated in result[3].
// NOTE(review): throughout the recurrences the gradient terms appear to be carried
// scaled by one extra power of r (e.g. one_per_r_gradient = -x/r^2 rather than
// d(1/r)/dx = -x/r^3); the final "result[..] *= one_per_r" at the end restores
// the scale -- confirm.
template <bool evaluate_gradients_x, bool evaluate_gradients_y, bool evaluate_gradients_z >
__device__ inline void Bubbles_evaluate_gradient_point(
// x-coordinate relative to the center of the bubble
const double &x,
// y-coordinate relative to the center of the bubble
const double &y,
// z-coordinate relative to the center of the bubble
const double &z,
// relative distance from the center of the bubble
const double &distance,
// maximum quantum number 'l'
const int &lmax,
// number of cells
const int &ncell,
// number of lagrange integration polynomials per
// cell, i.e., the number of grid points per cell
const int &nlip,
// position inside the cell
const double &r,
// k value for the bubble
const int &k,
// the first address value in bubble for the selected cell
const int &address,
// constant pointer to a variable double array
const double* __restrict__ cf,
// constant pointer to a derivative variable double array
const double* __restrict__ df,
// if only the l = 0 is evaluated
const bool only_spherical,
// result
double result[3]
) {
int lm_address = address, address2;
// NOTE: Here the nlip is replaced by 8 because this gives advantages in loading the stuff
// also *cf this should be done
const int ncell_nlip = ncell * 8;
int l, l2;
double top, bottom, new_bottom, prev1, prev2, current, current_gradient[3], prev1_gradient[3], prev2_gradient[3], bottom_gradient[3], new_bottom_gradient, top_gradient[3];
double one_per_r = 1.0 / distance;
// (-x/r^2, -y/r^2, -z/r^2): see the scaling note in the header comment
double one_per_r_gradient[3] = {(-x) * one_per_r * one_per_r,
(-y) * one_per_r * one_per_r,
(-z) * one_per_r * one_per_r};
l = 0;
// set value for l=0, m=0
double radial_value;
double radial_derivative = evaluate_polynomials_shared<NLIP-1>(lm_address, df, r);
if (evaluate_gradients_x) result[X_] = radial_derivative * x; // * one_per_r;
if (evaluate_gradients_y) result[Y_] = radial_derivative * y; // * one_per_r;
if (evaluate_gradients_z) result[Z_] = radial_derivative * z; // * one_per_r;
// at (numerically) the bubble center 1/r is singular: zero the angular terms
if (distance >= 0.0 && distance < 1e-12) {
one_per_r = 0.0;
if (evaluate_gradients_x) one_per_r_gradient[X_] = 0.0;
if (evaluate_gradients_y) one_per_r_gradient[Y_] = 0.0;
if (evaluate_gradients_z) one_per_r_gradient[Z_] = 0.0;
if (evaluate_gradients_x) result[X_] = 0.0; //radial_derivative;
if (evaluate_gradients_y) result[Y_] = 0.0; //radial_derivative;
if (evaluate_gradients_z) result[Z_] = 0.0;//radial_derivative;
}
/*if (only_spherical) {
one_per_r = 0.0;
if (evaluate_gradients_x) one_per_r_gradient[X_] = 0.0;
if (evaluate_gradients_y) one_per_r_gradient[Y_] = 0.0;
if (evaluate_gradients_z) one_per_r_gradient[Z_] = 0.0;
}*/
if (lmax >= 1) {
// set all values where m=-1
prev1 = y * one_per_r;
if (evaluate_gradients_x) prev1_gradient[X_] = one_per_r_gradient[X_] * y;
if (evaluate_gradients_y) prev1_gradient[Y_] = 1.0 + one_per_r_gradient[Y_] * y;
if (evaluate_gradients_z) prev1_gradient[Z_] = one_per_r_gradient[Z_] * y;
// set value for l=1, m=-1
radial_value = evaluate_polynomials_shared<NLIP>(address+ncell_nlip, cf, r);
radial_derivative = evaluate_polynomials_shared<NLIP-1>(address+ncell_nlip, df, r);
if (evaluate_gradients_x) result[X_] += radial_value * prev1_gradient[X_] + radial_derivative * prev1 * x;// * one_per_r;
if (evaluate_gradients_y) result[Y_] += radial_value * prev1_gradient[Y_] + radial_derivative * prev1 * y;// * one_per_r;
if (evaluate_gradients_z) result[Z_] += radial_value * prev1_gradient[Z_] + radial_derivative * prev1 * z;// * one_per_r;
//if (only_spherical) printf("radial_value: %e, radial_derivative: %e, prev1, i.e., y/r: %e\n", radial_value, radial_derivative, prev1);
//if (only_spherical && evaluate_gradients_x) printf("prev1-gradient-x: %e, x/r: %e\n", prev1_gradient[X_], x * one_per_r);
//if (only_spherical && evaluate_gradients_y) printf("prev1-gradient-y: %e, y/r: %e\n", prev1_gradient[Y_], y * one_per_r);
//if (only_spherical && evaluate_gradients_z) printf("prev1-gradient-z: %e, z/r: %e\n", prev1_gradient[Z_], z * one_per_r);
// the starting address has 1 item before from the l=0, 3 from l=1, and 1 from l=2
address2 = address + ncell_nlip * 5;
// upward recurrence in l at fixed m: current = a * z * prev1 / r - b * prev2,
// with the gradients obtained from the same recurrence via the product rule
for (l = 2; l <= lmax; l++) {
double a = ( 2.0*(double)l-1.0) * rsqrt( 1.0*(double)((l-1)*(l+1)) );
current = a * z*prev1 * one_per_r;
if (evaluate_gradients_x) current_gradient[X_] = a *(z * prev1 * one_per_r_gradient[X_] + z * one_per_r * prev1_gradient[X_]);
if (evaluate_gradients_y) current_gradient[Y_] = a *(z * prev1 * one_per_r_gradient[Y_] + z * one_per_r * prev1_gradient[Y_]);
if (evaluate_gradients_z) current_gradient[Z_] = a *(z * prev1 * one_per_r_gradient[Z_] + prev1 + z * one_per_r * prev1_gradient[Z_]);
if (l > 2) {
double b = sqrt( (double)((l-2)*(l)) / (double)((l-1)*(l+1)) );
current -= b * prev2;
if (evaluate_gradients_x) current_gradient[X_] -= b * prev2_gradient[X_];
if (evaluate_gradients_y) current_gradient[Y_] -= b * prev2_gradient[Y_];
if (evaluate_gradients_z) current_gradient[Z_] -= b * prev2_gradient[Z_];
}
radial_value = evaluate_polynomials_shared<NLIP>(address2, cf, r);
radial_derivative = evaluate_polynomials_shared<NLIP-1>(address2, df, r);
if (evaluate_gradients_x) result[X_] += radial_value * current_gradient[X_] + radial_derivative * current * x;// * one_per_r;
if (evaluate_gradients_y) result[Y_] += radial_value * current_gradient[Y_] + radial_derivative * current * y;// * one_per_r;
if (evaluate_gradients_z) result[Z_] += radial_value * current_gradient[Z_] + radial_derivative * current * z;// * one_per_r;
prev2 = prev1;
if (evaluate_gradients_x) prev2_gradient[X_] = prev1_gradient[X_];
if (evaluate_gradients_y) prev2_gradient[Y_] = prev1_gradient[Y_];
if (evaluate_gradients_z) prev2_gradient[Z_] = prev1_gradient[Z_];
prev1 = current;
if (evaluate_gradients_x) prev1_gradient[X_] = current_gradient[X_];
if (evaluate_gradients_y) prev1_gradient[Y_] = current_gradient[Y_];
if (evaluate_gradients_z) prev1_gradient[Z_] = current_gradient[Z_];
// add the address2 to get to the next item with m=-1
address2 += ncell_nlip * (2*l+2);
}
prev2 = 1.0;
if (evaluate_gradients_x) prev2_gradient[X_] = 0.0;
if (evaluate_gradients_y) prev2_gradient[Y_] = 0.0;
if (evaluate_gradients_z) prev2_gradient[Z_] = 0.0;
// set all values where m=0
prev1 = z * one_per_r;
if (evaluate_gradients_x) prev1_gradient[X_] = one_per_r_gradient[X_] * z;
if (evaluate_gradients_y) prev1_gradient[Y_] = one_per_r_gradient[Y_] * z;
if (evaluate_gradients_z) prev1_gradient[Z_] = 1.0 + one_per_r_gradient[Z_] * z;
// set value for l=1, m=0
radial_value = evaluate_polynomials_shared<NLIP>(address+2*ncell_nlip, cf, r);
radial_derivative = evaluate_polynomials_shared<NLIP-1>(address+2*ncell_nlip, df, r);
if (evaluate_gradients_x) result[X_] += radial_value * prev1_gradient[X_] + radial_derivative * prev1 * x;// * one_per_r;
if (evaluate_gradients_y) result[Y_] += radial_value * prev1_gradient[Y_] + radial_derivative * prev1 * y;// * one_per_r;
if (evaluate_gradients_z) result[Z_] += radial_value * prev1_gradient[Z_] + radial_derivative * prev1 * z;// * one_per_r;
//if (only_spherical) printf("radial_value: %e, radial_derivative: %e, prev1, i.e., z/r: %e\n", radial_value, radial_derivative, prev1);
//if (only_spherical && evaluate_gradients_x) printf("prev1-gradient-x: %e, x/r: %e\n", prev1_gradient[X_], x * one_per_r);
//if (only_spherical && evaluate_gradients_y) printf("prev1-gradient-y: %e, y/r: %e\n", prev1_gradient[Y_], y * one_per_r);
//if (only_spherical && evaluate_gradients_z) printf("prev1-gradient-z: %e, z/r: %e\n", prev1_gradient[Z_], z * one_per_r);
// the starting address has 1 item before from the l=0, 3 from l=1, and 2 from l=2
address2 = address + ncell_nlip * 6;
for (l = 2; l <= lmax; l++) {
double a = ( 2.0*(double)l-1.0) * rsqrt( 1.0*(double)((l)*(l)) );
double b = sqrt( (double)((l-1)*(l-1)) / (double)((l)*(l)) );
current = a * z * prev1 * one_per_r;
if (evaluate_gradients_x) current_gradient[X_] = a *(z * prev1 * one_per_r_gradient[X_] + z * one_per_r * prev1_gradient[X_]);
if (evaluate_gradients_y) current_gradient[Y_] = a *(z * prev1 * one_per_r_gradient[Y_] + z * one_per_r * prev1_gradient[Y_]);
if (evaluate_gradients_z) current_gradient[Z_] = a *(z * prev1 * one_per_r_gradient[Z_] + prev1 + z * one_per_r * prev1_gradient[Z_]);
current -= b * prev2;
if (evaluate_gradients_x) current_gradient[X_] -= b * prev2_gradient[X_];
if (evaluate_gradients_y) current_gradient[Y_] -= b * prev2_gradient[Y_];
if (evaluate_gradients_z) current_gradient[Z_] -= b * prev2_gradient[Z_];
radial_value = evaluate_polynomials_shared<NLIP>(address2, cf, r);
radial_derivative = evaluate_polynomials_shared<NLIP-1>(address2, df, r);
if (evaluate_gradients_x) result[X_] += radial_value * current_gradient[X_] + radial_derivative * current * x;// * one_per_r;
if (evaluate_gradients_y) result[Y_] += radial_value * current_gradient[Y_] + radial_derivative * current * y;// * one_per_r;
if (evaluate_gradients_z) result[Z_] += radial_value * current_gradient[Z_] + radial_derivative * current * z;// * one_per_r;
prev2 = prev1;
if (evaluate_gradients_x) prev2_gradient[X_] = prev1_gradient[X_];
if (evaluate_gradients_y) prev2_gradient[Y_] = prev1_gradient[Y_];
if (evaluate_gradients_z) prev2_gradient[Z_] = prev1_gradient[Z_];
prev1 = current;
if (evaluate_gradients_x) prev1_gradient[X_] = current_gradient[X_];
if (evaluate_gradients_y) prev1_gradient[Y_] = current_gradient[Y_];
if (evaluate_gradients_z) prev1_gradient[Z_] = current_gradient[Z_];
// add the address2 to get to the next item with m=0
address2 += ncell_nlip * (2*l+2);
}
// set all values where m=1
prev1 = x * one_per_r;
if (evaluate_gradients_x) prev1_gradient[X_] = 1.0 + one_per_r_gradient[X_] * x;
if (evaluate_gradients_y) prev1_gradient[Y_] = one_per_r_gradient[Y_] * x;
if (evaluate_gradients_z) prev1_gradient[Z_] = one_per_r_gradient[Z_] * x;
// set value for l=1, m=1
radial_value = evaluate_polynomials_shared<NLIP>(address+3*ncell_nlip, cf, r);
radial_derivative = evaluate_polynomials_shared<NLIP-1>(address+3*ncell_nlip, df, r);
if (evaluate_gradients_x) result[X_] += radial_value * prev1_gradient[X_] + radial_derivative * prev1 * x;// * one_per_r;
if (evaluate_gradients_y) result[Y_] += radial_value * prev1_gradient[Y_] + radial_derivative * prev1 * y;// * one_per_r;
if (evaluate_gradients_z) result[Z_] += radial_value * prev1_gradient[Z_] + radial_derivative * prev1 * z;// * one_per_r;
//if (only_spherical) printf("radial_value: %e, radial_derivative: %e, prev1, i.e., x/r: %e\n", radial_value, radial_derivative, prev1);
//if (only_spherical && evaluate_gradients_x) printf("prev1-gradient-x: %e, x/r: %e\n", prev1_gradient[X_], x * one_per_r);
//if (only_spherical && evaluate_gradients_y) printf("prev1-gradient-y: %e, y/r: %e\n", prev1_gradient[Y_], y * one_per_r);
//if (only_spherical && evaluate_gradients_z) printf("prev1-gradient-z: %e, z/r: %e\n", prev1_gradient[Z_], z * one_per_r);
// the starting address has 1 item before from the l=0, 3 from l=1, and 3 from l=2
address2 = address + ncell_nlip * 7;
for (l = 2; l <= lmax; l++) {
double a = ( 2.0*(double)l-1.0) * rsqrt( 1.0*(double)((l+1)*(l-1)) );
current = a * z*prev1 * one_per_r;
if (evaluate_gradients_x) current_gradient[X_] = a *(z * prev1 * one_per_r_gradient[X_] + z * one_per_r * prev1_gradient[X_]);
if (evaluate_gradients_y) current_gradient[Y_] = a *(z * prev1 * one_per_r_gradient[Y_] + z * one_per_r * prev1_gradient[Y_]);
if (evaluate_gradients_z) current_gradient[Z_] = a *(z * prev1 * one_per_r_gradient[Z_] + prev1 + z * one_per_r * prev1_gradient[Z_]);
if (l > 2) {
double b = sqrt( (double)((l)*(l-2)) / (double)((l+1)*(l-1)) );
current -= b * prev2;
if (evaluate_gradients_x) current_gradient[X_] -= b * prev2_gradient[X_];
if (evaluate_gradients_y) current_gradient[Y_] -= b * prev2_gradient[Y_];
if (evaluate_gradients_z) current_gradient[Z_] -= b * prev2_gradient[Z_];
}
radial_value = evaluate_polynomials_shared<NLIP>(address2, cf, r);
radial_derivative = evaluate_polynomials_shared<NLIP-1>(address2, df, r);
if (evaluate_gradients_x) result[X_] += radial_value * current_gradient[X_] + radial_derivative * current * x;// * one_per_r;
if (evaluate_gradients_y) result[Y_] += radial_value * current_gradient[Y_] + radial_derivative * current * y;// * one_per_r;
if (evaluate_gradients_z) result[Z_] += radial_value * current_gradient[Z_] + radial_derivative * current * z;// * one_per_r;
prev2 = prev1;
if (evaluate_gradients_x) prev2_gradient[X_] = prev1_gradient[X_];
if (evaluate_gradients_y) prev2_gradient[Y_] = prev1_gradient[Y_];
if (evaluate_gradients_z) prev2_gradient[Z_] = prev1_gradient[Z_];
prev1 = current;
if (evaluate_gradients_x) prev1_gradient[X_] = current_gradient[X_];
if (evaluate_gradients_y) prev1_gradient[Y_] = current_gradient[Y_];
if (evaluate_gradients_z) prev1_gradient[Z_] = current_gradient[Z_];
// add the address2 to get to the next item with m=-1
address2 += ncell_nlip * (2*l+2);
}
// go through the rest of the stuff
// diagonal recurrence: build (l, m=+-l) from (l-1, m=+-(l-1)), then recurse
// upward in l2 for each fixed |m| = l
bottom = y * one_per_r; // bottom refers to solid harmonics value with l=l-1 and m=-(l-1)
if (evaluate_gradients_x) bottom_gradient[X_] = one_per_r_gradient[X_] * y;
if (evaluate_gradients_y) bottom_gradient[Y_] = 1.0 + one_per_r_gradient[Y_] * y;
if (evaluate_gradients_z) bottom_gradient[Z_] = one_per_r_gradient[Z_] * y;
top = x * one_per_r; // top refers to solid harmonics value with l=l-1 and m=l-1
if (evaluate_gradients_x) top_gradient[X_] = 1.0 + one_per_r_gradient[X_] * x;
if (evaluate_gradients_y) top_gradient[Y_] = one_per_r_gradient[Y_] * x;
if (evaluate_gradients_z) top_gradient[Z_] = one_per_r_gradient[Z_] * x;
lm_address += 4 * ncell_nlip;
for (l=2; l <= lmax; l++) {
double c = sqrt((2.0*(double)l - 1.0) / (2.0*(double)l));
new_bottom = c * one_per_r * ( y*top + x*bottom);
// get the gradients to x direction
if (evaluate_gradients_x) new_bottom_gradient = c * (one_per_r_gradient[X_] * (y * top + x * bottom) +
one_per_r * (y * top_gradient[X_] + x * bottom_gradient[X_] + bottom)) ;
if (evaluate_gradients_x) top_gradient[X_] = c * (one_per_r_gradient[X_] * (x * top - y * bottom) +
one_per_r * (x * top_gradient[X_] + top - y * bottom_gradient[X_]));
if (evaluate_gradients_x) bottom_gradient[X_] = new_bottom_gradient;
// get the gradients to y direction
if (evaluate_gradients_y) new_bottom_gradient = c * (one_per_r_gradient[Y_] * (y * top + x * bottom) +
one_per_r * (y * top_gradient[Y_] + top + x * bottom_gradient[Y_]));
if (evaluate_gradients_y) top_gradient[Y_] = c * (one_per_r_gradient[Y_] * (x * top - y * bottom) +
one_per_r * (x * top_gradient[Y_] - y * bottom_gradient[Y_] - bottom));
if (evaluate_gradients_y) bottom_gradient[Y_] = new_bottom_gradient;
// get the gradients to z direction
if (evaluate_gradients_z) new_bottom_gradient = c * (one_per_r_gradient[Z_] * (y * top + x * bottom) +
one_per_r * (y * top_gradient[Z_] + x * bottom_gradient[Z_]));
if (evaluate_gradients_z) top_gradient[Z_] = c * (one_per_r_gradient[Z_] * (x * top - y * bottom) +
one_per_r * (x * top_gradient[Z_] - y * bottom_gradient[Z_]));
if (evaluate_gradients_z) bottom_gradient[Z_] = new_bottom_gradient;
top = c * one_per_r * ( x*top-y*bottom );
// store the new bottom: l=l, m=-l (we need the old bottom in calculation of top previously, so we
// have to sacrifice one register temporarily)
bottom = new_bottom;
radial_value = evaluate_polynomials_shared<NLIP>(lm_address, cf, r);
radial_derivative = evaluate_polynomials_shared<NLIP-1>(lm_address, df, r);
// get value for l=l, m=-l.
if (evaluate_gradients_x) result[X_] += radial_value * bottom_gradient[X_] + radial_derivative * bottom * x;// * one_per_r;
if (evaluate_gradients_y) result[Y_] += radial_value * bottom_gradient[Y_] + radial_derivative * bottom * y;// * one_per_r;
if (evaluate_gradients_z) result[Z_] += radial_value * bottom_gradient[Z_] + radial_derivative * bottom * z;// * one_per_r;
radial_value = evaluate_polynomials_shared<NLIP>(lm_address + 2*l * ncell_nlip, cf, r);
radial_derivative = evaluate_polynomials_shared<NLIP-1>(lm_address + 2*l * ncell_nlip, df, r);
// get value for l=l, m=l. The address is 2*l items away from l=l, m=-l
if (evaluate_gradients_x) result[X_] += radial_value * top_gradient[X_] + radial_derivative * top * x;// * one_per_r;
if (evaluate_gradients_y) result[Y_] += radial_value * top_gradient[Y_] + radial_derivative * top * y;// * one_per_r;
if (evaluate_gradients_z) result[Z_] += radial_value * top_gradient[Z_] + radial_derivative * top * z;// * one_per_r;
// set all values where m=-l
prev1 = bottom;
if (evaluate_gradients_x) prev1_gradient[X_] = bottom_gradient[X_];
if (evaluate_gradients_y) prev1_gradient[Y_] = bottom_gradient[Y_];
if (evaluate_gradients_z) prev1_gradient[Z_] = bottom_gradient[Z_];
address2 = lm_address + (2*l+2) * ncell_nlip;
for (l2 = l+1; l2 <= lmax; l2++) {
// evaluate spherical harmonics for l=l2, m=-l
double a = ( 2.0*(double)l2-1.0) * rsqrt( 1.0*(double)((l2-l)*(l2+l)) );
current = a * z*prev1 * one_per_r;
if (evaluate_gradients_x) current_gradient[X_] = a *(z * prev1 * one_per_r_gradient[X_] + z * one_per_r * prev1_gradient[X_]);
if (evaluate_gradients_y) current_gradient[Y_] = a *(z * prev1 * one_per_r_gradient[Y_] + z * one_per_r * prev1_gradient[Y_]);
if (evaluate_gradients_z) current_gradient[Z_] = a *(z * prev1 * one_per_r_gradient[Z_] + prev1 + z * one_per_r * prev1_gradient[Z_]);
if (l2 > l+1) {
double b = sqrt( (double)((l2-l-1)*(l2+l-1)) / (double)((l2-l)*(l2+l)) );
current -= b * prev2;
if (evaluate_gradients_x) current_gradient[X_] -= b * prev2_gradient[X_];
if (evaluate_gradients_y) current_gradient[Y_] -= b * prev2_gradient[Y_];
if (evaluate_gradients_z) current_gradient[Z_] -= b * prev2_gradient[Z_];
}
radial_value = evaluate_polynomials_shared<NLIP>(address2, cf, r);
radial_derivative = evaluate_polynomials_shared<NLIP-1>(address2, df, r);
if (evaluate_gradients_x) result[X_] += radial_value * current_gradient[X_] + radial_derivative * current * x;// * one_per_r;
if (evaluate_gradients_y) result[Y_] += radial_value * current_gradient[Y_] + radial_derivative * current * y;// * one_per_r;
if (evaluate_gradients_z) result[Z_] += radial_value * current_gradient[Z_] + radial_derivative * current * z;// * one_per_r;
prev2 = prev1;
if (evaluate_gradients_x) prev2_gradient[X_] = prev1_gradient[X_];
if (evaluate_gradients_y) prev2_gradient[Y_] = prev1_gradient[Y_];
if (evaluate_gradients_z) prev2_gradient[Z_] = prev1_gradient[Z_];
prev1 = current;
if (evaluate_gradients_x) prev1_gradient[X_] = current_gradient[X_];
if (evaluate_gradients_y) prev1_gradient[Y_] = current_gradient[Y_];
if (evaluate_gradients_z) prev1_gradient[Z_] = current_gradient[Z_];
// add the address2 to get to the next item with m=-1
address2 += ncell_nlip * (2*l2+2);
}
// set all values where m=l
lm_address += 2*l * ncell_nlip;
prev1 = top;
if (evaluate_gradients_x) prev1_gradient[X_] = top_gradient[X_];
if (evaluate_gradients_y) prev1_gradient[Y_] = top_gradient[Y_];
if (evaluate_gradients_z) prev1_gradient[Z_] = top_gradient[Z_];
address2 = lm_address + (2*l+2) * ncell_nlip;
for (l2 = l+1; l2 <= lmax; l2++) {
// evaluate spherical harmonics for l=l2, m=l
double a = ( 2.0*(double)l2-1.0) * rsqrt( 1.0*(double)((l2+l)*(l2-l)) );
current = a * z*prev1 * one_per_r;
if (evaluate_gradients_x) current_gradient[X_] = a *(z * prev1 * one_per_r_gradient[X_] + z * one_per_r * prev1_gradient[X_]);
if (evaluate_gradients_y) current_gradient[Y_] = a *(z * prev1 * one_per_r_gradient[Y_] + z * one_per_r * prev1_gradient[Y_]);
if (evaluate_gradients_z) current_gradient[Z_] = a *(z * prev1 * one_per_r_gradient[Z_] + prev1 + z * one_per_r * prev1_gradient[Z_]);
if (l2 > l+1) {
double b = sqrt( (double)((l2+l-1)*(l2-l-1)) / (double)((l2+l)*(l2-l)) );
current -= b * prev2;
if (evaluate_gradients_x) current_gradient[X_] -= b * prev2_gradient[X_];
if (evaluate_gradients_y) current_gradient[Y_] -= b * prev2_gradient[Y_];
if (evaluate_gradients_z) current_gradient[Z_] -= b * prev2_gradient[Z_];
}
radial_value = evaluate_polynomials_shared<NLIP>(address2, cf, r);
radial_derivative = evaluate_polynomials_shared<NLIP-1>(address2, df, r);
if (evaluate_gradients_x) result[X_] += radial_value * current_gradient[X_] + radial_derivative * current * x;// * one_per_r;
if (evaluate_gradients_y) result[Y_] += radial_value * current_gradient[Y_] + radial_derivative * current * y;// * one_per_r;
if (evaluate_gradients_z) result[Z_] += radial_value * current_gradient[Z_] + radial_derivative * current * z;// * one_per_r;
prev2 = prev1;
if (evaluate_gradients_x) prev2_gradient[X_] = prev1_gradient[X_];
if (evaluate_gradients_y) prev2_gradient[Y_] = prev1_gradient[Y_];
if (evaluate_gradients_z) prev2_gradient[Z_] = prev1_gradient[Z_];
prev1 = current;
if (evaluate_gradients_x) prev1_gradient[X_] = current_gradient[X_];
if (evaluate_gradients_y) prev1_gradient[Y_] = current_gradient[Y_];
if (evaluate_gradients_z) prev1_gradient[Z_] = current_gradient[Z_];
// add the address2 to get to the next item with m=-1
address2 += ncell_nlip * (2*l2+2);
}
// get next address
lm_address += ncell_nlip;
}
}
// restore the 1/r scaling deferred throughout the recurrences (see header note)
result[X_] *= one_per_r;
result[Y_] *= one_per_r;
result[Z_] *= one_per_r;
// multiply the result with r^k, if k is not 0
// the distance is not too close to 0.0 as this is checked
// earlier in this function, NOTE: should never happen, thus
// commented away
//if (k != 0 && distance > 1e-12) {
/*for (int i = 0; i < k; i ++) {
result *= distance;
}
for (int i = 0; i < -k; i ++) {
result *= one_per_r;
}*/
//}
// at the bubble center the gradient of the angular part is undefined; return zero
if (distance < 1e-12) {
result[X_] = 0.0; // * evaluate_polynomials_shared<NLIP-1>(address, df, r);
result[Y_] = 0.0;
result[Z_] = 0.0;
}
}
/*
* Evaluates value of single bubble at a point. This is very similar to the
* SolidHarmonics simple evaluation, but the results are multiplied with the
* polynomial evaluations
*/
__device__ inline double Bubbles_evaluate_point(
// x-coordinate relative to the center of the bubble
const double &x,
// y-coordinate relative to the center of the bubble
const double &y,
// z-coordinate relative to the center of the bubble
const double &z,
// relative distance from the center of the bubble
const double &distance,
// maximum quantum number 'l'
const int &lmax,
// number of cells
const int &ncell,
// number of lagrange integration polynomials per
// cell, i.e., the number of grid points per cell
const int &nlip,
// position inside the cell
const double &r,
// k value for the bubble
const int &k,
// the first address value in bubble for the selected cell
const int &address,
// constant pointer to a variable double array
const double* __restrict__ cf
) {
double result = 0.0;
int lm_address = address, address2;
// NOTE: Here the nlip is replaced by 8 because this gives advantages in loading the stuff
// also *cf this should be done
const int ncell_nlip = ncell * 8;
int l, l2;
double top, bottom, new_bottom, prev1, prev2, current, a, b, a2;
const double one_per_r = 1.0 / distance;
l = 0;
// set value for l=0, m=0
// printf("x: %f, y: %f, z: %f, nlip: %d, ncell: %d, l: 0, lmax: %d, address: %d, cf: %ld, r: %f\n", x, y, z, nlip, ncell, 0, lmax, lm_address, cf, r);
// printf("shared_memory address: %ld\n", );
// printf("shared memory first value: %f", shared_memory[0]);
result = evaluate_polynomials_shared<NLIP>(lm_address, cf, r);
if (lmax >= 1) {
// set value for l=1, m=-1
result += y * evaluate_polynomials_shared<NLIP>(address+ncell_nlip, cf, r) * one_per_r;
// set value for l=1, m=0
result += z * evaluate_polynomials_shared<NLIP>(address+2*ncell_nlip, cf, r) * one_per_r;
// set value for l=1, m=1
result += x * evaluate_polynomials_shared<NLIP>(address+3*ncell_nlip, cf, r) * one_per_r;
// set all values where m=-1
prev2 = 0.0;
prev1 = y * one_per_r;
// the starting address has 1 item before from the l=0, 3 from l=1, and 1 from l=2
address2 = address + ncell_nlip * 5;
// Warp trick: each lane precomputes the recurrence coefficients (a, b) for
// l == lane id, and the shuffles below broadcast lane l's coefficient to the warp.
// NOTE(review): this assumes all 32 lanes of the warp reach these shuffles together
// and that lmax < 32 -- confirm at the call sites.
l = threadIdx.x % 32;
a = ( 2.0*(double)l-1.0) * rsqrt( 1.0*(double)((l-1)*(l+1)) );
b = (l > 2) ? sqrt( (double)((l-2)*(l)) / (double)((l-1)*(l+1)) ) : 0.0;
for (l = 2; l <= lmax; l++) {
// legacy __shfl for SM 3.5-6.x; __shfl_sync with a full mask on SM 7.0+.
// NOTE(review): for __CUDA_ARCH__ < 350 neither branch assigns 'current',
// so it would be used uninitialized -- presumably such targets are not built.
#if (__CUDA_ARCH__ >= 350) && (__CUDA_ARCH__ < 700)
current = __shfl(a, l) * z*prev1 * one_per_r - __shfl(b, l) * prev2;
#elif (__CUDA_ARCH__ >= 700)
current = __shfl_sync(FULL_MASK, a, l) * z*prev1 * one_per_r - __shfl_sync(FULL_MASK, b, l) * prev2;
#endif
result += current * evaluate_polynomials_shared<NLIP>(address2, cf, r) ;
prev2 = prev1;
prev1 = current;
// add the address2 to get to the next item with m=-1
address2 += ncell_nlip * (2*l+2);
}
// set all values where m=0
prev1 = z * one_per_r;
prev2 = 1.0;
// the starting address has 1 item before from the l=0, 3 from l=1, and 2 from l=2
address2 = address + ncell_nlip * 6;
l = threadIdx.x % 32; // lane within warp
a = ( 2.0*(double)l-1.0) * rsqrt( 1.0*(double)((l)*(l)) );
b = sqrt( (double)((l-1)*(l-1)) / (double)((l)*(l)) );
// printf("l: %d, lmax:%d, a: %f, b: %f, z: %f, prev1: %f, one_per_r: %f, prev2: %f\n", l, lmax, a, b, z, prev1, one_per_r, prev2);
for (l = 2; l <= lmax; l++) {
#if (__CUDA_ARCH__ >= 350) && (__CUDA_ARCH__ < 700)
current = __shfl(a, l) * z * prev1 * one_per_r - __shfl(b, l) * prev2;
#elif __CUDA_ARCH__ >= 700
// printf("lane: %d, l: %d, a: %f, b:%f\n", threadIdx.x % 32, l, a, b);
current = __shfl_sync(FULL_MASK, a, l) * z * prev1 * one_per_r - __shfl_sync(FULL_MASK, b, l) * prev2;
#endif
result += current * evaluate_polynomials_shared<NLIP>(address2, cf, r);
prev2 = prev1;
prev1 = current;
// add the address2 to get to the next item with m=0
address2 += ncell_nlip * (2*l+2);
}
// set all values where m=1
// NOTE(review): prev2 carries over from the m=0 loop here; at l=2 the shuffled b
// is 0.0 (lane 2 computed b = 0 below), so the stale prev2 is masked out.
prev1 = x * one_per_r;
// the starting address has 1 item before from the l=0, 3 from l=1, and 3 from l=2
address2 = address + ncell_nlip * 7;
l = threadIdx.x % 32;
a = ( 2.0*(double)l-1.0) * rsqrt( 1.0*(double)((l+1)*(l-1)) );
b = (l > 2) ? sqrt( (double)((l)*(l-2)) / (double)((l+1)*(l-1)) ) : 0.0;
for (l = 2; l <= lmax; l++) {
#if (__CUDA_ARCH__ >= 350) && (__CUDA_ARCH__ < 700)
current = __shfl(a, l) * z * prev1 * one_per_r - __shfl(b, l) * prev2;
#elif __CUDA_ARCH__ >= 700
current = __shfl_sync(FULL_MASK, a, l) * z * prev1 * one_per_r - __shfl_sync(FULL_MASK, b, l) * prev2;
#endif
result += current * evaluate_polynomials_shared<NLIP>(address2, cf, r);
prev2 = prev1;
prev1 = current;
// add the address2 to get to the next item with m=1
address2 += ncell_nlip * (2*l+2);
}
// go through the rest of the stuff
bottom = y * one_per_r; // bottom refers to spherical harmonics value with l=l-1 and m=-(l-1)
top = x * one_per_r; // top refers to spherical harmonics value with l=l-1 and m=l-1
lm_address += 4 * ncell_nlip;
l = threadIdx.x % 32;
a = sqrt((2.0*(double)l - 1.0) / (2.0*(double)l));
for (l=2; l <= lmax; l++) {
#if (__CUDA_ARCH__ >= 350) && (__CUDA_ARCH__ < 700)
new_bottom = __shfl(a, l) * one_per_r * ( y*top + x*bottom);
top = __shfl(a, l) * one_per_r * ( x*top - y*bottom );
#elif __CUDA_ARCH__ >= 700
new_bottom = __shfl_sync(FULL_MASK, a, l) * one_per_r * ( y*top + x*bottom);
top = __shfl_sync(FULL_MASK, a, l) * one_per_r * ( x*top - y*bottom );
#endif
// store the new bottom: l=l, m=-l (we need the old bottom in calculation of top previously, so we
// have to sacrifice one register temporarily)
bottom = new_bottom;
result += bottom * evaluate_polynomials_shared<NLIP>(lm_address, cf, r);
// get value for l=l, m=l. The address is 2*l items away from l=l, m=-l
result += top * evaluate_polynomials_shared<NLIP>(lm_address + 2*l * ncell_nlip, cf, r);
// set all values where m=-l
prev2 = 0.0;
prev1 = bottom;
address2 = lm_address + (2*l+2) * ncell_nlip;
// set all values where m=l
lm_address += 2*l * ncell_nlip;
l2 = threadIdx.x % 32;
a2 = ( 2.0*(double)l2-1.0) * rsqrt( 1.0*(double)((l2-l)*(l2+l)) );
b = (l2 > l+1) ? sqrt( (double)((l2-l-1)*(l2+l-1)) / (double)((l2-l)*(l2+l)) ) : 0.0;
for (l2 = l+1; l2 <= lmax; l2++) {
// evaluate spherical harmonics for l=l2, m=-l
#if (__CUDA_ARCH__ >= 350) && (__CUDA_ARCH__ < 700)
current = __shfl(a2, l2) * z * prev1 * one_per_r - __shfl(b, l2) * prev2;
#elif __CUDA_ARCH__ >= 700
current = __shfl_sync(FULL_MASK, a2, l2) * z * prev1 * one_per_r - __shfl_sync(FULL_MASK, b, l2) * prev2;
#endif
result += current * evaluate_polynomials_shared<NLIP>(address2, cf, r);
prev2 = prev1;
prev1 = current;
// add the address2 to get to the next item with m=-l
address2 += ncell_nlip * (2*l2+2);
}
prev2 = 0.0;
prev1 = top;
address2 = lm_address + (2*l+2) * ncell_nlip;
l2 = threadIdx.x % 32;
a2 = ( 2.0*(double)l2-1.0) * rsqrt( 1.0*(double)((l2+l)*(l2-l)) ) ;
b = (l2 > l+1) ? sqrt( (double)((l2+l-1)*(l2-l-1)) / (double)((l2+l)*(l2-l)) ) : 0.0;
for (l2 = l+1; l2 <= lmax; l2++) {
// evaluate spherical harmonics for l=l2, m=l
#if (__CUDA_ARCH__ >= 350) && (__CUDA_ARCH__ < 700)
current = __shfl(a2, l2) * z * prev1 * one_per_r - __shfl(b, l2) * prev2;
#elif __CUDA_ARCH__ >= 700
current = __shfl_sync(FULL_MASK, a2, l2) * z * prev1 * one_per_r - __shfl_sync(FULL_MASK, b, l2) * prev2;
#endif
// the latter term will go to zero, if l2 <= l+1
result += current * evaluate_polynomials_shared<NLIP>(address2, cf, r);
prev2 = prev1;
prev1 = current;
// add the address3 to get to the next item with m=l
address2 += ncell_nlip * (2*l2+2);
}
// get next address
lm_address += ncell_nlip;
}
}
// multiply the result with r^k, if k is not 0
// the distance is not too close to 0.0 as this is checked
// earlier in this function, NOTE: should never happen, thus
// commented away
//if (k != 0 && distance > 1e-12) {
// at (numerically) the bubble center only the spherical l=0 term survives
if (distance < 1e-14) {
result = 1.0 * evaluate_polynomials_shared<NLIP>(address, cf, r);
}
// apply the r^k prefactor (k may be negative)
for (int i = 0; i < k; i ++) {
result *= distance;
}
for (int i = 0; i < -k; i ++) {
result *= one_per_r;
}
//}
return result;
}
__device__ int getGlobalIdx_1D_1D() {
int id=threadIdx.x + blockIdx.x * blockDim.x;
return id;
}
__device__ int getGlobalIdx_3D_3D() {
int blockId = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z;
int threadId = blockId * (blockDim.x * blockDim.y * blockDim.z)
+ (threadIdx.z * (blockDim.x * blockDim.y))
+ (threadIdx.y * blockDim.x)
+ threadIdx.x;
return threadId;
}
/*
 * Order a pair in place so that the smaller value ends up in *first and the
 * larger in *second. A value of -1 is treated as "unset" and is replaced by
 * its partner before ordering (nothing is overwritten with -1).
 */
__device__ inline void minmax(int *first, int *second) {
    // propagate the defined value over an unset (-1) partner
    if (*first == -1) *first = *second;
    if (*second == -1) *second = *first;
    // swap so that *first <= *second
    if (*first > *second) {
        const int tmp = *first;
        *first = *second;
        *second = tmp;
    }
}
/*
* Find the minimum and maximum in array that is as large as a block, and store them as the first
* and last value of the input array. NOTE: The arrayLength must be a power of 2.
*/
__device__ void calculateMinimumMaximum(int *array, int blockThreadId, int arrayLength) {
    int division = arrayLength / 2;
    // First round: order so that the larger values of pairs are at the second
    // part of the array and the smaller are at the first part of the array.
    if (blockThreadId < division) {
        // rearrange the values so that the larger is in &array[blockThreadId + division]
        // and smaller is in &array[blockThreadId]
        minmax(&array[blockThreadId], &array[blockThreadId + division]);
    }
    // barrier is outside the divergent branch: every thread of the block reaches it
    __syncthreads();
    division = arrayLength / 4;
    // Subsequent rounds halve the active range each time: low threads push the
    // minimum toward array[0], high threads push the maximum toward
    // array[arrayLength-1]. After the loop, array[0] holds the minimum and
    // array[arrayLength-1] the maximum.
    while (division >= 1) {
        if (blockThreadId < division) {
            minmax(&array[blockThreadId], &array[blockThreadId + division]);
        }
        else if (blockThreadId > arrayLength - division) {
            minmax(&array[blockThreadId - division], &array[blockThreadId]);
        }
        division /= 2;
        __syncthreads();
    }
}
/*
 * Evaluate Bubbles on a grid slice.
 *
 * One thread handles one (x, y, z) point of the current z-slice window
 * (selected by slice_offset / slice_count) and accumulates (+=)
 * multiplier * value into the pitched output cube(s). Template flags select
 * at compile time whether the value and/or the x/y/z gradient components
 * are evaluated, and whether lmin == 0.
 */
template <bool lmin_zero, bool evaluate_value, bool evaluate_gradients_x, bool evaluate_gradients_y, bool evaluate_gradients_z >
__device__ inline void
Bubbles_evaluate_grid(const Bubble* __restrict__ bubble,
                      double* __restrict__ cube,
                      double* __restrict__ gradient_cube_x,
                      double* __restrict__ gradient_cube_y,
                      double* __restrict__ gradient_cube_z,
                      const double* __restrict__ grid_points_x,
                      const double* __restrict__ grid_points_y,
                      const double* __restrict__ grid_points_z,
                      const int shape_x,
                      const int shape_y,
                      const int shape_z,
                      const double zero_point_x,
                      const double zero_point_y,
                      const double zero_point_z,
                      const int k,
                      const int slice_offset,
                      const size_t pitch,
                      const int memory_y_shape,
                      const int slice_count,
                      const int lmin,
                      const double multiplier) {
    // The result array will be in fortran with indices l, x, y, z.
    // This means that the x index will be the fastest to change.
    int x, y, z;
    getXYZ(&x, &y, &z);
    // get the offset from the input cube pointer (pitched memory layout)
    const int id = getCubeOffset3D(x, y, z, pitch, memory_y_shape);
    double value, gradient[3];
    double in_cell_position = 0.0;
    const int ncell = bubble->grid->ncell, nlip = bubble->grid->nlip;
    int icell;
    double relative_position_x, relative_position_y, relative_position_z, distance;
    // Check that the point is within the block
    if (x < shape_x && y < shape_y && z+slice_offset < shape_z && z < slice_count) {
        // calculate relative position to the zero-point and distance to it
        calculate_distance(relative_position_x,
                           relative_position_y,
                           relative_position_z,
                           distance,
                           zero_point_x,
                           zero_point_y,
                           zero_point_z,
                           grid_points_x[x],
                           ldg<double>(&grid_points_y[y]),
                           ldg<double>(&grid_points_z[z+slice_offset]));
        // get the order number of cell the point resides in
        calculate_icell_radial(distance, bubble->charge, bubble->grid->r_max, ncell, nlip, &icell, &in_cell_position);
    }
    else {
        // Out-of-range thread: give icell/distance dummy values so the
        // evaluation below still executes for every thread.
        // NOTE: relative_position_* stay uninitialized here; the value
        // computed from them is discarded by the final bounds check below.
        icell = 1;
        distance = 0.1;
    }
    if (lmin_zero) {
        // calculate the bubble value for the point with lmin = 0
        if (evaluate_value) {
            value = Bubbles_evaluate_point( relative_position_x,
                                            relative_position_y,
                                            relative_position_z,
                                            distance,
                                            bubble->lmax,
                                            ncell,
                                            nlip,
                                            in_cell_position,
                                            k,
                                            icell * 8,
                                            bubble->cf);
        }
        // evaluate gradients if we are evaluating any
        if (evaluate_gradients_x || evaluate_gradients_y || evaluate_gradients_z) {
            Bubbles_evaluate_gradient_point
                <evaluate_gradients_x, evaluate_gradients_y, evaluate_gradients_z>
                (relative_position_x,
                 relative_position_y,
                 relative_position_z,
                 distance,
                 bubble->lmax,
                 ncell,
                 nlip,
                 in_cell_position,
                 k,
                 icell * 8,
                 bubble->cf,
                 bubble->df,
                 false,
                 gradient
                );
        }
    }
    else {
        // NOTE(review): gradients are not evaluated on the lmin > 0 path, so
        // gradient[] stays uninitialized here; presumably the gradient
        // template flags are never set together with lmin > 0 — confirm at
        // call sites.
        if (evaluate_value) {
            // calculate the bubble value for the point with lmin > 0
            value = Bubbles_evaluate_point_lmin( relative_position_x,
                                                 relative_position_y,
                                                 relative_position_z,
                                                 distance,
                                                 lmin,
                                                 bubble->lmax,
                                                 ncell,
                                                 nlip,
                                                 in_cell_position,
                                                 k,
                                                 icell * 8,
                                                 bubble->cf
                                               );
        }
    }
    // Accumulate only for threads whose point is inside the cube slice and
    // inside the radial grid (icell < ncell); dummy threads fall through.
    if (x < shape_x && y < shape_y && z+slice_offset < shape_z && z < slice_count && icell < ncell) {
        /*if (x == 0 && y == 0) {
            printf("%d: [x, y, z], id : [%d, %d, %d], %d, icell: %d, in_cell_position:%f, first_bubble-value:%e, distance:%f, coord: [%f, %f, %f] old-value: %e, value: %e, multiplier: %f\n", slice_offset, x, y, z+slice_offset, id, icell, in_cell_position, bubble->cf[icell*8], distance, relative_position_x, relative_position_y, relative_position_z, cube[id], value, multiplier);
        }*/
        if (evaluate_value) cube[id] += multiplier * value;
        if (evaluate_gradients_x) gradient_cube_x[id] += multiplier * gradient[X_];
        if (evaluate_gradients_y) gradient_cube_y[id] += multiplier * gradient[Y_];
        if (evaluate_gradients_z) gradient_cube_z[id] += multiplier * gradient[Z_];
    }
    return;
}
/*
 * Kernel: evaluate bubble values (no gradients) on a grid slice for a
 * bubble whose evaluation starts from lmin > 0.
 */
__global__ void
#if (__CUDA_ARCH__ <= 350)
__launch_bounds__(128, 6)
#else
__launch_bounds__(256)
#endif
Bubbles_evaluate_grid_lmin(const Bubble* __restrict__ bubble,
                           double* __restrict__ cube,
                           const double* __restrict__ grid_points_x,
                           const double* __restrict__ grid_points_y,
                           const double* __restrict__ grid_points_z,
                           const int shape_x, const int shape_y, const int shape_z,
                           const double zero_point_x, const double zero_point_y,
                           const double zero_point_z,
                           const int k, const int slice_offset,
                           const size_t pitch, const int memory_y_shape,
                           const int slice_count, const int lmin,
                           const double multiplier) {
    // value-only evaluation: every gradient cube is NULL, lmin is honoured
    Bubbles_evaluate_grid <false, true, false, false, false> (
        bubble, cube, NULL, NULL, NULL,
        grid_points_x, grid_points_y, grid_points_z,
        shape_x, shape_y, shape_z,
        zero_point_x, zero_point_y, zero_point_z,
        k, slice_offset, pitch, memory_y_shape,
        slice_count, lmin, multiplier);
}
/*
 * Kernel: evaluate bubble values (no gradients, lmin = 0) on a grid slice
 * stored in pitched device memory.
 */
__global__ void
#if (__CUDA_ARCH__ > 350)
__launch_bounds__(256)
#else
__launch_bounds__(128, 8)
#endif
Bubbles_evaluate_grid_pitched(const Bubble* __restrict__ bubble,
                              double* __restrict__ cube,
                              const double* __restrict__ grid_points_x,
                              const double* __restrict__ grid_points_y,
                              const double* __restrict__ grid_points_z,
                              const int shape_x, const int shape_y, const int shape_z,
                              const double zero_point_x, const double zero_point_y,
                              const double zero_point_z,
                              const int k, const int slice_offset,
                              const size_t pitch, const int memory_y_shape,
                              const int slice_count,
                              const double multiplier) {
    // value-only, lmin fixed to 0; gradient cubes are NULL
    Bubbles_evaluate_grid <true, true, false, false, false> (
        bubble, cube, NULL, NULL, NULL,
        grid_points_x, grid_points_y, grid_points_z,
        shape_x, shape_y, shape_z,
        zero_point_x, zero_point_y, zero_point_z,
        k, slice_offset, pitch, memory_y_shape,
        slice_count, /*lmin = */0, multiplier);
}
/*
 * Kernel: evaluate bubble values and/or gradient components on a grid slice.
 * The template flags are forwarded unchanged; lmin is fixed to 0 here.
 */
template <bool lmin_zero, bool evaluate_value, bool evaluate_gradients_x, bool evaluate_gradients_y, bool evaluate_gradients_z >
__global__ void
#if (__CUDA_ARCH__ > 350)
__launch_bounds__(256)
#else
__launch_bounds__(128, 5)
#endif
Bubbles_evaluate_grid_gradients(const Bubble* __restrict__ bubble,
                                double* __restrict__ cube,
                                double* __restrict__ gradient_cube_x,
                                double* __restrict__ gradient_cube_y,
                                double* __restrict__ gradient_cube_z,
                                const double* __restrict__ grid_points_x,
                                const double* __restrict__ grid_points_y,
                                const double* __restrict__ grid_points_z,
                                const int shape_x, const int shape_y, const int shape_z,
                                const double zero_point_x, const double zero_point_y,
                                const double zero_point_z,
                                const int k, const int slice_offset,
                                const size_t pitch, const int memory_y_shape,
                                const int slice_count,
                                const double multiplier) {
    Bubbles_evaluate_grid <lmin_zero, evaluate_value, evaluate_gradients_x, evaluate_gradients_y, evaluate_gradients_z> (
        bubble, cube, gradient_cube_x,
        gradient_cube_y, gradient_cube_z,
        grid_points_x, grid_points_y, grid_points_z,
        shape_x, shape_y, shape_z,
        zero_point_x, zero_point_y, zero_point_z,
        k, slice_offset, pitch, memory_y_shape,
        slice_count, /*lmin = */0, multiplier);
}
/*
 * Evaluate Bubbles at explicit points.
 *
 * One thread handles one point and accumulates (+=) multiplier * value into
 * result_array (and optionally the gradient arrays). Template flags select
 * at compile time what is evaluated and whether lmin == 0.
 */
template <bool lmin_zero, bool evaluate_value, bool evaluate_gradients_x, bool evaluate_gradients_y, bool evaluate_gradients_z>
__device__ inline void
Bubbles_evaluate_points(const Bubble* __restrict__ bubble,
                        double* __restrict__ result_array,
                        double* __restrict__ device_gradients_x,
                        double* __restrict__ device_gradients_y,
                        double* __restrict__ device_gradients_z,
                        // a 3d array, where the x coordinates are first,
                        // then y coordinates, and finally the z coordinates. This ordering
                        // is selected to get coalesced memory reads
                        const double* __restrict__ points,
                        // total number of points evaluated by this device
                        const int device_number_of_points,
                        // the zero point x-coordinate of bubbles
                        const double zero_point_x,
                        // the zero point y-coordinate of bubbles
                        const double zero_point_y,
                        // the zero point z-coordinate of bubbles
                        const double zero_point_z,
                        // the k value of the bubbles
                        const int k,
                        // the lmin value evaluated
                        const int lmin,
                        // number of points in this kernel call
                        const int point_count,
                        // device_point_offset
                        const int device_point_offset,
                        const double multiplier
                       ) {
    // Get the point order number within this kernel call
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    double value, gradient[3];
    double in_cell_position = 0.0;
    const int ncell = bubble->grid->ncell, nlip = bubble->grid->nlip;
    int icell = -1;
    double relative_position_x, relative_position_y, relative_position_z, distance, r_max = bubble->grid->r_max;
    // Check that the point is within the block
    if (id + device_point_offset < device_number_of_points && id < point_count ) {
        // calculate relative position to the zero-point and distance to it;
        // the x/y/z coordinates are read from the three contiguous sections
        // of 'points'
        calculate_distance(relative_position_x,
                           relative_position_y,
                           relative_position_z,
                           distance,
                           zero_point_x,
                           zero_point_y,
                           zero_point_z,
                           points[id + device_point_offset],
                           points[id + device_point_offset + device_number_of_points],
                           points[id + device_point_offset + device_number_of_points*2]);
        // get the order number of cell the point resides in
        calculate_icell_radial(distance, bubble->charge, bubble->grid->r_max, ncell, nlip, &icell, &in_cell_position);
    }
    else {
        // Out-of-range thread: dummy icell/distance keep the evaluation below
        // executing uniformly; the result is discarded by the final check.
        // NOTE: relative_position_* stay uninitialized for these threads.
        icell = 1;
        distance = 0.1;
    }
    // calculate the bubble value for the point
    if (!lmin_zero) {
        // NOTE(review): no gradients are computed on the lmin > 0 path, so
        // gradient[] stays uninitialized; presumably the gradient flags are
        // never combined with lmin > 0 — confirm at call sites.
        if (evaluate_value) {
            value = Bubbles_evaluate_point_lmin( relative_position_x,
                                                 relative_position_y,
                                                 relative_position_z,
                                                 distance,
                                                 lmin,
                                                 bubble->lmax,
                                                 ncell,
                                                 nlip,
                                                 in_cell_position,
                                                 k,
                                                 icell * 8,
                                                 bubble->cf
                                               );
        }
    }
    else {
        if (evaluate_gradients_x || evaluate_gradients_y || evaluate_gradients_z) {
            Bubbles_evaluate_gradient_point
                <evaluate_gradients_x, evaluate_gradients_y, evaluate_gradients_z>
                (relative_position_x,
                 relative_position_y,
                 relative_position_z,
                 distance,
                 bubble->lmax,
                 ncell,
                 nlip,
                 in_cell_position,
                 k,
                 icell * 8,
                 bubble->cf,
                 bubble->df,
                 false, //(evaluate_gradients_x != evaluate_gradients_y || evaluate_gradients_x != evaluate_gradients_z) && icell == 0, //evaluate_gradients_x != evaluate_gradients_y || evaluate_gradients_x != evaluate_gradients_z,
                 gradient
                );
        }
        if (evaluate_value) {
            value = Bubbles_evaluate_point( relative_position_x,
                                            relative_position_y,
                                            relative_position_z,
                                            distance,
                                            bubble->lmax,
                                            ncell,
                                            nlip,
                                            in_cell_position,
                                            k,
                                            icell * 8,
                                            bubble->cf
                                          );
        }
    }
    // store the result to the result array; only threads whose point is in
    // range, within the radial grid (distance < r_max) and within a valid
    // cell (icell < ncell) contribute
    if (id + device_point_offset < device_number_of_points && id < point_count && distance < r_max && icell < ncell ) {
        if (evaluate_value) result_array[id+device_point_offset] += multiplier * value;
        //if ((evaluate_gradients_x) && (id + device_point_offset <= 7)) printf("¤%¤%¤%%#¤%#¤ X: %d: pos: %f, %f, %f, val: %e, remainder: %e\n", id + device_point_offset, relative_position_x, relative_position_y, relative_position_z, gradient[X_], device_gradients_x[id+device_point_offset]);
        //if ((evaluate_gradients_y) && (id + device_point_offset <= 7)) printf("¤%¤%¤%%#¤%#¤ Y: %d: pos: %f, %f, %f, val: %e, remainder: %e\n", id + device_point_offset, relative_position_x, relative_position_y, relative_position_z, gradient[Y_], device_gradients_y[id+device_point_offset]);
        //if ((evaluate_gradients_z) && (id + device_point_offset <= 7)) printf("¤%¤%¤%%#¤%#¤ Z: %d: pos: %f, %f, %f, val: %e, remainder: %e\n", id + device_point_offset, relative_position_x, relative_position_y, relative_position_z, gradient[Z_], device_gradients_z[id+device_point_offset]);
        // add also the gradient value, if we are evaluating them
        if (evaluate_gradients_x) device_gradients_x[id+device_point_offset] += multiplier * gradient[X_];
        if (evaluate_gradients_y) device_gradients_y[id+device_point_offset] += multiplier * gradient[Y_];
        if (evaluate_gradients_z) device_gradients_z[id+device_point_offset] += multiplier * gradient[Z_];
    }
    return;
}
/*
 * Damping factor 0.5 * erfc(r - 2/r) for r away from zero; returns 1.0 at
 * (numerically) zero radius to avoid the 2/r singularity.
 */
__device__ inline double get_damping_factor(double r) {
    // guard against division by ~zero
    if (r <= 1e-12) return 1.0;
    return 0.5 * erfc(r - 2.0 / r);
}
/*
 * Kernel: evaluate bubble values and/or gradients at explicit points.
 * 'points' holds all x coordinates first, then all y, then all z, to get
 * coalesced reads. Forwards to Bubbles_evaluate_points with lmin = 0.
 */
template <bool lmin_zero, bool evaluate_value, bool evaluate_gradients_x, bool evaluate_gradients_y, bool evaluate_gradients_z >
#if (__CUDA_ARCH__ <= 350)
__launch_bounds__(128, 4)
#else
__launch_bounds__(256)
#endif
__global__ void Bubbles_evaluate_gradient_points(
        const Bubble* __restrict__ bubble,
        double* __restrict__ result_array,
        double* __restrict__ device_gradients_x,
        double* __restrict__ device_gradients_y,
        double* __restrict__ device_gradients_z,
        const double* __restrict__ points,
        // total number of points evaluated by this device
        const int device_number_of_points,
        // bubble zero-point coordinates
        const double zero_point_x,
        const double zero_point_y,
        const double zero_point_z,
        // the k value of the bubbles
        const int k,
        // number of points in this kernel call
        const int point_count,
        const int device_point_offset,
        const double multiplier
       ) {
    Bubbles_evaluate_points<lmin_zero, evaluate_value, evaluate_gradients_x, evaluate_gradients_y, evaluate_gradients_z>(
        bubble, result_array,
        device_gradients_x, device_gradients_y, device_gradients_z,
        points, device_number_of_points,
        zero_point_x, zero_point_y, zero_point_z,
        k, /*lmin = */0, point_count, device_point_offset, multiplier);
}
#if (__CUDA_ARCH__ <= 350)
__launch_bounds__(128, 7)
#else
__launch_bounds__(256)
#endif
/*
 * Kernel: evaluate bubble values only (lmin = 0, no gradients) at explicit
 * points; the gradient output pointers are passed as NULL.
 */
__global__ void Bubbles_evaluate_points_simple(
        const Bubble* __restrict__ bubble,
        double* __restrict__ result_array,
        // x coordinates first, then y, then z, for coalesced reads
        const double* __restrict__ points,
        // total number of points evaluated by this device
        const int device_number_of_points,
        // bubble zero-point coordinates
        const double zero_point_x,
        const double zero_point_y,
        const double zero_point_z,
        // the k value of the bubbles
        const int k,
        // number of points in this kernel call
        const int point_count,
        const int device_point_offset,
        const double multiplier
       ) {
    Bubbles_evaluate_points<true, true, false, false, false>(
        bubble, result_array,
        /*device_gradients_x*/NULL, /*device_gradients_y*/NULL, /*device_gradients_z*/NULL,
        points, device_number_of_points,
        zero_point_x, zero_point_y, zero_point_z,
        k, /*lmin = */0, point_count, device_point_offset, multiplier);
}
/*__global__ void Bubble_make_taylor_kernel(Bubble_t *result_bubble, int maximum_taylor_order, double *contaminants,
double *c2s_coefficients, int *c2s_lm_ids, int *c2s_term_starts, int offset) {
const int index=threadIdx.x + blockIdx.x * blockDim.x + offset;
extern __shared__ double shared_memory[];
double *one_per_kappa_factorial = &shared_memory[0];
double *shared_contaminants = &shared_memory[maximum_taylor_order];
int contaminants_size = (maximum_taylor_order+1)*(maximum_taylor_order+2)*(maximum_taylor_order+3)/6;
// calculate the 1/kappa! terms to the shared memory
if (threadIdx.x < maximum_taylor_order) {
int kappa = 1;
for (int i = 1; i <= threadIdx.x; i++) {
kappa *= i+1;
}
one_per_kappa_factorial[threadIdx.x] = 1.0 / ((double) kappa);
}
// load the contaminats to the shared memory
if (threadIdx.x < contaminants_size) {
int id = threadIdx.x;
while (id < contaminants_size) {
shared_contaminants[id] = contaminants[id];
id += blockDim.x;
}
}
__syncthreads();
// do the actual calculation
double r = result_bubble->gridpoints[index];
double prefactor;
double damping_factor = get_damping_factor(r);
int k = result_bubble->k, ncell= result_bubble->ncell, nlip = result_bubble->nlip;
int result_index = 0, counter = 0, term_counter = 0;
for (int x = 0; x <= maximum_taylor_order; x++) {
for (int y = 0; y <= maximum_taylor_order - x; y++) {
for (int z = 0; z <= maximum_taylor_order - x - y; z++) {
prefactor = one_per_kappa_factorial[x+y+z]// 1/[x+y+z]
* pow(r, (double)(x+y+z - k)) // r^x+y+z-k
* shared_contaminants[counter] // c
* damping_factor;
// go through all l,m terms which get contribution from x,y,z -term
while (term_counter < c2s_term_starts[counter+1]) {
// get the index in the result array, note: the -1 is because the indices are in
// fortran format, starting from 1
result_index = (c2s_lm_ids[term_counter]-1) * (ncell * (nlip-1) +1) + index;
// add the prefactor times the coefficient from cartesion to spherical conversion
result_bubble->f[result_index] += c2s_coefficients[term_counter] * prefactor;
// add the counter value used to follow the c2s conversion
term_counter++;
}
// add the conter value used to follow cartesian terms
counter ++;
}
}
}
} */
/*
 * Pointwise sum of two bubbles' f-arrays: each thread handles one radial
 * point and loops over every (l, m) channel, accumulating bubble1_f into
 * bubble_f. The f-arrays are pitched; device_f_pitch is in bytes.
 */
__global__ void Bubble_sum_kernel(double* __restrict__ bubble_f, const double* __restrict__ bubble1_f, const int lmax, const int max_id, const size_t device_f_pitch) {
    const int point = threadIdx.x + blockIdx.x * blockDim.x;
    if (point >= max_id) return;
    // pitch is a multiple of sizeof(double) (cudaMallocPitch alignment)
    const size_t row_stride = device_f_pitch / sizeof(double);
    const int ilm_count = (lmax + 1) * (lmax + 1);
    for (int ilm = 0; ilm < ilm_count; ilm++) {
        bubble_f[ilm * row_stride + point] += bubble1_f[ilm * row_stride + point];
    }
}
/*
 * Decreases the k-value of a bubble by k_decrease: every (l, m) channel of
 * each radial point is multiplied by r^k_decrease. Each thread handles one
 * point; the f-array is pitched, device_f_pitch is in bytes.
 */
__global__ void Bubble_decrease_k_kernel(double* __restrict__ bubble_f, const double* __restrict__ r, const int k_decrease, const int lmax, const int max_id, const size_t device_f_pitch) {
    const int point = threadIdx.x + blockIdx.x * blockDim.x;
    if (point >= max_id) return;
    // evaluate r^k_decrease once per point and reuse for every (l, m) channel
    const double rpow = pow(r[point], (double) k_decrease);
    const size_t row_stride = device_f_pitch / sizeof(double);
    const int ilm_count = (lmax + 1) * (lmax + 1);
    for (int ilm = 0; ilm < ilm_count; ilm++) {
        bubble_f[ilm * row_stride + point] *= rpow;
    }
}
/*
 * Elementwise product: cube1[i] *= cube2[i] for indices below cube_size.
 * 'offset' shifts the index range handled by this kernel call.
 */
__global__ void multiply_cubes(double *cube1, double *cube2, const int cube_size, const int offset) {
    const int i = offset + threadIdx.x + blockIdx.x * blockDim.x;
    if (i < cube_size) cube1[i] *= cube2[i];
}
/**************************************************************
* Bubble-implementation *
**************************************************************/
/*
 * Evaluate the cf (interpolation coefficients) on ALL devices. This is a
 * crucial preparation step for injection; for correct results the Bubble
 * must already have all f-values resident on the devices.
 *
 * The (l, m) channels are split between the streams of each device, the
 * same way as in uploadAll.
 */
void Bubble::calculateCf() {
    const int block_size = 64;
    const int ilm_count = (this->lmax + 1) * (this->lmax + 1);
    const int streams_per_device = this->streamContainer->getStreamsPerDevice();
    check_errors(__FILE__, __LINE__);
    for (int device = 0; device < this->streamContainer->getNumberOfDevices(); device++) {
        this->streamContainer->setDevice(device);
        int offset = 0;
        for (int stream = 0; stream < streams_per_device; stream++) {
            // distribute ilm_count as evenly as possible over the streams
            const int stream_ilm = ilm_count / streams_per_device
                                 + ((ilm_count % streams_per_device) > stream);
            const int point_count = stream_ilm * this->grid->ncell;
            // skip empty streams: e.g. with ilm_count == 1 some streams get no
            // points, and a zero-sized launch would be a cuda error
            if (point_count > 0) {
                const int grid_size = (point_count + block_size - 1) / block_size;
                calc_cf <<< grid_size, block_size, 0,
                            *this->streamContainer->getStream(device, stream) >>>
                    (this->device_copies[device], offset, point_count,
                     this->device_f_pitch[device]);
                offset += point_count;
            }
            check_errors(__FILE__, __LINE__);
        }
    }
}
/*
 * Allocate and initialize the per-device storage of this Bubble:
 * a pitched, zero-filled f-array plus cf/df coefficient buffers and a
 * device-resident copy of the Bubble struct itself on every device.
 */
void Bubble::initDeviceMemory(int ibub, Grid1D *grid, double center[3], int lmax,
                              int k, double charge, StreamContainer *streamContainer) {
    //cudaHostRegister(this, sizeof(Bubble), cudaHostRegisterPortable);
    //check_errors(__FILE__, __LINE__);
    this->ibub = ibub;
    this->lmax = lmax;
    // device buffers are sized for this lmax; remember it separately
    this->device_memory_lmax = lmax;
    this->k = k;
    this->charge = charge;
    this->streamContainer = streamContainer;
    this->crd[X_] = center[X_];
    this->crd[Y_] = center[Y_];
    this->crd[Z_] = center[Z_];
    this->integrator = NULL;
    // per-device bookkeeping arrays
    this->uploaded_events = new cudaEvent_t*[this->streamContainer->getNumberOfDevices()];
    this->device_copies = new Bubble * [this->streamContainer->getNumberOfDevices()];
    this->device_f = new double *[this->streamContainer->getNumberOfDevices()];
    this->device_f_pitch = new size_t [this->streamContainer->getNumberOfDevices()];
    this->device_cf = new double * [this->streamContainer->getNumberOfDevices()];
    this->device_df = new double * [this->streamContainer->getNumberOfDevices()];
    for (int device = 0; device < this->streamContainer->getNumberOfDevices(); device ++) {
        this->streamContainer->setDevice(device);
        // one pitched row per (l, m) pair, each row holding all radial points
        size_t sz=sizeof(double)*(grid->ncell*(grid->nlip-1)+1);
        cudaMallocPitch((void**)&device_f[device], &device_f_pitch[device],
                        sz, (lmax+1)*(lmax+1));
        check_errors(__FILE__, __LINE__);
        cudaMemset(device_f[device], 0, device_f_pitch[device]*(lmax+1)*(lmax+1));
        check_errors(__FILE__, __LINE__);
        // cf/df: 8 coefficients per cell per (l, m) pair
        sz=sizeof(double)*grid->ncell*8*(lmax+1)*(lmax+1);
        cudaMalloc(&this->device_cf[device], sz);
        cudaMalloc(&this->device_df[device], sz);
        check_errors(__FILE__, __LINE__);
        // copy the bubble to the device, for which set the device pointers
        // to be the main-pointers (the struct copied below must carry
        // device-valid pointers)
        this->f = this->device_f[device];
        this->cf = this->device_cf[device];
        this->df = this->device_df[device];
        this->grid = grid->device_copies[device];
        // allocate & copy the bubble to device
        cudaMalloc(&this->device_copies[device], sizeof(Bubble));
        cudaMemcpy(this->device_copies[device], this, sizeof(Bubble), cudaMemcpyHostToDevice);
        check_errors(__FILE__, __LINE__);
    }
    // restore the host-side grid pointer.
    // NOTE: this->f/cf/df are left pointing at the LAST device's buffers;
    // the constructors / upload methods reset f afterwards.
    this->grid = grid;
}
/*
 * Construct a Bubble backed by the host f-array 'bf'. Device buffers are
 * allocated by initDeviceMemory; host data is NOT uploaded here.
 */
Bubble::Bubble(int ibub, Grid1D *grid, double center[3], int lmax, int k, double *bf,
               double charge, StreamContainer *streamContainer) {
    this->initDeviceMemory(ibub, grid, center, lmax, k, charge, streamContainer);
    // keep the host-side pointer; host-memory registration for faster
    // transfers is currently disabled
    this->f = bf;
}
// Construct a Bubble without host-side f-data: allocates device memory only.
Bubble::Bubble(int ibub, Grid1D *grid, double center[3], int lmax, int k, double charge, StreamContainer *streamContainer) {
    this->initDeviceMemory(ibub, grid, center, lmax, k, charge, streamContainer);
}
/*
 * Construct a new Bubble with the same center/grid/charge as 'old_bubble'
 * but with the given lmax and k.
 *
 * Fix: the 'k' parameter was previously ignored — old_bubble->k was passed
 * to initDeviceMemory instead, leaving the parameter dead and silently
 * discarding the caller's requested k.
 */
Bubble::Bubble(Bubble *old_bubble, int lmax, int k) {
    this->initDeviceMemory(old_bubble->ibub, old_bubble->grid, old_bubble->crd,
                           lmax, k, old_bubble->charge, old_bubble->streamContainer);
}
/*
 * Uploads all bubble data to all devices (gpus) on all nodes. This kind of approach
 * is needed when injecting bubbles to cuda. With bubble-multiplication the upload
 * -method is preferred.
 *
 * The (l, m) rows are split between the streams of each device; after the
 * upload the cf coefficients are recomputed and a per-device "uploaded"
 * event is recorded for later synchronization.
 */
void Bubble::uploadAll(double *f, int lmax) {
    // set the host variables and register them for faster data transfer
    this->f = f;
    this->lmax = lmax;
    // host rows are tightly packed: one row per (l, m) pair
    size_t host_pitch = (this->grid->ncell * (this->grid->nlip - 1) + 1) * sizeof(double);
    int ilmmax = (lmax+1)*(lmax+1);
    check_errors(__FILE__, __LINE__);
    Grid1D* host_grid = this->grid;
    // register the host array (currently disabled)
    //cudaHostRegister(this->f, host_pitch * ilmmax, cudaHostRegisterPortable);
    check_errors(__FILE__, __LINE__);
    double *device_f, *host_f;
    for (int device = 0; device < this->streamContainer->getNumberOfDevices(); device ++) {
        this->streamContainer->setDevice(device);
        // get the preallocated device pointers
        device_f = this->device_f[device];
        // NOTE: for all devices the first pointer points to the first value of each array
        host_f = this->f;
        for (int stream = 0; stream < this->streamContainer->getStreamsPerDevice(); stream++) {
            // distribute the (l, m) rows as evenly as possible over the streams
            int ilm_per_stream = ilmmax / this->streamContainer->getStreamsPerDevice() +
                                 (( ilmmax % this->streamContainer->getStreamsPerDevice()) > stream);
            // upload the stream data to device
            cudaMemcpy2DAsync((void *) device_f, this->device_f_pitch[device],
                              (void *) host_f, host_pitch,
                              host_pitch,
                              ilm_per_stream,
                              cudaMemcpyHostToDevice,
                              *this->streamContainer->getStream(device, stream)
                             );
            check_errors(__FILE__, __LINE__);
            // advance both pointers past the rows handled by this stream
            device_f += ilm_per_stream * this->device_f_pitch[device] / sizeof(double);
            host_f += ilm_per_stream * host_pitch / sizeof(double);
        }
        // copy the bubble to the device, for which set the device pointers
        // to be the main-pointers (the struct copied to the device must
        // carry device-valid pointers)
        this->f = this->device_f[device];
        this->cf = this->device_cf[device];
        this->df = this->device_df[device];
        this->grid = host_grid->device_copies[device];
        this->lmax = lmax;
        // copy the bubble to device
        cudaMemcpyAsync(this->device_copies[device], this, sizeof(Bubble), cudaMemcpyHostToDevice,
                        *this->streamContainer->getStream(device, 0));
        check_errors(__FILE__, __LINE__);
        // restore the host-side pointers.
        // NOTE(review): the struct copy above is asynchronous while 'this' is
        // mutated immediately afterwards — this relies on the copy staging the
        // source before the mutation; confirm intended behaviour.
        this->f = f;
        this->grid = host_grid;
    }
    check_errors(__FILE__, __LINE__);
    this->streamContainer->synchronizeAllDevices();
    // calculate the cf
    this->calculateCf();
    // and synchronize the host with the device
    for (int device = 0; device < this->streamContainer->getNumberOfDevices(); device ++) {
        this->uploaded_events[device] = this->streamContainer->recordDeviceEvent(device);
    }
    // we are not in any case downloading the data back, so we can unregister the array
    //cudaHostUnregister(this->f);
    check_errors(__FILE__, __LINE__);
}
/*
 * Uploads part of a bubble to the device.
 *
 * The radial points are partitioned first between mpi-nodes, then between
 * this node's devices, then between each device's streams; each stream
 * copies all (l, m) rows for its contiguous column range.
 *
 * NOTE: in order to use this, the bubble uploaded (i.e., the f-array given as input)
 *       must have the same lmax value as the Bubble-object we are uploading to.
 * NOTE: registers the input array but does not unregister it, thus after calling this
 *       the user must unregister the f elsewhere, for instance by calling the unregister function.
 * NOTE: this function is designed to function together with the bubble multiplication
 */
void Bubble::upload(double *f, int lmax, bool register_host) {
    // set the host variables and register them for faster data transfer
    this->f = f;
    check_errors(__FILE__, __LINE__);
    this->lmax = lmax;
    int ilmmax = (lmax + 1) * (lmax + 1);
    // calculate the total number of points in the bubbles each l,m -pair,
    int total_point_count = this->grid->ncell * (this->grid->nlip - 1) +1;
    Grid1D* host_grid = this->grid;
    // register the host array, if not explicitly telling not to
    /*if (register_host) {
        cudaHostRegister(this->f, sizeof(double)*ilmmax*total_point_count, cudaHostRegisterPortable);
        check_errors(__FILE__, __LINE__);
    }*/
    // store the processor variables to be used at downloading time.
    // NOTE(review): these resolve to self-assignments of the members (no
    // parameters or locals of these names are visible in this chunk) —
    // presumably they were meant to capture file-scope mpi rank/size
    // globals; confirm against the rest of the file.
    this->processor_order_number = processor_order_number;
    this->number_of_processors = number_of_processors;
    size_t host_pitch = total_point_count * sizeof(double);
    // determine how many of the points belong to the current mpi-node
    int processor_point_count = total_point_count / this->number_of_processors
                              + ((total_point_count % number_of_processors) > processor_order_number);
    // get the offset to the f-array caused by other processors
    int remainder = total_point_count % this->number_of_processors;
    int offset = processor_order_number * total_point_count / number_of_processors +
                 ((remainder < processor_order_number) ? remainder : processor_order_number);
    double *device_f;
    double *host_f = &this->f[offset];
    for (int device = 0; device < this->streamContainer->getNumberOfDevices(); device ++) {
        this->streamContainer->setDevice(device);
        // get the preallocated device pointers,
        // NOTE: The memory of bubble is allocated for its entire
        // length, thus we have to go to the part we want to upload
        device_f = this->device_f[device];
        device_f = &device_f[offset];
        // detemine how many of the mpi-nodes points belong to this device (gpu)
        int device_point_count = processor_point_count / this->streamContainer->getNumberOfDevices() +
                                 ((processor_point_count % this->streamContainer->getNumberOfDevices()) > device);
        for (int stream = 0; stream < this->streamContainer->getStreamsPerDevice(); stream ++) {
            // detemine the number of the points handled by this stream
            int stream_point_count = device_point_count / this->streamContainer->getStreamsPerDevice() +
                                     ((device_point_count % this->streamContainer->getStreamsPerDevice()) > stream);
            // upload the data to device, copy all ilmmax-rows for stream_point_count columns
            cudaMemcpy2DAsync((void *) device_f, this->device_f_pitch[device],
                              (void *) host_f, host_pitch,
                              stream_point_count * sizeof(double),
                              ilmmax,
                              cudaMemcpyHostToDevice,
                              *this->streamContainer->getStream(device, stream)
                             );
            check_errors(__FILE__, __LINE__);
            offset += stream_point_count;
            device_f += stream_point_count;
            host_f += stream_point_count;
        }
        // copy the bubble to the device, for which set the device pointers
        // to be the main-pointers (the struct copied to the device must
        // carry device-valid pointers)
        this->f = this->device_f[device];
        this->cf = this->device_cf[device];
        this->df = this->device_df[device];
        this->grid = host_grid->device_copies[device];
        this->lmax = lmax;
        // copy the bubble to device
        cudaMemcpyAsync(this->device_copies[device], this, sizeof(Bubble), cudaMemcpyHostToDevice,
                        *this->streamContainer->getStream(device, 0));
        check_errors(__FILE__, __LINE__);
        // restore the host-side pointers (see the async-copy note in uploadAll)
        this->f = f;
        this->grid = host_grid;
    }
    // and synchronize the host with the device
    for (int device = 0; device < this->streamContainer->getNumberOfDevices(); device ++) {
        this->uploaded_events[device] = this->streamContainer->recordDeviceEvent(device);
    }
}
// Make 'stream' on 'device' wait until this bubble's upload event has completed.
void Bubble::waitBubbleUploaded(int device, cudaStream_t *stream) {
    cudaStreamWaitEvent(*stream, *this->uploaded_events[device], 0);
}
/*
 * Makes the default stream (0) wait until the upload of this bubble's
 * data to 'device' has completed.
 */
void Bubble::waitBubbleUploaded(int device) {
    cudaStreamWaitEvent(0, *this->uploaded_events[device], 0);
}
/*
 * Sets the device-side bubble values to zero.
 *
 * NOTE: the zeroed region is sized by this->device_memory_lmax, so the
 * Bubble's device memory must already have been allocated before calling this.
 * NOTE: this function is designed to function together with the bubble multiplication
 */
void Bubble::setToZero() {
    // (the original contained a dead self-assignment "this->f = f;" here --
    // inside a member function 'f' already resolves to 'this->f' -- together
    // with a stale comment about host registration; both removed)
    check_errors(__FILE__, __LINE__);
    // number of (l,m)-rows allocated on the device
    int ilmmax = (this->device_memory_lmax + 1) * (this->device_memory_lmax + 1);
    // calculate the total number of points in the bubbles each l,m -pair,
    int total_point_count = this->grid->ncell * (this->grid->nlip - 1) +1;
    // determine how many of the points belong to the current mpi-node
    int processor_point_count = total_point_count / this->number_of_processors
                  + ((total_point_count % this->number_of_processors) > this->processor_order_number);
    // get the offset to the f-array caused by other processors
    int remainder = total_point_count % this->number_of_processors;
    int offset = this->processor_order_number * total_point_count / this->number_of_processors +
                 ((remainder < this->processor_order_number) ? remainder : this->processor_order_number);
    double *device_f;
    for (int device = 0; device < this->streamContainer->getNumberOfDevices(); device ++) {
        this->streamContainer->setDevice(device);
        // get the preallocated device pointers,
        // NOTE: The memory of bubble is allocated for its entire
        // length, thus we have to go to the part we want to zero
        device_f = this->device_f[device];
        device_f = &device_f[offset];
        // determine how many of the mpi-nodes points belong to this device (gpu)
        int device_point_count = processor_point_count / this->streamContainer->getNumberOfDevices() +
                                ((processor_point_count % this->streamContainer->getNumberOfDevices()) > device);
        for (int stream = 0; stream < this->streamContainer->getStreamsPerDevice(); stream ++) {
            // determine the number of the points handled by this stream
            int stream_point_count = device_point_count / this->streamContainer->getStreamsPerDevice() +
                                    ((device_point_count % this->streamContainer->getStreamsPerDevice()) > stream);
            // zero the data on the device: all ilmmax rows, stream_point_count columns each
            // cudaMemset2DAsync(ptr, pitch, value, width-in-bytes, height, stream)
            cudaMemset2DAsync((void *) device_f, this->device_f_pitch[device],
                              0,
                              stream_point_count * sizeof(double),
                              ilmmax,
                              *this->streamContainer->getStream(device, stream)
                             );
            check_errors(__FILE__, __LINE__);
            offset += stream_point_count;
            device_f += stream_point_count;
        }
    }
}
/*
* Downloads part of a bubble from the device. Downloads to host exactly the same
* part as the upload function above uploads to device.
*
* NOTE: this function is designed to function together with the bubble multiplication &
* summation
*/
void Bubble::download(int lmax) {
    // calculate the total number of points in the bubbles each l,m -pair,
    int total_point_count = this->grid->ncell * (this->grid->nlip - 1) +1;
    // host rows are tightly packed: one (l,m)-row is total_point_count doubles
    size_t host_pitch = total_point_count * sizeof(double);
    // number of (l,m)-rows to transfer for the requested lmax
    // NOTE(review): assumes lmax <= device_memory_lmax -- TODO confirm
    int ilmmax = (lmax + 1) * (lmax + 1);
    // determine how many of the points belong to the current mpi-node
    int processor_point_count = total_point_count / this->number_of_processors
                  + ((total_point_count % this->number_of_processors) > this->processor_order_number);
    // get the offset to the f-array caused by other processors
    int remainder = total_point_count % this->number_of_processors;
    int offset = this->processor_order_number * total_point_count / this->number_of_processors
                  + ((remainder < this->processor_order_number) ? remainder : this->processor_order_number);
    double *device_f;
    double *host_f = &this->f[offset];
    check_errors(__FILE__, __LINE__);
    for (int device = 0; device < this->streamContainer->getNumberOfDevices(); device ++) {
        this->streamContainer->setDevice(device);
        // get the preallocated device pointers,
        // NOTE: The memory of bubble is allocated for its entire
        // length, thus we have to go to the part we want to download
        device_f = this->device_f[device];
        device_f = &device_f[offset];
        // determine how many of the mpi-nodes points belong to this device (gpu)
        int device_point_count = processor_point_count / this->streamContainer->getNumberOfDevices() +
                                ((processor_point_count % this->streamContainer->getNumberOfDevices()) > device);
        for (int stream = 0; stream < this->streamContainer->getStreamsPerDevice(); stream ++) {
            // determine the number of the points handled by this stream
            int stream_point_count = device_point_count / this->streamContainer->getStreamsPerDevice() +
                                    ((device_point_count % this->streamContainer->getStreamsPerDevice()) > stream);
            // download the data from the device: all ilmmax rows for stream_point_count columns
            // (device rows are pitched, host rows are tight)
            cudaMemcpy2DAsync((void *) host_f, host_pitch,
                              (void *) device_f, this->device_f_pitch[device],
                              stream_point_count * sizeof(double),
                              ilmmax,
                              cudaMemcpyDeviceToHost,
                              *this->streamContainer->getStream(device, stream)
                             );
            check_errors(__FILE__, __LINE__);
            offset += stream_point_count;
            device_f += stream_point_count;
            host_f += stream_point_count;
            check_errors(__FILE__, __LINE__);
        }
    }
}
/*
* Adds together the f-values of 'this' and input bubble 'bubble'
*
* NOTE: this function is designed to function together with the bubble multiplication
* NOTE: this function assumes that the bubbles have identical grids and with that,
* identical f_pitches
*/
void Bubble::add(Bubble *bubble) {
    // make sure that the k-values of the input functions are the same
    // this is done by decreasing the larger k-value to be equal
    // with the smaller
    check_errors(__FILE__, __LINE__);
    if (this->k > bubble->k) {
        this->decreaseK(this->k - bubble->k);
    }
    else if (this->k < bubble->k) {
        bubble->decreaseK(bubble->k - this->k);
    }
    check_errors(__FILE__, __LINE__);
    // calculate the total number of points in the bubbles each l,m -pair,
    int total_point_count = this->grid->ncell * (this->grid->nlip - 1) +1;
    // only the (l,m)-pairs present in both bubbles are summed
    int smaller_lmax = min(this->lmax, bubble->lmax);
    // determine how many of the points belong to the current mpi-node
    int processor_point_count = total_point_count / this->number_of_processors
                  + ((total_point_count % this->number_of_processors) > this->processor_order_number);
    // get the offset to the f-array caused by other processors
    int remainder = total_point_count % this->number_of_processors;
    int offset = this->processor_order_number * total_point_count / this->number_of_processors
                  + ((remainder < this->processor_order_number) ? remainder : this->processor_order_number);
    double *device_f;    // this bubble's f-values (accumulated into)
    double *device_f1;   // the added bubble's f-values
    int block_size = 256;
    for (int device = 0; device < this->streamContainer->getNumberOfDevices(); device ++) {
        this->streamContainer->setDevice(device);
        // both bubbles must be fully uploaded before launching the kernels
        this->waitBubbleUploaded(device);
        bubble->waitBubbleUploaded(device);
        // get the preallocated device pointers,
        // NOTE: The memory of bubble is allocated for its entire
        // length, thus we have to go to the part we want to handle
        device_f = this->device_f[device];
        device_f = &device_f[offset];
        device_f1 = bubble->device_f[device];
        device_f1 = &device_f1[offset];
        // determine how many of the mpi-nodes points belong to this device (gpu)
        int device_point_count = processor_point_count / this->streamContainer->getNumberOfDevices() +
                                ((processor_point_count % this->streamContainer->getNumberOfDevices()) > device);
        for (int stream = 0; stream < this->streamContainer->getStreamsPerDevice(); stream ++) {
            // determine the number of the points handled by this stream
            int stream_point_count = device_point_count / this->streamContainer->getStreamsPerDevice() +
                                    ((device_point_count % this->streamContainer->getStreamsPerDevice()) > stream);
            int grid_size = (stream_point_count + block_size - 1) / block_size;
            // call the kernel
            // NOTE: assumes both bubbles share the grid and thus the same f-pitch
            Bubble_sum_kernel <<<grid_size, block_size, 0, *this->streamContainer->getStream(device, stream)>>>
                             (device_f, device_f1, smaller_lmax, stream_point_count, this->device_f_pitch[device]);
            check_errors(__FILE__, __LINE__);
            // add the device pointers and the offset
            offset += stream_point_count;
            device_f += stream_point_count;
            device_f1 += stream_point_count;
        }
    }
}
/*
* Decreases the k-value of a bubble by k_decrease
*
* NOTE: this function is designed to function together with the bubble multiplication
* NOTE: this function assumes that the bubbles have identical grids and with that,
* identical f_pitches
*/
void Bubble::decreaseK(int k_decrease) {
    // calculate the total number of points in the bubbles each l,m -pair,
    int total_point_count = this->grid->ncell * (this->grid->nlip - 1) +1;
    // determine how many of the points belong to the current mpi-node
    int processor_point_count = total_point_count / this->number_of_processors
                  + ((total_point_count % this->number_of_processors) > this->processor_order_number);
    // get the offset to the f-array caused by other processors
    int remainder = total_point_count % this->number_of_processors;
    int offset = this->processor_order_number * total_point_count / this->number_of_processors
                  + ((remainder < this->processor_order_number) ? remainder : this->processor_order_number);
    double *device_f;    // f-values of this node's slice
    double *device_r;    // matching radial grid points
    int block_size = 256;
    for (int device = 0; device < this->streamContainer->getNumberOfDevices(); device ++) {
        this->streamContainer->setDevice(device);
        // the bubble data must be on the device before it is modified
        this->waitBubbleUploaded(device);
        // get the preallocated device pointers,
        // NOTE: The memory of bubble is allocated for its entire
        // length, thus we have to go to the part we want to handle
        device_f = this->device_f[device];
        device_f = &device_f[offset];
        device_r = this->grid->device_gridpoints[device];
        device_r = &device_r[offset];
        // determine how many of the mpi-nodes points belong to this device (gpu)
        int device_point_count = processor_point_count / this->streamContainer->getNumberOfDevices() +
                                ((processor_point_count % this->streamContainer->getNumberOfDevices()) > device);
        for (int stream = 0; stream < this->streamContainer->getStreamsPerDevice(); stream ++) {
            // determine the number of the points handled by this stream
            int stream_point_count = device_point_count / this->streamContainer->getStreamsPerDevice() +
                                    ((device_point_count % this->streamContainer->getStreamsPerDevice()) > stream);
            int grid_size = (stream_point_count + block_size - 1) / block_size;
            // call the kernel
            // NOTE(review): presumably scales each f-value by r^k_decrease
            // (kernel body not visible here) -- confirm against the kernel
            Bubble_decrease_k_kernel <<<grid_size, block_size, 0, *this->streamContainer->getStream(device, stream)>>>
                                    (device_f, device_r, k_decrease, this->lmax, stream_point_count, this->device_f_pitch[device]);
            check_errors(__FILE__, __LINE__);
            // add the device pointers and the offset
            offset += stream_point_count;
            device_f += stream_point_count;
            device_r += stream_point_count;
        }
    }
}
/*
* Integrates over the bubble. We only need to integrate over the s-bubble.
*/
double Bubble::integrate() {
    // calculate the total number of points in the bubbles each l,m -pair,
    int total_point_count = this->grid->getShape();
    // check if the integrator has been inited, if not, init it
    // (lazily constructed; released in destroy())
    if (!this->integrator) {
        this->integrator = new Integrator1D(this->streamContainer, this->grid, this->processor_order_number, this->number_of_processors);
    }
    // upload the l,m=0 radial function f to the integrator
    this->integrator->upload(this->f);
    check_errors(__FILE__, __LINE__);
    // determine how many of the points belong to the current mpi-node
    int processor_point_count = total_point_count / this->number_of_processors
                  + ((total_point_count % this->number_of_processors) > this->processor_order_number);
    // get the offset to the f-array caused by other processors
    int remainder = total_point_count % this->number_of_processors;
    int offset = this->processor_order_number * total_point_count / this->number_of_processors
                  + ((remainder < this->processor_order_number) ? remainder : this->processor_order_number);
    // get the partial s-bubble device vectors residing now in the integrators device memory
    double **device_vectors = this->integrator->getDeviceVectors();
    double *device_vector;
    double *device_r;
    // multiply the integration vector with r^(2+this->k)
    // get the times we have to multiply the vector with r, i.e., 2+this->k
    // NOTE: this must be larger or equal to zero
    int k_change = 2 + this->k;
    if (k_change > 0) {
        int block_size = 256;
        for (int device = 0; device < this->streamContainer->getNumberOfDevices(); device ++) {
            this->streamContainer->setDevice(device);
            // get the preallocated device pointers,
            // NOTE: The memory of gridpoints is allocated for its entire
            // length, thus we have to go to the part we want to use;
            // however, the integrator only has the memory it needs, thus we don't need to
            // offset the device_vector
            device_vector = device_vectors[device];
            device_r = this->grid->device_gridpoints[device];
            device_r = &device_r[offset];
            // determine how many of the mpi-nodes points belong to this device (gpu)
            int device_point_count = processor_point_count / this->streamContainer->getNumberOfDevices() +
                                    ((processor_point_count % this->streamContainer->getNumberOfDevices()) > device);
            for (int stream = 0; stream < this->streamContainer->getStreamsPerDevice(); stream ++) {
                // determine the number of the points handled by this stream
                int stream_point_count = device_point_count / this->streamContainer->getStreamsPerDevice() +
                                        ((device_point_count % this->streamContainer->getStreamsPerDevice()) > stream);
                int grid_size = (stream_point_count + block_size - 1) / block_size;
                // call the decrease_k- kernel by using lmax = 0
                // (pitch argument 0: only the single l,m=0 row is touched)
                Bubble_decrease_k_kernel <<<grid_size, block_size, 0, *this->streamContainer->getStream(device, stream)>>>
                                        (device_vector, device_r, k_change, 0, stream_point_count, 0);
                check_errors(__FILE__, __LINE__);
                // add the device pointers and the offset
                offset += stream_point_count;
                device_vector += stream_point_count;
                device_r += stream_point_count;
                check_errors(__FILE__, __LINE__);
            }
        }
    }
    else if (k_change < 0) {
        printf("Invalid k-value (%d) at bubble-integrate, must be larger or equal with -2. At file '%s', line number %d", this->k, __FILE__, __LINE__);
        exit(-1);
    }
    // NOTE(review): the 4*pi factor presumably comes from the angular part of
    // the s-function integral -- confirm against the maths documentation
    return 4.0 * M_PI * this->integrator->integrate();
}
/*
 * Stores the host-side f-array pointer. The cudaHostRegister call that
 * would pin the memory for faster transfers is currently disabled
 * (see the commented-out code below).
 */
void Bubble::registerHost(double *f) {
    check_errors(__FILE__, __LINE__);
    this->f = f;
    /*int ilmmax = (this->lmax + 1) * (this->lmax + 1);
    // calculate the total number of points in the bubbles each l,m -pair,
    int total_point_count = this->grid->ncell * (this->grid->nlip - 1) +1;
    cudaHostRegister(this->f, sizeof(double)*ilmmax*total_point_count, cudaHostRegisterPortable);*/
    check_errors(__FILE__, __LINE__);
}
/*
 * Releases this bubble's per-device arrays, the device-side copies of the
 * object, the host-side pointer arrays and the lazily-created integrator.
 * The grid is not destroyed here (only the reference is cleared).
 */
void Bubble::destroy() {
    //this->grid->destroy();
    //check_errors(__FILE__, __LINE__);
    //delete this->grid;
    this->grid = NULL;
    // free the device-side memory on every device
    for (int device = 0; device < this->streamContainer->getNumberOfDevices(); device++) {
        this->streamContainer->setDevice(device);
        cudaFree(this->device_f[device]);
        check_errors(__FILE__, __LINE__);
        cudaFree(this->device_cf[device]);
        check_errors(__FILE__, __LINE__);
        cudaFree(this->device_df[device]);
        check_errors(__FILE__, __LINE__);
        cudaFree(this->device_copies[device]);
        check_errors(__FILE__, __LINE__);
    }
    // release the host-side per-device pointer arrays
    delete[] this->device_copies;
    delete[] this->device_f;
    delete[] this->device_df;
    delete[] this->device_f_pitch;
    delete[] this->device_cf;
    // NOTE(review): the cudaEvent_t objects referenced by uploaded_events are
    // not destroyed here -- presumably owned by the StreamContainer; confirm
    delete[] this->uploaded_events;
    // check if integrator is null pointer, if not
    // delete the integrator
    if (this->integrator) {
        this->integrator->destroy();
        delete this->integrator;
        this->integrator = NULL;
    }
    check_errors(__FILE__, __LINE__);
    //cudaHostUnregister(this);
}
/*
* Set MPI-configuration used by the bubble object.
*/
void Bubble::setProcessorConfiguration( int processor_order_number, int number_of_processors) {
    // store the MPI rank of this process and the total number of processes
    this->processor_order_number = processor_order_number;
    this->number_of_processors = number_of_processors;
}
/**************************************************************
* Bubbles-implementation *
**************************************************************/
int Bubbles::getBubbleCount() {
    // number of Bubble objects managed by this container
    return nbub;
}
/* Constructs an owning container with room for 'nbub' Bubble pointers. */
Bubbles::Bubbles(int nbub) {
    this->is_sub_bubbles = false;
    this->nbub = nbub;
    this->bubbles = new Bubble*[nbub];
}
/*
* Init new Bubbles by making a copy of the old.
*
* NOTE: This makes a deep copy of the old bubbles, meaning that
* new memory places are allocated for the underlying Bubble objects.
*/
/*
 * Deep-copy constructor: every underlying Bubble is cloned into freshly
 * allocated memory with the given lmax and k.
 */
Bubbles::Bubbles(Bubbles *old_bubbles, int lmax, int k) {
    this->is_sub_bubbles = false;
    this->nbub = old_bubbles->getBubbleCount();
    this->bubbles = new Bubble*[this->nbub];
    for (int i = 0; i < this->nbub; i++) {
        this->bubbles[i] = new Bubble(old_bubbles->bubbles[i], lmax, k);
    }
}
/*
* Get new bubbles object containing some of the original bubbles.
* The bubbles selected in the new objects are the ones with
* the ibub values matching to those in input parameter 'ibubs'.
* NOTE: this function makes a shallow copy of the input bubbles 'this',
* i.e., the underlying Bubble objects are copied as references only
*/
/*
 * Returns a shallow container referencing the existing Bubble objects whose
 * global order numbers match the entries of 'ibubs'.
 */
Bubbles *Bubbles::getSubBubbles(int *ibubs, int nbub) {
    Bubbles *result = new Bubbles(nbub);
    // mark as non-owning so destroy() leaves the Bubble objects alive
    result->is_sub_bubbles = true;
    for (int i = 0; i < nbub; i++) {
        result->bubbles[i] = this->getBubble(ibubs[i]);
    }
    return result;
}
/*
* Get the pointer to the Bubble with local order number 'i' equal to
* input parameter 'i'. If not found NULL is returned.
*
* @param i - The local order number of the bubble
*/
/*
 * Returns the Bubble at local slot 'i', or NULL when 'i' is out of range.
 * FIX: also guards against negative 'i' (the original only checked the upper
 * bound, so a negative index caused an out-of-bounds array read).
 */
Bubble *Bubbles::getBubbleWithLocalOrderNumber(int i) {
    if (i >= 0 && i < this->nbub) {
        return this->bubbles[i];
    }
    return NULL;
}
/*
* Get the pointer to the Bubble with global order number 'ibub' equal to
* input parameter 'ibub'. If not found NULL is returned.
*
* @param ibub - The global order number of the bubble
*/
/*
 * Linear search for the Bubble whose global order number equals 'ibub';
 * returns NULL when no such bubble exists.
 */
Bubble *Bubbles::getBubble(int ibub) {
    int count = this->getBubbleCount();
    for (int i = 0; i < count; i++) {
        Bubble *candidate = this->bubbles[i];
        if (candidate->ibub == ibub) {
            return candidate;
        }
    }
    return NULL;
}
/*
* Check if the Bubbles contains a Bubble with global order number 'ibub'.
*
* @param ibub - The global order number of the bubble
*/
/* True iff a Bubble with global order number 'ibub' is present. */
bool Bubbles::containsBubble(int ibub) {
    return this->getBubble(ibub) != NULL;
}
/*
* Init a bubble with global order number 'ibub' to the 'i':th slot in the
* internal bubbles array. Contains also the values for the bubble.
*
* @param grid - The grid used in the bubble
* @param i - The internal order number of the bubble
* @param ibub - The global order number of the bubble
* @param center - The global center point of the bubble
* @param lmax - The maximum value of quantum number 'l' for the bubble
* @param k - The parameter k for the r^k multiplier of the values
* @param bf - The values of the bubble
* @param charge - The charge of the atom at the center of the bubble
 * @param streamContainer - The container holding the streams used in cuda evaluation of anything
* related to this object
*/
void Bubbles::initBubble(Grid1D *grid, int i, int ibub, double center[3], int lmax,
                         int k, double *bf, double charge, StreamContainer *streamContainer) {
    // construct the Bubble (with initial values 'bf') into local slot 'i'
    this->bubbles[i] = new Bubble(ibub, grid, center, lmax, k, bf, charge, streamContainer);
}
/*
* Init a bubble with global order number 'ibub' to the 'i':th slot in the
* internal bubbles array. Contains also the values for the bubble.
*
* @param grid - The grid used in the bubble
* @param i - The internal order number of the bubble
* @param ibub - The global order number of the bubble
* @param center - The global center point of the bubble
* @param lmax - The maximum value of quantum number 'l' for the bubble
* @param k - The parameter k for the r^k multiplier of the values
* @param charge - The charge of the atom at the center of the bubble
 * @param streamContainer - The container holding the streams used in cuda evaluation of anything
* related to this object
*/
void Bubbles::initBubble(Grid1D *grid, int i, int ibub, double center[3], int lmax,
                         int k, double charge, StreamContainer *streamContainer) {
    check_errors(__FILE__, __LINE__);
    // construct the Bubble (without initial values) into local slot 'i'
    this->bubbles[i] = new Bubble(ibub, grid, center, lmax, k, charge, streamContainer);
}
/*
 * Unregisters the host memory of all bubbles. Currently a no-op: the
 * matching cudaHostRegister call is also disabled (see Bubble::registerHost).
 */
void Bubbles::unregister() {
    /*for (int ibub = 0; ibub < this->getBubbleCount(); ibub ++) {
        cudaHostUnregister(this->getBubble(ibub)->f);
        check_errors(__FILE__, __LINE__);
    }*/
}
/* Makes the default stream on 'device' wait for every bubble's upload event. */
void Bubbles::waitBubblesUploaded(int device) {
    int count = this->getBubbleCount();
    for (int i = 0; i < count; i++) {
        this->bubbles[i]->waitBubbleUploaded(device);
    }
}
/*
* Set MPI-configuration used by the bubble object.
*/
/* Forwards the MPI configuration to every contained Bubble. */
void Bubbles::setProcessorConfiguration( int processor_order_number, int number_of_processors) {
    int count = this->getBubbleCount();
    for (int i = 0; i < count; i++) {
        this->bubbles[i]->setProcessorConfiguration(processor_order_number, number_of_processors);
    }
}
/* Returns the sum of the integrals of all contained bubbles. */
double Bubbles::integrate() {
    double total = 0.0;
    int count = this->getBubbleCount();
    for (int i = 0; i < count; i++) {
        total += this->getBubbleWithLocalOrderNumber(i)->integrate();
    }
    return total;
}
/* Downloads every bubble from the device, each up to its own lmax. */
void Bubbles::download() {
    int count = this->getBubbleCount();
    for (int i = 0; i < count; i++) {
        Bubble *bubble = this->bubbles[i];
        bubble->download(bubble->lmax);
    }
}
/*
 * Adds the f-values of the matching bubbles of 'bubbles' into this container's
 * bubbles (matching by global order number 'ibub').
 *
 * FIX: iterate over this->getBubbleCount() instead of the input's count.
 * The original looped over bubbles->getBubbleCount() while indexing
 * this->bubbles[i], which reads out of bounds whenever the input container
 * holds more Bubble objects than 'this' (and silently skips bubbles of
 * 'this' when it holds fewer) -- contradicting its own comment
 * "go through all the Bubble-objects present in this".
 */
void Bubbles::add(Bubbles *bubbles) {
    // go through all the Bubble-objects present in this
    for (int i = 0; i < this->getBubbleCount(); i ++) {
        // get the matching bubble in the added bubbles
        Bubble * bubble = bubbles->getBubble(this->bubbles[i]->ibub);
        // if the corresponding Bubble exists in both the Bubbles, do the add
        if (bubble) {
            this->bubbles[i]->add(bubble);
        }
    }
    check_errors(__FILE__, __LINE__);
}
/*
 * Releases the container. Sub-bubble containers only borrow references to
 * the Bubble objects, so the underlying bubbles are destroyed only when
 * this container owns them.
 */
void Bubbles::destroy() {
    if (!this->is_sub_bubbles) {
        int count = this->getBubbleCount();
        for (int i = 0; i < count; i++) {
            this->bubbles[i]->destroy();
            delete this->bubbles[i];
        }
    }
    delete[] this->bubbles;
}
/*
 * Injects (evaluates and adds) all bubbles onto the 3D grid cube 'cube',
 * optionally evaluating the x/y/z gradient fields into the corresponding
 * gradient cubes. The cube's z-slices are split across devices and streams.
 *
 * NOTE(review): when a gradient flag is false the matching dev_gradient_*
 * pointer is passed to the kernels uninitialized; the template
 * instantiations launched in those cases apparently do not dereference
 * it -- confirm against the kernel implementations.
 */
void Bubbles::inject(Grid3D *grid3d, CudaCube *cube, int lmin, CudaCube *gradients_cube_x,
                     CudaCube *gradients_cube_y, CudaCube *gradients_cube_z, bool evaluate_value,
                     bool evaluate_gradients_x, bool evaluate_gradients_y, bool evaluate_gradients_z) {
    check_errors(__FILE__, __LINE__);
    // total number of z-slices to distribute over the devices/streams
    int total_slice_count = cube->getShape(Z_);
    // the minimum l is 0 always in the multiplication
    int device_slice_count;
    // get the pointer arrays from the cubes
    double **device_cubes = cube->getDeviceCubes();
    double **device_gradients_x, **device_gradients_y, **device_gradients_z;
    // get the device gradient result pointers
    if (evaluate_gradients_x) device_gradients_x = gradients_cube_x->getDeviceCubes();
    if (evaluate_gradients_y) device_gradients_y = gradients_cube_y->getDeviceCubes();
    if (evaluate_gradients_z) device_gradients_z = gradients_cube_z->getDeviceCubes();
    size_t *device_pitches = cube->getDevicePitches();
    int *device_memory_shape = cube->getDeviceMemoryShape();
    int slice_offset = 0;
    Bubble *bubble;
    StreamContainer *streamContainer = cube->getStreamContainer();
    // copy the cubes to the device & execute the kernels
    for (int device = 0; device < streamContainer->getNumberOfDevices(); device ++) {
        // set the used device (gpu)
        streamContainer->setDevice(device);
        double *dev_cube = device_cubes[device];
        double *dev_gradient_x, *dev_gradient_y, *dev_gradient_z;
        // get the gradient addresses for the device
        if (evaluate_gradients_x) dev_gradient_x = device_gradients_x[device];
        if (evaluate_gradients_y) dev_gradient_y = device_gradients_y[device];
        if (evaluate_gradients_z) dev_gradient_z = device_gradients_z[device];
        // calculate the number of vectors this device handles
        device_slice_count =  total_slice_count / streamContainer->getNumberOfDevices()
                                  + ((total_slice_count % streamContainer->getNumberOfDevices()) > device);
        for (int stream = 0; stream < streamContainer->getStreamsPerDevice(); stream++) {
            // determine the count of vectors handled by this stream
            int slice_count =  device_slice_count / streamContainer->getStreamsPerDevice()
                                  + ((device_slice_count % streamContainer->getStreamsPerDevice()) > stream);
            check_errors(__FILE__, __LINE__);
            // get the launch configuration for the f1-inject
            dim3 block, grid;
            cube->getLaunchConfiguration(&grid, &block, slice_count, INJECT_BLOCK_SIZE);
            if (slice_count > 0) {
                // inject bubbles to the cube
                for (int i = 0; i < this->getBubbleCount(); i++) {
                    bubble = this->getBubbleWithLocalOrderNumber(i);
                    // wait that the bubble is uploaded to the device before starting
                    if (stream == 0) bubble->waitBubbleUploaded(device);
                    // call the kernel: the template flags select
                    // <lmin0, evaluate_value, grad_x, grad_y, grad_z>
                    if (lmin == 0) {
                        if (evaluate_gradients_x && evaluate_gradients_y && evaluate_gradients_z) {
                            if (evaluate_value) {
                                // value + all three gradients
                                Bubbles_evaluate_grid_gradients < true, true, true, true, true>
                                    <<< grid, block, INJECT_BLOCK_SIZE * sizeof(double) * 8,
                                        *streamContainer->getStream(device, stream) >>>
                                            (bubble->device_copies[device],
                                             dev_cube,
                                             dev_gradient_x,
                                             dev_gradient_y,
                                             dev_gradient_z,
                                             grid3d->axis[X_]->device_gridpoints[device],
                                             grid3d->axis[Y_]->device_gridpoints[device],
                                             grid3d->axis[Z_]->device_gridpoints[device],
                                             grid3d->shape[X_],
                                             grid3d->shape[Y_],
                                             grid3d->shape[Z_],
                                             bubble->crd[X_],
                                             bubble->crd[Y_],
                                             bubble->crd[Z_],
                                             bubble->k,
                                             slice_offset,
                                             device_pitches[device],
                                             device_memory_shape[Y_],
                                             slice_count,
                                             1.0);
                            }
                            else {
                                // all three gradients only
                                Bubbles_evaluate_grid_gradients < true, false, true, true, true>
                                    <<< grid, block, INJECT_BLOCK_SIZE * sizeof(double) * 8,
                                        *streamContainer->getStream(device, stream) >>>
                                            (bubble->device_copies[device],
                                             dev_cube,
                                             dev_gradient_x,
                                             dev_gradient_y,
                                             dev_gradient_z,
                                             grid3d->axis[X_]->device_gridpoints[device],
                                             grid3d->axis[Y_]->device_gridpoints[device],
                                             grid3d->axis[Z_]->device_gridpoints[device],
                                             grid3d->shape[X_],
                                             grid3d->shape[Y_],
                                             grid3d->shape[Z_],
                                             bubble->crd[X_],
                                             bubble->crd[Y_],
                                             bubble->crd[Z_],
                                             bubble->k,
                                             slice_offset,
                                             device_pitches[device],
                                             device_memory_shape[Y_],
                                             slice_count,
                                             1.0);
                            }
                        }
                        else if (evaluate_gradients_x) {
                            // x-gradient only
                            Bubbles_evaluate_grid_gradients < true, false, true, false, false>
                                <<< grid, block, INJECT_BLOCK_SIZE * sizeof(double) * 8,
                                    *streamContainer->getStream(device, stream) >>>
                                        (bubble->device_copies[device],
                                         dev_cube,
                                         dev_gradient_x,
                                         dev_gradient_y,
                                         dev_gradient_z,
                                         grid3d->axis[X_]->device_gridpoints[device],
                                         grid3d->axis[Y_]->device_gridpoints[device],
                                         grid3d->axis[Z_]->device_gridpoints[device],
                                         grid3d->shape[X_],
                                         grid3d->shape[Y_],
                                         grid3d->shape[Z_],
                                         bubble->crd[X_],
                                         bubble->crd[Y_],
                                         bubble->crd[Z_],
                                         bubble->k,
                                         slice_offset,
                                         device_pitches[device],
                                         device_memory_shape[Y_],
                                         slice_count,
                                         1.0);
                        }
                        else if (evaluate_gradients_y) {
                            // y-gradient only
                            Bubbles_evaluate_grid_gradients < true, false, false, true, false>
                                <<< grid, block, INJECT_BLOCK_SIZE * sizeof(double) * 8,
                                    *streamContainer->getStream(device, stream) >>>
                                        (bubble->device_copies[device],
                                         dev_cube,
                                         dev_gradient_x,
                                         dev_gradient_y,
                                         dev_gradient_z,
                                         grid3d->axis[X_]->device_gridpoints[device],
                                         grid3d->axis[Y_]->device_gridpoints[device],
                                         grid3d->axis[Z_]->device_gridpoints[device],
                                         grid3d->shape[X_],
                                         grid3d->shape[Y_],
                                         grid3d->shape[Z_],
                                         bubble->crd[X_],
                                         bubble->crd[Y_],
                                         bubble->crd[Z_],
                                         bubble->k,
                                         slice_offset,
                                         device_pitches[device],
                                         device_memory_shape[Y_],
                                         slice_count,
                                         1.0);
                        }
                        else if (evaluate_gradients_z) {
                            // z-gradient only
                            Bubbles_evaluate_grid_gradients < true, false, false, false, true>
                                <<< grid, block, INJECT_BLOCK_SIZE * sizeof(double) * 8,
                                    *streamContainer->getStream(device, stream) >>>
                                        (bubble->device_copies[device],
                                         dev_cube,
                                         dev_gradient_x,
                                         dev_gradient_y,
                                         dev_gradient_z,
                                         grid3d->axis[X_]->device_gridpoints[device],
                                         grid3d->axis[Y_]->device_gridpoints[device],
                                         grid3d->axis[Z_]->device_gridpoints[device],
                                         grid3d->shape[X_],
                                         grid3d->shape[Y_],
                                         grid3d->shape[Z_],
                                         bubble->crd[X_],
                                         bubble->crd[Y_],
                                         bubble->crd[Z_],
                                         bubble->k,
                                         slice_offset,
                                         device_pitches[device],
                                         device_memory_shape[Y_],
                                         slice_count,
                                         1.0);
                        }
                        else if (evaluate_value) {
                            // value only (pitched cube layout)
                            Bubbles_evaluate_grid_pitched
                                <<< grid, block, INJECT_BLOCK_SIZE * sizeof(double) * 8,
                                    *streamContainer->getStream(device, stream) >>>
                                        (bubble->device_copies[device],
                                         dev_cube,
                                         grid3d->axis[X_]->device_gridpoints[device],
                                         grid3d->axis[Y_]->device_gridpoints[device],
                                         grid3d->axis[Z_]->device_gridpoints[device],
                                         grid3d->shape[X_],
                                         grid3d->shape[Y_],
                                         grid3d->shape[Z_],
                                         bubble->crd[X_],
                                         bubble->crd[Y_],
                                         bubble->crd[Z_],
                                         bubble->k,
                                         slice_offset,
                                         device_pitches[device],
                                         device_memory_shape[Y_],
                                         slice_count,
                                         1.0);
                        }
                    }
                    else if (evaluate_value) {
                        // value only, starting from a nonzero lmin
                        Bubbles_evaluate_grid_lmin
                            <<< grid, block, INJECT_BLOCK_SIZE * sizeof(double) * 8,
                                *streamContainer->getStream(device, stream) >>>
                                    (bubble->device_copies[device],
                                     dev_cube,
                                     grid3d->axis[X_]->device_gridpoints[device],
                                     grid3d->axis[Y_]->device_gridpoints[device],
                                     grid3d->axis[Z_]->device_gridpoints[device],
                                     grid3d->shape[X_],
                                     grid3d->shape[Y_],
                                     grid3d->shape[Z_],
                                     bubble->crd[X_],
                                     bubble->crd[Y_],
                                     bubble->crd[Z_],
                                     bubble->k,
                                     slice_offset,
                                     device_pitches[device],
                                     device_memory_shape[Y_],
                                     slice_count,
                                     lmin,
                                     1.0);
                    }
                    check_errors(__FILE__, __LINE__);
                }
            }
            // increase the address by the number of vectors in this array
            // (slices are pitched: pitch/sizeof(double) doubles per row,
            //  device_memory_shape[Y_] rows per slice)
            if (evaluate_value)       dev_cube       += slice_count * device_pitches[device] / sizeof(double) * device_memory_shape[Y_];
            if (evaluate_gradients_x) dev_gradient_x += slice_count * device_pitches[device] / sizeof(double) * device_memory_shape[Y_];
            if (evaluate_gradients_y) dev_gradient_y += slice_count * device_pitches[device] / sizeof(double) * device_memory_shape[Y_];
            if (evaluate_gradients_z) dev_gradient_z += slice_count * device_pitches[device] / sizeof(double) * device_memory_shape[Y_];
            slice_offset += slice_count;
        }
    }
}
/**************************************************************
* BubblesEvaluator function implementations *
**************************************************************/
/*
* Evaluate the bubbles at preset points. The results are stored in the device memory.
*
* @param gradient_direction - possible values X_ = 0, Y_ = 1, Z_ = 2, (X_, Y_, Z_) = 3 && this->evaluateGradients
* anything else: no gradients
*/
void BubblesEvaluator::evaluatePoints(Points *result_points, Points *gradient_points_x, Points *gradient_points_y, Points *gradient_points_z, int gradient_direction) {
int warp_size = 32;
int total_warp_count = result_points->point_coordinates->number_of_points / warp_size + ((result_points->point_coordinates->number_of_points % warp_size) > 0);
int point_offset = 0;
for (int device = 0; device < this->streamContainer->getNumberOfDevices(); device ++) {
this->streamContainer->setDevice(device);
// allocate space for device results and device points
int device_warp_count = total_warp_count / this->streamContainer->getNumberOfDevices()
+ ((total_warp_count % this->streamContainer->getNumberOfDevices()) > device);
int device_point_count = device_warp_count * warp_size;
int device_point_offset = 0;
check_errors(__FILE__, __LINE__);
// get the pointers to the device points & results
double *device_points_ptr = result_points->point_coordinates->device_coordinates[device];
double *device_results_ptr = result_points->device_values[device];
double *device_gradients_x_ptr = NULL;
double *device_gradients_y_ptr = NULL;
double *device_gradients_z_ptr = NULL;
if (gradient_direction == 3) {
device_gradients_x_ptr = gradient_points_x->device_values[device];
device_gradients_y_ptr = gradient_points_y->device_values[device];
device_gradients_z_ptr = gradient_points_z->device_values[device];
}
else if (gradient_direction < 3 && gradient_direction >= 0) {
device_gradients_x_ptr = result_points->device_values[device];
device_gradients_y_ptr = result_points->device_values[device];
device_gradients_z_ptr = result_points->device_values[device];
}
for (int stream = 0; stream < this->streamContainer->getStreamsPerDevice(); stream ++) {
// get the number of points that are in the responsibility of this stream
int stream_warp_count = device_warp_count / this->streamContainer->getStreamsPerDevice()
+ ((device_warp_count % streamContainer->getStreamsPerDevice()) > stream);
int stream_point_count = stream_warp_count * warp_size;
// make sure that the last stream does not go over board
if (stream_point_count + point_offset > result_points->point_coordinates->number_of_points) {
stream_point_count = result_points->point_coordinates->number_of_points - point_offset;
}
check_errors(__FILE__, __LINE__);
if (stream_point_count > 0) {
for (int i = 0; i < this->bubbles->getBubbleCount(); i++) {
Bubble *bubble = this->bubbles->getBubbleWithLocalOrderNumber(i);
// wait that the bubble is uploaded before calling the kernel
if (stream == 0) bubble->waitBubbleUploaded(device);
int grid_size = (stream_point_count + INJECT_BLOCK_SIZE - 1) / INJECT_BLOCK_SIZE;
//printf("ibub: %d, device: %d, stream: %d, grid_size: %d, block_size: %d, stream_point_count: %d, device_point_offset: %d, device_point_count: %d, point_count: %d\n",
// ibub, device, stream, grid_size, INJECT_BLOCK_SIZE, stream_point_count, device_point_offset, device_point_count, this->point_count);
if (gradient_direction == X_) {
Bubbles_evaluate_gradient_points
<true, false, true, false, false>
<<< grid_size, INJECT_BLOCK_SIZE, INJECT_BLOCK_SIZE * sizeof(double) * 7,
*this->streamContainer->getStream(device, stream) >>>
(bubble->device_copies[device],
device_results_ptr,
device_gradients_x_ptr,
device_gradients_y_ptr,
device_gradients_z_ptr,
device_points_ptr,
device_point_count,
bubble->crd[X_],
bubble->crd[Y_],
bubble->crd[Z_],
bubble->k,
stream_point_count,
device_point_offset,
1.0
);
}
else if (gradient_direction == Y_) {
Bubbles_evaluate_gradient_points
<true, false, false, true, false>
<<< grid_size, INJECT_BLOCK_SIZE, INJECT_BLOCK_SIZE * sizeof(double) * 7,
*this->streamContainer->getStream(device, stream) >>>
(bubble->device_copies[device],
device_results_ptr,
device_gradients_x_ptr,
device_gradients_y_ptr,
device_gradients_z_ptr,
device_points_ptr,
device_point_count,
bubble->crd[X_],
bubble->crd[Y_],
bubble->crd[Z_],
bubble->k,
stream_point_count,
device_point_offset,
1.0
);
}
else if (gradient_direction == Z_) {
Bubbles_evaluate_gradient_points
<true, false, false, false, true>
<<< grid_size, INJECT_BLOCK_SIZE, INJECT_BLOCK_SIZE * sizeof(double) * 7,
*this->streamContainer->getStream(device, stream) >>>
(bubble->device_copies[device],
device_results_ptr,
device_gradients_x_ptr,
device_gradients_y_ptr,
device_gradients_z_ptr,
device_points_ptr,
device_point_count,
bubble->crd[X_],
bubble->crd[Y_],
bubble->crd[Z_],
bubble->k,
stream_point_count,
device_point_offset,
1.0
);
}
else if (gradient_direction == 3) {
Bubbles_evaluate_gradient_points
<true, true, true, true, true>
<<< grid_size, INJECT_BLOCK_SIZE, INJECT_BLOCK_SIZE * sizeof(double) * 7,
*this->streamContainer->getStream(device, stream) >>>
(bubble->device_copies[device],
device_results_ptr,
device_gradients_x_ptr,
device_gradients_y_ptr,
device_gradients_z_ptr,
device_points_ptr,
device_point_count,
bubble->crd[X_],
bubble->crd[Y_],
bubble->crd[Z_],
bubble->k,
stream_point_count,
device_point_offset,
1.0
);
}
else {
Bubbles_evaluate_points_simple
<<< grid_size, INJECT_BLOCK_SIZE, INJECT_BLOCK_SIZE * sizeof(double) * 7,
*this->streamContainer->getStream(device, stream) >>>
(bubble->device_copies[device],
device_results_ptr,
device_points_ptr,
device_point_count,
bubble->crd[X_],
bubble->crd[Y_],
bubble->crd[Z_],
bubble->k,
stream_point_count,
device_point_offset,
1.0
);
}
check_errors(__FILE__, __LINE__);
}
}
// add the pointers
point_offset += stream_point_count;
device_point_offset += stream_point_count;
}
check_errors(__FILE__, __LINE__);
}
}
/**************************************************************
* Function3DMultiplier-implementation *
**************************************************************/
/*
* Injects the f1_bubbles to this->cube1 and f2_bubbles to this->cube2,
* multiplies this->cube1 with this->cube2 and de-injects the 'result_bubbles'
* from 'this->cube1'
*
* @param f1_bubbles
* @param f2_bubbles
* @param result_bubbles
*/
/*
 * Multiplies cube1 by cube2 on the GPUs: the f1/f2 bubbles are first
 * injected (added) to their respective cubes, the cubes are multiplied
 * in place into cube1, and finally the result bubbles are deducted from
 * the product. The z-slices are split across devices and further across
 * the streams of each device.
 */
void Function3DMultiplier::multiply(Bubbles *f1_bubbles, Bubbles *f2_bubbles, Bubbles *result_bubbles) {
    // total number of z-slices to distribute over devices/streams
    int total_slice_count = this->cube1->getShape(Z_);
    // the minimum l is 0 always in the multiplication
    int device_slice_count;
    // get the pointer arrays from the cubes
    double **f1_device_cubes = this->cube1->getDeviceCubes();
    size_t *f1_device_pitches = this->cube1->getDevicePitches();
    double **f2_device_cubes = this->cube2->getDeviceCubes();
    size_t *f2_device_pitches = this->cube2->getDevicePitches();
    // device-side (pitched) memory shapes of the two cubes
    int *f1_device_memory_shape = this->cube1->getDeviceMemoryShape();
    int *f2_device_memory_shape = this->cube2->getDeviceMemoryShape();
    // logical (unpadded) shapes of the two cubes
    int f1_shape[3];
    f1_shape[X_] = this->cube1->getShape(X_);
    f1_shape[Y_] = this->cube1->getShape(Y_);
    f1_shape[Z_] = this->cube1->getShape(Z_);
    int f2_shape[3];
    f2_shape[X_] = this->cube2->getShape(X_);
    f2_shape[Y_] = this->cube2->getShape(Y_);
    f2_shape[Z_] = this->cube2->getShape(Z_);
    int slice_offset = 0;
    Bubble *bubble;
    // copy the cubes to the device & execute the kernels
    for (int device = 0; device < this->streamContainer->getNumberOfDevices(); device ++) {
        // set the used device (gpu)
        this->streamContainer->setDevice(device);
        //cudaDeviceSetSharedMemConfig(cudaSharedMemBankSizeEightByte);
        //int first_block = 0;
        double *dev_f1_cube = f1_device_cubes[device];
        double *dev_f2_cube = f2_device_cubes[device];
        // calculate the number of vectors this device handles
        // (even split; the remainder slices go to the first devices)
        device_slice_count = total_slice_count / this->streamContainer->getNumberOfDevices()
                                  + ((total_slice_count % this->streamContainer->getNumberOfDevices()) > device);
        for (int stream = 0; stream < this->streamContainer->getStreamsPerDevice(); stream++) {
            // determine the count of vectors handled by this stream
            int slice_count = device_slice_count / this->streamContainer->getStreamsPerDevice()
                                  + ((device_slice_count % this->streamContainer->getStreamsPerDevice()) > stream);
            if (slice_count > 0) {
                // get the launch configuration for the f1-inject
                dim3 block, grid;
                this->cube1->getLaunchConfiguration(&grid, &block, slice_count, INJECT_BLOCK_SIZE);
                check_errors(__FILE__, __LINE__);
                // inject the f1 bubbles to the f1_cube (and sum);
                // the trailing 1.0 is the multiplier: bubble values are added
                for (int i = 0; i < f1_bubbles->getBubbleCount(); i++) {
                    bubble = f1_bubbles->getBubbleWithLocalOrderNumber(i);
                    // wait that the bubble is uploaded to the device before starting
                    if (stream == 0) bubble->waitBubbleUploaded(device);
                    Bubbles_evaluate_grid_pitched
                        <<< grid, block, INJECT_BLOCK_SIZE * sizeof(double) * 7,
                            *this->streamContainer->getStream(device, stream) >>>
                        (bubble->device_copies[device],
                         dev_f1_cube,
                         this->grid->axis[X_]->device_gridpoints[device],
                         this->grid->axis[Y_]->device_gridpoints[device],
                         this->grid->axis[Z_]->device_gridpoints[device],
                         f1_shape[X_],
                         f1_shape[Y_],
                         f1_shape[Z_],
                         bubble->crd[X_],
                         bubble->crd[Y_],
                         bubble->crd[Z_],
                         bubble->k,
                         slice_offset,
                         f1_device_pitches[device],
                         f1_device_memory_shape[Y_],
                         slice_count,
                         1.0);
                    check_errors(__FILE__, __LINE__);
                    // printf("after offending kernel\n");
                    // fflush(stdout);
                }
                check_errors(__FILE__, __LINE__);
                // get the launch configuration for the f2-inject
                this->cube2->getLaunchConfiguration(&grid, &block, slice_count, INJECT_BLOCK_SIZE);
                // inject the f2 bubbles to the f1_cube (and sum)
                for (int i = 0; i < f2_bubbles->getBubbleCount(); i++) {
                    bubble = f2_bubbles->getBubbleWithLocalOrderNumber(i);
                    // wait that the bubble is uploaded to the device before starting
                    if (stream == 0) bubble->waitBubbleUploaded(device);
                    // call the kernel
                    Bubbles_evaluate_grid_pitched
                        <<< grid, block, INJECT_BLOCK_SIZE * sizeof(double) * 7,
                            *this->streamContainer->getStream(device, stream) >>>
                        (bubble->device_copies[device],
                         dev_f2_cube,
                         this->grid->axis[X_]->device_gridpoints[device],
                         this->grid->axis[Y_]->device_gridpoints[device],
                         this->grid->axis[Z_]->device_gridpoints[device],
                         f2_shape[X_],
                         f2_shape[Y_],
                         f2_shape[Z_],
                         bubble->crd[X_],
                         bubble->crd[Y_],
                         bubble->crd[Z_],
                         bubble->k,
                         slice_offset,
                         f2_device_pitches[device],
                         f2_device_memory_shape[Y_],
                         slice_count,
                         1.0);
                    check_errors(__FILE__, __LINE__);
                }
                // get the launch configuration for the multiplication and result-inject
                this->cube2->getLaunchConfiguration(&grid, &block, slice_count, INJECT_BLOCK_SIZE);
                // multiply dev_f1_cube with dev_f2_cube and store the result to dev_f1_cube
                multiply_3d_cubes(dev_f1_cube, f1_shape[X_], f1_shape[Y_], f1_device_memory_shape[Y_], f1_device_pitches[device],
                                  dev_f2_cube, f2_shape[X_], f2_shape[Y_], f2_device_memory_shape[Y_], f2_device_pitches[device],
                                  slice_count, &grid, &block, this->streamContainer->getStream(device, stream));
                check_errors(__FILE__, __LINE__);
                // de-inject (deduct) the result bubbles from the dev_f1_cube;
                // the -1.0 multiplier makes the kernel subtract instead of add
                for (int i = 0; i < result_bubbles->getBubbleCount(); i++) {
                    bubble = result_bubbles->getBubbleWithLocalOrderNumber(i);
                    // wait that the bubble is uploaded to the device before starting
                    if (stream == 0) bubble->waitBubbleUploaded(device);
                    // call the kernel
                    Bubbles_evaluate_grid_pitched
                        <<< grid, block, INJECT_BLOCK_SIZE * sizeof(double) * 7,
                            *this->streamContainer->getStream(device, stream) >>>
                        (bubble->device_copies[device],
                         dev_f1_cube,
                         this->grid->axis[X_]->device_gridpoints[device],
                         this->grid->axis[Y_]->device_gridpoints[device],
                         this->grid->axis[Z_]->device_gridpoints[device],
                         f1_shape[X_],
                         f1_shape[Y_],
                         f1_shape[Z_],
                         bubble->crd[X_],
                         bubble->crd[Y_],
                         bubble->crd[Z_],
                         bubble->k,
                         slice_offset,
                         f1_device_pitches[device],
                         f1_device_memory_shape[Y_],
                         slice_count,
                         -1.0);
                    check_errors(__FILE__, __LINE__);
                }
                // increase the address by the number of vectors in this array
                // (advance past this stream's slices; the pitch is in bytes,
                // hence the division by sizeof(double))
                dev_f1_cube += slice_count * f1_device_pitches[device] / sizeof(double) * f1_device_memory_shape[Y_];
                dev_f2_cube += slice_count * f2_device_pitches[device] / sizeof(double) * f2_device_memory_shape[Y_];
                slice_offset += slice_count;
            }
        }
    }
}
/********************************************
* Fortran interfaces *
********************************************/
/*
 * Fortran interface: add the contents of 'bubbles1' into 'bubbles' in place.
 */
extern "C" void bubbles_add_cuda(Bubbles *bubbles, Bubbles *bubbles1) {
    bubbles->add(bubbles1);
}
/*
 * Fortran interface: extract a new Bubbles object containing the 'nbub'
 * bubbles whose indices are listed in 'ibubs'.
 */
extern "C" Bubbles* bubbles_get_sub_bubbles_cuda(Bubbles *bubbles, int *ibubs, int nbub) {
    return bubbles->getSubBubbles(ibubs, nbub);
}
/*
 * Fortran interface: construct an empty Bubbles container with room for
 * 'nbub' bubbles. Ownership of the returned object passes to the caller
 * (released via bubbles_destroy_cuda).
 */
extern "C" Bubbles *bubbles_init_cuda(int nbub) {
    Bubbles *new_bubbles = new Bubbles(nbub);
    check_errors(__FILE__, __LINE__);
    return new_bubbles;
}
/*
*
 * @param i - local index of the bubble being initialized, given in Fortran format: the first index is 1.
*/
/*
 * Fortran interface: initialize bubble number 'i' of 'bubbles'.
 * 'i' arrives in Fortran convention (first index is 1) and is converted
 * to the 0-based C++ index here.
 */
extern "C" void bubble_init_cuda(Bubbles *bubbles, Grid1D *grid, int i, int ibub, double center[3], int lmax,
                                 int k, double charge, StreamContainer *streamContainer) {
    bubbles->initBubble(grid, i-1, ibub, center, lmax, k, charge, streamContainer);
    check_errors(__FILE__, __LINE__);
}
/*
* Upload the content ('bf') of the Bubble with global order number 'ibub' to the device.
*
 * @param ibub - the global order number of the bubble
*/
/*
 * Fortran interface: set the k-value and upload the radial data 'bf' of the
 * bubble with global order number 'ibub'. A no-op when this process does not
 * hold that bubble (see setProcessorConfiguration).
 */
extern "C" void bubble_upload_all_cuda(Bubbles *bubbles, int ibub, int lmax, int k, double *bf) {
    if (bubbles->containsBubble(ibub)) {
        bubbles->getBubble(ibub)->k = k;
        bubbles->getBubble(ibub)->uploadAll(bf, lmax);
        check_errors(__FILE__, __LINE__);
    }
}
/*
 * Fortran interface: upload the radial data 'bf' (up to 'lmax') of the
 * bubble with global order number 'ibub', if this process holds it.
 */
extern "C" void bubble_upload_cuda(Bubbles *bubbles, int ibub, int lmax, double *bf) {
    if (bubbles->containsBubble(ibub)) {
        bubbles->getBubble(ibub)->upload(bf, lmax);
        check_errors(__FILE__, __LINE__);
    }
}
/*
 * Fortran interface: add the bubble 'ibub' of 'bubbles1' into the
 * corresponding bubble of 'bubbles'.
 */
extern "C" void bubble_add_cuda(Bubbles *bubbles, Bubbles *bubbles1, int ibub) {
    bubbles->getBubble(ibub)->add(bubbles1->getBubble(ibub));
    check_errors(__FILE__, __LINE__);
}
/*
 * Fortran interface: release the device resources of 'bubbles' and delete
 * the object. Safe to call with a null pointer.
 */
extern "C" void bubbles_destroy_cuda(Bubbles* bubbles){
    if (bubbles) {
        bubbles->destroy();
        delete bubbles;
        check_errors(__FILE__, __LINE__);
    }
}
/*
 * Fortran interface: integrate the bubbles and return the result.
 */
extern "C" double bubbles_integrate_cuda(Bubbles *bubbles) {
    return bubbles->integrate();
}
/*
 * Fortran interface: tell 'bubbles' which processor this is (order number)
 * and how many processors participate, for distributing the bubbles.
 */
extern "C" void bubbles_set_processor_configuration_cuda(Bubbles *bubbles, int processor_order_number, int number_of_processors) {
    bubbles->setProcessorConfiguration(processor_order_number, number_of_processors);
}
/*
 * Fortran interface: inject (evaluate and add) the bubbles to 'cube'
 * on grid 'grid', starting from angular momentum 'lmin'.
 */
extern "C" void bubbles_inject_cuda(Bubbles *bubbles, Grid3D *grid, int lmin, CudaCube *cube) {
    bubbles->inject(grid, cube, lmin);
}
/*
 * Fortran interface: wrap the host array 'cube' (at 'offset', with shape
 * 'cube_host_shape') in 'cudaCube', upload it to the device, and inject
 * the bubbles to it starting from angular momentum 'lmin'.
 */
extern "C" void bubbles_inject_to_cuda(Bubbles *bubbles, Grid3D *grid, int lmin, CudaCube *cudaCube, double *cube, int offset, int cube_host_shape[3]) {
    cudaCube->initHost(&cube[offset], cube_host_shape, true);
    cudaCube->upload();
    bubbles->inject(grid, cudaCube, lmin);
}
/*
 * Fortran interface: allocate page-locked (pinned) host memory for the
 * radial functions of a bubble: (lmax+1)^2 * shape doubles.
 *
 * Fix: the automatic hipify translation left this call as cudaHostAlloc /
 * cudaHostAllocPortable while the rest of the file uses the HIP API;
 * use hipHostMalloc with the portable flag so the file builds with HIP.
 * The portable flag makes the pinned buffer usable from all devices.
 */
extern "C" double *bubbles_init_page_locked_f_cuda(int lmax, int shape){
    double * result_f;
    check_errors(__FILE__, __LINE__);
    hipHostMalloc((void **)&result_f,
                  sizeof(double) * (lmax+1) * (lmax+1) * shape,
                  hipHostMallocPortable);
    check_errors(__FILE__, __LINE__);
    return result_f;
}
/*
 * Fortran interface: release pinned host memory allocated with
 * bubbles_init_page_locked_f_cuda.
 *
 * Fix: the automatic hipify translation left this call as cudaFreeHost;
 * use the HIP counterpart hipHostFree so the file builds with HIP.
 */
extern "C" void bubbles_destroy_page_locked_f_cuda(double * f){
    hipHostFree(f);
    check_errors(__FILE__, __LINE__);
}
|
23edd96eae6039375bc2190cc158f99cfaba8b45.hip | // !!! This is a file automatically generated by hipify!!!
#include "StructuredEikonal.h"
// Construct a solver with a default 256^3 volume, constant speed map
// (solverType 0) and 10 iterations per block; no GPU memory is allocated
// until initialization()/solveEikonal() is called.
StructuredEikonal::StructuredEikonal(bool verbose)
  :verbose_(verbose), isGpuMemCreated_(false),
  width_(256), height_(256), depth_(256),
  itersPerBlock_(10), solverType_(0) {}
StructuredEikonal::~StructuredEikonal() {}
/*
 * Write the solved volume (answer_) to 'filename' in NRRD format: a text
 * header followed by raw doubles with x varying fastest (matching the
 * "sizes: width height depth" header line). Also prints the mean of all
 * written values as a checksum.
 * NOTE(review): the header hard-codes "endian: little", so this assumes a
 * little-endian host -- confirm if big-endian targets matter.
 */
void StructuredEikonal::writeNRRD(std::string filename) {
  std::fstream out(filename.c_str(), std::ios::out | std::ios::binary);
  out << "NRRD0001\n";
  out << "# Complete NRRD file format specification at:\n";
  out << "# http://teem.sourceforge.net/nrrd/format.html\n";
  out << "type: double\n";
  out << "dimension: 3\n";
  out << "sizes: " << this->width_ << " " << this->height_ << " " << this->depth_ << "\n";
  out << "endian: little\n";
  out << "encoding: raw\n\n";
  double checksum = 0.0;
  for(size_t k = 0; k < this->depth_; k++) {
    for(size_t j = 0; j < this->height_; j++) {
      for(size_t i = 0; i < this->width_; i++) {
        double d = this->answer_[i][j][k];
        checksum += d;
        out.write(reinterpret_cast<const char*>(&d),sizeof(double));
      }
    }
  }
  out.close();
  // report the average value as a simple reproducibility checksum
  printf("Checksum = %lf\n", checksum / (this->depth_ * this->height_ * this->width_));
}
// Set the logical volume dimensions (x = width, y = height, z = depth);
// takes effect at the next initialization().
void StructuredEikonal::setDims(size_t x, size_t y, size_t z) {
  this->width_ = x;
  this->height_ = y;
  this->depth_ = z;
}
/*
 * Print an error message and terminate the program.
 *
 * assert(false) aborts in debug builds; under NDEBUG the assert is a no-op,
 * so exit() must still end the process. Fix: exit with a non-zero status so
 * callers and shells see the failure (the original exited with 0, which
 * reports success).
 */
void StructuredEikonal::error(char* msg) {
  printf("%s\n",msg);
  assert(false);
  exit(1);
}
/*
 * Allocate (or re-allocate) all host- and device-side buffers used by the
 * solver. The volume dimensions are first rounded up to the next multiple
 * of BLOCK_LENGTH so the volume tiles exactly into BLOCK_LENGTH^3 blocks.
 * Buffers from a previous allocation are released first when
 * isGpuMemCreated_ is set.
 * NOTE(review): the malloc/hipMalloc return values are not checked --
 * consider adding error handling for very large volumes.
 */
void StructuredEikonal::init_device_mem() {
  assert(this->width_ > 0 && this->height_ > 0 && this->depth_ > 0);
  if(this->width_ <= 0 || this->height_ <= 0 || this->depth_ <= 0){
    printf("Volume dimension cannot be zero");
    exit(1);
  }
  // 1. Create /initialize GPU memory
  size_t nx, ny, nz;
  // round each dimension up to a multiple of BLOCK_LENGTH
  nx = this->width_ + (BLOCK_LENGTH-this->width_%BLOCK_LENGTH)%BLOCK_LENGTH;
  ny = this->height_ + (BLOCK_LENGTH-this->height_%BLOCK_LENGTH)%BLOCK_LENGTH;
  nz = this->depth_ + (BLOCK_LENGTH-this->depth_%BLOCK_LENGTH)%BLOCK_LENGTH;
  if (this->verbose_) {
    printf("%zu %zu %zu\n",nx,ny,nz);
  }
  auto volSize = nx*ny*nz;
  auto blkSize = BLOCK_LENGTH*BLOCK_LENGTH*BLOCK_LENGTH;
  auto nBlkX = nx / BLOCK_LENGTH;
  auto nBlkY = ny / BLOCK_LENGTH;
  auto nBlkZ = nz / BLOCK_LENGTH;
  auto blockNum = nBlkX*nBlkY*nBlkZ;
  this->memoryStruct_.xdim = static_cast<int>(nx);
  this->memoryStruct_.ydim = static_cast<int>(ny);
  this->memoryStruct_.zdim = static_cast<int>(nz);
  this->memoryStruct_.volsize = static_cast<uint>(volSize);
  this->memoryStruct_.blksize = static_cast<uint>(blkSize);
  this->memoryStruct_.blklength = BLOCK_LENGTH;
  this->memoryStruct_.blknum = static_cast<uint>(blockNum);
  this->memoryStruct_.nIter = static_cast<int>(this->itersPerBlock_); // iter per block
  if(this->isGpuMemCreated_) // delete previous memory
  {
    free((DOUBLE*)this->memoryStruct_.h_sol);
    free((uint*)this->memoryStruct_.h_list);
    free((bool*)this->memoryStruct_.h_listed);
    free((bool*)this->memoryStruct_.h_listVol);
    free((int*)this->memoryStruct_.blockOrder);
    hipFree(this->memoryStruct_.d_spd);
    hipFree(this->memoryStruct_.d_sol);
    hipFree(this->memoryStruct_.t_sol); // temp solution for ping-pong
    hipFree(this->memoryStruct_.d_con); // convergence volume
    hipFree(this->memoryStruct_.d_list);
    hipFree(this->memoryStruct_.d_listVol);
    hipFree(this->memoryStruct_.d_mask);
  }
  this->isGpuMemCreated_ = true;
  this->memoryStruct_.h_sol = (DOUBLE*) malloc(volSize*sizeof(DOUBLE)); // initial solution
  this->memoryStruct_.h_list = (uint*) malloc(blockNum*sizeof(uint)); // linear list contains active block indices
  this->memoryStruct_.h_listed = (bool*) malloc(blockNum*sizeof(bool)); // whether block is added to the list
  this->memoryStruct_.h_listVol = (bool*) malloc(blockNum*sizeof(bool)); // volume list shows active/nonactive of corresponding block
  this->memoryStruct_.blockOrder = (int*) malloc(blockNum*sizeof(int));
  //
  // create device memory
  //
  hipMalloc((void**)&(this->memoryStruct_.d_spd), volSize*sizeof(double));
  hipMalloc((void**)&(this->memoryStruct_.d_sol), volSize*sizeof(DOUBLE));
  hipMalloc((void**)&(this->memoryStruct_.t_sol), volSize*sizeof(DOUBLE)); // temp solution for ping-pong
  hipMalloc((void**)&(this->memoryStruct_.d_con), volSize*sizeof(bool)); // convergence volume
  hipMalloc((void**)&(this->memoryStruct_.d_list), blockNum*sizeof(uint));
  hipMalloc((void**)&(this->memoryStruct_.d_listVol), blockNum*sizeof(bool));
  hipMalloc((void**)&(this->memoryStruct_.d_mask), volSize*sizeof(bool));
}
/*
 * Copy the speed map (speeds_) and the domain mask to device memory.
 * The volume is reordered so that every BLOCK_LENGTH^3 block is stored
 * contiguously in the 1D arrays (block-major layout). The mask is set to
 * true for every voxel, i.e. the whole volume participates in the solve.
 */
void StructuredEikonal::set_attribute_mask() {
  uint volSize = this->memoryStruct_.volsize;
  int nx, ny, nz, blklength;
  nx = memoryStruct_.xdim;
  ny = memoryStruct_.ydim;
  nz = memoryStruct_.zdim;
  blklength = memoryStruct_.blklength;
  // create host memory
  double *h_spd = new double[volSize]; // byte speed, host
  bool *h_mask = new bool[volSize];
  // copy input volume to host memory
  // make each block to be stored contiguously in 1D memory space
  uint idx = 0;
  for(int zStr = 0; zStr < nz; zStr += blklength) {
    for(int yStr = 0; yStr < ny; yStr += blklength) {
      for(int xStr = 0; xStr < nx; xStr += blklength) {
        // for each block
        for(int z=zStr; z<zStr+blklength; z++) {
          for(int y=yStr; y<yStr+blklength; y++) {
            for(int x=xStr; x<xStr+blklength; x++) {
              h_spd[idx] = this->speeds_[x][y][z];
              h_mask[idx] = true;
              idx++;
            }
          }
        }
      }
    }
  }
  // initialize GPU memory with host memory
  hipMemcpy(memoryStruct_.d_spd, h_spd, volSize*sizeof(double), hipMemcpyHostToDevice);
  hipMemcpy(memoryStruct_.d_mask, h_mask, volSize*sizeof(bool), hipMemcpyHostToDevice);
  delete[] h_spd;
  delete[] h_mask;
}
// Allocate all solver buffers and upload the speed map + mask.
// Uses the dimensions set by setDims(); must run before useSeeds().
void StructuredEikonal::initialization() {
  this->init_device_mem();
  this->set_attribute_mask();
}
/*
 * Build the speed map (speeds_, indexed [x][y][z]).
 * solverType 0: constant speed 1 everywhere.
 * solverType 1: smooth sinusoid speeds 6 + 5*sin(...)^3-product,
 *               i.e. values in the range [1, 11].
 */
void StructuredEikonal::map_generator() {
  double pi = 3.141592653589793238462643383;
  this->speeds_ = std::vector<std::vector<std::vector<double> > >(
    this->width_, std::vector<std::vector<double> >(
    this->height_, std::vector<double>(this->depth_,1.)));
  switch(this->solverType_){
  case 0 :
    //Constant Speed Map
    break;
  case 1 :
    //Sinusoid Speed Map
    for (size_t k = 0 ; k < this->depth_ ; ++k) {
      for (size_t j = 0 ; j < this->height_; ++j) {
        for ( size_t i = 0 ; i < this->width_ ; ++i) {
          this->speeds_[i][j][k] =
            (6 + 5*(sin((i*pi)/this->width_ *2))*
            sin((j*pi)/this->height_*2)*
            sin((k*pi)/this->depth_*2));
        }
      }
    }
    break;
  }
}
// Set explicit seed voxels as (x, y, z) index triples; when left empty,
// useSeeds() falls back to the center voxel of the volume.
void StructuredEikonal::setSeeds(std::vector<std::array<size_t, 3> > seeds) {
  this->seeds_ = seeds;
}
/*
 * Initialize the solution volume on the host and upload it to the GPU.
 * Every voxel starts at INF; seed voxels (the user-supplied seeds_, or the
 * center voxel of the padded volume when seeds_ is empty) are set to 0.
 * Blocks containing at least one seed are marked active and pushed onto the
 * work list; d_con is byte-set to 1 (all voxels initially "converged").
 * Fix: corrects the "Selected bt source" typo in the verbose log output.
 */
void StructuredEikonal::useSeeds() {
  if (this->verbose_) {
    std::cout << "Loading seed volume..." << std::endl;
  }
  uint volSize, blockNum;
  int nx, ny, nz, blklength;
  nx = this->memoryStruct_.xdim;
  ny = this->memoryStruct_.ydim;
  nz = this->memoryStruct_.zdim;
  volSize = this->memoryStruct_.volsize;
  blklength = this->memoryStruct_.blklength;
  blockNum = this->memoryStruct_.blknum;
  // copy input volume to host memory
  // make each block to be stored contiguously in 1D memory space
  uint idx = 0;
  uint blk_idx = 0;
  uint list_idx = 0;
  uint nActiveBlock = 0;
  for(int zStr = 0; zStr < nz; zStr += blklength) {
    for(int yStr = 0; yStr < ny; yStr += blklength) {
      for(int xStr = 0; xStr < nx; xStr += blklength) {
        // for each block
        bool isSeedBlock = false;
        for(int z=zStr; z<zStr+blklength; z++) {
          for(int y=yStr; y<yStr+blklength; y++) {
            for(int x=xStr; x<xStr+blklength; x++) {
              this->memoryStruct_.h_sol[idx] = INF;
              if (this->seeds_.empty()) {
                // no explicit seeds: use the center voxel of the volume
                if (x == nx/2 && y == ny/2 && z == nz/2) {
                  this->memoryStruct_.h_sol[idx] = 0;
                  isSeedBlock = true;
                  if (this->verbose_) {
                    printf("%d is Selected by source \n",idx);
                  }
                }
              } else {
                for(size_t i = 0; i < this->seeds_.size(); i++) {
                  if (this->seeds_[i][0] == (size_t)x &&
                      this->seeds_[i][1] == (size_t)y &&
                      this->seeds_[i][2] == (size_t)z) {
                    this->memoryStruct_.h_sol[idx] = 0;
                    isSeedBlock = true;
                    if (this->verbose_) {
                      printf("%d is Selected by source \n",idx);
                    }
                  }
                }
              }
              idx++;
            }
          }
        }
        ///////////////////////////////////////////////
        if(isSeedBlock) {
          if (this->verbose_) {
            printf("%d,%d,%d is Seed Block \n",zStr,yStr,xStr);
          }
          this->memoryStruct_.h_listVol[blk_idx] = true;
          this->memoryStruct_.h_listed[blk_idx] = true;
          this->memoryStruct_.h_list[list_idx] = blk_idx;
          list_idx++;
          nActiveBlock++;
        } else {
          this->memoryStruct_.h_listVol[blk_idx] = false;
          this->memoryStruct_.h_listed[blk_idx] = false;
        }
        blk_idx++;
      }
    }
  }
  this->memoryStruct_.nActiveBlock = nActiveBlock;
  // initialize GPU memory with host memory
  hipMemcpy(this->memoryStruct_.d_sol, this->memoryStruct_.h_sol, volSize*sizeof(DOUBLE), hipMemcpyHostToDevice);
  hipMemcpy(this->memoryStruct_.t_sol, this->memoryStruct_.h_sol, volSize*sizeof(DOUBLE), hipMemcpyHostToDevice);
  hipMemcpy(this->memoryStruct_.d_list, this->memoryStruct_.h_list, nActiveBlock*sizeof(uint), hipMemcpyHostToDevice);
  hipMemcpy(this->memoryStruct_.d_listVol, this->memoryStruct_.h_listVol, blockNum*sizeof(bool), hipMemcpyHostToDevice);
  // initialize GPU memory with constant value (every bool byte set to 1)
  hipMemset(this->memoryStruct_.d_con, 1, volSize*sizeof(bool));
}
// Select the speed-map type used by map_generator():
// 0 = constant speed, 1 = sinusoid.
void StructuredEikonal::setMapType(size_t t) {
  this->solverType_ = t;
}
/*
 * Run the full eikonal solve: build a speed map if none was supplied,
 * (re)allocate host/GPU buffers, seed the solution, run the solver and
 * copy the result back into answer_.
 *
 * Fix: the previous code forced isGpuMemCreated_ = false before
 * initialization(), which made init_device_mem() skip freeing the buffers
 * of an earlier solve, leaking host and device memory on every repeated
 * call. The flag is already maintained correctly by the constructor
 * (false) and init_device_mem() (set true after allocation), so it must
 * not be reset here.
 */
void StructuredEikonal::solveEikonal() {
  if (this->speeds_.empty()) {
    this->map_generator();
  }
  this->initialization();
  this->useSeeds();
  runEikonalSolverSimple(this->memoryStruct_);
  this->get_solution();
}
// Return a copy of the solved volume, indexed [x][y][z]
// (filled by get_solution() at the end of solveEikonal()).
std::vector< std::vector< std::vector<double> > >
  StructuredEikonal::getFinalResult() {
  return this->answer_;
}
/*
 * Copy the solver output from the device into h_sol and unpack the
 * block-major layout back into the [x][y][z] answer_ volume, discarding
 * padding voxels that lie outside the requested dimensions.
 */
void StructuredEikonal::get_solution() {
  // copy solution from GPU
  hipMemcpy(this->memoryStruct_.h_sol,
    this->memoryStruct_.d_sol, this->memoryStruct_.volsize*sizeof(DOUBLE),
    hipMemcpyDeviceToHost);
  //put the data where it belongs in the grand scheme of data!
  this->answer_ = std::vector<std::vector<std::vector<double> > >(
    this->width_, std::vector<std::vector<double> >(
    this->height_, std::vector<double>(this->depth_,0)));
  for(size_t blockID = 0; blockID < this->memoryStruct_.blknum; blockID++) {
    // start of this block's BLOCK_LENGTH^3 chunk in the 1D array
    size_t baseAddr = blockID * this->memoryStruct_.blksize;
    size_t xgridlength = this->memoryStruct_.xdim/BLOCK_LENGTH;
    size_t ygridlength = this->memoryStruct_.ydim/BLOCK_LENGTH;
    // compute block index
    size_t bx = blockID%xgridlength;
    size_t tmpIdx = (blockID - bx)/xgridlength;
    size_t by = tmpIdx%ygridlength;
    size_t bz = (tmpIdx-by)/ygridlength;
    //translate back to real space
    for(int k = 0; k < BLOCK_LENGTH; k++) {
      for(int j = 0; j < BLOCK_LENGTH; j++) {
        for(int i = 0; i < BLOCK_LENGTH; i++) {
          double d = this->memoryStruct_.h_sol[baseAddr +
            k * BLOCK_LENGTH * BLOCK_LENGTH +
            j * BLOCK_LENGTH + i];
          // skip padding voxels beyond the requested volume
          if ((i + bx * BLOCK_LENGTH) < this->width_ &&
            (j + by * BLOCK_LENGTH) < this->height_ &&
            (k + bz * BLOCK_LENGTH) < this->depth_) {
            this->answer_[(i + bx * BLOCK_LENGTH)][(j +
              by * BLOCK_LENGTH)][k + bz * BLOCK_LENGTH] = d;
          }
        }
      }
    }
  }
}
// Set the number of solver iterations performed per block
// (copied into memoryStruct_.nIter at the next initialization()).
void StructuredEikonal::setItersPerBlock(size_t t) {
  this->itersPerBlock_ = t;
}
| 23edd96eae6039375bc2190cc158f99cfaba8b45.cu | #include "StructuredEikonal.h"
// Construct a solver with a default 256^3 volume, constant speed map
// (solverType 0) and 10 iterations per block; no GPU memory is allocated
// until initialization()/solveEikonal() is called.
StructuredEikonal::StructuredEikonal(bool verbose)
  :verbose_(verbose), isGpuMemCreated_(false),
  width_(256), height_(256), depth_(256),
  itersPerBlock_(10), solverType_(0) {}
StructuredEikonal::~StructuredEikonal() {}
/*
 * Write the solved volume (answer_) to 'filename' in NRRD format: a text
 * header followed by raw doubles with x varying fastest (matching the
 * "sizes: width height depth" header line). Also prints the mean of all
 * written values as a checksum.
 * NOTE(review): the header hard-codes "endian: little", so this assumes a
 * little-endian host -- confirm if big-endian targets matter.
 */
void StructuredEikonal::writeNRRD(std::string filename) {
  std::fstream out(filename.c_str(), std::ios::out | std::ios::binary);
  out << "NRRD0001\n";
  out << "# Complete NRRD file format specification at:\n";
  out << "# http://teem.sourceforge.net/nrrd/format.html\n";
  out << "type: double\n";
  out << "dimension: 3\n";
  out << "sizes: " << this->width_ << " " << this->height_ << " " << this->depth_ << "\n";
  out << "endian: little\n";
  out << "encoding: raw\n\n";
  double checksum = 0.0;
  for(size_t k = 0; k < this->depth_; k++) {
    for(size_t j = 0; j < this->height_; j++) {
      for(size_t i = 0; i < this->width_; i++) {
        double d = this->answer_[i][j][k];
        checksum += d;
        out.write(reinterpret_cast<const char*>(&d),sizeof(double));
      }
    }
  }
  out.close();
  // report the average value as a simple reproducibility checksum
  printf("Checksum = %lf\n", checksum / (this->depth_ * this->height_ * this->width_));
}
// Set the logical volume dimensions (x = width, y = height, z = depth);
// takes effect at the next initialization().
void StructuredEikonal::setDims(size_t x, size_t y, size_t z) {
  this->width_ = x;
  this->height_ = y;
  this->depth_ = z;
}
/*
 * Print an error message and terminate the program.
 *
 * assert(false) aborts in debug builds; under NDEBUG the assert is a no-op,
 * so exit() must still end the process. Fix: exit with a non-zero status so
 * callers and shells see the failure (the original exited with 0, which
 * reports success).
 */
void StructuredEikonal::error(char* msg) {
  printf("%s\n",msg);
  assert(false);
  exit(1);
}
/*
 * Allocate (or re-allocate) all host- and device-side buffers used by the
 * solver. The volume dimensions are first rounded up to the next multiple
 * of BLOCK_LENGTH so the volume tiles exactly into BLOCK_LENGTH^3 blocks.
 * Buffers from a previous allocation are released first when
 * isGpuMemCreated_ is set.
 * NOTE(review): the malloc/cudaMalloc return values are not checked --
 * consider adding error handling for very large volumes.
 */
void StructuredEikonal::init_device_mem() {
  assert(this->width_ > 0 && this->height_ > 0 && this->depth_ > 0);
  if(this->width_ <= 0 || this->height_ <= 0 || this->depth_ <= 0){
    printf("Volume dimension cannot be zero");
    exit(1);
  }
  // 1. Create /initialize GPU memory
  size_t nx, ny, nz;
  // round each dimension up to a multiple of BLOCK_LENGTH
  nx = this->width_ + (BLOCK_LENGTH-this->width_%BLOCK_LENGTH)%BLOCK_LENGTH;
  ny = this->height_ + (BLOCK_LENGTH-this->height_%BLOCK_LENGTH)%BLOCK_LENGTH;
  nz = this->depth_ + (BLOCK_LENGTH-this->depth_%BLOCK_LENGTH)%BLOCK_LENGTH;
  if (this->verbose_) {
    printf("%zu %zu %zu\n",nx,ny,nz);
  }
  auto volSize = nx*ny*nz;
  auto blkSize = BLOCK_LENGTH*BLOCK_LENGTH*BLOCK_LENGTH;
  auto nBlkX = nx / BLOCK_LENGTH;
  auto nBlkY = ny / BLOCK_LENGTH;
  auto nBlkZ = nz / BLOCK_LENGTH;
  auto blockNum = nBlkX*nBlkY*nBlkZ;
  this->memoryStruct_.xdim = static_cast<int>(nx);
  this->memoryStruct_.ydim = static_cast<int>(ny);
  this->memoryStruct_.zdim = static_cast<int>(nz);
  this->memoryStruct_.volsize = static_cast<uint>(volSize);
  this->memoryStruct_.blksize = static_cast<uint>(blkSize);
  this->memoryStruct_.blklength = BLOCK_LENGTH;
  this->memoryStruct_.blknum = static_cast<uint>(blockNum);
  this->memoryStruct_.nIter = static_cast<int>(this->itersPerBlock_); // iter per block
  if(this->isGpuMemCreated_) // delete previous memory
  {
    free((DOUBLE*)this->memoryStruct_.h_sol);
    free((uint*)this->memoryStruct_.h_list);
    free((bool*)this->memoryStruct_.h_listed);
    free((bool*)this->memoryStruct_.h_listVol);
    free((int*)this->memoryStruct_.blockOrder);
    cudaFree(this->memoryStruct_.d_spd);
    cudaFree(this->memoryStruct_.d_sol);
    cudaFree(this->memoryStruct_.t_sol); // temp solution for ping-pong
    cudaFree(this->memoryStruct_.d_con); // convergence volume
    cudaFree(this->memoryStruct_.d_list);
    cudaFree(this->memoryStruct_.d_listVol);
    cudaFree(this->memoryStruct_.d_mask);
  }
  this->isGpuMemCreated_ = true;
  this->memoryStruct_.h_sol = (DOUBLE*) malloc(volSize*sizeof(DOUBLE)); // initial solution
  this->memoryStruct_.h_list = (uint*) malloc(blockNum*sizeof(uint)); // linear list contains active block indices
  this->memoryStruct_.h_listed = (bool*) malloc(blockNum*sizeof(bool)); // whether block is added to the list
  this->memoryStruct_.h_listVol = (bool*) malloc(blockNum*sizeof(bool)); // volume list shows active/nonactive of corresponding block
  this->memoryStruct_.blockOrder = (int*) malloc(blockNum*sizeof(int));
  //
  // create device memory
  //
  cudaMalloc((void**)&(this->memoryStruct_.d_spd), volSize*sizeof(double));
  cudaMalloc((void**)&(this->memoryStruct_.d_sol), volSize*sizeof(DOUBLE));
  cudaMalloc((void**)&(this->memoryStruct_.t_sol), volSize*sizeof(DOUBLE)); // temp solution for ping-pong
  cudaMalloc((void**)&(this->memoryStruct_.d_con), volSize*sizeof(bool)); // convergence volume
  cudaMalloc((void**)&(this->memoryStruct_.d_list), blockNum*sizeof(uint));
  cudaMalloc((void**)&(this->memoryStruct_.d_listVol), blockNum*sizeof(bool));
  cudaMalloc((void**)&(this->memoryStruct_.d_mask), volSize*sizeof(bool));
}
/*
 * Copy the speed map (speeds_) and the domain mask to device memory.
 * The volume is reordered so that every BLOCK_LENGTH^3 block is stored
 * contiguously in the 1D arrays (block-major layout). The mask is set to
 * true for every voxel, i.e. the whole volume participates in the solve.
 */
void StructuredEikonal::set_attribute_mask() {
  uint volSize = this->memoryStruct_.volsize;
  int nx, ny, nz, blklength;
  nx = memoryStruct_.xdim;
  ny = memoryStruct_.ydim;
  nz = memoryStruct_.zdim;
  blklength = memoryStruct_.blklength;
  // create host memory
  double *h_spd = new double[volSize]; // byte speed, host
  bool *h_mask = new bool[volSize];
  // copy input volume to host memory
  // make each block to be stored contiguously in 1D memory space
  uint idx = 0;
  for(int zStr = 0; zStr < nz; zStr += blklength) {
    for(int yStr = 0; yStr < ny; yStr += blklength) {
      for(int xStr = 0; xStr < nx; xStr += blklength) {
        // for each block
        for(int z=zStr; z<zStr+blklength; z++) {
          for(int y=yStr; y<yStr+blklength; y++) {
            for(int x=xStr; x<xStr+blklength; x++) {
              h_spd[idx] = this->speeds_[x][y][z];
              h_mask[idx] = true;
              idx++;
            }
          }
        }
      }
    }
  }
  // initialize GPU memory with host memory
  cudaMemcpy(memoryStruct_.d_spd, h_spd, volSize*sizeof(double), cudaMemcpyHostToDevice);
  cudaMemcpy(memoryStruct_.d_mask, h_mask, volSize*sizeof(bool), cudaMemcpyHostToDevice);
  delete[] h_spd;
  delete[] h_mask;
}
// Allocate all solver buffers and upload the speed map + mask.
// Uses the dimensions set by setDims(); must run before useSeeds().
void StructuredEikonal::initialization() {
  this->init_device_mem();
  this->set_attribute_mask();
}
/*
 * Build the speed map (speeds_, indexed [x][y][z]).
 * solverType 0: constant speed 1 everywhere.
 * solverType 1: smooth sinusoid speeds 6 + 5*sin(...)^3-product,
 *               i.e. values in the range [1, 11].
 */
void StructuredEikonal::map_generator() {
  double pi = 3.141592653589793238462643383;
  this->speeds_ = std::vector<std::vector<std::vector<double> > >(
    this->width_, std::vector<std::vector<double> >(
    this->height_, std::vector<double>(this->depth_,1.)));
  switch(this->solverType_){
  case 0 :
    //Constant Speed Map
    break;
  case 1 :
    //Sinusoid Speed Map
    for (size_t k = 0 ; k < this->depth_ ; ++k) {
      for (size_t j = 0 ; j < this->height_; ++j) {
        for ( size_t i = 0 ; i < this->width_ ; ++i) {
          this->speeds_[i][j][k] =
            (6 + 5*(sin((i*pi)/this->width_ *2))*
            sin((j*pi)/this->height_*2)*
            sin((k*pi)/this->depth_*2));
        }
      }
    }
    break;
  }
}
// Set explicit seed voxels as (x, y, z) index triples; when left empty,
// useSeeds() falls back to the center voxel of the volume.
void StructuredEikonal::setSeeds(std::vector<std::array<size_t, 3> > seeds) {
  this->seeds_ = seeds;
}
/*
 * Initialize the solution volume on the host and upload it to the GPU.
 * Every voxel starts at INF; seed voxels (the user-supplied seeds_, or the
 * center voxel of the padded volume when seeds_ is empty) are set to 0.
 * Blocks containing at least one seed are marked active and pushed onto the
 * work list; d_con is byte-set to 1 (all voxels initially "converged").
 * Fix: corrects the "Selected bt source" typo in the verbose log output.
 */
void StructuredEikonal::useSeeds() {
  if (this->verbose_) {
    std::cout << "Loading seed volume..." << std::endl;
  }
  uint volSize, blockNum;
  int nx, ny, nz, blklength;
  nx = this->memoryStruct_.xdim;
  ny = this->memoryStruct_.ydim;
  nz = this->memoryStruct_.zdim;
  volSize = this->memoryStruct_.volsize;
  blklength = this->memoryStruct_.blklength;
  blockNum = this->memoryStruct_.blknum;
  // copy input volume to host memory
  // make each block to be stored contiguously in 1D memory space
  uint idx = 0;
  uint blk_idx = 0;
  uint list_idx = 0;
  uint nActiveBlock = 0;
  for(int zStr = 0; zStr < nz; zStr += blklength) {
    for(int yStr = 0; yStr < ny; yStr += blklength) {
      for(int xStr = 0; xStr < nx; xStr += blklength) {
        // for each block
        bool isSeedBlock = false;
        for(int z=zStr; z<zStr+blklength; z++) {
          for(int y=yStr; y<yStr+blklength; y++) {
            for(int x=xStr; x<xStr+blklength; x++) {
              this->memoryStruct_.h_sol[idx] = INF;
              if (this->seeds_.empty()) {
                // no explicit seeds: use the center voxel of the volume
                if (x == nx/2 && y == ny/2 && z == nz/2) {
                  this->memoryStruct_.h_sol[idx] = 0;
                  isSeedBlock = true;
                  if (this->verbose_) {
                    printf("%d is Selected by source \n",idx);
                  }
                }
              } else {
                for(size_t i = 0; i < this->seeds_.size(); i++) {
                  if (this->seeds_[i][0] == (size_t)x &&
                      this->seeds_[i][1] == (size_t)y &&
                      this->seeds_[i][2] == (size_t)z) {
                    this->memoryStruct_.h_sol[idx] = 0;
                    isSeedBlock = true;
                    if (this->verbose_) {
                      printf("%d is Selected by source \n",idx);
                    }
                  }
                }
              }
              idx++;
            }
          }
        }
        ///////////////////////////////////////////////
        if(isSeedBlock) {
          if (this->verbose_) {
            printf("%d,%d,%d is Seed Block \n",zStr,yStr,xStr);
          }
          this->memoryStruct_.h_listVol[blk_idx] = true;
          this->memoryStruct_.h_listed[blk_idx] = true;
          this->memoryStruct_.h_list[list_idx] = blk_idx;
          list_idx++;
          nActiveBlock++;
        } else {
          this->memoryStruct_.h_listVol[blk_idx] = false;
          this->memoryStruct_.h_listed[blk_idx] = false;
        }
        blk_idx++;
      }
    }
  }
  this->memoryStruct_.nActiveBlock = nActiveBlock;
  // initialize GPU memory with host memory
  cudaMemcpy(this->memoryStruct_.d_sol, this->memoryStruct_.h_sol, volSize*sizeof(DOUBLE), cudaMemcpyHostToDevice);
  cudaMemcpy(this->memoryStruct_.t_sol, this->memoryStruct_.h_sol, volSize*sizeof(DOUBLE), cudaMemcpyHostToDevice);
  cudaMemcpy(this->memoryStruct_.d_list, this->memoryStruct_.h_list, nActiveBlock*sizeof(uint), cudaMemcpyHostToDevice);
  cudaMemcpy(this->memoryStruct_.d_listVol, this->memoryStruct_.h_listVol, blockNum*sizeof(bool), cudaMemcpyHostToDevice);
  // initialize GPU memory with constant value (every bool byte set to 1)
  cudaMemset(this->memoryStruct_.d_con, 1, volSize*sizeof(bool));
}
// Select the speed-map type used by map_generator():
// 0 = constant speed, 1 = sinusoid.
void StructuredEikonal::setMapType(size_t t) {
  this->solverType_ = t;
}
/*
 * Run the full eikonal solve: build a speed map if none was supplied,
 * (re)allocate host/GPU buffers, seed the solution, run the solver and
 * copy the result back into answer_.
 *
 * Fix: the previous code forced isGpuMemCreated_ = false before
 * initialization(), which made init_device_mem() skip freeing the buffers
 * of an earlier solve, leaking host and device memory on every repeated
 * call. The flag is already maintained correctly by the constructor
 * (false) and init_device_mem() (set true after allocation), so it must
 * not be reset here.
 */
void StructuredEikonal::solveEikonal() {
  if (this->speeds_.empty()) {
    this->map_generator();
  }
  this->initialization();
  this->useSeeds();
  runEikonalSolverSimple(this->memoryStruct_);
  this->get_solution();
}
// Return a copy of the solved volume, indexed [x][y][z]
// (filled by get_solution() at the end of solveEikonal()).
std::vector< std::vector< std::vector<double> > >
  StructuredEikonal::getFinalResult() {
  return this->answer_;
}
// Copy the solved distance field from the device and unpack its
// block-linear layout into the (width x height x depth) answer_ volume.
void StructuredEikonal::get_solution() {
// copy solution from GPU
// NOTE(review): the cudaMemcpy return code is not checked; a failed solve
// would go unnoticed here.
cudaMemcpy(this->memoryStruct_.h_sol,
this->memoryStruct_.d_sol, this->memoryStruct_.volsize*sizeof(DOUBLE),
cudaMemcpyDeviceToHost);
//put the data where it belongs in the grand scheme of data!
this->answer_ = std::vector<std::vector<std::vector<double> > >(
this->width_, std::vector<std::vector<double> >(
this->height_, std::vector<double>(this->depth_,0)));
// h_sol stores the volume as a sequence of BLOCK_LENGTH^3 bricks, each
// brick contiguous (z-major within the brick).
for(size_t blockID = 0; blockID < this->memoryStruct_.blknum; blockID++) {
size_t baseAddr = blockID * this->memoryStruct_.blksize;
size_t xgridlength = this->memoryStruct_.xdim/BLOCK_LENGTH;
size_t ygridlength = this->memoryStruct_.ydim/BLOCK_LENGTH;
// compute block index (bx,by,bz) from the linear blockID
size_t bx = blockID%xgridlength;
size_t tmpIdx = (blockID - bx)/xgridlength;
size_t by = tmpIdx%ygridlength;
size_t bz = (tmpIdx-by)/ygridlength;
//translate back to real space
for(int k = 0; k < BLOCK_LENGTH; k++) {
for(int j = 0; j < BLOCK_LENGTH; j++) {
for(int i = 0; i < BLOCK_LENGTH; i++) {
double d = this->memoryStruct_.h_sol[baseAddr +
k * BLOCK_LENGTH * BLOCK_LENGTH +
j * BLOCK_LENGTH + i];
// Skip padding voxels when the volume size is not a multiple of
// BLOCK_LENGTH in some dimension.
if ((i + bx * BLOCK_LENGTH) < this->width_ &&
(j + by * BLOCK_LENGTH) < this->height_ &&
(k + bz * BLOCK_LENGTH) < this->depth_) {
this->answer_[(i + bx * BLOCK_LENGTH)][(j +
by * BLOCK_LENGTH)][k + bz * BLOCK_LENGTH] = d;
}
}
}
}
}
}
void StructuredEikonal::setItersPerBlock(size_t t) {
  // Number of solver sweeps performed per block per outer iteration.
  itersPerBlock_ = t;
}
|
7e6a8f7e22c1df097ab9a7618f18db79ba6aa8aa.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2008-2009 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "ocustorage/coarray.h"
#include "tests/testframework.h"
// Print usage/help text and the list of registered tests, then exit(-1).
// Option lines that require OpenMP are prefixed with "[DISABLED]" when the
// build did not define OCU_OMP.
void do_error() {
#ifdef OCU_OMP
const char *OMP = "";
#else
const char *OMP = "[DISABLED] ";
#endif
printf("utest [option] [test1] [test2] ...\n");
printf("Options are:\n");
printf(" -gpu N Run on the numbered GPU. Can also set via env var OCU_UTEST_GPU. Default value is 0.\n");
printf(" -help Print this message\n");
printf(" -multi %sRun in multigpu mode. Only multi-gpu-enabled tests will run.\n", OMP );
printf(" -numgpus N %sSet GPU count for multi gpu mode. Can also set via env var OCU_UTEST_MULTI. Default value is 2.\n", OMP);
printf(" -repeat N Repeat all tests N times\n");
printf("\n");
printf("Current tests are:\n");
UnitTestDriver::s_driver().print_tests();
exit(-1);
}
// Unit-test driver entry point (HIP build).
// Parses -gpu/-numgpus/-repeat/-multi/-help, initializes the thread and
// co-array managers for single- or multi-GPU mode, then runs either all
// registered tests or the tests named on the command line, `repeat` times.
// Returns 0 on success; exits with -1 on init or test failure.
int main(int argc, char **argv)
{
  int dev_cnt;
  hipGetDeviceCount(&dev_cnt);
  // Defaults come from the environment; command-line flags override them.
  int gpu = getenv("OCU_UTEST_GPU") ? atoi(getenv("OCU_UTEST_GPU")) : 0;
  int num_gpus = getenv("OCU_UTEST_MULTI") ? atoi(getenv("OCU_UTEST_MULTI")) : dev_cnt;
  bool do_multi = false;
  int unprocessed_args = argc-1;
  int cur_arg = 1;
  int repeat = 1;
  while(cur_arg < argc && argv[cur_arg][0] == '-') {
    // FIX: these used to be independent `if`s, so after a flag such as
    // "-gpu" advanced cur_arg onto its value, the remaining strcmp()s were
    // evaluated against that value; a value that textually matched another
    // flag (e.g. "utest -gpu -multi") would be processed twice. An else-if
    // chain consumes each token exactly once.
    if (strcmp(argv[cur_arg], "-gpu")==0) {
      cur_arg++;
      unprocessed_args--;
      if (cur_arg < argc) {
        gpu = atoi(argv[cur_arg]);
      }
      else do_error();
    }
    else if (strcmp(argv[cur_arg], "-numgpus")==0) {
#ifndef OCU_OMP
      printf("[ERROR] -numgpus option invalid when compiled with OCU_OMP_ENABLED FALSE");
      do_error();
#else
      cur_arg++;
      unprocessed_args--;
      if (cur_arg < argc) {
        num_gpus = atoi(argv[cur_arg]);
      }
      else do_error();
#endif
    }
    else if (strcmp(argv[cur_arg], "-repeat")==0) {
      cur_arg++;
      unprocessed_args--;
      if (cur_arg < argc) {
        repeat = atoi(argv[cur_arg]);
      }
      else do_error();
    }
    else if (strcmp(argv[cur_arg], "-multi")==0) {
#ifndef OCU_OMP
      printf("[ERROR] -multi option invalid when compiled with OCU_OMP_ENABLED FALSE");
      do_error();
#else
      do_multi = true;
#endif
    }
    else if (strcmp(argv[cur_arg], "-help")==0) {
      do_error();
    }
    // Unrecognized "-" tokens are skipped, matching the original behavior.
    cur_arg++;
    unprocessed_args--;
  }
  UnitTestDriver::s_driver().set_multi(do_multi);
  if (do_multi) {
#ifndef OCU_OMP
    printf("[ERROR] Cannot run in multi mode when compiled with OCU_OMP_ENABLED FALSE\n");
#else
    // start n threads, init all multithreading stuff, etc.
    printf("[INFO] Running in multi-GPU mode with %d devices\n", num_gpus);
    if (!ocu::CoArrayManager::initialize(num_gpus)) {
      printf("[ERROR] Could not initialize CoArrayManager\n");
      exit(-1);
    }
    if (!ocu::ThreadManager::initialize(num_gpus)) {
      printf("[ERROR] Could not initialize ThreadManager\n");
      exit(-1);
    }
#pragma omp parallel
    {
      // Each OpenMP thread ("image") binds to its own device.
      if (!ocu::ThreadManager::initialize_image(ocu::ThreadManager::this_image())) {
        printf("[ERROR] Could not initialize ThreadManager image %d\n", ocu::ThreadManager::this_image());
        exit(-1);
      }
      if (!ocu::CoArrayManager::initialize_image(ocu::ThreadManager::this_image())) {
        printf("[ERROR] Could not initialize CoArrayManager image %d\n", ocu::ThreadManager::this_image());
        exit(-1);
      }
      if (unprocessed_args == 0) {
        // No test names given: run everything.
        for (int r=0; r < repeat; r++) {
          if (!UnitTestDriver::s_driver().run_all_tests())
            exit(-1);
        }
      }
      else {
        // Remaining (non-flag) arguments are the names of tests to run.
        std::vector<std::string> tests;
        for (int i=argc-unprocessed_args; i < argc; i++)
          tests.push_back(argv[i]);
        for (int r=0; r < repeat; r++) {
          if (!UnitTestDriver::s_driver().run_tests(tests))
            exit(-1);
        }
      }
    }
#endif
  }
  else {
    if (!ocu::ThreadManager::initialize(1)) {
      printf("[ERROR] Could not initialize ThreadManager\n");
      exit(-1);
    }
    if (!ocu::ThreadManager::initialize_image(gpu)) {
      printf("[ERROR] Could not initialize ThreadManager on gpu %d\n", gpu);
      exit(-1);
    }
    printf("[INFO] Running on GPU %d\n", gpu);
    // call this once to force everything to initialize so any timing results are not skewed
    hipFree(0);
    if (unprocessed_args == 0) {
      for (int r=0; r < repeat; r++) {
        if (!UnitTestDriver::s_driver().run_single_gpu_tests())
          exit(-1);
      }
    }
    else {
      std::vector<std::string> tests;
      for (int i=argc-unprocessed_args; i < argc; i++)
        tests.push_back(argv[i]);
      for (int r=0; r < repeat; r++) {
        if (!UnitTestDriver::s_driver().run_tests(tests))
          exit(-1);
      }
    }
  }
  return 0;
}
| 7e6a8f7e22c1df097ab9a7618f18db79ba6aa8aa.cu | /*
* Copyright 2008-2009 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "ocustorage/coarray.h"
#include "tests/testframework.h"
// Print usage/help text and the list of registered tests, then exit(-1).
// Option lines that require OpenMP are prefixed with "[DISABLED]" when the
// build did not define OCU_OMP.
void do_error() {
#ifdef OCU_OMP
const char *OMP = "";
#else
const char *OMP = "[DISABLED] ";
#endif
printf("utest [option] [test1] [test2] ...\n");
printf("Options are:\n");
printf(" -gpu N Run on the numbered GPU. Can also set via env var OCU_UTEST_GPU. Default value is 0.\n");
printf(" -help Print this message\n");
printf(" -multi %sRun in multigpu mode. Only multi-gpu-enabled tests will run.\n", OMP );
printf(" -numgpus N %sSet GPU count for multi gpu mode. Can also set via env var OCU_UTEST_MULTI. Default value is 2.\n", OMP);
printf(" -repeat N Repeat all tests N times\n");
printf("\n");
printf("Current tests are:\n");
UnitTestDriver::s_driver().print_tests();
exit(-1);
}
// Unit-test driver entry point (CUDA build).
// Parses -gpu/-numgpus/-repeat/-multi/-help, initializes the thread and
// co-array managers for single- or multi-GPU mode, then runs either all
// registered tests or the tests named on the command line, `repeat` times.
// Returns 0 on success; exits with -1 on init or test failure.
int main(int argc, char **argv)
{
  int dev_cnt;
  cudaGetDeviceCount(&dev_cnt);
  // Defaults come from the environment; command-line flags override them.
  int gpu = getenv("OCU_UTEST_GPU") ? atoi(getenv("OCU_UTEST_GPU")) : 0;
  int num_gpus = getenv("OCU_UTEST_MULTI") ? atoi(getenv("OCU_UTEST_MULTI")) : dev_cnt;
  bool do_multi = false;
  int unprocessed_args = argc-1;
  int cur_arg = 1;
  int repeat = 1;
  while(cur_arg < argc && argv[cur_arg][0] == '-') {
    // FIX: these used to be independent `if`s, so after a flag such as
    // "-gpu" advanced cur_arg onto its value, the remaining strcmp()s were
    // evaluated against that value; a value that textually matched another
    // flag (e.g. "utest -gpu -multi") would be processed twice. An else-if
    // chain consumes each token exactly once.
    if (strcmp(argv[cur_arg], "-gpu")==0) {
      cur_arg++;
      unprocessed_args--;
      if (cur_arg < argc) {
        gpu = atoi(argv[cur_arg]);
      }
      else do_error();
    }
    else if (strcmp(argv[cur_arg], "-numgpus")==0) {
#ifndef OCU_OMP
      printf("[ERROR] -numgpus option invalid when compiled with OCU_OMP_ENABLED FALSE");
      do_error();
#else
      cur_arg++;
      unprocessed_args--;
      if (cur_arg < argc) {
        num_gpus = atoi(argv[cur_arg]);
      }
      else do_error();
#endif
    }
    else if (strcmp(argv[cur_arg], "-repeat")==0) {
      cur_arg++;
      unprocessed_args--;
      if (cur_arg < argc) {
        repeat = atoi(argv[cur_arg]);
      }
      else do_error();
    }
    else if (strcmp(argv[cur_arg], "-multi")==0) {
#ifndef OCU_OMP
      printf("[ERROR] -multi option invalid when compiled with OCU_OMP_ENABLED FALSE");
      do_error();
#else
      do_multi = true;
#endif
    }
    else if (strcmp(argv[cur_arg], "-help")==0) {
      do_error();
    }
    // Unrecognized "-" tokens are skipped, matching the original behavior.
    cur_arg++;
    unprocessed_args--;
  }
  UnitTestDriver::s_driver().set_multi(do_multi);
  if (do_multi) {
#ifndef OCU_OMP
    printf("[ERROR] Cannot run in multi mode when compiled with OCU_OMP_ENABLED FALSE\n");
#else
    // start n threads, init all multithreading stuff, etc.
    printf("[INFO] Running in multi-GPU mode with %d devices\n", num_gpus);
    if (!ocu::CoArrayManager::initialize(num_gpus)) {
      printf("[ERROR] Could not initialize CoArrayManager\n");
      exit(-1);
    }
    if (!ocu::ThreadManager::initialize(num_gpus)) {
      printf("[ERROR] Could not initialize ThreadManager\n");
      exit(-1);
    }
#pragma omp parallel
    {
      // Each OpenMP thread ("image") binds to its own device.
      if (!ocu::ThreadManager::initialize_image(ocu::ThreadManager::this_image())) {
        printf("[ERROR] Could not initialize ThreadManager image %d\n", ocu::ThreadManager::this_image());
        exit(-1);
      }
      if (!ocu::CoArrayManager::initialize_image(ocu::ThreadManager::this_image())) {
        printf("[ERROR] Could not initialize CoArrayManager image %d\n", ocu::ThreadManager::this_image());
        exit(-1);
      }
      if (unprocessed_args == 0) {
        // No test names given: run everything.
        for (int r=0; r < repeat; r++) {
          if (!UnitTestDriver::s_driver().run_all_tests())
            exit(-1);
        }
      }
      else {
        // Remaining (non-flag) arguments are the names of tests to run.
        std::vector<std::string> tests;
        for (int i=argc-unprocessed_args; i < argc; i++)
          tests.push_back(argv[i]);
        for (int r=0; r < repeat; r++) {
          if (!UnitTestDriver::s_driver().run_tests(tests))
            exit(-1);
        }
      }
    }
#endif
  }
  else {
    if (!ocu::ThreadManager::initialize(1)) {
      printf("[ERROR] Could not initialize ThreadManager\n");
      exit(-1);
    }
    if (!ocu::ThreadManager::initialize_image(gpu)) {
      printf("[ERROR] Could not initialize ThreadManager on gpu %d\n", gpu);
      exit(-1);
    }
    printf("[INFO] Running on GPU %d\n", gpu);
    // call this once to force everything to initialize so any timing results are not skewed
    cudaFree(0);
    if (unprocessed_args == 0) {
      for (int r=0; r < repeat; r++) {
        if (!UnitTestDriver::s_driver().run_single_gpu_tests())
          exit(-1);
      }
    }
    else {
      std::vector<std::string> tests;
      for (int i=argc-unprocessed_args; i < argc; i++)
        tests.push_back(argv[i]);
      for (int r=0; r < repeat; r++) {
        if (!UnitTestDriver::s_driver().run_tests(tests))
          exit(-1);
      }
    }
  }
  return 0;
}
|
8919a6642ea62f6829ae17230ae7301521067d83.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// gpucompute/cuda-kernels.cu
// Copyright 2009-2012 Karel Vesely
// 2013 Ehsan Variani
// 2013 Johns Hopkins University (author: Daniel Povey)
// 2013 Hainan Xu
// 2013 Xiaohui Zhang
// 2013 Johns Hopkins University (author: Guoguo Chen)
// 2015 Yajie Miao
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
// WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
// MERCHANTABLITY OR NON-INFRINGEMENT.
// See the Apache 2 License for the specific language governing permissions and
// limitations under the License.
// In this file is the CUDA code of the CUDA kernels, plus the ANSI-C wrappers
#include <cfloat>
#include "cuda-kernels.h"
#include "cuPrintf_hip.cuh"
#include "cuPrintf.hip"
#include "ctc-utils.h"
#include "stdio.h"
/***********************************************************************
* Generic __device__ functions
*/
// Shared-memory tree reduction (sum) over buffer[0..blockDim.x-1].
// The total ends up in buffer[0] and is returned by every calling thread.
// NOTE(review): contains __syncthreads(), so all blockDim.x threads of the
// block must reach this call (no divergent early returns in the caller).
// This variant has the *upper* half of the active threads push their values
// down (thread t adds buffer[t] into buffer[t - halfPoint]); the "was ..."
// comments record the change from the usual lower-half formulation.
template<typename Real>
__device__
static Real _sum_reduce(Real buffer[]) {
// Total number of active threads
int32_cuda nTotalThreads = blockDim.x;
__syncthreads();
// perform tree-based reduction (sum)
while(nTotalThreads > 1) {
int32_cuda halfPoint = ((1+nTotalThreads) >> 1); // divide by two
// only the first half of the threads will be active.
if (threadIdx.x >= halfPoint) { // was <
// Get the shared value stored by another thread
Real temp = 0.0;
if(threadIdx.x < nTotalThreads) { // was +halfPoint
temp = buffer[threadIdx.x]; // was +halfPoint
}
buffer[threadIdx.x - halfPoint] += temp;
}
__syncthreads();
nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two.
}
// the result
return buffer[0];
}
// Shared-memory tree reduction (minimum) over buffer[0..blockDim.x-1].
// Result lands in buffer[0] and is returned by every calling thread.
// NOTE(review): contains __syncthreads(); all blockDim.x threads must call it.
template<typename Real>
__device__
static Real _min_reduce(Real buffer[]) {
// Total number of active threads
int32_cuda nTotalThreads = blockDim.x;
__syncthreads();
// perform tree-based reduction (min)
while(nTotalThreads > 1) {
int32_cuda halfPoint = ((1+nTotalThreads) >> 1); // divide by two
// only the first half of the threads will be active
if (threadIdx.x < halfPoint) {
if (threadIdx.x + halfPoint < nTotalThreads) {
Real temp = buffer[threadIdx.x + halfPoint];
if (temp < buffer[threadIdx.x])
buffer[threadIdx.x] = temp;
}
}
__syncthreads();
nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two
}
// the result
return buffer[0];
}
// Shared-memory tree reduction (maximum) over buffer[0..blockDim.x-1].
// Result lands in buffer[0] and is returned by every calling thread.
// NOTE(review): contains __syncthreads(); all blockDim.x threads must call it.
template<typename Real>
__device__
static Real _max_reduce(Real buffer[]) {
// Total number of active threads
int32_cuda nTotalThreads = blockDim.x;
__syncthreads();
// perform tree-based reduction (max)
while(nTotalThreads > 1) {
int32_cuda halfPoint = ((1+nTotalThreads) >> 1); // divide by two
// only the first half of the threads will be active.
if (threadIdx.x < halfPoint) {
// Get the shared value stored by another thread
if(threadIdx.x+halfPoint < nTotalThreads) {
Real temp = buffer[threadIdx.x + halfPoint];
if (temp > buffer[threadIdx.x])
buffer[threadIdx.x] = temp;
}
}
__syncthreads();
nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two.
}
// the result
return buffer[0];
}
// Tree reduction returning the index (into val[]) of the maximum element.
// idx[] is shared scratch pre-filled by the caller with candidate indices;
// the winning index ends up in idx[0] and is returned by every thread.
// NOTE(review): -1e20 is the "absent" sentinel, so this assumes all real
// values exceed -1e20; contains __syncthreads(), so all blockDim.x threads
// must call it.
template<typename Real>
__device__
static int32_cuda _max_id_reduce(Real val[], int32_cuda idx[]) {
// Total number of active threads
int32_cuda nTotalThreads = blockDim.x;
__syncthreads();
// perform tree-based reduction (get index of maximum)
while(nTotalThreads > 1) {
int32_cuda halfPoint = ((1+nTotalThreads) >> 1); // divide by two
// only the first half of the threads will be active.
if (threadIdx.x < halfPoint) {
// Get the shared value stored by another thread
Real temp = -1e20;
if(threadIdx.x+halfPoint < nTotalThreads) {
temp = val[idx[threadIdx.x + halfPoint]];
}
if (temp > val[idx[threadIdx.x]]) idx[threadIdx.x]=idx[threadIdx.x + halfPoint];
}
__syncthreads();
nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two.
}
// the result
return idx[0];
}
/***********************************************************************
* CUDA kernels
* the functions are templated to have the float/double operations
*/
// for this kernel, following the newer pattern, the x-dim is the row-index, the
// y-dim is the col-index.
template<typename Real, typename OtherReal>
__global__
static void _copy_from_mat(Real* mat_out, const OtherReal* mat_in, MatrixDim d_out, MatrixDim d_in) {
  // Elementwise copy with type conversion. Per the newer kernel convention
  // used here, the x-dim of the launch indexes rows, the y-dim columns.
  const int32_cuda row = blockIdx.x * blockDim.x + threadIdx.x;
  const int32_cuda col = blockIdx.y * blockDim.y + threadIdx.y;
  if (row < d_out.rows && col < d_out.cols) {
    mat_out[col + row * d_out.stride] =
        static_cast<Real>(mat_in[col + row * d_in.stride]);
  }
}
// for this kernel, the x-dim is the row-index at the output, the y-dim is the
// col-index at the output
template<typename Real, typename OtherReal>
__global__
static void _copy_from_mat_trans(Real* mat_out, const OtherReal* mat_in, MatrixDim d_out, MatrixDim d_in) {
  // Transposed copy with type conversion: out(row, col) = in(col, row).
  // x-dim indexes output rows, y-dim indexes output columns.
  const int32_cuda row = blockIdx.x * blockDim.x + threadIdx.x;
  const int32_cuda col = blockIdx.y * blockDim.y + threadIdx.y;
  if (row < d_out.rows && col < d_out.cols) {
    mat_out[col + row * d_out.stride] =
        static_cast<Real>(mat_in[row + col * d_in.stride]);
  }
}
template<typename Real>
__global__
static void _apply_exp(Real* mat, MatrixDim d) {
  // In-place elementwise exponential: mat(r,c) = exp(mat(r,c)).
  // x-dim of the launch indexes columns, y-dim indexes rows.
  const int32_cuda c = blockIdx.x * blockDim.x + threadIdx.x;
  const int32_cuda r = blockIdx.y * blockDim.y + threadIdx.y;
  if (c < d.cols && r < d.rows) {
    const int32_cuda idx = c + r * d.stride;
    mat[idx] = exp(mat[idx]);
  }
}
template<typename Real>
__global__
static void _set_const(Real* mat, Real value, MatrixDim d) {
  // Fill every element of the (strided) matrix with a constant.
  const int32_cuda c = blockIdx.x * blockDim.x + threadIdx.x;
  const int32_cuda r = blockIdx.y * blockDim.y + threadIdx.y;
  if (c < d.cols && r < d.rows)
    mat[c + r * d.stride] = value;
}
template<typename Real>
__global__
static void _add(Real* mat, Real value, MatrixDim d) {
  // In-place scalar addition: mat(r,c) += value.
  const int32_cuda c = blockIdx.x * blockDim.x + threadIdx.x;
  const int32_cuda r = blockIdx.y * blockDim.y + threadIdx.y;
  if (c < d.cols && r < d.rows)
    mat[c + r * d.stride] += value;
}
template<typename Real>
__global__
static void _scale(Real* mat, Real value, MatrixDim d) {
  // In-place scalar multiply: mat(r,c) *= value.
  const int32_cuda c = blockIdx.x * blockDim.x + threadIdx.x;
  const int32_cuda r = blockIdx.y * blockDim.y + threadIdx.y;
  if (c < d.cols && r < d.rows)
    mat[c + r * d.stride] *= value;
}
template<typename Real>
__global__
static void _apply_log(Real* mat, MatrixDim d) {
  // In-place elementwise natural log: mat(r,c) = log(mat(r,c)).
  // No domain check -- non-positive entries produce -inf/NaN, as before.
  const int32_cuda c = blockIdx.x * blockDim.x + threadIdx.x;
  const int32_cuda r = blockIdx.y * blockDim.y + threadIdx.y;
  if (c < d.cols && r < d.rows) {
    const int32_cuda idx = c + r * d.stride;
    mat[idx] = log(mat[idx]);
  }
}
template<typename Real>
__global__
static void _mul_elements(Real* mat, const Real* A, MatrixDim dst_d, int src_stride) {
  // In-place Hadamard product: mat(r,c) *= A(r,c); the two matrices may
  // have different row strides.
  const int32_cuda c = blockIdx.x * blockDim.x + threadIdx.x;
  const int32_cuda r = blockIdx.y * blockDim.y + threadIdx.y;
  if (c < dst_d.cols && r < dst_d.rows)
    mat[c + r * dst_d.stride] *= A[c + r * src_stride];
}
template<typename Real>
__global__
static void _vec_mul_elements(Real* v, const Real* a, int dim) {
  // In-place elementwise product of two vectors: v[i] *= a[i].
  const int32_cuda idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx < dim)
    v[idx] *= a[idx];
}
template<typename Real>
__global__
static void _mul_rows_vec(Real* mat, const Real* scale, MatrixDim d) {
  // Scale each row r of the matrix by scale[r].
  const int32_cuda c = blockIdx.x * blockDim.x + threadIdx.x;
  const int32_cuda r = blockIdx.y * blockDim.y + threadIdx.y;
  if (c < d.cols && r < d.rows)
    mat[c + r * d.stride] *= scale[r];
}
template<typename Real>
__global__
static void _add_mat(Real alpha, const Real* src, Real* dst, MatrixDim d, int src_stride) {
  // dst += alpha * src, where src may have a different row stride.
  const int32_cuda c = blockIdx.x * blockDim.x + threadIdx.x;
  const int32_cuda r = blockIdx.y * blockDim.y + threadIdx.y;
  if (c < d.cols && r < d.rows)
    dst[c + r * d.stride] += alpha * src[c + r * src_stride];
}
template<typename Real>
__global__
static void _add_mat_trans(Real alpha, const Real* src, Real* dst, MatrixDim d, int src_stride) {
  // dst += alpha * src^T: element (r,c) of dst accumulates element (c,r) of src.
  const int32_cuda c = blockIdx.x * blockDim.x + threadIdx.x;
  const int32_cuda r = blockIdx.y * blockDim.y + threadIdx.y;
  if (c < d.cols && r < d.rows)
    dst[c + r * d.stride] += alpha * src[r + c * src_stride];
}
template<typename Real>
__global__
static void _add_vec_to_rows(Real alpha, const Real* row, Real beta, Real* dst, MatrixDim d) {
  // Broadcast axpby: every row of dst becomes alpha*row + beta*(that row).
  const int32_cuda c = blockIdx.x * blockDim.x + threadIdx.x;
  const int32_cuda r = blockIdx.y * blockDim.y + threadIdx.y;
  if (c < d.cols && r < d.rows) {
    const int32_cuda idx = c + r * d.stride;
    dst[idx] = alpha * row[c] + beta * dst[idx];
  }
}
/*
* CuVector
*/
template<typename Real>
__global__
static void _copy_from_vec_df(double* v_out, const Real* v_in, int dim) {
  // Copy a vector, converting every element to double.
  const int32_cuda idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx < dim)
    v_out[idx] = static_cast<double>(v_in[idx]);
}
template<typename Real>
__global__
static void _copy_from_vec_fd(float* v_out, const Real* v_in, int dim) {
  // Copy a vector, converting every element to float (may lose precision).
  const int32_cuda idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx < dim)
    v_out[idx] = static_cast<float>(v_in[idx]);
}
// Minimum of v[0..dim-1], written to *value.
// Each of (up to) CU1DBLOCK threads scans a contiguous chunk, then a
// shared-memory tree reduction combines the partial minima.
// NOTE(review): assumes a single-block launch with blockDim.x <= CU1DBLOCK;
// threads with i >= CU1DBLOCK return before the __syncthreads() inside
// _min_reduce, which is only safe if no such threads exist -- confirm at
// the call site. Unlike _vec_max below there is no blockIdx.y guard.
// Every surviving thread stores the (identical) result to *value.
template<typename Real>
__global__
static void _vec_min(const Real* v, Real* value, int dim) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
if(i >= CU1DBLOCK) return;
__shared__ Real row_data[CU1DBLOCK];
int block_size = (dim + CU1DBLOCK - 1) / CU1DBLOCK;
Real min = 1.0 / 0.0; // infinity.
for (int j = i * block_size; j < (i+1) * block_size && j < dim; j++) {
Real v_j = v[j];
if (v_j < min) min = v_j;
}
row_data[i] = min;
__syncthreads();
//get the sum
*value = _min_reduce(row_data);
}
// Maximum of v[0..dim-1], written to *value.
// Mirrors _vec_min, with an extra guard that ignores blocks with
// blockIdx.y > 0.
// NOTE(review): same single-block / blockDim.x <= CU1DBLOCK assumption as
// _vec_min; the early returns precede the __syncthreads() inside
// _max_reduce -- confirm launch configuration at the call site.
template<typename Real>
__global__
static void _vec_max(const Real* v, Real* value, int dim) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
if(blockIdx.y > 0) return;
__shared__ Real row_data[CU1DBLOCK];
if(i >= CU1DBLOCK) return;
int block_size = (dim + CU1DBLOCK - 1) / CU1DBLOCK;
Real max = -1.0 / 0.0; // -infinity.
for (int j = i * block_size; j < (i+1) * block_size && j < dim; j++) {
Real v_j = v[j];
if (v_j > max) max = v_j;
}
row_data[i] = max;
__syncthreads();
//get the sum
*value = _max_reduce(row_data);
}
// Adds diag(M N) to v, where M and N are matrices. We supply row_stride and
// col_stride arguments for M and N, and swapping them allows us to transpose
// those matrices. Note: we imagine row-major indexing here, just like Kaldi
// and CBLAS (but unlike CUBLAS).
// This kernel expects the blockDim to be (CU1DBLOCK, 1) and the
// gridDim times CU1DBLOCK to be at least num-rows-of-v * threads_per_element.
// threads_per_element should be a power of 2.
// v = beta * v + alpha * diag(M * N).
// Each diagonal element v[k] is computed cooperatively by
// `threads_per_element` consecutive threads (a power of 2), which sum
// disjoint slices of the dot product and then tree-reduce in shared memory.
template<typename Real>
__global__
static void _add_diag_mat_mat(
Real alpha, Real* v, int v_dim, const Real* M, int M_cols, int M_row_stride,
int M_col_stride, const Real *N, int N_row_stride, int N_col_stride,
int threads_per_element, Real beta) {
// we actually assume blockDim.x == CU1DBLOCK here.
// Each diagonal element of v is processed by "threads_per_element" threads.
__shared__ Real temp_data[CU1DBLOCK];
int i = blockIdx.x * blockDim.x + threadIdx.x;
int v_idx = i / threads_per_element, // v_idx is the index into v that we are supposed to
sub_idx = i % threads_per_element; // add to; 0 <= sub_idx < threads_per_element tells
// us which block of elements we sum up.
if (v_idx >= v_dim) return;
Real sum = 0.0;
// Strided partial dot product: this thread handles columns
// sub_idx, sub_idx + threads_per_element, ...
for (int j = sub_idx; j < M_cols; j += threads_per_element) {
int M_index = v_idx * M_row_stride + j * M_col_stride,
N_index = j * N_row_stride + v_idx * N_col_stride;
sum += M[M_index] * N[N_index];
}
temp_data[threadIdx.x] = sum;
// start_idx = threadIdx.x - sub_idx; // start of the position in temp_data
// that we want to sum up.
// The following is a tree-based reduction of the elements of temp_data from
// start_idx to start_idx + threads_per_element - 1; our own index is "sub_idx".
__syncthreads();
int num_total_threads = threads_per_element;
while (num_total_threads > 1) {
int half_point = ((1 + num_total_threads) >> 1);
if (sub_idx < half_point) {
Real temp = 0.0;
if (sub_idx + half_point < num_total_threads) {
temp = temp_data[threadIdx.x + half_point];
}
temp_data[threadIdx.x] += temp;
}
__syncthreads();
num_total_threads = half_point;
}
// Only the first thread of each group writes the final value.
if (sub_idx == 0) {
v[v_idx] = beta * v[v_idx] + alpha * temp_data[threadIdx.x];
}
}
template<typename Real>
__global__
static void _add_vec_vec(Real alpha, Real* v, const Real* x, const Real* y, Real beta, int dim) {
  // v = beta*v + alpha*(x .* y), elementwise.
  const int32_cuda idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx < dim)
    v[idx] = alpha * x[idx] * y[idx] + beta * v[idx];
}
template<typename Real>
__global__
static void _vec_apply_exp(Real* v, int dim) {
  // In-place elementwise exponential of a vector.
  const int32_cuda idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx < dim)
    v[idx] = exp(v[idx]);
}
template<typename Real>
__global__
static void _vec_apply_log(Real* v, Real* flag, int dim) {
  // In-place elementwise log. A negative element is left unchanged and
  // *flag is set to 1 so the host can detect the domain error.
  const int32_cuda idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx >= dim) return;
  if (v[idx] < 0) {
    *flag = 1;
    return;
  }
  v[idx] = log(v[idx]);
}
// Sum of the strided vector v[0], v[inc], ..., written to *sum.
// The dim elements are split as evenly as possible across CU1DBLOCK
// threads (the first `threshold` threads take one extra element), then a
// shared-memory tree reduction combines the partial sums.
// NOTE(review): assumes a single block launched with exactly CU1DBLOCK
// threads (chunk arithmetic uses CU1DBLOCK, and the early return precedes
// the __syncthreads() in _sum_reduce) -- confirm at the call site.
// Every surviving thread stores the (identical) result to *sum.
template<typename Real>
__global__
static void _vec_sum(Real *v, Real *sum, int dim, int inc) {
int i = threadIdx.x;
__shared__ Real row_data[CU1DBLOCK];
if (i >= CU1DBLOCK) return;
Real tmp_sum = 0;
int size = dim / CU1DBLOCK; //the least size in a loop (later part)
int threshold = dim - size * CU1DBLOCK; //any loop below this number would + 1
int loop_start;
int loop_end;
if(i < threshold) {
loop_start = i * (size + 1);
loop_end = (i+1) * (size + 1);
}
else {
loop_start = threshold + i * size;
loop_end = threshold + (i+1) * size;
}
for(int j = loop_start; j< loop_end; j++) {
tmp_sum += v[j * inc];
}
row_data[threadIdx.x] = tmp_sum;
__syncthreads();
*sum = _sum_reduce(row_data);
}
// Partial sums: thread-block b writes the sum of its chunk of v into g[b];
// each thread handles `size` consecutive elements. The host (or a second
// kernel) reduces g afterwards.
// NOTE(review): threads whose chunk starts at or past dim return before
// the __syncthreads() inside _sum_reduce; this relies on such threads
// existing only past the useful data of the last block -- confirm the
// launch sizing at the call site.
template<typename Real>
__global__
static void _pvec_sum(Real* v, Real* g, int dim, int size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int start = size * i;
if (start >= dim) return;
int end = start + size;
if (end > dim) end = dim;
__shared__ Real row_data[CU1DBLOCK];
Real sum = 0;
for (int j = start; j < end; j++)
sum += v[j];
row_data[threadIdx.x] = sum;
__syncthreads();
g[blockIdx.x] = _sum_reduce(row_data);
}
template<typename Real>
__global__
static void _vec_apply_floor(Real *v, Real floor_val, float *count, int dim) {
  // Clamp each element from below at floor_val; count[i] records (0/1)
  // whether element i was floored, so the host can tally total clamps.
  const int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= dim) return;
  if (v[i] < floor_val) {
    v[i] = floor_val;
    count[i] = 1;
  } else {
    count[i] = 0;
  }
}
// Caution, here i/block{idx,dim}.x is the row index and j/block{idx,dim}.y is the col index.
// this is for no reason, really, I just happened to prefer this
// at the time. [dan]
template<typename Real>
__global__
static void _apply_pow(Real* mat, Real power, MatrixDim d) {
  // In-place elementwise power, with fast paths for 1.0 (no-op), 2.0
  // (square) and 0.5 (sqrt; negative/NaN entries are left untouched rather
  // than producing NaN). Note: in this kernel the x-dim indexes rows and
  // the y-dim indexes columns.
  const int r = blockIdx.x * blockDim.x + threadIdx.x;
  const int c = blockIdx.y * blockDim.y + threadIdx.y;
  if (r >= d.rows || c >= d.cols) return;
  const int idx = r * d.stride + c;
  if (power == 1.0) {
    // identity: nothing to do
  } else if (power == 2.0) {
    mat[idx] = mat[idx] * mat[idx];
  } else if (power == 0.5) {
    if (mat[idx] >= 0.0)
      mat[idx] = sqrt(mat[idx]);
  } else {
    mat[idx] = pow(mat[idx], power);
  }
}
template<typename Real>
__global__
static void _apply_floor(Real* mat, Real floor_val, MatrixDim d) {
  // Clamp each matrix element from below at floor_val.
  const int c = blockIdx.x * blockDim.x + threadIdx.x;
  const int r = blockIdx.y * blockDim.y + threadIdx.y;
  if (c >= d.cols || r >= d.rows) return;
  const int idx = c + r * d.stride;
  if (mat[idx] < floor_val)
    mat[idx] = floor_val;
}
template<typename Real>
__global__
static void _apply_ceiling(Real* mat, Real ceiling_val, MatrixDim d) {
  // Clamp each matrix element from above at ceiling_val.
  const int c = blockIdx.x * blockDim.x + threadIdx.x;
  const int r = blockIdx.y * blockDim.y + threadIdx.y;
  if (c >= d.cols || r >= d.rows) return;
  const int idx = c + r * d.stride;
  if (mat[idx] > ceiling_val)
    mat[idx] = ceiling_val;
}
// vec_sum[i] += sum over rows of column i of mat (result has dim = cols).
// One x-block; blockIdx.y selects the column i, threadIdx.x is the row j.
// NOTE(review): row_data[j] is read for every j < blockDim.x, so this
// assumes the launch uses blockDim.x == d.rows (and d.rows <= CU1DBLOCK);
// otherwise mat is read out of bounds -- confirm at the call site.
template<typename Real>
__global__
static void _add_row_sum_mat(const Real* mat, Real* vec_sum, MatrixDim d) {
int i = blockIdx.y * blockDim.y + threadIdx.y; //col
int j = blockIdx.x * blockDim.x + threadIdx.x; //row
if(blockIdx.x > 0) return;
if(blockDim.y != 1) return;
__shared__ Real row_data[CU1DBLOCK];
//copy the input to row_data
row_data[j] = mat[i+j*d.stride];
__syncthreads();
//get the sum
Real sum = _sum_reduce(row_data);
__syncthreads();
//add to previously accumulated sum
if(threadIdx.x == 0)
vec_sum[i] += sum;
}
// vec_sum[j] += sum over columns of row j of mat (result has dim = rows).
// One x-block; blockIdx.y selects the row j, threadIdx.x is the column i.
// (The original //row and //col labels were swapped relative to how i and
// j are used in the index i + j*d.stride; corrected below.)
// NOTE(review): assumes blockDim.x == d.cols (and d.cols <= CU1DBLOCK);
// otherwise mat is read out of bounds -- confirm at the call site.
template<typename Real>
__global__
static void _add_col_sum_mat(const Real* mat, Real* vec_sum, MatrixDim d) {
int i = blockIdx.x * blockDim.x + threadIdx.x; //column offset within the row
int j = blockIdx.y * blockDim.y + threadIdx.y; //row index
if(blockIdx.x > 0) return;
if(blockDim.y != 1) return;
__shared__ Real row_data[CU1DBLOCK];
//copy the input to row_data
row_data[i] = mat[i+j*d.stride];
__syncthreads();
//get the sum
Real sum = _sum_reduce(row_data);
__syncthreads();
//add to previously accumulated sum
if(threadIdx.x == 0)
vec_sum[j] += sum;
}
/*
* cu::
*/
template<typename Real>
__global__
static void _sigmoid(Real*y, const Real*x, MatrixDim d, int src_stride) {
  // Elementwise logistic sigmoid: y = 1 / (1 + exp(-x)); source and
  // destination may have different row strides.
  const int c = blockIdx.x * blockDim.x + threadIdx.x;
  const int r = blockIdx.y * blockDim.y + threadIdx.y;
  if (c >= d.cols || r >= d.rows) return;
  y[c + r * d.stride] = 1.0 / (1.0 + exp(-x[c + r * src_stride]));
}
template<typename Real>
__global__
static void _diff_sigmoid(Real*eout, const Real*e, const Real*y, MatrixDim d, int e_stride, int y_stride) {
  // Sigmoid backprop: eout = y*(1-y) .* e, where y is the forward output.
  const int c = blockIdx.x * blockDim.x + threadIdx.x;
  const int r = blockIdx.y * blockDim.y + threadIdx.y;
  if (c >= d.cols || r >= d.rows) return;
  const Real yv = y[c + r * y_stride];
  eout[c + r * d.stride] = yv * (1.0 - yv) * e[c + r * e_stride];
}
template<typename Real>
__global__
static void _tanh(Real*y, const Real*x, MatrixDim d, int src_stride) {
  // Elementwise tanh computed via exp(2x); when exp(2x) overflows to +inf
  // (large positive x) the output saturates to 1.0.
  const int c = blockIdx.x * blockDim.x + threadIdx.x;
  const int r = blockIdx.y * blockDim.y + threadIdx.y;
  if (c >= d.cols || r >= d.rows) return;
  const Real exp_2x = exp(2.0 * x[c + r * src_stride]);
  y[c + r * d.stride] = isinf(exp_2x) ? 1.0 : (exp_2x - 1.0) / (exp_2x + 1.0);
}
template<typename Real>
__global__
static void _diff_tanh(Real*eout, const Real*e, const Real*y, MatrixDim d, int e_stride, int y_stride) {
  // Tanh backprop: eout = (1 - y^2) .* e, where y is the forward output.
  const int c = blockIdx.x * blockDim.x + threadIdx.x;
  const int r = blockIdx.y * blockDim.y + threadIdx.y;
  if (c >= d.cols || r >= d.rows) return;
  const Real yv = y[c + r * y_stride];
  eout[c + r * d.stride] = (1.0 - yv * yv) * e[c + r * e_stride];
}
// Row-wise softmax: one thread block per row j, looping when the row has
// more columns than threads. Three phases, each using a shared-memory tree
// reduction: (1) find the row max, (2) compute exp(x - max) and its sum,
// (3) normalize by the sum (the max subtraction gives numerical stability).
// NOTE(review): the initial reads/writes at offset threadIdx.x have no
// column guard, so this assumes blockDim.x <= d.cols (or that the row
// padding up to d.stride is safely addressable) -- confirm at the call site.
template<typename Real>
__global__
static void _softmax_reduce(Real*y, const Real*x, MatrixDim d, int src_stride) {
int j = blockIdx.x;
int THREADS = blockDim.x;
if (j >= d.rows) return;
__shared__ Real aux[CU1DBLOCK];
int steps = (d.cols - 1) / THREADS + 1;
//copy input to aux
aux[threadIdx.x] = x[threadIdx.x+j*d.stride];
for(int i=1; i<steps; ++i) {
if(threadIdx.x+i*THREADS < d.cols && aux[threadIdx.x] < x[threadIdx.x+i*THREADS+j*d.stride])
aux[threadIdx.x] = x[threadIdx.x+i*THREADS+j*d.stride];
}
//get the maximum value
int nTotalThreads = THREADS;
__syncthreads();
while(nTotalThreads > 1) {
int halfPoint = ((1+nTotalThreads) >> 1); // divide by two
// only the first half of the threads will be active.
if (threadIdx.x < halfPoint) {
// Get the shared value stored by another thread
if(threadIdx.x+halfPoint < nTotalThreads && aux[threadIdx.x] < aux[threadIdx.x+halfPoint])
aux[threadIdx.x] = aux[threadIdx.x + halfPoint];
}
__syncthreads();
nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two.
}
Real max = aux[0];
__syncthreads();
// subtract max, apply exp, sum up...
y[threadIdx.x+j*d.stride] = exp(x[threadIdx.x+j*d.stride] - max);
aux[threadIdx.x] = y[threadIdx.x+j*d.stride];
for(int i=1; i<steps; i++) {
if(threadIdx.x+i*THREADS < d.cols) {
y[threadIdx.x+i*THREADS+j*d.stride] = exp(x[threadIdx.x+i*THREADS+j*d.stride] - max);
aux[threadIdx.x] += y[threadIdx.x+i*THREADS+j*d.stride];
}
}
nTotalThreads = THREADS;
__syncthreads();
while(nTotalThreads > 1) {
int halfPoint = ((1+nTotalThreads) >> 1); // divide by two
// only the first half of the threads will be active.
if (threadIdx.x < halfPoint) {
// Get the shared value stored by another thread
if(threadIdx.x+halfPoint < nTotalThreads)
aux[threadIdx.x] += aux[threadIdx.x + halfPoint];
}
__syncthreads();
nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two.
}
Real sum = aux[0];
__syncthreads();
//normalize by sum...
for(int i=0; i<steps; i++) {
if(threadIdx.x+i*THREADS < d.cols) {
y[threadIdx.x+i*THREADS+j*d.stride] = y[threadIdx.x+i*THREADS+j*d.stride] / sum;
}
}
}
template<typename Real>
__global__
static void _splice(Real* y, const Real* x, const int32_cuda* off, MatrixDim d_out, MatrixDim d_in) {
  // Frame splicing: output column block f (= i / d_in.cols) is taken from
  // input row j + off[f], with the source row clamped to the valid range
  // (so edge frames are repeated at the boundaries).
  const int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
  const int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
  if (i >= d_out.cols || j >= d_out.rows) return;
  const int32_cuda src_col = i % d_in.cols;
  int32_cuda src_row = j + off[i / d_in.cols];
  if (src_row < 0) src_row = 0;
  if (src_row >= d_in.rows) src_row = d_in.rows - 1;
  y[i + j * d_out.stride] = x[src_col + src_row * d_in.stride];
}
template<typename Real>
__global__
static void _copy(Real* y, const Real* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) {
  // Column-permutation copy: output column c takes its data from input
  // column copy_from[c].  Out-of-range source indices write +inf, which
  // marks the element as undefined for downstream checks.
  int32_cuda col = blockIdx.x * blockDim.x + threadIdx.x;
  int32_cuda row = blockIdx.y * blockDim.y + threadIdx.y;
  if (col >= d_out.cols || row >= d_out.rows) return;
  int32_cuda src_col = copy_from[col];
  int32_cuda dst = col + row * d_out.stride;
  if (src_col >= 0 && src_col < d_in.cols) {
    y[dst] = x[src_col + row * d_in.stride];
  } else {
    y[dst] = 1.0/0.0;  // +inf marker for an invalid source column
  }
}
template<typename Real>
__global__
static void _randomize(Real* y, const Real* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) {
  // Row shuffle: output row r is a copy of input row copy_from[r].
  // copy_from is assumed to contain valid row indices of x.
  int32_cuda col = blockIdx.x * blockDim.x + threadIdx.x;
  int32_cuda row = blockIdx.y * blockDim.y + threadIdx.y;
  if (col >= d_out.cols || row >= d_out.rows) return;
  int32_cuda src_row = copy_from[row];
  y[col + row * d_out.stride] = x[col + src_row * d_in.stride];
}
// L1 regularization with a zero-crossing clamp, one thread per element.
// Simulates the combined SGD + L1 update (wei - lr*grad - sign(wei)*l1);
// if that simulated update would flip the weight's sign, both the weight
// and its gradient are clamped to 0, otherwise only the signed L1
// shrinkage is applied to the weight here.
// NOTE(review): the plain -lr*grad step itself is not applied in this
// kernel -- presumably done by a separate update call; confirm with the
// caller.  grad may have a different stride than wei (stride_grad).
template<typename Real>
__global__
static void _regularize_l1(Real* wei, Real* grad, Real l1, Real lr, MatrixDim d, int stride_grad) {
  int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
  int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
  int32_cuda index = i + j*d.stride,
      grad_index = i + j*stride_grad;
  if (i < d.cols && j < d.rows) {
    if(wei[index]==0.0) return; //skip L1 if zero weight!
    // L1 term carrying the sign of the current weight
    Real l1_signed = l1;
    if(wei[index] < 0.0) //flip sign
      l1_signed = -l1;
    Real before = wei[index];
    Real after = wei[index] -lr*grad[grad_index] -l1_signed;//simulate update
    if((after > 0.0) ^ (before > 0.0)) { //sign changed?
      // zero-crossing: clamp weight and gradient so the weight stays at 0
      wei[index] = 0.0;
      grad[grad_index] = 0.0;
    } else {
      wei[index] -= l1_signed;
    }
  }
}
// Per-row argmax: for each row j, finds the column holding the maximum
// value (via the shared-memory reduction helper _max_id_reduce, defined
// elsewhere in this file) and, if that value is >= the current vec_val[j],
// overwrites vec_val[j] with it and vec_id[j] with the column id offset
// by voff (voff supports processing a matrix in column chunks).
// Launch assumptions (enforced by the early returns): a 1-D thread block
// (blockDim.y == 1) of at most CU1DBLOCK threads, gridDim.y spanning the
// rows; any extra blocks in x bail out, so effectively i == threadIdx.x.
// NOTE(review): threadIdx.x is never bounded by d.cols, so this appears
// to assume d.cols == blockDim.x; threads past the row end would read
// beyond the row -- confirm against the launch configuration.
template<typename Real>
__global__
static void _find_row_max_id(const Real* mat, Real* vec_val, int32_cuda* vec_id, int32_cuda voff, MatrixDim d) {
  int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
  int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
  if(blockIdx.x > 0) return;
  if(blockDim.y != 1) return;
  __shared__ Real value[CU1DBLOCK];
  __shared__ int32_cuda index[CU1DBLOCK];
  //copy to shared memory
  value[threadIdx.x] = mat[i+j*d.stride];
  index[threadIdx.x] = threadIdx.x;
  __syncthreads();
  //get the id of the max value
  int32_cuda out_max = _max_id_reduce(value, index);
  __syncthreads();
  //see if it's bigger value
  if(threadIdx.x == 0) {
    if(vec_val[j] <= mat[out_max+j*d.stride]) {
      vec_val[j] = mat[out_max+j*d.stride];
      vec_id[j] = voff+out_max;
    }
  }
}
/***********************************************************************
* ANSI-C wrappers of CUDA kernels
*/
/*
* "int32"
*/
// int32 matrix fill: forwards to the _set_const kernel with grid Gr and
// block Bl (default stream, no dynamic shared memory).
void cudaI32_set_const(dim3 Gr, dim3 Bl, int32_cuda* mat, int32_cuda value, MatrixDim d) {
  hipLaunchKernelGGL(( _set_const), dim3(Gr),dim3(Bl), 0, 0, mat,value,d);
}
/*
* CuMatrix
*/
// ---------------------------------------------------------------------
// CuMatrix host wrappers (F = float, D = double).  Each wrapper only
// forwards its arguments to the correspondingly named kernel, launched
// with grid Gr / block Bl on the default stream with no dynamic shared
// memory.
// ---------------------------------------------------------------------
void cudaF_apply_exp(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) {
  hipLaunchKernelGGL(( _apply_exp), dim3(Gr),dim3(Bl), 0, 0, mat,d);
}
void cudaD_apply_exp(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) {
  hipLaunchKernelGGL(( _apply_exp), dim3(Gr),dim3(Bl), 0, 0, mat,d);
}
void cudaF_apply_pow(dim3 Gr, dim3 Bl, float* mat, float power, MatrixDim d) {
  hipLaunchKernelGGL(( _apply_pow), dim3(Gr),dim3(Bl), 0, 0, mat, power, d);
}
void cudaD_apply_pow(dim3 Gr, dim3 Bl, double* mat, double power, MatrixDim d) {
  hipLaunchKernelGGL(( _apply_pow), dim3(Gr),dim3(Bl), 0, 0, mat, power, d);
}
void cudaF_apply_floor(dim3 Gr, dim3 Bl, float* mat, float floor_val, MatrixDim d) {
  hipLaunchKernelGGL(( _apply_floor), dim3(Gr),dim3(Bl), 0, 0, mat, floor_val, d);
}
void cudaD_apply_floor(dim3 Gr, dim3 Bl, double* mat, double floor_val, MatrixDim d) {
  hipLaunchKernelGGL(( _apply_floor), dim3(Gr),dim3(Bl), 0, 0, mat, floor_val, d);
}
void cudaF_apply_ceiling(dim3 Gr, dim3 Bl, float* mat, float ceiling_val, MatrixDim d) {
  hipLaunchKernelGGL(( _apply_ceiling), dim3(Gr),dim3(Bl), 0, 0, mat, ceiling_val, d);
}
void cudaD_apply_ceiling(dim3 Gr, dim3 Bl, double* mat, double ceiling_val, MatrixDim d) {
  hipLaunchKernelGGL(( _apply_ceiling), dim3(Gr),dim3(Bl), 0, 0, mat, ceiling_val, d);
}
void cudaF_set_const(dim3 Gr, dim3 Bl, float* mat, float value, MatrixDim d) {
  hipLaunchKernelGGL(( _set_const), dim3(Gr),dim3(Bl), 0, 0, mat,value,d);
}
void cudaD_set_const(dim3 Gr, dim3 Bl, double* mat, double value, MatrixDim d) {
  hipLaunchKernelGGL(( _set_const), dim3(Gr),dim3(Bl), 0, 0, mat,value,d);
}
void cudaF_add(dim3 Gr, dim3 Bl, float* mat, float value, MatrixDim d) {
  hipLaunchKernelGGL(( _add), dim3(Gr),dim3(Bl), 0, 0, mat,value,d);
}
void cudaD_add(dim3 Gr, dim3 Bl, double* mat, double value, MatrixDim d) {
  hipLaunchKernelGGL(( _add), dim3(Gr),dim3(Bl), 0, 0, mat,value,d);
}
void cudaF_scale(dim3 Gr, dim3 Bl, float* mat, float value, MatrixDim d) {
  hipLaunchKernelGGL(( _scale), dim3(Gr),dim3(Bl), 0, 0, mat,value,d);
}
void cudaD_scale(dim3 Gr, dim3 Bl, double* mat, double value, MatrixDim d) {
  hipLaunchKernelGGL(( _scale), dim3(Gr),dim3(Bl), 0, 0, mat,value,d);
}
void cudaF_apply_log(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) {
  hipLaunchKernelGGL(( _apply_log), dim3(Gr),dim3(Bl), 0, 0, mat,d);
}
void cudaD_apply_log(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) {
  hipLaunchKernelGGL(( _apply_log), dim3(Gr),dim3(Bl), 0, 0, mat,d);
}
void cudaF_mul_elements(dim3 Gr, dim3 Bl, float* mat, const float* A, MatrixDim dst_d, int src_stride) {
  hipLaunchKernelGGL(( _mul_elements), dim3(Gr),dim3(Bl), 0, 0, mat,A,dst_d,src_stride);
}
void cudaD_mul_elements(dim3 Gr, dim3 Bl, double* mat, const double* A, MatrixDim dst_d, int src_stride) {
  hipLaunchKernelGGL(( _mul_elements), dim3(Gr),dim3(Bl), 0, 0, mat,A,dst_d,src_stride);
}
void cudaF_mul_rows_vec(dim3 Gr, dim3 Bl, float* mat, const float* scale, MatrixDim d) {
  hipLaunchKernelGGL(( _mul_rows_vec), dim3(Gr),dim3(Bl), 0, 0, mat,scale,d);
}
void cudaD_mul_rows_vec(dim3 Gr, dim3 Bl, double* mat, const double* scale, MatrixDim d) {
  hipLaunchKernelGGL(( _mul_rows_vec), dim3(Gr),dim3(Bl), 0, 0, mat,scale,d);
}
// A_trans != 0 selects the transposed-source variant of the kernel.
void cudaF_add_mat(dim3 Gr, dim3 Bl, float alpha, const float* src, float* dst, MatrixDim d, int src_stride, int A_trans) {
  if (A_trans) {
    hipLaunchKernelGGL(( _add_mat_trans), dim3(Gr),dim3(Bl), 0, 0, alpha,src,dst,d,src_stride);
  } else {
    hipLaunchKernelGGL(( _add_mat), dim3(Gr),dim3(Bl), 0, 0, alpha,src,dst,d,src_stride);
  }
}
void cudaD_add_mat(dim3 Gr, dim3 Bl, double alpha, const double* src, double* dst, MatrixDim d, int src_stride, int A_trans) {
  if (A_trans) {
    hipLaunchKernelGGL(( _add_mat_trans), dim3(Gr),dim3(Bl), 0, 0, alpha,src,dst,d,src_stride);
  } else {
    hipLaunchKernelGGL(( _add_mat), dim3(Gr),dim3(Bl), 0, 0, alpha,src,dst,d,src_stride);
  }
}
void cudaF_add_vec_to_rows(dim3 Gr, dim3 Bl, float alpha, const float* row, float beta, float* dst, MatrixDim d) {
  hipLaunchKernelGGL(( _add_vec_to_rows), dim3(Gr),dim3(Bl), 0, 0, alpha,row,beta,dst,d);
}
void cudaD_add_vec_to_rows(dim3 Gr, dim3 Bl, double alpha, const double* row, double beta, double* dst, MatrixDim d) {
  hipLaunchKernelGGL(( _add_vec_to_rows), dim3(Gr),dim3(Bl), 0, 0, alpha,row,beta,dst,d);
}
/*
* CuVector
*/
// ---------------------------------------------------------------------
// CuVector host wrappers (F = float, D = double).  Thin forwarders to
// the correspondingly named kernels; Gr/Bl are 1-D grid/block sizes
// here (plain ints), default stream, no dynamic shared memory.
// ---------------------------------------------------------------------
void cudaF_copy_from_vec_df(int Gr, int Bl, double* v_out, const float* v_in, int dim) {
  hipLaunchKernelGGL(( _copy_from_vec_df), dim3(Gr),dim3(Bl), 0, 0, v_out,v_in,dim);
}
void cudaD_copy_from_vec_df(int Gr, int Bl, double* v_out, const double* v_in, int dim) {
  hipLaunchKernelGGL(( _copy_from_vec_df), dim3(Gr),dim3(Bl), 0, 0, v_out,v_in,dim);
}
void cudaF_copy_from_vec_fd(int Gr, int Bl, float* v_out, const float* v_in, int dim) {
  hipLaunchKernelGGL(( _copy_from_vec_fd), dim3(Gr),dim3(Bl), 0, 0, v_out,v_in,dim);
}
void cudaD_copy_from_vec_fd(int Gr, int Bl, float* v_out, const double* v_in, int dim) {
  hipLaunchKernelGGL(( _copy_from_vec_fd), dim3(Gr),dim3(Bl), 0, 0, v_out,v_in,dim);
}
void cudaF_vec_mul_elements(int Gr, int Bl, float* v, const float* a, int dim) {
  hipLaunchKernelGGL(( _vec_mul_elements), dim3(Gr),dim3(Bl), 0, 0, v, a, dim);
}
void cudaD_vec_mul_elements(int Gr, int Bl, double* v, const double* a, int dim) {
  hipLaunchKernelGGL(( _vec_mul_elements), dim3(Gr),dim3(Bl), 0, 0, v, a, dim);
}
// vec_min / vec_max launch a single block of CU1DBLOCK threads: the
// whole reduction happens inside one block.
void cudaF_vec_min(const float* v, float* value, int dim) {
  hipLaunchKernelGGL(( _vec_min), dim3(1),dim3(CU1DBLOCK), 0, 0, v, value, dim);
}
void cudaD_vec_min(const double* v, double* value, int dim) {
  hipLaunchKernelGGL(( _vec_min), dim3(1),dim3(CU1DBLOCK), 0, 0, v, value, dim);
}
void cudaF_vec_max(const float* v, float* value, int dim) {
  hipLaunchKernelGGL(( _vec_max), dim3(1),dim3(CU1DBLOCK), 0, 0, v, value, dim);
}
void cudaD_vec_max(const double* v, double* value, int dim) {
  hipLaunchKernelGGL(( _vec_max), dim3(1),dim3(CU1DBLOCK), 0, 0, v, value, dim);
}
void cudaF_add_diag_mat_mat(int Gr, int Bl, float alpha, float* v, int v_dim, const float* M,
     int M_cols, int M_row_stride, int M_col_stride, const float *N, int N_row_stride,
     int N_col_stride, int threads_per_element, float beta) {
  hipLaunchKernelGGL(( _add_diag_mat_mat), dim3(Gr),dim3(Bl), 0, 0, alpha, v, v_dim, M, M_cols, M_row_stride, M_col_stride,
      N, N_row_stride, N_col_stride, threads_per_element, beta);
}
void cudaD_add_diag_mat_mat(int Gr, int Bl, double alpha, double* v, int v_dim, const double* M,
     int M_cols, int M_row_stride, int M_col_stride, const double *N, int N_row_stride,
     int N_col_stride, int threads_per_element, double beta) {
  hipLaunchKernelGGL(( _add_diag_mat_mat), dim3(Gr),dim3(Bl), 0, 0, alpha, v, v_dim, M, M_cols, M_row_stride, M_col_stride,
      N, N_row_stride, N_col_stride, threads_per_element, beta);
}
void cudaF_add_vec_vec(int Gr, int Bl, float alpha, float* v, const float* x, const float* y, float beta, int dim) {
  hipLaunchKernelGGL(( _add_vec_vec), dim3(Gr),dim3(Bl), 0, 0, alpha,v,x,y,beta,dim);
}
void cudaD_add_vec_vec(int Gr, int Bl, double alpha, double* v, const double* x, const double* y, double beta, int dim) {
  hipLaunchKernelGGL(( _add_vec_vec), dim3(Gr),dim3(Bl), 0, 0, alpha,v,x,y,beta,dim);
}
void cudaF_vec_sum(int Gr, int Bl, float* v, float* value, int dim, int inc) {
  hipLaunchKernelGGL(( _vec_sum), dim3(Gr),dim3(Bl), 0, 0, v, value, dim, inc);
}
void cudaD_vec_sum(int Gr, int Bl, double* v, double* value, int dim, int inc) {
  hipLaunchKernelGGL(( _vec_sum), dim3(Gr),dim3(Bl), 0, 0, v,value,dim,inc);
}
void cudaF_pvec_sum(int Gr, int Bl, float* v, float* pvec_sum, int dim, int size) {
  hipLaunchKernelGGL(( _pvec_sum), dim3(Gr),dim3(Bl), 0, 0, v, pvec_sum, dim, size);
}
void cudaD_pvec_sum(int Gr, int Bl, double* v, double* pvec_sum, int dim, int size) {
  hipLaunchKernelGGL(( _pvec_sum), dim3(Gr),dim3(Bl), 0, 0, v,pvec_sum,dim,size);
}
// NOTE(review): the double variant also takes a float* count, matching
// the float variant's signature -- confirm this asymmetry is intended.
void cudaF_vec_apply_floor(int Gr, int Bl, float* v, float floor_val, float *count, int dim) {
  hipLaunchKernelGGL(( _vec_apply_floor), dim3(Gr),dim3(Bl), 0, 0, v,floor_val,count,dim);
}
void cudaD_vec_apply_floor(int Gr, int Bl, double* v, double floor_val, float *count, int dim) {
  hipLaunchKernelGGL(( _vec_apply_floor), dim3(Gr),dim3(Bl), 0, 0, v,floor_val,count,dim);
}
void cudaF_vec_apply_exp(int Gr, int Bl, float* v, int dim) {
  hipLaunchKernelGGL(( _vec_apply_exp), dim3(Gr),dim3(Bl), 0, 0, v,dim);
}
void cudaD_vec_apply_exp(int Gr, int Bl, double* v, int dim) {
  hipLaunchKernelGGL(( _vec_apply_exp), dim3(Gr),dim3(Bl), 0, 0, v,dim);
}
void cudaF_vec_apply_log(int Gr, int Bl, float* v, float* flag, int dim) {
  hipLaunchKernelGGL(( _vec_apply_log), dim3(Gr),dim3(Bl), 0, 0, v,flag,dim);
}
void cudaD_vec_apply_log(int Gr, int Bl, double* v, double* flag, int dim) {
  hipLaunchKernelGGL(( _vec_apply_log), dim3(Gr),dim3(Bl), 0, 0, v,flag,dim);
}
void cudaF_add_row_sum_mat(dim3 Gr, dim3 Bl, const float* mat, float* vec_sum, MatrixDim d) {
  hipLaunchKernelGGL(( _add_row_sum_mat), dim3(Gr),dim3(Bl), 0, 0, mat,vec_sum,d);
}
void cudaD_add_row_sum_mat(dim3 Gr, dim3 Bl, const double* mat, double* vec_sum, MatrixDim d) {
  hipLaunchKernelGGL(( _add_row_sum_mat), dim3(Gr),dim3(Bl), 0, 0, mat,vec_sum,d);
}
void cudaF_add_col_sum_mat(dim3 Gr, dim3 Bl, const float* mat, float* vec_sum, MatrixDim d) {
  hipLaunchKernelGGL(( _add_col_sum_mat), dim3(Gr),dim3(Bl), 0, 0, mat,vec_sum,d);
}
void cudaD_add_col_sum_mat(dim3 Gr, dim3 Bl, const double* mat, double* vec_sum, MatrixDim d) {
  hipLaunchKernelGGL(( _add_col_sum_mat), dim3(Gr),dim3(Bl), 0, 0, mat,vec_sum,d);
}
/*
* cu::
*/
// ---------------------------------------------------------------------
// cu:: host wrappers: activations, their derivatives, softmax, splicing,
// copying/shuffling, L1 regularization and per-row argmax.  All are thin
// forwarders to the correspondingly named kernels (default stream, no
// dynamic shared memory).
// ---------------------------------------------------------------------
void cudaF_sigmoid (dim3 Gr, dim3 Bl, float* y, const float* x, MatrixDim d, int src_stride) {
  hipLaunchKernelGGL(( _sigmoid), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride);
}
void cudaD_sigmoid (dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride) {
  hipLaunchKernelGGL(( _sigmoid), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride);
}
void cudaF_diff_sigmoid (dim3 Gr, dim3 Bl, float* eout, const float* e, const float* y, MatrixDim d, int e_stride, int y_stride) {
  hipLaunchKernelGGL(( _diff_sigmoid), dim3(Gr),dim3(Bl), 0, 0, eout, e, y, d, e_stride, y_stride);
}
void cudaD_diff_sigmoid (dim3 Gr, dim3 Bl, double* eout, const double* e, const double* y, MatrixDim d, int e_stride, int y_stride) {
  hipLaunchKernelGGL(( _diff_sigmoid), dim3(Gr),dim3(Bl), 0, 0, eout, e, y, d, e_stride, y_stride);
}
void cudaF_tanh (dim3 Gr, dim3 Bl, float* y, const float* x, MatrixDim d, int src_stride) {
  hipLaunchKernelGGL(( _tanh), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride);
}
void cudaD_tanh (dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride) {
  hipLaunchKernelGGL(( _tanh), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride);
}
void cudaF_diff_tanh (dim3 Gr, dim3 Bl, float* eout, const float* e, const float* y, MatrixDim d, int e_stride, int y_stride) {
  hipLaunchKernelGGL(( _diff_tanh), dim3(Gr),dim3(Bl), 0, 0, eout, e, y, d, e_stride, y_stride);
}
void cudaD_diff_tanh (dim3 Gr, dim3 Bl, double* eout, const double* e, const double* y, MatrixDim d, int e_stride, int y_stride) {
  hipLaunchKernelGGL(( _diff_tanh), dim3(Gr),dim3(Bl), 0, 0, eout, e, y, d, e_stride, y_stride);
}
// softmax_reduce uses 1-D launch parameters (size_t Gr blocks of Bl
// threads); the kernel reduces over columns internally.
void cudaF_softmax_reduce (size_t Gr, size_t Bl, float* y, const float* x, MatrixDim d, int src_stride) {
  hipLaunchKernelGGL(( _softmax_reduce), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride);
}
void cudaD_softmax_reduce (size_t Gr, size_t Bl, double* y, const double* x, MatrixDim d, int src_stride) {
  hipLaunchKernelGGL(( _softmax_reduce), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride);
}
void cudaF_splice(dim3 Gr, dim3 Bl, float* y, const float* x, const int32_cuda* off, MatrixDim d_out, MatrixDim d_in) {
  hipLaunchKernelGGL(( _splice), dim3(Gr),dim3(Bl), 0, 0, y,x,off,d_out,d_in);
}
void cudaD_splice(dim3 Gr, dim3 Bl, double* y, const double* x, const int32_cuda* off, MatrixDim d_out, MatrixDim d_in) {
  hipLaunchKernelGGL(( _splice), dim3(Gr),dim3(Bl), 0, 0, y,x,off,d_out,d_in);
}
void cudaF_copy(dim3 Gr, dim3 Bl, float* y, const float* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) {
  hipLaunchKernelGGL(( _copy), dim3(Gr),dim3(Bl), 0, 0, y,x,copy_from,d_out,d_in);
}
void cudaD_copy(dim3 Gr, dim3 Bl, double* y, const double* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) {
  hipLaunchKernelGGL(( _copy), dim3(Gr),dim3(Bl), 0, 0, y,x,copy_from,d_out,d_in);
}
void cudaF_randomize(dim3 Gr, dim3 Bl, float* y, const float* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) {
  hipLaunchKernelGGL(( _randomize), dim3(Gr),dim3(Bl), 0, 0, y,x,copy_from,d_out,d_in);
}
void cudaD_randomize(dim3 Gr, dim3 Bl, double* y, const double* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) {
  hipLaunchKernelGGL(( _randomize), dim3(Gr),dim3(Bl), 0, 0, y,x,copy_from,d_out,d_in);
}
void cudaF_regularize_l1(dim3 Gr, dim3 Bl, float* wei, float* grad, float l1, float lr, MatrixDim d, int stride_grad) {
  hipLaunchKernelGGL(( _regularize_l1), dim3(Gr),dim3(Bl), 0, 0, wei,grad,l1,lr,d,stride_grad);
}
void cudaD_regularize_l1(dim3 Gr, dim3 Bl, double* wei, double* grad, double l1, double lr, MatrixDim d,int stride_grad) {
  hipLaunchKernelGGL(( _regularize_l1), dim3(Gr),dim3(Bl), 0, 0, wei,grad,l1,lr,d,stride_grad);
}
void cudaF_find_row_max_id(dim3 Gr, dim3 Bl, const float* mat, float* vec_val, int32_cuda* vec_id, int32_cuda voff, MatrixDim d) {
  hipLaunchKernelGGL(( _find_row_max_id), dim3(Gr),dim3(Bl), 0, 0, mat, vec_val, vec_id, voff, d);
}
void cudaD_find_row_max_id(dim3 Gr, dim3 Bl, const double* mat, double* vec_val, int32_cuda* vec_id, int32_cuda voff, MatrixDim d) {
  hipLaunchKernelGGL(( _find_row_max_id), dim3(Gr),dim3(Bl), 0, 0, mat, vec_val, vec_id, voff, d);
}
/* Some conversion kernels for which it's more convenient to not name them F or D. */
// ---------------------------------------------------------------------
// Matrix copy/conversion wrappers; the suffix names destination/source
// precision (d = double, f = float), e.g. _df copies float -> double.
// The _trans variants launch the transposing kernel.
// ---------------------------------------------------------------------
void cuda_copy_from_mat_df(dim3 Gr, dim3 Bl, double* mat_out, const float* mat_in, MatrixDim d_out, MatrixDim d_in) {
  hipLaunchKernelGGL(( _copy_from_mat), dim3(Gr),dim3(Bl), 0, 0, mat_out,mat_in,d_out,d_in);
}
void cuda_copy_from_mat_ff(dim3 Gr, dim3 Bl, float* mat_out, const float* mat_in, MatrixDim d_out, MatrixDim d_in) {
  hipLaunchKernelGGL(( _copy_from_mat), dim3(Gr),dim3(Bl), 0, 0, mat_out,mat_in,d_out,d_in);
}
void cuda_copy_from_mat_fd(dim3 Gr, dim3 Bl, float *mat_out, const double* mat_in, MatrixDim d_out, MatrixDim d_in) {
  hipLaunchKernelGGL(( _copy_from_mat), dim3(Gr),dim3(Bl), 0, 0, mat_out,mat_in,d_out,d_in);
}
void cuda_copy_from_mat_dd(dim3 Gr, dim3 Bl, double *mat_out, const double* mat_in, MatrixDim d_out, MatrixDim d_in) {
  hipLaunchKernelGGL(( _copy_from_mat), dim3(Gr),dim3(Bl), 0, 0, mat_out,mat_in,d_out,d_in);
}
void cuda_copy_from_mat_df_trans(dim3 Gr, dim3 Bl, double* mat_out, const float* mat_in, MatrixDim d_out, MatrixDim d_in) {
  hipLaunchKernelGGL(( _copy_from_mat_trans), dim3(Gr),dim3(Bl), 0, 0, mat_out,mat_in,d_out,d_in);
}
void cuda_copy_from_mat_ff_trans(dim3 Gr, dim3 Bl, float* mat_out, const float* mat_in, MatrixDim d_out, MatrixDim d_in) {
  hipLaunchKernelGGL(( _copy_from_mat_trans), dim3(Gr),dim3(Bl), 0, 0, mat_out,mat_in,d_out,d_in);
}
void cuda_copy_from_mat_fd_trans(dim3 Gr, dim3 Bl, float *mat_out, const double* mat_in, MatrixDim d_out, MatrixDim d_in) {
  hipLaunchKernelGGL(( _copy_from_mat_trans), dim3(Gr),dim3(Bl), 0, 0, mat_out,mat_in,d_out,d_in);
}
void cuda_copy_from_mat_dd_trans(dim3 Gr, dim3 Bl, double *mat_out, const double* mat_in, MatrixDim d_out, MatrixDim d_in) {
  hipLaunchKernelGGL(( _copy_from_mat_trans), dim3(Gr),dim3(Bl), 0, 0, mat_out,mat_in,d_out,d_in);
}
/*
* lstm::
*/
template<typename Real>
__global__
static void _add_mat_diag_vec(Real alpha, Real *mat, MatrixDim mat_dim,
                              const Real *mat2, int mat2_row_stride, int mat2_col_stride,
                              const Real *vec, Real beta) {
  // Elementwise: mat(r,c) = alpha * mat2(r,c) * vec[c] + beta * mat(r,c),
  // i.e. mat := beta*mat + alpha * mat2 * diag(vec).
  // In this kernel the x dimension indexes rows and y indexes columns
  // (the opposite of most kernels in this file).
  int row = blockIdx.x * blockDim.x + threadIdx.x;
  int col = blockIdx.y * blockDim.y + threadIdx.y;
  if (row >= mat_dim.rows || col >= mat_dim.cols) return;
  int dst_idx = row * mat_dim.stride + col;
  int src_idx = row * mat2_row_stride + col * mat2_col_stride;
  mat[dst_idx] = alpha * mat2[src_idx] * vec[col] + beta * mat[dst_idx];
}
template<typename Real>
__global__
static void _add_mat_dot_mat(Real *data, const Real *srcA_data, const Real *srcB_data, int transA, int transB, MatrixDim dim, int srcA_stride, int srcB_stride, Real alpha, Real beta) {
  // data = beta * data + alpha * (srcA .* srcB), elementwise product.
  // transA/transB (1 = kTrans, 0 = kNoTrans) are accepted for interface
  // compatibility but ignored: only kNoTrans/kNoTrans is implemented.
  int32_cuda col = blockIdx.x * blockDim.x + threadIdx.x;
  int32_cuda row = blockIdx.y * blockDim.y + threadIdx.y;
  if (col >= dim.cols || row >= dim.rows) return;
  int32_cuda tgt_index = col + row * dim.stride;
  Real a = srcA_data[col + row * srcA_stride];
  Real b = srcB_data[col + row * srcB_stride];
  data[tgt_index] = alpha * a * b + beta * data[tgt_index];
}
// Host wrappers for the lstm:: kernels above (F = float, D = double);
// plain forwarders, default stream, no dynamic shared memory.
void cudaF_add_mat_diag_vec(dim3 Gr, dim3 Bl, float alpha, float *mat, MatrixDim mat_dim,
                            const float *mat2, int mat2_row_stride, int mat2_col_stride,
                            const float *vec, float beta) {
  hipLaunchKernelGGL(( _add_mat_diag_vec), dim3(Gr),dim3(Bl), 0, 0, alpha, mat, mat_dim, mat2, mat2_row_stride,
      mat2_col_stride, vec, beta);
}
void cudaD_add_mat_diag_vec(dim3 Gr, dim3 Bl, double alpha, double *mat, MatrixDim mat_dim,
                            const double *mat2, int mat2_row_stride, int mat2_col_stride,
                            const double *vec, double beta) {
  hipLaunchKernelGGL(( _add_mat_diag_vec), dim3(Gr),dim3(Bl), 0, 0, alpha, mat, mat_dim, mat2, mat2_row_stride,
      mat2_col_stride, vec, beta);
}
void cudaF_add_mat_dot_mat(dim3 Gr, dim3 Bl, float *data, const float *srcA_data, const float *srcB_data, int transA, int transB, MatrixDim dim, int srcA_stride, int srcB_stride, float alpha, float beta) {
  hipLaunchKernelGGL(( _add_mat_dot_mat), dim3(Gr), dim3(Bl), 0, 0, data, srcA_data, srcB_data, transA, transB, dim, srcA_stride, srcB_stride, alpha, beta);
}
void cudaD_add_mat_dot_mat(dim3 Gr, dim3 Bl, double *data, const double *srcA_data, const double *srcB_data, int transA, int transB, MatrixDim dim, int srcA_stride, int srcB_stride, double alpha, double beta) {
  hipLaunchKernelGGL(( _add_mat_dot_mat), dim3(Gr), dim3(Bl), 0, 0, data, srcA_data, srcB_data, transA, transB, dim, srcA_stride, srcB_stride, alpha, beta);
}
/*
* All the following kernels are written by Yajie Miao for CTC training
*/
// CTC forward (alpha) recursion for a single sequence, one time frame.
// Computes row `row` of mat_alpha from row-1; one thread per position i
// of the augmented label sequence `labels` (length dim_alpha.cols, with
// even positions conventionally being blanks).  mat_prob holds the
// per-frame log-probabilities indexed by class id.  AddAB/LogAPlusB are
// log-domain combine helpers defined elsewhere in this file.
template<typename Real>
__global__
static void _compute_ctc_alpha_one_sequence(Real* mat_alpha, int row, MatrixDim dim_alpha, const Real* mat_prob, MatrixDim dim_prob, const int32_cuda* labels) {
  int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
  int32_cuda dim = dim_alpha.cols;
  if (i < dim) {
    int32_cuda index_alpha = i + row * dim_alpha.stride;
    int32_cuda class_idx = labels[i];
    int32_cuda index_prob = class_idx + row * dim_prob.stride;
    // predecessor entries in the previous frame: same position, one back,
    // and two back
    int32_cuda index_alpha_rm1_i = i + (row - 1) * dim_alpha.stride;
    int32_cuda index_alpha_rm1_im1 = (i - 1) + (row - 1) * dim_alpha.stride;
    int32_cuda index_alpha_rm1_im2 = (i - 2) + (row - 1) * dim_alpha.stride;
    if (row == 0) {
      // initialization: only the first two positions are reachable
      if (i < 2) mat_alpha[index_alpha] = mat_prob[index_prob];
      else mat_alpha[index_alpha] = NumericLimits<Real>::log_zero_;
    } else {
      if (i > 1) {
        // even position or repeated label: no skip allowed, so only the
        // predecessors at i and i-1 contribute
        if (i % 2 == 0 || labels[i-2] == labels[i]) {
          mat_alpha[index_alpha] = AddAB(mat_prob[index_prob], LogAPlusB(mat_alpha[index_alpha_rm1_im1], mat_alpha[index_alpha_rm1_i]));
        } else {
          // three predecessors: i, i-1 and i-2 (skip over the blank)
          Real tmp = LogAPlusB(mat_alpha[index_alpha_rm1_im1], mat_alpha[index_alpha_rm1_i]);
          mat_alpha[index_alpha] = AddAB(mat_prob[index_prob], LogAPlusB(mat_alpha[index_alpha_rm1_im2], tmp));
        }
      } else if (i == 1) {
        // position 1 has predecessors at 0 and 1 only
        mat_alpha[index_alpha] = AddAB(mat_prob[index_prob], LogAPlusB(mat_alpha[index_alpha_rm1_im1], mat_alpha[index_alpha_rm1_i]));
      } else {
        // position 0: only the same position contributes
        mat_alpha[index_alpha] = AddAB(mat_prob[index_prob], mat_alpha[index_alpha_rm1_i]);
      }
    }
  }
}
// CTC forward (alpha) recursion for a batch of sequences, one time frame.
// Frames are interleaved across sequences: the entry for sequence i,
// frame `row`, label position j lives at
//   j + (row * sequence_num + i) * dim_alpha.stride.
// `labels` is a padded matrix with one row per sequence (row stride
// dim_label_stride); a label of -1 marks a padding cell.  Cells that are
// padding, or belong to frames at/after seq_lengths[i], get log_zero_.
// Same per-position recursion as _compute_ctc_alpha_one_sequence.
template<typename Real>
__global__
static void _compute_ctc_alpha_multiple_sequence(Real* mat_alpha, int sequence_num, int row, MatrixDim dim_alpha, const Real* mat_prob, MatrixDim dim_prob, const int32_cuda* labels, int32_cuda dim_label_stride, const int32_cuda* seq_lengths) {
  int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // row index, that is, the index for sequence
  int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // label index, cannot exceed 2*|l|+1
  int32_cuda dim = dim_alpha.cols;
  if (j >= dim || i >= sequence_num) return;
  int32_cuda index_alpha = j + (row * sequence_num + i) * dim_alpha.stride;
  int32_cuda index_label = j + i * dim_label_stride;
  int32_cuda class_idx = labels[index_label];// if -1, this is the padding cell;labels now is a matrix which has the same size as mat_alpha
  if (class_idx == -1 || row >= seq_lengths[i]) {
    // padding cell or frame beyond this sequence's length
    mat_alpha[index_alpha] = NumericLimits<Real>::log_zero_;
    return;
  }
  int32_cuda index_label_m2 = (j-2) + i * dim_label_stride;
  int32_cuda index_prob = class_idx + (row * sequence_num + i) * dim_prob.stride;
  // predecessor entries in the previous frame: positions j, j-1, j-2
  int32_cuda index_alpha_rm1_i = j + ((row-1) * sequence_num + i) * dim_alpha.stride;
  int32_cuda index_alpha_rm1_im1 = (j-1) + ((row-1) * sequence_num + i) * dim_alpha.stride;
  int32_cuda index_alpha_rm1_im2 = (j-2) + ((row-1) * sequence_num + i) * dim_alpha.stride;
  if (row == 0) {
    // initialization: only the first two positions are reachable
    if (j < 2) mat_alpha[index_alpha] = mat_prob[index_prob];
    else mat_alpha[index_alpha] = NumericLimits<Real>::log_zero_;
  } else {
    if (j > 1) {
      // even position or repeated label: no skip, two predecessors
      if (j % 2 == 0 || labels[index_label_m2] == labels[index_label]) {
        mat_alpha[index_alpha] = AddAB(mat_prob[index_prob], LogAPlusB(mat_alpha[index_alpha_rm1_im1], mat_alpha[index_alpha_rm1_i]));
      } else {
        // three predecessors: j, j-1 and j-2 (skip over the blank)
        Real tmp = LogAPlusB(mat_alpha[index_alpha_rm1_im1], mat_alpha[index_alpha_rm1_i]);
        mat_alpha[index_alpha] = AddAB(mat_prob[index_prob], LogAPlusB(mat_alpha[index_alpha_rm1_im2], tmp));
      }
    } else if (j == 1) {
      mat_alpha[index_alpha] = AddAB(mat_prob[index_prob], LogAPlusB(mat_alpha[index_alpha_rm1_im1], mat_alpha[index_alpha_rm1_i]));
    } else {
      // position 0: only the same position contributes
      mat_alpha[index_alpha] = AddAB(mat_prob[index_prob], mat_alpha[index_alpha_rm1_i]);
    }
  }
}
// CTC forward (alpha) recursion for a single sequence, probability scale
// (not log).  Same structure as _compute_ctc_alpha_one_sequence but with
// plain products/sums; unreachable cells get 0.0 instead of log_zero_.
// The "rescale" name suggests the caller renormalizes each row to avoid
// underflow -- NOTE(review): confirm against the host-side code.
template<typename Real>
__global__
static void _compute_ctc_alpha_one_sequence_rescale(Real* mat_alpha, int row, MatrixDim dim_alpha, const Real* mat_prob, MatrixDim dim_prob, const int32_cuda* labels) {
  int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
  int32_cuda dim = dim_alpha.cols;
  if (i < dim) {
    int32_cuda index_alpha = i + row * dim_alpha.stride;
    int32_cuda class_idx = labels[i];
    int32_cuda index_prob = class_idx + row * dim_prob.stride;
    // predecessor entries in the previous frame: positions i, i-1, i-2
    int32_cuda index_alpha_rm1_i = i + (row - 1) * dim_alpha.stride;
    int32_cuda index_alpha_rm1_im1 = (i - 1) + (row - 1) * dim_alpha.stride;
    int32_cuda index_alpha_rm1_im2 = (i - 2) + (row - 1) * dim_alpha.stride;
    if (row == 0) {
      // initialization: only the first two positions are reachable
      if (i < 2) mat_alpha[index_alpha] = mat_prob[index_prob];
      else mat_alpha[index_alpha] = 0.0;
    } else {
      if (i > 1) {
        // even position or repeated label: two predecessors
        if (i % 2 == 0 || labels[i-2] == labels[i]) {
          mat_alpha[index_alpha] = mat_prob[index_prob] * (mat_alpha[index_alpha_rm1_im1] + mat_alpha[index_alpha_rm1_i]);
        } else {
          // three predecessors (skip over the blank)
          mat_alpha[index_alpha] = mat_prob[index_prob] * (mat_alpha[index_alpha_rm1_im1] + mat_alpha[index_alpha_rm1_i] + mat_alpha[index_alpha_rm1_im2]);
        }
      } else if (i == 1) {
        mat_alpha[index_alpha] = mat_prob[index_prob] * (mat_alpha[index_alpha_rm1_im1] + mat_alpha[index_alpha_rm1_i]);
      } else {
        // position 0: only the same position contributes
        mat_alpha[index_alpha] = mat_prob[index_prob] * mat_alpha[index_alpha_rm1_i];
      }
    }
  }
}
// CTC backward (beta) recursion for a single sequence, one time frame,
// log domain.  Computes row `row` of mat_beta from row+1 (mirror of the
// alpha recursion): successors are positions i, i+1, i+2 in the next
// frame.  Initialization at the last frame allows only the last two
// positions of the augmented label sequence.
template<typename Real>
__global__
static void _compute_ctc_beta_one_sequence(Real* mat_beta, int row, MatrixDim dim_beta, const Real* mat_prob, MatrixDim dim_prob, const int32_cuda* labels) {
  int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
  int32_cuda dim = dim_beta.cols;
  if (i < dim) {
    int32_cuda index_beta = i + row * dim_beta.stride;
    int32_cuda class_idx = labels[i];
    int32_cuda index_prob = class_idx + row * dim_prob.stride;
    // successor entries in the next frame: positions i, i+1, i+2
    int32_cuda index_beta_rp1_i = i + (row + 1) * dim_beta.stride;
    int32_cuda index_beta_rp1_ip1 = (i + 1) + (row + 1) * dim_beta.stride;
    int32_cuda index_beta_rp1_ip2 = (i + 2) + (row + 1) * dim_beta.stride;
    int32_cuda row_num = dim_beta.rows;
    if (row == row_num - 1) {
      // initialization: only the last two positions are reachable
      if (i > dim - 3) mat_beta[index_beta] = mat_prob[index_prob];
      else mat_beta[index_beta] = NumericLimits<Real>::log_zero_;
    } else {
      if (i < dim - 2) {
        // even position or repeated label ahead: no skip, two successors
        if (i % 2 == 0 || labels[i+2] == labels[i]) {
          mat_beta[index_beta] = AddAB(mat_prob[index_prob], LogAPlusB(mat_beta[index_beta_rp1_ip1], mat_beta[index_beta_rp1_i]));
        } else {
          // three successors: i, i+1 and i+2 (skip over the blank)
          Real tmp = LogAPlusB(mat_beta[index_beta_rp1_ip1], mat_beta[index_beta_rp1_i]);
          mat_beta[index_beta] = AddAB(mat_prob[index_prob], LogAPlusB(mat_beta[index_beta_rp1_ip2], tmp));
        }
      } else if (i == dim - 2) {
        mat_beta[index_beta] = AddAB(mat_prob[index_prob], LogAPlusB(mat_beta[index_beta_rp1_ip1], mat_beta[index_beta_rp1_i]));
      } else {
        // last position: only the same position contributes
        mat_beta[index_beta] = AddAB(mat_prob[index_prob], mat_beta[index_beta_rp1_i]);
      }
    }
  }
}
// CTC backward (beta) recursion for a batch of sequences, one time frame,
// log domain.  Same interleaved layout as the batched alpha kernel:
// entry for sequence i, frame `row`, position j is at
// j + (row * sequence_num + i) * dim_beta.stride.  Padding cells
// (labels == -1) and frames at/after seq_lengths[i] get log_zero_.
// Unlike the single-sequence version, the position bounds use the
// per-sequence augmented-label length label_lengths[i] rather than the
// (padded) matrix width.
template<typename Real>
__global__
static void _compute_ctc_beta_multiple_sequence(Real* mat_beta, int sequence_num, int row, MatrixDim dim_beta, const Real* mat_prob, MatrixDim dim_prob, const int32_cuda* labels, int32_cuda dim_label_stride, const int32_cuda* seq_lengths, const int32_cuda* label_lengths) {
  int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // row index, that is, the index for sequence
  int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // label index, cannot exceed 2*|l|+1
  int32_cuda dim = dim_beta.cols;
  if (j >= dim || i >= sequence_num) return;
  int32_cuda index_beta = j + (row * sequence_num + i) * dim_beta.stride;
  int32_cuda index_label = j + i * dim_label_stride;
  int32_cuda class_idx = labels[index_label];// if -1, this is the padding cell;labels now is a matrix which has the same size as mat_alpha
  if (class_idx == -1 || row >= seq_lengths[i]) {
    mat_beta[index_beta] = NumericLimits<Real>::log_zero_;
    return;
  }
  int32_cuda index_label_p2 = (j+2) + i * dim_label_stride;
  int32_cuda index_prob = class_idx + (row * sequence_num + i) * dim_prob.stride;
  // successor entries in the next frame: positions j, j+1, j+2
  int32_cuda index_beta_rp1_i = j + ((row+1) * sequence_num + i) * dim_beta.stride;
  int32_cuda index_beta_rp1_ip1 = (j+1) + ((row+1) * sequence_num + i) * dim_beta.stride;
  int32_cuda index_beta_rp1_ip2 = (j+2) + ((row+1) * sequence_num + i) * dim_beta.stride;
  int32_cuda row_num = seq_lengths[i];
  int32_cuda label_len = label_lengths[i];
  // Earlier version kept for reference: it bounded positions by the
  // padded width `dim` instead of the per-sequence label length.
  /* if (row == row_num - 1) {
    if (j > dim - 3) mat_beta[index_beta] = mat_prob[index_prob];
    else mat_beta[index_beta] = NumericLimits<Real>::log_zero_;
  } else {
    if (j < dim - 2) {
      if (j % 2 == 0 || labels[index_label_p2] == labels[index_label]) {
        mat_beta[index_beta] = AddAB(mat_prob[index_prob], LogAPlusB(mat_beta[index_beta_rp1_ip1], mat_beta[index_beta_rp1_i]));
      } else {
        Real tmp = LogAPlusB(mat_beta[index_beta_rp1_ip1], mat_beta[index_beta_rp1_i]);
        mat_beta[index_beta] = AddAB(mat_prob[index_prob], LogAPlusB(mat_beta[index_beta_rp1_ip2], tmp));
      }
    } else if (j == dim - 2) {
      mat_beta[index_beta] = AddAB(mat_prob[index_prob], LogAPlusB(mat_beta[index_beta_rp1_ip1], mat_beta[index_beta_rp1_i]));
    } else {
      mat_beta[index_beta] = AddAB(mat_prob[index_prob], mat_beta[index_beta_rp1_i]);
    }
  }
  */
  if (row == row_num - 1) {
    // initialization at this sequence's last frame: only the last two
    // positions of its augmented label sequence are reachable
    if (j > label_len - 3) mat_beta[index_beta] = mat_prob[index_prob];
    else mat_beta[index_beta] = NumericLimits<Real>::log_zero_;
  } else {
    if (j < label_len - 2) {
      // even position or repeated label ahead: no skip, two successors
      if (j % 2 == 0 || labels[index_label_p2] == labels[index_label]) {
        mat_beta[index_beta] = AddAB(mat_prob[index_prob], LogAPlusB(mat_beta[index_beta_rp1_ip1], mat_beta[index_beta_rp1_i]));
      } else {
        // three successors: j, j+1 and j+2 (skip over the blank)
        Real tmp = LogAPlusB(mat_beta[index_beta_rp1_ip1], mat_beta[index_beta_rp1_i]);
        mat_beta[index_beta] = AddAB(mat_prob[index_prob], LogAPlusB(mat_beta[index_beta_rp1_ip2], tmp));
      }
    } else if (j == label_len - 2) {
      mat_beta[index_beta] = AddAB(mat_prob[index_prob], LogAPlusB(mat_beta[index_beta_rp1_ip1], mat_beta[index_beta_rp1_i]));
    } else {
      // last position: only the same position contributes
      mat_beta[index_beta] = AddAB(mat_prob[index_prob], mat_beta[index_beta_rp1_i]);
    }
  }
}
// CTC backward (beta) recursion for a single sequence, probability scale
// (not log).  Mirror of _compute_ctc_alpha_one_sequence_rescale;
// unreachable cells get 0 instead of log_zero_.
template<typename Real>
__global__
static void _compute_ctc_beta_one_sequence_rescale(Real* mat_beta, int row, MatrixDim dim_beta, const Real* mat_prob, MatrixDim dim_prob, const int32_cuda* labels) {
  int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
  int32_cuda dim = dim_beta.cols;
  if (i < dim) {
    int32_cuda index_beta = i + row * dim_beta.stride;
    int32_cuda class_idx = labels[i];
    int32_cuda index_prob = class_idx + row * dim_prob.stride;
    // successor entries in the next frame: positions i, i+1, i+2
    int32_cuda index_beta_rp1_i = i + (row + 1) * dim_beta.stride;
    int32_cuda index_beta_rp1_ip1 = (i + 1) + (row + 1) * dim_beta.stride;
    int32_cuda index_beta_rp1_ip2 = (i + 2) + (row + 1) * dim_beta.stride;
    int32_cuda row_num = dim_beta.rows;
    if (row == row_num - 1) {
      // initialization: only the last two positions are reachable
      if (i > dim - 3) mat_beta[index_beta] = mat_prob[index_prob];
      else mat_beta[index_beta] = 0;
    } else {
      if (i < dim - 2) {
        // even position or repeated label ahead: two successors
        if (i % 2 == 0 || labels[i+2] == labels[i]) {
          mat_beta[index_beta] = mat_prob[index_prob] * (mat_beta[index_beta_rp1_ip1] + mat_beta[index_beta_rp1_i]);
        } else {
          // three successors (skip over the blank)
          mat_beta[index_beta] = mat_prob[index_prob] * (mat_beta[index_beta_rp1_ip1] + mat_beta[index_beta_rp1_i] + mat_beta[index_beta_rp1_ip2]);
        }
      } else if (i == dim - 2) {
        mat_beta[index_beta] = mat_prob[index_prob] * (mat_beta[index_beta_rp1_ip1] + mat_beta[index_beta_rp1_i]);
      } else {
        // last position: only the same position contributes
        mat_beta[index_beta] = mat_prob[index_prob] * mat_beta[index_beta_rp1_i];
      }
    }
  }
}
// mat_prob are in probability scale.
// CTC error (gradient w.r.t. the softmax activations) for one sequence.
// For each frame i and class j:
//   err = log-sum over augmented-label positions s with labels[s] == j
//         of alpha(i,s) (+) beta(i,s), accumulated in the log domain;
//   mat_error(i,j) = -exp(err - (pzx + 2*log(prob(i,j)))).
// pzx is the log total path probability; a zero probability is guarded
// by substituting log_zero_ for 2*log(prob).
template<typename Real>
__global__
static void _compute_ctc_error_one_sequence(Real* mat_error, MatrixDim dim_error, const Real* mat_alpha, const Real* mat_beta, MatrixDim dim_alpha, const Real* mat_prob, const int32_cuda* labels, Real pzx) {
  int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // row index
  int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // column index
  if (i < dim_error.rows && j < dim_error.cols) {
    Real err = NumericLimits<Real>::log_zero_;
    int32_cuda index_error = i * dim_error.stride + j;
    // accumulate alpha*beta over every label position that emits class j
    for(int s = 0; s < dim_alpha.cols; s++) {
      if (labels[s] == j) { //
        int32_cuda index_alpha = i * dim_alpha.stride + s;
        err = LogAPlusB(err, AddAB(mat_alpha[index_alpha], mat_beta[index_alpha]));
      }
    }
    Real val = ExpA(SubAB(err, AddAB(pzx, mat_prob[index_error] == 0? NumericLimits<Real>::log_zero_ : 2*log(mat_prob[index_error]))));
    mat_error[index_error] = -1.0 * val;
  }
}
// mat_prob are in probability scale.
// Computes the CTC error for several sequences processed in parallel.
// Rows of the matrices interleave the sequences: row i belongs to sequence
// i % sequence_num at frame i / sequence_num.  labels is a
// (sequence_num x dim_label_stride) matrix padded with -1; pzx holds
// log p(z|x) per sequence; seq_lengths holds per-sequence frame counts.
template<typename Real>
__global__
static void _compute_ctc_error_multiple_sequence(Real* mat_error, int32_cuda sequence_num, MatrixDim dim_error, const Real* mat_alpha, const Real* mat_beta, MatrixDim dim_alpha, const Real* mat_prob, const int32_cuda* labels, int32_cuda dim_label_stride, const int32_cuda* seq_lengths, const Real* pzx) {
  int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // row index
  int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // column index
  if (i >= dim_error.rows || j >= dim_error.cols) return;
  int32_cuda seqX = i % sequence_num;  // which sequence this row belongs to
  int32_cuda rowX = i / sequence_num;  // frame index within that sequence
  if (rowX >= seq_lengths[seqX]) return;  // past this sequence's last frame
  Real err = NumericLimits<Real>::log_zero_;
  int32_cuda index_error = i * dim_error.stride + j;
  // Log-domain sum of alpha*beta over label positions emitting class j;
  // -1 entries are padding in the label matrix.
  for(int s = 0; s < dim_alpha.cols; s++) {
    int32_cuda index_label = s + seqX * dim_label_stride;
    if (labels[index_label] == -1) {continue;}
    if (labels[index_label] == j) {
      int32_cuda index_alpha = i * dim_alpha.stride + s;
      err = LogAPlusB(err, AddAB(mat_alpha[index_alpha], mat_beta[index_alpha]));
    }
  }
  // error = -exp(err - (log p(z|x) + 2*log y_j)); guard log(0).
  Real prob = mat_prob[index_error];  // hoist the repeated global load
  Real val = ExpA(SubAB(err, AddAB(pzx[seqX], prob == 0 ? NumericLimits<Real>::log_zero_ : 2 * log(prob))));
  // was `-1.0 * val`: avoids a double-precision round trip in the float
  // instantiation; result is bit-identical.
  mat_error[index_error] = -val;
}
// Gathers, for every frame, the probabilities of the classes named in
// `labels`: mat_prob_dist(r, c) = mat_prob(r, labels[c]).
template<typename Real>
__global__
static void _distribute_prob_by_label(Real* mat_prob_dist, MatrixDim dim_prob_dist, const Real* mat_prob, MatrixDim dim_prob, const int32_cuda* labels) {
  int32_cuda row = blockIdx.x * blockDim.x + threadIdx.x;  // frame
  int32_cuda col = blockIdx.y * blockDim.y + threadIdx.y;  // label position
  if (row >= dim_prob_dist.rows || col >= dim_prob_dist.cols) return;
  mat_prob_dist[row * dim_prob_dist.stride + col] =
      mat_prob[row * dim_prob.stride + labels[col]];
}
// directly get the errors for the prior-softmax values
// Error for the rescaled (probability-scale) CTC recursion, computed
// directly w.r.t. the pre-softmax activations:
//   grad(r, c) = y_c - (sum_s alpha(r,s)*beta(r,s) over s with labels[s]==c)
//                / (zt[r] * y_c)
// with a zero fallback when the probability or the rescaling factor is 0.
template<typename Real>
__global__
static void _compute_ctc_error_one_sequence_rescale(Real* mat_error, MatrixDim dim_error, const Real* mat_alpha, const Real* mat_beta, MatrixDim dim_alpha, const Real* mat_prob, const int32_cuda* labels, const Real* zt) {
  int32_cuda row = blockIdx.x * blockDim.x + threadIdx.x; // frame
  int32_cuda col = blockIdx.y * blockDim.y + threadIdx.y; // class
  if (row >= dim_error.rows || col >= dim_error.cols) return;
  // Accumulate alpha*beta over the label positions that emit class `col`.
  Real acc = 0;
  for (int s = 0; s < dim_alpha.cols; s++) {
    if (labels[s] != col) continue;
    int32_cuda idx = row * dim_alpha.stride + s;
    acc += mat_alpha[idx] * mat_beta[idx];
  }
  int32_cuda out = row * dim_error.stride + col;
  Real prob = mat_prob[out];
  if (prob == 0 || zt[row] == 0) {
    mat_error[out] = 0;  // avoid dividing by zero
  } else {
    mat_error[out] = prob - (acc / zt[row]) / prob;
  }
}
/*
 * ANSI-C host wrappers (single precision) for the CTC kernels above.
 * Gr/Bl are the grid/block dimensions chosen by the caller; all launches go
 * to the default stream with no dynamic shared memory (HIP launch syntax).
 */
void cudaF_compute_ctc_alpha(dim3 Gr, dim3 Bl, float *alpha, int row_idx, MatrixDim dim_alpha, const float *prob, MatrixDim dim_prob, const int *labels) {
  hipLaunchKernelGGL(( _compute_ctc_alpha_one_sequence), dim3(Gr), dim3(Bl), 0, 0, alpha, row_idx, dim_alpha, prob, dim_prob, labels);
}
void cudaF_compute_ctc_beta(dim3 Gr, dim3 Bl, float *beta, int row_idx, MatrixDim dim_beta, const float *prob, MatrixDim dim_prob, const int *labels) {
  hipLaunchKernelGGL(( _compute_ctc_beta_one_sequence), dim3(Gr), dim3(Bl), 0, 0, beta, row_idx, dim_beta, prob, dim_prob, labels);
}
void cudaF_compute_ctc_error(dim3 Gr, dim3 Bl, float *error, MatrixDim dim_error, const float *alpha, const float *beta, MatrixDim dim_alpha, const float *prob, const int *labels, float pzx) {
  hipLaunchKernelGGL(( _compute_ctc_error_one_sequence), dim3(Gr), dim3(Bl), 0, 0, error, dim_error, alpha, beta, dim_alpha, prob, labels, pzx);
}
// *_rescale variants work in probability scale instead of log scale.
void cudaF_compute_ctc_alpha_rescale(dim3 Gr, dim3 Bl, float *alpha, int row_idx, MatrixDim dim_alpha, const float *prob, MatrixDim dim_prob, const int *labels) {
  hipLaunchKernelGGL(( _compute_ctc_alpha_one_sequence_rescale), dim3(Gr), dim3(Bl), 0, 0, alpha, row_idx, dim_alpha, prob, dim_prob, labels);
}
void cudaF_compute_ctc_beta_rescale(dim3 Gr, dim3 Bl, float *beta, int row_idx, MatrixDim dim_beta, const float *prob, MatrixDim dim_prob, const int *labels) {
  hipLaunchKernelGGL(( _compute_ctc_beta_one_sequence_rescale), dim3(Gr), dim3(Bl), 0, 0, beta, row_idx, dim_beta, prob, dim_prob, labels);
}
void cudaF_compute_ctc_error_rescale(dim3 Gr, dim3 Bl, float *error, MatrixDim dim_error, const float *alpha, const float *beta, MatrixDim dim_alpha, const float *prob, const int *labels, const float *zt) {
  hipLaunchKernelGGL(( _compute_ctc_error_one_sequence_rescale), dim3(Gr), dim3(Bl), 0, 0, error, dim_error, alpha, beta, dim_alpha, prob, labels, zt);
}
void cudaF_distribute_prob_by_label(dim3 Gr, dim3 Bl, float *prob_dist, MatrixDim dim_prob_dist, const float *prob, MatrixDim dim_prob, const int *labels) {
  hipLaunchKernelGGL(( _distribute_prob_by_label), dim3(Gr), dim3(Bl), 0, 0, prob_dist, dim_prob_dist, prob, dim_prob, labels);
}
// *_multiple_sequence variants process a batch of interleaved sequences.
void cudaF_compute_ctc_alpha_multiple_sequence(dim3 Gr, dim3 Bl, float *alpha, int seq_num, int row_idx, MatrixDim dim_alpha, const float *prob, MatrixDim dim_prob, const int *labels, int dim_label_stride, const int *seq_lengths) {
  hipLaunchKernelGGL(( _compute_ctc_alpha_multiple_sequence), dim3(Gr), dim3(Bl), 0, 0, alpha, seq_num, row_idx, dim_alpha, prob, dim_prob, labels, dim_label_stride, seq_lengths);
}
void cudaF_compute_ctc_beta_multiple_sequence(dim3 Gr, dim3 Bl, float *beta, int seq_num, int row_idx, MatrixDim dim_beta, const float *prob, MatrixDim dim_prob, const int *labels, int dim_label_stride, const int *seq_lengths, const int *label_lengths) {
  hipLaunchKernelGGL(( _compute_ctc_beta_multiple_sequence), dim3(Gr), dim3(Bl), 0, 0, beta, seq_num, row_idx, dim_beta, prob, dim_prob, labels, dim_label_stride, seq_lengths, label_lengths);
}
void cudaF_compute_ctc_error_multiple_sequence(dim3 Gr, dim3 Bl, float *error, int seq_num, MatrixDim dim_error, const float *alpha, const float *beta, MatrixDim dim_alpha, const float *prob, const int *labels, int dim_label_stride, const int *seq_lengths, const float *pzx) {
  hipLaunchKernelGGL(( _compute_ctc_error_multiple_sequence), dim3(Gr), dim3(Bl), 0, 0, error, seq_num, dim_error, alpha, beta, dim_alpha, prob, labels, dim_label_stride, seq_lengths, pzx);
}
/*
 * ANSI-C host wrappers (double precision) for the CTC kernels above.
 * Mirrors the cudaF_* wrappers; see those for launch-configuration notes.
 */
void cudaD_compute_ctc_alpha(dim3 Gr, dim3 Bl, double *alpha, int row_idx, MatrixDim dim_alpha, const double *prob, MatrixDim dim_prob, const int *labels) {
  hipLaunchKernelGGL(( _compute_ctc_alpha_one_sequence), dim3(Gr), dim3(Bl), 0, 0, alpha, row_idx, dim_alpha, prob, dim_prob, labels);
}
void cudaD_compute_ctc_beta(dim3 Gr, dim3 Bl, double *beta, int row_idx, MatrixDim dim_beta, const double *prob, MatrixDim dim_prob, const int *labels) {
  hipLaunchKernelGGL(( _compute_ctc_beta_one_sequence), dim3(Gr), dim3(Bl), 0, 0, beta, row_idx, dim_beta, prob, dim_prob, labels);
}
void cudaD_compute_ctc_error(dim3 Gr, dim3 Bl, double *error, MatrixDim dim_error, const double *alpha, const double *beta, MatrixDim dim_alpha, const double *prob, const int *labels, double pzx) {
  hipLaunchKernelGGL(( _compute_ctc_error_one_sequence), dim3(Gr), dim3(Bl), 0, 0, error, dim_error, alpha, beta, dim_alpha, prob, labels, pzx);
}
// *_rescale variants work in probability scale instead of log scale.
void cudaD_compute_ctc_alpha_rescale(dim3 Gr, dim3 Bl, double *alpha, int row_idx, MatrixDim dim_alpha, const double *prob, MatrixDim dim_prob, const int *labels) {
  hipLaunchKernelGGL(( _compute_ctc_alpha_one_sequence_rescale), dim3(Gr), dim3(Bl), 0, 0, alpha, row_idx, dim_alpha, prob, dim_prob, labels);
}
void cudaD_compute_ctc_beta_rescale(dim3 Gr, dim3 Bl, double *beta, int row_idx, MatrixDim dim_beta, const double *prob, MatrixDim dim_prob, const int *labels) {
  hipLaunchKernelGGL(( _compute_ctc_beta_one_sequence_rescale), dim3(Gr), dim3(Bl), 0, 0, beta, row_idx, dim_beta, prob, dim_prob, labels);
}
void cudaD_compute_ctc_error_rescale(dim3 Gr, dim3 Bl, double *error, MatrixDim dim_error, const double *alpha, const double *beta, MatrixDim dim_alpha, const double *prob, const int *labels, const double *zt) {
  hipLaunchKernelGGL(( _compute_ctc_error_one_sequence_rescale), dim3(Gr), dim3(Bl), 0, 0, error, dim_error, alpha, beta, dim_alpha, prob, labels, zt);
}
void cudaD_distribute_prob_by_label(dim3 Gr, dim3 Bl, double *prob_dist, MatrixDim dim_prob_dist, const double *prob, MatrixDim dim_prob, const int *labels) {
  hipLaunchKernelGGL(( _distribute_prob_by_label), dim3(Gr), dim3(Bl), 0, 0, prob_dist, dim_prob_dist, prob, dim_prob, labels);
}
// *_multiple_sequence variants process a batch of interleaved sequences.
void cudaD_compute_ctc_alpha_multiple_sequence(dim3 Gr, dim3 Bl, double *alpha, int seq_num, int row_idx, MatrixDim dim_alpha, const double *prob, MatrixDim dim_prob, const int *labels, int dim_label_stride, const int *seq_lengths) {
  hipLaunchKernelGGL(( _compute_ctc_alpha_multiple_sequence), dim3(Gr), dim3(Bl), 0, 0, alpha, seq_num, row_idx, dim_alpha, prob, dim_prob, labels, dim_label_stride, seq_lengths);
}
void cudaD_compute_ctc_beta_multiple_sequence(dim3 Gr, dim3 Bl, double *beta, int seq_num, int row_idx, MatrixDim dim_beta, const double *prob, MatrixDim dim_prob, const int *labels, int dim_label_stride, const int *seq_lengths, const int *label_lengths) {
  hipLaunchKernelGGL(( _compute_ctc_beta_multiple_sequence), dim3(Gr), dim3(Bl), 0, 0, beta, seq_num, row_idx, dim_beta, prob, dim_prob, labels, dim_label_stride, seq_lengths, label_lengths);
}
void cudaD_compute_ctc_error_multiple_sequence(dim3 Gr, dim3 Bl, double *error, int seq_num, MatrixDim dim_error, const double *alpha, const double *beta, MatrixDim dim_alpha, const double *prob, const int *labels, int dim_label_stride, const int *seq_lengths, const double *pzx) {
  hipLaunchKernelGGL(( _compute_ctc_error_multiple_sequence), dim3(Gr), dim3(Bl), 0, 0, error, seq_num, dim_error, alpha, beta, dim_alpha, prob, labels, dim_label_stride, seq_lengths, pzx);
}
// gpucompute/cuda-kernels.cu
// Copyright 2009-2012 Karel Vesely
// 2013 Ehsan Variani
// 2013 Johns Hopkins University (author: Daniel Povey)
// 2013 Hainan Xu
// 2013 Xiaohui Zhang
// 2013 Johns Hopkins University (author: Guoguo Chen)
// 2015 Yajie Miao
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
// WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
// MERCHANTABILITY OR NON-INFRINGEMENT.
// See the Apache 2 License for the specific language governing permissions and
// limitations under the License.
// In this file is the CUDA code of the CUDA kernels, plus the ANSI-C wrappers
#include <cfloat>
#include "cuda-kernels.h"
#include "cuPrintf.cuh"
#include "cuPrintf.cu"
#include "ctc-utils.h"
#include "stdio.h"
/***********************************************************************
* Generic __device__ functions
*/
// In-place tree-based sum reduction over buffer[0..blockDim.x-1] in shared
// memory.  Every thread of the block must call this (it contains
// __syncthreads()); on return buffer[0] holds the total, which is returned.
template<typename Real>
__device__
static Real _sum_reduce(Real buffer[]) {
  // Total number of active threads
  int32_cuda nTotalThreads = blockDim.x;
  __syncthreads();
  // perform tree-based reduction (sum)
  while(nTotalThreads > 1) {
    int32_cuda halfPoint = ((1+nTotalThreads) >> 1); // divide by two
    // Unlike the usual pattern, here the UPPER half of the active threads
    // fold their element down: buffer[t] is added into buffer[t - halfPoint].
    if (threadIdx.x >= halfPoint) { // was <
      // Get the shared value stored by this thread
      Real temp = 0.0;
      if(threadIdx.x < nTotalThreads) { // was +halfPoint
        temp = buffer[threadIdx.x]; // was +halfPoint
      }
      buffer[threadIdx.x - halfPoint] += temp;
    }
    __syncthreads();
    nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two.
  }
  // the result
  return buffer[0];
}
// In-place tree-based minimum reduction over buffer[0..blockDim.x-1] in
// shared memory.  Every thread of the block must call this (it contains
// __syncthreads()); on return buffer[0] holds the minimum, which is returned.
template<typename Real>
__device__
static Real _min_reduce(Real buffer[]) {
  // Total number of active threads
  int32_cuda nTotalThreads = blockDim.x;
  __syncthreads();
  // perform tree-based reduction (min)
  while(nTotalThreads > 1) {
    int32_cuda halfPoint = ((1+nTotalThreads) >> 1); // divide by two
    // only the first half of the threads will be active
    if (threadIdx.x < halfPoint) {
      if (threadIdx.x + halfPoint < nTotalThreads) {
        Real temp = buffer[threadIdx.x + halfPoint];
        if (temp < buffer[threadIdx.x])
          buffer[threadIdx.x] = temp;
      }
    }
    __syncthreads();
    nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two
  }
  // the result
  return buffer[0];
}
// In-place tree-based maximum reduction over buffer[0..blockDim.x-1] in
// shared memory.  Every thread of the block must call this (it contains
// __syncthreads()); on return buffer[0] holds the maximum, which is returned.
template<typename Real>
__device__
static Real _max_reduce(Real buffer[]) {
  // Total number of active threads
  int32_cuda nTotalThreads = blockDim.x;
  __syncthreads();
  // perform tree-based reduction (max)
  while(nTotalThreads > 1) {
    int32_cuda halfPoint = ((1+nTotalThreads) >> 1); // divide by two
    // only the first half of the threads will be active.
    if (threadIdx.x < halfPoint) {
      // Get the shared value stored by another thread
      if(threadIdx.x+halfPoint < nTotalThreads) {
        Real temp = buffer[threadIdx.x + halfPoint];
        if (temp > buffer[threadIdx.x])
          buffer[threadIdx.x] = temp;
      }
    }
    __syncthreads();
    nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two.
  }
  // the result
  return buffer[0];
}
// Tree-based argmax reduction: idx[] holds candidate indices into val[];
// after the reduction idx[0] is the index of the largest val entry among the
// initial candidates, and it is returned.  Every thread of the block must
// call this (it contains __syncthreads()).
template<typename Real>
__device__
static int32_cuda _max_id_reduce(Real val[], int32_cuda idx[]) {
  // Total number of active threads
  int32_cuda nTotalThreads = blockDim.x;
  __syncthreads();
  // perform tree-based reduction (get index of maximum)
  while(nTotalThreads > 1) {
    int32_cuda halfPoint = ((1+nTotalThreads) >> 1); // divide by two
    // only the first half of the threads will be active.
    if (threadIdx.x < halfPoint) {
      // -1e20 acts as a "missing lane" sentinel.
      // NOTE(review): assumes all real values are > -1e20 — confirm.
      Real temp = -1e20;
      if(threadIdx.x+halfPoint < nTotalThreads) {
        temp = val[idx[threadIdx.x + halfPoint]];
      }
      if (temp > val[idx[threadIdx.x]]) idx[threadIdx.x]=idx[threadIdx.x + halfPoint];
    }
    __syncthreads();
    nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two.
  }
  // the result
  return idx[0];
}
/***********************************************************************
* CUDA kernels
* the functions are templated to have the float/double operations
*/
// for this kernel, following the newer pattern, the x-dim is the row-index, the
// y-dim is the col-index.
// Element-wise matrix copy with type conversion (OtherReal -> Real).
// Here the x grid dimension indexes rows and the y dimension indexes columns.
template<typename Real, typename OtherReal>
__global__
static void _copy_from_mat(Real* mat_out, const OtherReal* mat_in, MatrixDim d_out, MatrixDim d_in) {
  int32_cuda row = blockIdx.x * blockDim.x + threadIdx.x;  // row-index
  int32_cuda col = blockIdx.y * blockDim.y + threadIdx.y;  // col-index
  if (row >= d_out.rows || col >= d_out.cols) return;
  mat_out[col + row * d_out.stride] =
      static_cast<Real>(mat_in[col + row * d_in.stride]);
}
// for this kernel, the x-dim is the row-index at the output, the y-dim is the
// col-index at the output
// Transposed matrix copy with type conversion: out(r, c) = in(c, r).
// The x grid dimension indexes output rows, y indexes output columns.
template<typename Real, typename OtherReal>
__global__
static void _copy_from_mat_trans(Real* mat_out, const OtherReal* mat_in, MatrixDim d_out, MatrixDim d_in) {
  int32_cuda row = blockIdx.x * blockDim.x + threadIdx.x;  // row-index out
  int32_cuda col = blockIdx.y * blockDim.y + threadIdx.y;  // col-index out
  if (row >= d_out.rows || col >= d_out.cols) return;
  mat_out[col + row * d_out.stride] =
      static_cast<Real>(mat_in[row + col * d_in.stride]);
}
// Applies exp() to every element of mat in place.
// The x grid dimension indexes columns and y indexes rows (the convention
// for most kernels in this file).
template<typename Real>
__global__
static void _apply_exp(Real* mat, MatrixDim d) {
  int32_cuda col = blockIdx.x * blockDim.x + threadIdx.x;
  int32_cuda row = blockIdx.y * blockDim.y + threadIdx.y;
  if (col >= d.cols || row >= d.rows) return;
  int32_cuda idx = col + row * d.stride;
  mat[idx] = exp(mat[idx]);
}
// Fills every element of mat with `value`.
template<typename Real>
__global__
static void _set_const(Real* mat, Real value, MatrixDim d) {
  int32_cuda col = blockIdx.x * blockDim.x + threadIdx.x;
  int32_cuda row = blockIdx.y * blockDim.y + threadIdx.y;
  if (col >= d.cols || row >= d.rows) return;
  mat[col + row * d.stride] = value;
}
// Adds the scalar `value` to every element of mat in place.
template<typename Real>
__global__
static void _add(Real* mat, Real value, MatrixDim d) {
  int32_cuda col = blockIdx.x * blockDim.x + threadIdx.x;
  int32_cuda row = blockIdx.y * blockDim.y + threadIdx.y;
  if (col >= d.cols || row >= d.rows) return;
  mat[col + row * d.stride] += value;
}
// Multiplies every element of mat by the scalar `value` in place.
template<typename Real>
__global__
static void _scale(Real* mat, Real value, MatrixDim d) {
  int32_cuda col = blockIdx.x * blockDim.x + threadIdx.x;
  int32_cuda row = blockIdx.y * blockDim.y + threadIdx.y;
  if (col >= d.cols || row >= d.rows) return;
  mat[col + row * d.stride] *= value;
}
// Applies the natural log to every element of mat in place.
template<typename Real>
__global__
static void _apply_log(Real* mat, MatrixDim d) {
  int32_cuda col = blockIdx.x * blockDim.x + threadIdx.x;
  int32_cuda row = blockIdx.y * blockDim.y + threadIdx.y;
  if (col >= d.cols || row >= d.rows) return;
  int32_cuda idx = col + row * d.stride;
  mat[idx] = log(mat[idx]);
}
// Element-wise in-place multiply: mat(r, c) *= A(r, c).
// A may have a different row stride than mat.
template<typename Real>
__global__
static void _mul_elements(Real* mat, const Real* A, MatrixDim dst_d, int src_stride) {
  int32_cuda col = blockIdx.x * blockDim.x + threadIdx.x;
  int32_cuda row = blockIdx.y * blockDim.y + threadIdx.y;
  if (col >= dst_d.cols || row >= dst_d.rows) return;
  int32_cuda di = col + row * dst_d.stride;
  mat[di] *= A[col + row * src_stride];
}
// Element-wise in-place vector multiply: v[i] *= a[i].
template<typename Real>
__global__
static void _vec_mul_elements(Real* v, const Real* a, int dim) {
  int32_cuda idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx >= dim) return;
  v[idx] *= a[idx];
}
// Scales each row r of mat by scale[r] in place.
template<typename Real>
__global__
static void _mul_rows_vec(Real* mat, const Real* scale, MatrixDim d) {
  int32_cuda col = blockIdx.x * blockDim.x + threadIdx.x;
  int32_cuda row = blockIdx.y * blockDim.y + threadIdx.y;
  if (col >= d.cols || row >= d.rows) return;
  mat[col + row * d.stride] *= scale[row];
}
// dst += alpha * src, element-wise; src may have a different row stride.
template<typename Real>
__global__
static void _add_mat(Real alpha, const Real* src, Real* dst, MatrixDim d, int src_stride) {
  int32_cuda col = blockIdx.x * blockDim.x + threadIdx.x;
  int32_cuda row = blockIdx.y * blockDim.y + threadIdx.y;
  if (col >= d.cols || row >= d.rows) return;
  dst[col + row * d.stride] += alpha * src[col + row * src_stride];
}
// dst += alpha * src^T, element-wise: dst(r, c) += alpha * src(c, r).
template<typename Real>
__global__
static void _add_mat_trans(Real alpha, const Real* src, Real* dst, MatrixDim d, int src_stride) {
  int32_cuda col = blockIdx.x * blockDim.x + threadIdx.x;
  int32_cuda row = blockIdx.y * blockDim.y + threadIdx.y;
  if (col >= d.cols || row >= d.rows) return;
  dst[col + row * d.stride] += alpha * src[row + col * src_stride];
}
// For every row r of dst: dst(r, c) = alpha * row[c] + beta * dst(r, c).
template<typename Real>
__global__
static void _add_vec_to_rows(Real alpha, const Real* row, Real beta, Real* dst, MatrixDim d) {
  int32_cuda c = blockIdx.x * blockDim.x + threadIdx.x;
  int32_cuda r = blockIdx.y * blockDim.y + threadIdx.y;
  if (c >= d.cols || r >= d.rows) return;
  int32_cuda idx = c + r * d.stride;
  dst[idx] = alpha * row[c] + beta * dst[idx];
}
/*
* CuVector
*/
// Copies a vector into a double-precision vector, converting element types.
template<typename Real>
__global__
static void _copy_from_vec_df(double* v_out, const Real* v_in, int dim) {
  int32_cuda idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx >= dim) return;
  v_out[idx] = static_cast<double>(v_in[idx]);
}
// Copies a vector into a single-precision vector, converting element types.
template<typename Real>
__global__
static void _copy_from_vec_fd(float* v_out, const Real* v_in, int dim) {
  int32_cuda idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx >= dim) return;
  v_out[idx] = static_cast<float>(v_in[idx]);
}
// Computes min(v[0..dim-1]) into *value.  Each thread scans a contiguous
// chunk of v, then the partial minima are combined with a shared-memory
// tree reduction.
template<typename Real>
__global__
static void _vec_min(const Real* v, Real* value, int dim) {
  int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
  // NOTE(review): threads with i >= CU1DBLOCK exit here, BEFORE the
  // __syncthreads() inside _min_reduce — this assumes the kernel is launched
  // with one block of at most CU1DBLOCK threads; confirm the launch config.
  if(i >= CU1DBLOCK) return;
  __shared__ Real row_data[CU1DBLOCK];
  int block_size = (dim + CU1DBLOCK - 1) / CU1DBLOCK;  // ceil(dim / CU1DBLOCK)
  Real min = 1.0 / 0.0; // infinity.
  for (int j = i * block_size; j < (i+1) * block_size && j < dim; j++) {
    Real v_j = v[j];
    if (v_j < min) min = v_j;
  }
  row_data[i] = min;
  __syncthreads();
  // Combine the partial minima; every surviving thread writes the same
  // reduced value to *value, so the racing stores are benign.
  *value = _min_reduce(row_data);
}
// Computes max(v[0..dim-1]) into *value.  Mirrors _vec_min.
template<typename Real>
__global__
static void _vec_max(const Real* v, Real* value, int dim) {
  int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
  if(blockIdx.y > 0) return;  // only the first row of blocks participates
  __shared__ Real row_data[CU1DBLOCK];
  // NOTE(review): threads with i >= CU1DBLOCK exit before the barrier inside
  // _max_reduce; assumes blockDim.x <= CU1DBLOCK — confirm the launch config.
  if(i >= CU1DBLOCK) return;
  int block_size = (dim + CU1DBLOCK - 1) / CU1DBLOCK;  // ceil(dim / CU1DBLOCK)
  Real max = -1.0 / 0.0; // -infinity.
  for (int j = i * block_size; j < (i+1) * block_size && j < dim; j++) {
    Real v_j = v[j];
    if (v_j > max) max = v_j;
  }
  row_data[i] = max;
  __syncthreads();
  // Combine the partial maxima; all surviving threads store the same value.
  *value = _max_reduce(row_data);
}
// Adds diag(M N) to v, where M and N are matrices. We supply row_stride and
// col_stride arguments for M and N, and swapping them allows us to transpose
// those matrices. Note: we imagine row-major indexing here, just like Kaldi
// and CBLAS (but unlike CUBLAS).
// This kernel expects the blockDim to be (CU1DBLOCK, 1) and the
// gridDim times CU1DBLOCK to be at least num-rows-of-v * threads_per_element.
// threads_per_element should be a power of 2.
// Computes v = beta * v + alpha * diag(M N) where M and N are matrices.
// Row/col strides are passed separately for M and N so swapping them
// effects a transpose; indexing is row-major (as in Kaldi/CBLAS, unlike
// CUBLAS).  Each element of v is computed cooperatively by
// threads_per_element consecutive threads (must be a power of 2); the
// blockDim is expected to be (CU1DBLOCK, 1) and gridDim.x * CU1DBLOCK must
// cover v_dim * threads_per_element.
template<typename Real>
__global__
static void _add_diag_mat_mat(
    Real alpha, Real* v, int v_dim, const Real* M, int M_cols, int M_row_stride,
    int M_col_stride, const Real *N, int N_row_stride, int N_col_stride,
    int threads_per_element, Real beta) {
  // we actually assume blockDim.x == CU1DBLOCK here.
  // Each diagonal element of v is processed by "threads_per_element" threads.
  __shared__ Real temp_data[CU1DBLOCK];
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  int v_idx = i / threads_per_element,  // v_idx is the index into v that we are supposed to
      sub_idx = i % threads_per_element;  // add to; 0 <= sub_idx < threads_per_element tells
                                          // us which block of elements we sum up.
  // NOTE(review): this early return skips the __syncthreads() below for
  // out-of-range threads; assumes the launch pads only within the guarantees
  // of the architecture — confirm against the host-side grid computation.
  if (v_idx >= v_dim) return;
  // Strided partial dot-product of row v_idx of M with column v_idx of N.
  Real sum = 0.0;
  for (int j = sub_idx; j < M_cols; j += threads_per_element) {
    int M_index = v_idx * M_row_stride + j * M_col_stride,
        N_index = j * N_row_stride + v_idx * N_col_stride;
    sum += M[M_index] * N[N_index];
  }
  temp_data[threadIdx.x] = sum;
  // The following is a tree-based reduction of the elements of temp_data from
  // start_idx = threadIdx.x - sub_idx to start_idx + threads_per_element - 1;
  // our own index within that group is "sub_idx".
  __syncthreads();
  int num_total_threads = threads_per_element;
  while (num_total_threads > 1) {
    int half_point = ((1 + num_total_threads) >> 1);
    if (sub_idx < half_point) {
      Real temp = 0.0;
      if (sub_idx + half_point < num_total_threads) {
        temp = temp_data[threadIdx.x + half_point];
      }
      temp_data[threadIdx.x] += temp;
    }
    __syncthreads();
    num_total_threads = half_point;
  }
  // The first thread of each group writes the combined result.
  if (sub_idx == 0) {
    v[v_idx] = beta * v[v_idx] + alpha * temp_data[threadIdx.x];
  }
}
// v = alpha * (x .* y) + beta * v, element-wise.
template<typename Real>
__global__
static void _add_vec_vec(Real alpha, Real* v, const Real* x, const Real* y, Real beta, int dim) {
  int32_cuda idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx >= dim) return;
  v[idx] = alpha * x[idx] * y[idx] + beta * v[idx];
}
// Applies exp() to every element of v in place.
template<typename Real>
__global__
static void _vec_apply_exp(Real* v, int dim) {
  int32_cuda idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx >= dim) return;
  v[idx] = exp(v[idx]);
}
// Takes the natural log of each element of v in place.  If an element is
// negative it is left unchanged and *flag is set to 1 so the host can
// detect the invalid input.
template<typename Real>
__global__
static void _vec_apply_log(Real* v, Real* flag, int dim) {
  int32_cuda idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx >= dim) return;
  Real val = v[idx];
  if (val < 0) {
    *flag = 1;  // signal invalid input to the caller
    return;
  }
  v[idx] = log(val);
}
// Computes sum(v[0], v[inc], v[2*inc], ...) over `dim` strided elements into
// *sum.  Single-block kernel: each thread sums a contiguous chunk (the first
// `threshold` threads take one extra element so the work divides evenly),
// then a shared-memory tree reduction combines the partials.
// NOTE(review): threads with i >= CU1DBLOCK return before the barrier inside
// _sum_reduce; assumes blockDim.x <= CU1DBLOCK and gridDim == 1 — confirm.
template<typename Real>
__global__
static void _vec_sum(Real *v, Real *sum, int dim, int inc) {
  int i = threadIdx.x;
  __shared__ Real row_data[CU1DBLOCK];
  if (i >= CU1DBLOCK) return;
  Real tmp_sum = 0;
  int size = dim / CU1DBLOCK; //the least size in a loop (later part)
  int threshold = dim - size * CU1DBLOCK; //any loop below this number would + 1
  int loop_start;
  int loop_end;
  if(i < threshold) {
    // first `threshold` threads each sum (size + 1) elements
    loop_start = i * (size + 1);
    loop_end = (i+1) * (size + 1);
  }
  else {
    // remaining threads each sum `size` elements
    loop_start = threshold + i * size;
    loop_end = threshold + (i+1) * size;
  }
  for(int j = loop_start; j< loop_end; j++) {
    tmp_sum += v[j * inc];
  }
  row_data[threadIdx.x] = tmp_sum;
  __syncthreads();
  // Every thread stores the same reduced value; the racing writes are benign.
  *sum = _sum_reduce(row_data);
}
// Partial sums for a long vector: block b writes the sum of
// v[b*size .. min((b+1)*size, dim)-1] into g[b]; the host combines g later.
// Requires blockDim.x <= CU1DBLOCK.
template<typename Real>
__global__
static void _pvec_sum(Real* v, Real* g, int dim, int size) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  int start = size * i;
  __shared__ Real row_data[CU1DBLOCK];
  // Fix: the original returned early when start >= dim, which (a) skipped
  // the __syncthreads() inside _sum_reduce for those threads (divergent
  // barrier) and (b) left their row_data slots uninitialized while the
  // reduction still read them.  Out-of-range threads now contribute zero
  // and participate in the barrier.
  Real sum = 0;
  if (start < dim) {
    int end = start + size;
    if (end > dim) end = dim;
    for (int j = start; j < end; j++)
      sum += v[j];
  }
  row_data[threadIdx.x] = sum;
  __syncthreads();
  g[blockIdx.x] = _sum_reduce(row_data);
}
// Floors each element of v at floor_val in place; count[i] records 1.0 if
// element i was clipped, else 0.0.
template<typename Real>
__global__
static void _vec_apply_floor(Real *v, Real floor_val, float *count, int dim) {
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx >= dim) return;
  bool clipped = v[idx] < floor_val;
  if (clipped) v[idx] = floor_val;
  count[idx] = clipped ? 1 : 0;
}
// Caution, here i/block{idx,dim}.x is the row index and j/block{idx,dim}.y is the col index.
// this is for no reason, really, I just happened to prefer this
// at the time. [dan]
// Raises every element of mat to `power` in place, with fast paths for
// power 1 (no-op), 2 (square) and 0.5 (sqrt, leaving negatives/NaN alone).
// Caution: here i/block{idx,dim}.x is the row index and j is the column
// index, the opposite of most kernels in this file.
template<typename Real>
__global__
static void _apply_pow(Real* mat, Real power, MatrixDim d) {
  int r = blockIdx.x * blockDim.x + threadIdx.x;
  int c = blockIdx.y * blockDim.y + threadIdx.y;
  if (r >= d.rows || c >= d.cols) return;
  int idx = r * d.stride + c;
  if (power == 1.0) return;  // identity
  if (power == 2.0) {
    mat[idx] = mat[idx] * mat[idx];
  } else if (power == 0.5) {
    // `!(x >= 0)` also catches NaN; such elements are left untouched.
    if (!(mat[idx] >= 0.0)) return;
    mat[idx] = sqrt(mat[idx]);
  } else {
    mat[idx] = pow(mat[idx], power);
  }
}
// Clamps every element of mat from below at floor_val, in place.
template<typename Real>
__global__
static void _apply_floor(Real* mat, Real floor_val, MatrixDim d) {
  int col = blockIdx.x * blockDim.x + threadIdx.x;
  int row = blockIdx.y * blockDim.y + threadIdx.y;
  if (col >= d.cols || row >= d.rows) return;
  int idx = col + row * d.stride;
  if (mat[idx] < floor_val)
    mat[idx] = floor_val;
}
// Clamps every element of mat from above at ceiling_val, in place.
template<typename Real>
__global__
static void _apply_ceiling(Real* mat, Real ceiling_val, MatrixDim d) {
  int col = blockIdx.x * blockDim.x + threadIdx.x;
  int row = blockIdx.y * blockDim.y + threadIdx.y;
  if (col >= d.cols || row >= d.rows) return;
  int idx = col + row * d.stride;
  if (mat[idx] > ceiling_val)
    mat[idx] = ceiling_val;
}
// Accumulates into vec_sum[i] the sum over j of mat(j, i): i.e. it reduces
// along the threadIdx.x/j dimension and accumulates one value per i.
// NOTE(review): row_data[j] is loaded without a j < d.rows guard and
// _sum_reduce reduces over blockDim.x entries, so this appears to require
// blockDim.x == d.rows (<= CU1DBLOCK) and the y-grid to cover i exactly —
// confirm against the host-side launch configuration.
template<typename Real>
__global__
static void _add_row_sum_mat(const Real* mat, Real* vec_sum, MatrixDim d) {
  int i = blockIdx.y * blockDim.y + threadIdx.y; //col
  int j = blockIdx.x * blockDim.x + threadIdx.x; //row
  if(blockIdx.x > 0) return;   // a single block along x does the reduction
  if(blockDim.y != 1) return;  // one column per block row
  __shared__ Real row_data[CU1DBLOCK];
  //copy the input to row_data
  row_data[j] = mat[i+j*d.stride];
  __syncthreads();
  //get the sum
  Real sum = _sum_reduce(row_data);
  __syncthreads();
  //add to previously accumulated sum
  if(threadIdx.x == 0)
    vec_sum[i] += sum;
}
// Accumulates into vec_sum[j] the sum over i of mat(j, i): reduces along the
// threadIdx.x/i dimension, one accumulated value per j.
// NOTE(review): the //row and //col labels below look swapped relative to
// the indexing (i offsets within a row of stride d.stride); also row_data[i]
// is loaded without an i < d.cols guard, so this appears to require
// blockDim.x == d.cols (<= CU1DBLOCK) — confirm the launch configuration.
template<typename Real>
__global__
static void _add_col_sum_mat(const Real* mat, Real* vec_sum, MatrixDim d) {
  int i = blockIdx.x * blockDim.x + threadIdx.x; //row
  int j = blockIdx.y * blockDim.y + threadIdx.y; //col
  if(blockIdx.x > 0) return;   // a single block along x does the reduction
  if(blockDim.y != 1) return;
  __shared__ Real row_data[CU1DBLOCK];
  //copy the input to row_data
  row_data[i] = mat[i+j*d.stride];
  __syncthreads();
  //get the sum
  Real sum = _sum_reduce(row_data);
  __syncthreads();
  //add to previously accumulated sum
  if(threadIdx.x == 0)
    vec_sum[j] += sum;
}
/*
* cu::
*/
// y = sigmoid(x) element-wise: y = 1 / (1 + exp(-x)).
// x may have a different row stride than y.
template<typename Real>
__global__
static void _sigmoid(Real*y, const Real*x, MatrixDim d, int src_stride) {
  int col = blockIdx.x * blockDim.x + threadIdx.x;
  int row = blockIdx.y * blockDim.y + threadIdx.y;
  if (col >= d.cols || row >= d.rows) return;
  y[col + row * d.stride] = 1.0 / (1.0 + exp(-x[col + row * src_stride]));
}
// Backprop through the sigmoid: eout = y * (1 - y) * e, where y is the
// forward-pass output and e the incoming error.
template<typename Real>
__global__
static void _diff_sigmoid(Real*eout, const Real*e, const Real*y, MatrixDim d, int e_stride, int y_stride) {
  int col = blockIdx.x * blockDim.x + threadIdx.x;
  int row = blockIdx.y * blockDim.y + threadIdx.y;
  if (col >= d.cols || row >= d.rows) return;
  Real yv = y[col + row * y_stride];
  eout[col + row * d.stride] = yv * (1.0 - yv) * e[col + row * e_stride];
}
// y = tanh(x) element-wise, computed as (exp(2x) - 1) / (exp(2x) + 1) and
// saturated to 1 when exp(2x) overflows to infinity.
template<typename Real>
__global__
static void _tanh(Real*y, const Real*x, MatrixDim d, int src_stride) {
  int col = blockIdx.x * blockDim.x + threadIdx.x;
  int row = blockIdx.y * blockDim.y + threadIdx.y;
  if (col >= d.cols || row >= d.rows) return;
  Real exp_2x = exp(2.0 * x[col + row * src_stride]);
  y[col + row * d.stride] =
      isinf(exp_2x) ? 1.0 : (exp_2x - 1.0) / (exp_2x + 1.0);
}
// Backprop through tanh: eout = (1 - y^2) * e, where y is the forward-pass
// output and e the incoming error.
template<typename Real>
__global__
static void _diff_tanh(Real*eout, const Real*e, const Real*y, MatrixDim d, int e_stride, int y_stride) {
  int col = blockIdx.x * blockDim.x + threadIdx.x;
  int row = blockIdx.y * blockDim.y + threadIdx.y;
  if (col >= d.cols || row >= d.rows) return;
  Real yv = y[col + row * y_stride];
  eout[col + row * d.stride] = (1.0 - yv * yv) * e[col + row * e_stride];
}
// Row-wise softmax: y(j, :) = softmax(x(j, :)).  One block per row (j =
// blockIdx.x); the block first finds the row maximum (for numerical
// stability), then computes exp(x - max) and the row sum, then normalizes.
// NOTE(review): the initial copies into aux and the first exp/store below
// index threadIdx.x without a < d.cols guard, so this appears to require
// blockDim.x <= d.cols — confirm against the host-side launch configuration.
template<typename Real>
__global__
static void _softmax_reduce(Real*y, const Real*x, MatrixDim d, int src_stride) {
  int j = blockIdx.x;
  int THREADS = blockDim.x;
  if (j >= d.rows) return;
  __shared__ Real aux[CU1DBLOCK];
  int steps = (d.cols - 1) / THREADS + 1;  // ceil(d.cols / THREADS)
  // Phase 1: per-thread maximum over its strided slice of the row.
  aux[threadIdx.x] = x[threadIdx.x+j*d.stride];
  for(int i=1; i<steps; ++i) {
    if(threadIdx.x+i*THREADS < d.cols && aux[threadIdx.x] < x[threadIdx.x+i*THREADS+j*d.stride])
      aux[threadIdx.x] = x[threadIdx.x+i*THREADS+j*d.stride];
  }
  // Tree reduction of the per-thread maxima.
  int nTotalThreads = THREADS;
  __syncthreads();
  while(nTotalThreads > 1) {
    int halfPoint = ((1+nTotalThreads) >> 1); // divide by two
    // only the first half of the threads will be active.
    if (threadIdx.x < halfPoint) {
      // Get the shared value stored by another thread
      if(threadIdx.x+halfPoint < nTotalThreads && aux[threadIdx.x] < aux[threadIdx.x+halfPoint])
        aux[threadIdx.x] = aux[threadIdx.x + halfPoint];
    }
    __syncthreads();
    nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two.
  }
  Real max = aux[0];
  __syncthreads();
  // Phase 2: subtract max, apply exp, accumulate the per-thread sums.
  y[threadIdx.x+j*d.stride] = exp(x[threadIdx.x+j*d.stride] - max);
  aux[threadIdx.x] = y[threadIdx.x+j*d.stride];
  for(int i=1; i<steps; i++) {
    if(threadIdx.x+i*THREADS < d.cols) {
      y[threadIdx.x+i*THREADS+j*d.stride] = exp(x[threadIdx.x+i*THREADS+j*d.stride] - max);
      aux[threadIdx.x] += y[threadIdx.x+i*THREADS+j*d.stride];
    }
  }
  // Tree reduction of the per-thread sums.
  nTotalThreads = THREADS;
  __syncthreads();
  while(nTotalThreads > 1) {
    int halfPoint = ((1+nTotalThreads) >> 1); // divide by two
    // only the first half of the threads will be active.
    if (threadIdx.x < halfPoint) {
      // Get the shared value stored by another thread
      if(threadIdx.x+halfPoint < nTotalThreads)
        aux[threadIdx.x] += aux[threadIdx.x + halfPoint];
    }
    __syncthreads();
    nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two.
  }
  Real sum = aux[0];
  __syncthreads();
  // Phase 3: normalize the row by the sum.
  for(int i=0; i<steps; i++) {
    if(threadIdx.x+i*THREADS < d.cols) {
      y[threadIdx.x+i*THREADS+j*d.stride] = y[threadIdx.x+i*THREADS+j*d.stride] / sum;
    }
  }
}
// Frame splicing: the output row is built from several input frames.  Each
// group of d_in.cols output columns g takes its values from input frame
// row + off[g], clamped to the valid frame range [0, d_in.rows).
template<typename Real>
__global__
static void _splice(Real* y, const Real* x, const int32_cuda* off, MatrixDim d_out, MatrixDim d_in) {
  int32_cuda col = blockIdx.x * blockDim.x + threadIdx.x;
  int32_cuda row = blockIdx.y * blockDim.y + threadIdx.y;
  if (col >= d_out.cols || row >= d_out.rows) return;
  int32_cuda src_col = col % d_in.cols;
  int32_cuda src_row = row + off[col / d_in.cols];
  if (src_row < 0) src_row = 0;
  if (src_row >= d_in.rows) src_row = d_in.rows - 1;
  y[col + row * d_out.stride] = x[src_col + src_row * d_in.stride];
}
// Column permutation: y(r, c) = x(r, copy_from[c]).  Out-of-range source
// indices produce +infinity so bad mappings are easy to spot downstream.
template<typename Real>
__global__
static void _copy(Real* y, const Real* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) {
  int32_cuda col = blockIdx.x * blockDim.x + threadIdx.x;
  int32_cuda row = blockIdx.y * blockDim.y + threadIdx.y;
  if (col >= d_out.cols || row >= d_out.rows) return;
  int32_cuda dst = col + row * d_out.stride;
  int32_cuda src_col = copy_from[col];
  if (src_col >= 0 && src_col < d_in.cols) {
    y[dst] = x[src_col + row * d_in.stride];
  } else {
    y[dst] = 1.0/0.0;  // +inf marks an invalid mapping
  }
}
template<typename Real>
__global__
static void _randomize(Real* y, const Real* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) {
// Row-shuffling gather: y(j, :) = x(copy_from[j], :).
// copy_from must contain valid row indices of x; no range check is done
// here. One thread per output element; 2D launch over (cols, rows).
int32_cuda col = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda row = blockIdx.y * blockDim.y + threadIdx.y;
if (col < d_out.cols && row < d_out.rows) {
y[col + row * d_out.stride] = x[col + copy_from[row] * d_in.stride];
}
}
template<typename Real>
__global__
static void _regularize_l1(Real* wei, Real* grad, Real l1, Real lr, MatrixDim d, int stride_grad) {
// Applies L1 regularization with zero-crossing clipping to one weight each.
// Exactly-zero weights are exempt. For a nonzero weight w the kernel
// simulates the upcoming SGD step plus the signed L1 shrinkage; if the
// simulated result crosses zero, both the weight and its gradient are set
// to zero (so the weight stays at zero), otherwise only the shrinkage term
// is applied here (the actual SGD step happens elsewhere).
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
if (i >= d.cols || j >= d.rows) return;
int32_cuda w_idx = i + j * d.stride;
int32_cuda g_idx = i + j * stride_grad;
Real w = wei[w_idx];
if (w == 0.0) return; // zero weights skip L1 entirely
// Shrinkage pushes the weight toward zero, so it carries the weight's sign.
Real l1_signed = (w < 0.0) ? -l1 : l1;
Real after = w - lr * grad[g_idx] - l1_signed; // simulated update
if ((after > 0.0) ^ (w > 0.0)) { // sign would flip -> clip to zero
wei[w_idx] = 0.0;
grad[g_idx] = 0.0;
} else {
wei[w_idx] -= l1_signed;
}
}
// Per-row argmax with running best: for row j, finds the column holding the
// row maximum via a shared-memory reduction, and if that value is >= the
// current best vec_val[j], stores it and the offset column id voff+out_max
// into vec_id[j].
// Launch contract (enforced by the early returns below): only x-block 0 is
// active and blockDim.y must be 1, i.e. one block of up to CU1DBLOCK threads
// per row, with blockIdx.y selecting the row.
template<typename Real>
__global__
static void _find_row_max_id(const Real* mat, Real* vec_val, int32_cuda* vec_id, int32_cuda voff, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
if(blockIdx.x > 0) return;
if(blockDim.y != 1) return;
__shared__ Real value[CU1DBLOCK];
__shared__ int32_cuda index[CU1DBLOCK];
//copy to shared memory
// NOTE(review): there is no i < d.cols guard on this read, so if
// blockDim.x exceeds d.cols each thread past the row end reads stride
// padding (or beyond) -- presumably callers launch with blockDim.x equal
// to the column count; confirm against the host-side launch code.
value[threadIdx.x] = mat[i+j*d.stride];
index[threadIdx.x] = threadIdx.x;
__syncthreads();
//get the id of the max value (block-wide reduction helper defined elsewhere)
int32_cuda out_max = _max_id_reduce(value, index);
__syncthreads();
//see if it's bigger value; only thread 0 publishes the result
if(threadIdx.x == 0) {
if(vec_val[j] <= mat[out_max+j*d.stride]) {
vec_val[j] = mat[out_max+j*d.stride];
vec_id[j] = voff+out_max;
}
}
}
/***********************************************************************
* ANSI-C wrappers of CUDA kernels
*/
/*
* "int32"
*/
// ---------------------------------------------------------------------------
// Launch wrappers: int32 / CuMatrix elementwise operations.
// Each wrapper only forwards its arguments to the corresponding templated
// kernel with the caller-supplied grid (Gr) and block (Bl) dimensions; no
// error checking or synchronization is performed here. F = float, D = double.
// ---------------------------------------------------------------------------
void cudaI32_set_const(dim3 Gr, dim3 Bl, int32_cuda* mat, int32_cuda value, MatrixDim d) {
_set_const<<<Gr,Bl>>>(mat,value,d);
}
/*
* CuMatrix
*/
void cudaF_apply_exp(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) {
_apply_exp<<<Gr,Bl>>>(mat,d);
}
void cudaD_apply_exp(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) {
_apply_exp<<<Gr,Bl>>>(mat,d);
}
void cudaF_apply_pow(dim3 Gr, dim3 Bl, float* mat, float power, MatrixDim d) {
_apply_pow<<<Gr,Bl>>>(mat, power, d);
}
void cudaD_apply_pow(dim3 Gr, dim3 Bl, double* mat, double power, MatrixDim d) {
_apply_pow<<<Gr,Bl>>>(mat, power, d);
}
void cudaF_apply_floor(dim3 Gr, dim3 Bl, float* mat, float floor_val, MatrixDim d) {
_apply_floor<<<Gr,Bl>>>(mat, floor_val, d);
}
void cudaD_apply_floor(dim3 Gr, dim3 Bl, double* mat, double floor_val, MatrixDim d) {
_apply_floor<<<Gr,Bl>>>(mat, floor_val, d);
}
void cudaF_apply_ceiling(dim3 Gr, dim3 Bl, float* mat, float ceiling_val, MatrixDim d) {
_apply_ceiling<<<Gr,Bl>>>(mat, ceiling_val, d);
}
void cudaD_apply_ceiling(dim3 Gr, dim3 Bl, double* mat, double ceiling_val, MatrixDim d) {
_apply_ceiling<<<Gr,Bl>>>(mat, ceiling_val, d);
}
void cudaF_set_const(dim3 Gr, dim3 Bl, float* mat, float value, MatrixDim d) {
_set_const<<<Gr,Bl>>>(mat,value,d);
}
void cudaD_set_const(dim3 Gr, dim3 Bl, double* mat, double value, MatrixDim d) {
_set_const<<<Gr,Bl>>>(mat,value,d);
}
void cudaF_add(dim3 Gr, dim3 Bl, float* mat, float value, MatrixDim d) {
_add<<<Gr,Bl>>>(mat,value,d);
}
void cudaD_add(dim3 Gr, dim3 Bl, double* mat, double value, MatrixDim d) {
_add<<<Gr,Bl>>>(mat,value,d);
}
void cudaF_scale(dim3 Gr, dim3 Bl, float* mat, float value, MatrixDim d) {
_scale<<<Gr,Bl>>>(mat,value,d);
}
void cudaD_scale(dim3 Gr, dim3 Bl, double* mat, double value, MatrixDim d) {
_scale<<<Gr,Bl>>>(mat,value,d);
}
void cudaF_apply_log(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) {
_apply_log<<<Gr,Bl>>>(mat,d);
}
void cudaD_apply_log(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) {
_apply_log<<<Gr,Bl>>>(mat,d);
}
void cudaF_mul_elements(dim3 Gr, dim3 Bl, float* mat, const float* A, MatrixDim dst_d, int src_stride) {
_mul_elements<<<Gr,Bl>>>(mat,A,dst_d,src_stride);
}
void cudaD_mul_elements(dim3 Gr, dim3 Bl, double* mat, const double* A, MatrixDim dst_d, int src_stride) {
_mul_elements<<<Gr,Bl>>>(mat,A,dst_d,src_stride);
}
void cudaF_mul_rows_vec(dim3 Gr, dim3 Bl, float* mat, const float* scale, MatrixDim d) {
_mul_rows_vec<<<Gr,Bl>>>(mat,scale,d);
}
void cudaD_mul_rows_vec(dim3 Gr, dim3 Bl, double* mat, const double* scale, MatrixDim d) {
_mul_rows_vec<<<Gr,Bl>>>(mat,scale,d);
}
// add_mat dispatches on A_trans to the transposed/non-transposed kernel.
void cudaF_add_mat(dim3 Gr, dim3 Bl, float alpha, const float* src, float* dst, MatrixDim d, int src_stride, int A_trans) {
if (A_trans) {
_add_mat_trans<<<Gr,Bl>>>(alpha,src,dst,d,src_stride);
} else {
_add_mat<<<Gr,Bl>>>(alpha,src,dst,d,src_stride);
}
}
void cudaD_add_mat(dim3 Gr, dim3 Bl, double alpha, const double* src, double* dst, MatrixDim d, int src_stride, int A_trans) {
if (A_trans) {
_add_mat_trans<<<Gr,Bl>>>(alpha,src,dst,d,src_stride);
} else {
_add_mat<<<Gr,Bl>>>(alpha,src,dst,d,src_stride);
}
}
void cudaF_add_vec_to_rows(dim3 Gr, dim3 Bl, float alpha, const float* row, float beta, float* dst, MatrixDim d) {
_add_vec_to_rows<<<Gr,Bl>>>(alpha,row,beta,dst,d);
}
void cudaD_add_vec_to_rows(dim3 Gr, dim3 Bl, double alpha, const double* row, double beta, double* dst, MatrixDim d) {
_add_vec_to_rows<<<Gr,Bl>>>(alpha,row,beta,dst,d);
}
/*
* CuVector
*/
// ---------------------------------------------------------------------------
// Launch wrappers: CuVector operations. Same pattern as above: pure
// forwarding to templated kernels, no error checking or synchronization.
// Note vec_min/vec_max launch a single fixed block of CU1DBLOCK threads.
// ---------------------------------------------------------------------------
void cudaF_copy_from_vec_df(int Gr, int Bl, double* v_out, const float* v_in, int dim) {
_copy_from_vec_df<<<Gr,Bl>>>(v_out,v_in,dim);
}
void cudaD_copy_from_vec_df(int Gr, int Bl, double* v_out, const double* v_in, int dim) {
_copy_from_vec_df<<<Gr,Bl>>>(v_out,v_in,dim);
}
void cudaF_copy_from_vec_fd(int Gr, int Bl, float* v_out, const float* v_in, int dim) {
_copy_from_vec_fd<<<Gr,Bl>>>(v_out,v_in,dim);
}
void cudaD_copy_from_vec_fd(int Gr, int Bl, float* v_out, const double* v_in, int dim) {
_copy_from_vec_fd<<<Gr,Bl>>>(v_out,v_in,dim);
}
void cudaF_vec_mul_elements(int Gr, int Bl, float* v, const float* a, int dim) {
_vec_mul_elements<<<Gr,Bl>>>(v, a, dim);
}
void cudaD_vec_mul_elements(int Gr, int Bl, double* v, const double* a, int dim) {
_vec_mul_elements<<<Gr,Bl>>>(v, a, dim);
}
void cudaF_vec_min(const float* v, float* value, int dim) {
_vec_min<<<1,CU1DBLOCK>>>(v, value, dim);
}
void cudaD_vec_min(const double* v, double* value, int dim) {
_vec_min<<<1,CU1DBLOCK>>>(v, value, dim);
}
void cudaF_vec_max(const float* v, float* value, int dim) {
_vec_max<<<1,CU1DBLOCK>>>(v, value, dim);
}
void cudaD_vec_max(const double* v, double* value, int dim) {
_vec_max<<<1,CU1DBLOCK>>>(v, value, dim);
}
void cudaF_add_diag_mat_mat(int Gr, int Bl, float alpha, float* v, int v_dim, const float* M,
int M_cols, int M_row_stride, int M_col_stride, const float *N, int N_row_stride,
int N_col_stride, int threads_per_element, float beta) {
_add_diag_mat_mat<<<Gr,Bl>>>(alpha, v, v_dim, M, M_cols, M_row_stride, M_col_stride,
N, N_row_stride, N_col_stride, threads_per_element, beta);
}
void cudaD_add_diag_mat_mat(int Gr, int Bl, double alpha, double* v, int v_dim, const double* M,
int M_cols, int M_row_stride, int M_col_stride, const double *N, int N_row_stride,
int N_col_stride, int threads_per_element, double beta) {
_add_diag_mat_mat<<<Gr,Bl>>>(alpha, v, v_dim, M, M_cols, M_row_stride, M_col_stride,
N, N_row_stride, N_col_stride, threads_per_element, beta);
}
void cudaF_add_vec_vec(int Gr, int Bl, float alpha, float* v, const float* x, const float* y, float beta, int dim) {
_add_vec_vec<<<Gr,Bl>>>(alpha,v,x,y,beta,dim);
}
void cudaD_add_vec_vec(int Gr, int Bl, double alpha, double* v, const double* x, const double* y, double beta, int dim) {
_add_vec_vec<<<Gr,Bl>>>(alpha,v,x,y,beta,dim);
}
void cudaF_vec_sum(int Gr, int Bl, float* v, float* value, int dim, int inc) {
_vec_sum<<<Gr,Bl>>>(v, value, dim, inc);
}
void cudaD_vec_sum(int Gr, int Bl, double* v, double* value, int dim, int inc) {
_vec_sum<<<Gr,Bl>>>(v,value,dim,inc);
}
void cudaF_pvec_sum(int Gr, int Bl, float* v, float* pvec_sum, int dim, int size) {
_pvec_sum<<<Gr,Bl>>>(v, pvec_sum, dim, size);
}
void cudaD_pvec_sum(int Gr, int Bl, double* v, double* pvec_sum, int dim, int size) {
_pvec_sum<<<Gr,Bl>>>(v,pvec_sum,dim,size);
}
// NOTE(review): the double variant of vec_apply_floor takes a float* count,
// matching the float variant -- presumably intentional for a shared counter
// type; confirm against the header declaration.
void cudaF_vec_apply_floor(int Gr, int Bl, float* v, float floor_val, float *count, int dim) {
_vec_apply_floor<<<Gr,Bl>>>(v,floor_val,count,dim);
}
void cudaD_vec_apply_floor(int Gr, int Bl, double* v, double floor_val, float *count, int dim) {
_vec_apply_floor<<<Gr,Bl>>>(v,floor_val,count,dim);
}
void cudaF_vec_apply_exp(int Gr, int Bl, float* v, int dim) {
_vec_apply_exp<<<Gr,Bl>>>(v,dim);
}
void cudaD_vec_apply_exp(int Gr, int Bl, double* v, int dim) {
_vec_apply_exp<<<Gr,Bl>>>(v,dim);
}
void cudaF_vec_apply_log(int Gr, int Bl, float* v, float* flag, int dim) {
_vec_apply_log<<<Gr,Bl>>>(v,flag,dim);
}
void cudaD_vec_apply_log(int Gr, int Bl, double* v, double* flag, int dim) {
_vec_apply_log<<<Gr,Bl>>>(v,flag,dim);
}
void cudaF_add_row_sum_mat(dim3 Gr, dim3 Bl, const float* mat, float* vec_sum, MatrixDim d) {
_add_row_sum_mat<<<Gr,Bl>>>(mat,vec_sum,d);
}
void cudaD_add_row_sum_mat(dim3 Gr, dim3 Bl, const double* mat, double* vec_sum, MatrixDim d) {
_add_row_sum_mat<<<Gr,Bl>>>(mat,vec_sum,d);
}
void cudaF_add_col_sum_mat(dim3 Gr, dim3 Bl, const float* mat, float* vec_sum, MatrixDim d) {
_add_col_sum_mat<<<Gr,Bl>>>(mat,vec_sum,d);
}
void cudaD_add_col_sum_mat(dim3 Gr, dim3 Bl, const double* mat, double* vec_sum, MatrixDim d) {
_add_col_sum_mat<<<Gr,Bl>>>(mat,vec_sum,d);
}
/*
* cu::
*/
// ---------------------------------------------------------------------------
// Launch wrappers: nonlinearities (sigmoid/tanh and their derivatives),
// softmax, splice/copy/randomize gathers, L1 regularization, row argmax.
// Pure forwarding; no error checking or synchronization.
// ---------------------------------------------------------------------------
void cudaF_sigmoid (dim3 Gr, dim3 Bl, float* y, const float* x, MatrixDim d, int src_stride) {
_sigmoid<<<Gr,Bl>>>(y, x, d, src_stride);
}
void cudaD_sigmoid (dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride) {
_sigmoid<<<Gr,Bl>>>(y, x, d, src_stride);
}
void cudaF_diff_sigmoid (dim3 Gr, dim3 Bl, float* eout, const float* e, const float* y, MatrixDim d, int e_stride, int y_stride) {
_diff_sigmoid<<<Gr,Bl>>>(eout, e, y, d, e_stride, y_stride);
}
void cudaD_diff_sigmoid (dim3 Gr, dim3 Bl, double* eout, const double* e, const double* y, MatrixDim d, int e_stride, int y_stride) {
_diff_sigmoid<<<Gr,Bl>>>(eout, e, y, d, e_stride, y_stride);
}
void cudaF_tanh (dim3 Gr, dim3 Bl, float* y, const float* x, MatrixDim d, int src_stride) {
_tanh<<<Gr,Bl>>>(y, x, d, src_stride);
}
void cudaD_tanh (dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride) {
_tanh<<<Gr,Bl>>>(y, x, d, src_stride);
}
void cudaF_diff_tanh (dim3 Gr, dim3 Bl, float* eout, const float* e, const float* y, MatrixDim d, int e_stride, int y_stride) {
_diff_tanh<<<Gr,Bl>>>(eout, e, y, d, e_stride, y_stride);
}
void cudaD_diff_tanh (dim3 Gr, dim3 Bl, double* eout, const double* e, const double* y, MatrixDim d, int e_stride, int y_stride) {
_diff_tanh<<<Gr,Bl>>>(eout, e, y, d, e_stride, y_stride);
}
// softmax_reduce uses scalar grid/block sizes (1D launch, one block per row).
void cudaF_softmax_reduce (size_t Gr, size_t Bl, float* y, const float* x, MatrixDim d, int src_stride) {
_softmax_reduce<<<Gr,Bl>>>(y, x, d, src_stride);
}
void cudaD_softmax_reduce (size_t Gr, size_t Bl, double* y, const double* x, MatrixDim d, int src_stride) {
_softmax_reduce<<<Gr,Bl>>>(y, x, d, src_stride);
}
void cudaF_splice(dim3 Gr, dim3 Bl, float* y, const float* x, const int32_cuda* off, MatrixDim d_out, MatrixDim d_in) {
_splice<<<Gr,Bl>>>(y,x,off,d_out,d_in);
}
void cudaD_splice(dim3 Gr, dim3 Bl, double* y, const double* x, const int32_cuda* off, MatrixDim d_out, MatrixDim d_in) {
_splice<<<Gr,Bl>>>(y,x,off,d_out,d_in);
}
void cudaF_copy(dim3 Gr, dim3 Bl, float* y, const float* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) {
_copy<<<Gr,Bl>>>(y,x,copy_from,d_out,d_in);
}
void cudaD_copy(dim3 Gr, dim3 Bl, double* y, const double* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) {
_copy<<<Gr,Bl>>>(y,x,copy_from,d_out,d_in);
}
void cudaF_randomize(dim3 Gr, dim3 Bl, float* y, const float* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) {
_randomize<<<Gr,Bl>>>(y,x,copy_from,d_out,d_in);
}
void cudaD_randomize(dim3 Gr, dim3 Bl, double* y, const double* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) {
_randomize<<<Gr,Bl>>>(y,x,copy_from,d_out,d_in);
}
void cudaF_regularize_l1(dim3 Gr, dim3 Bl, float* wei, float* grad, float l1, float lr, MatrixDim d, int stride_grad) {
_regularize_l1<<<Gr,Bl>>>(wei,grad,l1,lr,d,stride_grad);
}
void cudaD_regularize_l1(dim3 Gr, dim3 Bl, double* wei, double* grad, double l1, double lr, MatrixDim d,int stride_grad) {
_regularize_l1<<<Gr,Bl>>>(wei,grad,l1,lr,d,stride_grad);
}
void cudaF_find_row_max_id(dim3 Gr, dim3 Bl, const float* mat, float* vec_val, int32_cuda* vec_id, int32_cuda voff, MatrixDim d) {
_find_row_max_id<<<Gr,Bl>>>(mat, vec_val, vec_id, voff, d);
}
void cudaD_find_row_max_id(dim3 Gr, dim3 Bl, const double* mat, double* vec_val, int32_cuda* vec_id, int32_cuda voff, MatrixDim d) {
_find_row_max_id<<<Gr,Bl>>>(mat, vec_val, vec_id, voff, d);
}
/* Some conversion kernels for which it's more convenient to not name them F or D. */
// Matrix copy/convert wrappers. Naming: {d,f}{d,f} = destination/source
// element types (double/float); the _trans variants transpose during the
// copy. Pure forwarding; no error checking or synchronization.
void cuda_copy_from_mat_df(dim3 Gr, dim3 Bl, double* mat_out, const float* mat_in, MatrixDim d_out, MatrixDim d_in) {
_copy_from_mat<<<Gr,Bl>>>(mat_out,mat_in,d_out,d_in);
}
void cuda_copy_from_mat_ff(dim3 Gr, dim3 Bl, float* mat_out, const float* mat_in, MatrixDim d_out, MatrixDim d_in) {
_copy_from_mat<<<Gr,Bl>>>(mat_out,mat_in,d_out,d_in);
}
void cuda_copy_from_mat_fd(dim3 Gr, dim3 Bl, float *mat_out, const double* mat_in, MatrixDim d_out, MatrixDim d_in) {
_copy_from_mat<<<Gr,Bl>>>(mat_out,mat_in,d_out,d_in);
}
void cuda_copy_from_mat_dd(dim3 Gr, dim3 Bl, double *mat_out, const double* mat_in, MatrixDim d_out, MatrixDim d_in) {
_copy_from_mat<<<Gr,Bl>>>(mat_out,mat_in,d_out,d_in);
}
void cuda_copy_from_mat_df_trans(dim3 Gr, dim3 Bl, double* mat_out, const float* mat_in, MatrixDim d_out, MatrixDim d_in) {
_copy_from_mat_trans<<<Gr,Bl>>>(mat_out,mat_in,d_out,d_in);
}
void cuda_copy_from_mat_ff_trans(dim3 Gr, dim3 Bl, float* mat_out, const float* mat_in, MatrixDim d_out, MatrixDim d_in) {
_copy_from_mat_trans<<<Gr,Bl>>>(mat_out,mat_in,d_out,d_in);
}
void cuda_copy_from_mat_fd_trans(dim3 Gr, dim3 Bl, float *mat_out, const double* mat_in, MatrixDim d_out, MatrixDim d_in) {
_copy_from_mat_trans<<<Gr,Bl>>>(mat_out,mat_in,d_out,d_in);
}
void cuda_copy_from_mat_dd_trans(dim3 Gr, dim3 Bl, double *mat_out, const double* mat_in, MatrixDim d_out, MatrixDim d_in) {
_copy_from_mat_trans<<<Gr,Bl>>>(mat_out,mat_in,d_out,d_in);
}
/*
* lstm::
*/
template<typename Real>
__global__
static void _add_mat_diag_vec(Real alpha, Real *mat, MatrixDim mat_dim,
const Real *mat2, int mat2_row_stride, int mat2_col_stride,
const Real *vec, Real beta) {
// Computes mat = alpha * mat2 * diag(vec) + beta * mat, elementwise:
// mat(r,c) = alpha * mat2(r,c) * vec[c] + beta * mat(r,c).
// mat2 is addressed with explicit row/column strides so it may be a
// transposed or sub-matrix view.
// NOTE: here the x dimension indexes ROWS and y indexes COLUMNS, the
// opposite of most earlier kernels in this file.
int row = blockIdx.x * blockDim.x + threadIdx.x;
int col = blockIdx.y * blockDim.y + threadIdx.y;
if (row < mat_dim.rows && col < mat_dim.cols) {
int dst = row * mat_dim.stride + col;
int src = row * mat2_row_stride + col * mat2_col_stride;
mat[dst] = alpha * mat2[src] * vec[col] + beta * mat[dst];
}
}
template<typename Real>
__global__
static void _add_mat_dot_mat(Real *data, const Real *srcA_data, const Real *srcB_data, int transA, int transB, MatrixDim dim, int srcA_stride, int srcB_stride, Real alpha, Real beta) {
// data = alpha * (srcA .* srcB) + beta * data (elementwise product).
// transA/transB (1 = kTrans, 0 = kNoTrans) are accepted for interface
// symmetry but currently ignored: only the kNoTrans/kNoTrans case is
// implemented. One thread per element; x indexes columns, y indexes rows.
int32_cuda col = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda row = blockIdx.y * blockDim.y + threadIdx.y;
if (col < dim.cols && row < dim.rows) {
int32_cuda dst = col + row * dim.stride;
data[dst] = alpha * srcA_data[col + row * srcA_stride]
* srcB_data[col + row * srcB_stride]
+ beta * data[dst];
}
}
// Launch wrappers for the lstm:: section kernels above. Pure forwarding;
// no error checking or synchronization.
void cudaF_add_mat_diag_vec(dim3 Gr, dim3 Bl, float alpha, float *mat, MatrixDim mat_dim,
const float *mat2, int mat2_row_stride, int mat2_col_stride,
const float *vec, float beta) {
_add_mat_diag_vec<<<Gr,Bl>>>(alpha, mat, mat_dim, mat2, mat2_row_stride,
mat2_col_stride, vec, beta);
}
void cudaD_add_mat_diag_vec(dim3 Gr, dim3 Bl, double alpha, double *mat, MatrixDim mat_dim,
const double *mat2, int mat2_row_stride, int mat2_col_stride,
const double *vec, double beta) {
_add_mat_diag_vec<<<Gr,Bl>>>(alpha, mat, mat_dim, mat2, mat2_row_stride,
mat2_col_stride, vec, beta);
}
void cudaF_add_mat_dot_mat(dim3 Gr, dim3 Bl, float *data, const float *srcA_data, const float *srcB_data, int transA, int transB, MatrixDim dim, int srcA_stride, int srcB_stride, float alpha, float beta) {
_add_mat_dot_mat<<<Gr, Bl>>>(data, srcA_data, srcB_data, transA, transB, dim, srcA_stride, srcB_stride, alpha, beta);
}
void cudaD_add_mat_dot_mat(dim3 Gr, dim3 Bl, double *data, const double *srcA_data, const double *srcB_data, int transA, int transB, MatrixDim dim, int srcA_stride, int srcB_stride, double alpha, double beta) {
_add_mat_dot_mat<<<Gr, Bl>>>(data, srcA_data, srcB_data, transA, transB, dim, srcA_stride, srcB_stride, alpha, beta);
}
/*
* All the following kernels are written by Yajie Miao for CTC training
*/
// One time-step of the CTC forward (alpha) recursion in the log domain, for
// a single sequence. Call once per frame with increasing `row`; each thread
// handles one position i of the augmented label sequence `labels`
// (dim_alpha.cols entries). mat_prob holds per-frame class posteriors
// (indexed by class id); alpha entries are log-probabilities, with
// NumericLimits<Real>::log_zero_ standing in for log(0).
// NOTE(review): the i % 2 == 0 test presumably identifies blank positions
// in a blank-interleaved label layout (skip transitions are disallowed for
// blanks and repeated labels) -- confirm against the host-side label setup.
template<typename Real>
__global__
static void _compute_ctc_alpha_one_sequence(Real* mat_alpha, int row, MatrixDim dim_alpha, const Real* mat_prob, MatrixDim dim_prob, const int32_cuda* labels) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda dim = dim_alpha.cols;
if (i < dim) {
int32_cuda index_alpha = i + row * dim_alpha.stride;
int32_cuda class_idx = labels[i];
int32_cuda index_prob = class_idx + row * dim_prob.stride;
// Previous frame's alpha at positions i, i-1, i-2 (only valid for row > 0
// and sufficiently large i; guarded by the branches below).
int32_cuda index_alpha_rm1_i = i + (row - 1) * dim_alpha.stride;
int32_cuda index_alpha_rm1_im1 = (i - 1) + (row - 1) * dim_alpha.stride;
int32_cuda index_alpha_rm1_im2 = (i - 2) + (row - 1) * dim_alpha.stride;
if (row == 0) {
// Initialization: paths may only start at the first two positions.
if (i < 2) mat_alpha[index_alpha] = mat_prob[index_prob];
else mat_alpha[index_alpha] = NumericLimits<Real>::log_zero_;
} else {
if (i > 1) {
if (i % 2 == 0 || labels[i-2] == labels[i]) {
// Two-way transition: stay at i, or advance from i-1.
mat_alpha[index_alpha] = AddAB(mat_prob[index_prob], LogAPlusB(mat_alpha[index_alpha_rm1_im1], mat_alpha[index_alpha_rm1_i]));
} else {
// Three-way transition: stay, advance from i-1, or skip from i-2.
Real tmp = LogAPlusB(mat_alpha[index_alpha_rm1_im1], mat_alpha[index_alpha_rm1_i]);
mat_alpha[index_alpha] = AddAB(mat_prob[index_prob], LogAPlusB(mat_alpha[index_alpha_rm1_im2], tmp));
}
} else if (i == 1) {
mat_alpha[index_alpha] = AddAB(mat_prob[index_prob], LogAPlusB(mat_alpha[index_alpha_rm1_im1], mat_alpha[index_alpha_rm1_i]));
} else {
// i == 0: can only stay in the first position.
mat_alpha[index_alpha] = AddAB(mat_prob[index_prob], mat_alpha[index_alpha_rm1_i]);
}
}
}
}
// Batched variant of the CTC forward (alpha) recursion: one time-step for
// `sequence_num` sequences at once. Frames of all sequences are interleaved
// row-wise, hence the (row * sequence_num + i) row indexing into both alpha
// and prob. `labels` is a matrix (stride dim_label_stride) padded with -1;
// padding cells and frames beyond a sequence's length get log_zero_.
template<typename Real>
__global__
static void _compute_ctc_alpha_multiple_sequence(Real* mat_alpha, int sequence_num, int row, MatrixDim dim_alpha, const Real* mat_prob, MatrixDim dim_prob, const int32_cuda* labels, int32_cuda dim_label_stride, const int32_cuda* seq_lengths) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // row index, that is, the index for sequence
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // label index, cannot exceed 2*|l|+1
int32_cuda dim = dim_alpha.cols;
if (j >= dim || i >= sequence_num) return;
int32_cuda index_alpha = j + (row * sequence_num + i) * dim_alpha.stride;
int32_cuda index_label = j + i * dim_label_stride;
int32_cuda class_idx = labels[index_label];// if -1, this is the padding cell;labels now is a matrix which has the same size as mat_alpha
if (class_idx == -1 || row >= seq_lengths[i]) {
mat_alpha[index_alpha] = NumericLimits<Real>::log_zero_;
return;
}
int32_cuda index_label_m2 = (j-2) + i * dim_label_stride;
int32_cuda index_prob = class_idx + (row * sequence_num + i) * dim_prob.stride;
// Previous frame's alpha at label positions j, j-1, j-2 for this sequence.
int32_cuda index_alpha_rm1_i = j + ((row-1) * sequence_num + i) * dim_alpha.stride;
int32_cuda index_alpha_rm1_im1 = (j-1) + ((row-1) * sequence_num + i) * dim_alpha.stride;
int32_cuda index_alpha_rm1_im2 = (j-2) + ((row-1) * sequence_num + i) * dim_alpha.stride;
if (row == 0) {
// Initialization: paths may only start at the first two positions.
if (j < 2) mat_alpha[index_alpha] = mat_prob[index_prob];
else mat_alpha[index_alpha] = NumericLimits<Real>::log_zero_;
} else {
if (j > 1) {
if (j % 2 == 0 || labels[index_label_m2] == labels[index_label]) {
// Two-way transition: stay at j, or advance from j-1.
mat_alpha[index_alpha] = AddAB(mat_prob[index_prob], LogAPlusB(mat_alpha[index_alpha_rm1_im1], mat_alpha[index_alpha_rm1_i]));
} else {
// Three-way transition: stay, advance from j-1, or skip from j-2.
Real tmp = LogAPlusB(mat_alpha[index_alpha_rm1_im1], mat_alpha[index_alpha_rm1_i]);
mat_alpha[index_alpha] = AddAB(mat_prob[index_prob], LogAPlusB(mat_alpha[index_alpha_rm1_im2], tmp));
}
} else if (j == 1) {
mat_alpha[index_alpha] = AddAB(mat_prob[index_prob], LogAPlusB(mat_alpha[index_alpha_rm1_im1], mat_alpha[index_alpha_rm1_i]));
} else {
// j == 0: can only stay in the first position.
mat_alpha[index_alpha] = AddAB(mat_prob[index_prob], mat_alpha[index_alpha_rm1_i]);
}
}
}
// Rescaled (probability-domain) variant of the single-sequence CTC forward
// recursion: identical structure to _compute_ctc_alpha_one_sequence but
// works with plain products and sums instead of log-domain AddAB/LogAPlusB;
// 0.0 replaces log_zero_. Per-row rescaling against underflow is presumably
// done by the caller between frames -- this kernel does not normalize.
template<typename Real>
__global__
static void _compute_ctc_alpha_one_sequence_rescale(Real* mat_alpha, int row, MatrixDim dim_alpha, const Real* mat_prob, MatrixDim dim_prob, const int32_cuda* labels) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda dim = dim_alpha.cols;
if (i < dim) {
int32_cuda index_alpha = i + row * dim_alpha.stride;
int32_cuda class_idx = labels[i];
int32_cuda index_prob = class_idx + row * dim_prob.stride;
// Previous frame's alpha at positions i, i-1, i-2.
int32_cuda index_alpha_rm1_i = i + (row - 1) * dim_alpha.stride;
int32_cuda index_alpha_rm1_im1 = (i - 1) + (row - 1) * dim_alpha.stride;
int32_cuda index_alpha_rm1_im2 = (i - 2) + (row - 1) * dim_alpha.stride;
if (row == 0) {
if (i < 2) mat_alpha[index_alpha] = mat_prob[index_prob];
else mat_alpha[index_alpha] = 0.0;
} else {
if (i > 1) {
if (i % 2 == 0 || labels[i-2] == labels[i]) {
mat_alpha[index_alpha] = mat_prob[index_prob] * (mat_alpha[index_alpha_rm1_im1] + mat_alpha[index_alpha_rm1_i]);
} else {
mat_alpha[index_alpha] = mat_prob[index_prob] * (mat_alpha[index_alpha_rm1_im1] + mat_alpha[index_alpha_rm1_i] + mat_alpha[index_alpha_rm1_im2]);
}
} else if (i == 1) {
mat_alpha[index_alpha] = mat_prob[index_prob] * (mat_alpha[index_alpha_rm1_im1] + mat_alpha[index_alpha_rm1_i]);
} else {
mat_alpha[index_alpha] = mat_prob[index_prob] * mat_alpha[index_alpha_rm1_i];
}
}
}
}
// One time-step of the CTC backward (beta) recursion in the log domain, for
// a single sequence. Mirror image of the alpha kernel: call with `row`
// decreasing from the last frame; each thread handles one augmented-label
// position i and combines beta values from frame row+1 at positions i,
// i+1, i+2. Initialization allows paths to end only at the last two
// positions (i > dim - 3).
template<typename Real>
__global__
static void _compute_ctc_beta_one_sequence(Real* mat_beta, int row, MatrixDim dim_beta, const Real* mat_prob, MatrixDim dim_prob, const int32_cuda* labels) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda dim = dim_beta.cols;
if (i < dim) {
int32_cuda index_beta = i + row * dim_beta.stride;
int32_cuda class_idx = labels[i];
int32_cuda index_prob = class_idx + row * dim_prob.stride;
// Next frame's beta at positions i, i+1, i+2.
int32_cuda index_beta_rp1_i = i + (row + 1) * dim_beta.stride;
int32_cuda index_beta_rp1_ip1 = (i + 1) + (row + 1) * dim_beta.stride;
int32_cuda index_beta_rp1_ip2 = (i + 2) + (row + 1) * dim_beta.stride;
int32_cuda row_num = dim_beta.rows;
if (row == row_num - 1) {
// Termination: paths may only end at the last two positions.
if (i > dim - 3) mat_beta[index_beta] = mat_prob[index_prob];
else mat_beta[index_beta] = NumericLimits<Real>::log_zero_;
} else {
if (i < dim - 2) {
if (i % 2 == 0 || labels[i+2] == labels[i]) {
// Two-way transition: stay at i, or move to i+1.
mat_beta[index_beta] = AddAB(mat_prob[index_prob], LogAPlusB(mat_beta[index_beta_rp1_ip1], mat_beta[index_beta_rp1_i]));
} else {
// Three-way transition: stay, move to i+1, or skip to i+2.
Real tmp = LogAPlusB(mat_beta[index_beta_rp1_ip1], mat_beta[index_beta_rp1_i]);
mat_beta[index_beta] = AddAB(mat_prob[index_prob], LogAPlusB(mat_beta[index_beta_rp1_ip2], tmp));
}
} else if (i == dim - 2) {
mat_beta[index_beta] = AddAB(mat_prob[index_prob], LogAPlusB(mat_beta[index_beta_rp1_ip1], mat_beta[index_beta_rp1_i]));
} else {
// Last position: can only stay.
mat_beta[index_beta] = AddAB(mat_prob[index_prob], mat_beta[index_beta_rp1_i]);
}
}
}
}
// Batched variant of the CTC backward (beta) recursion, one time-step for
// `sequence_num` sequences with the same interleaved row layout as the
// batched alpha kernel. Unlike the single-sequence version, the boundary
// positions are taken from per-sequence label lengths (label_lengths[i])
// rather than dim_beta.cols, since sequences are padded to a common width.
// Padding cells (label == -1) and frames past seq_lengths[i] get log_zero_.
template<typename Real>
__global__
static void _compute_ctc_beta_multiple_sequence(Real* mat_beta, int sequence_num, int row, MatrixDim dim_beta, const Real* mat_prob, MatrixDim dim_prob, const int32_cuda* labels, int32_cuda dim_label_stride, const int32_cuda* seq_lengths, const int32_cuda* label_lengths) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // row index, that is, the index for sequence
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // label index, cannot exceed 2*|l|+1
int32_cuda dim = dim_beta.cols;
if (j >= dim || i >= sequence_num) return;
int32_cuda index_beta = j + (row * sequence_num + i) * dim_beta.stride;
int32_cuda index_label = j + i * dim_label_stride;
int32_cuda class_idx = labels[index_label];// if -1, this is the padding cell;labels now is a matrix which has the same size as mat_alpha
if (class_idx == -1 || row >= seq_lengths[i]) {
mat_beta[index_beta] = NumericLimits<Real>::log_zero_;
return;
}
int32_cuda index_label_p2 = (j+2) + i * dim_label_stride;
int32_cuda index_prob = class_idx + (row * sequence_num + i) * dim_prob.stride;
// Next frame's beta at label positions j, j+1, j+2 for this sequence.
int32_cuda index_beta_rp1_i = j + ((row+1) * sequence_num + i) * dim_beta.stride;
int32_cuda index_beta_rp1_ip1 = (j+1) + ((row+1) * sequence_num + i) * dim_beta.stride;
int32_cuda index_beta_rp1_ip2 = (j+2) + ((row+1) * sequence_num + i) * dim_beta.stride;
int32_cuda row_num = seq_lengths[i];
int32_cuda label_len = label_lengths[i];
// Dead code kept for reference: earlier version used the padded width
// `dim` for the boundary tests instead of the per-sequence label length.
/* if (row == row_num - 1) {
if (j > dim - 3) mat_beta[index_beta] = mat_prob[index_prob];
else mat_beta[index_beta] = NumericLimits<Real>::log_zero_;
} else {
if (j < dim - 2) {
if (j % 2 == 0 || labels[index_label_p2] == labels[index_label]) {
mat_beta[index_beta] = AddAB(mat_prob[index_prob], LogAPlusB(mat_beta[index_beta_rp1_ip1], mat_beta[index_beta_rp1_i]));
} else {
Real tmp = LogAPlusB(mat_beta[index_beta_rp1_ip1], mat_beta[index_beta_rp1_i]);
mat_beta[index_beta] = AddAB(mat_prob[index_prob], LogAPlusB(mat_beta[index_beta_rp1_ip2], tmp));
}
} else if (j == dim - 2) {
mat_beta[index_beta] = AddAB(mat_prob[index_prob], LogAPlusB(mat_beta[index_beta_rp1_ip1], mat_beta[index_beta_rp1_i]));
} else {
mat_beta[index_beta] = AddAB(mat_prob[index_prob], mat_beta[index_beta_rp1_i]);
}
}
*/
if (row == row_num - 1) {
// Termination: paths may only end at the last two label positions.
if (j > label_len - 3) mat_beta[index_beta] = mat_prob[index_prob];
else mat_beta[index_beta] = NumericLimits<Real>::log_zero_;
} else {
if (j < label_len - 2) {
if (j % 2 == 0 || labels[index_label_p2] == labels[index_label]) {
// Two-way transition: stay at j, or move to j+1.
mat_beta[index_beta] = AddAB(mat_prob[index_prob], LogAPlusB(mat_beta[index_beta_rp1_ip1], mat_beta[index_beta_rp1_i]));
} else {
// Three-way transition: stay, move to j+1, or skip to j+2.
Real tmp = LogAPlusB(mat_beta[index_beta_rp1_ip1], mat_beta[index_beta_rp1_i]);
mat_beta[index_beta] = AddAB(mat_prob[index_prob], LogAPlusB(mat_beta[index_beta_rp1_ip2], tmp));
}
} else if (j == label_len - 2) {
mat_beta[index_beta] = AddAB(mat_prob[index_prob], LogAPlusB(mat_beta[index_beta_rp1_ip1], mat_beta[index_beta_rp1_i]));
} else {
// Last position: can only stay.
mat_beta[index_beta] = AddAB(mat_prob[index_prob], mat_beta[index_beta_rp1_i]);
}
}
}
// Rescaled (probability-domain) variant of the single-sequence CTC backward
// recursion: same structure as _compute_ctc_beta_one_sequence but with plain
// products and sums; 0 replaces log_zero_. No normalization is done here.
template<typename Real>
__global__
static void _compute_ctc_beta_one_sequence_rescale(Real* mat_beta, int row, MatrixDim dim_beta, const Real* mat_prob, MatrixDim dim_prob, const int32_cuda* labels) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda dim = dim_beta.cols;
if (i < dim) {
int32_cuda index_beta = i + row * dim_beta.stride;
int32_cuda class_idx = labels[i];
int32_cuda index_prob = class_idx + row * dim_prob.stride;
// Next frame's beta at positions i, i+1, i+2.
int32_cuda index_beta_rp1_i = i + (row + 1) * dim_beta.stride;
int32_cuda index_beta_rp1_ip1 = (i + 1) + (row + 1) * dim_beta.stride;
int32_cuda index_beta_rp1_ip2 = (i + 2) + (row + 1) * dim_beta.stride;
int32_cuda row_num = dim_beta.rows;
if (row == row_num - 1) {
if (i > dim - 3) mat_beta[index_beta] = mat_prob[index_prob];
else mat_beta[index_beta] = 0;
} else {
if (i < dim - 2) {
if (i % 2 == 0 || labels[i+2] == labels[i]) {
mat_beta[index_beta] = mat_prob[index_prob] * (mat_beta[index_beta_rp1_ip1] + mat_beta[index_beta_rp1_i]);
} else {
mat_beta[index_beta] = mat_prob[index_prob] * (mat_beta[index_beta_rp1_ip1] + mat_beta[index_beta_rp1_i] + mat_beta[index_beta_rp1_ip2]);
}
} else if (i == dim - 2) {
mat_beta[index_beta] = mat_prob[index_prob] * (mat_beta[index_beta_rp1_ip1] + mat_beta[index_beta_rp1_i]);
} else {
mat_beta[index_beta] = mat_prob[index_prob] * mat_beta[index_beta_rp1_i];
}
}
}
}
// mat_prob are in probability scale.
// Computes the CTC error (gradient w.r.t. the softmax output) for one
// sequence. For each (frame i, class j) it accumulates, in the log domain,
// alpha*beta over every augmented-label position s whose label is class j,
// then converts to mat_error(i,j) = -exp(err - (pzx + 2*log(prob(i,j)))),
// guarding prob == 0 with log_zero_. pzx is the sequence log-likelihood.
// NOTE(review): `2*log(...)` calls the double-precision log even when Real
// is float -- presumably tolerated for accuracy; confirm this is intended.
template<typename Real>
__global__
static void _compute_ctc_error_one_sequence(Real* mat_error, MatrixDim dim_error, const Real* mat_alpha, const Real* mat_beta, MatrixDim dim_alpha, const Real* mat_prob, const int32_cuda* labels, Real pzx) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // row index
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // column index
if (i < dim_error.rows && j < dim_error.cols) {
Real err = NumericLimits<Real>::log_zero_;
int32_cuda index_error = i * dim_error.stride + j;
// Sum alpha*beta over all label positions carrying class j.
for(int s = 0; s < dim_alpha.cols; s++) {
if (labels[s] == j) { //
int32_cuda index_alpha = i * dim_alpha.stride + s;
err = LogAPlusB(err, AddAB(mat_alpha[index_alpha], mat_beta[index_alpha]));
}
}
Real val = ExpA(SubAB(err, AddAB(pzx, mat_prob[index_error] == 0? NumericLimits<Real>::log_zero_ : 2*log(mat_prob[index_error]))));
mat_error[index_error] = -1.0 * val;
}
}
// mat_prob are in probability scale.
// Batched variant of the CTC error kernel: rows of mat_error interleave the
// frames of all sequences (seqX = i % sequence_num selects the sequence,
// rowX = i / sequence_num the frame). Label padding cells (-1) are skipped,
// frames beyond a sequence's length are left untouched, and each sequence
// uses its own log-likelihood pzx[seqX].
template<typename Real>
__global__
static void _compute_ctc_error_multiple_sequence(Real* mat_error, int32_cuda sequence_num, MatrixDim dim_error, const Real* mat_alpha, const Real* mat_beta, MatrixDim dim_alpha, const Real* mat_prob, const int32_cuda* labels, int32_cuda dim_label_stride, const int32_cuda* seq_lengths, const Real* pzx) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // row index
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // column index
if (i >= dim_error.rows || j >= dim_error.cols) return;
int32_cuda seqX = i % sequence_num;
int32_cuda rowX = i / sequence_num;
if (rowX >= seq_lengths[seqX]) return;
Real err = NumericLimits<Real>::log_zero_;
int32_cuda index_error = i * dim_error.stride + j;
// Sum alpha*beta over all label positions of this sequence carrying class j.
for(int s = 0; s < dim_alpha.cols; s++) {
int32_cuda index_label = s + seqX * dim_label_stride;
if (labels[index_label] == -1) {continue;}
if (labels[index_label] == j) { //
int32_cuda index_alpha = i * dim_alpha.stride + s;
err = LogAPlusB(err, AddAB(mat_alpha[index_alpha], mat_beta[index_alpha]));
}
}
Real val = ExpA(SubAB(err, AddAB(pzx[seqX], mat_prob[index_error] == 0? NumericLimits<Real>::log_zero_ : 2*log(mat_prob[index_error]))));
mat_error[index_error] = -1.0 * val;
}
template<typename Real>
__global__
static void _distribute_prob_by_label(Real* mat_prob_dist, MatrixDim dim_prob_dist, const Real* mat_prob, MatrixDim dim_prob, const int32_cuda* labels) {
// Per-label probability gather: prob_dist(r, c) = prob(r, labels[c]).
// labels[c] must be a valid column index of mat_prob; no range check is
// done here. One thread per output element; x indexes rows, y columns.
int32_cuda row = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda col = blockIdx.y * blockDim.y + threadIdx.y;
if (row < dim_prob_dist.rows && col < dim_prob_dist.cols) {
mat_prob_dist[row * dim_prob_dist.stride + col] =
mat_prob[row * dim_prob.stride + labels[col]];
}
}
// directly get the errors for the prior-softmax values
// Probability-domain (rescaled) variant of the CTC error kernel: works
// directly on the rescaled alpha/beta products with per-frame normalizers
// zt[i], producing the gradient for the prior-softmax values:
// error(i,j) = prob(i,j) - (sum alpha*beta over positions with label j)
//              / (zt[i] * prob(i,j)).
// Zero prob or zero normalizer yields a zero gradient to avoid division
// by zero.
template<typename Real>
__global__
static void _compute_ctc_error_one_sequence_rescale(Real* mat_error, MatrixDim dim_error, const Real* mat_alpha, const Real* mat_beta, MatrixDim dim_alpha, const Real* mat_prob, const int32_cuda* labels, const Real* zt) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // row index
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // column index
if (i < dim_error.rows && j < dim_error.cols) {
Real err = 0;
int32_cuda index_error = i * dim_error.stride + j;
// Sum alpha*beta over all label positions carrying class j.
for(int s = 0; s < dim_alpha.cols; s++) {
if (labels[s] == j) { //
int32_cuda index_alpha = i * dim_alpha.stride + s;
err += mat_alpha[index_alpha] * mat_beta[index_alpha];
}
}
if (mat_prob[index_error] == 0 || zt[i] == 0) {
mat_error[index_error] = 0;
} else {
mat_error[index_error] = mat_prob[index_error] - (err / zt[i]) / mat_prob[index_error];
}
}
}
// ---------------------------------------------------------------------------
// float instantiations: thin host-side wrappers that launch the templated
// CTC kernels with the caller-supplied grid/block config on the default
// stream.  No error checking is done here; callers are expected to check.
// ---------------------------------------------------------------------------
void cudaF_compute_ctc_alpha(dim3 Gr, dim3 Bl, float *alpha, int row_idx, MatrixDim dim_alpha, const float *prob, MatrixDim dim_prob, const int *labels) {
_compute_ctc_alpha_one_sequence<<<Gr, Bl>>>(alpha, row_idx, dim_alpha, prob, dim_prob, labels);
}
void cudaF_compute_ctc_beta(dim3 Gr, dim3 Bl, float *beta, int row_idx, MatrixDim dim_beta, const float *prob, MatrixDim dim_prob, const int *labels) {
_compute_ctc_beta_one_sequence<<<Gr, Bl>>>(beta, row_idx, dim_beta, prob, dim_prob, labels);
}
void cudaF_compute_ctc_error(dim3 Gr, dim3 Bl, float *error, MatrixDim dim_error, const float *alpha, const float *beta, MatrixDim dim_alpha, const float *prob, const int *labels, float pzx) {
_compute_ctc_error_one_sequence<<<Gr, Bl>>>(error, dim_error, alpha, beta, dim_alpha, prob, labels, pzx);
}
// Rescaled (non-log-domain) variants.
void cudaF_compute_ctc_alpha_rescale(dim3 Gr, dim3 Bl, float *alpha, int row_idx, MatrixDim dim_alpha, const float *prob, MatrixDim dim_prob, const int *labels) {
_compute_ctc_alpha_one_sequence_rescale<<<Gr, Bl>>>(alpha, row_idx, dim_alpha, prob, dim_prob, labels);
}
void cudaF_compute_ctc_beta_rescale(dim3 Gr, dim3 Bl, float *beta, int row_idx, MatrixDim dim_beta, const float *prob, MatrixDim dim_prob, const int *labels) {
_compute_ctc_beta_one_sequence_rescale<<<Gr, Bl>>>(beta, row_idx, dim_beta, prob, dim_prob, labels);
}
void cudaF_compute_ctc_error_rescale(dim3 Gr, dim3 Bl, float *error, MatrixDim dim_error, const float *alpha, const float *beta, MatrixDim dim_alpha, const float *prob, const int *labels, const float *zt) {
_compute_ctc_error_one_sequence_rescale<<<Gr, Bl>>>(error, dim_error, alpha, beta, dim_alpha, prob, labels, zt);
}
void cudaF_distribute_prob_by_label(dim3 Gr, dim3 Bl, float *prob_dist, MatrixDim dim_prob_dist, const float *prob, MatrixDim dim_prob, const int *labels) {
_distribute_prob_by_label<<<Gr, Bl>>>(prob_dist, dim_prob_dist, prob, dim_prob, labels);
}
// Batched (multiple-sequence) variants.
void cudaF_compute_ctc_alpha_multiple_sequence(dim3 Gr, dim3 Bl, float *alpha, int seq_num, int row_idx, MatrixDim dim_alpha, const float *prob, MatrixDim dim_prob, const int *labels, int dim_label_stride, const int *seq_lengths) {
_compute_ctc_alpha_multiple_sequence<<<Gr, Bl>>>(alpha, seq_num, row_idx, dim_alpha, prob, dim_prob, labels, dim_label_stride, seq_lengths);
}
void cudaF_compute_ctc_beta_multiple_sequence(dim3 Gr, dim3 Bl, float *beta, int seq_num, int row_idx, MatrixDim dim_beta, const float *prob, MatrixDim dim_prob, const int *labels, int dim_label_stride, const int *seq_lengths, const int *label_lengths) {
_compute_ctc_beta_multiple_sequence<<<Gr, Bl>>>(beta, seq_num, row_idx, dim_beta, prob, dim_prob, labels, dim_label_stride, seq_lengths, label_lengths);
}
void cudaF_compute_ctc_error_multiple_sequence(dim3 Gr, dim3 Bl, float *error, int seq_num, MatrixDim dim_error, const float *alpha, const float *beta, MatrixDim dim_alpha, const float *prob, const int *labels, int dim_label_stride, const int *seq_lengths, const float *pzx) {
_compute_ctc_error_multiple_sequence<<<Gr, Bl>>>(error, seq_num, dim_error, alpha, beta, dim_alpha, prob, labels, dim_label_stride, seq_lengths, pzx);
}
// ---------------------------------------------------------------------------
// double instantiations of the same CTC kernel wrappers (see the float
// versions above for per-function notes).
// ---------------------------------------------------------------------------
void cudaD_compute_ctc_alpha(dim3 Gr, dim3 Bl, double *alpha, int row_idx, MatrixDim dim_alpha, const double *prob, MatrixDim dim_prob, const int *labels) {
_compute_ctc_alpha_one_sequence<<<Gr, Bl>>>(alpha, row_idx, dim_alpha, prob, dim_prob, labels);
}
void cudaD_compute_ctc_beta(dim3 Gr, dim3 Bl, double *beta, int row_idx, MatrixDim dim_beta, const double *prob, MatrixDim dim_prob, const int *labels) {
_compute_ctc_beta_one_sequence<<<Gr, Bl>>>(beta, row_idx, dim_beta, prob, dim_prob, labels);
}
void cudaD_compute_ctc_error(dim3 Gr, dim3 Bl, double *error, MatrixDim dim_error, const double *alpha, const double *beta, MatrixDim dim_alpha, const double *prob, const int *labels, double pzx) {
_compute_ctc_error_one_sequence<<<Gr, Bl>>>(error, dim_error, alpha, beta, dim_alpha, prob, labels, pzx);
}
void cudaD_compute_ctc_alpha_rescale(dim3 Gr, dim3 Bl, double *alpha, int row_idx, MatrixDim dim_alpha, const double *prob, MatrixDim dim_prob, const int *labels) {
_compute_ctc_alpha_one_sequence_rescale<<<Gr, Bl>>>(alpha, row_idx, dim_alpha, prob, dim_prob, labels);
}
void cudaD_compute_ctc_beta_rescale(dim3 Gr, dim3 Bl, double *beta, int row_idx, MatrixDim dim_beta, const double *prob, MatrixDim dim_prob, const int *labels) {
_compute_ctc_beta_one_sequence_rescale<<<Gr, Bl>>>(beta, row_idx, dim_beta, prob, dim_prob, labels);
}
void cudaD_compute_ctc_error_rescale(dim3 Gr, dim3 Bl, double *error, MatrixDim dim_error, const double *alpha, const double *beta, MatrixDim dim_alpha, const double *prob, const int *labels, const double *zt) {
_compute_ctc_error_one_sequence_rescale<<<Gr, Bl>>>(error, dim_error, alpha, beta, dim_alpha, prob, labels, zt);
}
void cudaD_distribute_prob_by_label(dim3 Gr, dim3 Bl, double *prob_dist, MatrixDim dim_prob_dist, const double *prob, MatrixDim dim_prob, const int *labels) {
_distribute_prob_by_label<<<Gr, Bl>>>(prob_dist, dim_prob_dist, prob, dim_prob, labels);
}
void cudaD_compute_ctc_alpha_multiple_sequence(dim3 Gr, dim3 Bl, double *alpha, int seq_num, int row_idx, MatrixDim dim_alpha, const double *prob, MatrixDim dim_prob, const int *labels, int dim_label_stride, const int *seq_lengths) {
_compute_ctc_alpha_multiple_sequence<<<Gr, Bl>>>(alpha, seq_num, row_idx, dim_alpha, prob, dim_prob, labels, dim_label_stride, seq_lengths);
}
void cudaD_compute_ctc_beta_multiple_sequence(dim3 Gr, dim3 Bl, double *beta, int seq_num, int row_idx, MatrixDim dim_beta, const double *prob, MatrixDim dim_prob, const int *labels, int dim_label_stride, const int *seq_lengths, const int *label_lengths) {
_compute_ctc_beta_multiple_sequence<<<Gr, Bl>>>(beta, seq_num, row_idx, dim_beta, prob, dim_prob, labels, dim_label_stride, seq_lengths, label_lengths);
}
void cudaD_compute_ctc_error_multiple_sequence(dim3 Gr, dim3 Bl, double *error, int seq_num, MatrixDim dim_error, const double *alpha, const double *beta, MatrixDim dim_alpha, const double *prob, const int *labels, int dim_label_stride, const int *seq_lengths, const double *pzx) {
_compute_ctc_error_multiple_sequence<<<Gr, Bl>>>(error, seq_num, dim_error, alpha, beta, dim_alpha, prob, labels, dim_label_stride, seq_lengths, pzx);
}
|
5ba9ce92021bf4114cf66edc531eab7188cfaa11.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <cstdlib>
#include <staggered_oprod.h>
#include <tune_quda.h>
#include <quda_internal.h>
#include <gauge_field_order.h>
#include <quda_matrix.h>
namespace quda {
#ifdef GPU_STAGGERED_OPROD
namespace { // anonymous
#include <texture.h>
}
// Controls whether the t-face (dimension 3) participates in kernel packing;
// see the pack loop in computeStaggeredOprodCuda.
static bool kernelPackT = true;
// Create an array of N events, all with the same creation flags.
// Fix: HIP's hipEventCreate() takes no flags argument (unlike the CUDA C++
// two-argument overload this code was hipified from); the flagged variant
// hipEventCreateWithFlags() must be used instead, otherwise `flags` was
// silently dropped / the call failed to compile under HIP.
template<int N>
void createEventArray(hipEvent_t (&event)[N], unsigned int flags=hipEventDefault)
{
for(int i=0; i<N; ++i)
hipEventCreateWithFlags(&event[i], flags);
return;
}
// Destroy every event in a fixed-size event array.
template<int N>
void destroyEventArray(hipEvent_t (&event)[N])
{
for (auto &e : event) hipEventDestroy(e);
}
// Events used to order the multi-stream pack/gather/scatter/kernel pipeline
// in computeStaggeredOprodCuda (one gather/scatter event per dimension).
static hipEvent_t packEnd;
static hipEvent_t gatherEnd[4];
static hipEvent_t scatterEnd[4];
static hipEvent_t oprodStart;
static hipEvent_t oprodEnd; // NOTE(review): created but never recorded in this file
void createStaggeredOprodEvents(){
#ifdef MULTI_GPU
hipEventCreate(&packEnd, hipEventDisableTiming);
createEventArray(gatherEnd, hipEventDisableTiming);
createEventArray(scatterEnd, hipEventDisableTiming);
#endif
hipEventCreate(&oprodStart, hipEventDisableTiming);
hipEventCreate(&oprodEnd, hipEventDisableTiming);
return;
}
// Release all events created by createStaggeredOprodEvents().
void destroyStaggeredOprodEvents(){
#ifdef MULTI_GPU
destroyEventArray(gatherEnd);
destroyEventArray(scatterEnd);
hipEventDestroy(packEnd);
#endif
hipEventDestroy(oprodStart);
hipEventDestroy(oprodEnd);
return;
}
// Which phase of the outer product a kernel launch computes: bulk sites
// (interior) or sites whose neighbors live in a ghost zone (exterior).
enum KernelType {OPROD_INTERIOR_KERNEL, OPROD_EXTERIOR_KERNEL};
// Argument bundle passed by value to the oprod kernels.
template<typename Complex, typename Output, typename Input>
struct StaggeredOprodArg {
unsigned int length;       // number of checkerboard sites the kernel loops over
unsigned int X[4];         // full lattice dimensions
unsigned int parity;       // parity of the sites being updated
unsigned int dir;          // face dimension (exterior kernel only)
unsigned int ghostOffset;  // offset into inB's ghost zone (exterior kernel only)
unsigned int displacement; // hop distance: 1 (outA) or 3 (outB)
KernelType kernelType;     // interior or exterior phase
bool partitioned[4];       // which dimensions are comms-partitioned
Input inA;
Input inB;
Output outA;               // one-hop outer-product accumulator
Output outB;               // three-hop outer-product accumulator
cudaGaugeField& outFieldA;
cudaGaugeField& outFieldB;
typename RealTypeId<Complex>::Type coeff[2]; // weights for the 1- and 3-hop terms
StaggeredOprodArg(const unsigned int length,
const unsigned int X[4],
const unsigned int parity,
const unsigned int dir,
const unsigned int ghostOffset,
const unsigned int displacement,
const KernelType& kernelType,
const double coeff[2],
Input& inA,
Input& inB,
Output& outA,
Output& outB,
cudaGaugeField& outFieldA,
// Fix: `dir` was accepted as a parameter but never stored, leaving the
// member uninitialized; it is now initialized in the member-init list.
cudaGaugeField& outFieldB) : length(length), parity(parity), dir(dir), ghostOffset(ghostOffset),
displacement(displacement), kernelType(kernelType), inA(inA), inB(inB), outA(outA), outB(outB),
outFieldA(outFieldA), outFieldB(outFieldB)
{
this->coeff[0] = coeff[0];
this->coeff[1] = coeff[1];
for(int i=0; i<4; ++i) this->X[i] = X[i];
for(int i=0; i<4; ++i) this->partitioned[i] = commDimPartitioned(i) ? true : false;
}
};
// Selects which lattice dimension is assumed to have even extent in
// coordsFromIndex (EVEN_X enables the cheaper arithmetic path).
enum IndexType {
EVEN_X = 0,
EVEN_Y = 1,
EVEN_Z = 2,
EVEN_T = 3
};
// Recover the full lattice index `idx` and 4-d coordinates c[] = {x,y,z,t}
// from a checkerboard (single-parity) index cb_idx and the site parity.
// The template parameter states which dimension has even extent, which
// determines where the parity offset can be folded in.
template <IndexType idxType>
static __device__ __forceinline__ void coordsFromIndex(int& idx, int c[4],
const unsigned int cb_idx, const unsigned int parity, const unsigned int X[4])
{
const unsigned int &LX = X[0];
const unsigned int &LY = X[1];
const unsigned int &LZ = X[2];
const unsigned int XYZ = X[2]*X[1]*X[0];
const unsigned int XY = X[1]*X[0];
idx = 2*cb_idx;
int x, y, z, t;
if (idxType == EVEN_X /*!(LX & 1)*/) { // X even
// t = idx / XYZ;
// z = (idx / XY) % Z;
// y = (idx / X) % Y;
// idx += (parity + t + z + y) & 1;
// x = idx % X;
// equivalent to the above, but with fewer divisions/mods:
int aux1 = idx / LX;
x = idx - aux1 * LX;
int aux2 = aux1 / LY;
y = aux1 - aux2 * LY;
t = aux2 / LZ;
z = aux2 - t * LZ;
aux1 = (parity + t + z + y) & 1;
x += aux1;
idx += aux1;
} else if (idxType == EVEN_Y /*!(LY & 1)*/) { // Y even
t = idx / XYZ;
z = (idx / XY) % LZ;
idx += (parity + t + z) & 1;
y = (idx / LX) % LY;
x = idx % LX;
} else if (idxType == EVEN_Z /*!(LZ & 1)*/) { // Z even
t = idx / XYZ;
idx += (parity + t) & 1;
z = (idx / XY) % LZ;
y = (idx / LX) % LY;
x = idx % LX;
} else {
// No even dimension assumed: parity offset applied up front.
idx += parity;
t = idx / XYZ;
z = (idx / XY) % LZ;
y = (idx / LX) % LY;
x = idx % LX;
}
c[0] = x;
c[1] = y;
c[2] = z;
c[3] = t;
}
// Get the coordinates for the exterior kernels
// Maps a face-local checkerboard index cb_idx to full 4-d coordinates x[]
// on the boundary slab of thickness `displacement` at the far end of
// dimension `dir`.  Only Nspin==1 (staggered) is implemented; the x[0]
// component is reconstructed last from the parity constraint.
template<int Nspin>
__device__ void coordsFromIndex(unsigned int x[4], const unsigned int cb_idx, const unsigned int X[4], const unsigned int dir, const int displacement, const unsigned int parity)
{
if(Nspin == 1){
unsigned int Xh[2] = {X[0]/2, X[1]/2};
switch(dir){
case 0:
x[2] = cb_idx/Xh[1] % X[2];
x[3] = cb_idx/(Xh[1]*X[2]) % X[3];
x[0] = cb_idx/(Xh[1]*X[2]*X[3]);
x[0] += (X[0] - displacement);
x[1] = 2*(cb_idx % Xh[1]) + ((x[0]+x[2]+x[3]+parity)&1);
break;
case 1:
x[2] = cb_idx/Xh[0] % X[2];
x[3] = cb_idx/(Xh[0]*X[2]) % X[3];
x[1] = cb_idx/(Xh[0]*X[2]*X[3]);
x[1] += (X[1] - displacement);
x[0] = 2*(cb_idx % Xh[0]) + ((x[1]+x[2]+x[3]+parity)&1);
break;
case 2:
x[1] = cb_idx/Xh[0] % X[1];
x[3] = cb_idx/(Xh[0]*X[1]) % X[3];
x[2] = cb_idx/(Xh[0]*X[1]*X[3]);
x[2] += (X[2] - displacement);
x[0] = 2*(cb_idx % Xh[0]) + ((x[1]+x[2]+x[3]+parity)&1);
break;
case 3:
x[1] = cb_idx/Xh[0] % X[1];
x[2] = cb_idx/(Xh[0]*X[1]) % X[2];
x[3] = cb_idx/(Xh[0]*X[1]*X[2]);
x[3] += (X[3] - displacement);
x[0] = 2*(cb_idx % Xh[0]) + ((x[1]+x[2]+x[3]+parity)&1);
break;
}
}else if(Nspin == 3){
// currently unsupported
}
return;
}
// Generic fallback: only the <3,3> specialization below is meaningful;
// all other instantiations return 0.
template<int Nspin, int Nface>
__device__ int ghostIndexFromCoords(const unsigned int x[4], const unsigned int X[4], const unsigned int dir, const int shift){
return 0;
}
// Ghost-zone index for Nspin=3 (color), Nface=3: given site coordinates x[]
// and a shift of `shift` lattice units along `dir`, return the checkerboard
// index into that dimension's ghost buffer.  Positive shifts address the
// forward ghost zone (offset by the 3*3 backward slices), negative shifts
// the backward one.
// Fix: ghost_idx was previously left uninitialized, so a call for a site
// whose shifted coordinate stays inside the local volume returned an
// indeterminate value (undefined behavior).  It is now initialized to 0.
template<>
__device__ int ghostIndexFromCoords<3,3>(
const unsigned int x[4],
const unsigned int X[4],
unsigned int dir,
const int shift)
{
unsigned int ghost_idx = 0;
if(shift > 0){
if((x[dir] + shift) >= X[dir]){
switch(dir){
case 0:
ghost_idx = (3*3 + (x[0]-X[0]+shift))*(X[3]*X[2]*X[1])/2 + ((x[3]*X[2] + x[2])*X[1] + x[1])/2;
break;
case 1:
ghost_idx = (3*3 + (x[1]-X[1]+shift))*(X[3]*X[2]*X[0])/2 + (x[3]*X[2]*X[0] + x[2]*X[0] + x[0])/2;
break;
case 2:
ghost_idx = (3*3 + (x[2]-X[2]+shift))*(X[3]*X[1]*X[0])/2 + (x[3]*X[1]*X[0] + x[1]*X[0] + x[0])/2;
break;
case 3:
ghost_idx = (3*3 + (x[3]-X[3]+shift))*(X[2]*X[1]*X[0])/2 + (x[2]*X[1]*X[0] + x[1]*X[0] + x[0])/2;
break;
default:
break;
} // switch
} // x[dir] + shift[dir] >= X[dir]
}else{ // shift < 0
if(static_cast<int>(x[dir]) + shift < 0){
switch(dir){
case 0:
ghost_idx = (3 + shift)*(X[3]*X[2]*X[1])/2 + ((x[3]*X[2] + x[2])*X[1] + x[1])/2;
break;
case 1:
ghost_idx = (3 + shift)*(X[3]*X[2]*X[0])/2 + ((x[3]*X[2] + x[2])*X[0] + x[0])/2;
break;
case 2:
ghost_idx = (3 + shift)*(X[3]*X[1]*X[0])/2 + ((x[3]*X[1] + x[1])*X[0] + x[0])/2;
break;
case 3:
ghost_idx = (3 + shift)*(X[2]*X[1]*X[0])/2 + ((x[2]*X[1] + x[1])*X[0] + x[0])/2;
break;
} // switch(dir)
}
} // shift < 0
return ghost_idx;
}
// Checkerboard index of the site reached from cb_idx by the 4-vector
// shift[].  Returns -1 when the neighbor leaves the local volume in a
// comms-partitioned dimension (it then lives in a ghost zone and is
// handled by the exterior kernel); otherwise wraps periodically.
__device__ __forceinline__
int neighborIndex(const unsigned int& cb_idx, const int shift[4], const bool partitioned[4], const unsigned int& parity,
const unsigned int X[4]){
int full_idx;
int x[4];
coordsFromIndex<EVEN_X>(full_idx, x, cb_idx, parity, X);
#ifdef MULTI_GPU
for(int dim = 0; dim<4; ++dim){
if(partitioned[dim])
if( (x[dim]+shift[dim])<0 || (x[dim]+shift[dim])>=X[dim]) return -1;
}
#endif
// Periodic wrap-around in every shifted dimension.
for(int dim=0; dim<4; ++dim){
x[dim] = shift[dim] ? (x[dim]+shift[dim] + X[dim]) % X[dim] : x[dim];
}
// Fold the 4-d coordinates back down to a checkerboard index.
return (((x[3]*X[2] + x[2])*X[1] + x[1])*X[0] + x[0]) >> 1;
}
// Interior (bulk) staggered outer product: grid-stride loop over the local
// checkerboard sites.  For each direction, accumulates the outer product of
// the 1-hop neighbor spinor with the local spinor into outA (weight
// coeff[0]) and of the 3-hop neighbor into outB (weight coeff[1]).
// Off-node neighbors (neighborIndex() < 0) are skipped here and handled by
// exteriorOprodKernel after comms complete.
template<typename Complex, typename Output, typename Input>
__global__ void interiorOprodKernel(StaggeredOprodArg<Complex, Output, Input> arg)
{
unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x;
const unsigned int gridSize = gridDim.x*blockDim.x;
typedef typename RealTypeId<Complex>::Type real;
Complex x[3];
Complex y[3];
Complex z[3];
Matrix<Complex,3> result;
Matrix<Complex,3> tempA, tempB; // input
while(idx<arg.length){
arg.inA.load(x, idx);
for(int dir=0; dir<4; ++dir){
int shift[4] = {0,0,0,0};
shift[dir] = 1;
const int first_nbr_idx = neighborIndex(idx, shift, arg.partitioned, arg.parity, arg.X);
if(first_nbr_idx >= 0){
arg.inB.load(y, first_nbr_idx);
outerProd(y,x,&result);
// Read-modify-write accumulation into the one-hop field.
arg.outA.load(reinterpret_cast<real*>(tempA.data), idx, dir, arg.parity);
result = tempA + result*arg.coeff[0];
arg.outA.save(reinterpret_cast<real*>(result.data), idx, dir, arg.parity);
shift[dir] = 3;
const int third_nbr_idx = neighborIndex(idx, shift, arg.partitioned, arg.parity, arg.X);
if(third_nbr_idx >= 0){
arg.inB.load(z, third_nbr_idx);
outerProd(z, x, &result);
// Read-modify-write accumulation into the three-hop field.
arg.outB.load(reinterpret_cast<real*>(tempB.data), idx, dir, arg.parity);
result = tempB + result*arg.coeff[1];
arg.outB.save(reinterpret_cast<real*>(result.data), idx, dir, arg.parity);
}
}
} // dir
idx += gridSize;
}
return;
} // interiorOprodKernel
// Exterior (boundary) staggered outer product: loops over the face slab of
// dimension arg.dir whose 1- or 3-hop neighbor lives in the ghost zone
// received from the neighboring process.  arg.displacement selects the
// 1-hop (outA/coeff[0]) or 3-hop (outB/coeff[1]) accumulation.
template<typename Complex, typename Output, typename Input>
__global__ void exteriorOprodKernel(StaggeredOprodArg<Complex, Output, Input> arg)
{
unsigned int cb_idx = blockIdx.x*blockDim.x + threadIdx.x;
const unsigned int gridSize = gridDim.x*blockDim.x;
Complex a[3];
Complex b[3];
Matrix<Complex,3> result;
Matrix<Complex,3> inmatrix; // input
typedef typename RealTypeId<Complex>::Type real;
Output& out = (arg.displacement == 1) ? arg.outA : arg.outB;
real coeff = (arg.displacement == 1) ? arg.coeff[0] : arg.coeff[1];
unsigned int x[4];
while(cb_idx<arg.length){
// Face-local index -> full coordinates -> bulk checkerboard index.
coordsFromIndex<1>(x, cb_idx, arg.X, arg.dir, arg.displacement, arg.parity);
const unsigned int bulk_cb_idx = ((((x[3]*arg.X[2] + x[2])*arg.X[1] + x[1])*arg.X[0] + x[0]) >> 1);
out.load(reinterpret_cast<real*>(inmatrix.data), bulk_cb_idx, arg.dir, arg.parity);
arg.inA.load(a, bulk_cb_idx);
// Neighbor spinor comes from the ghost zone of inB.
const unsigned int ghost_idx = arg.ghostOffset + ghostIndexFromCoords<3,3>(x, arg.X, arg.dir, arg.displacement);
arg.inB.load(b, ghost_idx);
outerProd(b,a,&result);
result = inmatrix + result*coeff;
out.save(reinterpret_cast<real*>(result.data), bulk_cb_idx, arg.dir, arg.parity);
cb_idx += gridSize;
}
return;
}
// Tunable wrapper around interiorOprodKernel so QUDA's autotuner can pick
// the launch configuration.  preTune/postTune back up and restore the
// accumulated output fields because the kernel is read-modify-write.
template<typename Complex, typename Output, typename Input>
class StaggeredOprodField : public Tunable {
private:
StaggeredOprodArg<Complex,Output,Input> arg;
QudaFieldLocation location; // location of the lattice fields
unsigned int sharedBytesPerThread() const { return 0; }
unsigned int sharedBytesPerBlock(const TuneParam &) const { return 0; }
unsigned int minThreads() const { return arg.outA.volumeCB; }
bool tunedGridDim() const { return false; }
public:
StaggeredOprodField(const StaggeredOprodArg<Complex,Output,Input> &arg,
QudaFieldLocation location)
: arg(arg), location(location) {
sprintf(vol,"%dx%dx%dx%d",arg.X[0],arg.X[1],arg.X[2],arg.X[3]);
sprintf(aux,"threads=%d,prec=%lu,stride=%d",arg.length,sizeof(Complex)/2,arg.inA.Stride());
}
virtual ~StaggeredOprodField() {}
// Update the mutable parts of the stored arg between launches.
void set(const StaggeredOprodArg<Complex,Output,Input> &arg, QudaFieldLocation location){
// This is a hack. Need to change this!
this->arg.dir = arg.dir;
this->arg.length = arg.length;
this->arg.ghostOffset = arg.ghostOffset;
this->arg.kernelType = arg.kernelType;
this->location = location;
} // set
// Launch the interior kernel on the given stream (GPU only).
void apply(const hipStream_t &stream){
if(location == QUDA_CUDA_FIELD_LOCATION){
// Disable tuning for the time being
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
//if(arg.kernelType == OPROD_INTERIOR_KERNEL){
hipLaunchKernelGGL(( interiorOprodKernel), dim3(tp.grid),dim3(tp.block),tp.shared_bytes, stream, arg);
// dim3 blockDim(128, 1, 1);
// const int gridSize = (arg.length + (blockDim.x-1))/blockDim.x;
// dim3 gridDim(gridSize, 1, 1);
// interiorOprodKernel<<<gridDim,blockDim,0, stream>>>(arg);
// }else if(arg.kernelType == OPROD_EXTERIOR_KERNEL){
// const unsigned int volume = arg.X[0]*arg.X[1]*arg.X[2]*arg.X[3];
// arg.inB.setStride(3*volume/(2*arg.X[arg.dir]));
// exteriorOprodKernel<<<tp.grid,tp.block,tp.shared_bytes, stream>>>(arg);
// arg.inB.setStride(arg.inA.Stride());
// }else{
// errorQuda("Kernel type not supported\n");
// }
}else{ // run the CPU code
errorQuda("No CPU support for staggered outer-product calculation\n");
}
} // apply
void preTune(){
this->arg.outFieldA.backup();
this->arg.outFieldB.backup();
}
void postTune(){
this->arg.outFieldA.restore();
this->arg.outFieldB.restore();
}
long long flops() const {
return 0; // fix this
}
long long bytes() const {
return 0; // fix this
}
TuneKey tuneKey() const { return TuneKey(vol, typeid(*this).name(), aux);}
}; // StaggeredOprodField
// Orchestrates the multi-GPU staggered outer-product computation:
//   1. pack the boundary spinors (streams[Nstream-1]);
//   2. per partitioned dimension, gather faces to the host on stream 2*i;
//   3. run the interior kernel on the main stream (overlapping comms);
//   4. poll gather/comms completion per dimension and, as each finishes,
//      launch the 1-hop and 3-hop exterior kernels on the main stream.
// The exact ordering of event records/waits below is load-bearing.
template<typename Complex, typename Output, typename Input>
void computeStaggeredOprodCuda(Output outA, Output outB, cudaGaugeField& outFieldA, cudaGaugeField& outFieldB, Input& inA, Input& inB, cudaColorSpinorField& src,
FaceBuffer& faceBuffer, const unsigned int parity, const int faceVolumeCB[4],
const unsigned int ghostOffset[4], const double coeff[2])
{
hipEventRecord(oprodStart, streams[Nstream-1]);
// src.X(0) is checkerboarded, so double it to get the full X extent.
const unsigned int dim[4] = {src.X(0)*2, src.X(1), src.X(2), src.X(3)};
// Create the arguments for the interior kernel
StaggeredOprodArg<Complex,Output,Input> arg(outA.volumeCB, dim, parity, 0, 0, 1, OPROD_INTERIOR_KERNEL, coeff, inA, inB, outA, outB, outFieldA,
outFieldB);
StaggeredOprodField<Complex,Output,Input> oprod(arg, QUDA_CUDA_FIELD_LOCATION);
#ifdef MULTI_GPU
bool pack=false;
for(int i=3; i>=0; i--){
if(commDimPartitioned(i) && (i!=3 || kernelPackT)){
pack = true;
break;
}
} // i=3,..,0
// source, dir(+/-1), parity, dagger, stream_ptr
if(pack){
faceBuffer.pack(src, -1, 1-parity, 0, streams); // packing is all done in streams[Nstream-1]
//faceBuffer.pack(src, 1-parity, 0, streams); // packing is all done in streams[Nstream-1]
hipEventRecord(packEnd, streams[Nstream-1]);
}
for(int i=3; i>=0; i--){
if(commDimPartitioned(i)){
hipEvent_t &event = (i!=3 || kernelPackT) ? packEnd : oprodStart;
hipStreamWaitEvent(streams[2*i], event, 0); // wait in stream 2*i for event to complete
// Initialize the host transfer from the source spinor
faceBuffer.gather(src, false, 2*i);
// record the end of the gathering
hipEventRecord(gatherEnd[i], streams[2*i]);
} // comDim(i)
} // i=3,..,0
#endif
oprod.apply(streams[Nstream-1]);
/*
dim3 blockDim(128, 1, 1);
const int gridSize = (arg.length + (blockDim.x-1))/blockDim.x;
dim3 gridDim(gridSize, 1, 1);
interiorOprodKernel<<<gridDim,blockDim,0,streams[Nstream-1]>>>(arg);
*/
#ifdef MULTI_GPU
// compute gather completed
// Completion flags per dimension; index 4 is a sentinel so that the
// dimension with no predecessor (previousDir == 4) is always "ready".
int gatherCompleted[5];
int commsCompleted[5];
int oprodCompleted[4];
for(int i=0; i<4; ++i){
gatherCompleted[i] = commsCompleted[i] = oprodCompleted[i] = 0;
}
gatherCompleted[4] = commsCompleted[4] = 1;
// initialize commDimTotal
int commDimTotal = 0;
for(int i=0; i<4; ++i){
commDimTotal += commDimPartitioned(i);
}
commDimTotal *= 2;
// initialize previousDir
int previousDir[4];
for(int i=3; i>=0; i--){
if(commDimPartitioned(i)){
int prev = 4;
for(int j=3; j>i; j--){
if(commDimPartitioned(j)){
prev = j;
}
}
previousDir[i] = prev;
}
} // set previous directions
if(commDimTotal){
arg.kernelType = OPROD_EXTERIOR_KERNEL;
unsigned int completeSum=0;
// Busy-poll until every partitioned dimension has finished its gather
// and its comms (two completion events per dimension).
while(completeSum < commDimTotal){
for(int i=3; i>=0; i--){
if(!commDimPartitioned(i)) continue;
if(!gatherCompleted[i] && gatherCompleted[previousDir[i]]){
hipError_t event_test = hipEventQuery(gatherEnd[i]);
if(event_test == hipSuccess){
gatherCompleted[i] = 1;
completeSum++;
faceBuffer.commsStart(2*i);
}
}
// Query if comms has finished
if(!commsCompleted[i] && commsCompleted[previousDir[i]] && gatherCompleted[i]){
int comms_test = faceBuffer.commsQuery(2*i);
if(comms_test){
commsCompleted[i] = 1;
completeSum++;
faceBuffer.scatter(src, false, 2*i);
}
}
// enqueue the boundary oprod kernel as soon as the scatters have been enqueud
if(!oprodCompleted[i] && commsCompleted[i]){
hipEventRecord(scatterEnd[i], streams[2*i]);
hipStreamWaitEvent(streams[Nstream-1], scatterEnd[i],0);
arg.dir = i;
arg.ghostOffset = ghostOffset[i];
// Ghost spinors are stored with a face-local stride.
const unsigned int volume = arg.X[0]*arg.X[1]*arg.X[2]*arg.X[3];
arg.inB.setStride(3*volume/(2*arg.X[arg.dir]));
// First, do the one hop term
{
arg.length = faceVolumeCB[i];
arg.displacement = 1;
dim3 blockDim(128, 1, 1);
const int gridSize = (arg.length + (blockDim.x-1))/blockDim.x;
dim3 gridDim(gridSize, 1, 1);
hipLaunchKernelGGL(( exteriorOprodKernel), dim3(gridDim), dim3(blockDim), 0, streams[Nstream-1], arg);
}
// Now do the 3 hop term - Try putting this in a separate stream
{
arg.displacement = 3;
arg.length = arg.displacement*faceVolumeCB[i];
dim3 blockDim(128, 1, 1);
const int gridSize = (arg.length + (blockDim.x-1))/blockDim.x;
dim3 gridDim(gridSize, 1, 1);
hipLaunchKernelGGL(( exteriorOprodKernel), dim3(gridDim), dim3(blockDim), 0, streams[Nstream-1], arg);
}
// Restore the bulk stride for subsequent dimensions.
arg.inB.setStride(arg.inA.Stride());
oprodCompleted[i] = 1;
}
} // i=3,..,0
} // completeSum < commDimTotal
} // if commDimTotal
#endif
} // computeStaggeredOprodCuda
#endif // GPU_STAGGERED_OPROD
// At the moment, I pass an instance of FaceBuffer in.
// Soon, faceBuffer will be subsumed into cudaColorSpinorField.
// Public entry point: validates field orders/precisions, computes the
// per-dimension ghost offsets, selects the even/odd halves of `in` based on
// parity, and dispatches to computeStaggeredOprodCuda at the matching
// precision.
void computeStaggeredOprod(cudaGaugeField& outA, cudaGaugeField& outB, cudaColorSpinorField& in,
FaceBuffer& faceBuffer,
const unsigned int parity, const double coeff[2])
{
#ifdef GPU_STAGGERED_OPROD
if(outA.Order() != QUDA_FLOAT2_GAUGE_ORDER)
errorQuda("Unsupported output ordering: %d\n", outA.Order());
if(outB.Order() != QUDA_FLOAT2_GAUGE_ORDER)
errorQuda("Unsupported output ordering: %d\n", outB.Order());
unsigned int ghostOffset[4] = {0,0,0,0};
#ifdef MULTI_GPU
// Offset (in Npad units) from the start of the spinor to each ghost zone.
const unsigned int Npad = in.Ncolor()*in.Nspin()*2/in.FieldOrder();
for(int dir=0; dir<4; ++dir){
ghostOffset[dir] = Npad*(in.GhostOffset(dir) + in.Stride());
}
#endif
if(in.Precision() != outA.Precision()) errorQuda("Mixed precision not supported: %d %d\n", in.Precision(), outA.Precision());
// inA holds the sites of the requested parity; inB the opposite parity.
cudaColorSpinorField& inA = (parity&1) ? in.Odd() : in.Even();
cudaColorSpinorField& inB = (parity&1) ? in.Even() : in.Odd();
if(in.Precision() == QUDA_DOUBLE_PRECISION){
Spinor<double2, double2, double2, 3, 0, 0> spinorA(inA);
Spinor<double2, double2, double2, 3, 0, 0> spinorB(inB);
computeStaggeredOprodCuda<double2>(FloatNOrder<double, 18, 2, 18>(outA), FloatNOrder<double, 18, 2, 18>(outB),
outA, outB,
spinorA, spinorB, inB, faceBuffer, parity, inB.GhostFace(), ghostOffset, coeff);
}else if(in.Precision() == QUDA_SINGLE_PRECISION){
Spinor<float2, float2, float2, 3, 0, 0> spinorA(inA);
Spinor<float2, float2, float2, 3, 0, 0> spinorB(inB);
computeStaggeredOprodCuda<float2>(FloatNOrder<float, 18, 2, 18>(outA), FloatNOrder<float, 18, 2, 18>(outB),
outA, outB,
spinorA, spinorB, inB, faceBuffer, parity, inB.GhostFace(), ghostOffset, coeff);
}else{
errorQuda("Unsupported precision: %d\n", in.Precision());
}
#else // GPU_STAGGERED_OPROD not defined
errorQuda("Staggered Outer Product has not been built!");
#endif
return;
} // computeStaggeredOprod
} // namespace quda
| 5ba9ce92021bf4114cf66edc531eab7188cfaa11.cu | #include <cstdio>
#include <cstdlib>
#include <staggered_oprod.h>
#include <tune_quda.h>
#include <quda_internal.h>
#include <gauge_field_order.h>
#include <quda_matrix.h>
namespace quda {
#ifdef GPU_STAGGERED_OPROD
namespace { // anonymous
#include <texture.h>
}
static bool kernelPackT = true;
// Create an array of N events with the same flags.  Note this relies on the
// two-argument C++ overload of cudaEventCreate (equivalent to
// cudaEventCreateWithFlags), which exists in CUDA but not in HIP.
template<int N>
void createEventArray(cudaEvent_t (&event)[N], unsigned int flags=cudaEventDefault)
{
for(int i=0; i<N; ++i)
cudaEventCreate(&event[i],flags);
return;
}
// Destroy every event in a fixed-size event array.
template<int N>
void destroyEventArray(cudaEvent_t (&event)[N])
{
for (auto &e : event) cudaEventDestroy(e);
}
// Events used to order the multi-stream pack/gather/scatter/kernel pipeline
// in computeStaggeredOprodCuda (one gather/scatter event per dimension).
static cudaEvent_t packEnd;
static cudaEvent_t gatherEnd[4];
static cudaEvent_t scatterEnd[4];
static cudaEvent_t oprodStart;
static cudaEvent_t oprodEnd; // NOTE(review): created but never recorded in this file
// Create the synchronization events used by computeStaggeredOprodCuda.
// Timing is disabled on all events since they are used only for ordering.
void createStaggeredOprodEvents(){
#ifdef MULTI_GPU
cudaEventCreate(&packEnd, cudaEventDisableTiming);
createEventArray(gatherEnd, cudaEventDisableTiming);
createEventArray(scatterEnd, cudaEventDisableTiming);
#endif
cudaEventCreate(&oprodStart, cudaEventDisableTiming);
cudaEventCreate(&oprodEnd, cudaEventDisableTiming);
return;
}
// Release all events created by createStaggeredOprodEvents().
void destroyStaggeredOprodEvents(){
#ifdef MULTI_GPU
destroyEventArray(gatherEnd);
destroyEventArray(scatterEnd);
cudaEventDestroy(packEnd);
#endif
cudaEventDestroy(oprodStart);
cudaEventDestroy(oprodEnd);
return;
}
enum KernelType {OPROD_INTERIOR_KERNEL, OPROD_EXTERIOR_KERNEL};
// Argument bundle passed by value to the oprod kernels.
template<typename Complex, typename Output, typename Input>
struct StaggeredOprodArg {
unsigned int length;       // number of checkerboard sites the kernel loops over
unsigned int X[4];         // full lattice dimensions
unsigned int parity;       // parity of the sites being updated
unsigned int dir;          // face dimension (exterior kernel only)
unsigned int ghostOffset;  // offset into inB's ghost zone (exterior kernel only)
unsigned int displacement; // hop distance: 1 (outA) or 3 (outB)
KernelType kernelType;     // interior or exterior phase
bool partitioned[4];       // which dimensions are comms-partitioned
Input inA;
Input inB;
Output outA;               // one-hop outer-product accumulator
Output outB;               // three-hop outer-product accumulator
cudaGaugeField& outFieldA;
cudaGaugeField& outFieldB;
typename RealTypeId<Complex>::Type coeff[2]; // weights for the 1- and 3-hop terms
StaggeredOprodArg(const unsigned int length,
const unsigned int X[4],
const unsigned int parity,
const unsigned int dir,
const unsigned int ghostOffset,
const unsigned int displacement,
const KernelType& kernelType,
const double coeff[2],
Input& inA,
Input& inB,
Output& outA,
Output& outB,
cudaGaugeField& outFieldA,
// Fix: `dir` was accepted as a parameter but never stored, leaving the
// member uninitialized; it is now initialized in the member-init list.
cudaGaugeField& outFieldB) : length(length), parity(parity), dir(dir), ghostOffset(ghostOffset),
displacement(displacement), kernelType(kernelType), inA(inA), inB(inB), outA(outA), outB(outB),
outFieldA(outFieldA), outFieldB(outFieldB)
{
this->coeff[0] = coeff[0];
this->coeff[1] = coeff[1];
for(int i=0; i<4; ++i) this->X[i] = X[i];
for(int i=0; i<4; ++i) this->partitioned[i] = commDimPartitioned(i) ? true : false;
}
};
enum IndexType {
EVEN_X = 0,
EVEN_Y = 1,
EVEN_Z = 2,
EVEN_T = 3
};
// Recover the full lattice index `idx` and 4-d coordinates c[] = {x,y,z,t}
// from a checkerboard (single-parity) index cb_idx and the site parity.
// The template parameter states which dimension has even extent, which
// determines where the parity offset can be folded in.
template <IndexType idxType>
static __device__ __forceinline__ void coordsFromIndex(int& idx, int c[4],
const unsigned int cb_idx, const unsigned int parity, const unsigned int X[4])
{
const unsigned int &LX = X[0];
const unsigned int &LY = X[1];
const unsigned int &LZ = X[2];
const unsigned int XYZ = X[2]*X[1]*X[0];
const unsigned int XY = X[1]*X[0];
idx = 2*cb_idx;
int x, y, z, t;
if (idxType == EVEN_X /*!(LX & 1)*/) { // X even
// t = idx / XYZ;
// z = (idx / XY) % Z;
// y = (idx / X) % Y;
// idx += (parity + t + z + y) & 1;
// x = idx % X;
// equivalent to the above, but with fewer divisions/mods:
int aux1 = idx / LX;
x = idx - aux1 * LX;
int aux2 = aux1 / LY;
y = aux1 - aux2 * LY;
t = aux2 / LZ;
z = aux2 - t * LZ;
aux1 = (parity + t + z + y) & 1;
x += aux1;
idx += aux1;
} else if (idxType == EVEN_Y /*!(LY & 1)*/) { // Y even
t = idx / XYZ;
z = (idx / XY) % LZ;
idx += (parity + t + z) & 1;
y = (idx / LX) % LY;
x = idx % LX;
} else if (idxType == EVEN_Z /*!(LZ & 1)*/) { // Z even
t = idx / XYZ;
idx += (parity + t) & 1;
z = (idx / XY) % LZ;
y = (idx / LX) % LY;
x = idx % LX;
} else {
// No even dimension assumed: parity offset applied up front.
idx += parity;
t = idx / XYZ;
z = (idx / XY) % LZ;
y = (idx / LX) % LY;
x = idx % LX;
}
c[0] = x;
c[1] = y;
c[2] = z;
c[3] = t;
}
// Get the coordinates for the exterior kernels
// Maps a face-local checkerboard index cb_idx to full 4-d coordinates x[]
// on the boundary slab of thickness `displacement` at the far end of
// dimension `dir`.  Only Nspin==1 (staggered) is implemented; the x[0]
// component is reconstructed last from the parity constraint.
template<int Nspin>
__device__ void coordsFromIndex(unsigned int x[4], const unsigned int cb_idx, const unsigned int X[4], const unsigned int dir, const int displacement, const unsigned int parity)
{
if(Nspin == 1){
unsigned int Xh[2] = {X[0]/2, X[1]/2};
switch(dir){
case 0:
x[2] = cb_idx/Xh[1] % X[2];
x[3] = cb_idx/(Xh[1]*X[2]) % X[3];
x[0] = cb_idx/(Xh[1]*X[2]*X[3]);
x[0] += (X[0] - displacement);
x[1] = 2*(cb_idx % Xh[1]) + ((x[0]+x[2]+x[3]+parity)&1);
break;
case 1:
x[2] = cb_idx/Xh[0] % X[2];
x[3] = cb_idx/(Xh[0]*X[2]) % X[3];
x[1] = cb_idx/(Xh[0]*X[2]*X[3]);
x[1] += (X[1] - displacement);
x[0] = 2*(cb_idx % Xh[0]) + ((x[1]+x[2]+x[3]+parity)&1);
break;
case 2:
x[1] = cb_idx/Xh[0] % X[1];
x[3] = cb_idx/(Xh[0]*X[1]) % X[3];
x[2] = cb_idx/(Xh[0]*X[1]*X[3]);
x[2] += (X[2] - displacement);
x[0] = 2*(cb_idx % Xh[0]) + ((x[1]+x[2]+x[3]+parity)&1);
break;
case 3:
x[1] = cb_idx/Xh[0] % X[1];
x[2] = cb_idx/(Xh[0]*X[1]) % X[2];
x[3] = cb_idx/(Xh[0]*X[1]*X[2]);
x[3] += (X[3] - displacement);
x[0] = 2*(cb_idx % Xh[0]) + ((x[1]+x[2]+x[3]+parity)&1);
break;
}
}else if(Nspin == 3){
// currently unsupported
}
return;
}
// Generic fallback: only the <3,3> specialization below is meaningful;
// all other instantiations return 0.
template<int Nspin, int Nface>
__device__ int ghostIndexFromCoords(const unsigned int x[4], const unsigned int X[4], const unsigned int dir, const int shift){
return 0;
}
// Ghost-zone index for Nspin=3 (color), Nface=3: given site coordinates x[]
// and a shift of `shift` lattice units along `dir`, return the checkerboard
// index into that dimension's ghost buffer.  Positive shifts address the
// forward ghost zone (offset by the 3*3 backward slices), negative shifts
// the backward one.
// Fix: ghost_idx was previously left uninitialized, so a call for a site
// whose shifted coordinate stays inside the local volume returned an
// indeterminate value (undefined behavior).  It is now initialized to 0.
template<>
__device__ int ghostIndexFromCoords<3,3>(
const unsigned int x[4],
const unsigned int X[4],
unsigned int dir,
const int shift)
{
unsigned int ghost_idx = 0;
if(shift > 0){
if((x[dir] + shift) >= X[dir]){
switch(dir){
case 0:
ghost_idx = (3*3 + (x[0]-X[0]+shift))*(X[3]*X[2]*X[1])/2 + ((x[3]*X[2] + x[2])*X[1] + x[1])/2;
break;
case 1:
ghost_idx = (3*3 + (x[1]-X[1]+shift))*(X[3]*X[2]*X[0])/2 + (x[3]*X[2]*X[0] + x[2]*X[0] + x[0])/2;
break;
case 2:
ghost_idx = (3*3 + (x[2]-X[2]+shift))*(X[3]*X[1]*X[0])/2 + (x[3]*X[1]*X[0] + x[1]*X[0] + x[0])/2;
break;
case 3:
ghost_idx = (3*3 + (x[3]-X[3]+shift))*(X[2]*X[1]*X[0])/2 + (x[2]*X[1]*X[0] + x[1]*X[0] + x[0])/2;
break;
default:
break;
} // switch
} // x[dir] + shift[dir] >= X[dir]
}else{ // shift < 0
if(static_cast<int>(x[dir]) + shift < 0){
switch(dir){
case 0:
ghost_idx = (3 + shift)*(X[3]*X[2]*X[1])/2 + ((x[3]*X[2] + x[2])*X[1] + x[1])/2;
break;
case 1:
ghost_idx = (3 + shift)*(X[3]*X[2]*X[0])/2 + ((x[3]*X[2] + x[2])*X[0] + x[0])/2;
break;
case 2:
ghost_idx = (3 + shift)*(X[3]*X[1]*X[0])/2 + ((x[3]*X[1] + x[1])*X[0] + x[0])/2;
break;
case 3:
ghost_idx = (3 + shift)*(X[2]*X[1]*X[0])/2 + ((x[2]*X[1] + x[1])*X[0] + x[0])/2;
break;
} // switch(dir)
}
} // shift < 0
return ghost_idx;
}
// Checkerboard index of the site reached from cb_idx by the 4-vector
// shift[].  Returns -1 when the neighbor leaves the local volume in a
// comms-partitioned dimension (it then lives in a ghost zone and is
// handled by the exterior kernel); otherwise wraps periodically.
__device__ __forceinline__
int neighborIndex(const unsigned int& cb_idx, const int shift[4], const bool partitioned[4], const unsigned int& parity,
const unsigned int X[4]){
int full_idx;
int x[4];
coordsFromIndex<EVEN_X>(full_idx, x, cb_idx, parity, X);
#ifdef MULTI_GPU
for(int dim = 0; dim<4; ++dim){
if(partitioned[dim])
if( (x[dim]+shift[dim])<0 || (x[dim]+shift[dim])>=X[dim]) return -1;
}
#endif
// Periodic wrap-around in every shifted dimension.
for(int dim=0; dim<4; ++dim){
x[dim] = shift[dim] ? (x[dim]+shift[dim] + X[dim]) % X[dim] : x[dim];
}
// Fold the 4-d coordinates back down to a checkerboard index.
return (((x[3]*X[2] + x[2])*X[1] + x[1])*X[0] + x[0]) >> 1;
}
// Interior (bulk) staggered outer product: grid-stride loop over the local
// checkerboard sites.  For each direction, accumulates the outer product of
// the 1-hop neighbor spinor with the local spinor into outA (weight
// coeff[0]) and of the 3-hop neighbor into outB (weight coeff[1]).
// Off-node neighbors (neighborIndex() < 0) are skipped here and handled by
// exteriorOprodKernel after comms complete.
template<typename Complex, typename Output, typename Input>
__global__ void interiorOprodKernel(StaggeredOprodArg<Complex, Output, Input> arg)
{
unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x;
const unsigned int gridSize = gridDim.x*blockDim.x;
typedef typename RealTypeId<Complex>::Type real;
Complex x[3];
Complex y[3];
Complex z[3];
Matrix<Complex,3> result;
Matrix<Complex,3> tempA, tempB; // input
while(idx<arg.length){
arg.inA.load(x, idx);
for(int dir=0; dir<4; ++dir){
int shift[4] = {0,0,0,0};
shift[dir] = 1;
const int first_nbr_idx = neighborIndex(idx, shift, arg.partitioned, arg.parity, arg.X);
if(first_nbr_idx >= 0){
arg.inB.load(y, first_nbr_idx);
outerProd(y,x,&result);
// Read-modify-write accumulation into the one-hop field.
arg.outA.load(reinterpret_cast<real*>(tempA.data), idx, dir, arg.parity);
result = tempA + result*arg.coeff[0];
arg.outA.save(reinterpret_cast<real*>(result.data), idx, dir, arg.parity);
shift[dir] = 3;
const int third_nbr_idx = neighborIndex(idx, shift, arg.partitioned, arg.parity, arg.X);
if(third_nbr_idx >= 0){
arg.inB.load(z, third_nbr_idx);
outerProd(z, x, &result);
// Read-modify-write accumulation into the three-hop field.
arg.outB.load(reinterpret_cast<real*>(tempB.data), idx, dir, arg.parity);
result = tempB + result*arg.coeff[1];
arg.outB.save(reinterpret_cast<real*>(result.data), idx, dir, arg.parity);
}
}
} // dir
idx += gridSize;
}
return;
} // interiorOprodKernel
// Accumulate the outer-product contribution from boundary sites in the
// single direction arg.dir.  arg.displacement selects the target: 1 ->
// (outA, coeff[0]), otherwise -> (outB, coeff[1]).  The inB operand is
// read from the ghost zone starting at arg.ghostOffset; the inA operand
// and the accumulator come from the bulk site bulk_cb_idx.
template<typename Complex, typename Output, typename Input>
__global__ void exteriorOprodKernel(StaggeredOprodArg<Complex, Output, Input> arg)
{
unsigned int cb_idx = blockIdx.x*blockDim.x + threadIdx.x;
const unsigned int gridSize = gridDim.x*blockDim.x;
Complex a[3];
Complex b[3];
Matrix<Complex,3> result;
Matrix<Complex,3> inmatrix; // input
typedef typename RealTypeId<Complex>::Type real;
Output& out = (arg.displacement == 1) ? arg.outA : arg.outB;
real coeff = (arg.displacement == 1) ? arg.coeff[0] : arg.coeff[1];
unsigned int x[4];
// grid-stride loop over the face volume for this direction/displacement
while(cb_idx<arg.length){
coordsFromIndex<1>(x, cb_idx, arg.X, arg.dir, arg.displacement, arg.parity);
const unsigned int bulk_cb_idx = ((((x[3]*arg.X[2] + x[2])*arg.X[1] + x[1])*arg.X[0] + x[0]) >> 1);
out.load(reinterpret_cast<real*>(inmatrix.data), bulk_cb_idx, arg.dir, arg.parity);
arg.inA.load(a, bulk_cb_idx);
const unsigned int ghost_idx = arg.ghostOffset + ghostIndexFromCoords<3,3>(x, arg.X, arg.dir, arg.displacement);
arg.inB.load(b, ghost_idx);
outerProd(b,a,&result);
// accumulate: out <- out + coeff * outerProd(ghost, bulk)
result = inmatrix + result*coeff;
out.save(reinterpret_cast<real*>(result.data), bulk_cb_idx, arg.dir, arg.parity);
cb_idx += gridSize;
}
return;
}
// Tunable wrapper that launches the staggered outer-product kernels.
// Only the interior kernel is currently dispatched from apply(); the
// exterior-kernel path here is disabled (see the commented-out code) and
// the exterior kernels are launched directly from
// computeStaggeredOprodCuda instead.
template<typename Complex, typename Output, typename Input>
class StaggeredOprodField : public Tunable {
private:
StaggeredOprodArg<Complex,Output,Input> arg;
QudaFieldLocation location; // location of the lattice fields
unsigned int sharedBytesPerThread() const { return 0; }
unsigned int sharedBytesPerBlock(const TuneParam &) const { return 0; }
unsigned int minThreads() const { return arg.outA.volumeCB; }
bool tunedGridDim() const { return false; }
public:
StaggeredOprodField(const StaggeredOprodArg<Complex,Output,Input> &arg,
QudaFieldLocation location)
: arg(arg), location(location) {
// volume and aux strings identify this kernel in the tune cache
sprintf(vol,"%dx%dx%dx%d",arg.X[0],arg.X[1],arg.X[2],arg.X[3]);
sprintf(aux,"threads=%d,prec=%lu,stride=%d",arg.length,sizeof(Complex)/2,arg.inA.Stride());
}
virtual ~StaggeredOprodField() {}
// Update the mutable launch parameters in place without reconstructing
// the wrapper (self-described hack below).
void set(const StaggeredOprodArg<Complex,Output,Input> &arg, QudaFieldLocation location){
// This is a hack. Need to change this!
this->arg.dir = arg.dir;
this->arg.length = arg.length;
this->arg.ghostOffset = arg.ghostOffset;
this->arg.kernelType = arg.kernelType;
this->location = location;
} // set
void apply(const cudaStream_t &stream){
if(location == QUDA_CUDA_FIELD_LOCATION){
// Disable tuning for the time being
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
//if(arg.kernelType == OPROD_INTERIOR_KERNEL){
interiorOprodKernel<<<tp.grid,tp.block,tp.shared_bytes, stream>>>(arg);
// dim3 blockDim(128, 1, 1);
// const int gridSize = (arg.length + (blockDim.x-1))/blockDim.x;
// dim3 gridDim(gridSize, 1, 1);
// interiorOprodKernel<<<gridDim,blockDim,0, stream>>>(arg);
// }else if(arg.kernelType == OPROD_EXTERIOR_KERNEL){
// const unsigned int volume = arg.X[0]*arg.X[1]*arg.X[2]*arg.X[3];
// arg.inB.setStride(3*volume/(2*arg.X[arg.dir]));
// exteriorOprodKernel<<<tp.grid,tp.block,tp.shared_bytes, stream>>>(arg);
// arg.inB.setStride(arg.inA.Stride());
// }else{
// errorQuda("Kernel type not supported\n");
// }
}else{ // run the CPU code
errorQuda("No CPU support for staggered outer-product calculation\n");
}
} // apply
// Back up the accumulated output fields so repeated tuning launches do
// not corrupt the accumulation; restore them afterwards.
void preTune(){
this->arg.outFieldA.backup();
this->arg.outFieldB.backup();
}
void postTune(){
this->arg.outFieldA.restore();
this->arg.outFieldB.restore();
}
long long flops() const {
return 0; // fix this
}
long long bytes() const {
return 0; // fix this
}
TuneKey tuneKey() const { return TuneKey(vol, typeid(*this).name(), aux);}
}; // StaggeredOprodField
// Driver for the staggered outer-product computation.
// Pipeline: (1) pack + gather the ghost faces on per-dimension streams,
// (2) run the interior kernel on streams[Nstream-1], then (3) as each
// partitioned dimension's communications complete, enqueue the exterior
// kernels for the one-hop and three-hop terms.  The packEnd / gatherEnd /
// scatterEnd events order the transfers against the kernels.
template<typename Complex, typename Output, typename Input>
void computeStaggeredOprodCuda(Output outA, Output outB, cudaGaugeField& outFieldA, cudaGaugeField& outFieldB, Input& inA, Input& inB, cudaColorSpinorField& src,
FaceBuffer& faceBuffer, const unsigned int parity, const int faceVolumeCB[4],
const unsigned int ghostOffset[4], const double coeff[2])
{
cudaEventRecord(oprodStart, streams[Nstream-1]);
const unsigned int dim[4] = {src.X(0)*2, src.X(1), src.X(2), src.X(3)};
// Create the arguments for the interior kernel
StaggeredOprodArg<Complex,Output,Input> arg(outA.volumeCB, dim, parity, 0, 0, 1, OPROD_INTERIOR_KERNEL, coeff, inA, inB, outA, outB, outFieldA,
outFieldB);
StaggeredOprodField<Complex,Output,Input> oprod(arg, QUDA_CUDA_FIELD_LOCATION);
#ifdef MULTI_GPU
// packing is required if any dimension is partitioned (time only when
// kernelPackT is enabled)
bool pack=false;
for(int i=3; i>=0; i--){
if(commDimPartitioned(i) && (i!=3 || kernelPackT)){
pack = true;
break;
}
} // i=3,..,0
// source, dir(+/-1), parity, dagger, stream_ptr
if(pack){
faceBuffer.pack(src, -1, 1-parity, 0, streams); // packing is all done in streams[Nstream-1]
//faceBuffer.pack(src, 1-parity, 0, streams); // packing is all done in streams[Nstream-1]
cudaEventRecord(packEnd, streams[Nstream-1]);
}
for(int i=3; i>=0; i--){
if(commDimPartitioned(i)){
cudaEvent_t &event = (i!=3 || kernelPackT) ? packEnd : oprodStart;
cudaStreamWaitEvent(streams[2*i], event, 0); // wait in stream 2*i for event to complete
// Initialize the host transfer from the source spinor
faceBuffer.gather(src, false, 2*i);
// record the end of the gathering
cudaEventRecord(gatherEnd[i], streams[2*i]);
} // comDim(i)
} // i=3,..,0
#endif
oprod.apply(streams[Nstream-1]);
/*
dim3 blockDim(128, 1, 1);
const int gridSize = (arg.length + (blockDim.x-1))/blockDim.x;
dim3 gridDim(gridSize, 1, 1);
interiorOprodKernel<<<gridDim,blockDim,0,streams[Nstream-1]>>>(arg);
*/
#ifdef MULTI_GPU
// compute gather completed
int gatherCompleted[5];
int commsCompleted[5];
int oprodCompleted[4];
for(int i=0; i<4; ++i){
gatherCompleted[i] = commsCompleted[i] = oprodCompleted[i] = 0;
}
// index 4 is a sentinel: previousDir[i] == 4 means "no later partitioned
// dimension", so its gather/comms are considered already complete
gatherCompleted[4] = commsCompleted[4] = 1;
// initialize commDimTotal
int commDimTotal = 0;
for(int i=0; i<4; ++i){
commDimTotal += commDimPartitioned(i);
}
// two completion events (gather + comms) are counted per partitioned dim
commDimTotal *= 2;
// initialize previousDir
int previousDir[4];
for(int i=3; i>=0; i--){
if(commDimPartitioned(i)){
int prev = 4;
for(int j=3; j>i; j--){
if(commDimPartitioned(j)){
prev = j;
}
}
previousDir[i] = prev;
}
} // set previous directions
if(commDimTotal){
arg.kernelType = OPROD_EXTERIOR_KERNEL;
unsigned int completeSum=0;
// poll the per-dimension gathers/comms in priority order (3..0) until
// every partitioned dimension has finished both stages
while(completeSum < commDimTotal){
for(int i=3; i>=0; i--){
if(!commDimPartitioned(i)) continue;
if(!gatherCompleted[i] && gatherCompleted[previousDir[i]]){
cudaError_t event_test = cudaEventQuery(gatherEnd[i]);
if(event_test == cudaSuccess){
gatherCompleted[i] = 1;
completeSum++;
faceBuffer.commsStart(2*i);
}
}
// Query if comms has finished
if(!commsCompleted[i] && commsCompleted[previousDir[i]] && gatherCompleted[i]){
int comms_test = faceBuffer.commsQuery(2*i);
if(comms_test){
commsCompleted[i] = 1;
completeSum++;
faceBuffer.scatter(src, false, 2*i);
}
}
// enqueue the boundary oprod kernel as soon as the scatters have been enqueud
if(!oprodCompleted[i] && commsCompleted[i]){
cudaEventRecord(scatterEnd[i], streams[2*i]);
cudaStreamWaitEvent(streams[Nstream-1], scatterEnd[i],0);
arg.dir = i;
arg.ghostOffset = ghostOffset[i];
// the ghost-zone spinor is packed with a face-local stride
const unsigned int volume = arg.X[0]*arg.X[1]*arg.X[2]*arg.X[3];
arg.inB.setStride(3*volume/(2*arg.X[arg.dir]));
// First, do the one hop term
{
arg.length = faceVolumeCB[i];
arg.displacement = 1;
dim3 blockDim(128, 1, 1);
const int gridSize = (arg.length + (blockDim.x-1))/blockDim.x;
dim3 gridDim(gridSize, 1, 1);
exteriorOprodKernel<<<gridDim, blockDim, 0, streams[Nstream-1]>>>(arg);
}
// Now do the 3 hop term - Try putting this in a separate stream
{
arg.displacement = 3;
arg.length = arg.displacement*faceVolumeCB[i];
dim3 blockDim(128, 1, 1);
const int gridSize = (arg.length + (blockDim.x-1))/blockDim.x;
dim3 gridDim(gridSize, 1, 1);
exteriorOprodKernel<<<gridDim, blockDim, 0, streams[Nstream-1]>>>(arg);
}
// restore the bulk stride for subsequent dimensions
arg.inB.setStride(arg.inA.Stride());
oprodCompleted[i] = 1;
}
} // i=3,..,0
} // completeSum < commDimTotal
} // if commDimTotal
#endif
} // computeStaggeredOprodCuda
#endif // GPU_STAGGERED_OPROD
// At the moment, I pass an instance of FaceBuffer in.
// Soon, faceBuffer will be subsumed into cudaColorSpinorField.
// Public entry point: dispatch the staggered outer-product computation by
// precision.  outA receives the one-hop term, outB the three-hop term,
// weighted by coeff[0] / coeff[1].  inA/inB are the parity halves of the
// source spinor selected by `parity`.
void computeStaggeredOprod(cudaGaugeField& outA, cudaGaugeField& outB, cudaColorSpinorField& in,
FaceBuffer& faceBuffer,
const unsigned int parity, const double coeff[2])
{
#ifdef GPU_STAGGERED_OPROD
// only FLOAT2 ordering of the output gauge fields is supported
if(outA.Order() != QUDA_FLOAT2_GAUGE_ORDER)
errorQuda("Unsupported output ordering: %d\n", outA.Order());
if(outB.Order() != QUDA_FLOAT2_GAUGE_ORDER)
errorQuda("Unsupported output ordering: %d\n", outB.Order());
unsigned int ghostOffset[4] = {0,0,0,0};
#ifdef MULTI_GPU
// offset (in real numbers) of each dimension's ghost zone in the spinor
const unsigned int Npad = in.Ncolor()*in.Nspin()*2/in.FieldOrder();
for(int dir=0; dir<4; ++dir){
ghostOffset[dir] = Npad*(in.GhostOffset(dir) + in.Stride());
}
#endif
if(in.Precision() != outA.Precision()) errorQuda("Mixed precision not supported: %d %d\n", in.Precision(), outA.Precision());
// inA is the half of the source with the requested parity, inB the other
cudaColorSpinorField& inA = (parity&1) ? in.Odd() : in.Even();
cudaColorSpinorField& inB = (parity&1) ? in.Even() : in.Odd();
if(in.Precision() == QUDA_DOUBLE_PRECISION){
Spinor<double2, double2, double2, 3, 0, 0> spinorA(inA);
Spinor<double2, double2, double2, 3, 0, 0> spinorB(inB);
computeStaggeredOprodCuda<double2>(FloatNOrder<double, 18, 2, 18>(outA), FloatNOrder<double, 18, 2, 18>(outB),
outA, outB,
spinorA, spinorB, inB, faceBuffer, parity, inB.GhostFace(), ghostOffset, coeff);
}else if(in.Precision() == QUDA_SINGLE_PRECISION){
Spinor<float2, float2, float2, 3, 0, 0> spinorA(inA);
Spinor<float2, float2, float2, 3, 0, 0> spinorB(inB);
computeStaggeredOprodCuda<float2>(FloatNOrder<float, 18, 2, 18>(outA), FloatNOrder<float, 18, 2, 18>(outB),
outA, outB,
spinorA, spinorB, inB, faceBuffer, parity, inB.GhostFace(), ghostOffset, coeff);
}else{
errorQuda("Unsupported precision: %d\n", in.Precision());
}
#else // GPU_STAGGERED_OPROD not defined
errorQuda("Staggered Outer Product has not been built!");
#endif
return;
} // computeStaggeredOprod
} // namespace quda
|
21463ad3f3d89d0357206d599e1fc207e007bb0d.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <hip/driver_types.h>
#include <iostream>
#include <string>
#include "timer.h"
using namespace std;
using namespace timer;
// Interface for an element-wise binary operation: Calc should compute
// z[i] from x[i] and y[i] for i in [0, n). The base implementation is a
// no-op (note: not pure virtual, so it can be instantiated).
class ElementOp {
public:
virtual ~ElementOp() {}
virtual void Calc(int n, const float* x, const float* y, float* z) {}
};
// Thin strategy holder: forwards Calc to the ElementOp it was constructed
// with. Does not own the op; the caller must keep it alive.
class ElementContext {
public:
ElementContext(ElementOp* op):
op_(op)
{
}
void Calc(int n, const float* x, const float* y, float* z) {
op_->Calc(n, x, y, z);
}
private:
ElementOp* op_; // non-owning
};
// One-thread-per-element add: z[i] = x[i] + y[i] with i = threadIdx.x.
// No bounds check -- must be launched as a single block of exactly n
// threads, and n must not exceed the device's max threads per block.
__global__ void AddKernelPerThread(const float* x, const float* y, float* z) {
int idx = threadIdx.x;
z[idx] = x[idx] + y[idx];
}
// One-block-per-element add: z[i] = x[i] + y[i] with i = blockIdx.x.
// Must be launched with n blocks of 1 thread; no bounds check.
__global__ void AddKernelPerBlock(const float* x, const float* y, float* z) {
int idx = blockIdx.x;
z[idx] = x[idx] + y[idx];
}
// Element-wise add z = x + y executed on the GPU with a single block of
// one thread per element (so n is limited to the device's max threads
// per block). Allocates device buffers, uploads, launches, downloads.
class ElementAddOpWithThread: public ElementOp {
public:
~ElementAddOpWithThread() {}
void Calc(int n, const float* x, const float* y, float* z) {
const size_t bytes = sizeof(float) * n;
// device-side buffers
float* dev_x = NULL;
float* dev_y = NULL;
float* dev_z = NULL;
hipMalloc((void**)&dev_x, bytes);
hipMalloc((void**)&dev_y, bytes);
hipMalloc((void**)&dev_z, bytes);
// upload both operands
hipMemcpy(dev_x, x, bytes, hipMemcpyHostToDevice);
hipMemcpy(dev_y, y, bytes, hipMemcpyHostToDevice);
// one block, n threads: thread i produces element i
hipLaunchKernelGGL(( AddKernelPerThread), dim3(1), dim3(n), 0, 0, dev_x, dev_y, dev_z);
// block until the kernel has finished (also surfaces launch errors)
hipDeviceSynchronize();
// download the result
hipMemcpy(z, dev_z, bytes, hipMemcpyDeviceToHost);
hipFree(dev_x);
hipFree(dev_y);
hipFree(dev_z);
}
};
// Element-wise add z = x + y executed on the GPU with one block (of one
// thread) per element. Return codes of the HIP calls are not checked.
class ElementAddOpWithBlock: public ElementOp {
public:
~ElementAddOpWithBlock() {}
void Calc(int n, const float* x, const float* y, float* z) {
// allocate GPU buffers
float* cuda_x = NULL;
float* cuda_y = NULL;
float* cuda_z = NULL;
hipMalloc((void**)&cuda_x, sizeof(float) * n);
hipMalloc((void**)&cuda_y, sizeof(float) * n);
hipMalloc((void**)&cuda_z, sizeof(float) * n);
// copy host memory to GPU buffers
hipMemcpy(cuda_x, x, sizeof(float) * n, hipMemcpyHostToDevice);
hipMemcpy(cuda_y, y, sizeof(float) * n, hipMemcpyHostToDevice);
// n blocks of 1 thread: block i produces element i
hipLaunchKernelGGL(( AddKernelPerBlock), dim3(n), dim3(1), 0, 0, cuda_x, cuda_y, cuda_z);
// NOTE(review): this sync is redundant -- the hipDeviceSynchronize()
// below already waits for all outstanding work on the device
hipStreamSynchronize(0);
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
hipDeviceSynchronize();
// copy GPU buffers to host memory
hipMemcpy(z, cuda_z, sizeof(float) * n, hipMemcpyDeviceToHost);
hipFree(cuda_x);
hipFree(cuda_y);
hipFree(cuda_z);
}
};
// Element-wise add z = x + y on the GPU, splitting the work across
// several HIP streams.
// Fixes in this revision:
//  - the kernels previously ran on the *host* pointers x/y/z (an
//    illegal device address) instead of the device buffers;
//  - only the first num_streams elements were ever touched; the full
//    range [0, n) is now partitioned into one contiguous chunk per
//    stream;
//  - cuda_z is now actually written before it is copied back to z;
//  - the streams are destroyed instead of leaked.
class ElementAddOpWithStream: public ElementOp {
public:
~ElementAddOpWithStream() {}
void Calc(int n, const float* x, const float* y, float* z) {
// allocate GPU buffers
float* cuda_x = NULL;
float* cuda_y = NULL;
float* cuda_z = NULL;
hipMalloc((void**)&cuda_x, sizeof(float) * n);
hipMalloc((void**)&cuda_y, sizeof(float) * n);
hipMalloc((void**)&cuda_z, sizeof(float) * n);
// copy host memory to GPU buffers
hipMemcpy(cuda_x, x, sizeof(float) * n, hipMemcpyHostToDevice);
hipMemcpy(cuda_y, y, sizeof(float) * n, hipMemcpyHostToDevice);
// partition [0, n) into num_streams contiguous chunks, one per stream
const int num_streams = 5;
hipStream_t stream[num_streams];
for (int i = 0; i < num_streams; i++) {
hipStreamCreate(&stream[i]);
}
const int chunk = (n + num_streams - 1) / num_streams;  // ceil-div
for (int i = 0; i < num_streams; i++) {
const int offset = i * chunk;
if (offset >= n) break;  // remaining streams get no work
const int len = (offset + chunk <= n) ? chunk : (n - offset);
// AddKernelPerBlock computes one element per block, so launch `len`
// blocks over the *device* chunk starting at `offset`
hipLaunchKernelGGL(( AddKernelPerBlock), dim3(len), dim3(1), 0, stream[i],
cuda_x + offset, cuda_y + offset, cuda_z + offset);
}
// wait for all streams to finish before reading the result back
hipDeviceSynchronize();
// copy GPU buffers to host memory
hipMemcpy(z, cuda_z, sizeof(float) * n, hipMemcpyDeviceToHost);
for (int i = 0; i < num_streams; i++) {
hipStreamDestroy(stream[i]);
}
hipFree(cuda_x);
hipFree(cuda_y);
hipFree(cuda_z);
}
};
// Print the first n values of a on one line, separated by spaces.
void Print(const float* a, int n) {
int idx = 0;
while (idx < n) {
cout << a[idx] << " ";
++idx;
}
cout << endl;
}
// RAII GPU timer: records a HIP event on construction and another on
// destruction, then prints the elapsed GPU time (ms) to stderr together
// with the label given at construction.
class GPUAutoTimer {
public:
GPUAutoTimer(string str=""):
str_(str)
{
hipEventCreate(&start_); // create the timing events
hipEventCreate(&stop_);
hipEventRecord(start_, 0); // record the start event on the null stream
}
~GPUAutoTimer() {
hipEventRecord(stop_, 0); // record the stop event
hipEventSynchronize(stop_); // wait until all work before the stop event completed
float elapsed_time;
hipEventElapsedTime(&elapsed_time, start_, stop_); // elapsed time between events (unit: ms)
fprintf(stderr, "%s use %.3fms\n", str_.c_str(), elapsed_time);
hipEventDestroy(start_);
hipEventDestroy(stop_);
}
private:
string str_; // label used in the report
hipEvent_t start_; // timing event objects
hipEvent_t stop_;
};
// Benchmark driver: fills two 1024-element arrays with small random
// values, then times the three element-add strategies (per-thread,
// per-block, multi-stream) with both a GPU-event timer and a CPU timer.
// Each strategy runs in its own scope so the timers' destructors report
// when the block ends.
int main() {
const int n = 1024;
float a[n];
float b[n];
float c[n];
for (int i = 0; i < n; i++) {
a[i] = random() % 10;
b[i] = random() % 10;
}
// NOTE(review): `count` is queried but never used or checked
int count;
hipGetDeviceCount(&count);
hipSetDevice(0);
{
GPUAutoTimer gpu_auto_timer("GPU");
AutoTimer auto_timer("CPU");
ElementAddOpWithThread elt_op_add_with_thread;
ElementContext elt_context1(&elt_op_add_with_thread);
elt_context1.Calc(n, a, b, c);
}
{
GPUAutoTimer gpu_auto_timer("GPU");
AutoTimer auto_timer("CPU");
ElementAddOpWithBlock elt_op_add_with_block;
ElementContext elt_context2(&elt_op_add_with_block);
elt_context2.Calc(n, a, b, c);
}
{
GPUAutoTimer gpu_auto_timer("GPU");
AutoTimer auto_timer("CPU");
ElementAddOpWithStream elt_op_add_with_stream;
ElementContext elt_context3(&elt_op_add_with_stream);
elt_context3.Calc(n, a, b, c);
}
// hipDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
hipDeviceReset();
return 0;
}
| 21463ad3f3d89d0357206d599e1fc207e007bb0d.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <driver_types.h>
#include <iostream>
#include <string>
#include "timer.h"
using namespace std;
using namespace timer;
// Interface for an element-wise binary operation: Calc should compute
// z[i] from x[i] and y[i] for i in [0, n). The base implementation is a
// no-op (note: not pure virtual, so it can be instantiated).
class ElementOp {
public:
virtual ~ElementOp() {}
virtual void Calc(int n, const float* x, const float* y, float* z) {}
};
// Thin strategy holder: forwards Calc to the ElementOp it was constructed
// with. Does not own the op; the caller must keep it alive.
class ElementContext {
public:
ElementContext(ElementOp* op):
op_(op)
{
}
void Calc(int n, const float* x, const float* y, float* z) {
op_->Calc(n, x, y, z);
}
private:
ElementOp* op_; // non-owning
};
// One-thread-per-element add: z[i] = x[i] + y[i] with i = threadIdx.x.
// No bounds check -- must be launched as a single block of exactly n
// threads, and n must not exceed the device's max threads per block.
__global__ void AddKernelPerThread(const float* x, const float* y, float* z) {
int idx = threadIdx.x;
z[idx] = x[idx] + y[idx];
}
// One-block-per-element add: z[i] = x[i] + y[i] with i = blockIdx.x.
// Must be launched with n blocks of 1 thread; no bounds check.
__global__ void AddKernelPerBlock(const float* x, const float* y, float* z) {
int idx = blockIdx.x;
z[idx] = x[idx] + y[idx];
}
// Element-wise add z = x + y executed on the GPU with a single block of
// one thread per element (so n is limited to the device's max threads
// per block).
// Fix: cudaThreadSynchronize() is deprecated; use the equivalent
// cudaDeviceSynchronize() instead.
class ElementAddOpWithThread: public ElementOp {
public:
~ElementAddOpWithThread() {}
void Calc(int n, const float* x, const float* y, float* z) {
// allocate GPU buffers
float* cuda_x = NULL;
float* cuda_y = NULL;
float* cuda_z = NULL;
cudaMalloc((void**)&cuda_x, sizeof(float) * n);
cudaMalloc((void**)&cuda_y, sizeof(float) * n);
cudaMalloc((void**)&cuda_z, sizeof(float) * n);
// copy host memory to GPU buffers
cudaMemcpy(cuda_x, x, sizeof(float) * n, cudaMemcpyHostToDevice);
cudaMemcpy(cuda_y, y, sizeof(float) * n, cudaMemcpyHostToDevice);
// one block, n threads: thread i produces element i
AddKernelPerThread<<<1, n>>>(cuda_x, cuda_y, cuda_z);
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaDeviceSynchronize();
// copy GPU buffers to host memory
cudaMemcpy(z, cuda_z, sizeof(float) * n, cudaMemcpyDeviceToHost);
cudaFree(cuda_x);
cudaFree(cuda_y);
cudaFree(cuda_z);
}
};
// Element-wise add z = x + y executed on the GPU with one block (of one
// thread) per element.
// Fixes: the redundant back-to-back cudaStreamSynchronize(0) +
// cudaThreadSynchronize() pair is collapsed into a single
// cudaDeviceSynchronize() (cudaThreadSynchronize is deprecated).
class ElementAddOpWithBlock: public ElementOp {
public:
~ElementAddOpWithBlock() {}
void Calc(int n, const float* x, const float* y, float* z) {
// allocate GPU buffers
float* cuda_x = NULL;
float* cuda_y = NULL;
float* cuda_z = NULL;
cudaMalloc((void**)&cuda_x, sizeof(float) * n);
cudaMalloc((void**)&cuda_y, sizeof(float) * n);
cudaMalloc((void**)&cuda_z, sizeof(float) * n);
// copy host memory to GPU buffers
cudaMemcpy(cuda_x, x, sizeof(float) * n, cudaMemcpyHostToDevice);
cudaMemcpy(cuda_y, y, sizeof(float) * n, cudaMemcpyHostToDevice);
// n blocks of 1 thread: block i produces element i
AddKernelPerBlock<<<n, 1>>>(cuda_x, cuda_y, cuda_z);
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaDeviceSynchronize();
// copy GPU buffers to host memory
cudaMemcpy(z, cuda_z, sizeof(float) * n, cudaMemcpyDeviceToHost);
cudaFree(cuda_x);
cudaFree(cuda_y);
cudaFree(cuda_z);
}
};
// Element-wise add z = x + y on the GPU, splitting the work across
// several CUDA streams.
// Fixes in this revision:
//  - the kernels previously ran on the *host* pointers x/y/z (an
//    illegal device address) instead of the device buffers;
//  - only the first num_streams elements were ever touched; the full
//    range [0, n) is now partitioned into one contiguous chunk per
//    stream;
//  - cuda_z is now actually written before it is copied back to z;
//  - the streams are destroyed instead of leaked;
//  - deprecated cudaThreadSynchronize() replaced by
//    cudaDeviceSynchronize().
class ElementAddOpWithStream: public ElementOp {
public:
~ElementAddOpWithStream() {}
void Calc(int n, const float* x, const float* y, float* z) {
// allocate GPU buffers
float* cuda_x = NULL;
float* cuda_y = NULL;
float* cuda_z = NULL;
cudaMalloc((void**)&cuda_x, sizeof(float) * n);
cudaMalloc((void**)&cuda_y, sizeof(float) * n);
cudaMalloc((void**)&cuda_z, sizeof(float) * n);
// copy host memory to GPU buffers
cudaMemcpy(cuda_x, x, sizeof(float) * n, cudaMemcpyHostToDevice);
cudaMemcpy(cuda_y, y, sizeof(float) * n, cudaMemcpyHostToDevice);
// partition [0, n) into num_streams contiguous chunks, one per stream
const int num_streams = 5;
cudaStream_t stream[num_streams];
for (int i = 0; i < num_streams; i++) {
cudaStreamCreate(&stream[i]);
}
const int chunk = (n + num_streams - 1) / num_streams;  // ceil-div
for (int i = 0; i < num_streams; i++) {
const int offset = i * chunk;
if (offset >= n) break;  // remaining streams get no work
const int len = (offset + chunk <= n) ? chunk : (n - offset);
// AddKernelPerBlock computes one element per block, so launch `len`
// blocks over the *device* chunk starting at `offset`
AddKernelPerBlock<<<len, 1, 0, stream[i]>>>(cuda_x + offset, cuda_y + offset, cuda_z + offset);
}
// wait for all streams to finish before reading the result back
cudaDeviceSynchronize();
// copy GPU buffers to host memory
cudaMemcpy(z, cuda_z, sizeof(float) * n, cudaMemcpyDeviceToHost);
for (int i = 0; i < num_streams; i++) {
cudaStreamDestroy(stream[i]);
}
cudaFree(cuda_x);
cudaFree(cuda_y);
cudaFree(cuda_z);
}
};
// Print the first n values of a on one line, separated by spaces.
void Print(const float* a, int n) {
int idx = 0;
while (idx < n) {
cout << a[idx] << " ";
++idx;
}
cout << endl;
}
// RAII GPU timer: records a CUDA event on construction and another on
// destruction, then prints the elapsed GPU time (ms) to stderr together
// with the label given at construction.
class GPUAutoTimer {
public:
GPUAutoTimer(string str=""):
str_(str)
{
cudaEventCreate(&start_); // create the timing events
cudaEventCreate(&stop_);
cudaEventRecord(start_, 0); // record the start event on the null stream
}
~GPUAutoTimer() {
cudaEventRecord(stop_, 0); // record the stop event
cudaEventSynchronize(stop_); // wait until all device work before the stop event has completed
float elapsed_time;
cudaEventElapsedTime(&elapsed_time, start_, stop_); // elapsed time between the two events (unit: ms)
fprintf(stderr, "%s use %.3fms\n", str_.c_str(), elapsed_time);
cudaEventDestroy(start_);
cudaEventDestroy(stop_);
}
private:
string str_; // label used in the report
cudaEvent_t start_; // timing event objects
cudaEvent_t stop_;
};
// Benchmark driver: fills two 1024-element arrays with small random
// values, then times the three element-add strategies (per-thread,
// per-block, multi-stream) with both a GPU-event timer and a CPU timer.
// Each strategy runs in its own scope so the timers' destructors report
// when the block ends.
// Fix: cudaThreadExit() is deprecated; cudaDeviceReset() is its modern
// replacement and serves the same purpose for the profiling tools.
int main() {
const int n = 1024;
float a[n];
float b[n];
float c[n];
for (int i = 0; i < n; i++) {
a[i] = random() % 10;
b[i] = random() % 10;
}
// NOTE(review): `count` is queried but never used or checked
int count;
cudaGetDeviceCount(&count);
cudaSetDevice(0);
{
GPUAutoTimer gpu_auto_timer("GPU");
AutoTimer auto_timer("CPU");
ElementAddOpWithThread elt_op_add_with_thread;
ElementContext elt_context1(&elt_op_add_with_thread);
elt_context1.Calc(n, a, b, c);
}
{
GPUAutoTimer gpu_auto_timer("GPU");
AutoTimer auto_timer("CPU");
ElementAddOpWithBlock elt_op_add_with_block;
ElementContext elt_context2(&elt_op_add_with_block);
elt_context2.Calc(n, a, b, c);
}
{
GPUAutoTimer gpu_auto_timer("GPU");
AutoTimer auto_timer("CPU");
ElementAddOpWithStream elt_op_add_with_stream;
ElementContext elt_context3(&elt_op_add_with_stream);
elt_context3.Calc(n, a, b, c);
}
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaDeviceReset();
return 0;
}
|
dc0cb1fe11c63b170d1e6510e86788bff2b28fad.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <math.h>
#include "utils/utils.h"
// TODO 6: Write the code to add the two arrays element by element and
// store the result in another array
// Element-wise vector addition: c[i] = a[i] + b[i] for i in [0, N).
// One thread per element with a bounds check, so any launch whose grid
// covers at least N threads is valid.
__global__ void add_arrays(const float *a, const float *b, float *c, int N) {
unsigned int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < N) {
c[i] = a[i] + b[i];
}
}
// Lab exercise driver: allocates host and device arrays of 1<<20 floats,
// fills the inputs, adds them on the GPU with add_arrays, copies the
// result back, verifies it (check_task_2), and releases everything.
// Every HIP call's return code is checked via the DIE macro.
int main(void) {
hipSetDevice(0);
int N = 1 << 20;
hipError_t err;
const size_t block_size = 256;
size_t num_blocks;
float *host_array_a = 0;
float *host_array_b = 0;
float *host_array_c = 0;
float *device_array_a = 0;
float *device_array_b = 0;
float *device_array_c = 0;
// TODO 1: Allocate the host's arrays
host_array_a = (float*)malloc(N * sizeof(*host_array_a));
DIE(host_array_a == NULL, "malloc(host_array_a)");
host_array_b = (float*)malloc(N * sizeof(*host_array_b));
DIE(host_array_b == NULL, "malloc(host_array_b)");
host_array_c = (float*)malloc(N * sizeof(*host_array_c));
DIE(host_array_c == NULL, "malloc(host_array_c)");
// TODO 2: Allocate the device's arrays
err = hipMalloc(&device_array_a, N * sizeof(*device_array_a));
DIE(err != hipSuccess || device_array_a == NULL,
"hipMalloc(device_array_a)");
err = hipMalloc(&device_array_b, N * sizeof(*device_array_b));
DIE(err != hipSuccess || device_array_b == NULL,
"hipMalloc(device_array_b)");
err = hipMalloc(&device_array_c, N * sizeof(*device_array_c));
DIE(err != hipSuccess || device_array_c == NULL,
"hipMalloc(device_array_c)");
// TODO 3: Check for allocation errors
// TODO 4: Fill array with values; use fill_array_float to fill
// host_array_a and fill_array_random to fill host_array_b. Each
// function has the signature (float *a, int n), where n = number of elements.
fill_array_float(host_array_a, N);
fill_array_random(host_array_b, N);
// TODO 5: Copy the host's arrays to device
err = hipMemcpy(device_array_a, host_array_a,
N * sizeof(*host_array_a), hipMemcpyHostToDevice);
DIE(err != hipSuccess, "hipMemcpy(host_array_a)");
err = hipMemcpy(device_array_b, host_array_b,
N * sizeof(*host_array_b), hipMemcpyHostToDevice);
DIE(err != hipSuccess, "hipMemcpy(host_array_b)");
// TODO 6: Execute the kernel, calculating first the grid size
// and the amount of threads in each block from the grid
// Hint: For this execise the block_size can have any value lower than the
// API's maximum value (it's recommended to be close to the maximum
// value).
// ceiling division so a partial final block still covers the tail
num_blocks = N / block_size;
if (N % block_size) {
++num_blocks;
}
hipLaunchKernelGGL(( add_arrays), dim3(num_blocks), dim3(block_size), 0, 0, device_array_a, device_array_b,
device_array_c, N);
// synchronize to surface any asynchronous kernel-execution errors
err = hipDeviceSynchronize();
DIE(err != hipSuccess, "hipDeviceSynchronize");
// TODO 7: Copy back the results and then uncomment the checking function
err = hipMemcpy(host_array_c, device_array_c,
N * sizeof(*host_array_c), hipMemcpyDeviceToHost);
DIE(err != hipSuccess, "hipMemcpy(device_array_c)");
check_task_2(host_array_a, host_array_b, host_array_c, N);
// TODO 8: Free the memory
free(host_array_a);
free(host_array_b);
free(host_array_c);
err = hipFree(device_array_a);
DIE(err != hipSuccess, "hipFree(device_array_a)");
err = hipFree(device_array_b);
DIE(err != hipSuccess, "hipFree(device_array_b)");
err = hipFree(device_array_c);
DIE(err != hipSuccess, "hipFree(device_array_c)");
return 0;
}
| dc0cb1fe11c63b170d1e6510e86788bff2b28fad.cu | #include <stdio.h>
#include <math.h>
#include "utils/utils.h"
// TODO 6: Write the code to add the two arrays element by element and
// store the result in another array
// Element-wise vector addition: c[i] = a[i] + b[i] for i in [0, N).
// One thread per element with a bounds check, so any launch whose grid
// covers at least N threads is valid.
__global__ void add_arrays(const float *a, const float *b, float *c, int N) {
unsigned int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < N) {
c[i] = a[i] + b[i];
}
}
// Lab exercise driver: allocates host and device arrays of 1<<20 floats,
// fills the inputs, adds them on the GPU with add_arrays, copies the
// result back, verifies it (check_task_2), and releases everything.
// Every CUDA call's return code is checked via the DIE macro.
int main(void) {
cudaSetDevice(0);
int N = 1 << 20;
cudaError_t err;
const size_t block_size = 256;
size_t num_blocks;
float *host_array_a = 0;
float *host_array_b = 0;
float *host_array_c = 0;
float *device_array_a = 0;
float *device_array_b = 0;
float *device_array_c = 0;
// TODO 1: Allocate the host's arrays
host_array_a = (float*)malloc(N * sizeof(*host_array_a));
DIE(host_array_a == NULL, "malloc(host_array_a)");
host_array_b = (float*)malloc(N * sizeof(*host_array_b));
DIE(host_array_b == NULL, "malloc(host_array_b)");
host_array_c = (float*)malloc(N * sizeof(*host_array_c));
DIE(host_array_c == NULL, "malloc(host_array_c)");
// TODO 2: Allocate the device's arrays
err = cudaMalloc(&device_array_a, N * sizeof(*device_array_a));
DIE(err != cudaSuccess || device_array_a == NULL,
"cudaMalloc(device_array_a)");
err = cudaMalloc(&device_array_b, N * sizeof(*device_array_b));
DIE(err != cudaSuccess || device_array_b == NULL,
"cudaMalloc(device_array_b)");
err = cudaMalloc(&device_array_c, N * sizeof(*device_array_c));
DIE(err != cudaSuccess || device_array_c == NULL,
"cudaMalloc(device_array_c)");
// TODO 3: Check for allocation errors
// TODO 4: Fill array with values; use fill_array_float to fill
// host_array_a and fill_array_random to fill host_array_b. Each
// function has the signature (float *a, int n), where n = number of elements.
fill_array_float(host_array_a, N);
fill_array_random(host_array_b, N);
// TODO 5: Copy the host's arrays to device
err = cudaMemcpy(device_array_a, host_array_a,
N * sizeof(*host_array_a), cudaMemcpyHostToDevice);
DIE(err != cudaSuccess, "cudaMemcpy(host_array_a)");
err = cudaMemcpy(device_array_b, host_array_b,
N * sizeof(*host_array_b), cudaMemcpyHostToDevice);
DIE(err != cudaSuccess, "cudaMemcpy(host_array_b)");
// TODO 6: Execute the kernel, calculating first the grid size
// and the amount of threads in each block from the grid
// Hint: For this execise the block_size can have any value lower than the
// API's maximum value (it's recommended to be close to the maximum
// value).
// ceiling division so a partial final block still covers the tail
num_blocks = N / block_size;
if (N % block_size) {
++num_blocks;
}
add_arrays<<<num_blocks, block_size>>>(device_array_a, device_array_b,
device_array_c, N);
// synchronize to surface any asynchronous kernel-execution errors
err = cudaDeviceSynchronize();
DIE(err != cudaSuccess, "cudaDeviceSynchronize");
// TODO 7: Copy back the results and then uncomment the checking function
err = cudaMemcpy(host_array_c, device_array_c,
N * sizeof(*host_array_c), cudaMemcpyDeviceToHost);
DIE(err != cudaSuccess, "cudaMemcpy(device_array_c)");
check_task_2(host_array_a, host_array_b, host_array_c, N);
// TODO 8: Free the memory
free(host_array_a);
free(host_array_b);
free(host_array_c);
err = cudaFree(device_array_a);
DIE(err != cudaSuccess, "cudaFree(device_array_a)");
err = cudaFree(device_array_b);
DIE(err != cudaSuccess, "cudaFree(device_array_b)");
err = cudaFree(device_array_c);
DIE(err != cudaSuccess, "cudaFree(device_array_c)");
return 0;
}
|
c662d4d94792df4660aedcca2ca0db4d21cbab60.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <vector>
#include <sys/time.h>
#include <string>
#include "type.h"
#include "juliaset.cuh"
#include "lodepng.h"
#include "juliaAlgorithm.cuh"
using namespace std;
// Abort the process (exit code -1) if a HIP API call failed, printing
// the runtime's error string first. No-op on hipSuccess.
void checkError(hipError_t err) {
if (err == hipSuccess) {
return;
}
std::cout << hipGetErrorString(err) << std::endl;
exit(-1);
}
// Return the current wall-clock time in seconds since the epoch, with
// microsecond resolution.
// Fix: the original returned tv_sec * tv_usec * 1e-6 -- it MULTIPLIED
// the two fields instead of adding them, yielding a meaningless value
// (and 0 whenever tv_usec happened to be 0), so every timing printed by
// this program was garbage.
double getSeconds() {
timeval tp;
gettimeofday(&tp, NULL);
return ((double)tp.tv_sec + (double)tp.tv_usec * 1e-6);
}
// Render a 2048x2048 Julia-set image on the GPU and save it as a PNG.
// Command line: <threads-per-block-x> <threads-per-block-y>.
// Fixes in this revision:
//  - the argument check `argc > 4 || argc < 1` allowed argc==1/2, after
//    which argv[1]/argv[2] were read out of bounds; now exactly two
//    arguments are required;
//  - `hipError_t(hipPeekAtLastError())` cast the error away instead of
//    checking it; route it (and the synchronize) through checkError;
//  - the elapsed time is in seconds, so convert to ms with *1e3 (the
//    original multiplied by 1e-3 while labelling the output "ms");
//  - h_image_out is now freed instead of leaked.
int main(int argc, const char *argv[]){
if(argc != 3){std::cout << "Please enter only threadx and thready" << std::endl; abort();}
int threadsX = std::atoi(argv[1]);
int threadsY = std::atoi(argv[2]);
const char* filename1 = "julia1.png"; //const char* filename2 = "julia2.png" ;
const long long ARRAY_SIZE = 2048 * 2048 * 3; // RGB, 8 bits per channel
const long long ARRAY_BYTES = ARRAY_SIZE * sizeof(unsigned char);
unsigned char* h_image_out = new unsigned char[ARRAY_SIZE];
double startTime,stopTime;
unsigned char* d_image_out;
checkError(hipMalloc((void**)&d_image_out, ARRAY_BYTES));
startTime = getSeconds();
// assumes threadsX and threadsY divide 2048 -- TODO confirm/validate
dim3 threadsPerBlock(threadsX,threadsY);
dim3 numBlocks(2048/threadsPerBlock.x,2048/threadsPerBlock.y);
// Kernel launch
hipLaunchKernelGGL(( juliaAlgorithm) , dim3(numBlocks) , dim3(threadsPerBlock), 0, 0, d_image_out);
checkError(hipPeekAtLastError());
checkError(hipDeviceSynchronize());
stopTime = getSeconds();
// convert the elapsed time to milliseconds for display
std::cout << "time :" << (stopTime-startTime) * 1e3 << "ms" << std::endl;
checkError(hipMemcpy(h_image_out, d_image_out, ARRAY_BYTES, hipMemcpyDeviceToHost));
lodepng::encode(filename1, h_image_out, 2048, 2048,LCT_RGB,8);
checkError(hipFree(d_image_out));
delete[] h_image_out;
return 0;
}
| c662d4d94792df4660aedcca2ca0db4d21cbab60.cu | #include <iostream>
#include <vector>
#include <sys/time.h>
#include <string>
#include "type.h"
#include "juliaset.cuh"
#include "lodepng.h"
#include "juliaAlgorithm.cuh"
using namespace std;
// Abort the process (exit code -1) if a CUDA API call failed, printing
// the runtime's error string first. No-op on cudaSuccess.
void checkError(cudaError_t err) {
if (err == cudaSuccess) {
return;
}
std::cout << cudaGetErrorString(err) << std::endl;
exit(-1);
}
// Return the current wall-clock time in seconds since the epoch, with
// microsecond resolution.
// Fix: the original returned tv_sec * tv_usec * 1e-6 -- it MULTIPLIED
// the two fields instead of adding them, yielding a meaningless value
// (and 0 whenever tv_usec happened to be 0), so every timing printed by
// this program was garbage.
double getSeconds() {
timeval tp;
gettimeofday(&tp, NULL);
return ((double)tp.tv_sec + (double)tp.tv_usec * 1e-6);
}
// Render a 2048x2048 Julia-set image on the GPU and save it as a PNG.
// Command line: <threads-per-block-x> <threads-per-block-y>.
// Fixes in this revision:
//  - the argument check `argc > 4 || argc < 1` allowed argc==1/2, after
//    which argv[1]/argv[2] were read out of bounds; now exactly two
//    arguments are required;
//  - `cudaError(cudaPeekAtLastError())` cast the error away instead of
//    checking it; route it (and the synchronize) through checkError;
//  - the elapsed time is in seconds, so convert to ms with *1e3 (the
//    original multiplied by 1e-3 while labelling the output "ms");
//  - h_image_out is now freed instead of leaked.
int main(int argc, const char *argv[]){
if(argc != 3){std::cout << "Please enter only threadx and thready" << std::endl; abort();}
int threadsX = std::atoi(argv[1]);
int threadsY = std::atoi(argv[2]);
const char* filename1 = "julia1.png"; //const char* filename2 = "julia2.png" ;
const long long ARRAY_SIZE = 2048 * 2048 * 3; // RGB, 8 bits per channel
const long long ARRAY_BYTES = ARRAY_SIZE * sizeof(unsigned char);
unsigned char* h_image_out = new unsigned char[ARRAY_SIZE];
double startTime,stopTime;
unsigned char* d_image_out;
checkError(cudaMalloc((void**)&d_image_out, ARRAY_BYTES));
startTime = getSeconds();
// assumes threadsX and threadsY divide 2048 -- TODO confirm/validate
dim3 threadsPerBlock(threadsX,threadsY);
dim3 numBlocks(2048/threadsPerBlock.x,2048/threadsPerBlock.y);
// Kernel launch
juliaAlgorithm <<<numBlocks , threadsPerBlock>>> (d_image_out);
checkError(cudaPeekAtLastError());
checkError(cudaDeviceSynchronize());
stopTime = getSeconds();
// convert the elapsed time to milliseconds for display
std::cout << "time :" << (stopTime-startTime) * 1e3 << "ms" << std::endl;
checkError(cudaMemcpy(h_image_out, d_image_out, ARRAY_BYTES, cudaMemcpyDeviceToHost));
lodepng::encode(filename1, h_image_out, 2048, 2048,LCT_RGB,8);
checkError(cudaFree(d_image_out));
delete[] h_image_out;
return 0;
}
|
a9928fd5c8c0f02e881adf6f2830b1adab015ad6.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <cassert>
// Query and print basic properties of HIP device 0.
// Asserts that at least one device exists; the API return codes
// themselves are not checked.
int main() {
int n_devices;
hipGetDeviceCount(&n_devices);
assert(n_devices > 0);
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, 0);
printf("Device name: %s\n", prop.name);
printf("Shared memory per block (bytes): %ld\n", prop.sharedMemPerBlock);
printf("Max threads per block: %d\n", prop.maxThreadsPerBlock);
printf("Max threads per multiprocessor: %d\n", prop.maxThreadsPerMultiProcessor);
printf("Multiprocessor count: %d\n", prop.multiProcessorCount);
return 0;
}
| a9928fd5c8c0f02e881adf6f2830b1adab015ad6.cu | #include <stdio.h>
#include <cassert>
// Queries CUDA device 0 and prints a short capability summary.
// Returns non-zero when no device is present or a runtime call fails.
int main() {
    int n_devices = 0;
    // Check runtime status explicitly: the original ignored the return
    // codes entirely, and its assert() disappears under NDEBUG.
    if (cudaGetDeviceCount(&n_devices) != cudaSuccess || n_devices <= 0) {
        fprintf(stderr, "No CUDA devices found\n");
        return 1;
    }
    cudaDeviceProp prop;
    if (cudaGetDeviceProperties(&prop, 0) != cudaSuccess) {
        fprintf(stderr, "cudaGetDeviceProperties failed\n");
        return 1;
    }
    printf("Device name: %s\n", prop.name);
    printf("Shared memory per block (bytes): %ld\n", prop.sharedMemPerBlock);
    printf("Max threads per block: %d\n", prop.maxThreadsPerBlock);
    printf("Max threads per multiprocessor: %d\n", prop.maxThreadsPerMultiProcessor);
    printf("Multiprocessor count: %d\n", prop.multiProcessorCount);
    return 0;
}
|
a9edb9794e267e1bf1d65d63b87c58a8359cbccc.hip | // !!! This is a file automatically generated by hipify!!!
/*--------------------------------------------------------------------------*\
Copyright (c) 2008-2010, Danny Ruijters. All rights reserved.
http://www.dannyruijters.nl/cubicinterpolation/
This file is part of CUDA Cubic B-Spline Interpolation (CI).
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the copyright holders nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are
those of the authors and should not be interpreted as representing official
policies, either expressed or implied.
\*--------------------------------------------------------------------------*/
#ifndef _CUBIC3D_KERNEL_H_
#define _CUBIC3D_KERNEL_H_
#include "internal/bspline_kernel.cu"
//! Trilinearly interpolated texture lookup, using unnormalized coordinates.
//! This function merely serves as a reference for the tricubic versions.
//! @param tex 3D texture
//! @param coord unnormalized 3D texture coordinate
// Baseline lookup: a single hardware trilinear fetch at the given
// unnormalized coordinate (the cubic variants below are compared to this).
template<class T, enum hipTextureReadMode mode>
__device__ float linearTex3D( texture<T, 3, mode> tex, float3 coord )
{
	return tex3D( tex, coord.x, coord.y, coord.z );
}
//! Tricubic interpolated texture lookup, using unnormalized coordinates.
//! Straight forward implementation, using 64 nearest neighbour lookups.
//! @param tex 3D texture
//! @param coord unnormalized 3D texture coordinate
// Reference tricubic lookup: accumulates a separable B-spline-weighted sum
// over the 4x4x4 nearest-neighbor texel neighborhood (64 point fetches).
template<class T, enum hipTextureReadMode mode>
__device__ float cubicTex3DSimple( texture<T, 3, mode> tex, float3 coord )
{
	// transform the coordinate from [0,extent] to [-0.5, extent-0.5]
	const float3 coord_grid = coord - 0.5f;
	float3 index = floor(coord_grid);
	const float3 fraction = coord_grid - index;
	index = index + 0.5f;  //move from [-0.5, extent-0.5] to [0, extent]

	float result = 0.0f;
	// Loop bounds use < 2.5f so the float counters cover exactly {-1,0,1,2}.
	for ( float z = -1; z < 2.5f; z++ )  //range [-1, 2]
	{
		float bsplineZ = bspline( z - fraction.z );
		float w = index.z + z;
		for ( float y = -1; y < 2.5f; y++ )
		{
			// Weights are separable: the z factor is reused across the inner loops.
			float bsplineYZ = bspline( y - fraction.y ) * bsplineZ;
			float v = index.y + y;
			for ( float x = -1; x < 2.5f; x++ )
			{
				float bsplineXYZ = bspline( x - fraction.x ) * bsplineYZ;
				float u = index.x + x;
				result += bsplineXYZ * tex3D( tex, u, v, w );
			}
		}
	}
	return result;
}
//! Tricubic interpolated texture lookup, using unnormalized coordinates.
//! Fast implementation, using 8 trilinear lookups.
//! @param tex 3D texture
//! @param coord unnormalized 3D texture coordinate
#define WEIGHTS bspline_weights
#define CUBICTEX3D cubicTex3D
#include "internal/cubicTex3D_kernel.cu"
#undef CUBICTEX3D
#undef WEIGHTS
// Fast tricubic interpolated 1st order derivative texture lookup in x-, y-
// and z-direction, using unnormalized coordinates.
// Per-axis cubic B-spline weights for a d/dx lookup: the x component uses
// the first-derivative weights while y and z use the plain interpolation
// weights. Results land in the matching components of w0..w3.
__device__ void bspline_weights_1st_derivative_x( float3 fraction,
	float3& w0, float3& w1, float3& w2, float3& w3 )
{
	float a, b, c, d;
	bspline_weights_1st_derivative( fraction.x, a, b, c, d );
	w0.x = a; w1.x = b; w2.x = c; w3.x = d;
	bspline_weights( fraction.y, a, b, c, d );
	w0.y = a; w1.y = b; w2.y = c; w3.y = d;
	bspline_weights( fraction.z, a, b, c, d );
	w0.z = a; w1.z = b; w2.z = c; w3.z = d;
}
// Per-axis cubic B-spline weights for a d/dy lookup: the y component uses
// the first-derivative weights while x and z use the plain interpolation
// weights. Results land in the matching components of w0..w3.
__device__ void bspline_weights_1st_derivative_y( float3 fraction,
	float3& w0, float3& w1, float3& w2, float3& w3 )
{
	float a, b, c, d;
	bspline_weights( fraction.x, a, b, c, d );
	w0.x = a; w1.x = b; w2.x = c; w3.x = d;
	bspline_weights_1st_derivative( fraction.y, a, b, c, d );
	w0.y = a; w1.y = b; w2.y = c; w3.y = d;
	bspline_weights( fraction.z, a, b, c, d );
	w0.z = a; w1.z = b; w2.z = c; w3.z = d;
}
// Per-axis cubic B-spline weights for a d/dz lookup: the z component uses
// the first-derivative weights while x and y use the plain interpolation
// weights. Results land in the matching components of w0..w3.
__device__ void bspline_weights_1st_derivative_z( float3 fraction,
	float3& w0, float3& w1, float3& w2, float3& w3 )
{
	float a, b, c, d;
	bspline_weights( fraction.x, a, b, c, d );
	w0.x = a; w1.x = b; w2.x = c; w3.x = d;
	bspline_weights( fraction.y, a, b, c, d );
	w0.y = a; w1.y = b; w2.y = c; w3.y = d;
	bspline_weights_1st_derivative( fraction.z, a, b, c, d );
	w0.z = a; w1.z = b; w2.z = c; w3.z = d;
}
#define WEIGHTS bspline_weights_1st_derivative_x
#define CUBICTEX3D cubicTex3D_1st_derivative_x
#include "internal/cubicTex3D_kernel.cu"
#undef CUBICTEX3D
#undef WEIGHTS
#define WEIGHTS bspline_weights_1st_derivative_y
#define CUBICTEX3D cubicTex3D_1st_derivative_y
#include "internal/cubicTex3D_kernel.cu"
#undef CUBICTEX3D
#undef WEIGHTS
#define WEIGHTS bspline_weights_1st_derivative_z
#define CUBICTEX3D cubicTex3D_1st_derivative_z
#include "internal/cubicTex3D_kernel.cu"
#undef CUBICTEX3D
#undef WEIGHTS
#endif // _CUBIC3D_KERNEL_H_
| a9edb9794e267e1bf1d65d63b87c58a8359cbccc.cu | /*--------------------------------------------------------------------------*\
Copyright (c) 2008-2010, Danny Ruijters. All rights reserved.
http://www.dannyruijters.nl/cubicinterpolation/
This file is part of CUDA Cubic B-Spline Interpolation (CI).
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the copyright holders nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are
those of the authors and should not be interpreted as representing official
policies, either expressed or implied.
\*--------------------------------------------------------------------------*/
#ifndef _CUBIC3D_KERNEL_H_
#define _CUBIC3D_KERNEL_H_
#include "internal/bspline_kernel.cu"
//! Trilinearly interpolated texture lookup, using unnormalized coordinates.
//! This function merely serves as a reference for the tricubic versions.
//! @param tex 3D texture
//! @param coord unnormalized 3D texture coordinate
// Baseline lookup: a single hardware trilinear fetch at the given
// unnormalized coordinate (the cubic variants below are compared to this).
template<class T, enum cudaTextureReadMode mode>
__device__ float linearTex3D( texture<T, 3, mode> tex, float3 coord )
{
	return tex3D( tex, coord.x, coord.y, coord.z );
}
//! Tricubic interpolated texture lookup, using unnormalized coordinates.
//! Straight forward implementation, using 64 nearest neighbour lookups.
//! @param tex 3D texture
//! @param coord unnormalized 3D texture coordinate
// Reference tricubic lookup: accumulates a separable B-spline-weighted sum
// over the 4x4x4 nearest-neighbor texel neighborhood (64 point fetches).
template<class T, enum cudaTextureReadMode mode>
__device__ float cubicTex3DSimple( texture<T, 3, mode> tex, float3 coord )
{
	// transform the coordinate from [0,extent] to [-0.5, extent-0.5]
	const float3 coord_grid = coord - 0.5f;
	float3 index = floor(coord_grid);
	const float3 fraction = coord_grid - index;
	index = index + 0.5f;  //move from [-0.5, extent-0.5] to [0, extent]

	float result = 0.0f;
	// Loop bounds use < 2.5f so the float counters cover exactly {-1,0,1,2}.
	for ( float z = -1; z < 2.5f; z++ )  //range [-1, 2]
	{
		float bsplineZ = bspline( z - fraction.z );
		float w = index.z + z;
		for ( float y = -1; y < 2.5f; y++ )
		{
			// Weights are separable: the z factor is reused across the inner loops.
			float bsplineYZ = bspline( y - fraction.y ) * bsplineZ;
			float v = index.y + y;
			for ( float x = -1; x < 2.5f; x++ )
			{
				float bsplineXYZ = bspline( x - fraction.x ) * bsplineYZ;
				float u = index.x + x;
				result += bsplineXYZ * tex3D( tex, u, v, w );
			}
		}
	}
	return result;
}
//! Tricubic interpolated texture lookup, using unnormalized coordinates.
//! Fast implementation, using 8 trilinear lookups.
//! @param tex 3D texture
//! @param coord unnormalized 3D texture coordinate
#define WEIGHTS bspline_weights
#define CUBICTEX3D cubicTex3D
#include "internal/cubicTex3D_kernel.cu"
#undef CUBICTEX3D
#undef WEIGHTS
// Fast tricubic interpolated 1st order derivative texture lookup in x-, y-
// and z-direction, using unnormalized coordinates.
// Per-axis cubic B-spline weights for a d/dx lookup: the x component uses
// the first-derivative weights while y and z use the plain interpolation
// weights. Results land in the matching components of w0..w3.
__device__ void bspline_weights_1st_derivative_x( float3 fraction,
	float3& w0, float3& w1, float3& w2, float3& w3 )
{
	float a, b, c, d;
	bspline_weights_1st_derivative( fraction.x, a, b, c, d );
	w0.x = a; w1.x = b; w2.x = c; w3.x = d;
	bspline_weights( fraction.y, a, b, c, d );
	w0.y = a; w1.y = b; w2.y = c; w3.y = d;
	bspline_weights( fraction.z, a, b, c, d );
	w0.z = a; w1.z = b; w2.z = c; w3.z = d;
}
// Per-axis cubic B-spline weights for a d/dy lookup: the y component uses
// the first-derivative weights while x and z use the plain interpolation
// weights. Results land in the matching components of w0..w3.
__device__ void bspline_weights_1st_derivative_y( float3 fraction,
	float3& w0, float3& w1, float3& w2, float3& w3 )
{
	float a, b, c, d;
	bspline_weights( fraction.x, a, b, c, d );
	w0.x = a; w1.x = b; w2.x = c; w3.x = d;
	bspline_weights_1st_derivative( fraction.y, a, b, c, d );
	w0.y = a; w1.y = b; w2.y = c; w3.y = d;
	bspline_weights( fraction.z, a, b, c, d );
	w0.z = a; w1.z = b; w2.z = c; w3.z = d;
}
// Per-axis cubic B-spline weights for a d/dz lookup: the z component uses
// the first-derivative weights while x and y use the plain interpolation
// weights. Results land in the matching components of w0..w3.
__device__ void bspline_weights_1st_derivative_z( float3 fraction,
	float3& w0, float3& w1, float3& w2, float3& w3 )
{
	float a, b, c, d;
	bspline_weights( fraction.x, a, b, c, d );
	w0.x = a; w1.x = b; w2.x = c; w3.x = d;
	bspline_weights( fraction.y, a, b, c, d );
	w0.y = a; w1.y = b; w2.y = c; w3.y = d;
	bspline_weights_1st_derivative( fraction.z, a, b, c, d );
	w0.z = a; w1.z = b; w2.z = c; w3.z = d;
}
#define WEIGHTS bspline_weights_1st_derivative_x
#define CUBICTEX3D cubicTex3D_1st_derivative_x
#include "internal/cubicTex3D_kernel.cu"
#undef CUBICTEX3D
#undef WEIGHTS
#define WEIGHTS bspline_weights_1st_derivative_y
#define CUBICTEX3D cubicTex3D_1st_derivative_y
#include "internal/cubicTex3D_kernel.cu"
#undef CUBICTEX3D
#undef WEIGHTS
#define WEIGHTS bspline_weights_1st_derivative_z
#define CUBICTEX3D cubicTex3D_1st_derivative_z
#include "internal/cubicTex3D_kernel.cu"
#undef CUBICTEX3D
#undef WEIGHTS
#endif // _CUBIC3D_KERNEL_H_
|
5b0afbb47ca2916a7f2ab96922124c42f4d27a41.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/Dispatch.h>
#include <ATen/ExpandUtils.h>
#include <ATen/NativeFunctions.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/AccumulateType.h>
#include <ATen/CUDAGenerator.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <hiprand/hiprand_kernel.h>
#include <utility>
#include <functional>
#include <ATen/native/Distributions.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/TensorIterator.h>
#include <ATen/LegacyTHFunctionsCUDA.h>
#include <THH/THHGeneral.h>
#include <THH/THHTensorRandom.h>
#include <THH/THHGenerator.hpp>
#include <THH/THHApply.cuh>
#include <THH/THHDeviceUtils.cuh>
#include <cstdint>
#include <limits>
#include <utility>
#include <type_traits>
/**
* Note [Register spilling in hiprand call for CUDA < 10]
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* For CUDA < 10, hiprandStatePhilox4_32_10_t engine achieves poor performance (60% SOL bandwidth)
* when called to generate one random number at a time. This is because the line
* unsigned ret = (&state->output.x)[state->STATE++];
* in
* QUALIFIERS unsigned int hiprand(hiprandStatePhilox4_32_10_t *state)
* in hiprand/hiprand_kernel.h dynamically indexes into state.output, preventing the compiler from ever
* storing state.output in registers.
*
* CUDA 10 fixed this problem. However, for backwards compatibility, in the following kernels
* we are using hiprand distributions that utilize hiprand4 call. hiprand4 call doesn't have the
* register spilling problem.
*/
THCGenerator* THCRandom_getGenerator(THCState* state);
namespace {
// Increment should be at least the number of hiprand() random numbers used in
// each thread. It is the user's responsibility to make sure that the increment for philox is never
// smaller than the number of hiprand() calls. Increment value > the number of hiprand() calls
// won't harm but anything less would mean that you would be reusing random values from
// previous calls.
// e.g. In many kernels below, we use distributions that utilize hiprand4 call in the kernel.
// Hence, increment value should be at least 4 for those kernels.
// Reserves a disjoint Philox counter range of size `increment` and returns
// (seed, starting offset) for it.
// NOTE(review): the `gen` argument is unused — the global THC generator is
// always consulted; confirm that is intended.
std::pair<uint64_t, uint64_t> next_philox_seed(at::Generator* gen, uint64_t increment) {
  auto gen_ = THCRandom_getGenerator(at::globalContext().getTHCState());
  // fetch_add returns the pre-increment value, so concurrent callers each
  // get a non-overlapping offset range.
  uint64_t offset = gen_->state.philox_seed_offset.fetch_add(increment);
  return std::make_pair(gen_->state.initial_seed, offset);
}
// launch bounds used for kernels utilizing TensorIterator
const uint32_t block_size_bound = 256;
const uint32_t grid_size_bound = 4;
// number of randoms given by distributions like hiprand_uniform4, hiprand_uniform2_double
// used in calculating philox offset.
const uint32_t curand4_engine_calls = 4;
// utility function that calculates proper philox_offset
// for distributions utilizing TensorIterator. For distributions using
// TensorIterator, we are using a grid-stride loop with each
// thread yielding one element per thread. For the edge of the grid-stride
// loop, if the tensor size is large, the unroll loop will kick in and the float4
// from hiprand4 will start getting utilized (for common tensor sizes, we end up
// using rand.x from each thread). Hence, the philox_offset is
// (number of elements per thread * number of engine calls), which makes
// sure that philox offset increment is not less than the number of randoms used
// in each thread.
// Computes (philox counter offset, grid, block) for the grid-stride
// distribution kernels. The grid is capped at what the device can keep
// resident, and the counter offset accounts for how many engine calls each
// thread will make given the unroll factor.
std::tuple<uint64_t, dim3, dim3> calc_execution_policy(int64_t total_elements) {
  const uint64_t numel = static_cast<uint64_t>(total_elements);
  const uint32_t block_size = block_size_bound;
  const uint32_t unroll = curand4_engine_calls;
  dim3 dim_block(block_size);
  dim3 grid((numel + block_size - 1) / block_size);
  // Limit the grid to (blocks that fit per SM) * (SM count) so extra
  // blocks don't just queue; the grid-stride loop covers the remainder.
  uint32_t blocks_per_sm = at::cuda::getCurrentDeviceProperties()->maxThreadsPerMultiProcessor / block_size;
  grid.x = std::min(
      static_cast<uint32_t>(at::cuda::getCurrentDeviceProperties()->multiProcessorCount) * blocks_per_sm,
      grid.x);
  // number of times random will be generated per thread, to offset philox
  // counter in thc random state
  uint64_t counter_offset = ((numel - 1) / (block_size * grid.x * unroll) + 1)
                                * curand4_engine_calls;
  return std::make_tuple(counter_offset, grid, dim_block);
}
// grid stride loop kernel for distributions
// Grid-stride kernel: each iteration draws one vectorized sample from
// dist_func (e.g. a hiprand_*4 call yielding unroll_factor values) and
// writes up to unroll_factor transformed outputs.
template<typename accscalar_t, int unroll_factor, typename dist_t, typename transform_t>
C10_LAUNCH_BOUNDS_2(block_size_bound, grid_size_bound)
__global__ void distribution_elementwise_grid_stride_kernel(int numel,
                                                            std::pair<uint64_t, uint64_t> seeds,
                                                            const dist_t dist_func,
                                                            const transform_t transform_func) {
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  // Each thread uses its linear id as the Philox subsequence so streams
  // never overlap across threads.
  hiprandStatePhilox4_32_10_t state;
  hiprand_init(
      seeds.first,
      idx,
      seeds.second,
      &state);
  // Round numel up to a full stride multiple so every thread performs the
  // same number of dist_func calls (keeps RNG state advancement uniform).
  int rounded_size = ((numel - 1)/(blockDim.x * gridDim.x * unroll_factor)+1) *
      blockDim.x * gridDim.x * unroll_factor;
  for(int linear_index = idx; linear_index < rounded_size; linear_index += blockDim.x * gridDim.x * unroll_factor) {
    auto rand = dist_func(&state);
    #pragma unroll
    for (int ii = 0; ii < unroll_factor; ii++) {
      // rand's components are laid out contiguously from rand.x, so
      // (&rand.x)[ii] selects the ii-th drawn value.
      int li = linear_index + blockDim.x * gridDim.x * ii;
      if (li < numel) {
        transform_func(li, static_cast<accscalar_t>((&rand.x)[ii]));
      }
    }
    __syncthreads();
  }
}
/**
* distribution_nullary_kernel is analogous to gpu_nullary_kernel in
* ATen/native/cuda/Loops.cuh. Like gpu_nullary_kernel, it uses
* TensorIterator to launch a kernel. However, the differences are
* - it launches a grid-stride loop based kernel. The kernel is not
* generic like elementwise_kernel in Loops.cuh and is specialized
* for the distribution kernels here.
* - For big size tensors, we can launch multiple kernels recursively
* (i.e. if (!iter.can_use_32bit_indexing())) and hence, the philox
* offset calculation is done in this function.
*
* FIXME: Can we specialize elementwise_kernel and launch_kernel in Loops.cuh
* to have grid-stride loop kernel and then use that to launch our distribution
* kernels? Note that we need a grid-stride loop kernel because, we found by testing
* that it achieves peak effective bandwidth.
*/
template<typename scalar_t,
         typename accscalar_t,
         int unroll_factor,
         typename dist_t,
         typename transform_t>
void distribution_nullary_kernel(at::TensorIterator& iter,
                                 at::Generator* gen,
                                 const dist_t& dist_func,
                                 const transform_t transform_func) {
  static_assert(unroll_factor >= 1, "unroll_factor must be >= 1.");
  int64_t numel = iter.numel();
  if (numel == 0) {
    return;
  }
  // Reserve the philox counter range BEFORE any recursive split so the
  // whole launch shares one seed/offset pair.
  auto execution_policy = calc_execution_policy(numel);
  auto counter_offset = std::get<0>(execution_policy);
  auto grid = std::get<1>(execution_policy);
  auto block = std::get<2>(execution_policy);
  auto seeds = next_philox_seed(gen, counter_offset);
  // Tensors too large for 32-bit indexing are processed as 32-bit
  // addressable sub-iterators, each with its own launch.
  if (!iter.can_use_32bit_indexing()) {
    for (auto& sub_iter : iter.with_32bit_indexing()) {
      distribution_nullary_kernel<scalar_t, accscalar_t, unroll_factor>(sub_iter,
        gen, dist_func, transform_func);
    }
    return;
  }
  char* out_data = (char*)iter.data_ptr(0);
  auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
  if (iter.is_trivial_1d()) {
    // Contiguous (single-stride) fast path: byte offset is stride0 * idx.
    auto strides = iter.get_inner_strides();
    int stride0 = strides[0];
    hipLaunchKernelGGL(( distribution_elementwise_grid_stride_kernel<accscalar_t, unroll_factor>), dim3(grid), dim3(block), 0, stream,
      numel,
      seeds,
      dist_func,
      [=]__device__(int idx, accscalar_t rand) {
        scalar_t* out = (scalar_t*)&out_data[stride0 * idx];
        *out = transform_func(rand);
      }
    );
  } else {
    // Strided path: compute each element's byte offset via the iterator's
    // offset calculator.
    auto offset_calc = at::native::make_offset_calculator<1>(iter);
    hipLaunchKernelGGL(( distribution_elementwise_grid_stride_kernel<accscalar_t, unroll_factor>), dim3(grid), dim3(block), 0, stream,
      numel,
      seeds,
      dist_func,
      [=]__device__(int idx, accscalar_t rand) {
        auto offsets = offset_calc.get(idx);
        scalar_t* out = (scalar_t*)&out_data[offsets[0]];
        *out = transform_func(rand);
      }
    );
  }
  AT_CUDA_CHECK(hipGetLastError());
}
// Fills `ret` elementwise with Poisson(lambda) samples; each GPU thread
// seeds its own Philox stream from the shared (seed, offset) pair.
template <typename scalar_t>
void poisson_cuda_kernel(
    at::Tensor& ret,
    const at::Tensor& lambda,
    std::pair<uint64_t, uint64_t> seeds) {
  at::cuda::CUDA_tensor_apply2<scalar_t, scalar_t>(
      ret,
      lambda,
      [seeds] __device__(
          scalar_t & ret_val, const scalar_t& lambda) {
        hiprandStatePhilox4_32_10_t state;
        hiprand_init(
            seeds.first,
            blockIdx.x * blockDim.x + threadIdx.x,
            seeds.second,
            &state);
        ret_val = static_cast<scalar_t>(hiprand_poisson(&state, lambda));
      });
}
// Fills `ret` elementwise with Gamma(alpha, 1) samples using
// sample_gamma<> driven by per-thread uniform and normal draws.
template <typename scalar_t>
void gamma_cuda_kernel(
    at::Tensor& ret,
    const at::Tensor& alpha,
    std::pair<uint64_t, uint64_t> seeds) {
  using accscalar_t = at::acc_type<scalar_t, true>;
  at::cuda::CUDA_tensor_apply2<scalar_t, scalar_t>(
      ret,
      alpha,
      [seeds] __device__(
          scalar_t & ret_val, const scalar_t& alpha) {
        hiprandStatePhilox4_32_10_t state;
        hiprand_init(
            seeds.first,
            blockIdx.x * blockDim.x + threadIdx.x,
            seeds.second,
            &state);
        // sample_gamma pulls uniforms/normals on demand via these lambdas.
        auto uniform_lambda = [&state] __device__ () {
          return hiprand_uniform(&state);
        };
        BaseSampler<accscalar_t, decltype(uniform_lambda)> standard_uniform(uniform_lambda);
        auto normal_lambda = [&state] __device__ () {
          return hiprand_normal(&state);
        };
        BaseSampler<accscalar_t, decltype(normal_lambda)> standard_normal(normal_lambda);
        auto sample = sample_gamma<scalar_t, accscalar_t, decltype(uniform_lambda), decltype(normal_lambda)>(alpha, standard_uniform, standard_normal);
        // Clamp up to the smallest positive normal value so a zero sample
        // never escapes (downstream code may take logs / divide).
        auto min_value = std::numeric_limits<scalar_t>::min();
        ret_val = (min_value > sample) ? min_value : sample;
      });
}
// Elementwise reparameterized gradient of a standard-gamma sample w.r.t.
// its concentration: ret = d(output)/d(self) via standard_gamma_grad_one.
template <typename scalar_t>
void gamma_grad_cuda_kernel(
    at::Tensor& ret,
    const at::Tensor& self,
    const at::Tensor& output) {
  using accscalar_t = at::acc_type<scalar_t, true>;
  at::cuda::CUDA_tensor_apply3<scalar_t, scalar_t, scalar_t>(
      ret, self, output,
      [] __device__ (scalar_t& ret_val, const scalar_t& self_val, const scalar_t &output_val) {
        ret_val = standard_gamma_grad_one<scalar_t, accscalar_t>(self_val, output_val);
      });
}
// Elementwise Dirichlet sample gradient: ret = dirichlet_grad_one(x, alpha,
// total) applied over the four tensors.
template <typename scalar_t>
void dirichlet_grad_cuda_kernel(
    at::Tensor& ret,
    const at::Tensor& x,
    const at::Tensor& alpha,
    const at::Tensor& total) {
  using accscalar_t = at::acc_type<scalar_t, true>;
  at::cuda::CUDA_tensor_apply4<scalar_t, scalar_t, scalar_t, scalar_t>(
      ret, x, alpha, total,
      [] __device__ (scalar_t& ret_val, const scalar_t& x_val, const scalar_t& alpha_val, const scalar_t& total_val) {
        ret_val = dirichlet_grad_one<scalar_t, accscalar_t>(x_val, alpha_val, total_val);
      });
}
// Fills `ret` with Bernoulli draws using per-element probabilities `p`.
// Processes four elements per thread so one hiprand_uniform4 call covers
// all of them.
template<typename scalar_t, typename prob_t>
void bernoulli_tensor_cuda_kernel(
    at::Tensor& ret, const at::Tensor& p,
    std::pair<uint64_t, uint64_t> seeds) {
  // The template argument `4` below indicates that we want to operate on four
  // element at each time. See NOTE [ CUDA_tensor_applyN helpers ] for details.
  at::cuda::CUDA_tensor_apply2<scalar_t, prob_t, 4>(
      ret, p,
      [seeds] __device__(
          int n, scalar_t& v1, scalar_t& v2, scalar_t& v3, scalar_t& v4,
          const prob_t& p1, const prob_t& p2, const prob_t& p3, const prob_t& p4) {
        hiprandStatePhilox4_32_10_t state;
        hiprand_init(
            seeds.first,
            blockIdx.x * blockDim.x + threadIdx.x,
            seeds.second,
            &state);
        // See Note [Register spilling in hiprand call for CUDA < 10]
        float4 rand = hiprand_uniform4(&state);
        // `n` is how many of the four slots are valid for this thread;
        // the switch falls through so exactly n outputs are written.
        switch (n) {
          case 4: {
            assert(0 <= p4 && p4 <= 1);
            v4 = static_cast<scalar_t>(rand.w <= p4);
            // fallthrough
          }
          case 3: {
            assert(0 <= p3 && p3 <= 1);
            v3 = static_cast<scalar_t>(rand.z <= p3);
            // fallthrough
          }
          case 2: {
            assert(0 <= p2 && p2 <= 1);
            v2 = static_cast<scalar_t>(rand.y <= p2);
            // fallthrough
          }
          case 1: {
            assert(0 <= p1 && p1 <= 1);
            v1 = static_cast<scalar_t>(rand.x <= p1);
          }
        }
      }
  );
}
// Normalizes per-element gamma samples by their sum over the last
// dimension to produce Dirichlet samples, clamping into the open (0, 1)
// interval representable by scalar_t.
template<typename scalar_t>
void dirichlet_scalar_cuda_kernel(
    at::Tensor& ret,
    const at::Tensor& gamma) {
  // Broadcast the last-dim sum back to the full shape for elementwise use.
  auto gamma_sum = gamma.sum(-1, true).expand(ret.sizes());
  at::cuda::CUDA_tensor_apply3<scalar_t, scalar_t, scalar_t>(ret, gamma, gamma_sum,
      [] __device__(scalar_t &ret_val, const scalar_t &gamma, const scalar_t &gamma_sum) {
        ret_val = gamma / gamma_sum;
        // Keep results strictly inside (0, 1): avoid exact 0 and exact 1.
        auto min_value = std::numeric_limits<scalar_t>::min();
        auto max_value = 1 - std::numeric_limits<scalar_t>::epsilon();
        ret_val = (min_value > ret_val) ? min_value : ret_val;
        ret_val = (max_value < ret_val) ? max_value : ret_val;
      });
}
} // namespace
namespace at { namespace native {
// Samples a Poisson-distributed tensor with the same shape/options as
// `lambda`. The philox increment of 20 reserves enough counter space for
// the per-element hiprand_poisson draws.
Tensor _s_poisson_cuda(const Tensor& lambda, Generator* gen) {
  Tensor ret = at::empty(lambda.sizes(), lambda.options());
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(ret.scalar_type(), "poisson_cuda", [&] {
    poisson_cuda_kernel<scalar_t>(ret, lambda, next_philox_seed(gen, 20));
  });
  return ret;
}
// Samples a Gamma(alpha, 1) tensor with the same shape/options as `alpha`.
Tensor _s_gamma_cuda(const Tensor& alpha, Generator* gen) {
  Tensor ret = at::empty(alpha.sizes(), alpha.options());
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(ret.scalar_type(), "gamma_cuda", [&] {
    gamma_cuda_kernel<scalar_t>(ret, alpha, next_philox_seed(gen, 10));
  });
  return ret;
}
// Samples a Dirichlet(alpha) tensor: draws Gamma(alpha) variates and
// normalizes them along the last dimension.
Tensor _s_dirichlet_cuda(const Tensor& alpha, Generator* gen) {
  Tensor ret = at::empty(alpha.sizes(), alpha.options());
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(ret.scalar_type(), "dirichlet", [&] {
    Tensor gamma = at::empty(alpha.sizes(), alpha.options());
    gamma_cuda_kernel<scalar_t>(gamma, alpha, next_philox_seed(gen, 10));
    dirichlet_scalar_cuda_kernel<scalar_t>(ret, gamma);
  });
  return ret;
}
// Gradient of a standard-gamma sample (`output`) w.r.t. its concentration
// (`self`), computed elementwise.
Tensor _standard_gamma_grad_cuda(const Tensor& self, const Tensor& output) {
  Tensor ret = at::empty(self.sizes(), self.options());
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(self.scalar_type(), "_standard_gamma_grad_cuda", [&] {
    gamma_grad_cuda_kernel<scalar_t>(ret, self, output);
  });
  return ret;
}
// Elementwise Dirichlet sample gradient; note this dispatches only on
// float/double (no half), unlike its sampling counterpart.
Tensor _dirichlet_grad_cuda(const Tensor& x, const Tensor& alpha, const Tensor& total) {
  Tensor ret = at::empty(x.sizes(), x.options());
  AT_DISPATCH_FLOATING_TYPES(x.scalar_type(), "_dirichlet_grad_cuda", [&] {
    dirichlet_grad_cuda_kernel<scalar_t>(ret, x, alpha, total);
  });
  return ret;
}
// In-place Bernoulli sampling of `self` with per-element probabilities
// `p_` (moved to CUDA and broadcast against `self`). Double dispatch:
// outer over self's dtype, inner over p's floating dtype.
Tensor& bernoulli_tensor_cuda_(Tensor &self, const Tensor& p_, Generator* gen) {
  auto p = std::get<0>(expand_inplace(self, p_.to(kCUDA)));
  AT_DISPATCH_ALL_TYPES_AND(
    at::ScalarType::Half, self.scalar_type(), "bernoulli_tensor_cuda_self_", [&] {
      using self_t = scalar_t;
      auto seeds = next_philox_seed(gen, 10);
      AT_DISPATCH_FLOATING_TYPES_AND_HALF(p.scalar_type(), "bernoulli_tensor_cuda_p_", [&] {
        using p_t = scalar_t;
        return bernoulli_tensor_cuda_kernel<self_t, p_t>(self, p, seeds);
      });
   });
  return self;
}
// Fills the iterator's output with samples from Uniform[from, to) using
// the grid-stride distribution kernel. Doubles use the 2-wide engine call,
// other floating types the 4-wide call.
void uniform_kernel_cuda(TensorIterator& iter, double from_, double to_, Generator* gen_) {
  auto gen = check_generator<CUDAGenerator>(gen_, &globalContext().defaultGenerator(kCUDA));
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "uniform_cuda", [&] {
    auto from = static_cast<scalar_t>(from_);
    auto to = static_cast<scalar_t>(to_);
    TORCH_CHECK(from <= to,
      "uniform_ expects to return a [from, to) range, but found from=", from,
      " > to=", to);
    TORCH_CHECK((to - from) <= std::numeric_limits<scalar_t>::max(),
          "uniform_ expects to-from <= std::numeric_limits<", toString(iter.dtype()),
          ">::max(), but found to=", to, " and from=", from,
          " which result in to-from to exceed the limit");
    using accscalar_t = at::acc_type<scalar_t, true>;
    auto range = static_cast<accscalar_t>(to-from);
    from = static_cast<accscalar_t>(from);
    // define lambda to reverse bounds, multiply 'range' and add 'from_'
    auto uniform_func = [range, from] __device__ (accscalar_t rand) {
      // reverse the bounds of hiprand4 from (0, 1] to [0, 1)
      // Note that this method is from legacy THCTensorRandom and is likely to give
      // you more 0-s, since, the probability of gettings 1-s is higher than 0-s and
      // by reversing the bounds, we are flipping the probabilities of 1-s and 0-s.
      auto reverse_bound_rand = rand == static_cast<accscalar_t>(1.0) ? static_cast<accscalar_t>(0.0) : rand;
      return static_cast<scalar_t>(reverse_bound_rand * range + from);
    };
    if (std::is_same<scalar_t, double>::value) {
      distribution_nullary_kernel<scalar_t, accscalar_t, curand4_engine_calls/2>(iter,
        gen,
        [] __device__ (hiprandStatePhilox4_32_10_t* state) { return hiprand_uniform2_double(state); },
        uniform_func);
    } else {
      distribution_nullary_kernel<scalar_t, accscalar_t, curand4_engine_calls>(iter,
        gen,
        [] __device__ (hiprandStatePhilox4_32_10_t* state) { return hiprand_uniform4(state); },
        uniform_func);
    }
   });
}
// Fills the iterator's output with integers in [base, base + range) via
// modular reduction of raw Philox bits. 64-bit-capable dtypes (double,
// int64) assemble 64-bit randoms from two 32-bit halves; others use 32-bit
// randoms directly.
void random_kernel_cuda(TensorIterator& iter, uint64_t range, int64_t base, Generator* gen_) {
  auto gen = check_generator<CUDAGenerator>(gen_, &globalContext().defaultGenerator(kCUDA));
  AT_DISPATCH_ALL_TYPES_AND2(at::ScalarType::Bool, at::ScalarType::Half, iter.dtype(), "random_cuda", [&] {
    if (std::is_same<scalar_t, double>::value || std::is_same<scalar_t, int64_t>::value) {
      // define lambda to mod with range and add base
      auto random_func = [range, base] __device__ (uint64_t rand) {
        return static_cast<int64_t>(rand % range + base);
      };
      distribution_nullary_kernel<scalar_t, uint64_t, curand4_engine_calls/2>(iter,
        gen,
        [] __device__ (hiprandStatePhilox4_32_10_t* state) -> ulonglong2 {
          // Pack four 32-bit draws into two 64-bit values.
          ulonglong2 ret;
          uint4 rand_val = hiprand4(state);
          ret.x = (static_cast<uint64_t>(rand_val.x) << 32) | rand_val.y;
          ret.y = (static_cast<uint64_t>(rand_val.z) << 32) | rand_val.w;
          return ret;
        },
        random_func);
    } else {
      auto random_func = [range, base] __device__ (uint32_t rand) {
        return static_cast<int32_t>(rand % static_cast<uint32_t>(range) + static_cast<int32_t>(base));
      };
      distribution_nullary_kernel<scalar_t, uint32_t, curand4_engine_calls>(iter,
        gen,
        [] __device__ (hiprandStatePhilox4_32_10_t* state) {
          return hiprand4(state);
        },
        random_func);
    }
   });
}
// Fills the iterator's output with Normal(mean, std) samples: standard
// normals from the engine scaled by std and shifted by mean.
void normal_kernel_cuda(TensorIterator& iter, double mean_, double std_, Generator* gen_) {
  auto gen = check_generator<CUDAGenerator>(gen_, &globalContext().defaultGenerator(kCUDA));
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "normal_cuda", [&] {
    using accscalar_t = at::acc_type<scalar_t, true>;
    auto mean = static_cast<accscalar_t>(mean_);
    auto std = static_cast<accscalar_t>(std_);
    // define lambda to multiply std and add mean
    auto normal_func = [mean, std] __device__ (accscalar_t rand) {
      return static_cast<scalar_t>(rand * std + mean);
    };
    if (std::is_same<scalar_t, double>::value) {
      distribution_nullary_kernel<scalar_t, accscalar_t, curand4_engine_calls/2>(iter,
        gen,
        [] __device__ (hiprandStatePhilox4_32_10_t* state) { return hiprand_normal2_double(state); },
        normal_func);
    } else {
      distribution_nullary_kernel<scalar_t, accscalar_t, curand4_engine_calls>(iter,
        gen,
        [] __device__ (hiprandStatePhilox4_32_10_t* state) { return hiprand_normal4(state); },
        normal_func);
    }
   });
}
// Fills the iterator's output with Cauchy(median, sigma) samples via the
// inverse-CDF transform median + sigma * tan(pi * (u - 0.5)). Non-double
// types use the __tanf hardware approximation for throughput.
void cauchy_kernel_cuda(TensorIterator& iter, double median_, double sigma_, Generator* gen_) {
  auto gen = check_generator<CUDAGenerator>(gen_, &globalContext().defaultGenerator(kCUDA));
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "cauchy_cuda", [&] {
    using accscalar_t = at::acc_type<scalar_t, true>;
    auto median = static_cast<accscalar_t>(median_);
    auto sigma = static_cast<accscalar_t>(sigma_);
    if (std::is_same<scalar_t, double>::value) {
      // define lambda for cauchy transformation
      auto cauchy_func = [median, sigma] __device__ (accscalar_t rand) {
        return static_cast<scalar_t>(median + sigma *
                ::tan(static_cast<accscalar_t>(M_PI) * (rand-static_cast<accscalar_t>(0.5))));
      };
      distribution_nullary_kernel<scalar_t, accscalar_t, curand4_engine_calls/2>(iter,
        gen,
        [] __device__ (hiprandStatePhilox4_32_10_t* state) { return hiprand_uniform2_double(state); },
        cauchy_func);
    } else {
      // use __tanf fast approximation for peak bandwidth
      auto cauchy_func = [median, sigma] __device__ (accscalar_t rand) {
        return static_cast<scalar_t>(median + sigma *
                __tanf(static_cast<accscalar_t>(M_PI) * (rand-static_cast<accscalar_t>(0.5))));
      };
      distribution_nullary_kernel<scalar_t, accscalar_t, curand4_engine_calls>(iter,
        gen,
        [] __device__ (hiprandStatePhilox4_32_10_t* state) { return hiprand_uniform4(state); },
        cauchy_func);
    }
   });
}
// Fills `iter`'s output with Exponential(lambda_) samples (HIP build)
// via inverse-CDF: -log(u) / lambda on uniform u in (0, 1].
// A draw of exactly 1.0 is squashed to nextafter(1, 0) so log() never
// returns 0 (the exponential distribution excludes 0).
void exponential_kernel_cuda(TensorIterator& iter, double lambda_, Generator* gen_) {
  auto gen = check_generator<CUDAGenerator>(gen_, &globalContext().defaultGenerator(kCUDA));
  // Note that HIP doesn't support std::nextafter in device code.
  // Precompute the squash values on the host and capture them by value.
  auto nextafter_1_0_float = std::nextafter(1.0f, 0.0f);
  auto nextafter_1_0_double = std::nextafter(1.0, 0.0);
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "exponential_cuda", [&] {
    using accscalar_t = at::acc_type<scalar_t, true>;
    auto lambda = static_cast<accscalar_t>(lambda_);
    if (std::is_same<scalar_t, double>::value) {
      // define lambda for exponential transformation
      auto exponential_func = [lambda, nextafter_1_0_double] __device__ (accscalar_t rand) {
        accscalar_t sample;
        // hiprand_uniform has (0,1] bounds. log(1) is 0 and exponential excludes 0.
        // Hence, squash the 1 to just below 1.
        if(rand == static_cast<accscalar_t>(1.0)) {
          sample = ::log(nextafter_1_0_double);
        } else {
          sample = ::log(rand);
        }
        return static_cast<scalar_t>(static_cast<accscalar_t>(-1.0) / lambda * sample);
      };
      distribution_nullary_kernel<scalar_t, accscalar_t, curand4_engine_calls/2>(iter,
        gen,
        [] __device__ (hiprandStatePhilox4_32_10_t* state) { return hiprand_uniform2_double(state); },
        exponential_func);
    } else {
      // use __logf fast approximation for peak bandwidth
      auto exponential_func = [lambda, nextafter_1_0_float] __device__ (accscalar_t rand) {
        accscalar_t sample;
        if(rand == static_cast<accscalar_t>(1.0)) {
          sample = __logf(nextafter_1_0_float);
        } else {
          sample = __logf(rand);
        }
        return static_cast<scalar_t>(static_cast<accscalar_t>(-1.0) / lambda * sample);
      };
      distribution_nullary_kernel<scalar_t, accscalar_t, curand4_engine_calls>(iter,
        gen,
        [] __device__ (hiprandStatePhilox4_32_10_t* state) { return hiprand_uniform4(state); },
        exponential_func);
    }
  });
}
// Fills `iter`'s output with Geometric(p_) samples (HIP build) via
// inverse-CDF: ceil(log(u) / log(1 - p)) on uniform u. Double dtype gets
// exact ::log; every other dtype uses the fast __logf approximation.
void geometric_kernel_cuda(TensorIterator& iter, double p_, Generator* gen_) {
  auto gen = check_generator<CUDAGenerator>(gen_, &globalContext().defaultGenerator(kCUDA));
  AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, iter.dtype(), "geometric_cuda", [&] {
    if (std::is_same<scalar_t, double>::value) {
      // define lambda for geometric transformation
      auto geometric_func = [p_] __device__ (double rand) {
        return static_cast<scalar_t>(::ceil(::log(rand) / ::log(static_cast<double>(1.0)-p_)));
      };
      distribution_nullary_kernel<scalar_t, double, curand4_engine_calls/2>(iter,
        gen,
        [] __device__ (hiprandStatePhilox4_32_10_t* state) { return hiprand_uniform2_double(state); },
        geometric_func);
    } else {
      auto p = static_cast<float>(p_);
      auto geometric_func = [p] __device__ (float rand) {
        // use __logf fast approximation for peak bandwidth
        return static_cast<scalar_t>(::ceil(__logf(rand) / __logf(static_cast<float>(1.0)-p)));
      };
      distribution_nullary_kernel<scalar_t, float, curand4_engine_calls>(iter,
        gen,
        [] __device__ (hiprandStatePhilox4_32_10_t* state) { return hiprand_uniform4(state); },
        geometric_func);
    }
  });
}
// Fills `iter`'s output with LogNormal(mean_, std_) samples (HIP build):
// exp(normal * std + mean) on raw normal variates. Non-double dtypes use
// the fast __expf approximation.
void log_normal_kernel_cuda(TensorIterator& iter, double mean_, double std_, Generator* gen_) {
  auto gen = check_generator<CUDAGenerator>(gen_, &globalContext().defaultGenerator(kCUDA));
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "log_normal_cuda", [&] {
    using accscalar_t = at::acc_type<scalar_t, true>;
    auto mean = static_cast<accscalar_t>(mean_);
    auto std = static_cast<accscalar_t>(std_);
    if (std::is_same<scalar_t, double>::value) {
      // define lambda for log_normal transformation
      auto log_normal_func = [mean, std] __device__ (accscalar_t rand) {
        return static_cast<scalar_t>(::exp(rand * std + mean));
      };
      distribution_nullary_kernel<scalar_t, accscalar_t, curand4_engine_calls/2>(iter,
        gen,
        [] __device__ (hiprandStatePhilox4_32_10_t* state) { return hiprand_normal2_double(state); },
        log_normal_func);
    } else {
      auto log_normal_func = [mean, std] __device__ (accscalar_t rand) {
        // use __expf fast approximation for peak bandwidth
        return static_cast<scalar_t>(__expf(rand * std + mean));
      };
      distribution_nullary_kernel<scalar_t, accscalar_t, curand4_engine_calls>(iter,
        gen,
        [] __device__ (hiprandStatePhilox4_32_10_t* state) { return hiprand_normal4(state); },
        log_normal_func);
    }
  });
}
// Fills `iter`'s output with Bernoulli(p_) samples (HIP build):
// output = (uniform_rand <= p). The comparison runs in double only for
// double dtype; all other dtypes compare in float.
void bernoulli_scalar_cuda_kernel(TensorIterator& iter, double p_, Generator* gen_) {
  auto gen = check_generator<CUDAGenerator>(gen_, &globalContext().defaultGenerator(kCUDA));
  AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, iter.dtype(), "bernoulli_scalar_cuda_", [&] {
    if (std::is_same<scalar_t, double>::value) {
      // define lambda for bernoulli transformation
      auto bernoulli_func = [p_] __device__ (double rand) {
        return static_cast<scalar_t>(rand <= p_);
      };
      distribution_nullary_kernel<scalar_t, double, curand4_engine_calls/2>(iter,
        gen,
        [] __device__ (hiprandStatePhilox4_32_10_t* state) { return hiprand_uniform2_double(state); },
        bernoulli_func);
    } else {
      auto p = static_cast<float>(p_);
      auto bernoulli_func = [p] __device__ (float rand) {
        return static_cast<scalar_t>(rand <= p);
      };
      distribution_nullary_kernel<scalar_t, float, curand4_engine_calls>(iter,
        gen,
        [] __device__ (hiprandStatePhilox4_32_10_t* state) { return hiprand_uniform4(state); },
        bernoulli_func);
    }
  });
}
// In-place fill of `self` with samples from U[from, to).
Tensor& uniform_cuda_(Tensor& self, double from, double to, Generator* gen) {
  auto self_iter = TensorIterator::nullary_op(self);
  uniform_kernel_cuda(*self_iter, from, to, gen);
  return self;
}
// In-place fill of `self` with "default range" random integers:
// floating dtypes draw from [0, 2^mantissa_digits] (every integer in that
// range is exactly representable), integral dtypes from [0, dtype_max].
Tensor& random_cuda_(Tensor& self, Generator* gen) {
  auto iter = TensorIterator::nullary_op(self);
  uint64_t range;
  auto iter_scalar_type = iter->dtype();
  if (isFloatingType(iter_scalar_type)) {
    AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter_scalar_type, "random_cuda_range_calc", [&] {
      // 2^digits + 1 distinct values, i.e. inclusive upper bound 2^digits.
      range = static_cast<uint64_t>((1ULL << std::numeric_limits<scalar_t>::digits) + 1);
    });
  } else {
    AT_DISPATCH_INTEGRAL_TYPES(iter_scalar_type, "random_cuda_range_calc", [&] {
      range = static_cast<uint64_t>(std::numeric_limits<scalar_t>::max()) + 1;
    });
  }
  random_kernel_cuda(*iter, range, 0, gen);
  return self;
}
// In-place fill of `self` with uniform integers drawn from [from, to).
Tensor& clamped_random_cuda_(Tensor& self, int64_t from, int64_t to, Generator* gen) {
  TORCH_CHECK(from < to, "random_ expects 'from' to be less than 'to', but got from=", from, " >= to=", to);
  auto self_iter = TensorIterator::nullary_op(self);
  uint64_t span = to - from;
  random_kernel_cuda(*self_iter, span, from, gen);
  return self;
}
// Equivalent to random_(0, to): uniform integers in [0, to).
Tensor& capped_random_cuda_(Tensor& self, int64_t to, Generator* gen) {
  const int64_t lower_bound = 0;
  return clamped_random_cuda_(self, lower_bound, to, gen);
}
// In-place fill of `self` with N(mean, std^2) samples; std must be positive.
Tensor& normal_cuda_(Tensor& self, double mean, double std, Generator* gen) {
  TORCH_CHECK(std > 0.0, "normal_ expects std > 0.0, but found std=", std);
  auto self_iter = TensorIterator::nullary_op(self);
  normal_kernel_cuda(*self_iter, mean, std, gen);
  return self;
}
// out-variant: tensor mean, scalar std. Sample N(0, std^2), then shift by mean.
Tensor& normal_out_cuda(Tensor& output, const Tensor& mean, double std, Generator* gen) {
  normal_cuda_(output, 0, std, gen);
  output.add_(mean);
  return output;
}
// out-variant: scalar mean, tensor std. Sample N(0, 1), then compute
// output = mean + std * output via addcmul. Argument order matters:
// the standard-normal draw is both the addcmul input and the multiplicand.
Tensor& normal_out_cuda(Tensor& output, double mean, const Tensor& std, Generator* gen) {
  normal_cuda_(output, 0, 1, gen);
  // 1-element tensor holding the scalar mean, broadcast by addcmul.
  auto mean_tensor = at::full({1}, mean, output.options());
  at::native::legacy::cuda::_th_addcmul_out(output, mean_tensor, output, std, 1);
  return output;
}
// out-variant: tensor mean, tensor std. Same addcmul trick as above.
Tensor& normal_out_cuda(Tensor& output, const Tensor& mean, const Tensor& std, Generator* gen) {
  normal_cuda_(output, 0, 1, gen);
  at::native::legacy::cuda::_th_addcmul_out(output, mean, output, std, 1);
  return output;
}
// Allocating variants of normal(): create the result with the shape/options
// of the tensor argument (mean takes precedence when both are tensors),
// then forward to the matching out-variant.
Tensor normal_cuda(const Tensor& mean, double std, Generator* gen) {
  auto result = at::empty(mean.sizes(), mean.options());
  normal_out_cuda(result, mean, std, gen);
  return result;
}
Tensor normal_cuda(double mean, const Tensor& std, Generator* gen) {
  auto result = at::empty(std.sizes(), std.options());
  normal_out_cuda(result, mean, std, gen);
  return result;
}
Tensor normal_cuda(const Tensor& mean, const Tensor& std, Generator* gen) {
  auto result = at::empty(mean.sizes(), mean.options());
  normal_out_cuda(result, mean, std, gen);
  return result;
}
// In-place fill of `self` with Cauchy(median, sigma) samples.
Tensor& cauchy_cuda_(Tensor& self, double median, double sigma, Generator* gen) {
  auto self_iter = TensorIterator::nullary_op(self);
  cauchy_kernel_cuda(*self_iter, median, sigma, gen);
  return self;
}
// In-place fill of `self` with Exponential(lambda) samples.
Tensor& exponential_cuda_(Tensor& self, double lambda, Generator* gen) {
  auto self_iter = TensorIterator::nullary_op(self);
  exponential_kernel_cuda(*self_iter, lambda, gen);
  return self;
}
// In-place fill of `self` with Geometric(p) samples; p must lie in (0, 1).
Tensor& geometric_cuda_(Tensor& self, double p, Generator* gen) {
  TORCH_CHECK(0 < p && p < 1, "geometric_ expects p to be in (0, 1), but got p=", p);
  auto self_iter = TensorIterator::nullary_op(self);
  geometric_kernel_cuda(*self_iter, p, gen);
  return self;
}
// In-place fill of `self` with LogNormal(mean, std) samples; std must be positive.
Tensor& log_normal_cuda_(Tensor& self, double mean, double std, Generator* gen) {
  TORCH_CHECK(std > 0.0, "log_normal_ expects std > 0.0, but found std=", std);
  auto self_iter = TensorIterator::nullary_op(self);
  log_normal_kernel_cuda(*self_iter, mean, std, gen);
  return self;
}
// In-place fill of `self` with Bernoulli(p) samples; p must lie in [0, 1].
Tensor& bernoulli_scalar_cuda_(Tensor &self, double p, Generator* gen) {
  TORCH_CHECK(0 <= p && p <= 1, "bernoulli_ expects p to be in [0, 1], but got p=", p);
  auto self_iter = TensorIterator::nullary_op(self);
  bernoulli_scalar_cuda_kernel(*self_iter, p, gen);
  return self;
}
}} // namespace at::native
| 5b0afbb47ca2916a7f2ab96922124c42f4d27a41.cu | #include <ATen/Dispatch.h>
#include <ATen/ExpandUtils.h>
#include <ATen/NativeFunctions.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/AccumulateType.h>
#include <ATen/CUDAGenerator.h>
#include <curand.h>
#include <curand_kernel.h>
#include <curand_philox4x32_x.h>
#include <utility>
#include <functional>
#include <ATen/native/Distributions.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/TensorIterator.h>
#include <ATen/LegacyTHFunctionsCUDA.h>
#include <THC/THCGeneral.h>
#include <THC/THCTensorRandom.h>
#include <THC/THCGenerator.hpp>
#include <THC/THCApply.cuh>
#include <THC/THCDeviceUtils.cuh>
#include <cstdint>
#include <limits>
#include <utility>
#include <type_traits>
/**
* Note [Register spilling in curand call for CUDA < 10]
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* For CUDA < 10, curandStatePhilox4_32_10_t engine achieves poor performance (60% SOL bandwidth)
* when called to generate one random number at a time. This is because the line
* unsigned ret = (&state->output.x)[state->STATE++];
* in
* QUALIFIERS unsigned int curand(curandStatePhilox4_32_10_t *state)
* in curand_kernel.h dynamically indexes into state.output, preventing the compiler from ever
* storing state.output in registers.
*
* CUDA 10 fixed this problem. However, for backwards compatibility, in the following kernels
* we are using curand distributions that utilize curand4 call. curand4 call doesn't have the
* register spilling problem.
*/
THCGenerator* THCRandom_getGenerator(THCState* state);
namespace {
// Increment should be at least the number of curand() random numbers used in
// each thread. It is the user's responsibility to make sure that the increment for philox is never
// smaller than the number of curand() calls. Increment value > the number of curand() calls
// won't harm but anything less would mean that you would be reusing random values from
// previous calls.
// e.g. In many kernels below, we use distributions that utilize curand4 call in the kernel.
// Hence, increment value should be at least 4 for those kernels.
// Atomically reserves `increment` Philox counter slots on the THC generator
// and returns (initial_seed, previous offset) for a kernel launch.
std::pair<uint64_t, uint64_t> next_philox_seed(at::Generator* gen, uint64_t increment) {
  auto thc_gen = THCRandom_getGenerator(at::globalContext().getTHCState());
  const uint64_t prev_offset = thc_gen->state.philox_seed_offset.fetch_add(increment);
  return {thc_gen->state.initial_seed, prev_offset};
}
// launch bounds used for kernels utilizing TensorIterator
const uint32_t block_size_bound = 256;
const uint32_t grid_size_bound = 4;
// number of randoms given by distributions like curand_uniform4, curand_uniform2_double
// used in calculating philox offset.
const uint32_t curand4_engine_calls = 4;
// utility function that calculates proper philox_offset
// for distributions utilizing TensorIterator. For distributions using
// TensorIterator, we are using a grid-stride loop with each
// thread yielding one element per thread. For the edge of the grid-stride
// loop, if the tensor size is large, the unroll loop will kick in and the float4
// from curand4 will start getting utilized (for common tensor sizes, we end up
// using rand.x from each thread). Hence, the philox_offset is
// (number of elements per thread * number of engine calls), which makes
// sure that philox offset increment is not less than the number of randoms used
// in each thread.
// Computes (philox_counter_offset, grid, block) for a TensorIterator-based
// distribution kernel. The grid is capped at SM count * (blocks per SM that
// fit by thread count); the grid-stride loop in the kernel covers any numel.
// The counter offset is (elements per thread) * curand4_engine_calls so the
// Philox counter never falls behind the number of randoms actually consumed.
std::tuple<uint64_t, dim3, dim3> calc_execution_policy(int64_t total_elements) {
  const uint64_t numel = static_cast<uint64_t>(total_elements);
  const uint32_t block_size = block_size_bound;
  const uint32_t unroll = curand4_engine_calls;
  dim3 dim_block(block_size);
  dim3 grid((numel + block_size - 1) / block_size);
  uint32_t blocks_per_sm = at::cuda::getCurrentDeviceProperties()->maxThreadsPerMultiProcessor / block_size;
  grid.x = std::min(
      static_cast<uint32_t>(at::cuda::getCurrentDeviceProperties()->multiProcessorCount) * blocks_per_sm,
      grid.x);
  //number of times random will be generated per thread, to offset philox counter in thc random state
  uint64_t counter_offset = ((numel - 1) / (block_size * grid.x * unroll) + 1)
                              * curand4_engine_calls;
  return std::make_tuple(counter_offset, grid, dim_block);
}
// grid stride loop kernel for distributions
// Grid-stride kernel: each iteration draws one vectorized random (e.g. a
// float4 from curand_uniform4) via dist_func and writes up to unroll_factor
// outputs via transform_func(linear_index, value).
// rounded_size is numel rounded up to a multiple of the per-iteration
// coverage, so every thread executes the same trip count; the trailing
// __syncthreads() is therefore reached uniformly by the whole block.
template<typename accscalar_t, int unroll_factor, typename dist_t, typename transform_t>
C10_LAUNCH_BOUNDS_2(block_size_bound, grid_size_bound)
__global__ void distribution_elementwise_grid_stride_kernel(int numel,
                                                            std::pair<uint64_t, uint64_t> seeds,
                                                            const dist_t dist_func,
                                                            const transform_t transform_func) {
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  // Each thread gets its own Philox subsequence (idx) at the reserved offset.
  curandStatePhilox4_32_10_t state;
  curand_init(
      seeds.first,
      idx,
      seeds.second,
      &state);
  int rounded_size = ((numel - 1)/(blockDim.x * gridDim.x * unroll_factor)+1) *
      blockDim.x * gridDim.x * unroll_factor;
  for(int linear_index = idx; linear_index < rounded_size; linear_index += blockDim.x * gridDim.x * unroll_factor) {
    auto rand = dist_func(&state);
    #pragma unroll
    for (int ii = 0; ii < unroll_factor; ii++) {
      // ii-th lane of the vectorized draw maps to a stride-apart output slot.
      int li = linear_index + blockDim.x * gridDim.x * ii;
      if (li < numel) {
        transform_func(li, static_cast<accscalar_t>((&rand.x)[ii]));
      }
    }
    __syncthreads();
  }
}
/**
* distribution_nullary_kernel is analogous to gpu_nullary_kernel in
* ATen/native/cuda/Loops.cuh. Like gpu_nullary_kernel, it uses
* TensorIterator to launch a kernel. However, the differences are
* - it launches a grid-stride loop based kernel. The kernel is not
* generic like elementwise_kernel in Loops.cuh and is specialized
* for the distribution kernels here.
* - For big size tensors, we can launch multiple kernels recursively
* (i.e. if (!iter.can_use_32bit_indexing())) and hence, the philox
* offset calculation is done in this function.
*
* FIXME: Can we specialize elementwise_kernel and launch_kernel in Loops.cuh
* to have grid-stride loop kernel and then use that to launch our distribution
* kernels? Note that we need a grid-stride loop kernel because, we found by testing
* that it achieves peak effective bandwidth.
*/
// Launches the grid-stride distribution kernel over `iter`'s output.
// Reserves a Philox offset sized for this launch, then either:
//  - recurses over 32-bit-indexable sub-iterators for huge tensors
//    (each recursive call reserves its own seeds), or
//  - launches with a fast path for trivially-1d (single-stride) output,
//    falling back to an offset calculator for strided output.
template<typename scalar_t,
         typename accscalar_t,
         int unroll_factor,
         typename dist_t,
         typename transform_t>
void distribution_nullary_kernel(at::TensorIterator& iter,
                                 at::Generator* gen,
                                 const dist_t& dist_func,
                                 const transform_t transform_func) {
  static_assert(unroll_factor >= 1, "unroll_factor must be >= 1.");
  int64_t numel = iter.numel();
  if (numel == 0) {
    return;
  }
  auto execution_policy = calc_execution_policy(numel);
  auto counter_offset = std::get<0>(execution_policy);
  auto grid = std::get<1>(execution_policy);
  auto block = std::get<2>(execution_policy);
  auto seeds = next_philox_seed(gen, counter_offset);
  if (!iter.can_use_32bit_indexing()) {
    for (auto& sub_iter : iter.with_32bit_indexing()) {
      distribution_nullary_kernel<scalar_t, accscalar_t, unroll_factor>(sub_iter,
        gen, dist_func, transform_func);
    }
    return;
  }
  char* out_data = (char*)iter.data_ptr(0);
  auto stream = at::cuda::getCurrentCUDAStream();
  if (iter.is_trivial_1d()) {
    // Contiguous-ish fast path: a single inner stride indexes the output.
    auto strides = iter.get_inner_strides();
    int stride0 = strides[0];
    distribution_elementwise_grid_stride_kernel<accscalar_t, unroll_factor><<<grid, block, 0, stream>>>(
      numel,
      seeds,
      dist_func,
      [=]__device__(int idx, accscalar_t rand) {
        scalar_t* out = (scalar_t*)&out_data[stride0 * idx];
        *out = transform_func(rand);
      }
    );
  } else {
    // General strided path: translate linear index -> byte offset.
    auto offset_calc = at::native::make_offset_calculator<1>(iter);
    distribution_elementwise_grid_stride_kernel<accscalar_t, unroll_factor><<<grid, block, 0, stream>>>(
      numel,
      seeds,
      dist_func,
      [=]__device__(int idx, accscalar_t rand) {
        auto offsets = offset_calc.get(idx);
        scalar_t* out = (scalar_t*)&out_data[offsets[0]];
        *out = transform_func(rand);
      }
    );
  }
  AT_CUDA_CHECK(cudaGetLastError());
}
// Element-wise Poisson sampler: ret[i] ~ Poisson(lambda[i]), using
// curand_poisson with a per-thread Philox state seeded at the reserved offset.
template <typename scalar_t>
void poisson_cuda_kernel(
    at::Tensor& ret,
    const at::Tensor& lambda,
    std::pair<uint64_t, uint64_t> seeds) {
  at::cuda::CUDA_tensor_apply2<scalar_t, scalar_t>(
      ret,
      lambda,
      [seeds] __device__(
          scalar_t & ret_val, const scalar_t& lambda) {
        curandStatePhilox4_32_10_t state;
        curand_init(
            seeds.first,
            blockIdx.x * blockDim.x + threadIdx.x,
            seeds.second,
            &state);
        ret_val = static_cast<scalar_t>(curand_poisson(&state, lambda));
      });
}
// Element-wise standard Gamma sampler: ret[i] ~ Gamma(alpha[i], 1), using
// rejection sampling (sample_gamma) fed by per-thread uniform and normal
// base samplers. Results are clamped up to the smallest positive normal
// value so downstream log()/division never sees 0.
template <typename scalar_t>
void gamma_cuda_kernel(
    at::Tensor& ret,
    const at::Tensor& alpha,
    std::pair<uint64_t, uint64_t> seeds) {
  using accscalar_t = at::acc_type<scalar_t, true>;
  at::cuda::CUDA_tensor_apply2<scalar_t, scalar_t>(
      ret,
      alpha,
      [seeds] __device__(
          scalar_t & ret_val, const scalar_t& alpha) {
        curandStatePhilox4_32_10_t state;
        curand_init(
            seeds.first,
            blockIdx.x * blockDim.x + threadIdx.x,
            seeds.second,
            &state);
        // Base samplers share the same Philox state (captured by reference).
        auto uniform_lambda = [&state] __device__ () {
          return curand_uniform(&state);
        };
        BaseSampler<accscalar_t, decltype(uniform_lambda)> standard_uniform(uniform_lambda);
        auto normal_lambda = [&state] __device__ () {
          return curand_normal(&state);
        };
        BaseSampler<accscalar_t, decltype(normal_lambda)> standard_normal(normal_lambda);
        auto sample = sample_gamma<scalar_t, accscalar_t, decltype(uniform_lambda), decltype(normal_lambda)>(alpha, standard_uniform, standard_normal);
        auto min_value = std::numeric_limits<scalar_t>::min();
        ret_val = (min_value > sample) ? min_value : sample;
      });
}
// Element-wise reparameterized-gradient of the standard Gamma sampler:
// ret[i] = d(sample)/d(alpha) evaluated at (self[i], output[i]).
template <typename scalar_t>
void gamma_grad_cuda_kernel(
    at::Tensor& ret,
    const at::Tensor& self,
    const at::Tensor& output) {
  using accscalar_t = at::acc_type<scalar_t, true>;
  at::cuda::CUDA_tensor_apply3<scalar_t, scalar_t, scalar_t>(
      ret, self, output,
      [] __device__ (scalar_t& ret_val, const scalar_t& self_val, const scalar_t &output_val) {
        ret_val = standard_gamma_grad_one<scalar_t, accscalar_t>(self_val, output_val);
      });
}
// Element-wise gradient of Dirichlet sampling w.r.t. alpha:
// ret[i] = dirichlet_grad_one(x[i], alpha[i], total[i]).
template <typename scalar_t>
void dirichlet_grad_cuda_kernel(
    at::Tensor& ret,
    const at::Tensor& x,
    const at::Tensor& alpha,
    const at::Tensor& total) {
  using accscalar_t = at::acc_type<scalar_t, true>;
  at::cuda::CUDA_tensor_apply4<scalar_t, scalar_t, scalar_t, scalar_t>(
      ret, x, alpha, total,
      [] __device__ (scalar_t& ret_val, const scalar_t& x_val, const scalar_t& alpha_val, const scalar_t& total_val) {
        ret_val = dirichlet_grad_one<scalar_t, accscalar_t>(x_val, alpha_val, total_val);
      });
}
// Element-wise Bernoulli with per-element probabilities: ret[i] ~
// Bernoulli(p[i]). Processes 4 elements per thread so one curand_uniform4
// draw (a float4) services all of them; the switch falls through from the
// highest valid lane down to lane 1. Probabilities are assert-checked to
// lie in [0, 1] on the device.
template<typename scalar_t, typename prob_t>
void bernoulli_tensor_cuda_kernel(
    at::Tensor& ret, const at::Tensor& p,
    std::pair<uint64_t, uint64_t> seeds) {
  // The template argument `4` below indicates that we want to operate on four
  // element at each time. See NOTE [ CUDA_tensor_applyN helpers ] for details.
  at::cuda::CUDA_tensor_apply2<scalar_t, prob_t, 4>(
      ret, p,
      [seeds] __device__(
          int n, scalar_t& v1, scalar_t& v2, scalar_t& v3, scalar_t& v4,
          const prob_t& p1, const prob_t& p2, const prob_t& p3, const prob_t& p4) {
        curandStatePhilox4_32_10_t state;
        curand_init(
            seeds.first,
            blockIdx.x * blockDim.x + threadIdx.x,
            seeds.second,
            &state);
        // See Note [Register spilling in curand call for CUDA < 10]
        float4 rand = curand_uniform4(&state);
        switch (n) {
          case 4: {
            assert(0 <= p4 && p4 <= 1);
            v4 = static_cast<scalar_t>(rand.w <= p4);
            // fallthrough
          }
          case 3: {
            assert(0 <= p3 && p3 <= 1);
            v3 = static_cast<scalar_t>(rand.z <= p3);
            // fallthrough
          }
          case 2: {
            assert(0 <= p2 && p2 <= 1);
            v2 = static_cast<scalar_t>(rand.y <= p2);
            // fallthrough
          }
          case 1: {
            assert(0 <= p1 && p1 <= 1);
            v1 = static_cast<scalar_t>(rand.x <= p1);
          }
        }
      }
  );
}
// Normalizes Gamma draws into Dirichlet samples: ret = gamma / sum(gamma)
// along the last dim, clamped into [min_normal, 1 - epsilon] so no
// component is exactly 0 or 1.
template<typename scalar_t>
void dirichlet_scalar_cuda_kernel(
    at::Tensor& ret,
    const at::Tensor& gamma) {
  auto gamma_sum = gamma.sum(-1, true).expand(ret.sizes());
  at::cuda::CUDA_tensor_apply3<scalar_t, scalar_t, scalar_t>(ret, gamma, gamma_sum,
  [] __device__(scalar_t &ret_val, const scalar_t &gamma, const scalar_t &gamma_sum) {
    ret_val = gamma / gamma_sum;
    auto min_value = std::numeric_limits<scalar_t>::min();
    auto max_value = 1 - std::numeric_limits<scalar_t>::epsilon();
    ret_val = (min_value > ret_val) ? min_value : ret_val;
    ret_val = (max_value < ret_val) ? max_value : ret_val;
  });
}
} // namespace
namespace at { namespace native {
// Allocates an output shaped like `lambda` and fills it with Poisson samples.
Tensor _s_poisson_cuda(const Tensor& lambda, Generator* gen) {
  auto result = at::empty(lambda.sizes(), lambda.options());
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(result.scalar_type(), "poisson_cuda", [&] {
    // Reserve 20 Philox slots per thread for the rejection loop.
    poisson_cuda_kernel<scalar_t>(result, lambda, next_philox_seed(gen, 20));
  });
  return result;
}
// Allocates an output shaped like `alpha` and fills it with Gamma samples.
Tensor _s_gamma_cuda(const Tensor& alpha, Generator* gen) {
  auto result = at::empty(alpha.sizes(), alpha.options());
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(result.scalar_type(), "gamma_cuda", [&] {
    // Reserve 10 Philox slots per thread for the rejection loop.
    gamma_cuda_kernel<scalar_t>(result, alpha, next_philox_seed(gen, 10));
  });
  return result;
}
// Dirichlet sampling via the Gamma construction: draw Gamma(alpha[i], 1)
// per component, then normalize along the last dim.
Tensor _s_dirichlet_cuda(const Tensor& alpha, Generator* gen) {
  Tensor ret = at::empty(alpha.sizes(), alpha.options());
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(ret.scalar_type(), "dirichlet", [&] {
    Tensor gamma = at::empty(alpha.sizes(), alpha.options());
    gamma_cuda_kernel<scalar_t>(gamma, alpha, next_philox_seed(gen, 10));
    dirichlet_scalar_cuda_kernel<scalar_t>(ret, gamma);
  });
  return ret;
}
// Allocates and computes the standard-Gamma reparameterization gradient.
Tensor _standard_gamma_grad_cuda(const Tensor& self, const Tensor& output) {
  auto grad = at::empty(self.sizes(), self.options());
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(self.scalar_type(), "_standard_gamma_grad_cuda", [&] {
    gamma_grad_cuda_kernel<scalar_t>(grad, self, output);
  });
  return grad;
}
// Allocates and computes the Dirichlet gradient w.r.t. the concentration.
Tensor _dirichlet_grad_cuda(const Tensor& x, const Tensor& alpha, const Tensor& total) {
  auto grad = at::empty(x.sizes(), x.options());
  AT_DISPATCH_FLOATING_TYPES(x.scalar_type(), "_dirichlet_grad_cuda", [&] {
    dirichlet_grad_cuda_kernel<scalar_t>(grad, x, alpha, total);
  });
  return grad;
}
// In-place Bernoulli with a probability tensor: self[i] ~ Bernoulli(p_[i]).
// The nested dispatch captures self's dtype as self_t before the inner
// dispatch rebinds scalar_t to the probability dtype.
Tensor& bernoulli_tensor_cuda_(Tensor &self, const Tensor& p_, Generator* gen) {
  // Move p onto the device and expand it to self's shape.
  auto p = std::get<0>(expand_inplace(self, p_.to(kCUDA)));
  AT_DISPATCH_ALL_TYPES_AND(
    at::ScalarType::Half, self.scalar_type(), "bernoulli_tensor_cuda_self_", [&] {
      using self_t = scalar_t;
      auto seeds = next_philox_seed(gen, 10);
      AT_DISPATCH_FLOATING_TYPES_AND_HALF(p.scalar_type(), "bernoulli_tensor_cuda_p_", [&] {
        using p_t = scalar_t;
        return bernoulli_tensor_cuda_kernel<self_t, p_t>(self, p, seeds);
      });
   });
  return self;
}
// Fills `iter`'s output with samples from U[from_, to_). curand's uniform
// is (0, 1]; the transform flips it to [0, 1) before scaling to the range.
// Validates that from <= to and that the range fits the dtype.
void uniform_kernel_cuda(TensorIterator& iter, double from_, double to_, Generator* gen_) {
  auto gen = check_generator<CUDAGenerator>(gen_, &globalContext().defaultGenerator(kCUDA));
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "uniform_cuda", [&] {
    auto from = static_cast<scalar_t>(from_);
    auto to = static_cast<scalar_t>(to_);
    TORCH_CHECK(from <= to,
      "uniform_ expects to return a [from, to) range, but found from=", from,
      " > to=", to);
    TORCH_CHECK((to - from) <= std::numeric_limits<scalar_t>::max(),
      "uniform_ expects to-from <= std::numeric_limits<", toString(iter.dtype()),
      ">::max(), but found to=", to, " and from=", from,
      " which result in to-from to exceed the limit");
    using accscalar_t = at::acc_type<scalar_t, true>;
    auto range = static_cast<accscalar_t>(to-from);
    from = static_cast<accscalar_t>(from);
    // define lambda to reverse bounds, multiply 'range' and add 'from_'
    auto uniform_func = [range, from] __device__ (accscalar_t rand) {
      // reverse the bounds of curand4 from (0, 1] to [0, 1)
      // Note that this method is from legacy THCTensorRandom and is likely to give
      // you more 0-s, since, the probability of gettings 1-s is higher than 0-s and
      // by reversing the bounds, we are flipping the probabilities of 1-s and 0-s.
      auto reverse_bound_rand = rand == static_cast<accscalar_t>(1.0) ? static_cast<accscalar_t>(0.0) : rand;
      return static_cast<scalar_t>(reverse_bound_rand * range + from);
    };
    if (std::is_same<scalar_t, double>::value) {
      distribution_nullary_kernel<scalar_t, accscalar_t, curand4_engine_calls/2>(iter,
        gen,
        [] __device__ (curandStatePhilox4_32_10_t* state) { return curand_uniform2_double(state); },
        uniform_func);
    } else {
      distribution_nullary_kernel<scalar_t, accscalar_t, curand4_engine_calls>(iter,
        gen,
        [] __device__ (curandStatePhilox4_32_10_t* state) { return curand_uniform4(state); },
        uniform_func);
    }
  });
}
// Fills `iter`'s output with integers in [base, base + range) computed as
// rand % range + base. 64-bit dtypes (double, int64) pack two 32-bit
// curand4 words into each 64-bit random; everything else uses 32-bit
// randoms directly. NOTE(review): `rand % range` carries the usual modulo
// bias when range does not divide the generator's period.
void random_kernel_cuda(TensorIterator& iter, uint64_t range, int64_t base, Generator* gen_) {
  auto gen = check_generator<CUDAGenerator>(gen_, &globalContext().defaultGenerator(kCUDA));
  AT_DISPATCH_ALL_TYPES_AND2(at::ScalarType::Bool, at::ScalarType::Half, iter.dtype(), "random_cuda", [&] {
    if (std::is_same<scalar_t, double>::value || std::is_same<scalar_t, int64_t>::value) {
      // define lambda to mod with range and add base
      auto random_func = [range, base] __device__ (uint64_t rand) {
        return static_cast<int64_t>(rand % range + base);
      };
      distribution_nullary_kernel<scalar_t, uint64_t, curand4_engine_calls/2>(iter,
        gen,
        [] __device__ (curandStatePhilox4_32_10_t* state) -> ulonglong2 {
          // Fuse the four 32-bit words of curand4 into two 64-bit randoms.
          ulonglong2 ret;
          uint4 rand_val = curand4(state);
          ret.x = (static_cast<uint64_t>(rand_val.x) << 32) | rand_val.y;
          ret.y = (static_cast<uint64_t>(rand_val.z) << 32) | rand_val.w;
          return ret;
        },
        random_func);
    } else {
      auto random_func = [range, base] __device__ (uint32_t rand) {
        return static_cast<int32_t>(rand % static_cast<uint32_t>(range) + static_cast<int32_t>(base));
      };
      distribution_nullary_kernel<scalar_t, uint32_t, curand4_engine_calls>(iter,
        gen,
        [] __device__ (curandStatePhilox4_32_10_t* state) {
          return curand4(state);
        },
        random_func);
    }
  });
}
// Fills `iter`'s output with samples from N(mean_, std_^2).
// The transform `rand * std + mean` is computed in accscalar_t precision.
// Double path draws 2 doubles per Philox engine call (hence the
// curand4_engine_calls/2 offset); float/half path draws 4 floats per call.
void normal_kernel_cuda(TensorIterator& iter, double mean_, double std_, Generator* gen_) {
  auto gen = check_generator<CUDAGenerator>(gen_, &globalContext().defaultGenerator(kCUDA));
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "normal_cuda", [&] {
    using accscalar_t = at::acc_type<scalar_t, true>;
    auto mean = static_cast<accscalar_t>(mean_);
    auto std = static_cast<accscalar_t>(std_);
    // define lambda to multiply std and add mean
    auto normal_func = [mean, std] __device__ (accscalar_t rand) {
      return static_cast<scalar_t>(rand * std + mean);
    };
    if (std::is_same<scalar_t, double>::value) {
      distribution_nullary_kernel<scalar_t, accscalar_t, curand4_engine_calls/2>(iter,
        gen,
        [] __device__ (curandStatePhilox4_32_10_t* state) { return curand_normal2_double(state); },
        normal_func);
    } else {
      distribution_nullary_kernel<scalar_t, accscalar_t, curand4_engine_calls>(iter,
        gen,
        [] __device__ (curandStatePhilox4_32_10_t* state) { return curand_normal4(state); },
        normal_func);
    }
  });
}
// Fills `iter`'s output with Cauchy(median_, sigma_) samples via inverse-CDF:
// median + sigma * tan(pi * (u - 0.5)) on uniform u. The non-double path
// uses the __tanf hardware approximation for speed.
void cauchy_kernel_cuda(TensorIterator& iter, double median_, double sigma_, Generator* gen_) {
  auto gen = check_generator<CUDAGenerator>(gen_, &globalContext().defaultGenerator(kCUDA));
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "cauchy_cuda", [&] {
    using accscalar_t = at::acc_type<scalar_t, true>;
    auto median = static_cast<accscalar_t>(median_);
    auto sigma = static_cast<accscalar_t>(sigma_);
    if (std::is_same<scalar_t, double>::value) {
      // define lambda for cauchy transformation
      auto cauchy_func = [median, sigma] __device__ (accscalar_t rand) {
        return static_cast<scalar_t>(median + sigma *
                ::tan(static_cast<accscalar_t>(M_PI) * (rand-static_cast<accscalar_t>(0.5))));
      };
      distribution_nullary_kernel<scalar_t, accscalar_t, curand4_engine_calls/2>(iter,
        gen,
        [] __device__ (curandStatePhilox4_32_10_t* state) { return curand_uniform2_double(state); },
        cauchy_func);
    } else {
      // use __tanf fast approximation for peak bandwidth
      auto cauchy_func = [median, sigma] __device__ (accscalar_t rand) {
        return static_cast<scalar_t>(median + sigma *
                __tanf(static_cast<accscalar_t>(M_PI) * (rand-static_cast<accscalar_t>(0.5))));
      };
      distribution_nullary_kernel<scalar_t, accscalar_t, curand4_engine_calls>(iter,
        gen,
        [] __device__ (curandStatePhilox4_32_10_t* state) { return curand_uniform4(state); },
        cauchy_func);
    }
  });
}
// Fills `iter`'s output with Exponential(lambda_) samples via inverse-CDF:
// -log(u) / lambda on uniform u in (0, 1]. A draw of exactly 1.0 is
// squashed to nextafter(1, 0) so log() never returns 0 (the exponential
// distribution excludes 0).
void exponential_kernel_cuda(TensorIterator& iter, double lambda_, Generator* gen_) {
  auto gen = check_generator<CUDAGenerator>(gen_, &globalContext().defaultGenerator(kCUDA));
  // Note that HIP doesn't support std::nextafter in device code.
  // Precompute the squash values on the host and capture them by value.
  auto nextafter_1_0_float = std::nextafter(1.0f, 0.0f);
  auto nextafter_1_0_double = std::nextafter(1.0, 0.0);
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "exponential_cuda", [&] {
    using accscalar_t = at::acc_type<scalar_t, true>;
    auto lambda = static_cast<accscalar_t>(lambda_);
    if (std::is_same<scalar_t, double>::value) {
      // define lambda for exponential transformation
      auto exponential_func = [lambda, nextafter_1_0_double] __device__ (accscalar_t rand) {
        accscalar_t sample;
        // curand_uniform has (0,1] bounds. log(1) is 0 and exponential excludes 0.
        // Hence, squash the 1 to just below 1.
        if(rand == static_cast<accscalar_t>(1.0)) {
          sample = ::log(nextafter_1_0_double);
        } else {
          sample = ::log(rand);
        }
        return static_cast<scalar_t>(static_cast<accscalar_t>(-1.0) / lambda * sample);
      };
      distribution_nullary_kernel<scalar_t, accscalar_t, curand4_engine_calls/2>(iter,
        gen,
        [] __device__ (curandStatePhilox4_32_10_t* state) { return curand_uniform2_double(state); },
        exponential_func);
    } else {
      // use __logf fast approximation for peak bandwidth
      auto exponential_func = [lambda, nextafter_1_0_float] __device__ (accscalar_t rand) {
        accscalar_t sample;
        if(rand == static_cast<accscalar_t>(1.0)) {
          sample = __logf(nextafter_1_0_float);
        } else {
          sample = __logf(rand);
        }
        return static_cast<scalar_t>(static_cast<accscalar_t>(-1.0) / lambda * sample);
      };
      distribution_nullary_kernel<scalar_t, accscalar_t, curand4_engine_calls>(iter,
        gen,
        [] __device__ (curandStatePhilox4_32_10_t* state) { return curand_uniform4(state); },
        exponential_func);
    }
  });
}
// Fills `iter`'s output with Geometric(p_) samples via inverse-CDF:
// ceil(log(u) / log(1 - p)) on uniform u. Double dtype gets exact ::log;
// every other dtype uses the fast __logf approximation.
void geometric_kernel_cuda(TensorIterator& iter, double p_, Generator* gen_) {
  auto gen = check_generator<CUDAGenerator>(gen_, &globalContext().defaultGenerator(kCUDA));
  AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, iter.dtype(), "geometric_cuda", [&] {
    if (std::is_same<scalar_t, double>::value) {
      // define lambda for geometric transformation
      auto geometric_func = [p_] __device__ (double rand) {
        return static_cast<scalar_t>(::ceil(::log(rand) / ::log(static_cast<double>(1.0)-p_)));
      };
      distribution_nullary_kernel<scalar_t, double, curand4_engine_calls/2>(iter,
        gen,
        [] __device__ (curandStatePhilox4_32_10_t* state) { return curand_uniform2_double(state); },
        geometric_func);
    } else {
      auto p = static_cast<float>(p_);
      auto geometric_func = [p] __device__ (float rand) {
        // use __logf fast approximation for peak bandwidth
        return static_cast<scalar_t>(::ceil(__logf(rand) / __logf(static_cast<float>(1.0)-p)));
      };
      distribution_nullary_kernel<scalar_t, float, curand4_engine_calls>(iter,
        gen,
        [] __device__ (curandStatePhilox4_32_10_t* state) { return curand_uniform4(state); },
        geometric_func);
    }
  });
}
// Fills `iter`'s output with LogNormal(mean_, std_) samples:
// exp(normal * std + mean) on raw normal variates. Non-double dtypes use
// the fast __expf approximation.
void log_normal_kernel_cuda(TensorIterator& iter, double mean_, double std_, Generator* gen_) {
  auto gen = check_generator<CUDAGenerator>(gen_, &globalContext().defaultGenerator(kCUDA));
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "log_normal_cuda", [&] {
    using accscalar_t = at::acc_type<scalar_t, true>;
    auto mean = static_cast<accscalar_t>(mean_);
    auto std = static_cast<accscalar_t>(std_);
    if (std::is_same<scalar_t, double>::value) {
      // define lambda for log_normal transformation
      auto log_normal_func = [mean, std] __device__ (accscalar_t rand) {
        return static_cast<scalar_t>(::exp(rand * std + mean));
      };
      distribution_nullary_kernel<scalar_t, accscalar_t, curand4_engine_calls/2>(iter,
        gen,
        [] __device__ (curandStatePhilox4_32_10_t* state) { return curand_normal2_double(state); },
        log_normal_func);
    } else {
      auto log_normal_func = [mean, std] __device__ (accscalar_t rand) {
        // use __expf fast approximation for peak bandwidth
        return static_cast<scalar_t>(__expf(rand * std + mean));
      };
      distribution_nullary_kernel<scalar_t, accscalar_t, curand4_engine_calls>(iter,
        gen,
        [] __device__ (curandStatePhilox4_32_10_t* state) { return curand_normal4(state); },
        log_normal_func);
    }
  });
}
// Fills `iter`'s output in place with Bernoulli(p_) samples: 1 when the
// uniform draw satisfies rand <= p, else 0. The double path consumes two
// doubles per Philox engine call (curand4_engine_calls/2); all other dtypes
// compare four floats per call against float(p_).
void bernoulli_scalar_cuda_kernel(TensorIterator& iter, double p_, Generator* gen_) {
  auto gen = check_generator<CUDAGenerator>(gen_, &globalContext().defaultGenerator(kCUDA));
  AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, iter.dtype(), "bernoulli_scalar_cuda_", [&] {
    if (std::is_same<scalar_t, double>::value) {
      // define lambda for bernoulli transformation
      auto bernoulli_func = [p_] __device__ (double rand) {
        return static_cast<scalar_t>(rand <= p_);
      };
      distribution_nullary_kernel<scalar_t, double, curand4_engine_calls/2>(iter,
        gen,
        [] __device__ (curandStatePhilox4_32_10_t* state) { return curand_uniform2_double(state); },
        bernoulli_func);
    } else {
      auto p = static_cast<float>(p_);
      auto bernoulli_func = [p] __device__ (float rand) {
        return static_cast<scalar_t>(rand <= p);
      };
      distribution_nullary_kernel<scalar_t, float, curand4_engine_calls>(iter,
        gen,
        [] __device__ (curandStatePhilox4_32_10_t* state) { return curand_uniform4(state); },
        bernoulli_func);
    }
  });
}
Tensor& uniform_cuda_(Tensor& self, double from, double to, Generator* gen) {
  // In-place fill of `self` with uniform samples over [from, to).
  auto self_iter = TensorIterator::nullary_op(self);
  uniform_kernel_cuda(*self_iter, from, to, gen);
  return self;
}
// Fills `self` in place with random integers over the dtype's default range.
// For floating dtypes the range is 2^mantissa_digits + 1 (presumably so
// every sampled value is exactly representable in scalar_t — TODO confirm
// against the CPU implementation); for integral dtypes it is max + 1.
Tensor& random_cuda_(Tensor& self, Generator* gen) {
  auto iter = TensorIterator::nullary_op(self);
  uint64_t range;
  auto iter_scalar_type = iter->dtype();
  if (isFloatingType(iter_scalar_type)) {
    AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter_scalar_type, "random_cuda_range_calc", [&] {
      range = static_cast<uint64_t>((1ULL << std::numeric_limits<scalar_t>::digits) + 1);
    });
  } else {
    AT_DISPATCH_INTEGRAL_TYPES(iter_scalar_type, "random_cuda_range_calc", [&] {
      // max()+1 still fits in uint64_t even for int64_t (becomes 2^63).
      range = static_cast<uint64_t>(std::numeric_limits<scalar_t>::max()) + 1;
    });
  }
  random_kernel_cuda(*iter, range, 0, gen);
  return self;
}
Tensor& clamped_random_cuda_(Tensor& self, int64_t from, int64_t to, Generator* gen) {
  // Sample integers from [from, to): the kernel receives the interval width
  // and the base offset.
  TORCH_CHECK(from < to, "random_ expects 'from' to be less than 'to', but got from=", from, " >= to=", to);
  const uint64_t span = to - from;
  auto self_iter = TensorIterator::nullary_op(self);
  random_kernel_cuda(*self_iter, span, from, gen);
  return self;
}
Tensor& capped_random_cuda_(Tensor& self, int64_t to, Generator* gen) {
  // Equivalent to random_(0, to): integers sampled from [0, to).
  const int64_t from = 0;
  return clamped_random_cuda_(self, from, to, gen);
}
Tensor& normal_cuda_(Tensor& self, double mean, double std, Generator* gen) {
  // In-place fill with N(mean, std^2) samples; non-positive std is rejected.
  TORCH_CHECK(std > 0.0, "normal_ expects std > 0.0, but found std=", std);
  auto self_iter = TensorIterator::nullary_op(self);
  normal_kernel_cuda(*self_iter, mean, std, gen);
  return self;
}
Tensor& normal_out_cuda(Tensor& output, const Tensor& mean, double std, Generator* gen) {
  // Draw N(0, std^2) samples into `output`, then shift element-wise by `mean`.
  Tensor& result = normal_cuda_(output, 0, std, gen);
  result.add_(mean);
  return result;
}
Tensor& normal_out_cuda(Tensor& output, double mean, const Tensor& std, Generator* gen) {
  // Draw standard normals z, then compute mean + std * z via addcmul,
  // broadcasting the scalar mean from a one-element tensor.
  normal_cuda_(output, 0, 1, gen);
  auto scalar_mean = at::full({1}, mean, output.options());
  at::native::legacy::cuda::_th_addcmul_out(output, scalar_mean, output, std, 1);
  return output;
}
Tensor& normal_out_cuda(Tensor& output, const Tensor& mean, const Tensor& std, Generator* gen) {
  // z ~ N(0, 1) into `output`, then output = mean + std * z element-wise.
  normal_cuda_(output, 0, 1, gen);
  at::native::legacy::cuda::_th_addcmul_out(output, mean, output, std, 1);
  return output;
}
Tensor normal_cuda(const Tensor& mean, double std, Generator* gen) {
  // Allocate a result shaped like `mean`, then delegate to the out-variant.
  auto result = at::empty(mean.sizes(), mean.options());
  normal_out_cuda(result, mean, std, gen);
  return result;
}
Tensor normal_cuda(double mean, const Tensor& std, Generator* gen) {
  // Allocate a result shaped like `std`, then delegate to the out-variant.
  auto result = at::empty(std.sizes(), std.options());
  normal_out_cuda(result, mean, std, gen);
  return result;
}
Tensor normal_cuda(const Tensor& mean, const Tensor& std, Generator* gen) {
  // The result takes its shape and options from `mean`; delegate to the
  // out-variant for the actual sampling.
  auto result = at::empty(mean.sizes(), mean.options());
  normal_out_cuda(result, mean, std, gen);
  return result;
}
Tensor& cauchy_cuda_(Tensor& self, double median, double sigma, Generator* gen) {
  // In-place fill of `self` with Cauchy(median, sigma) samples.
  auto self_iter = TensorIterator::nullary_op(self);
  cauchy_kernel_cuda(*self_iter, median, sigma, gen);
  return self;
}
Tensor& exponential_cuda_(Tensor& self, double lambda, Generator* gen) {
  // Validate the rate parameter up front, consistent with the other in-place
  // distribution ops in this file (geometric_, log_normal_, bernoulli_,
  // normal_): a negative rate has no meaning and would otherwise silently
  // feed garbage to the device kernel.
  TORCH_CHECK(lambda >= 0.0, "exponential_ expects lambda >= 0.0, but found lambda=", lambda);
  // In-place fill of `self` with Exponential(lambda) samples.
  auto iter = TensorIterator::nullary_op(self);
  exponential_kernel_cuda(*iter, lambda, gen);
  return self;
}
Tensor& geometric_cuda_(Tensor& self, double p, Generator* gen) {
  // The success probability must lie strictly inside (0, 1).
  TORCH_CHECK(0 < p && p < 1, "geometric_ expects p to be in (0, 1), but got p=", p);
  auto self_iter = TensorIterator::nullary_op(self);
  geometric_kernel_cuda(*self_iter, p, gen);
  return self;
}
Tensor& log_normal_cuda_(Tensor& self, double mean, double std, Generator* gen) {
  // In-place fill with LogNormal(mean, std) samples; std must be positive.
  TORCH_CHECK(std > 0.0, "log_normal_ expects std > 0.0, but found std=", std);
  auto self_iter = TensorIterator::nullary_op(self);
  log_normal_kernel_cuda(*self_iter, mean, std, gen);
  return self;
}
Tensor& bernoulli_scalar_cuda_(Tensor &self, double p, Generator* gen) {
  // In-place Bernoulli(p) fill; p must be a valid probability.
  TORCH_CHECK(0 <= p && p <= 1, "bernoulli_ expects p to be in [0, 1], but got p=", p);
  auto self_iter = TensorIterator::nullary_op(self);
  bernoulli_scalar_cuda_kernel(*self_iter, p, gen);
  return self;
}
}} // namespace at::native
|
d0e6d0a086749d411d31c62155ad591b978178aa.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Default code in Managedcuda document
*/
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
extern "C" {
// Element-wise product: c[i] = a[i] * b[i].
// Indexing uses threadIdx.x only (blockIdx is ignored), so this is only
// valid for a single-block launch with exactly one thread per element;
// there is no bounds guard.
__global__ void multKernel(int *c, const int *a, const int *b)
{
    const int idx = threadIdx.x;
    c[idx] = a[idx] * b[idx];
}
// Placeholder host entry point for the ManagedCuda sample.
int main() { return 0; }
}
| d0e6d0a086749d411d31c62155ad591b978178aa.cu | /*
* Default code in Managedcuda document
*/
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
extern "C" {
// Element-wise product: c[i] = a[i] * b[i].
// Indexing uses threadIdx.x only (blockIdx is ignored), so this is only
// valid for a single-block launch with exactly one thread per element;
// there is no bounds guard.
__global__ void multKernel(int *c, const int *a, const int *b)
{
    const int idx = threadIdx.x;
    c[idx] = a[idx] * b[idx];
}
// Placeholder host entry point for the ManagedCuda sample.
int main() { return 0; }
}
|
11639fe8cdf0af8bd9387fc844b1d41343a21666.hip | // !!! This is a file automatically generated by hipify!!!
#include <algorithm>
#include "model/gpt_encoder.h"
#include "tools/util.h"
/**
@file
Example of how to run gpt inference using our implementation.
*/
// Appoint precision.
const lightseq::cuda::OperationType optype =
lightseq::cuda::OperationType::FP32;
int main(int argc, char *argv[]) {
  /* ---step0. validate the command line--- */
  // argv[1]: packed model weight file; argv[2]: input token-id file.
  // Previously both were read unconditionally, crashing on missing args.
  if (argc < 3) {
    std::cout << "usage: " << argv[0]
              << " <model_weights_path> <input_token_file>" << std::endl;
    return 1;
  }
  /* ---step1. init environment--- */
  hipStream_t stream_;
  hipStream_t cache_stream_;
  hipblasHandle_t hd_;
  hipSetDevice(0);
  hipStreamCreate(&stream_);
  hipStreamCreate(&cache_stream_);
  hipblasCreate(&hd_);
  hipblasSetStream(hd_, stream_);
  /* ---step2. load model weights into GPU memory--- */
  lightseq::cuda::GptWeight<optype> tw_;
  // saved in custom proto file
  std::string model_weights_path = argv[1];
  std::string res = tw_.initializing(model_weights_path);
  if (!res.empty()) {
    std::cout << res << std::endl;
    // Fix: report failure through the exit status (was `return 0`, which
    // made a weight-load error look like success to calling scripts and was
    // inconsistent with the encoder check below, which returns 1).
    return 1;
  }
  /*
    step3. instantiate encoder, init the gpu memory buffer.
    using thrust vector to avoid manage gpu memory by hand
  */
  int max_batch_size = 128;
  thrust::device_vector<int> d_input_ =
      std::vector<int>(max_batch_size * tw_._max_step, 0);
  thrust::device_vector<int> d_sample_ =
      std::vector<int>(max_batch_size * tw_._max_step, 0);
  thrust::device_vector<float> d_ppl_ = std::vector<float>(max_batch_size, 0.f);
  std::shared_ptr<lightseq::cuda::GptEncoder<optype>> encoder_ =
      std::make_shared<lightseq::cuda::GptEncoder<optype>>(
          max_batch_size,
          reinterpret_cast<int *>(thrust::raw_pointer_cast(d_input_.data())),
          reinterpret_cast<float *>(thrust::raw_pointer_cast(d_ppl_.data())),
          reinterpret_cast<int *>(thrust::raw_pointer_cast(d_sample_.data())),
          tw_, stream_, cache_stream_, hd_);
  res = encoder_->check();
  if (!res.empty()) {
    std::cout << res << std::endl;
    return 1;
  }
  // init gpu memory buffer
  long buf_bytesize = encoder_->compute_buffer_bytesize();
  thrust::device_vector<int> d_buf_ =
      std::vector<int>(buf_bytesize / sizeof(int) + 1, 0);
  encoder_->init_buffer(
      reinterpret_cast<void *>(thrust::raw_pointer_cast(d_buf_.data())));
  hipStreamSynchronize(stream_);
  /* ---step4. read input token ids from file--- */
  int batch_size;
  int batch_seq_len;
  std::vector<int> host_input;
  // the first line of input file should
  // be two integers: batch_size and batch_seq_len.
  // followed by batch_size lines of
  // batch_seq_len integers, e.g.
  // 2 3
  // 666 666 666
  // 666 666 666
  std::string input_file_name = argv[2];
  lightseq::cuda::read_batch_tokenids_from_file(input_file_name, batch_size,
                                                batch_seq_len, host_input);
  /* ---step5. infer and log--- */
  for (int i = 0; i < 10; i++) {
    auto start = std::chrono::high_resolution_clock::now();
    // copy inputs from cpu memory to gpu memory
    hipMemcpyAsync(
        reinterpret_cast<int *>(thrust::raw_pointer_cast(d_input_.data())),
        host_input.data(), sizeof(int) * batch_size * batch_seq_len,
        hipMemcpyHostToDevice, stream_);
    encoder_->run_one_infer(batch_size, batch_seq_len);
    lightseq::cuda::print_time_duration(start, "one infer time", stream_);
    lightseq::cuda::print_vec(d_ppl_.data(), "ppl", batch_size);
  }
  return 0;
}
| 11639fe8cdf0af8bd9387fc844b1d41343a21666.cu | #include <algorithm>
#include "model/gpt_encoder.h"
#include "tools/util.h"
/**
@file
Example of how to run gpt inference using our implementation.
*/
// Appoint precision.
const lightseq::cuda::OperationType optype =
lightseq::cuda::OperationType::FP32;
int main(int argc, char *argv[]) {
  /* ---step0. validate the command line--- */
  // argv[1]: packed model weight file; argv[2]: input token-id file.
  // Previously both were read unconditionally, crashing on missing args.
  if (argc < 3) {
    std::cout << "usage: " << argv[0]
              << " <model_weights_path> <input_token_file>" << std::endl;
    return 1;
  }
  /* ---step1. init environment--- */
  cudaStream_t stream_;
  cudaStream_t cache_stream_;
  cublasHandle_t hd_;
  cudaSetDevice(0);
  cudaStreamCreate(&stream_);
  cudaStreamCreate(&cache_stream_);
  cublasCreate(&hd_);
  cublasSetStream(hd_, stream_);
  /* ---step2. load model weights into GPU memory--- */
  lightseq::cuda::GptWeight<optype> tw_;
  // saved in custom proto file
  std::string model_weights_path = argv[1];
  std::string res = tw_.initializing(model_weights_path);
  if (!res.empty()) {
    std::cout << res << std::endl;
    // Fix: report failure through the exit status (was `return 0`, which
    // made a weight-load error look like success to calling scripts and was
    // inconsistent with the encoder check below, which returns 1).
    return 1;
  }
  /*
    step3. instantiate encoder, init the gpu memory buffer.
    using thrust vector to avoid manage gpu memory by hand
  */
  int max_batch_size = 128;
  thrust::device_vector<int> d_input_ =
      std::vector<int>(max_batch_size * tw_._max_step, 0);
  thrust::device_vector<int> d_sample_ =
      std::vector<int>(max_batch_size * tw_._max_step, 0);
  thrust::device_vector<float> d_ppl_ = std::vector<float>(max_batch_size, 0.f);
  std::shared_ptr<lightseq::cuda::GptEncoder<optype>> encoder_ =
      std::make_shared<lightseq::cuda::GptEncoder<optype>>(
          max_batch_size,
          reinterpret_cast<int *>(thrust::raw_pointer_cast(d_input_.data())),
          reinterpret_cast<float *>(thrust::raw_pointer_cast(d_ppl_.data())),
          reinterpret_cast<int *>(thrust::raw_pointer_cast(d_sample_.data())),
          tw_, stream_, cache_stream_, hd_);
  res = encoder_->check();
  if (!res.empty()) {
    std::cout << res << std::endl;
    return 1;
  }
  // init gpu memory buffer
  long buf_bytesize = encoder_->compute_buffer_bytesize();
  thrust::device_vector<int> d_buf_ =
      std::vector<int>(buf_bytesize / sizeof(int) + 1, 0);
  encoder_->init_buffer(
      reinterpret_cast<void *>(thrust::raw_pointer_cast(d_buf_.data())));
  cudaStreamSynchronize(stream_);
  /* ---step4. read input token ids from file--- */
  int batch_size;
  int batch_seq_len;
  std::vector<int> host_input;
  // the first line of input file should
  // be two integers: batch_size and batch_seq_len.
  // followed by batch_size lines of
  // batch_seq_len integers, e.g.
  // 2 3
  // 666 666 666
  // 666 666 666
  std::string input_file_name = argv[2];
  lightseq::cuda::read_batch_tokenids_from_file(input_file_name, batch_size,
                                                batch_seq_len, host_input);
  /* ---step5. infer and log--- */
  for (int i = 0; i < 10; i++) {
    auto start = std::chrono::high_resolution_clock::now();
    // copy inputs from cpu memory to gpu memory
    cudaMemcpyAsync(
        reinterpret_cast<int *>(thrust::raw_pointer_cast(d_input_.data())),
        host_input.data(), sizeof(int) * batch_size * batch_seq_len,
        cudaMemcpyHostToDevice, stream_);
    encoder_->run_one_infer(batch_size, batch_seq_len);
    lightseq::cuda::print_time_duration(start, "one infer time", stream_);
    lightseq::cuda::print_vec(d_ppl_.data(), "ppl", batch_size);
  }
  return 0;
}
|
919d7119fde8369d402bc6da0cdbe8e958b4b6ad.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/layers/sigmoid_cross_entropy_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
// Per-element numerically stable sigmoid cross-entropy.
// For logit x = input_data[i] and target t = target[i]:
//   loss[i] = x*(t - 1[x>=0]) - log(1 + exp(x - 2x*1[x>=0]))
// which equals x*(t-1) - log(1+exp(-x)) for x >= 0 and
// x*t - log(1+exp(x)) for x < 0, keeping the exp() argument non-positive.
// counts[i] records whether the element participates in normalization.
template <typename Dtype>
__global__ void SigmoidCrossEntropyLossForwardGPU(const int nthreads,
    const Dtype* input_data, const Dtype* target, Dtype* loss,
    const bool has_ignore_label_, const int ignore_label_,
    Dtype* counts) {
  CUDA_KERNEL_LOOP(i, nthreads) {
    const int target_value = static_cast<int>(target[i]);
    if (has_ignore_label_ && target_value == ignore_label_) {
      // Ignored elements contribute neither loss nor count.
      loss[i] = 0;
      counts[i] = 0;
    } else {
      loss[i] = input_data[i] * (target[i] - (input_data[i] >= 0)) -
          log(1 + exp(input_data[i] - 2 * input_data[i] *
          (input_data[i] >= 0)));
      counts[i] = 1;
    }
  }
}
// Zeroes the gradient wherever the integer-cast label matches the ignore
// marker, so ignored positions contribute nothing to backprop.
template <typename Dtype>
__global__ void SigmoidCrossEntropyLossIgnoreDiffGPU(const int count,
    const int ignore_label, const Dtype* target, Dtype* diff) {
  CUDA_KERNEL_LOOP(i, count) {
    if (static_cast<int>(target[i]) == ignore_label) {
      diff[i] = 0;
    }
  }
}
// Forward pass of the sigmoid cross-entropy loss.
// Runs the internal sigmoid layer, then evaluates the stable per-element
// loss directly from the logits, reduces it with asum, and stores the
// normalized scalar loss in top[0].
template <typename Dtype>
void SigmoidCrossEntropyLossLayer<Dtype>::Forward_gpu(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  // The forward pass computes the sigmoid outputs.
  sigmoid_bottom_vec_[0] = bottom[0];
  sigmoid_layer_->Forward(sigmoid_bottom_vec_, sigmoid_top_vec_);
  // Compute the loss (negative log likelihood)
  const int count = bottom[0]->count();
  // Stable version of loss computation from input data
  const Dtype* input_data = bottom[0]->gpu_data();
  const Dtype* target = bottom[1]->gpu_data();
  // Since this memory is not used for anything, we use it here to avoid having
  // to allocate new GPU memory to accumulate intermediate results.
  Dtype* loss_data = bottom[0]->mutable_gpu_diff();
  Dtype* count_data = bottom[1]->mutable_gpu_diff();
  Dtype valid_count;
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( SigmoidCrossEntropyLossForwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
      dim3(CAFFE_CUDA_NUM_THREADS),0,Caffe::cuda_stream(), count, input_data, target, loss_data,
      has_ignore_label_, ignore_label_, count_data);
  // Only launch another CUDA kernel if we actually need the valid count.
  if (normalization_ == LossParameter_NormalizationMode_VALID &&
      has_ignore_label_) {
    caffe_gpu_asum(count, count_data, &valid_count);
  } else {
    valid_count = count;
  }
  Dtype loss;
  // asum sums absolute values; the per-element log-likelihoods are
  // non-positive, so this yields the positive total loss.
  caffe_gpu_asum(count, loss_data, &loss);
  normalizer_ = get_normalizer(normalization_, valid_count);
  top[0]->mutable_cpu_data()[0] = loss / normalizer_;
  // Clear scratch memory to prevent interfering with backward (see #6202).
  caffe_gpu_set(bottom[0]->count(), Dtype(0), bottom[0]->mutable_gpu_diff());
  caffe_gpu_set(bottom[1]->count(), Dtype(0), bottom[1]->mutable_gpu_diff());
}
// Backward pass: d(loss)/d(x_i) = sigmoid(x_i) - t_i, scaled by the top
// gradient divided by the normalizer computed in Forward_gpu. Gradients for
// ignored labels are zeroed; no gradient is defined w.r.t. the labels.
template <typename Dtype>
void SigmoidCrossEntropyLossLayer<Dtype>::Backward_gpu(
    const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
  if (propagate_down[1]) {
    LOG(FATAL) << this->type()
               << " Layer cannot backpropagate to label inputs.";
  }
  if (propagate_down[0]) {
    // First, compute the diff
    const int count = bottom[0]->count();
    const Dtype* sigmoid_output_data = sigmoid_output_->gpu_data();
    const Dtype* target = bottom[1]->gpu_data();
    Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
    // bottom_diff = sigmoid(x) - t
    caffe_copy(count, sigmoid_output_data, bottom_diff);
    caffe_gpu_axpy(count, Dtype(-1), target, bottom_diff);
    // Zero out gradient of ignored targets.
    if (has_ignore_label_) {
      // NOLINT_NEXT_LINE(whitespace/operators)
      hipLaunchKernelGGL(( SigmoidCrossEntropyLossIgnoreDiffGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
          dim3(CAFFE_CUDA_NUM_THREADS),0,Caffe::cuda_stream(), count, ignore_label_, target, bottom_diff);
    }
    // Scale down gradient
    Dtype loss_weight = top[0]->cpu_diff()[0] / normalizer_;
    caffe_gpu_scal(count, loss_weight, bottom_diff);
  }
}
INSTANTIATE_LAYER_GPU_FUNCS(SigmoidCrossEntropyLossLayer);
} // namespace caffe
| 919d7119fde8369d402bc6da0cdbe8e958b4b6ad.cu | #include <vector>
#include "caffe/layers/sigmoid_cross_entropy_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
// Per-element numerically stable sigmoid cross-entropy.
// For logit x = input_data[i] and target t = target[i]:
//   loss[i] = x*(t - 1[x>=0]) - log(1 + exp(x - 2x*1[x>=0]))
// which equals x*(t-1) - log(1+exp(-x)) for x >= 0 and
// x*t - log(1+exp(x)) for x < 0, keeping the exp() argument non-positive.
// counts[i] records whether the element participates in normalization.
template <typename Dtype>
__global__ void SigmoidCrossEntropyLossForwardGPU(const int nthreads,
    const Dtype* input_data, const Dtype* target, Dtype* loss,
    const bool has_ignore_label_, const int ignore_label_,
    Dtype* counts) {
  CUDA_KERNEL_LOOP(i, nthreads) {
    const int target_value = static_cast<int>(target[i]);
    if (has_ignore_label_ && target_value == ignore_label_) {
      // Ignored elements contribute neither loss nor count.
      loss[i] = 0;
      counts[i] = 0;
    } else {
      loss[i] = input_data[i] * (target[i] - (input_data[i] >= 0)) -
          log(1 + exp(input_data[i] - 2 * input_data[i] *
          (input_data[i] >= 0)));
      counts[i] = 1;
    }
  }
}
// Zeroes the gradient wherever the integer-cast label matches the ignore
// marker, so ignored positions contribute nothing to backprop.
template <typename Dtype>
__global__ void SigmoidCrossEntropyLossIgnoreDiffGPU(const int count,
    const int ignore_label, const Dtype* target, Dtype* diff) {
  CUDA_KERNEL_LOOP(i, count) {
    if (static_cast<int>(target[i]) == ignore_label) {
      diff[i] = 0;
    }
  }
}
// Forward pass of the sigmoid cross-entropy loss.
// Runs the internal sigmoid layer, then evaluates the stable per-element
// loss directly from the logits, reduces it with asum, and stores the
// normalized scalar loss in top[0].
template <typename Dtype>
void SigmoidCrossEntropyLossLayer<Dtype>::Forward_gpu(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  // The forward pass computes the sigmoid outputs.
  sigmoid_bottom_vec_[0] = bottom[0];
  sigmoid_layer_->Forward(sigmoid_bottom_vec_, sigmoid_top_vec_);
  // Compute the loss (negative log likelihood)
  const int count = bottom[0]->count();
  // Stable version of loss computation from input data
  const Dtype* input_data = bottom[0]->gpu_data();
  const Dtype* target = bottom[1]->gpu_data();
  // Since this memory is not used for anything, we use it here to avoid having
  // to allocate new GPU memory to accumulate intermediate results.
  Dtype* loss_data = bottom[0]->mutable_gpu_diff();
  Dtype* count_data = bottom[1]->mutable_gpu_diff();
  Dtype valid_count;
  // NOLINT_NEXT_LINE(whitespace/operators)
  SigmoidCrossEntropyLossForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(count),
      CAFFE_CUDA_NUM_THREADS,0,Caffe::cuda_stream()>>>(count, input_data, target, loss_data,
      has_ignore_label_, ignore_label_, count_data);
  // Only launch another CUDA kernel if we actually need the valid count.
  if (normalization_ == LossParameter_NormalizationMode_VALID &&
      has_ignore_label_) {
    caffe_gpu_asum(count, count_data, &valid_count);
  } else {
    valid_count = count;
  }
  Dtype loss;
  // asum sums absolute values; the per-element log-likelihoods are
  // non-positive, so this yields the positive total loss.
  caffe_gpu_asum(count, loss_data, &loss);
  normalizer_ = get_normalizer(normalization_, valid_count);
  top[0]->mutable_cpu_data()[0] = loss / normalizer_;
  // Clear scratch memory to prevent interfering with backward (see #6202).
  caffe_gpu_set(bottom[0]->count(), Dtype(0), bottom[0]->mutable_gpu_diff());
  caffe_gpu_set(bottom[1]->count(), Dtype(0), bottom[1]->mutable_gpu_diff());
}
// Backward pass: d(loss)/d(x_i) = sigmoid(x_i) - t_i, scaled by the top
// gradient divided by the normalizer computed in Forward_gpu. Gradients for
// ignored labels are zeroed; no gradient is defined w.r.t. the labels.
template <typename Dtype>
void SigmoidCrossEntropyLossLayer<Dtype>::Backward_gpu(
    const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
  if (propagate_down[1]) {
    LOG(FATAL) << this->type()
               << " Layer cannot backpropagate to label inputs.";
  }
  if (propagate_down[0]) {
    // First, compute the diff
    const int count = bottom[0]->count();
    const Dtype* sigmoid_output_data = sigmoid_output_->gpu_data();
    const Dtype* target = bottom[1]->gpu_data();
    Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
    // bottom_diff = sigmoid(x) - t
    caffe_copy(count, sigmoid_output_data, bottom_diff);
    caffe_gpu_axpy(count, Dtype(-1), target, bottom_diff);
    // Zero out gradient of ignored targets.
    if (has_ignore_label_) {
      // NOLINT_NEXT_LINE(whitespace/operators)
      SigmoidCrossEntropyLossIgnoreDiffGPU<Dtype><<<CAFFE_GET_BLOCKS(count),
          CAFFE_CUDA_NUM_THREADS,0,Caffe::cuda_stream()>>>(count, ignore_label_, target, bottom_diff);
    }
    // Scale down gradient
    Dtype loss_weight = top[0]->cpu_diff()[0] / normalizer_;
    caffe_gpu_scal(count, loss_weight, bottom_diff);
  }
}
INSTANTIATE_LAYER_GPU_FUNCS(SigmoidCrossEntropyLossLayer);
} // namespace caffe
|
4b360f360262c60717cca0050b1c1fa7591ae80b.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright 2018 Stanford
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "taso/ops.h"
#include "taso/cuda_helper.h"
using namespace taso;
// Creates cuDNN descriptors and allocates device memory for this conv op.
// Configures input/bias/output tensor descriptors, filter and convolution
// descriptors (including grouped convolution when channel counts imply
// groups > 1), validates the cuDNN-inferred output shape against
// outputs[0], and allocates a zero-initialized bias buffer plus the output
// tensor. unmap() releases everything created here.
void Conv2D::map(void)
{
  // create descriptors
  checkCUDNN(cudnnCreateTensorDescriptor(&inputTensor));
  checkCUDNN(cudnnCreateTensorDescriptor(&biasTensor));
  checkCUDNN(cudnnCreateTensorDescriptor(&outputTensor));
  checkCUDNN(cudnnCreateFilterDescriptor(&filterDesc));
  checkCUDNN(cudnnCreateConvolutionDescriptor(&convDesc));
  int inputN = inputs[0].dim[0];
  int inputC = inputs[0].dim[1];
  int inputH = inputs[0].dim[2];
  int inputW = inputs[0].dim[3];
  int outputC = inputs[1].dim[0];
  // Grouped conv when the input channel count exceeds the filter's
  // per-group channel count (inputs[1].dim[1]).
  int groups = inputs[0].dim[1] / inputs[1].dim[1];
  int padH, padW;
  get_padding(&padH, &padW);
  // set descriptors
  checkCUDNN(cudnnSetTensor4dDescriptor(inputTensor, CUDNN_TENSOR_NCHW,
      CUDNN_DATA_FLOAT, inputN, inputC, inputH, inputW));
  checkCUDNN(cudnnSetTensor4dDescriptor(biasTensor, CUDNN_TENSOR_NCHW,
      CUDNN_DATA_FLOAT, 1, outputC, 1, 1));
  checkCUDNN(cudnnSetFilter4dDescriptor(filterDesc, CUDNN_DATA_FLOAT,
      CUDNN_TENSOR_NCHW, inputs[1].dim[0], inputs[1].dim[1],
      inputs[1].dim[2], inputs[1].dim[3]));
  checkCUDNN(cudnnSetConvolution2dDescriptor(convDesc, padH, padW,
      strideH, strideW, 1/*dilationH*/, 1/*dilationW*/,
      CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
  // Allow Tensor Core math where available.
  checkCUDNN(cudnnSetConvolutionMathType(convDesc, CUDNN_TENSOR_OP_MATH));
  if (groups != 1) {
    checkCUDNN(cudnnSetConvolutionGroupCount(convDesc, groups));
  }
  int n, c, h, w;
  // Sanity-check cuDNN's inferred output shape against the graph metadata.
  checkCUDNN(cudnnGetConvolution2dForwardOutputDim(convDesc,
      inputTensor, filterDesc, &n, &c, &h, &w));
  assert(n == inputN);
  assert(c == outputC);
  assert(outputs[0].dim[2] == h);
  assert(outputs[0].dim[3] == w);
  checkCUDNN(cudnnSetTensor4dDescriptor(outputTensor, CUDNN_TENSOR_NCHW,
      CUDNN_DATA_FLOAT, n, c, h, w));
  if (activation != AC_MODE_NONE) {
    checkCUDNN(cudnnCreateActivationDescriptor(&actiDesc));
    cudnnActivationMode_t mode = get_activation_mode(activation);
    checkCUDNN(cudnnSetActivationDescriptor(actiDesc, mode,
        CUDNN_NOT_PROPAGATE_NAN, 0.0));
  }
  // allocate tensors (bias starts zeroed; no bias input tensor exists)
  size_t outputSize = sizeof(DATATYPE) * n * c * h * w;
  size_t biasSize = sizeof(DATATYPE) * outputC;
  checkCUDA(hipMalloc(&biasPtr, biasSize));
  checkCUDA(hipMemset(biasPtr, 0, biasSize));
  checkCUDA(hipMalloc(&outputs[0].data_ptr, outputSize));
}
// Releases everything map() created: the cuDNN descriptors, the activation
// descriptor (only when one was created), and the device buffers for the
// output tensor and the bias.
void Conv2D::unmap(void)
{
  checkCUDNN(cudnnDestroyTensorDescriptor(inputTensor));
  checkCUDNN(cudnnDestroyTensorDescriptor(biasTensor));
  checkCUDNN(cudnnDestroyTensorDescriptor(outputTensor));
  checkCUDNN(cudnnDestroyFilterDescriptor(filterDesc));
  checkCUDNN(cudnnDestroyConvolutionDescriptor(convDesc));
  if (activation != AC_MODE_NONE) {
    checkCUDNN(cudnnDestroyActivationDescriptor(actiDesc));
  }
  // free tensors
  checkCUDA(hipFree(outputs[0].data_ptr));
  checkCUDA(hipFree(biasPtr));
}
// Runs one forward convolution using the descriptors prepared by map().
// With a fused activation, a single cudnnConvolutionBiasActivationForward
// call does conv + bias + activation; otherwise the plain convolution is
// followed by a separate bias add. When `block` is true the call waits for
// the device, surfacing any asynchronous errors before returning.
void Conv2D::forward(bool block)
{
  const float alpha = 1.0f;
  const float beta = 0.0f;
  if (activation != AC_MODE_NONE) {
    checkCUDNN(cudnnConvolutionBiasActivationForward(
        model->dnn, &alpha, inputTensor, inputs[0].data_ptr, filterDesc, inputs[1].data_ptr,
        convDesc, fwdAlgo, model->workSpace, model->workSpaceSize,
        &beta, outputTensor, outputs[0].data_ptr, biasTensor, biasPtr, actiDesc,
        outputTensor, outputs[0].data_ptr));
  } else {
    checkCUDNN(cudnnConvolutionForward(
        model->dnn, &alpha, inputTensor, inputs[0].data_ptr, filterDesc, inputs[1].data_ptr,
        convDesc, fwdAlgo, model->workSpace, model->workSpaceSize,
        &beta, outputTensor, outputs[0].data_ptr));
    checkCUDNN(cudnnAddTensor(model->dnn, &alpha, biasTensor, biasPtr,
        &alpha, outputTensor, outputs[0].data_ptr));
  }
  if (block)
    checkCUDA(hipDeviceSynchronize());
}
// Benchmarks one forward-convolution configuration.
// Derives SAME/VALID padding (TensorFlow semantics), programs the model's
// shared scratch descriptors, lets cuDNN pick the fastest forward algorithm
// for this shape, then times WARMUP_TIMES warmups plus REPEAT_TIMES
// measured iterations with CUDA events and stores the per-iteration average
// (ms) in conv->runtime.
void Model::measure_conv2d_cost(Conv2D* conv)
{
  const float alpha = 1.0f;
  const float beta = 0.0f;
  int inputN = conv->inputs[0].dim[0];
  int inputC = conv->inputs[0].dim[1];
  int inputH = conv->inputs[0].dim[2];
  int inputW = conv->inputs[0].dim[3];
  int kernelH = conv->inputs[1].dim[2];
  int kernelW = conv->inputs[1].dim[3];
  int outputC = conv->outputs[0].dim[1];
  int outputH = conv->outputs[0].dim[2];
  int outputW = conv->outputs[0].dim[3];
  int groups = conv->inputs[0].dim[1] / conv->inputs[1].dim[1];
  int padH, padW;
  // Reference: https://www.tensorflow.org/api_guides/python/nn#Convolution
  switch (conv->padding) {
    case PD_MODE_SAME:
      int totalPadH, totalPadW;
      if (inputH % conv->strideH == 0)
        totalPadH = max(kernelH - conv->strideH, 0);
      else
        totalPadH = max(kernelH - (inputH % conv->strideH), 0);
      if (inputW % conv->strideW == 0)
        totalPadW = max(kernelW - conv->strideW, 0);
      else
        totalPadW = max(kernelW - (inputW % conv->strideW), 0);
      // assert same padding on both sides
      padH = (totalPadH + 1) / 2;
      padW = (totalPadW + 1) / 2;
      break;
    case PD_MODE_VALID:
      padH = 0;
      padW = 0;
      break;
    default:
      assert(false);
  }
  checkCUDNN(cudnnSetTensor4dDescriptor(inputTensor, CUDNN_TENSOR_NCHW,
      CUDNN_DATA_FLOAT, inputN, inputC, inputH, inputW));
  checkCUDNN(cudnnSetTensor4dDescriptor(biasTensor, CUDNN_TENSOR_NCHW,
      CUDNN_DATA_FLOAT, 1, outputC, 1, 1));
  checkCUDNN(cudnnSetFilter4dDescriptor(filterDesc, CUDNN_DATA_FLOAT,
      CUDNN_TENSOR_NCHW, conv->inputs[1].dim[0], conv->inputs[1].dim[1],
      conv->inputs[1].dim[2], conv->inputs[1].dim[3]));
  checkCUDNN(cudnnSetConvolution2dDescriptor(convDesc, padH, padW,
      conv->strideH, conv->strideW, 1/*dilationH*/, 1/*dilationW*/,
      CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
  checkCUDNN(cudnnSetConvolutionMathType(convDesc, CUDNN_TENSOR_OP_MATH));
  checkCUDNN(cudnnSetConvolutionGroupCount(convDesc, groups));
  checkCUDNN(cudnnSetActivationDescriptor(actiDesc, CUDNN_ACTIVATION_RELU,
      CUDNN_NOT_PROPAGATE_NAN, 0.0));
  int n, c, h, w;
  // Cross-check cuDNN's inferred output shape against the graph metadata.
  checkCUDNN(cudnnGetConvolution2dForwardOutputDim(convDesc,
      inputTensor, filterDesc, &n, &c, &h, &w));
  assert(n == inputN);
  assert(c == outputC);
  assert(outputH == h);
  assert(outputW == w);
  checkCUDNN(cudnnSetTensor4dDescriptor(outputTensor, CUDNN_TENSOR_NCHW,
      CUDNN_DATA_FLOAT, n, c, h, w));
  size_t inputSize = sizeof(DATATYPE) * inputN * inputC * inputH * inputW;
  // NOTE(review): this filter-size formula ignores grouping (the actual
  // filter tensor is outputC * (inputC/groups) * kH * kW), so it
  // over-estimates for grouped convs; it is only used for the
  // MAX_TENSOR_SIZE sanity asserts below.
  size_t filterSize = sizeof(DATATYPE) * inputC * outputC
      * kernelH * kernelW;
  size_t outputSize = sizeof(DATATYPE) * n * c * h * w;
  assert(inputSize < MAX_TENSOR_SIZE);
  assert(filterSize < MAX_TENSOR_SIZE);
  assert(outputSize < MAX_TENSOR_SIZE);
  // Let cuDNN benchmark candidate algorithms in the shared workspace and
  // keep the fastest one for this op.
  const int reqAlgCnt = 8;
  int cnt = 0;
  cudnnConvolutionFwdAlgoPerf_t perfResults[reqAlgCnt];
  checkCUDNN(cudnnFindConvolutionForwardAlgorithmEx(
      dnn, inputTensor, inputPtr, filterDesc, filterPtr, convDesc,
      outputTensor, outputPtr, reqAlgCnt, &cnt, perfResults,
      workSpace, workSpaceSize));
  assert(cnt > 0);
  checkCUDNN(perfResults[0].status);
  //for (int i = 0; i < cnt; i++) {
  //printf("fwdAlgo(%d) time(%.2lfms) space(%dMB)\n", perfResults[i].algo,
  // perfResults[i].time, perfResults[i].memory / 1024 / 1024);
  //}
  conv->fwdAlgo = perfResults[0].algo;
  checkCUDA(hipDeviceSynchronize());
  for (int i = 0; i < WARMUP_TIMES + REPEAT_TIMES; i++) {
    if (i == WARMUP_TIMES) {
      // Start timing only after the warmup iterations.
      checkCUDA(hipEventRecord(startEvent));
    }
    if (conv->activation != AC_MODE_NONE) {
      checkCUDNN(cudnnConvolutionBiasActivationForward(
          dnn, &alpha, inputTensor, inputPtr, filterDesc, filterPtr,
          convDesc, conv->fwdAlgo, workSpace, workSpaceSize,
          &beta, outputTensor, outputPtr, biasTensor, biasPtr, actiDesc,
          outputTensor, outputPtr));
    } else {
      checkCUDNN(cudnnConvolutionForward(
          dnn, &alpha, inputTensor, inputPtr, filterDesc, filterPtr,
          convDesc, conv->fwdAlgo, workSpace, workSpaceSize,
          &beta, outputTensor, outputPtr));
      checkCUDNN(cudnnAddTensor(dnn, &alpha, biasTensor, biasPtr,
          &alpha, outputTensor, outputPtr));
    }
  }
  checkCUDA(hipEventRecord(endEvent));
  checkCUDA(hipEventSynchronize(endEvent));
  float milliseconds;
  // NOTE(review): unlike the surrounding calls, this return status is not
  // wrapped in checkCUDA — consider checking it for consistency.
  hipEventElapsedTime(&milliseconds, startEvent, endEvent);
  conv->runtime = milliseconds / REPEAT_TIMES;
  if (print_cost)
    printf(" measure[Conv2D]: i(%d %d %d %d) w(%d %d %d %d) s(%d %d) p(%d %d) cost(%.4lf)\n",
        conv->inputs[0].dim[0], conv->inputs[0].dim[1], conv->inputs[0].dim[2], conv->inputs[0].dim[3],
        conv->inputs[1].dim[0], conv->inputs[1].dim[1], conv->inputs[1].dim[2], conv->inputs[1].dim[3],
        conv->strideH, conv->strideW, padH, padW, conv->runtime);
}
| 4b360f360262c60717cca0050b1c1fa7591ae80b.cu | /* Copyright 2018 Stanford
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "taso/ops.h"
#include "taso/cuda_helper.h"
using namespace taso;
// Creates cuDNN descriptors and allocates device memory for this conv op.
// Configures input/bias/output tensor descriptors, filter and convolution
// descriptors (including grouped convolution when channel counts imply
// groups > 1), validates the cuDNN-inferred output shape against
// outputs[0], and allocates a zero-initialized bias buffer plus the output
// tensor. unmap() releases everything created here.
void Conv2D::map(void)
{
  // create descriptors
  checkCUDNN(cudnnCreateTensorDescriptor(&inputTensor));
  checkCUDNN(cudnnCreateTensorDescriptor(&biasTensor));
  checkCUDNN(cudnnCreateTensorDescriptor(&outputTensor));
  checkCUDNN(cudnnCreateFilterDescriptor(&filterDesc));
  checkCUDNN(cudnnCreateConvolutionDescriptor(&convDesc));
  int inputN = inputs[0].dim[0];
  int inputC = inputs[0].dim[1];
  int inputH = inputs[0].dim[2];
  int inputW = inputs[0].dim[3];
  int outputC = inputs[1].dim[0];
  // Grouped conv when the input channel count exceeds the filter's
  // per-group channel count (inputs[1].dim[1]).
  int groups = inputs[0].dim[1] / inputs[1].dim[1];
  int padH, padW;
  get_padding(&padH, &padW);
  // set descriptors
  checkCUDNN(cudnnSetTensor4dDescriptor(inputTensor, CUDNN_TENSOR_NCHW,
      CUDNN_DATA_FLOAT, inputN, inputC, inputH, inputW));
  checkCUDNN(cudnnSetTensor4dDescriptor(biasTensor, CUDNN_TENSOR_NCHW,
      CUDNN_DATA_FLOAT, 1, outputC, 1, 1));
  checkCUDNN(cudnnSetFilter4dDescriptor(filterDesc, CUDNN_DATA_FLOAT,
      CUDNN_TENSOR_NCHW, inputs[1].dim[0], inputs[1].dim[1],
      inputs[1].dim[2], inputs[1].dim[3]));
  checkCUDNN(cudnnSetConvolution2dDescriptor(convDesc, padH, padW,
      strideH, strideW, 1/*dilationH*/, 1/*dilationW*/,
      CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
  // Allow Tensor Core math where available.
  checkCUDNN(cudnnSetConvolutionMathType(convDesc, CUDNN_TENSOR_OP_MATH));
  if (groups != 1) {
    checkCUDNN(cudnnSetConvolutionGroupCount(convDesc, groups));
  }
  int n, c, h, w;
  // Sanity-check cuDNN's inferred output shape against the graph metadata.
  checkCUDNN(cudnnGetConvolution2dForwardOutputDim(convDesc,
      inputTensor, filterDesc, &n, &c, &h, &w));
  assert(n == inputN);
  assert(c == outputC);
  assert(outputs[0].dim[2] == h);
  assert(outputs[0].dim[3] == w);
  checkCUDNN(cudnnSetTensor4dDescriptor(outputTensor, CUDNN_TENSOR_NCHW,
      CUDNN_DATA_FLOAT, n, c, h, w));
  if (activation != AC_MODE_NONE) {
    checkCUDNN(cudnnCreateActivationDescriptor(&actiDesc));
    cudnnActivationMode_t mode = get_activation_mode(activation);
    checkCUDNN(cudnnSetActivationDescriptor(actiDesc, mode,
        CUDNN_NOT_PROPAGATE_NAN, 0.0));
  }
  // allocate tensors (bias starts zeroed; no bias input tensor exists)
  size_t outputSize = sizeof(DATATYPE) * n * c * h * w;
  size_t biasSize = sizeof(DATATYPE) * outputC;
  checkCUDA(cudaMalloc(&biasPtr, biasSize));
  checkCUDA(cudaMemset(biasPtr, 0, biasSize));
  checkCUDA(cudaMalloc(&outputs[0].data_ptr, outputSize));
}
// Releases everything map() created: the cuDNN descriptors, the activation
// descriptor (only when one was created), and the device buffers for the
// output tensor and the bias.
void Conv2D::unmap(void)
{
  checkCUDNN(cudnnDestroyTensorDescriptor(inputTensor));
  checkCUDNN(cudnnDestroyTensorDescriptor(biasTensor));
  checkCUDNN(cudnnDestroyTensorDescriptor(outputTensor));
  checkCUDNN(cudnnDestroyFilterDescriptor(filterDesc));
  checkCUDNN(cudnnDestroyConvolutionDescriptor(convDesc));
  if (activation != AC_MODE_NONE) {
    checkCUDNN(cudnnDestroyActivationDescriptor(actiDesc));
  }
  // free tensors
  checkCUDA(cudaFree(outputs[0].data_ptr));
  checkCUDA(cudaFree(biasPtr));
}
// Run the convolution forward pass with the algorithm selected earlier
// (fwdAlgo). When an activation is fused, use the single fused
// conv+bias+activation cuDNN call; otherwise run the plain convolution
// followed by a bias add. beta == 0 so the output buffer is overwritten.
// If `block` is true, wait for the GPU to finish before returning.
void Conv2D::forward(bool block)
{
const float alpha = 1.0f;
const float beta = 0.0f;
if (activation != AC_MODE_NONE) {
checkCUDNN(cudnnConvolutionBiasActivationForward(
model->dnn, &alpha, inputTensor, inputs[0].data_ptr, filterDesc, inputs[1].data_ptr,
convDesc, fwdAlgo, model->workSpace, model->workSpaceSize,
&beta, outputTensor, outputs[0].data_ptr, biasTensor, biasPtr, actiDesc,
outputTensor, outputs[0].data_ptr));
} else {
checkCUDNN(cudnnConvolutionForward(
model->dnn, &alpha, inputTensor, inputs[0].data_ptr, filterDesc, inputs[1].data_ptr,
convDesc, fwdAlgo, model->workSpace, model->workSpaceSize,
&beta, outputTensor, outputs[0].data_ptr));
// second alpha scales the existing output, so this adds bias in place
checkCUDNN(cudnnAddTensor(model->dnn, &alpha, biasTensor, biasPtr,
&alpha, outputTensor, outputs[0].data_ptr));
}
if (block)
checkCUDA(cudaDeviceSynchronize());
}
// Benchmark the forward pass of `conv`: configure cuDNN descriptors from
// the operator's shapes, let cuDNN pick the fastest forward algorithm
// (stored in conv->fwdAlgo), then time WARMUP_TIMES + REPEAT_TIMES
// iterations with CUDA events and store the averaged per-iteration
// runtime in milliseconds in conv->runtime.
void Model::measure_conv2d_cost(Conv2D* conv)
{
const float alpha = 1.0f;
const float beta = 0.0f;
int inputN = conv->inputs[0].dim[0];
int inputC = conv->inputs[0].dim[1];
int inputH = conv->inputs[0].dim[2];
int inputW = conv->inputs[0].dim[3];
int kernelH = conv->inputs[1].dim[2];
int kernelW = conv->inputs[1].dim[3];
int outputC = conv->outputs[0].dim[1];
int outputH = conv->outputs[0].dim[2];
int outputW = conv->outputs[0].dim[3];
// grouped convolution: the filter's channel dim (inputs[1].dim[1]) is
// inputC / groups, so the ratio recovers the group count
int groups = conv->inputs[0].dim[1] / conv->inputs[1].dim[1];
int padH, padW;
// Reference: https://www.tensorflow.org/api_guides/python/nn#Convolution
switch (conv->padding) {
case PD_MODE_SAME:
int totalPadH, totalPadW;
if (inputH % conv->strideH == 0)
totalPadH = max(kernelH - conv->strideH, 0);
else
totalPadH = max(kernelH - (inputH % conv->strideH), 0);
if (inputW % conv->strideW == 0)
totalPadW = max(kernelW - conv->strideW, 0);
else
totalPadW = max(kernelW - (inputW % conv->strideW), 0);
// assert same padding on both sides
padH = (totalPadH + 1) / 2;
padW = (totalPadW + 1) / 2;
break;
case PD_MODE_VALID:
padH = 0;
padW = 0;
break;
default:
assert(false);
}
checkCUDNN(cudnnSetTensor4dDescriptor(inputTensor, CUDNN_TENSOR_NCHW,
CUDNN_DATA_FLOAT, inputN, inputC, inputH, inputW));
checkCUDNN(cudnnSetTensor4dDescriptor(biasTensor, CUDNN_TENSOR_NCHW,
CUDNN_DATA_FLOAT, 1, outputC, 1, 1));
checkCUDNN(cudnnSetFilter4dDescriptor(filterDesc, CUDNN_DATA_FLOAT,
CUDNN_TENSOR_NCHW, conv->inputs[1].dim[0], conv->inputs[1].dim[1],
conv->inputs[1].dim[2], conv->inputs[1].dim[3]));
checkCUDNN(cudnnSetConvolution2dDescriptor(convDesc, padH, padW,
conv->strideH, conv->strideW, 1/*dilationH*/, 1/*dilationW*/,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
checkCUDNN(cudnnSetConvolutionMathType(convDesc, CUDNN_TENSOR_OP_MATH));
checkCUDNN(cudnnSetConvolutionGroupCount(convDesc, groups));
checkCUDNN(cudnnSetActivationDescriptor(actiDesc, CUDNN_ACTIVATION_RELU,
CUDNN_NOT_PROPAGATE_NAN, 0.0));
// sanity-check the output shape cuDNN derives against the operator's
int n, c, h, w;
checkCUDNN(cudnnGetConvolution2dForwardOutputDim(convDesc,
inputTensor, filterDesc, &n, &c, &h, &w));
assert(n == inputN);
assert(c == outputC);
assert(outputH == h);
assert(outputW == w);
checkCUDNN(cudnnSetTensor4dDescriptor(outputTensor, CUDNN_TENSOR_NCHW,
CUDNN_DATA_FLOAT, n, c, h, w));
size_t inputSize = sizeof(DATATYPE) * inputN * inputC * inputH * inputW;
// BUGFIX: compute the filter size from the actual filter tensor dims.
// The previous inputC * outputC * kernelH * kernelW overestimated the
// size by a factor of `groups` for grouped convolutions, since the
// filter's channel dim is inputC / groups.
size_t filterSize = sizeof(DATATYPE) * conv->inputs[1].dim[0]
* conv->inputs[1].dim[1] * conv->inputs[1].dim[2] * conv->inputs[1].dim[3];
size_t outputSize = sizeof(DATATYPE) * n * c * h * w;
assert(inputSize < MAX_TENSOR_SIZE);
assert(filterSize < MAX_TENSOR_SIZE);
assert(outputSize < MAX_TENSOR_SIZE);
// ask cuDNN to benchmark candidate algorithms within our workspace
const int reqAlgCnt = 8;
int cnt = 0;
cudnnConvolutionFwdAlgoPerf_t perfResults[reqAlgCnt];
checkCUDNN(cudnnFindConvolutionForwardAlgorithmEx(
dnn, inputTensor, inputPtr, filterDesc, filterPtr, convDesc,
outputTensor, outputPtr, reqAlgCnt, &cnt, perfResults,
workSpace, workSpaceSize));
assert(cnt > 0);
checkCUDNN(perfResults[0].status);
//for (int i = 0; i < cnt; i++) {
//printf("fwdAlgo(%d) time(%.2lfms) space(%dMB)\n", perfResults[i].algo,
// perfResults[i].time, perfResults[i].memory / 1024 / 1024);
//}
// perfResults is sorted by time; take the fastest algorithm
conv->fwdAlgo = perfResults[0].algo;
checkCUDA(cudaDeviceSynchronize());
// timed loop: start the event after the warm-up iterations
for (int i = 0; i < WARMUP_TIMES + REPEAT_TIMES; i++) {
if (i == WARMUP_TIMES) {
checkCUDA(cudaEventRecord(startEvent));
}
if (conv->activation != AC_MODE_NONE) {
checkCUDNN(cudnnConvolutionBiasActivationForward(
dnn, &alpha, inputTensor, inputPtr, filterDesc, filterPtr,
convDesc, conv->fwdAlgo, workSpace, workSpaceSize,
&beta, outputTensor, outputPtr, biasTensor, biasPtr, actiDesc,
outputTensor, outputPtr));
} else {
checkCUDNN(cudnnConvolutionForward(
dnn, &alpha, inputTensor, inputPtr, filterDesc, filterPtr,
convDesc, conv->fwdAlgo, workSpace, workSpaceSize,
&beta, outputTensor, outputPtr));
checkCUDNN(cudnnAddTensor(dnn, &alpha, biasTensor, biasPtr,
&alpha, outputTensor, outputPtr));
}
}
checkCUDA(cudaEventRecord(endEvent));
checkCUDA(cudaEventSynchronize(endEvent));
float milliseconds;
// wrapped in checkCUDA for consistency with every other CUDA call here
checkCUDA(cudaEventElapsedTime(&milliseconds, startEvent, endEvent));
conv->runtime = milliseconds / REPEAT_TIMES;
if (print_cost)
printf(" measure[Conv2D]: i(%d %d %d %d) w(%d %d %d %d) s(%d %d) p(%d %d) cost(%.4lf)\n",
conv->inputs[0].dim[0], conv->inputs[0].dim[1], conv->inputs[0].dim[2], conv->inputs[0].dim[3],
conv->inputs[1].dim[0], conv->inputs[1].dim[1], conv->inputs[1].dim[2], conv->inputs[1].dim[3],
conv->strideH, conv->strideW, padH, padW, conv->runtime);
}
|
8dc0bb61cbdc5646a3581603fb027b50976a19c6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "lite/backends/cuda/math/elementwise.h"
#include "lite/backends/cuda/math/utils.h"
namespace paddle {
namespace lite {
namespace cuda {
namespace math {
// Element-wise addition: out[i] = x[i] + y[i], one thread per element.
// Callers size the grid so that block * thread covers `total`.
template <typename Dtype>
__global__ void elementwise_add_kernel(const size_t total,
const Dtype* x_data,
const Dtype* y_data,
Dtype* out_data) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < total) {
// __CUDA_ARCH__ is not defined when compiling for HIP/ROCm, so the
// guard must also test __HIP_PLATFORM_HCC__ or the __ldg read-only
// cache path would silently be skipped on AMD devices (same idiom as
// the other hipified kernels in this repository).
#if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__)
out_data[tid] = __ldg(x_data + tid) + __ldg(y_data + tid);
#else
out_data[tid] = x_data[tid] + y_data[tid];
#endif
}
}
// Element-wise float32 addition with scaled int8 output:
// out[i] = from_float<int8_t>((x[i] + y[i]) * alpha).
__global__ void elementwise_add_int8_kernel(const size_t total,
const float* x_data,
const float* y_data,
const float alpha,
int8_t* out_data) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < total) {
float temp_d;
// __CUDA_ARCH__ is undefined under HIP/ROCm; also test
// __HIP_PLATFORM_HCC__ so the __ldg fast path is used on AMD devices.
#if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__)
temp_d = __ldg(x_data + tid) + __ldg(y_data + tid);
#else
temp_d = x_data[tid] + y_data[tid];
#endif
out_data[tid] = from_float<int8_t>(temp_d * alpha);
}
}
// Element-wise add on NHWC4-packed data: each thread handles one group
// of four channels (a float4 from each input), scales the sum by `alpha`
// and converts each lane to int8 (char4 output). `total` counts
// float4/char4 groups, not scalar elements.
__global__ void elementwise_add_nhwc4_int8_kernel(const size_t total,
const float4* x_data,
const float4* y_data,
const float alpha,
char4* out_data) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < total) {
// vectorized 16-byte loads of the four channel values
const float4 x_d = x_data[tid];
const float4 y_d = y_data[tid];
float4 packed_val;
char4 result_val;
packed_val.x = (x_d.x + y_d.x) * alpha;
result_val.x = from_float<int8_t>(packed_val.x);
packed_val.y = (x_d.y + y_d.y) * alpha;
result_val.y = from_float<int8_t>(packed_val.y);
packed_val.z = (x_d.z + y_d.z) * alpha;
result_val.z = from_float<int8_t>(packed_val.z);
packed_val.w = (x_d.w + y_d.w) * alpha;
result_val.w = from_float<int8_t>(packed_val.w);
// single 4-byte vectorized store
out_data[tid] = result_val;
}
}
// Host-side launcher for elementwise_add_kernel: `num` elements,
// 256 threads per block, grid sized by ceil-division. The kernel is
// enqueued on the caller-provided stream.
template <typename Dtype>
void elementwise_add(int num,
const Dtype* x_data,
const Dtype* y_data,
Dtype* out_data,
hipStream_t stream) {
int thread = 256;
int block = (num + thread - 1) / thread;
hipLaunchKernelGGL(( elementwise_add_kernel), dim3(block), dim3(thread), 0, stream,
num, x_data, y_data, out_data);
}
// explicit instantiation for the float specialization used by callers
template void elementwise_add(
int, const float*, const float*, float*, hipStream_t);
// input type is float32
// output type is int8
// Element-wise add of two float32 buffers with scaled int8 output:
// out[i] = from_float<int8_t>((x[i] + y[i]) * alpha). `num` elements,
// launched on the caller-provided stream.
void elementwise_add_int8(int num,
const float* x_data,
const float* y_data,
const float alpha,
int8_t* out_data,
hipStream_t stream) {
int thread = 256;
int block = (num + thread - 1) / thread;
// BUGFIX: launch on the caller's stream. The previous launch passed the
// default (null) stream and ignored `stream`, breaking the stream
// ordering the signature promises (the sibling elementwise_add already
// uses `stream` correctly).
hipLaunchKernelGGL(( elementwise_add_int8_kernel), dim3(block), dim3(thread), 0, stream,
num, x_data, y_data, alpha, out_data);
}
// Element-wise add on NHWC4-packed data (float4 inputs, char4 int8
// output), scaled by `alpha`. `num` counts float4/char4 groups.
// Launched on the caller-provided stream.
void elementwise_add_nhwc4_int8(int num,
const void* x_data,
const void* y_data,
const float alpha,
void* out_data,
hipStream_t stream) {
int thread = 512;
int block = (num + thread - 1) / thread;
// BUGFIX: launch on the caller's stream instead of the default (null)
// stream; `stream` was previously accepted but ignored, which breaks
// the caller's stream ordering.
hipLaunchKernelGGL(( elementwise_add_nhwc4_int8_kernel), dim3(block), dim3(thread), 0, stream,
num,
static_cast<const float4*>(x_data),
static_cast<const float4*>(y_data),
alpha,
static_cast<char4*>(out_data));
}
} // namespace math
} // namespace cuda
} // namespace lite
} // namespace paddle
| 8dc0bb61cbdc5646a3581603fb027b50976a19c6.cu | // Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "lite/backends/cuda/math/elementwise.h"
#include "lite/backends/cuda/math/utils.h"
namespace paddle {
namespace lite {
namespace cuda {
namespace math {
// Element-wise addition: out[i] = x[i] + y[i], one thread per element.
// Callers size the grid so that block * thread covers `total`.
template <typename Dtype>
__global__ void elementwise_add_kernel(const size_t total,
const Dtype* x_data,
const Dtype* y_data,
Dtype* out_data) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < total) {
// on SM35+ use __ldg to route the loads through the read-only cache
#if __CUDA_ARCH__ >= 350
out_data[tid] = __ldg(x_data + tid) + __ldg(y_data + tid);
#else
out_data[tid] = x_data[tid] + y_data[tid];
#endif
}
}
// Element-wise float32 addition with scaled int8 output:
// out[i] = from_float<int8_t>((x[i] + y[i]) * alpha).
__global__ void elementwise_add_int8_kernel(const size_t total,
const float* x_data,
const float* y_data,
const float alpha,
int8_t* out_data) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < total) {
float temp_d;
// on SM35+ use __ldg to route the loads through the read-only cache
#if __CUDA_ARCH__ >= 350
temp_d = __ldg(x_data + tid) + __ldg(y_data + tid);
#else
temp_d = x_data[tid] + y_data[tid];
#endif
out_data[tid] = from_float<int8_t>(temp_d * alpha);
}
}
// Element-wise add on NHWC4-packed data: each thread handles one group
// of four channels (a float4 from each input), scales the sum by `alpha`
// and converts each lane to int8 (char4 output). `total` counts
// float4/char4 groups, not scalar elements.
__global__ void elementwise_add_nhwc4_int8_kernel(const size_t total,
const float4* x_data,
const float4* y_data,
const float alpha,
char4* out_data) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < total) {
// vectorized 16-byte loads of the four channel values
const float4 x_d = x_data[tid];
const float4 y_d = y_data[tid];
float4 packed_val;
char4 result_val;
packed_val.x = (x_d.x + y_d.x) * alpha;
result_val.x = from_float<int8_t>(packed_val.x);
packed_val.y = (x_d.y + y_d.y) * alpha;
result_val.y = from_float<int8_t>(packed_val.y);
packed_val.z = (x_d.z + y_d.z) * alpha;
result_val.z = from_float<int8_t>(packed_val.z);
packed_val.w = (x_d.w + y_d.w) * alpha;
result_val.w = from_float<int8_t>(packed_val.w);
// single 4-byte vectorized store
out_data[tid] = result_val;
}
}
// Host-side launcher for elementwise_add_kernel: `num` elements,
// 256 threads per block, grid sized by ceil-division. The kernel is
// enqueued on the caller-provided stream.
template <typename Dtype>
void elementwise_add(int num,
const Dtype* x_data,
const Dtype* y_data,
Dtype* out_data,
cudaStream_t stream) {
int thread = 256;
int block = (num + thread - 1) / thread;
elementwise_add_kernel<<<block, thread, 0, stream>>>(
num, x_data, y_data, out_data);
}
// explicit instantiation for the float specialization used by callers
template void elementwise_add(
int, const float*, const float*, float*, cudaStream_t);
// input type is float32
// output type is int8
// Element-wise add of two float32 buffers with scaled int8 output:
// out[i] = from_float<int8_t>((x[i] + y[i]) * alpha). `num` elements,
// launched on the caller-provided stream.
void elementwise_add_int8(int num,
const float* x_data,
const float* y_data,
const float alpha,
int8_t* out_data,
cudaStream_t stream) {
int thread = 256;
int block = (num + thread - 1) / thread;
// BUGFIX: launch on the caller's stream. The previous launch used the
// default (null) stream and ignored `stream`, breaking the stream
// ordering the signature promises (the sibling elementwise_add already
// uses `stream` correctly).
elementwise_add_int8_kernel<<<block, thread, 0, stream>>>(
num, x_data, y_data, alpha, out_data);
}
// Element-wise add on NHWC4-packed data (float4 inputs, char4 int8
// output), scaled by `alpha`. `num` counts float4/char4 groups.
// Launched on the caller-provided stream.
void elementwise_add_nhwc4_int8(int num,
const void* x_data,
const void* y_data,
const float alpha,
void* out_data,
cudaStream_t stream) {
int thread = 512;
int block = (num + thread - 1) / thread;
// BUGFIX: launch on the caller's stream instead of the default (null)
// stream; `stream` was previously accepted but ignored, which breaks
// the caller's stream ordering.
elementwise_add_nhwc4_int8_kernel<<<block, thread, 0, stream>>>(
num,
static_cast<const float4*>(x_data),
static_cast<const float4*>(y_data),
alpha,
static_cast<char4*>(out_data));
}
} // namespace math
} // namespace cuda
} // namespace lite
} // namespace paddle
|
4d61c60320dcdedc8e3b4e8de86e341c3d295fc7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Lab 6 - Volume Rendering Fractals
*/
#ifndef _TEXTURE3D_KERNEL_H_
#define _TEXTURE3D_KERNEL_H_
#include "cutil_math.h"
/* Volume texture declaration */
texture<float, 3, hipReadModeElementType> tex;
typedef struct {
float4 m[3];
} float3x4;
__constant__ float3x4 c_invViewMatrix; // inverse view matrix
/* Need to write host code to set these */
__constant__ float4 c_juliaC; // julia set constant
__constant__ float4 c_juliaPlane; // plane eqn of 3D slice
struct Ray {
float3 o; // origin
float3 d; // direction
};
// multiply two quaternions
// Hamilton product of two quaternions p * q, with the real part stored
// in .x and the imaginary parts in .y/.z/.w. Non-commutative.
__device__ float4
mul_quat(float4 p, float4 q)
{
return make_float4(p.x*q.x-p.y*q.y-p.z*q.z-p.w*q.w,
p.x*q.y+p.y*q.x+p.z*q.w-p.w*q.z,
p.x*q.z-p.y*q.w+p.z*q.x+p.w*q.y,
p.x*q.w+p.y*q.z-p.z*q.y+p.w*q.x);
}
// square a quaternion (could be optimized)
__device__ float4
sqr_quat(float4 p)
{
// this could/should be optimized
return mul_quat(p,p);
}
// convert a 3d position to a 4d quaternion using plane-slice
__device__ float4
pos_to_quat(float3 pos, float4 plane)
{
return make_float4(pos.x, pos.y, pos.z,
plane.x*pos.x+plane.y*pos.y+plane.z*pos.z+plane.w);
}
// intersect ray with a box
// http://www.siggraph.org/education/materials/HyperGraph/raytrace/rtinter3.htm
// Slab-method ray/AABB intersection.
// On hit returns nonzero and writes the entry/exit ray parameters to
// *tnear / *tfar; both are written even on a miss.
// http://www.siggraph.org/education/materials/HyperGraph/raytrace/rtinter3.htm
__device__
int intersectBox(Ray r, float3 boxmin, float3 boxmax, float *tnear, float *tfar)
{
// compute intersection of ray with all six bbox planes
// (relies on IEEE inf semantics when a direction component is 0)
float3 invR = make_float3(1.0f) / r.d;
float3 tbot = invR * (boxmin - r.o);
float3 ttop = invR * (boxmax - r.o);
// re-order intersections to find smallest and largest on each axis
float3 tmin = fminf(ttop, tbot);
float3 tmax = fmaxf(ttop, tbot);
// find the largest tmin and the smallest tmax
// (tmin.x appears twice, which is harmless under max/min)
float largest_tmin = fmaxf(fmaxf(tmin.x, tmin.y), fmaxf(tmin.x, tmin.z));
smallest_tmax = fminf(fminf(tmax.x, tmax.y), fminf(tmax.x, tmax.z));
*tnear = largest_tmin;
*tfar = smallest_tmax;
return smallest_tmax > largest_tmin;
}
// transform vector by matrix (no translation)
__device__
float3 mul(const float3x4 &M, const float3 &v)
{
float3 r;
r.x = dot(v, make_float3(M.m[0]));
r.y = dot(v, make_float3(M.m[1]));
r.z = dot(v, make_float3(M.m[2]));
return r;
}
// transform vector by matrix with translation
__device__
float4 mul(const float3x4 &M, const float4 &v)
{
float4 r;
r.x = dot(v, M.m[0]);
r.y = dot(v, M.m[1]);
r.z = dot(v, M.m[2]);
r.w = 1.0f;
return r;
}
// color conversion functions
__device__ uint rgbaFloatToInt(float4 rgba)
{
rgba.x = __saturatef(rgba.x); // clamp to [0.0, 1.0]
rgba.y = __saturatef(rgba.y);
rgba.z = __saturatef(rgba.z);
rgba.w = __saturatef(rgba.w);
return (uint(rgba.w*255)<<24) | (uint(rgba.z*255)<<16) | (uint(rgba.y*255)<<8) | uint(rgba.x*255);
}
__device__ uint rFloatToInt(float r)
{
r = __saturatef(r); // clamp to [0.0, 1.0]
return (uint(r*255)<<24) | (uint(r*255)<<16) | (uint(r*255)<<8) | uint(r*255);
}
// get a normal from volume texture
/* feel free to use this, but you should also compute the normal
using JuliaDist */
// Approximate the density gradient at `pos` (texture-space coordinates)
// by central differences of the 3D density texture, and return it
// normalized for use as a shading normal.
__device__ float3 d_TexNormal(float3 pos)
{
float3 normal = make_float3(0);
// central-difference step in texture coordinates
float d = 0.04f;
normal.x = (tex3D(tex, pos.x+d, pos.y, pos.z)-tex3D(tex, pos.x-d, pos.y, pos.z));
normal.y = (tex3D(tex, pos.x, pos.y+d, pos.z)-tex3D(tex, pos.x, pos.y-d, pos.z));
normal.z = (tex3D(tex, pos.x, pos.y, pos.z+d)-tex3D(tex, pos.x, pos.y, pos.z-d));
return normalize(normal);
}
// computes julia distance function
// Julia-set distance estimator (lab exercise stub): intended to iterate
// z <- z^2 + c in quaternion space starting from the plane-sliced `pos`
// for `niter` steps. Currently unimplemented and always returns 0.
// NOTE(review): `i` is declared but unused until the TODO is filled in.
__device__ float
d_JuliaDist(float3 pos, int niter)
{
int i;
// float4 z0 = pos_to_quat(pos, c_juliaPlane);
// float4 zp = make_float4(1,0,0,0);
/* TODO: fill in JuliaDist function */
return 0.0f;
}
// perform volume rendering
// Volume-render into d_output (one thread per pixel, packed RGBA8).
// Marches the eye ray through the [-2,2]^3 bounding box with a fixed
// step, sampling the 3D density texture and accumulating the samples.
// `epsilon` is currently unused (reserved for the TODO shading steps).
__global__ void
d_render(uint *d_output, uint imageW, uint imageH, float epsilon)
{
// amount to step by
float tstep = 0.0015f;
int maxSteps = 2000;
uint x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
uint y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
// map the pixel to [-1,1] normalized device coordinates
float u = (x / (float) imageW)*2.0f-1.0f;
float v = (y / (float) imageH)*2.0f-1.0f;
// calculate eye ray in world space
Ray eyeRay;
eyeRay.o = make_float3(mul(c_invViewMatrix, make_float4(0.0f, 0.0f, 0.0f, 1.0f)));
eyeRay.d = normalize(make_float3(u, v, -2.0f));
eyeRay.d = mul(c_invViewMatrix, eyeRay.d);
// find intersection with box
float tnear, tfar;
// return if not intersecting
if (!intersectBox(eyeRay,
make_float3(-2.0f,-2.0f,-2.0f),
make_float3(2.0f,2.0f,2.0f),
&tnear, &tfar))
return;
// clamp to near plane
if (tnear < 0.0f) tnear = 0.0f;
float t = tnear;
// accumulate values
float accum = 0.0f;
// start stepping through space
for(int i=0; i<maxSteps; i++) {
// BUGFIX: `pos` was used without being declared, which fails to
// compile; it is a per-step local sample position.
float3 pos = eyeRay.o + eyeRay.d*t;
// map position to [0, 1] coordinates
pos = pos*0.25f+0.5f;
// read from 3D texture
float sample = tex3D(tex, pos.x, pos.y, pos.z);
accum += sample;
t += tstep;
if (t > tfar) break;
}
/* TODO: calculate normal vector */
if ((x < imageW) && (y < imageH)) {
// write output color
uint i = __umul24(y, imageW) + x;
float4 col4 = make_float4(accum*0.01f);
/* TODO: calculate output color based on lighting and position */
d_output[i] = rgbaFloatToInt(col4);
}
}
// recompute julia set at a single volume point
// Recompute the Julia-set density for one volume voxel (lab exercise
// stub): intended to derive x/y/z from the thread/block indices and call
// d_JuliaDist. Currently every thread writes 0 to element 0.
__global__ void
d_setfractal(float *d_output)
{
// get x,y,z indices from kernel
// uint x = threadIdx.x;
ulong i = 0;
/* TODO: get y, z coordinates from blockIdx,
compute juliadist at position */
// set output value
d_output[i] = 0.0f;
}
#endif // #ifndef _TEXTURE3D_KERNEL_H_
| 4d61c60320dcdedc8e3b4e8de86e341c3d295fc7.cu | /*
* Lab 6 - Volume Rendering Fractals
*/
#ifndef _TEXTURE3D_KERNEL_H_
#define _TEXTURE3D_KERNEL_H_
#include "cutil_math.h"
/* Volume texture declaration */
texture<float, 3, cudaReadModeElementType> tex;
typedef struct {
float4 m[3];
} float3x4;
__constant__ float3x4 c_invViewMatrix; // inverse view matrix
/* Need to write host code to set these */
__constant__ float4 c_juliaC; // julia set constant
__constant__ float4 c_juliaPlane; // plane eqn of 3D slice
struct Ray {
float3 o; // origin
float3 d; // direction
};
// multiply two quaternions
__device__ float4
mul_quat(float4 p, float4 q)
{
return make_float4(p.x*q.x-p.y*q.y-p.z*q.z-p.w*q.w,
p.x*q.y+p.y*q.x+p.z*q.w-p.w*q.z,
p.x*q.z-p.y*q.w+p.z*q.x+p.w*q.y,
p.x*q.w+p.y*q.z-p.z*q.y+p.w*q.x);
}
// square a quaternion (could be optimized)
__device__ float4
sqr_quat(float4 p)
{
// this could/should be optimized
return mul_quat(p,p);
}
// convert a 3d position to a 4d quaternion using plane-slice
__device__ float4
pos_to_quat(float3 pos, float4 plane)
{
return make_float4(pos.x, pos.y, pos.z,
plane.x*pos.x+plane.y*pos.y+plane.z*pos.z+plane.w);
}
// intersect ray with a box
// http://www.siggraph.org/education/materials/HyperGraph/raytrace/rtinter3.htm
__device__
int intersectBox(Ray r, float3 boxmin, float3 boxmax, float *tnear, float *tfar)
{
// compute intersection of ray with all six bbox planes
float3 invR = make_float3(1.0f) / r.d;
float3 tbot = invR * (boxmin - r.o);
float3 ttop = invR * (boxmax - r.o);
// re-order intersections to find smallest and largest on each axis
float3 tmin = fminf(ttop, tbot);
float3 tmax = fmaxf(ttop, tbot);
// find the largest tmin and the smallest tmax
float largest_tmin = fmaxf(fmaxf(tmin.x, tmin.y), fmaxf(tmin.x, tmin.z));
float smallest_tmax = fminf(fminf(tmax.x, tmax.y), fminf(tmax.x, tmax.z));
*tnear = largest_tmin;
*tfar = smallest_tmax;
return smallest_tmax > largest_tmin;
}
// transform vector by matrix (no translation)
__device__
float3 mul(const float3x4 &M, const float3 &v)
{
float3 r;
r.x = dot(v, make_float3(M.m[0]));
r.y = dot(v, make_float3(M.m[1]));
r.z = dot(v, make_float3(M.m[2]));
return r;
}
// transform vector by matrix with translation
__device__
float4 mul(const float3x4 &M, const float4 &v)
{
float4 r;
r.x = dot(v, M.m[0]);
r.y = dot(v, M.m[1]);
r.z = dot(v, M.m[2]);
r.w = 1.0f;
return r;
}
// color conversion functions
__device__ uint rgbaFloatToInt(float4 rgba)
{
rgba.x = __saturatef(rgba.x); // clamp to [0.0, 1.0]
rgba.y = __saturatef(rgba.y);
rgba.z = __saturatef(rgba.z);
rgba.w = __saturatef(rgba.w);
return (uint(rgba.w*255)<<24) | (uint(rgba.z*255)<<16) | (uint(rgba.y*255)<<8) | uint(rgba.x*255);
}
__device__ uint rFloatToInt(float r)
{
r = __saturatef(r); // clamp to [0.0, 1.0]
return (uint(r*255)<<24) | (uint(r*255)<<16) | (uint(r*255)<<8) | uint(r*255);
}
// get a normal from volume texture
/* feel free to use this, but you should also compute the normal
using JuliaDist */
__device__ float3 d_TexNormal(float3 pos)
{
float3 normal = make_float3(0);
float d = 0.04f;
normal.x = (tex3D(tex, pos.x+d, pos.y, pos.z)-tex3D(tex, pos.x-d, pos.y, pos.z));
normal.y = (tex3D(tex, pos.x, pos.y+d, pos.z)-tex3D(tex, pos.x, pos.y-d, pos.z));
normal.z = (tex3D(tex, pos.x, pos.y, pos.z+d)-tex3D(tex, pos.x, pos.y, pos.z-d));
return normalize(normal);
}
// computes julia distance function
__device__ float
d_JuliaDist(float3 pos, int niter)
{
int i;
// float4 z0 = pos_to_quat(pos, c_juliaPlane);
// float4 zp = make_float4(1,0,0,0);
/* TODO: fill in JuliaDist function */
return 0.0f;
}
// perform volume rendering
// Volume-render into d_output (one thread per pixel, packed RGBA8).
// Marches the eye ray through the [-2,2]^3 bounding box with a fixed
// step, sampling the 3D density texture and accumulating the samples.
// `epsilon` is currently unused (reserved for the TODO shading steps).
__global__ void
d_render(uint *d_output, uint imageW, uint imageH, float epsilon)
{
// amount to step by
float tstep = 0.0015f;
int maxSteps = 2000;
uint x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
uint y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
// map the pixel to [-1,1] normalized device coordinates
float u = (x / (float) imageW)*2.0f-1.0f;
float v = (y / (float) imageH)*2.0f-1.0f;
// calculate eye ray in world space
Ray eyeRay;
eyeRay.o = make_float3(mul(c_invViewMatrix, make_float4(0.0f, 0.0f, 0.0f, 1.0f)));
eyeRay.d = normalize(make_float3(u, v, -2.0f));
eyeRay.d = mul(c_invViewMatrix, eyeRay.d);
// find intersection with box
float tnear, tfar;
// return if not intersecting
if (!intersectBox(eyeRay,
make_float3(-2.0f,-2.0f,-2.0f),
make_float3(2.0f,2.0f,2.0f),
&tnear, &tfar))
return;
// clamp to near plane
if (tnear < 0.0f) tnear = 0.0f;
float t = tnear;
// accumulate values
float accum = 0.0f;
// start stepping through space
for(int i=0; i<maxSteps; i++) {
// BUGFIX: `pos` was used without being declared, which fails to
// compile; it is a per-step local sample position.
float3 pos = eyeRay.o + eyeRay.d*t;
// map position to [0, 1] coordinates
pos = pos*0.25f+0.5f;
// read from 3D texture
float sample = tex3D(tex, pos.x, pos.y, pos.z);
accum += sample;
t += tstep;
if (t > tfar) break;
}
/* TODO: calculate normal vector */
if ((x < imageW) && (y < imageH)) {
// write output color
uint i = __umul24(y, imageW) + x;
float4 col4 = make_float4(accum*0.01f);
/* TODO: calculate output color based on lighting and position */
d_output[i] = rgbaFloatToInt(col4);
}
}
// recompute julia set at a single volume point
__global__ void
d_setfractal(float *d_output)
{
// get x,y,z indices from kernel
// uint x = threadIdx.x;
ulong i = 0;
/* TODO: get y, z coordinates from blockIdx,
compute juliadist at position */
// set output value
d_output[i] = 0.0f;
}
#endif // #ifndef _TEXTURE3D_KERNEL_H_
|
315d5ddaba6bd0eb838e791618a7110927eeca91.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hip/hip_runtime.h"
#include "kernels_hip.cuh"
namespace emida
{
// Copies subregions from source pattern using the `begins` - positions of the subregions. Also subtracts the sum of each
// subregion
// Extract subregions ("slices") of size slice_size from a batch of source
// pictures, one thread per slice pixel. Each slice is mean-subtracted
// (using the precomputed per-slice `sums`) and multiplied by a separable
// Hanning window before being written into the `slices` output at
// out_size stride. `begins` gives the top-left corner of each slice; the
// same begins are applied to every picture in the batch.
template<typename IN, typename OUT, typename S = OUT>
__global__ void prepare_pics(
const IN* __restrict__ pic,
OUT* __restrict__ slices,
const OUT* __restrict__ hanning_x,
const OUT* __restrict__ hanning_y,
const S* __restrict__ sums,
const size2_t* __restrict__ begins,
size2_t src_size,
size2_t slice_size,
size2_t out_size,
esize_t begins_size,
esize_t batch_size)
{
// decompose the flat thread id into (slice index, pixel-within-slice)
esize_t tid = blockIdx.x * blockDim.x + threadIdx.x;
esize_t slice_tid = tid % slice_size.area();
esize_t slice_num = tid / slice_size.area();
size2_t slice_pos = { slice_tid % slice_size.x, slice_tid / slice_size.x };
// slice index further splits into (picture in batch, begin position)
esize_t begins_num = slice_num % begins_size;
esize_t pic_num = slice_num / begins_size;
// tail guard: the grid may overshoot the total number of slices
if (slice_num >= begins_size * batch_size)
return;
size2_t pic_pos = begins[begins_num] + slice_pos;
OUT pixel = pic[pic_num * src_size.area() + pic_pos.pos(src_size.x)];
//subtract mean of the picture
pixel -= (OUT)sums[slice_num] / slice_size.area();
//apply hanning filter and convert to OUT (float or double)
pixel = (OUT)pixel * hanning_x[slice_pos.x] * hanning_y[slice_pos.y];
slices[slice_num * out_size.area() + slice_pos.pos(out_size.x)] = pixel;
}
// Host-side launcher for prepare_pics: one thread per slice pixel across
// the whole batch (slice_size.area() * begins_size * batch_size threads),
// 1024 threads per block, grid sized by ceil-division.
// NOTE(review): launches on the default stream; the kernel takes no
// stream argument in this API.
template<typename IN, typename OUT, typename S>
void run_prepare_pics(
const IN* pic,
OUT* slices,
const OUT* hanning_x,
const OUT* hanning_y,
const S* sums,
const size2_t* begins,
size2_t src_size,
size2_t slice_size,
size2_t out_size,
esize_t begins_size,
esize_t batch_size)
{
esize_t block_size = 1024;
esize_t grid_size(div_up(slice_size.area() * batch_size * begins_size, block_size));
hipLaunchKernelGGL(( prepare_pics), dim3(grid_size), dim3(block_size) , 0, 0, pic, slices, hanning_x, hanning_y, sums, begins, src_size, slice_size, out_size, begins_size, batch_size);
}
template void run_prepare_pics<uint16_t, double>(
const uint16_t* pic,
double* slices,
const double* hanning_x,
const double* hanning_y,
const double* sums,
const size2_t* begins,
size2_t src_size,
size2_t slice_size,
size2_t out_size,
esize_t begins_size,
esize_t batch_size);
template void run_prepare_pics<uint16_t, double, uint32_t>(
const uint16_t* pic,
double* slices,
const double* hanning_x,
const double* hanning_y,
const uint32_t* sums,
const size2_t* begins,
size2_t src_size,
size2_t slice_size,
size2_t out_size,
esize_t begins_size,
esize_t batch_size);
template void run_prepare_pics<uint16_t, float>(
const uint16_t* pic,
float* slices,
const float* hanning_x,
const float* hanning_y,
const float* sums,
const size2_t* begins,
size2_t src_size,
size2_t slice_size,
size2_t out_size,
esize_t begins_size,
esize_t batch_size);
template void run_prepare_pics<uint16_t, float, uint32_t>(
const uint16_t* pic,
float* slices,
const float* hanning_x,
const float* hanning_y,
const uint32_t* sums,
const size2_t* begins,
size2_t src_size,
size2_t slice_size,
size2_t out_size,
esize_t begins_size,
esize_t batch_size);
template void run_prepare_pics<double, double>(
const double* pic,
double* slices,
const double* hanning_x,
const double* hanning_y,
const double* sums,
const size2_t* begins,
size2_t src_size,
size2_t slice_size,
size2_t out_size,
esize_t begins_size,
esize_t batch_size);
template void run_prepare_pics<double, double, uint32_t>(
const double* pic,
double* slices,
const double* hanning_x,
const double* hanning_y,
const uint32_t* sums,
const size2_t* begins,
size2_t src_size,
size2_t slice_size,
size2_t out_size,
esize_t begins_size,
esize_t batch_size);
}
| 315d5ddaba6bd0eb838e791618a7110927eeca91.cu | #include "cuda.h"
#include "cuda_runtime.h"
#include "kernels.cuh"
namespace emida
{
// Copies subregions from source pattern using the `begins` - positions of the subregions. Also subtracts the sum of each
// subregion
template<typename IN, typename OUT, typename S = OUT>
__global__ void prepare_pics(
const IN* __restrict__ pic,
OUT* __restrict__ slices,
const OUT* __restrict__ hanning_x,
const OUT* __restrict__ hanning_y,
const S* __restrict__ sums,
const size2_t* __restrict__ begins,
size2_t src_size,
size2_t slice_size,
size2_t out_size,
esize_t begins_size,
esize_t batch_size)
{
esize_t tid = blockIdx.x * blockDim.x + threadIdx.x;
esize_t slice_tid = tid % slice_size.area();
esize_t slice_num = tid / slice_size.area();
size2_t slice_pos = { slice_tid % slice_size.x, slice_tid / slice_size.x };
esize_t begins_num = slice_num % begins_size;
esize_t pic_num = slice_num / begins_size;
if (slice_num >= begins_size * batch_size)
return;
size2_t pic_pos = begins[begins_num] + slice_pos;
OUT pixel = pic[pic_num * src_size.area() + pic_pos.pos(src_size.x)];
//subtract mean of the picture
pixel -= (OUT)sums[slice_num] / slice_size.area();
//apply hanning filter and convert to OUT (float or double)
pixel = (OUT)pixel * hanning_x[slice_pos.x] * hanning_y[slice_pos.y];
slices[slice_num * out_size.area() + slice_pos.pos(out_size.x)] = pixel;
}
template<typename IN, typename OUT, typename S>
void run_prepare_pics(
const IN* pic,
OUT* slices,
const OUT* hanning_x,
const OUT* hanning_y,
const S* sums,
const size2_t* begins,
size2_t src_size,
size2_t slice_size,
size2_t out_size,
esize_t begins_size,
esize_t batch_size)
{
esize_t block_size = 1024;
esize_t grid_size(div_up(slice_size.area() * batch_size * begins_size, block_size));
prepare_pics<<<grid_size, block_size >>> (pic, slices, hanning_x, hanning_y, sums, begins, src_size, slice_size, out_size, begins_size, batch_size);
}
template void run_prepare_pics<uint16_t, double>(
const uint16_t* pic,
double* slices,
const double* hanning_x,
const double* hanning_y,
const double* sums,
const size2_t* begins,
size2_t src_size,
size2_t slice_size,
size2_t out_size,
esize_t begins_size,
esize_t batch_size);
template void run_prepare_pics<uint16_t, double, uint32_t>(
const uint16_t* pic,
double* slices,
const double* hanning_x,
const double* hanning_y,
const uint32_t* sums,
const size2_t* begins,
size2_t src_size,
size2_t slice_size,
size2_t out_size,
esize_t begins_size,
esize_t batch_size);
template void run_prepare_pics<uint16_t, float>(
const uint16_t* pic,
float* slices,
const float* hanning_x,
const float* hanning_y,
const float* sums,
const size2_t* begins,
size2_t src_size,
size2_t slice_size,
size2_t out_size,
esize_t begins_size,
esize_t batch_size);
template void run_prepare_pics<uint16_t, float, uint32_t>(
const uint16_t* pic,
float* slices,
const float* hanning_x,
const float* hanning_y,
const uint32_t* sums,
const size2_t* begins,
size2_t src_size,
size2_t slice_size,
size2_t out_size,
esize_t begins_size,
esize_t batch_size);
template void run_prepare_pics<double, double>(
const double* pic,
double* slices,
const double* hanning_x,
const double* hanning_y,
const double* sums,
const size2_t* begins,
size2_t src_size,
size2_t slice_size,
size2_t out_size,
esize_t begins_size,
esize_t batch_size);
template void run_prepare_pics<double, double, uint32_t>(
const double* pic,
double* slices,
const double* hanning_x,
const double* hanning_y,
const uint32_t* sums,
const size2_t* begins,
size2_t src_size,
size2_t slice_size,
size2_t out_size,
esize_t begins_size,
esize_t batch_size);
}
|
072d5963265ced08026cfd337b345b8fbc0dc5ae.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "gtest/gtest.h"
#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/framework/scope.h"
#include "core/paddlefl_mpc/mpc_protocol/network/cuda_copy_network.h"
#include "core/paddlefl_mpc/mpc_protocol/context_holder.h"
#include "core/paddlefl_mpc/mpc_protocol/abstract_context.h"
#include "core/common/paddle_tensor_impl.cu.h"
#include "aby3_context.h"
#include "fixedpoint_tensor.h"
#include "fixedpoint_tensor_imp.h"
namespace aby3 {
using g_ctx_holder = paddle::mpc::ContextHolder;
template<typename T>
using CudaPaddleTensor = common::CudaPaddleTensor<T>;
using Fix64N16 = FixedPointTensor<int64_t, 16>;
using BTensor = BooleanTensor<int64_t>;
using AbstractContext = paddle::mpc::AbstractContext;
// GTest fixture that stands up a 3-party ABY3 MPC environment on a single
// GPU: one device context/stream shared by three worker threads, each thread
// owning its own ABY3Context connected through an on-device copy network.
class FixedTensorTest : public ::testing::Test {
public:
// Returns the pooled CUDADeviceContext for device 0.
const paddle::platform::CUDADeviceContext* get_gpu_ctx() {
paddle::platform::CUDAPlace gpu(0);
auto& pool = paddle::platform::DeviceContextPool::Instance();
return pool.template GetByPlace<paddle::platform::CUDAPlace>(gpu);
}
std::shared_ptr<paddle::framework::ExecutionContext> _exec_ctx;
std::shared_ptr<AbstractContext> _mpc_ctx[3]; // one MPC context per party
std::shared_ptr<TensorAdapterFactory> _s_tensor_factory;
std::thread _t[3]; // worker threads, reused by the test macros below
// One-time init: register GPU 0 in the device-context pool and publish its
// stream so every AbstractContext uses the same stream.
static void SetUpTestCase() {
paddle::platform::CUDAPlace gpu(0);
paddle::platform::DeviceContextPool::Init({gpu});
auto& pool = paddle::platform::DeviceContextPool::Instance();
paddle::mpc::AbstractContext::_s_stream = pool.GetByPlace(gpu)->stream();
}
// Per-test init: build the ExecutionContext and tensor factory, then create
// the three party contexts concurrently and wait for all of them.
void SetUp() {
paddle::framework::OperatorBase* op = nullptr;
paddle::framework::Scope scope;
paddle::framework::RuntimeContext ctx({}, {});
// only device_ctx is needed
// NOTE(review): `*op` dereferences a null pointer, and `scope`/`ctx` are
// locals that die when SetUp returns while _exec_ctx outlives them; this
// relies on ExecutionContext never touching those members — confirm.
_exec_ctx = std::make_shared<paddle::framework::ExecutionContext>(
*op, scope, *get_gpu_ctx(), ctx);
_s_tensor_factory = std::make_shared<::common::CudaPaddleTensorFactory>(get_gpu_ctx());
std::thread t[3]; // NOTE(review): unused — `_t` below is what gets joined
for (size_t i = 0; i < 3; ++i) {
_t[i] = std::thread(&FixedTensorTest::gen_mpc_ctx, this, i);
// using namespace std::chrono_literals;
// std::this_thread::sleep_for(20ms);
}
for (auto& ti : _t) {
ti.join();
}
}
// Creates party `idx`'s ABY3 context on top of a 3-node CudaCopyNetwork
// bound to the shared GPU stream.
void gen_mpc_ctx(size_t idx) {
auto net = std::make_shared<paddle::mpc::CudaCopyNetwork>(idx, 3,
get_gpu_ctx()->stream());
// net->init();
_mpc_ctx[idx] = std::make_shared<ABY3Context>(idx, net);
}
// Builds an int64 tensor of `shape` with every element set to `val`
// encoded as 16-bit-fraction fixed point (val * 2^16).
std::shared_ptr<TensorAdapter<int64_t> > gen(float val, std::vector<size_t> shape) {
auto ret = _s_tensor_factory->template create<int64_t>(shape);
dynamic_cast<CudaPaddleTensor<int64_t>*>(ret.get())->from_float_point_scalar(val, shape, 16);
return ret;
}
};
using paddle::mpc::ContextHolder;
#define TEST_SIZE 2
#define COMMA ,
#define TEST_METHOD_CIPHER(method, buf) __TEST_METHOD_CIPHER(method, (&rhs) COMMA, buf, TEST_SIZE COMMA TEST_SIZE, Fix64N16)
#define TEST_METHOD_CIPHER_PLAIN(method, buf) __TEST_METHOD_CIPHER(method, (tensor[0].get()) COMMA, buf, TEST_SIZE COMMA TEST_SIZE, Fix64N16)
#define TEST_METHOD_MONO(method, buf) __TEST_METHOD_CIPHER(method, , buf, TEST_SIZE COMMA TEST_SIZE, Fix64N16)
#define __TEST_METHOD_CIPHER(method, input, buf, ret_shape, ret_type) do {\
std::shared_ptr<TensorAdapter<int64_t> > tensor[9]; \
for (int i = 0; i < 2; i++) { \
tensor[i] = gen(1.23, {TEST_SIZE, TEST_SIZE}); \
} \
for (int i = 2; i < 9; i++) { \
tensor[i] = gen(1.23, { ret_shape }); \
} \
Fix64N16 lhs(tensor[0].get(), tensor[1].get()); \
Fix64N16 rhs(tensor[0].get(), tensor[1].get()); \
ret_type ret0(tensor[2].get(), tensor[3].get()); \
ret_type ret1(tensor[4].get(), tensor[5].get()); \
ret_type ret2(tensor[6].get(), tensor[7].get()); \
_t[0] = std::thread( \
[&] () { \
ContextHolder::template run_with_context( \
_exec_ctx.get(), _mpc_ctx[0], \
[&](){ \
lhs.method(input &ret0); \
ret0.reveal_to_one(0, tensor[8].get()); \
}); \
} \
); \
_t[1] = std::thread( \
[&] () { \
ContextHolder::template run_with_context( \
_exec_ctx.get(), _mpc_ctx[1], \
[&](){ \
lhs.method(input &ret1); \
ret1.reveal_to_one(0, nullptr); \
}); \
} \
); \
_t[2] = std::thread( \
[&] () { \
ContextHolder::template run_with_context( \
_exec_ctx.get(), _mpc_ctx[2], \
[&](){ \
lhs.method(input &ret2); \
ret2.reveal_to_one(0, nullptr); \
}); \
} \
); \
for (auto &t: _t) { \
t.join(); \
} \
hipMemcpy(buf, tensor[8]->data(), sizeof(buf), hipMemcpyDeviceToHost); \
hipStreamSynchronize(get_gpu_ctx()->stream()); \
} while(0)
TEST_F(FixedTensorTest, add) {
int64_t buf[4];
TEST_METHOD_CIPHER(add, buf);
for (int i = 0; i < 4; ++i) {
EXPECT_NEAR(1.23 * 3 * 2, buf[i] / 65536.0, 0.001);
}
}
TEST_F(FixedTensorTest, add_plain) {
int64_t buf[4];
TEST_METHOD_CIPHER_PLAIN(add, buf);
for (int i = 0; i < 4; ++i) {
EXPECT_NEAR(1.23 * 4, buf[i] / 65536.0, 0.001);
}
}
TEST_F(FixedTensorTest, sub) {
int64_t buf[4];
TEST_METHOD_CIPHER(sub, buf);
for (int i = 0; i < 4; ++i) {
EXPECT_NEAR(0, buf[i] / 65536.0, 0.001);
}
}
TEST_F(FixedTensorTest, sub_plain) {
int64_t buf[4];
TEST_METHOD_CIPHER_PLAIN(sub, buf);
for (int i = 0; i < 4; ++i) {
EXPECT_NEAR(1.23 * 2, buf[i] / 65536.0, 0.001);
}
}
TEST_F(FixedTensorTest, mul) {
int64_t buf[4];
TEST_METHOD_CIPHER(mul, buf);
for (int i = 0; i < 4; ++i) {
EXPECT_NEAR(1.23 * 3 * 1.23 * 3, buf[i] / 65536.0, 0.001);
}
}
TEST_F(FixedTensorTest, mul_plain) {
int64_t buf[4];
TEST_METHOD_CIPHER_PLAIN(mul, buf);
for (int i = 0; i < 4; ++i) {
EXPECT_NEAR(1.23 * 3 * 1.23, buf[i] / 65536.0, 0.001);
}
}
TEST_F(FixedTensorTest, mat_mul) {
int64_t buf[4];
TEST_METHOD_CIPHER(mat_mul, buf);
for (int i = 0; i < 4; ++i) {
EXPECT_NEAR(1.23 * 3 * 1.23 * 3 * TEST_SIZE, buf[i] / 65536.0, 0.1);
}
}
TEST_F(FixedTensorTest, mat_mul_plain) {
int64_t buf[4];
TEST_METHOD_CIPHER_PLAIN(mat_mul, buf);
for (int i = 0; i < 4; ++i) {
EXPECT_NEAR(1.23 * 3 * 1.23 * TEST_SIZE, buf[i] / 65536.0, 0.05);
}
}
TEST_F(FixedTensorTest, neg) {
int64_t buf[4];
TEST_METHOD_MONO(negative, buf);
for (int i = 0; i < 4; ++i) {
EXPECT_NEAR(-1.23 * 3, buf[i] / 65536.0, 0.001);
}
}
TEST_F(FixedTensorTest, div_plain) {
int64_t buf[4];
TEST_METHOD_CIPHER_PLAIN(div, buf);
for (int i = 0; i < 4; ++i) {
EXPECT_NEAR(3, buf[i] / 65536.0, 0.001);
}
}
TEST_F(FixedTensorTest, div) {
int64_t buf[4];
TEST_METHOD_CIPHER(div, buf);
for (int i = 0; i < 4; ++i) {
EXPECT_NEAR(1, buf[i] / 65536.0, 0.001);
}
}
TEST_F(FixedTensorTest, long_div) {
int64_t buf[4];
TEST_METHOD_CIPHER(long_div, buf);
for (int i = 0; i < 4; ++i) {
EXPECT_NEAR(1, buf[i] / 65536.0, 0.01);
}
}
TEST_F(FixedTensorTest, inverse_square_root) {
int64_t buf[4];
TEST_METHOD_MONO(inverse_square_root, buf);
for (int i = 0; i < 4; ++i) {
EXPECT_NEAR(0.52, buf[i] / 65536.0, 0.001);
}
}
TEST_F(FixedTensorTest, exp) {
int64_t buf[4];
TEST_METHOD_MONO(exp, buf);
for (int i = 0; i < 4; ++i) {
EXPECT_NEAR(40, buf[i] / 65536.0, 2);
}
}
TEST_F(FixedTensorTest, sum) {
int64_t buf[1];
__TEST_METHOD_CIPHER(sum, , buf, 1, Fix64N16);
for (int i = 0; i < 1; ++i) {
EXPECT_NEAR(3 * 1.23 * TEST_SIZE * TEST_SIZE, buf[i] / 65536.0, 0.001);
}
}
TEST_F(FixedTensorTest, dot_mul) {
int64_t buf[1];
__TEST_METHOD_CIPHER(dot_mul, (&rhs) COMMA, buf, 1, Fix64N16);
for (int i = 0; i < 1; ++i) {
EXPECT_NEAR(3 *3 * 1.23 * 1.23 * TEST_SIZE * TEST_SIZE, buf[i] / 65536.0, 0.001);
}
}
TEST_F(FixedTensorTest, dot_mul_plain) {
int64_t buf[1];
__TEST_METHOD_CIPHER(dot_mul, (tensor[0].get()) COMMA, buf, 1, Fix64N16);
for (int i = 0; i < 1; ++i) {
EXPECT_NEAR(3 * 1.23 * 1.23 * TEST_SIZE * TEST_SIZE, buf[i] / 65536.0, 0.001);
}
}
TEST_F(FixedTensorTest, relu) {
int64_t buf[4];
TEST_METHOD_MONO(relu, buf);
for (int i = 0; i < 4; ++i) {
EXPECT_NEAR(1.23 * 3, buf[i] / 65536.0, 2);
}
}
TEST_F(FixedTensorTest, sigmoid) {
int64_t buf[4];
TEST_METHOD_MONO(sigmoid, buf);
for (int i = 0; i < 4; ++i) {
EXPECT_NEAR(1.0, buf[i] / 65536.0, 2);
}
}
TEST_F(FixedTensorTest, sigmoid_enhanced) {
int64_t buf[4];
TEST_METHOD_MONO(sigmoid_enhanced, buf);
for (int i = 0; i < 4; ++i) {
EXPECT_NEAR(1.0, buf[i] / 65536.0, 2);
}
}
TEST_F(FixedTensorTest, sigmoid_chebyshev) {
int64_t buf[4];
TEST_METHOD_MONO(sigmoid_chebyshev, buf);
for (int i = 0; i < 4; ++i) {
EXPECT_NEAR(1.0, buf[i] / 65536.0, 2);
}
}
TEST_F(FixedTensorTest, sigmoid_high_precision) {
int64_t buf[4];
TEST_METHOD_MONO(sigmoid_high_precision, buf);
for (int i = 0; i < 4; ++i) {
EXPECT_NEAR(1.0, buf[i] / 65536.0, 2);
}
}
TEST_F(FixedTensorTest, softmax) {
int64_t buf[4];
TEST_METHOD_MONO(softmax, buf);
for (int i = 0; i < 4; ++i) {
EXPECT_NEAR(0.5, buf[i] / 65536.0, 2);
}
}
TEST_F(FixedTensorTest, lt) {
int64_t buf[4];
__TEST_METHOD_CIPHER(lt, (&rhs) COMMA, buf, TEST_SIZE COMMA TEST_SIZE, BTensor);
for (int i = 0; i < 4; ++i) {
EXPECT_NEAR(0, buf[i], 0.001);
}
}
TEST_F(FixedTensorTest, lt_plain) {
int64_t buf[4];
__TEST_METHOD_CIPHER(lt, (tensor[0].get()) COMMA, buf, TEST_SIZE COMMA TEST_SIZE, BTensor);
for (int i = 0; i < 4; ++i) {
EXPECT_NEAR(0, buf[i], 0.001);
}
}
TEST_F(FixedTensorTest, leq) {
int64_t buf[4];
__TEST_METHOD_CIPHER(leq, (&rhs) COMMA, buf, TEST_SIZE COMMA TEST_SIZE, BTensor);
for (int i = 0; i < 4; ++i) {
EXPECT_NEAR(1, buf[i], 0.001);
}
}
TEST_F(FixedTensorTest, leq_plain) {
int64_t buf[4];
__TEST_METHOD_CIPHER(leq, (tensor[0].get()) COMMA, buf, TEST_SIZE COMMA TEST_SIZE, BTensor);
for (int i = 0; i < 4; ++i) {
EXPECT_NEAR(0, buf[i], 0.001);
}
}
TEST_F(FixedTensorTest, gt) {
int64_t buf[4];
__TEST_METHOD_CIPHER(gt, (&rhs) COMMA, buf, TEST_SIZE COMMA TEST_SIZE, BTensor);
for (int i = 0; i < 4; ++i) {
EXPECT_NEAR(0, buf[i], 0.001);
}
}
TEST_F(FixedTensorTest, gt_plain) {
int64_t buf[4];
__TEST_METHOD_CIPHER(gt, (tensor[0].get()) COMMA, buf, TEST_SIZE COMMA TEST_SIZE, BTensor);
for (int i = 0; i < 4; ++i) {
EXPECT_NEAR(1, buf[i], 0.001);
}
}
TEST_F(FixedTensorTest, geq) {
int64_t buf[4];
__TEST_METHOD_CIPHER(geq, (&rhs) COMMA, buf, TEST_SIZE COMMA TEST_SIZE, BTensor);
for (int i = 0; i < 4; ++i) {
EXPECT_NEAR(1, buf[i], 0.001);
}
}
TEST_F(FixedTensorTest, geq_plain) {
int64_t buf[4];
__TEST_METHOD_CIPHER(geq, (tensor[0].get()) COMMA, buf, TEST_SIZE COMMA TEST_SIZE, BTensor);
for (int i = 0; i < 4; ++i) {
EXPECT_NEAR(1, buf[i], 0.001);
}
}
TEST_F(FixedTensorTest, eq) {
int64_t buf[4];
__TEST_METHOD_CIPHER(eq, (&rhs) COMMA, buf, TEST_SIZE COMMA TEST_SIZE, BTensor);
for (int i = 0; i < 4; ++i) {
EXPECT_NEAR(1, buf[i], 0.001);
}
}
TEST_F(FixedTensorTest, eq_plain) {
int64_t buf[4];
__TEST_METHOD_CIPHER(eq, (tensor[0].get()) COMMA, buf, TEST_SIZE COMMA TEST_SIZE, BTensor);
for (int i = 0; i < 4; ++i) {
EXPECT_NEAR(0, buf[i], 0.001);
}
}
TEST_F(FixedTensorTest, neq) {
int64_t buf[4];
__TEST_METHOD_CIPHER(neq, (&rhs) COMMA, buf, TEST_SIZE COMMA TEST_SIZE, BTensor);
for (int i = 0; i < 4; ++i) {
EXPECT_NEAR(0, buf[i], 0.001);
}
}
TEST_F(FixedTensorTest, neq_plain) {
int64_t buf[4];
__TEST_METHOD_CIPHER(neq, (tensor[0].get()) COMMA, buf, TEST_SIZE COMMA TEST_SIZE, BTensor);
for (int i = 0; i < 4; ++i) {
EXPECT_NEAR(1, buf[i], 0.001);
}
}
TEST_F(FixedTensorTest, max) {
int64_t buf[4];
TEST_METHOD_CIPHER(max, buf);
for (int i = 0; i < 4; ++i) {
EXPECT_NEAR(1.23 * 3, buf[i] / 65536.0, 0.001);
}
}
TEST_F(FixedTensorTest, max_plain) {
int64_t buf[4];
TEST_METHOD_CIPHER_PLAIN(max, buf);
for (int i = 0; i < 4; ++i) {
EXPECT_NEAR(1.23 * 3, buf[i] / 65536.0, 0.001);
}
}
TEST_F(FixedTensorTest, max_pooling) {
int64_t buf[4];
__TEST_METHOD_CIPHER(max_pooling, , buf, 1 COMMA TEST_SIZE, Fix64N16);
for (int i = 0; i < 4; ++i) {
EXPECT_NEAR(1.23 * 3, buf[i] / 65536.0, 0.001);
}
}
TEST_F(FixedTensorTest, preds_to_idx) {
int64_t buf[4];
std::shared_ptr<TensorAdapter<int64_t> > tensor[9]; \
for (int i = 0; i < 9; i++) { \
tensor[i] = gen(0.33, { 4 }); \
} \
Fix64N16 lhs(tensor[0].get(), tensor[1].get()); \
Fix64N16 ret0(tensor[2].get(), tensor[3].get()); \
Fix64N16 ret1(tensor[4].get(), tensor[5].get()); \
Fix64N16 ret2(tensor[6].get(), tensor[7].get()); \
_t[0] = std::thread( \
[&] () { \
ContextHolder::template run_with_context( \
_exec_ctx.get(), _mpc_ctx[0], \
[&](){ \
Fix64N16::preds_to_indices(&lhs, &ret0); \
ret0.reveal_to_one(0, tensor[8].get()); \
}); \
} \
); \
_t[1] = std::thread( \
[&] () { \
ContextHolder::template run_with_context( \
_exec_ctx.get(), _mpc_ctx[1], \
[&](){ \
Fix64N16::preds_to_indices(&lhs, &ret1); \
ret1.reveal_to_one(0, nullptr); \
}); \
} \
); \
_t[2] = std::thread( \
[&] () { \
ContextHolder::template run_with_context( \
_exec_ctx.get(), _mpc_ctx[2], \
[&](){ \
Fix64N16::preds_to_indices(&lhs, &ret2); \
ret2.reveal_to_one(0, nullptr); \
}); \
} \
); \
for (auto &t: _t) { \
t.join(); \
} \
hipMemcpy(buf, tensor[8]->data(), sizeof(buf), hipMemcpyDeviceToHost); \
hipStreamSynchronize(get_gpu_ctx()->stream()); \
for (int i = 0; i < 4; ++i) {
EXPECT_NEAR(1, buf[i] / 65536.0, 0.001);
}
}
TEST_F(FixedTensorTest, tp_fp_fn) {
int64_t buf[3];
std::shared_ptr<TensorAdapter<int64_t> > tensor[9]; \
for (int i = 0; i < 9; i++) { \
tensor[i] = gen(0.33, { 3 }); \
} \
Fix64N16 lhs(tensor[0].get(), tensor[1].get()); \
Fix64N16 ret0(tensor[2].get(), tensor[3].get()); \
Fix64N16 ret1(tensor[4].get(), tensor[5].get()); \
Fix64N16 ret2(tensor[6].get(), tensor[7].get()); \
_t[0] = std::thread( \
[&] () { \
ContextHolder::template run_with_context( \
_exec_ctx.get(), _mpc_ctx[0], \
[&](){ \
Fix64N16::calc_tp_fp_fn(&lhs, &lhs, &ret0); \
ret0.reveal_to_one(0, tensor[8].get()); \
}); \
} \
); \
_t[1] = std::thread( \
[&] () { \
ContextHolder::template run_with_context( \
_exec_ctx.get(), _mpc_ctx[1], \
[&](){ \
Fix64N16::calc_tp_fp_fn(&lhs, &lhs, &ret1); \
ret1.reveal_to_one(0, nullptr); \
}); \
} \
); \
_t[2] = std::thread( \
[&] () { \
ContextHolder::template run_with_context( \
_exec_ctx.get(), _mpc_ctx[2], \
[&](){ \
Fix64N16::calc_tp_fp_fn(&lhs, &lhs, &ret2); \
ret2.reveal_to_one(0, nullptr); \
}); \
} \
); \
for (auto &t: _t) { \
t.join(); \
} \
hipMemcpy(buf, tensor[8]->data(), sizeof(buf), hipMemcpyDeviceToHost); \
hipStreamSynchronize(get_gpu_ctx()->stream()); \
EXPECT_NEAR(3, buf[0] / 65536.0, 0.1);
EXPECT_NEAR(0, buf[1] / 65536.0, 0.1);
EXPECT_NEAR(0, buf[1] / 65536.0, 0.1);
}
TEST_F(FixedTensorTest, precision_recall) {
int64_t buf[3];
std::shared_ptr<TensorAdapter<int64_t> > tensor[9]; \
for (int i = 0; i < 5; i++) { \
tensor[i] = gen(3.33, { 3 }); \
} \
Fix64N16 lhs(tensor[0].get(), tensor[1].get()); \
_t[0] = std::thread( \
[&] () { \
ContextHolder::template run_with_context( \
_exec_ctx.get(), _mpc_ctx[0], \
[&](){ \
Fix64N16::calc_precision_recall(&lhs, tensor[2].get()); \
}); \
} \
); \
_t[1] = std::thread( \
[&] () { \
ContextHolder::template run_with_context( \
_exec_ctx.get(), _mpc_ctx[1], \
[&](){ \
Fix64N16::calc_precision_recall(&lhs, tensor[3].get()); \
}); \
} \
); \
_t[2] = std::thread( \
[&] () { \
ContextHolder::template run_with_context( \
_exec_ctx.get(), _mpc_ctx[2], \
[&](){ \
Fix64N16::calc_precision_recall(&lhs, tensor[4].get()); \
}); \
} \
); \
for (auto &t: _t) { \
t.join(); \
} \
hipMemcpy(buf, tensor[2]->data(), sizeof(buf), hipMemcpyDeviceToHost); \
hipStreamSynchronize(get_gpu_ctx()->stream()); \
EXPECT_NEAR(0.5, buf[0] / 65536.0, 0.1);
EXPECT_NEAR(0.5, buf[1] / 65536.0, 0.1);
EXPECT_NEAR(0.5, buf[2] / 65536.0, 0.1);
}
} // namespace aby3
| 072d5963265ced08026cfd337b345b8fbc0dc5ae.cu | /* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "gtest/gtest.h"
#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/framework/scope.h"
#include "core/paddlefl_mpc/mpc_protocol/network/cuda_copy_network.h"
#include "core/paddlefl_mpc/mpc_protocol/context_holder.h"
#include "core/paddlefl_mpc/mpc_protocol/abstract_context.h"
#include "core/common/paddle_tensor_impl.cu.h"
#include "aby3_context.h"
#include "fixedpoint_tensor.h"
#include "fixedpoint_tensor_imp.h"
namespace aby3 {
using g_ctx_holder = paddle::mpc::ContextHolder;
template<typename T>
using CudaPaddleTensor = common::CudaPaddleTensor<T>;
using Fix64N16 = FixedPointTensor<int64_t, 16>;
using BTensor = BooleanTensor<int64_t>;
using AbstractContext = paddle::mpc::AbstractContext;
class FixedTensorTest : public ::testing::Test {
public:
const paddle::platform::CUDADeviceContext* get_gpu_ctx() {
paddle::platform::CUDAPlace gpu(0);
auto& pool = paddle::platform::DeviceContextPool::Instance();
return pool.template GetByPlace<paddle::platform::CUDAPlace>(gpu);
}
std::shared_ptr<paddle::framework::ExecutionContext> _exec_ctx;
std::shared_ptr<AbstractContext> _mpc_ctx[3];
std::shared_ptr<TensorAdapterFactory> _s_tensor_factory;
std::thread _t[3];
static void SetUpTestCase() {
paddle::platform::CUDAPlace gpu(0);
paddle::platform::DeviceContextPool::Init({gpu});
auto& pool = paddle::platform::DeviceContextPool::Instance();
paddle::mpc::AbstractContext::_s_stream = pool.GetByPlace(gpu)->stream();
}
void SetUp() {
paddle::framework::OperatorBase* op = nullptr;
paddle::framework::Scope scope;
paddle::framework::RuntimeContext ctx({}, {});
// only device_ctx is needed
_exec_ctx = std::make_shared<paddle::framework::ExecutionContext>(
*op, scope, *get_gpu_ctx(), ctx);
_s_tensor_factory = std::make_shared<::common::CudaPaddleTensorFactory>(get_gpu_ctx());
std::thread t[3];
for (size_t i = 0; i < 3; ++i) {
_t[i] = std::thread(&FixedTensorTest::gen_mpc_ctx, this, i);
// using namespace std::chrono_literals;
// std::this_thread::sleep_for(20ms);
}
for (auto& ti : _t) {
ti.join();
}
}
void gen_mpc_ctx(size_t idx) {
auto net = std::make_shared<paddle::mpc::CudaCopyNetwork>(idx, 3,
get_gpu_ctx()->stream());
// net->init();
_mpc_ctx[idx] = std::make_shared<ABY3Context>(idx, net);
}
std::shared_ptr<TensorAdapter<int64_t> > gen(float val, std::vector<size_t> shape) {
auto ret = _s_tensor_factory->template create<int64_t>(shape);
dynamic_cast<CudaPaddleTensor<int64_t>*>(ret.get())->from_float_point_scalar(val, shape, 16);
return ret;
}
};
using paddle::mpc::ContextHolder;
#define TEST_SIZE 2
#define COMMA ,
#define TEST_METHOD_CIPHER(method, buf) __TEST_METHOD_CIPHER(method, (&rhs) COMMA, buf, TEST_SIZE COMMA TEST_SIZE, Fix64N16)
#define TEST_METHOD_CIPHER_PLAIN(method, buf) __TEST_METHOD_CIPHER(method, (tensor[0].get()) COMMA, buf, TEST_SIZE COMMA TEST_SIZE, Fix64N16)
#define TEST_METHOD_MONO(method, buf) __TEST_METHOD_CIPHER(method, , buf, TEST_SIZE COMMA TEST_SIZE, Fix64N16)
#define __TEST_METHOD_CIPHER(method, input, buf, ret_shape, ret_type) do {\
std::shared_ptr<TensorAdapter<int64_t> > tensor[9]; \
for (int i = 0; i < 2; i++) { \
tensor[i] = gen(1.23, {TEST_SIZE, TEST_SIZE}); \
} \
for (int i = 2; i < 9; i++) { \
tensor[i] = gen(1.23, { ret_shape }); \
} \
Fix64N16 lhs(tensor[0].get(), tensor[1].get()); \
Fix64N16 rhs(tensor[0].get(), tensor[1].get()); \
ret_type ret0(tensor[2].get(), tensor[3].get()); \
ret_type ret1(tensor[4].get(), tensor[5].get()); \
ret_type ret2(tensor[6].get(), tensor[7].get()); \
_t[0] = std::thread( \
[&] () { \
ContextHolder::template run_with_context( \
_exec_ctx.get(), _mpc_ctx[0], \
[&](){ \
lhs.method(input &ret0); \
ret0.reveal_to_one(0, tensor[8].get()); \
}); \
} \
); \
_t[1] = std::thread( \
[&] () { \
ContextHolder::template run_with_context( \
_exec_ctx.get(), _mpc_ctx[1], \
[&](){ \
lhs.method(input &ret1); \
ret1.reveal_to_one(0, nullptr); \
}); \
} \
); \
_t[2] = std::thread( \
[&] () { \
ContextHolder::template run_with_context( \
_exec_ctx.get(), _mpc_ctx[2], \
[&](){ \
lhs.method(input &ret2); \
ret2.reveal_to_one(0, nullptr); \
}); \
} \
); \
for (auto &t: _t) { \
t.join(); \
} \
cudaMemcpy(buf, tensor[8]->data(), sizeof(buf), cudaMemcpyDeviceToHost); \
cudaStreamSynchronize(get_gpu_ctx()->stream()); \
} while(0)
TEST_F(FixedTensorTest, add) {
int64_t buf[4];
TEST_METHOD_CIPHER(add, buf);
for (int i = 0; i < 4; ++i) {
EXPECT_NEAR(1.23 * 3 * 2, buf[i] / 65536.0, 0.001);
}
}
TEST_F(FixedTensorTest, add_plain) {
int64_t buf[4];
TEST_METHOD_CIPHER_PLAIN(add, buf);
for (int i = 0; i < 4; ++i) {
EXPECT_NEAR(1.23 * 4, buf[i] / 65536.0, 0.001);
}
}
TEST_F(FixedTensorTest, sub) {
int64_t buf[4];
TEST_METHOD_CIPHER(sub, buf);
for (int i = 0; i < 4; ++i) {
EXPECT_NEAR(0, buf[i] / 65536.0, 0.001);
}
}
TEST_F(FixedTensorTest, sub_plain) {
int64_t buf[4];
TEST_METHOD_CIPHER_PLAIN(sub, buf);
for (int i = 0; i < 4; ++i) {
EXPECT_NEAR(1.23 * 2, buf[i] / 65536.0, 0.001);
}
}
TEST_F(FixedTensorTest, mul) {
int64_t buf[4];
TEST_METHOD_CIPHER(mul, buf);
for (int i = 0; i < 4; ++i) {
EXPECT_NEAR(1.23 * 3 * 1.23 * 3, buf[i] / 65536.0, 0.001);
}
}
TEST_F(FixedTensorTest, mul_plain) {
int64_t buf[4];
TEST_METHOD_CIPHER_PLAIN(mul, buf);
for (int i = 0; i < 4; ++i) {
EXPECT_NEAR(1.23 * 3 * 1.23, buf[i] / 65536.0, 0.001);
}
}
TEST_F(FixedTensorTest, mat_mul) {
int64_t buf[4];
TEST_METHOD_CIPHER(mat_mul, buf);
for (int i = 0; i < 4; ++i) {
EXPECT_NEAR(1.23 * 3 * 1.23 * 3 * TEST_SIZE, buf[i] / 65536.0, 0.1);
}
}
TEST_F(FixedTensorTest, mat_mul_plain) {
int64_t buf[4];
TEST_METHOD_CIPHER_PLAIN(mat_mul, buf);
for (int i = 0; i < 4; ++i) {
EXPECT_NEAR(1.23 * 3 * 1.23 * TEST_SIZE, buf[i] / 65536.0, 0.05);
}
}
TEST_F(FixedTensorTest, neg) {
int64_t buf[4];
TEST_METHOD_MONO(negative, buf);
for (int i = 0; i < 4; ++i) {
EXPECT_NEAR(-1.23 * 3, buf[i] / 65536.0, 0.001);
}
}
TEST_F(FixedTensorTest, div_plain) {
int64_t buf[4];
TEST_METHOD_CIPHER_PLAIN(div, buf);
for (int i = 0; i < 4; ++i) {
EXPECT_NEAR(3, buf[i] / 65536.0, 0.001);
}
}
TEST_F(FixedTensorTest, div) {
int64_t buf[4];
TEST_METHOD_CIPHER(div, buf);
for (int i = 0; i < 4; ++i) {
EXPECT_NEAR(1, buf[i] / 65536.0, 0.001);
}
}
TEST_F(FixedTensorTest, long_div) {
int64_t buf[4];
TEST_METHOD_CIPHER(long_div, buf);
for (int i = 0; i < 4; ++i) {
EXPECT_NEAR(1, buf[i] / 65536.0, 0.01);
}
}
TEST_F(FixedTensorTest, inverse_square_root) {
int64_t buf[4];
TEST_METHOD_MONO(inverse_square_root, buf);
for (int i = 0; i < 4; ++i) {
EXPECT_NEAR(0.52, buf[i] / 65536.0, 0.001);
}
}
TEST_F(FixedTensorTest, exp) {
int64_t buf[4];
TEST_METHOD_MONO(exp, buf);
for (int i = 0; i < 4; ++i) {
EXPECT_NEAR(40, buf[i] / 65536.0, 2);
}
}
TEST_F(FixedTensorTest, sum) {
int64_t buf[1];
__TEST_METHOD_CIPHER(sum, , buf, 1, Fix64N16);
for (int i = 0; i < 1; ++i) {
EXPECT_NEAR(3 * 1.23 * TEST_SIZE * TEST_SIZE, buf[i] / 65536.0, 0.001);
}
}
TEST_F(FixedTensorTest, dot_mul) {
int64_t buf[1];
__TEST_METHOD_CIPHER(dot_mul, (&rhs) COMMA, buf, 1, Fix64N16);
for (int i = 0; i < 1; ++i) {
EXPECT_NEAR(3 *3 * 1.23 * 1.23 * TEST_SIZE * TEST_SIZE, buf[i] / 65536.0, 0.001);
}
}
TEST_F(FixedTensorTest, dot_mul_plain) {
int64_t buf[1];
__TEST_METHOD_CIPHER(dot_mul, (tensor[0].get()) COMMA, buf, 1, Fix64N16);
for (int i = 0; i < 1; ++i) {
EXPECT_NEAR(3 * 1.23 * 1.23 * TEST_SIZE * TEST_SIZE, buf[i] / 65536.0, 0.001);
}
}
TEST_F(FixedTensorTest, relu) {
int64_t buf[4];
TEST_METHOD_MONO(relu, buf);
for (int i = 0; i < 4; ++i) {
EXPECT_NEAR(1.23 * 3, buf[i] / 65536.0, 2);
}
}
TEST_F(FixedTensorTest, sigmoid) {
int64_t buf[4];
TEST_METHOD_MONO(sigmoid, buf);
for (int i = 0; i < 4; ++i) {
EXPECT_NEAR(1.0, buf[i] / 65536.0, 2);
}
}
TEST_F(FixedTensorTest, sigmoid_enhanced) {
int64_t buf[4];
TEST_METHOD_MONO(sigmoid_enhanced, buf);
for (int i = 0; i < 4; ++i) {
EXPECT_NEAR(1.0, buf[i] / 65536.0, 2);
}
}
TEST_F(FixedTensorTest, sigmoid_chebyshev) {
int64_t buf[4];
TEST_METHOD_MONO(sigmoid_chebyshev, buf);
for (int i = 0; i < 4; ++i) {
EXPECT_NEAR(1.0, buf[i] / 65536.0, 2);
}
}
TEST_F(FixedTensorTest, sigmoid_high_precision) {
int64_t buf[4];
TEST_METHOD_MONO(sigmoid_high_precision, buf);
for (int i = 0; i < 4; ++i) {
EXPECT_NEAR(1.0, buf[i] / 65536.0, 2);
}
}
TEST_F(FixedTensorTest, softmax) {
int64_t buf[4];
TEST_METHOD_MONO(softmax, buf);
for (int i = 0; i < 4; ++i) {
EXPECT_NEAR(0.5, buf[i] / 65536.0, 2);
}
}
TEST_F(FixedTensorTest, lt) {
int64_t buf[4];
__TEST_METHOD_CIPHER(lt, (&rhs) COMMA, buf, TEST_SIZE COMMA TEST_SIZE, BTensor);
for (int i = 0; i < 4; ++i) {
EXPECT_NEAR(0, buf[i], 0.001);
}
}
TEST_F(FixedTensorTest, lt_plain) {
int64_t buf[4];
__TEST_METHOD_CIPHER(lt, (tensor[0].get()) COMMA, buf, TEST_SIZE COMMA TEST_SIZE, BTensor);
for (int i = 0; i < 4; ++i) {
EXPECT_NEAR(0, buf[i], 0.001);
}
}
TEST_F(FixedTensorTest, leq) {
int64_t buf[4];
__TEST_METHOD_CIPHER(leq, (&rhs) COMMA, buf, TEST_SIZE COMMA TEST_SIZE, BTensor);
for (int i = 0; i < 4; ++i) {
EXPECT_NEAR(1, buf[i], 0.001);
}
}
TEST_F(FixedTensorTest, leq_plain) {
int64_t buf[4];
__TEST_METHOD_CIPHER(leq, (tensor[0].get()) COMMA, buf, TEST_SIZE COMMA TEST_SIZE, BTensor);
for (int i = 0; i < 4; ++i) {
EXPECT_NEAR(0, buf[i], 0.001);
}
}
TEST_F(FixedTensorTest, gt) {
int64_t buf[4];
__TEST_METHOD_CIPHER(gt, (&rhs) COMMA, buf, TEST_SIZE COMMA TEST_SIZE, BTensor);
for (int i = 0; i < 4; ++i) {
EXPECT_NEAR(0, buf[i], 0.001);
}
}
TEST_F(FixedTensorTest, gt_plain) {
int64_t buf[4];
__TEST_METHOD_CIPHER(gt, (tensor[0].get()) COMMA, buf, TEST_SIZE COMMA TEST_SIZE, BTensor);
for (int i = 0; i < 4; ++i) {
EXPECT_NEAR(1, buf[i], 0.001);
}
}
TEST_F(FixedTensorTest, geq) {
int64_t buf[4];
__TEST_METHOD_CIPHER(geq, (&rhs) COMMA, buf, TEST_SIZE COMMA TEST_SIZE, BTensor);
for (int i = 0; i < 4; ++i) {
EXPECT_NEAR(1, buf[i], 0.001);
}
}
TEST_F(FixedTensorTest, geq_plain) {
int64_t buf[4];
__TEST_METHOD_CIPHER(geq, (tensor[0].get()) COMMA, buf, TEST_SIZE COMMA TEST_SIZE, BTensor);
for (int i = 0; i < 4; ++i) {
EXPECT_NEAR(1, buf[i], 0.001);
}
}
TEST_F(FixedTensorTest, eq) {
int64_t buf[4];
__TEST_METHOD_CIPHER(eq, (&rhs) COMMA, buf, TEST_SIZE COMMA TEST_SIZE, BTensor);
for (int i = 0; i < 4; ++i) {
EXPECT_NEAR(1, buf[i], 0.001);
}
}
TEST_F(FixedTensorTest, eq_plain) {
int64_t buf[4];
__TEST_METHOD_CIPHER(eq, (tensor[0].get()) COMMA, buf, TEST_SIZE COMMA TEST_SIZE, BTensor);
for (int i = 0; i < 4; ++i) {
EXPECT_NEAR(0, buf[i], 0.001);
}
}
TEST_F(FixedTensorTest, neq) {
int64_t buf[4];
__TEST_METHOD_CIPHER(neq, (&rhs) COMMA, buf, TEST_SIZE COMMA TEST_SIZE, BTensor);
for (int i = 0; i < 4; ++i) {
EXPECT_NEAR(0, buf[i], 0.001);
}
}
TEST_F(FixedTensorTest, neq_plain) {
int64_t buf[4];
__TEST_METHOD_CIPHER(neq, (tensor[0].get()) COMMA, buf, TEST_SIZE COMMA TEST_SIZE, BTensor);
for (int i = 0; i < 4; ++i) {
EXPECT_NEAR(1, buf[i], 0.001);
}
}
TEST_F(FixedTensorTest, max) {
int64_t buf[4];
TEST_METHOD_CIPHER(max, buf);
for (int i = 0; i < 4; ++i) {
EXPECT_NEAR(1.23 * 3, buf[i] / 65536.0, 0.001);
}
}
TEST_F(FixedTensorTest, max_plain) {
int64_t buf[4];
TEST_METHOD_CIPHER_PLAIN(max, buf);
for (int i = 0; i < 4; ++i) {
EXPECT_NEAR(1.23 * 3, buf[i] / 65536.0, 0.001);
}
}
TEST_F(FixedTensorTest, max_pooling) {
int64_t buf[4];
__TEST_METHOD_CIPHER(max_pooling, , buf, 1 COMMA TEST_SIZE, Fix64N16);
for (int i = 0; i < 4; ++i) {
EXPECT_NEAR(1.23 * 3, buf[i] / 65536.0, 0.001);
}
}
TEST_F(FixedTensorTest, preds_to_idx) {
int64_t buf[4];
std::shared_ptr<TensorAdapter<int64_t> > tensor[9]; \
for (int i = 0; i < 9; i++) { \
tensor[i] = gen(0.33, { 4 }); \
} \
Fix64N16 lhs(tensor[0].get(), tensor[1].get()); \
Fix64N16 ret0(tensor[2].get(), tensor[3].get()); \
Fix64N16 ret1(tensor[4].get(), tensor[5].get()); \
Fix64N16 ret2(tensor[6].get(), tensor[7].get()); \
_t[0] = std::thread( \
[&] () { \
ContextHolder::template run_with_context( \
_exec_ctx.get(), _mpc_ctx[0], \
[&](){ \
Fix64N16::preds_to_indices(&lhs, &ret0); \
ret0.reveal_to_one(0, tensor[8].get()); \
}); \
} \
); \
_t[1] = std::thread( \
[&] () { \
ContextHolder::template run_with_context( \
_exec_ctx.get(), _mpc_ctx[1], \
[&](){ \
Fix64N16::preds_to_indices(&lhs, &ret1); \
ret1.reveal_to_one(0, nullptr); \
}); \
} \
); \
_t[2] = std::thread( \
[&] () { \
ContextHolder::template run_with_context( \
_exec_ctx.get(), _mpc_ctx[2], \
[&](){ \
Fix64N16::preds_to_indices(&lhs, &ret2); \
ret2.reveal_to_one(0, nullptr); \
}); \
} \
); \
for (auto &t: _t) { \
t.join(); \
} \
cudaMemcpy(buf, tensor[8]->data(), sizeof(buf), cudaMemcpyDeviceToHost); \
cudaStreamSynchronize(get_gpu_ctx()->stream()); \
for (int i = 0; i < 4; ++i) {
EXPECT_NEAR(1, buf[i] / 65536.0, 0.001);
}
}
TEST_F(FixedTensorTest, tp_fp_fn) {
int64_t buf[3];
std::shared_ptr<TensorAdapter<int64_t> > tensor[9]; \
for (int i = 0; i < 9; i++) { \
tensor[i] = gen(0.33, { 3 }); \
} \
Fix64N16 lhs(tensor[0].get(), tensor[1].get()); \
Fix64N16 ret0(tensor[2].get(), tensor[3].get()); \
Fix64N16 ret1(tensor[4].get(), tensor[5].get()); \
Fix64N16 ret2(tensor[6].get(), tensor[7].get()); \
_t[0] = std::thread( \
[&] () { \
ContextHolder::template run_with_context( \
_exec_ctx.get(), _mpc_ctx[0], \
[&](){ \
Fix64N16::calc_tp_fp_fn(&lhs, &lhs, &ret0); \
ret0.reveal_to_one(0, tensor[8].get()); \
}); \
} \
); \
_t[1] = std::thread( \
[&] () { \
ContextHolder::template run_with_context( \
_exec_ctx.get(), _mpc_ctx[1], \
[&](){ \
Fix64N16::calc_tp_fp_fn(&lhs, &lhs, &ret1); \
ret1.reveal_to_one(0, nullptr); \
}); \
} \
); \
_t[2] = std::thread( \
[&] () { \
ContextHolder::template run_with_context( \
_exec_ctx.get(), _mpc_ctx[2], \
[&](){ \
Fix64N16::calc_tp_fp_fn(&lhs, &lhs, &ret2); \
ret2.reveal_to_one(0, nullptr); \
}); \
} \
); \
for (auto &t: _t) { \
t.join(); \
} \
cudaMemcpy(buf, tensor[8]->data(), sizeof(buf), cudaMemcpyDeviceToHost); \
cudaStreamSynchronize(get_gpu_ctx()->stream()); \
EXPECT_NEAR(3, buf[0] / 65536.0, 0.1);
EXPECT_NEAR(0, buf[1] / 65536.0, 0.1);
EXPECT_NEAR(0, buf[1] / 65536.0, 0.1);
}
TEST_F(FixedTensorTest, precision_recall) {
int64_t buf[3];
std::shared_ptr<TensorAdapter<int64_t> > tensor[9]; \
for (int i = 0; i < 5; i++) { \
tensor[i] = gen(3.33, { 3 }); \
} \
Fix64N16 lhs(tensor[0].get(), tensor[1].get()); \
_t[0] = std::thread( \
[&] () { \
ContextHolder::template run_with_context( \
_exec_ctx.get(), _mpc_ctx[0], \
[&](){ \
Fix64N16::calc_precision_recall(&lhs, tensor[2].get()); \
}); \
} \
); \
_t[1] = std::thread( \
[&] () { \
ContextHolder::template run_with_context( \
_exec_ctx.get(), _mpc_ctx[1], \
[&](){ \
Fix64N16::calc_precision_recall(&lhs, tensor[3].get()); \
}); \
} \
); \
_t[2] = std::thread( \
[&] () { \
ContextHolder::template run_with_context( \
_exec_ctx.get(), _mpc_ctx[2], \
[&](){ \
Fix64N16::calc_precision_recall(&lhs, tensor[4].get()); \
}); \
} \
); \
for (auto &t: _t) { \
t.join(); \
} \
cudaMemcpy(buf, tensor[2]->data(), sizeof(buf), cudaMemcpyDeviceToHost); \
cudaStreamSynchronize(get_gpu_ctx()->stream()); \
EXPECT_NEAR(0.5, buf[0] / 65536.0, 0.1);
EXPECT_NEAR(0.5, buf[1] / 65536.0, 0.1);
EXPECT_NEAR(0.5, buf[2] / 65536.0, 0.1);
}
} // namespace aby3
|
task4_cycle_nest.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "cuda_error_handling.h"
#include <iostream>
#include <algorithm>
#include <ctime>
#include <cstdio>
#include <vector>
#include <chrono>
#include <string>
using namespace std;
using namespace std::chrono;
// For every (x, y, z) with y < n-1, computes
//   a[z][y][x] = a[z][y+1][x] + b[z][y+1][x]
// over arrays flattened as ((z*n + y)*n + x), matching cpu_nest_add().
// Bug fix: the grid is rounded up to the block size, so threads with
// idx/idy/idz >= n exist; each axis must be guarded separately.  The old
// single guard `id_inc < n*n*n` let (a) out-of-range idx alias other
// elements' flat indices and clobber them, and (b) idy == n-1 threads read
// across the z-slice boundary — neither happens in the CPU reference.
// NOTE(review): a thread reads d_a[id_inc] while its y+1 neighbour may be
// writing it; the CPU loop always observes the original values.  A separate
// output buffer would make the kernel race-free — confirm intended semantics.
__global__ void nest_add(double* d_a, const double* d_b, int n) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
int idy = threadIdx.y + blockDim.y * blockIdx.y;
int idz = threadIdx.z + blockDim.z * blockIdx.z;
if (idx < n && idy < n - 1 && idz < n) {
int id = (idz*n+idy)*n+idx;
int id_inc = (idz*n+idy+1)*n+idx;
d_a[id] = d_a[id_inc] + d_b[id_inc];
}
}
// Sequential reference for nest_add: in place, for every outer slice i and
// every row j in [0, n-2], sets a[i][j][k] = a[i][j+1][k] + b[i][j+1][k].
// Ascending j guarantees each read of row j+1 sees its original contents.
void cpu_nest_add (vector<vector<vector<double> > >& h_a,
const vector<vector<vector<double> > >& h_b, int n)
{
for (int i = 0; i < n; ++i) {
auto& plane_a = h_a[i];
const auto& plane_b = h_b[i];
for (int j = 0; j + 1 < n; ++j) {
const auto& src_a = plane_a[j + 1];
const auto& src_b = plane_b[j + 1];
auto& dst = plane_a[j];
for (int k = 0; k < n; ++k) {
dst[k] = src_a[k] + src_b[k];
}
}
}
}
// Benchmark driver: times a sequential triply-nested CPU loop against the
// nest_add kernel on an n*n*n volume of doubles.  Usage: prog <n>.
int main(int argc, char* argv[]) {
int deviceCount = 0;
hipError_t error_id = hipGetDeviceCount(&deviceCount);
if (error_id != hipSuccess) {
printf("hipGetDeviceCount returned %d\n-> %s\n",
static_cast<int>(error_id), hipGetErrorString(error_id));
printf("Result = FAIL\n");
exit(EXIT_FAILURE);
}
if (deviceCount == 0) {
printf("There are no available device(s) that support CUDA\n");
} else {
printf("Detected %d CUDA Capable device(s)\n", deviceCount);
}
if (argc < 2) {
cout << "Incorrect number of arguments\n";
return 0;
}
int n = stoi(argv[1]);
// total payload in bytes; NOTE(review): n*n*n is evaluated in int and
// overflows for n >= ~1291 before widening to size_t — confirm intended range
size_t size = n*n*n * sizeof(double);
// initialize vectors n^3
vector<double> h_a(n*n*n), h_b(n*n*n);
for (int i = 0; i < n*n*n; ++i) {
h_a[i] = (double)rand()/RAND_MAX;
h_b[i] = (double)rand()/RAND_MAX;
}
// initialize vector of matrices for direct cpu nest loop
vector<vector<vector<double> > > m_a(n, vector<vector<double>>(n, vector<double>(n)));
vector<vector<vector<double> > > m_b(n, vector<vector<double>>(n, vector<double>(n)));
// copy values from 1D array to 3D arrays
for (int i = 0; i < n; ++i) {
for (int j = 0; j < n; ++j) {
for (int k = 0; k < n; ++k) {
m_a[i][j][k] = h_a[(i*n+j)*n+k];
m_b[i][j][k] = h_b[(i*n+j)*n+k];
}
}
}
// Start cpu nest loop
auto cpu_start = high_resolution_clock::now();
cpu_nest_add(m_a, m_b, n);
auto cpu_stop = high_resolution_clock::now();
// NOTE(review): duration<double> counts seconds; the *1e4 factor makes the
// printed "CPU time" neither seconds nor the milliseconds used for the GPU
// figure below — confirm the intended unit before comparing the two.
duration<double> cpu_time = (cpu_stop - cpu_start)*1e4;
// Allocate CUDA 1D arrays
double *d_a, *d_b;
SAFE_CALL(hipMalloc(&d_a, size));
SAFE_CALL(hipMalloc(&d_b, size));
// Copy values from Device 1D arrays
SAFE_CALL(hipMemcpy(d_a, &h_a[0], size, hipMemcpyHostToDevice));
SAFE_CALL(hipMemcpy(d_b, &h_b[0], size, hipMemcpyHostToDevice));
// 16*16*4 = 1024 threads per block; grid rounded up so every element is covered
dim3 blockSize(16,16,4);
dim3 gridSize((n-1)/blockSize.x + 1,(n-1)/blockSize.y + 1,(n-1)/blockSize.z + 1);
printf("blockSize %d %d %d \n", blockSize.x, blockSize.y, blockSize.z);
printf("gridSize %d %d %d \n", gridSize.x, gridSize.y, gridSize.z);
printf("n_elements %d\n", n*n*n);
hipEvent_t start, stop;
SAFE_CALL(hipEventCreate(&start));
SAFE_CALL(hipEventCreate(&stop));
SAFE_CALL(hipEventRecord(start));
// NOTE(review): hipify fused the project's SAFE_KERNEL_CALL macro inside a
// hipLaunchKernelGGL wrapper — verify this still expands to a checked launch.
hipLaunchKernelGGL(( SAFE_KERNEL_CALL((nest_add), dim3(gridSize), dim3(blockSize), 0, 0, d_a, d_b, n)));
// NOTE(review): the synchronize precedes the stop event, so the timed
// interval includes the host-side sync, not just kernel execution.
SAFE_CALL(hipDeviceSynchronize());
SAFE_CALL(hipEventRecord(stop));
// Initialize device vector for Cuda transfer
vector<double> h_ans(n*n*n);
SAFE_CALL(hipMemcpy(&h_ans[0], d_a, size, hipMemcpyDeviceToHost));
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
cout << "CPU time " << cpu_time.count() << '\n';
cout << "CUDA time " << milliseconds << '\n';
// NOTE(review): h_ans is never validated against the CPU result m_a, and the
// two hipEvent_t objects are not destroyed before exit.
hipFree(d_a);
hipFree(d_b);
} | task4_cycle_nest.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "cuda_error_handling.h"
#include <iostream>
#include <algorithm>
#include <ctime>
#include <cstdio>
#include <vector>
#include <chrono>
#include <string>
using namespace std;
using namespace std::chrono;
__global__ void nest_add(double* d_a, const double* d_b, int n) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
int idy = threadIdx.y + blockDim.y * blockIdx.y;
int idz = threadIdx.z + blockDim.z * blockIdx.z;
int id = (idz*n+idy)*n+idx;
int id_inc = (idz*n+idy+1)*n+idx;
if (id_inc < n*n*n) {
d_a[id] = d_a[id_inc] + d_b[id_inc];
}
}
void cpu_nest_add (vector<vector<vector<double> > >& h_a,
const vector<vector<vector<double> > >& h_b, int n)
{
for (int i = 0; i < n; ++i) {
for (int j = 0; j < n-1; ++j) {
for (int k = 0; k < n; ++k) {
h_a[i][j][k] = h_a[i][j+1][k] + h_b[i][j+1][k];
}
}
}
}
int main(int argc, char* argv[]) {
int deviceCount = 0;
cudaError_t error_id = cudaGetDeviceCount(&deviceCount);
if (error_id != cudaSuccess) {
printf("cudaGetDeviceCount returned %d\n-> %s\n",
static_cast<int>(error_id), cudaGetErrorString(error_id));
printf("Result = FAIL\n");
exit(EXIT_FAILURE);
}
if (deviceCount == 0) {
printf("There are no available device(s) that support CUDA\n");
} else {
printf("Detected %d CUDA Capable device(s)\n", deviceCount);
}
if (argc < 2) {
cout << "Incorrect number of arguments\n";
return 0;
}
int n = stoi(argv[1]);
size_t size = n*n*n * sizeof(double);
// initialize vectors n^3
vector<double> h_a(n*n*n), h_b(n*n*n);
for (int i = 0; i < n*n*n; ++i) {
h_a[i] = (double)rand()/RAND_MAX;
h_b[i] = (double)rand()/RAND_MAX;
}
// initialize vector of matrices for direct cpu nest loop
vector<vector<vector<double> > > m_a(n, vector<vector<double>>(n, vector<double>(n)));
vector<vector<vector<double> > > m_b(n, vector<vector<double>>(n, vector<double>(n)));
// copy values from 1D array to 3D arrays
for (int i = 0; i < n; ++i) {
for (int j = 0; j < n; ++j) {
for (int k = 0; k < n; ++k) {
m_a[i][j][k] = h_a[(i*n+j)*n+k];
m_b[i][j][k] = h_b[(i*n+j)*n+k];
}
}
}
// Start cpu nest loop
auto cpu_start = high_resolution_clock::now();
cpu_nest_add(m_a, m_b, n);
auto cpu_stop = high_resolution_clock::now();
duration<double> cpu_time = (cpu_stop - cpu_start)*1e4;
// Allocate CUDA 1D arrays
double *d_a, *d_b;
SAFE_CALL(cudaMalloc(&d_a, size));
SAFE_CALL(cudaMalloc(&d_b, size));
// Copy values from Device 1D arrays
SAFE_CALL(cudaMemcpy(d_a, &h_a[0], size, cudaMemcpyHostToDevice));
SAFE_CALL(cudaMemcpy(d_b, &h_b[0], size, cudaMemcpyHostToDevice));
dim3 blockSize(16,16,4);
dim3 gridSize((n-1)/blockSize.x + 1,(n-1)/blockSize.y + 1,(n-1)/blockSize.z + 1);
printf("blockSize %d %d %d \n", blockSize.x, blockSize.y, blockSize.z);
printf("gridSize %d %d %d \n", gridSize.x, gridSize.y, gridSize.z);
printf("n_elements %d\n", n*n*n);
cudaEvent_t start, stop;
SAFE_CALL(cudaEventCreate(&start));
SAFE_CALL(cudaEventCreate(&stop));
SAFE_CALL(cudaEventRecord(start));
SAFE_KERNEL_CALL((nest_add<<<gridSize, blockSize>>>(d_a, d_b, n)));
SAFE_CALL(cudaDeviceSynchronize());
SAFE_CALL(cudaEventRecord(stop));
// Initialize device vector for Cuda transfer
vector<double> h_ans(n*n*n);
SAFE_CALL(cudaMemcpy(&h_ans[0], d_a, size, cudaMemcpyDeviceToHost));
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
cout << "CPU time " << cpu_time.count() << '\n';
cout << "CUDA time " << milliseconds << '\n';
cudaFree(d_a);
cudaFree(d_b);
} |
15c0624d0ea73820b23e4b31d79441afaf9d146c.hip | // !!! This is a file automatically generated by hipify!!!
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "row_filter.h"
namespace filter
{
template void linearRow<uchar, float>(PtrStepSzb src, PtrStepSzb dst, const float* kernel, int ksize, int anchor, int brd_type, int cc, hipStream_t stream);
}
#endif /* CUDA_DISABLER */
| 15c0624d0ea73820b23e4b31d79441afaf9d146c.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "row_filter.h"
namespace filter
{
template void linearRow<uchar, float>(PtrStepSzb src, PtrStepSzb dst, const float* kernel, int ksize, int anchor, int brd_type, int cc, cudaStream_t stream);
}
#endif /* CUDA_DISABLER */
|
76c886ee59d142685d85cfd15fc722aff918795c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hiprand/hiprand.h"
#include "rocblas.h"
extern "C" {
#include "convolutional_layer.h"
#include "batchnorm_layer.h"
#include "gemm.h"
#include "blas.h"
#include "im2col.h"
#include "col2im.h"
#include "utils.h"
#include "hip/hip_runtime.h"
}
__global__ void binarize_kernel(float *x, int n, float *binary)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i >= n) return;
binary[i] = (x[i] >= 0) ? 1 : -1;
}
void binarize_gpu(float *x, int n, float *binary)
{
hipLaunchKernelGGL(( binarize_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, x, n, binary);
check_error(hipPeekAtLastError());
}
__global__ void binarize_input_kernel(float *input, int n, int size, float *binary)
{
int s = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (s >= size) return;
int i = 0;
float mean = 0;
for(i = 0; i < n; ++i){
mean += abs(input[i*size + s]);
}
mean = mean / n;
for(i = 0; i < n; ++i){
binary[i*size + s] = (input[i*size + s] > 0) ? mean : -mean;
}
}
void binarize_input_gpu(float *input, int n, int size, float *binary)
{
hipLaunchKernelGGL(( binarize_input_kernel), dim3(cuda_gridsize(size)), dim3(BLOCK), 0, 0, input, n, size, binary);
check_error(hipPeekAtLastError());
}
__global__ void binarize_weights_kernel(float *weights, int n, int size, float *binary)
{
int f = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (f >= n) return;
int i = 0;
float mean = 0;
for(i = 0; i < size; ++i){
mean += abs(weights[f*size + i]);
}
mean = mean / size;
for(i = 0; i < size; ++i){
binary[f*size + i] = (weights[f*size + i] > 0) ? mean : -mean;
//binary[f*size + i] = weights[f*size + i];
}
}
void binarize_weights_gpu(float *weights, int n, int size, float *binary)
{
hipLaunchKernelGGL(( binarize_weights_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, weights, n, size, binary);
check_error(hipPeekAtLastError());
}
void forward_convolutional_layer_gpu(convolutional_layer l, network_state state)
{
fill_ongpu(l.outputs*l.batch, 0, l.output_gpu, 1);
if(l.binary){
binarize_weights_gpu(l.weights_gpu, l.n, l.c*l.size*l.size, l.binary_weights_gpu);
swap_binary(&l);
}
if(l.xnor){
binarize_weights_gpu(l.weights_gpu, l.n, l.c*l.size*l.size, l.binary_weights_gpu);
swap_binary(&l);
binarize_gpu(state.input, l.c*l.h*l.w*l.batch, l.binary_input_gpu);
state.input = l.binary_input_gpu;
}
#ifdef CUDNN
float one = 1;
cudnnConvolutionForward(cudnn_handle(),
&one,
l.srcTensorDesc,
state.input,
l.weightDesc,
l.weights_gpu,
l.convDesc,
l.fw_algo,
state.workspace,
l.workspace_size,
&one,
l.dstTensorDesc,
l.output_gpu);
#else
int i;
int m = l.n;
int k = l.size*l.size*l.c;
int n = l.out_w*l.out_h;
for(i = 0; i < l.batch; ++i){
im2col_ongpu(state.input + i*l.c*l.h*l.w, l.c, l.h, l.w, l.size, l.stride, l.pad, state.workspace);
float * a = l.weights_gpu;
float * b = state.workspace;
float * c = l.output_gpu;
gemm_ongpu(0,0,m,n,k,1.,a,k,b,n,1.,c+i*m*n,n);
}
#endif
if (l.batch_normalize) {
forward_batchnorm_layer_gpu(l, state);
}
add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w*l.out_h);
activate_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation);
//if(l.dot > 0) dot_error_gpu(l);
if(l.binary || l.xnor) swap_binary(&l);
}
void backward_convolutional_layer_gpu(convolutional_layer l, network_state state)
{
//constrain_ongpu(l.outputs*l.batch, 1, l.delta_gpu, 1);
gradient_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation, l.delta_gpu);
backward_bias_gpu(l.bias_updates_gpu, l.delta_gpu, l.batch, l.n, l.out_w*l.out_h);
if(l.batch_normalize){
backward_batchnorm_layer_gpu(l, state);
//axpy_ongpu(l.outputs*l.batch, -state.net.decay, l.x_gpu, 1, l.delta_gpu, 1);
} else {
//axpy_ongpu(l.outputs*l.batch, -state.net.decay, l.output_gpu, 1, l.delta_gpu, 1);
}
float *original_input = state.input;
if(l.xnor) state.input = l.binary_input_gpu;
#ifdef CUDNN
float one = 1;
cudnnConvolutionBackwardFilter(cudnn_handle(),
&one,
l.srcTensorDesc,
state.input,
l.ddstTensorDesc,
l.delta_gpu,
l.convDesc,
l.bf_algo,
state.workspace,
l.workspace_size,
&one,
l.dweightDesc,
l.weight_updates_gpu);
if(state.delta){
if(l.binary || l.xnor) swap_binary(&l);
cudnnConvolutionBackwardData(cudnn_handle(),
&one,
l.weightDesc,
l.weights_gpu,
l.ddstTensorDesc,
l.delta_gpu,
l.convDesc,
l.bd_algo,
state.workspace,
l.workspace_size,
&one,
l.dsrcTensorDesc,
state.delta);
if(l.binary || l.xnor) swap_binary(&l);
if(l.xnor) gradient_array_ongpu(original_input, l.batch*l.c*l.h*l.w, HARDTAN, state.delta);
}
#else
int m = l.n;
int n = l.size*l.size*l.c;
int k = l.out_w*l.out_h;
int i;
for(i = 0; i < l.batch; ++i){
float * a = l.delta_gpu;
float * b = state.workspace;
float * c = l.weight_updates_gpu;
im2col_ongpu(state.input + i*l.c*l.h*l.w, l.c, l.h, l.w, l.size, l.stride, l.pad, state.workspace);
gemm_ongpu(0,1,m,n,k,1,a + i*m*k,k,b,k,1,c,n);
if(state.delta){
if(l.binary || l.xnor) swap_binary(&l);
float * a = l.weights_gpu;
float * b = l.delta_gpu;
float * c = state.workspace;
gemm_ongpu(1,0,n,k,m,1,a,n,b + i*k*m,k,0,c,k);
col2im_ongpu(state.workspace, l.c, l.h, l.w, l.size, l.stride, l.pad, state.delta + i*l.c*l.h*l.w);
if(l.binary || l.xnor) {
swap_binary(&l);
}
if(l.xnor) gradient_array_ongpu(original_input + i*l.c*l.h*l.w, l.c*l.h*l.w, HARDTAN, state.delta + i*l.c*l.h*l.w);
}
}
#endif
}
void pull_convolutional_layer(convolutional_layer layer)
{
cuda_pull_array(layer.weights_gpu, layer.weights, layer.c*layer.n*layer.size*layer.size);
cuda_pull_array(layer.biases_gpu, layer.biases, layer.n);
cuda_pull_array(layer.weight_updates_gpu, layer.weight_updates, layer.c*layer.n*layer.size*layer.size);
cuda_pull_array(layer.bias_updates_gpu, layer.bias_updates, layer.n);
if (layer.batch_normalize){
cuda_pull_array(layer.scales_gpu, layer.scales, layer.n);
cuda_pull_array(layer.rolling_mean_gpu, layer.rolling_mean, layer.n);
cuda_pull_array(layer.rolling_variance_gpu, layer.rolling_variance, layer.n);
}
if (layer.adam){
cuda_pull_array(layer.m_gpu, layer.m, layer.c*layer.n*layer.size*layer.size);
cuda_pull_array(layer.v_gpu, layer.v, layer.c*layer.n*layer.size*layer.size);
}
}
void push_convolutional_layer(convolutional_layer layer)
{
cuda_push_array(layer.weights_gpu, layer.weights, layer.c*layer.n*layer.size*layer.size);
cuda_push_array(layer.biases_gpu, layer.biases, layer.n);
cuda_push_array(layer.weight_updates_gpu, layer.weight_updates, layer.c*layer.n*layer.size*layer.size);
cuda_push_array(layer.bias_updates_gpu, layer.bias_updates, layer.n);
if (layer.batch_normalize){
cuda_push_array(layer.scales_gpu, layer.scales, layer.n);
cuda_push_array(layer.rolling_mean_gpu, layer.rolling_mean, layer.n);
cuda_push_array(layer.rolling_variance_gpu, layer.rolling_variance, layer.n);
}
if (layer.adam){
cuda_push_array(layer.m_gpu, layer.m, layer.c*layer.n*layer.size*layer.size);
cuda_push_array(layer.v_gpu, layer.v, layer.c*layer.n*layer.size*layer.size);
}
}
void update_convolutional_layer_gpu(convolutional_layer layer, int batch, float learning_rate, float momentum, float decay)
{
int size = layer.size*layer.size*layer.c*layer.n;
axpy_ongpu(layer.n, learning_rate/batch, layer.bias_updates_gpu, 1, layer.biases_gpu, 1);
scal_ongpu(layer.n, momentum, layer.bias_updates_gpu, 1);
if(layer.scales_gpu){
axpy_ongpu(layer.n, learning_rate/batch, layer.scale_updates_gpu, 1, layer.scales_gpu, 1);
scal_ongpu(layer.n, momentum, layer.scale_updates_gpu, 1);
}
if(layer.adam){
scal_ongpu(size, layer.B1, layer.m_gpu, 1);
scal_ongpu(size, layer.B2, layer.v_gpu, 1);
axpy_ongpu(size, -decay*batch, layer.weights_gpu, 1, layer.weight_updates_gpu, 1);
axpy_ongpu(size, -(1-layer.B1), layer.weight_updates_gpu, 1, layer.m_gpu, 1);
mul_ongpu(size, layer.weight_updates_gpu, 1, layer.weight_updates_gpu, 1);
axpy_ongpu(size, (1-layer.B2), layer.weight_updates_gpu, 1, layer.v_gpu, 1);
adam_gpu(size, layer.weights_gpu, layer.m_gpu, layer.v_gpu, layer.B1, layer.B2, learning_rate/batch, layer.eps, layer.t+1);
fill_ongpu(size, 0, layer.weight_updates_gpu, 1);
}else{
axpy_ongpu(size, -decay*batch, layer.weights_gpu, 1, layer.weight_updates_gpu, 1);
axpy_ongpu(size, learning_rate/batch, layer.weight_updates_gpu, 1, layer.weights_gpu, 1);
scal_ongpu(size, momentum, layer.weight_updates_gpu, 1);
}
}
| 76c886ee59d142685d85cfd15fc722aff918795c.cu | #include "cuda_runtime.h"
#include "curand.h"
#include "cublas_v2.h"
extern "C" {
#include "convolutional_layer.h"
#include "batchnorm_layer.h"
#include "gemm.h"
#include "blas.h"
#include "im2col.h"
#include "col2im.h"
#include "utils.h"
#include "cuda.h"
}
__global__ void binarize_kernel(float *x, int n, float *binary)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i >= n) return;
binary[i] = (x[i] >= 0) ? 1 : -1;
}
void binarize_gpu(float *x, int n, float *binary)
{
binarize_kernel<<<cuda_gridsize(n), BLOCK>>>(x, n, binary);
check_error(cudaPeekAtLastError());
}
__global__ void binarize_input_kernel(float *input, int n, int size, float *binary)
{
int s = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (s >= size) return;
int i = 0;
float mean = 0;
for(i = 0; i < n; ++i){
mean += abs(input[i*size + s]);
}
mean = mean / n;
for(i = 0; i < n; ++i){
binary[i*size + s] = (input[i*size + s] > 0) ? mean : -mean;
}
}
void binarize_input_gpu(float *input, int n, int size, float *binary)
{
binarize_input_kernel<<<cuda_gridsize(size), BLOCK>>>(input, n, size, binary);
check_error(cudaPeekAtLastError());
}
__global__ void binarize_weights_kernel(float *weights, int n, int size, float *binary)
{
int f = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (f >= n) return;
int i = 0;
float mean = 0;
for(i = 0; i < size; ++i){
mean += abs(weights[f*size + i]);
}
mean = mean / size;
for(i = 0; i < size; ++i){
binary[f*size + i] = (weights[f*size + i] > 0) ? mean : -mean;
//binary[f*size + i] = weights[f*size + i];
}
}
void binarize_weights_gpu(float *weights, int n, int size, float *binary)
{
binarize_weights_kernel<<<cuda_gridsize(n), BLOCK>>>(weights, n, size, binary);
check_error(cudaPeekAtLastError());
}
void forward_convolutional_layer_gpu(convolutional_layer l, network_state state)
{
fill_ongpu(l.outputs*l.batch, 0, l.output_gpu, 1);
if(l.binary){
binarize_weights_gpu(l.weights_gpu, l.n, l.c*l.size*l.size, l.binary_weights_gpu);
swap_binary(&l);
}
if(l.xnor){
binarize_weights_gpu(l.weights_gpu, l.n, l.c*l.size*l.size, l.binary_weights_gpu);
swap_binary(&l);
binarize_gpu(state.input, l.c*l.h*l.w*l.batch, l.binary_input_gpu);
state.input = l.binary_input_gpu;
}
#ifdef CUDNN
float one = 1;
cudnnConvolutionForward(cudnn_handle(),
&one,
l.srcTensorDesc,
state.input,
l.weightDesc,
l.weights_gpu,
l.convDesc,
l.fw_algo,
state.workspace,
l.workspace_size,
&one,
l.dstTensorDesc,
l.output_gpu);
#else
int i;
int m = l.n;
int k = l.size*l.size*l.c;
int n = l.out_w*l.out_h;
for(i = 0; i < l.batch; ++i){
im2col_ongpu(state.input + i*l.c*l.h*l.w, l.c, l.h, l.w, l.size, l.stride, l.pad, state.workspace);
float * a = l.weights_gpu;
float * b = state.workspace;
float * c = l.output_gpu;
gemm_ongpu(0,0,m,n,k,1.,a,k,b,n,1.,c+i*m*n,n);
}
#endif
if (l.batch_normalize) {
forward_batchnorm_layer_gpu(l, state);
}
add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w*l.out_h);
activate_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation);
//if(l.dot > 0) dot_error_gpu(l);
if(l.binary || l.xnor) swap_binary(&l);
}
void backward_convolutional_layer_gpu(convolutional_layer l, network_state state)
{
//constrain_ongpu(l.outputs*l.batch, 1, l.delta_gpu, 1);
gradient_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation, l.delta_gpu);
backward_bias_gpu(l.bias_updates_gpu, l.delta_gpu, l.batch, l.n, l.out_w*l.out_h);
if(l.batch_normalize){
backward_batchnorm_layer_gpu(l, state);
//axpy_ongpu(l.outputs*l.batch, -state.net.decay, l.x_gpu, 1, l.delta_gpu, 1);
} else {
//axpy_ongpu(l.outputs*l.batch, -state.net.decay, l.output_gpu, 1, l.delta_gpu, 1);
}
float *original_input = state.input;
if(l.xnor) state.input = l.binary_input_gpu;
#ifdef CUDNN
float one = 1;
cudnnConvolutionBackwardFilter(cudnn_handle(),
&one,
l.srcTensorDesc,
state.input,
l.ddstTensorDesc,
l.delta_gpu,
l.convDesc,
l.bf_algo,
state.workspace,
l.workspace_size,
&one,
l.dweightDesc,
l.weight_updates_gpu);
if(state.delta){
if(l.binary || l.xnor) swap_binary(&l);
cudnnConvolutionBackwardData(cudnn_handle(),
&one,
l.weightDesc,
l.weights_gpu,
l.ddstTensorDesc,
l.delta_gpu,
l.convDesc,
l.bd_algo,
state.workspace,
l.workspace_size,
&one,
l.dsrcTensorDesc,
state.delta);
if(l.binary || l.xnor) swap_binary(&l);
if(l.xnor) gradient_array_ongpu(original_input, l.batch*l.c*l.h*l.w, HARDTAN, state.delta);
}
#else
int m = l.n;
int n = l.size*l.size*l.c;
int k = l.out_w*l.out_h;
int i;
for(i = 0; i < l.batch; ++i){
float * a = l.delta_gpu;
float * b = state.workspace;
float * c = l.weight_updates_gpu;
im2col_ongpu(state.input + i*l.c*l.h*l.w, l.c, l.h, l.w, l.size, l.stride, l.pad, state.workspace);
gemm_ongpu(0,1,m,n,k,1,a + i*m*k,k,b,k,1,c,n);
if(state.delta){
if(l.binary || l.xnor) swap_binary(&l);
float * a = l.weights_gpu;
float * b = l.delta_gpu;
float * c = state.workspace;
gemm_ongpu(1,0,n,k,m,1,a,n,b + i*k*m,k,0,c,k);
col2im_ongpu(state.workspace, l.c, l.h, l.w, l.size, l.stride, l.pad, state.delta + i*l.c*l.h*l.w);
if(l.binary || l.xnor) {
swap_binary(&l);
}
if(l.xnor) gradient_array_ongpu(original_input + i*l.c*l.h*l.w, l.c*l.h*l.w, HARDTAN, state.delta + i*l.c*l.h*l.w);
}
}
#endif
}
void pull_convolutional_layer(convolutional_layer layer)
{
cuda_pull_array(layer.weights_gpu, layer.weights, layer.c*layer.n*layer.size*layer.size);
cuda_pull_array(layer.biases_gpu, layer.biases, layer.n);
cuda_pull_array(layer.weight_updates_gpu, layer.weight_updates, layer.c*layer.n*layer.size*layer.size);
cuda_pull_array(layer.bias_updates_gpu, layer.bias_updates, layer.n);
if (layer.batch_normalize){
cuda_pull_array(layer.scales_gpu, layer.scales, layer.n);
cuda_pull_array(layer.rolling_mean_gpu, layer.rolling_mean, layer.n);
cuda_pull_array(layer.rolling_variance_gpu, layer.rolling_variance, layer.n);
}
if (layer.adam){
cuda_pull_array(layer.m_gpu, layer.m, layer.c*layer.n*layer.size*layer.size);
cuda_pull_array(layer.v_gpu, layer.v, layer.c*layer.n*layer.size*layer.size);
}
}
void push_convolutional_layer(convolutional_layer layer)
{
cuda_push_array(layer.weights_gpu, layer.weights, layer.c*layer.n*layer.size*layer.size);
cuda_push_array(layer.biases_gpu, layer.biases, layer.n);
cuda_push_array(layer.weight_updates_gpu, layer.weight_updates, layer.c*layer.n*layer.size*layer.size);
cuda_push_array(layer.bias_updates_gpu, layer.bias_updates, layer.n);
if (layer.batch_normalize){
cuda_push_array(layer.scales_gpu, layer.scales, layer.n);
cuda_push_array(layer.rolling_mean_gpu, layer.rolling_mean, layer.n);
cuda_push_array(layer.rolling_variance_gpu, layer.rolling_variance, layer.n);
}
if (layer.adam){
cuda_push_array(layer.m_gpu, layer.m, layer.c*layer.n*layer.size*layer.size);
cuda_push_array(layer.v_gpu, layer.v, layer.c*layer.n*layer.size*layer.size);
}
}
void update_convolutional_layer_gpu(convolutional_layer layer, int batch, float learning_rate, float momentum, float decay)
{
int size = layer.size*layer.size*layer.c*layer.n;
axpy_ongpu(layer.n, learning_rate/batch, layer.bias_updates_gpu, 1, layer.biases_gpu, 1);
scal_ongpu(layer.n, momentum, layer.bias_updates_gpu, 1);
if(layer.scales_gpu){
axpy_ongpu(layer.n, learning_rate/batch, layer.scale_updates_gpu, 1, layer.scales_gpu, 1);
scal_ongpu(layer.n, momentum, layer.scale_updates_gpu, 1);
}
if(layer.adam){
scal_ongpu(size, layer.B1, layer.m_gpu, 1);
scal_ongpu(size, layer.B2, layer.v_gpu, 1);
axpy_ongpu(size, -decay*batch, layer.weights_gpu, 1, layer.weight_updates_gpu, 1);
axpy_ongpu(size, -(1-layer.B1), layer.weight_updates_gpu, 1, layer.m_gpu, 1);
mul_ongpu(size, layer.weight_updates_gpu, 1, layer.weight_updates_gpu, 1);
axpy_ongpu(size, (1-layer.B2), layer.weight_updates_gpu, 1, layer.v_gpu, 1);
adam_gpu(size, layer.weights_gpu, layer.m_gpu, layer.v_gpu, layer.B1, layer.B2, learning_rate/batch, layer.eps, layer.t+1);
fill_ongpu(size, 0, layer.weight_updates_gpu, 1);
}else{
axpy_ongpu(size, -decay*batch, layer.weights_gpu, 1, layer.weight_updates_gpu, 1);
axpy_ongpu(size, learning_rate/batch, layer.weight_updates_gpu, 1, layer.weights_gpu, 1);
scal_ongpu(size, momentum, layer.weight_updates_gpu, 1);
}
}
|
4a5d1c5727e96de6fd110f8a45839ca88c3fb8e3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
__global__ void MatrixMulKernel(float* M, float* N, float* P, int Width)
{
// Calculate the row index of the P element and M
int Row = blockIdx.y*blockDim.y+threadIdx.y;
// Calculate the column index of P and N
int Col = blockIdx.x*blockDim.x+threadIdx.x;
if ((Row < Width) && (Col < Width)) {
float Pvalue = 0;
// each thread computes one element of the block sub-matrix
for (int k = 0; k < Width; ++k) {
Pvalue += M[Row*Width+k]*N[k*Width+Col];
}
P[Row*Width+Col] = Pvalue;
}
}
int main()
{
const int w = 2;
float M[w][w] = {{0, 1}, {2, 3}};
float N[w][w] = {{0, 1}, {2, 3}};
float P[w][w];
int sizeMat = w * w * sizeof(float);
float *d_M, *d_N, *d_P;
hipMalloc((void **) &d_M, w * w * sizeof(float));
hipMalloc((void **) &d_N, w * w * sizeof(float));
hipMalloc((void **) &d_P, w * w * sizeof(float));
hipMemcpy(d_M, M, sizeMat, hipMemcpyHostToDevice);
hipMemcpy(d_N, N, sizeMat, hipMemcpyHostToDevice);
dim3 dimBlock(w, w, 1);
hipLaunchKernelGGL(( MatrixMulKernel), dim3(1), dim3(dimBlock), 0, 0, d_M, d_N, d_P, w);
hipMemcpy(P, d_P, sizeMat, hipMemcpyDeviceToHost);
for (int i = 0; i < w; ++i)
{
for (int j = 0; j < w; ++j)
{
std::cout << P[i][j] << " ";
}
std::cout << "\n";
}
return 0;
} | 4a5d1c5727e96de6fd110f8a45839ca88c3fb8e3.cu | #include <iostream>
__global__ void MatrixMulKernel(float* M, float* N, float* P, int Width)
{
// Calculate the row index of the P element and M
int Row = blockIdx.y*blockDim.y+threadIdx.y;
// Calculate the column index of P and N
int Col = blockIdx.x*blockDim.x+threadIdx.x;
if ((Row < Width) && (Col < Width)) {
float Pvalue = 0;
// each thread computes one element of the block sub-matrix
for (int k = 0; k < Width; ++k) {
Pvalue += M[Row*Width+k]*N[k*Width+Col];
}
P[Row*Width+Col] = Pvalue;
}
}
int main()
{
const int w = 2;
float M[w][w] = {{0, 1}, {2, 3}};
float N[w][w] = {{0, 1}, {2, 3}};
float P[w][w];
int sizeMat = w * w * sizeof(float);
float *d_M, *d_N, *d_P;
cudaMalloc((void **) &d_M, w * w * sizeof(float));
cudaMalloc((void **) &d_N, w * w * sizeof(float));
cudaMalloc((void **) &d_P, w * w * sizeof(float));
cudaMemcpy(d_M, M, sizeMat, cudaMemcpyHostToDevice);
cudaMemcpy(d_N, N, sizeMat, cudaMemcpyHostToDevice);
dim3 dimBlock(w, w, 1);
MatrixMulKernel<<<1, dimBlock>>>(d_M, d_N, d_P, w);
cudaMemcpy(P, d_P, sizeMat, cudaMemcpyDeviceToHost);
for (int i = 0; i < w; ++i)
{
for (int j = 0; j < w; ++j)
{
std::cout << P[i][j] << " ";
}
std::cout << "\n";
}
return 0;
} |
4129234f55545045576d3c0fb2990c7e9733866d.hip | // !!! This is a file automatically generated by hipify!!!
#include "device_vrp.h"
DeviceVrp::DeviceVrp(void)
{
hipMalloc((void **)&vrp_, sizeof(vrp_problem));
}
DeviceVrp::DeviceVrp(const HostVrp& host_vrp)
{
hipMalloc((void **)&vrp_, sizeof(vrp_problem));
int vertnum = host_vrp.customer_size()+1;
hipMemcpy(&vrp_->vertnum, &vertnum, sizeof(int),
hipMemcpyHostToDevice);
int vehicle_size = host_vrp.vehicle_size();
hipMemcpy(&vrp_->numroutes, &vehicle_size, sizeof(int),
hipMemcpyHostToDevice);
int capacity = host_vrp.capacity();
hipMemcpy(&vrp_->capacity, &capacity, sizeof(int),
hipMemcpyHostToDevice);
int *host_cost = new int[host_vrp->edge_size()];
for (int i=0; i < vertnum; i++)
for (int j=0; j < i; j++)
host_cost[INDEX(i, j)] = host_vrp->cost(i, j);
int *device_cost = NULL;
size_t device_cost_bytes = host_vrp->edge_size() * sizeof(int);
hipMalloc((void **)&device_cost, device_cost_bytes);
hipMemcpy(device_cost, host_cost, device_cost_bytes,
hipMemcpyHostToDevice);
hipMemcpy(&vrp_->dist.cost, &device_cost, sizeof(int *),
hipMemcpyHostToDevice);
delete[] host_cost;
int *host_demand = new int[vertnum];
for (int i=0; i < vertnum; i++)
host_demand[i] = host_vrp->demand(i);
int *device_demand = NULL;
size_t device_demand_bytes = vertnum * sizeof(int);
hipMalloc((void **)&device_demand, device_demand_bytes);
hipMemcpy(device_demand, host_demand, device_demand_bytes,
hipMemcpyHostToDevice);
hipMemcpy(&vrp_->demand, &device_demand, sizeof(int *),
hipMemcpyHostToDevice);
delete[] host_demand;
}
DeviceVrp::~DeviceVrp(void)
{
/* TODO */
}
const DeviceVrp& operator=(const HostVrp& host_vrp)
{
/* TODO */
}
| 4129234f55545045576d3c0fb2990c7e9733866d.cu | #include "device_vrp.h"
DeviceVrp::DeviceVrp(void)
{
cudaMalloc((void **)&vrp_, sizeof(vrp_problem));
}
DeviceVrp::DeviceVrp(const HostVrp& host_vrp)
{
cudaMalloc((void **)&vrp_, sizeof(vrp_problem));
int vertnum = host_vrp.customer_size()+1;
cudaMemcpy(&vrp_->vertnum, &vertnum, sizeof(int),
cudaMemcpyHostToDevice);
int vehicle_size = host_vrp.vehicle_size();
cudaMemcpy(&vrp_->numroutes, &vehicle_size, sizeof(int),
cudaMemcpyHostToDevice);
int capacity = host_vrp.capacity();
cudaMemcpy(&vrp_->capacity, &capacity, sizeof(int),
cudaMemcpyHostToDevice);
int *host_cost = new int[host_vrp->edge_size()];
for (int i=0; i < vertnum; i++)
for (int j=0; j < i; j++)
host_cost[INDEX(i, j)] = host_vrp->cost(i, j);
int *device_cost = NULL;
size_t device_cost_bytes = host_vrp->edge_size() * sizeof(int);
cudaMalloc((void **)&device_cost, device_cost_bytes);
cudaMemcpy(device_cost, host_cost, device_cost_bytes,
cudaMemcpyHostToDevice);
cudaMemcpy(&vrp_->dist.cost, &device_cost, sizeof(int *),
cudaMemcpyHostToDevice);
delete[] host_cost;
int *host_demand = new int[vertnum];
for (int i=0; i < vertnum; i++)
host_demand[i] = host_vrp->demand(i);
int *device_demand = NULL;
size_t device_demand_bytes = vertnum * sizeof(int);
cudaMalloc((void **)&device_demand, device_demand_bytes);
cudaMemcpy(device_demand, host_demand, device_demand_bytes,
cudaMemcpyHostToDevice);
cudaMemcpy(&vrp_->demand, &device_demand, sizeof(int *),
cudaMemcpyHostToDevice);
delete[] host_demand;
}
DeviceVrp::~DeviceVrp(void)
{
/* TODO */
}
const DeviceVrp& operator=(const HostVrp& host_vrp)
{
/* TODO */
}
|
15570562d93482fae0260d7030f9c3e4c670c1fa.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "scalar.h"
//scalar and current element
__device__ double op(double d1,double d2,double *params) {
if(d2 < d1) {return 1;}
return 0;
}
extern "C"
__global__ void lessthan_scalar_double(int n, int idx,double dx,double *dy,int incx,double *params,double *result) {
transform(n,idx,dx,dy,incx,params,result);
}
| 15570562d93482fae0260d7030f9c3e4c670c1fa.cu | #include "scalar.h"
//scalar and current element
__device__ double op(double d1,double d2,double *params) {
if(d2 < d1) {return 1;}
return 0;
}
extern "C"
__global__ void lessthan_scalar_double(int n, int idx,double dx,double *dy,int incx,double *params,double *result) {
transform(n,idx,dx,dy,incx,params,result);
}
|
be0db6986459248872c44d44b1923a9c393ee0e9.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <rocblas.h>
#include "maxpool_layer.h"
#include "convolutional_layer.h"
#include "blas.h"
#include "dark_cuda.h"
__global__ void forward_maxpool_depth_layer_kernel(int n, int w, int h, int c, int out_c, int batch, float *input, float *output, int *indexes)
{
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (id >= n) return;
int j = id % w;
id = id / w;
int i = id % h;
id = id / h;
//int g = id % out_c;
//id = id / out_c;
int b = id % batch;
int k;
for (int g = 0; g < out_c; ++g)
{
int out_index = j + w*(i + h*(g + out_c*b));
float max = -FLT_MAX;
int max_i = -1;
for (k = g; k < c; k += out_c)
{
int in_index = j + w*(i + h*(k + c*b));
float val = input[in_index];
max_i = (val > max) ? in_index : max_i;
max = (val > max) ? val : max;
}
output[out_index] = max;
if (indexes) indexes[out_index] = max_i;
}
}
__global__ void backward_maxpool_depth_layer_kernel(int n, int w, int h, int c, int batch, float *delta, float *prev_delta, int *indexes)
{
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (id >= n) return;
int index = indexes[id];
prev_delta[index] += delta[id];
}
__global__ void forward_maxpool_layer_kernel(int n, int in_h, int in_w, int in_c, int stride_x, int stride_y, int size, int pad, float *input, float *output, int *indexes)
{
int h = (in_h + pad - size) / stride_y + 1;
int w = (in_w + pad - size) / stride_x + 1;
int c = in_c;
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(id >= n) return;
int j = id % w;
id /= w;
int i = id % h;
id /= h;
int k = id % c;
id /= c;
int b = id;
int w_offset = -pad / 2;
int h_offset = -pad / 2;
int out_index = j + w*(i + h*(k + c*b));
float max = -INFINITY;
int max_i = -1;
int l, m;
for(l = 0; l < size; ++l){
for(m = 0; m < size; ++m){
int cur_h = h_offset + i*stride_y + l;
int cur_w = w_offset + j*stride_x + m;
int index = cur_w + in_w*(cur_h + in_h*(k + b*in_c));
int valid = (cur_h >= 0 && cur_h < in_h &&
cur_w >= 0 && cur_w < in_w);
float val = (valid != 0) ? input[index] : -INFINITY;
max_i = (val > max) ? index : max_i;
max = (val > max) ? val : max;
}
}
output[out_index] = max;
if (indexes) indexes[out_index] = max_i;
}
__global__ void backward_maxpool_layer_kernel(int n, int in_h, int in_w, int in_c, int stride_x, int stride_y, int size, int pad, float *delta, float *prev_delta, int *indexes)
{
int h = (in_h + pad - size) / stride_y + 1;
int w = (in_w + pad - size) / stride_x + 1;
int c = in_c;
int area_x = (size - 1) / stride_x;
int area_y = (size - 1) / stride_y;
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(id >= n) return;
int index = id;
int j = id % in_w;
id /= in_w;
int i = id % in_h;
id /= in_h;
int k = id % in_c;
id /= in_c;
int b = id;
int w_offset = -pad / 2;
int h_offset = -pad / 2;
float d = 0;
int l, m;
for(l = -area_y; l < area_y+1; ++l){
for(m = -area_x; m < area_x+1; ++m){
int out_w = (j-w_offset)/stride_x + m;
int out_h = (i-h_offset)/stride_y + l;
int out_index = out_w + w*(out_h + h*(k + c*b));
int valid = (out_w >= 0 && out_w < w &&
out_h >= 0 && out_h < h);
d += (valid && indexes[out_index] == index) ? delta[out_index] : 0;
}
}
prev_delta[index] += d;
}
extern "C" void forward_maxpool_layer_gpu(maxpool_layer layer, network_state state)
{
if (layer.maxpool_depth) {
int h = layer.out_h;
int w = layer.out_w;
int c = 1;// layer.out_c;
size_t n = h*w*c*layer.batch;
forward_maxpool_depth_layer_kernel << <cuda_gridsize(n), BLOCK, 0, get_cuda_stream() >> >(
n, layer.w, layer.h, layer.c, layer.out_c, layer.batch, state.input, layer.output_gpu, layer.indexes_gpu);
CHECK_CUDA(hipPeekAtLastError());
return;
}
#ifdef CUDNN_DISABLED
if (!state.train && layer.stride == layer.size) {
// cudnnPoolingBackward
cudnnStatus_t maxpool_status;
float alpha = 1, beta = 0;
maxpool_status = cudnnPoolingForward(
cudnn_handle(),
layer.poolingDesc,
&alpha,
layer.srcTensorDesc,
state.input,
&beta,
layer.dstTensorDesc,
layer.output_gpu);
//maxpool_status = cudnnDestroyPoolingDescriptor(poolingDesc);
//cudnnDestroyTensorDescriptor(layer.srcTensorDesc);
//cudnnDestroyTensorDescriptor(layer.dstTensorDesc);
}
else
#endif
{
int h = layer.out_h;
int w = layer.out_w;
int c = layer.out_c;
size_t n = h*w*c*layer.batch;
forward_maxpool_layer_kernel << <cuda_gridsize(n), BLOCK, 0, get_cuda_stream() >> > (n, layer.h, layer.w, layer.c, layer.stride_x, layer.stride_y, layer.size, layer.pad, state.input, layer.output_gpu, layer.indexes_gpu);
CHECK_CUDA(hipPeekAtLastError());
}
if (layer.antialiasing) {
network_state s = { 0 };
s.train = state.train;
s.workspace = state.workspace;
s.net = state.net;
if (!state.train) s.index = state.index; // don't use TC for training (especially without cuda_convert_f32_to_f16() )
s.input = layer.output_gpu;
forward_convolutional_layer_gpu(*(layer.input_layer), s);
simple_copy_ongpu(layer.outputs*layer.batch, layer.output_gpu, layer.input_antialiasing_gpu);
simple_copy_ongpu(layer.input_layer->outputs*layer.input_layer->batch, layer.input_layer->output_gpu, layer.output_gpu);
}
}
extern "C" void backward_maxpool_layer_gpu(maxpool_layer layer, network_state state)
{
if (layer.antialiasing) {
network_state s = { 0 };
s.train = state.train;
s.workspace = state.workspace;
s.net = state.net;
s.delta = layer.delta_gpu; // s.delta will be returned to l.delta_gpu
s.input = layer.input_antialiasing_gpu;
//if (!state.train) s.index = state.index; // don't use TC for training (especially without cuda_convert_f32_to_f16() )
simple_copy_ongpu(layer.input_layer->outputs*layer.input_layer->batch, layer.delta_gpu, layer.input_layer->delta_gpu);
backward_convolutional_layer_gpu(*(layer.input_layer), s);
//simple_copy_ongpu(layer.outputs*layer.batch, layer.input_antialiasing_gpu, layer.output_gpu);
}
if (layer.maxpool_depth) {
int h = layer.out_h;
int w = layer.out_w;
int c = layer.out_c;
size_t n = h * w * c * layer.batch;
backward_maxpool_depth_layer_kernel << <cuda_gridsize(n), BLOCK, 0, get_cuda_stream() >> >(n, layer.w, layer.h, layer.c, layer.batch, layer.delta_gpu, state.delta, layer.indexes_gpu);
CHECK_CUDA(hipPeekAtLastError());
return;
}
size_t n = layer.h*layer.w*layer.c*layer.batch;
hipLaunchKernelGGL(( backward_maxpool_layer_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, get_cuda_stream() , n, layer.h, layer.w, layer.c, layer.stride_x, layer.stride_y, layer.size, layer.pad, layer.delta_gpu, state.delta, layer.indexes_gpu);
CHECK_CUDA(hipPeekAtLastError());
}
| be0db6986459248872c44d44b1923a9c393ee0e9.cu | #include <cuda_runtime.h>
#include <curand.h>
#include <cublas_v2.h>
#include "maxpool_layer.h"
#include "convolutional_layer.h"
#include "blas.h"
#include "dark_cuda.h"
__global__ void forward_maxpool_depth_layer_kernel(int n, int w, int h, int c, int out_c, int batch, float *input, float *output, int *indexes)
{
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (id >= n) return;
int j = id % w;
id = id / w;
int i = id % h;
id = id / h;
//int g = id % out_c;
//id = id / out_c;
int b = id % batch;
int k;
for (int g = 0; g < out_c; ++g)
{
int out_index = j + w*(i + h*(g + out_c*b));
float max = -FLT_MAX;
int max_i = -1;
for (k = g; k < c; k += out_c)
{
int in_index = j + w*(i + h*(k + c*b));
float val = input[in_index];
max_i = (val > max) ? in_index : max_i;
max = (val > max) ? val : max;
}
output[out_index] = max;
if (indexes) indexes[out_index] = max_i;
}
}
__global__ void backward_maxpool_depth_layer_kernel(int n, int w, int h, int c, int batch, float *delta, float *prev_delta, int *indexes)
{
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (id >= n) return;
int index = indexes[id];
prev_delta[index] += delta[id];
}
__global__ void forward_maxpool_layer_kernel(int n, int in_h, int in_w, int in_c, int stride_x, int stride_y, int size, int pad, float *input, float *output, int *indexes)
{
int h = (in_h + pad - size) / stride_y + 1;
int w = (in_w + pad - size) / stride_x + 1;
int c = in_c;
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(id >= n) return;
int j = id % w;
id /= w;
int i = id % h;
id /= h;
int k = id % c;
id /= c;
int b = id;
int w_offset = -pad / 2;
int h_offset = -pad / 2;
int out_index = j + w*(i + h*(k + c*b));
float max = -INFINITY;
int max_i = -1;
int l, m;
for(l = 0; l < size; ++l){
for(m = 0; m < size; ++m){
int cur_h = h_offset + i*stride_y + l;
int cur_w = w_offset + j*stride_x + m;
int index = cur_w + in_w*(cur_h + in_h*(k + b*in_c));
int valid = (cur_h >= 0 && cur_h < in_h &&
cur_w >= 0 && cur_w < in_w);
float val = (valid != 0) ? input[index] : -INFINITY;
max_i = (val > max) ? index : max_i;
max = (val > max) ? val : max;
}
}
output[out_index] = max;
if (indexes) indexes[out_index] = max_i;
}
__global__ void backward_maxpool_layer_kernel(int n, int in_h, int in_w, int in_c, int stride_x, int stride_y, int size, int pad, float *delta, float *prev_delta, int *indexes)
{
int h = (in_h + pad - size) / stride_y + 1;
int w = (in_w + pad - size) / stride_x + 1;
int c = in_c;
int area_x = (size - 1) / stride_x;
int area_y = (size - 1) / stride_y;
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(id >= n) return;
int index = id;
int j = id % in_w;
id /= in_w;
int i = id % in_h;
id /= in_h;
int k = id % in_c;
id /= in_c;
int b = id;
int w_offset = -pad / 2;
int h_offset = -pad / 2;
float d = 0;
int l, m;
for(l = -area_y; l < area_y+1; ++l){
for(m = -area_x; m < area_x+1; ++m){
int out_w = (j-w_offset)/stride_x + m;
int out_h = (i-h_offset)/stride_y + l;
int out_index = out_w + w*(out_h + h*(k + c*b));
int valid = (out_w >= 0 && out_w < w &&
out_h >= 0 && out_h < h);
d += (valid && indexes[out_index] == index) ? delta[out_index] : 0;
}
}
prev_delta[index] += d;
}
extern "C" void forward_maxpool_layer_gpu(maxpool_layer layer, network_state state)
{
if (layer.maxpool_depth) {
int h = layer.out_h;
int w = layer.out_w;
int c = 1;// layer.out_c;
size_t n = h*w*c*layer.batch;
forward_maxpool_depth_layer_kernel << <cuda_gridsize(n), BLOCK, 0, get_cuda_stream() >> >(
n, layer.w, layer.h, layer.c, layer.out_c, layer.batch, state.input, layer.output_gpu, layer.indexes_gpu);
CHECK_CUDA(cudaPeekAtLastError());
return;
}
#ifdef CUDNN_DISABLED
if (!state.train && layer.stride == layer.size) {
// cudnnPoolingBackward
cudnnStatus_t maxpool_status;
float alpha = 1, beta = 0;
maxpool_status = cudnnPoolingForward(
cudnn_handle(),
layer.poolingDesc,
&alpha,
layer.srcTensorDesc,
state.input,
&beta,
layer.dstTensorDesc,
layer.output_gpu);
//maxpool_status = cudnnDestroyPoolingDescriptor(poolingDesc);
//cudnnDestroyTensorDescriptor(layer.srcTensorDesc);
//cudnnDestroyTensorDescriptor(layer.dstTensorDesc);
}
else
#endif
{
int h = layer.out_h;
int w = layer.out_w;
int c = layer.out_c;
size_t n = h*w*c*layer.batch;
forward_maxpool_layer_kernel << <cuda_gridsize(n), BLOCK, 0, get_cuda_stream() >> > (n, layer.h, layer.w, layer.c, layer.stride_x, layer.stride_y, layer.size, layer.pad, state.input, layer.output_gpu, layer.indexes_gpu);
CHECK_CUDA(cudaPeekAtLastError());
}
if (layer.antialiasing) {
network_state s = { 0 };
s.train = state.train;
s.workspace = state.workspace;
s.net = state.net;
if (!state.train) s.index = state.index; // don't use TC for training (especially without cuda_convert_f32_to_f16() )
s.input = layer.output_gpu;
forward_convolutional_layer_gpu(*(layer.input_layer), s);
simple_copy_ongpu(layer.outputs*layer.batch, layer.output_gpu, layer.input_antialiasing_gpu);
simple_copy_ongpu(layer.input_layer->outputs*layer.input_layer->batch, layer.input_layer->output_gpu, layer.output_gpu);
}
}
extern "C" void backward_maxpool_layer_gpu(maxpool_layer layer, network_state state)
{
if (layer.antialiasing) {
network_state s = { 0 };
s.train = state.train;
s.workspace = state.workspace;
s.net = state.net;
s.delta = layer.delta_gpu; // s.delta will be returned to l.delta_gpu
s.input = layer.input_antialiasing_gpu;
//if (!state.train) s.index = state.index; // don't use TC for training (especially without cuda_convert_f32_to_f16() )
simple_copy_ongpu(layer.input_layer->outputs*layer.input_layer->batch, layer.delta_gpu, layer.input_layer->delta_gpu);
backward_convolutional_layer_gpu(*(layer.input_layer), s);
//simple_copy_ongpu(layer.outputs*layer.batch, layer.input_antialiasing_gpu, layer.output_gpu);
}
if (layer.maxpool_depth) {
int h = layer.out_h;
int w = layer.out_w;
int c = layer.out_c;
size_t n = h * w * c * layer.batch;
backward_maxpool_depth_layer_kernel << <cuda_gridsize(n), BLOCK, 0, get_cuda_stream() >> >(n, layer.w, layer.h, layer.c, layer.batch, layer.delta_gpu, state.delta, layer.indexes_gpu);
CHECK_CUDA(cudaPeekAtLastError());
return;
}
size_t n = layer.h*layer.w*layer.c*layer.batch;
backward_maxpool_layer_kernel<<<cuda_gridsize(n), BLOCK, 0, get_cuda_stream() >>>(n, layer.h, layer.w, layer.c, layer.stride_x, layer.stride_y, layer.size, layer.pad, layer.delta_gpu, state.delta, layer.indexes_gpu);
CHECK_CUDA(cudaPeekAtLastError());
}
|
d7f1f63539a11db4aac3f0dbcea91d48fe0609d6.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2020 Michael Koesel and respective contributors
// SPDX-License-Identifier: MIT
// See accompanying LICENSE file for detailed information
#include "dogm/common.h"
#include "dogm/cuda_utils.h"
#include "dogm/dogm_types.h"
#include "dogm/kernel/predict.h"
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
namespace dogm
{
__global__ void predictKernel(ParticlesSoA particle_array, hiprandState_t* __restrict__ global_state, float velocity,
int grid_size, float p_S, const glm::mat4x4 transition_matrix,
float process_noise_position, float process_noise_velocity, int particle_count)
{
int thread_id = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
hiprandState_t local_state = global_state[thread_id];
for (int i = thread_id; i < particle_count; i += stride)
{
float noise_pos_x = hiprand_normal(&local_state, 0.0f, process_noise_position);
float noise_pos_y = hiprand_normal(&local_state, 0.0f, process_noise_position);
float noise_vel_x = hiprand_normal(&local_state, 0.0f, process_noise_velocity);
float noise_vel_y = hiprand_normal(&local_state, 0.0f, process_noise_velocity);
glm::vec4 process_noise(noise_pos_x, noise_pos_y, noise_vel_x, noise_vel_y);
particle_array.state[i] = transition_matrix * particle_array.state[i] + process_noise;
particle_array.weight[i] = p_S * particle_array.weight[i];
glm::vec4 state = particle_array.state[i];
float x = state[0];
float y = state[1];
// Particle out of grid so decrease its chance of being resampled
if ((x > grid_size - 1 || x < 0) || (y > grid_size - 1 || y < 0))
{
particle_array.weight[i] = 0.0f;
}
int pos_x = clamp(static_cast<int>(x), 0, grid_size - 1);
int pos_y = clamp(static_cast<int>(y), 0, grid_size - 1);
particle_array.grid_cell_idx[i] = pos_x + grid_size * pos_y;
// printf("X: %d, Y: %d, Cell index: %d\n", pos_x, pos_y, (pos_x + grid_size * pos_y));
}
global_state[thread_id] = local_state;
}
} /* namespace dogm */
| d7f1f63539a11db4aac3f0dbcea91d48fe0609d6.cu | // Copyright (c) 2020 Michael Koesel and respective contributors
// SPDX-License-Identifier: MIT
// See accompanying LICENSE file for detailed information
#include "dogm/common.h"
#include "dogm/cuda_utils.h"
#include "dogm/dogm_types.h"
#include "dogm/kernel/predict.h"
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
namespace dogm
{
__global__ void predictKernel(ParticlesSoA particle_array, curandState* __restrict__ global_state, float velocity,
int grid_size, float p_S, const glm::mat4x4 transition_matrix,
float process_noise_position, float process_noise_velocity, int particle_count)
{
int thread_id = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
curandState local_state = global_state[thread_id];
for (int i = thread_id; i < particle_count; i += stride)
{
float noise_pos_x = curand_normal(&local_state, 0.0f, process_noise_position);
float noise_pos_y = curand_normal(&local_state, 0.0f, process_noise_position);
float noise_vel_x = curand_normal(&local_state, 0.0f, process_noise_velocity);
float noise_vel_y = curand_normal(&local_state, 0.0f, process_noise_velocity);
glm::vec4 process_noise(noise_pos_x, noise_pos_y, noise_vel_x, noise_vel_y);
particle_array.state[i] = transition_matrix * particle_array.state[i] + process_noise;
particle_array.weight[i] = p_S * particle_array.weight[i];
glm::vec4 state = particle_array.state[i];
float x = state[0];
float y = state[1];
// Particle out of grid so decrease its chance of being resampled
if ((x > grid_size - 1 || x < 0) || (y > grid_size - 1 || y < 0))
{
particle_array.weight[i] = 0.0f;
}
int pos_x = clamp(static_cast<int>(x), 0, grid_size - 1);
int pos_y = clamp(static_cast<int>(y), 0, grid_size - 1);
particle_array.grid_cell_idx[i] = pos_x + grid_size * pos_y;
// printf("X: %d, Y: %d, Cell index: %d\n", pos_x, pos_y, (pos_x + grid_size * pos_y));
}
global_state[thread_id] = local_state;
}
} /* namespace dogm */
|
29eba1618f994b269e9207a254bcf507012192cb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cudapars.h"
#include "paramssteeringtest1.h"
/////////////////////////////////////
// standard imports
/////////////////////////////////////
#include <stdio.h>
#include <math.h>
#include "step.h"
/////////////////////////////////////
// kernel function (CUDA device)
/////////////////////////////////////
#include "gradops_dc1.cuh"
__device__ __host__
real transportflux (real *dw, real *wd, real *w, struct params *p,int ix, int iy,int field, int direction) {
real ddc=0;
real fi, fim1;
real fip2=0, fim2=0;
// real ddc1;
real ddcx=0,ddcy=0;
real flux=0;
//transport flux
switch(direction)
{
case 0:
flux= wd[fencode_dc1(p,ix,iy,vel1)]*w[fencode_dc1(p,ix,iy,field)];
break;
case 1:
flux= wd[fencode_dc1(p,ix,iy,vel2)]*w[fencode_dc1(p,ix,iy,field)];
break;
case 2:
;//flux= wd[fencode_dc1(p,ix,iy,vel3)]*w[fencode_dc1(p,ix,iy,field)];
break;
}
return flux;
//return ( ddc1-ddc2);
}
__device__ __host__
real fluxmom1 (real *dw, real *wd, real *w, struct params *p,int ix, int iy,int field, int direction) {
real ddc=0;
real fi, fim1;
real fip2=0, fim2=0;
// real ddc1;
real ddcx=0,ddcy=0;
real flux=0;
//transport flux
switch(direction)
{
case 0:
#ifdef USE_SAC
flux= -w[fencode_dc1(p,ix,iy,field+4)]*w[fencode_dc1(p,ix,iy,b1)]-w[fencode_dc1(p,ix,iy,field+4)]*w[fencode_dc1(p,ix,iy,b1b)]-w[fencode_dc1(p,ix,iy,field+9)]*w[fencode_dc1(p,ix,iy,b1)];
#else
flux= -w[fencode_dc1(p,ix,iy,field+4)]*w[fencode_dc1(p,ix,iy,b1)];
#endif
if((field==mom1 ) )
flux+=wd[fencode_dc1(p,ix,iy,pressuret)];
break;
case 1:
#ifdef USE_SAC
flux= -w[fencode_dc1(p,ix,iy,field+4)]*w[fencode_dc1(p,ix,iy,b2)]-w[fencode_dc1(p,ix,iy,field+4)]*w[fencode_dc1(p,ix,iy,b2b)]-w[fencode_dc1(p,ix,iy,field+9)]*w[fencode_dc1(p,ix,iy,b2)];
#else
flux= -w[fencode_dc1(p,ix,iy,field+4)]*w[fencode_dc1(p,ix,iy,b2)];
#endif
if((field==mom2 && direction==1) )
flux+=wd[fencode_dc1(p,ix,iy,pressuret)];
break;
case 2:
#ifdef USE_SAC
flux= -w[fencode_dc1(p,ix,iy,field+4)]*w[fencode_dc1(p,ix,iy,b3)]-w[fencode_dc1(p,ix,iy,field+4)]*w[fencode_dc1(p,ix,iy,b3b)]-w[fencode_dc1(p,ix,iy,field+9)]*w[fencode_dc1(p,ix,iy,b3)];
#else
flux= -w[fencode_dc1(p,ix,iy,field+4)]*w[fencode_dc1(p,ix,iy,b3)];
#endif
if((field==mom3 && direction==2))
flux+=wd[fencode_dc1(p,ix,iy,pressuret)];
break;
}
return flux;
//return ( ddc1-ddc2);
}
__device__ __host__
int computefluxrho (real *dw, real *wd, real *w, struct params *p,int ix, int iy) {
int field, direction;
int status=0;
for(direction=0;direction<3;direction++)
#ifdef USE_SAC
wd[fencode_dc1(p,ix,iy,f1+direction)]= transportflux(dw,wd,w,p,ix,iy,rho,direction)+transportflux(dw,wd,w,p,ix,iy,rhob,direction);
#else
wd[fencode_dc1(p,ix,iy,f1+direction)]= transportflux(dw,wd,w,p,ix,iy,rho,direction);
#endif
return ( status);
}
__device__ __host__
int computefluxmom (real *dw, real *wd, real *w, struct params *p,int ix, int iy, int field) {
int direction;
int status=0;
for(direction=0;direction<3;direction++)
{
switch(direction)
{
case 0:
#ifdef ADIABHYDRO
wd[fencode_dc1(p,ix,iy,f1)]= transportflux(dw,wd,w,p,ix,iy,field,direction);
#else
wd[fencode_dc1(p,ix,iy,f1)]= transportflux(dw,wd,w,p,ix,iy,field,direction)+fluxmom1(dw,wd,w,p,ix,iy,field,direction);
#endif
break;
case 1:
#ifdef ADIABHYDRO
wd[fencode_dc1(p,ix,iy,f2)]= transportflux(dw,wd,w,p,ix,iy,field,direction);
#else
wd[fencode_dc1(p,ix,iy,f2)]= transportflux(dw,wd,w,p,ix,iy,field,direction)+fluxmom1(dw,wd,w,p,ix,iy,field,direction);
#endif
break;
case 2:
#ifdef ADIABHYDRO
wd[fencode_dc1(p,ix,iy,f3)]= transportflux(dw,wd,w,p,ix,iy,field,direction);
#else
wd[fencode_dc1(p,ix,iy,f3)]= transportflux(dw,wd,w,p,ix,iy,field,direction)+fluxmom1(dw,wd,w,p,ix,iy,field,direction);
#endif
break;
}
}
return ( status);
}
__device__ __host__
int divflux1(real *dw, real *wd, real *w, struct params *p,int ix, int iy,int field) {
int direction;
int status=0;
real divflux=0;
dw[fencode_dc1(p,ix,iy,field)]= -grad_dc1(wd,p,ix,iy,f1,0)-grad_dc1(wd,p,ix,iy,f2,1);
return ( status);
}
//rho, mom1, mom2, mom3, energy, b1, b2, b3
__device__ __host__
void computeflux (real *dw, real *wd, real *w, struct params *p,int ix, int iy, int field) {
//int status=0;
switch(field)
{
case rho:
computefluxrho(dw,wd,w,p,ix,iy);
break;
case mom1:
computefluxmom(dw,wd,w,p,ix,iy,field);
break;
case mom2:
computefluxmom(dw,wd,w,p,ix,iy,field);
break;
case mom3:
computefluxmom(dw,wd,w,p,ix,iy,field);
break;
}
//return ( status);
}
__global__ void derivcurrent1_parallel(struct params *p, real *w, real *wnew, real *wmod,
real *dwn1, real *wd, int order)
{
// compute the global index in the vector from
// the number of the current block, blockIdx,
// the number of threads per block, blockDim,
// and the number of the current thread within the block, threadIdx
//int i = blockIdx.x * blockDim.x + threadIdx.x;
//int j = blockIdx.y * blockDim.y + threadIdx.y;
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j;
int index,k;
int ni=p->n[0];
int nj=p->n[1];
real dt=p->dt;
real dy=p->dx[1];
real dx=p->dx[0];
//real g=p->g;
// dt=1.0;
//dt=0.05;
//enum vars rho, mom1, mom2, mom3, energy, b1, b2, b3;
j=iindex/ni;
//i=iindex-j*(iindex/ni);
i=iindex-(j*ni);
for(int f=rho; f<=mom3; f++)
{
if(i>1 && j >1 && i<(ni-2) && j<(nj-2))
computeflux(dwn1+(NVAR*(p->n[0])*(p->n[1])*order),wd,wmod,p,i,j,f);
//might need to set boundaries correctly
__syncthreads();
//if(i>1 && j >1 && i<(ni-2) && j<(nj-2))
// divflux1(dwn1+(NVAR*(p->n[0])*(p->n[1])*order),wd,wmod,p,i,j,f);
if(i>1 && j >1 && i<(ni-2) && j<(nj-2))
divflux1(dwn1+(NVAR*(p->n[0])*(p->n[1])*order),wd,wmod,p,i,j,f);
}
}
/////////////////////////////////////
// error checking routine
/////////////////////////////////////
void checkErrors_dc1(char *label)
{
// we need to synchronise first to catch errors due to
// asynchroneous operations that would otherwise
// potentially go unnoticed
hipError_t err;
err = hipDeviceSynchronize();
if (err != hipSuccess)
{
char *e = (char*) hipGetErrorString(err);
fprintf(stderr, "CUDA Error: %s (at %s)", e, label);
}
err = hipGetLastError();
if (err != hipSuccess)
{
char *e = (char*) hipGetErrorString(err);
fprintf(stderr, "CUDA Error: %s (at %s)", e, label);
}
}
int cuderivcurrent1(struct params **p, real **w, real **wnew, struct params **d_p, real **d_w, real **d_wnew, real **d_wmod, real **d_dwn1, real **d_wd, int order)
{
//printf("calling propagate solution\n");
//dim3 dimBlock(blocksize, blocksize);
//dim3 dimGrid(((*p)->n[0])/dimBlock.x,((*p)->n[1])/dimBlock.y);
dim3 dimBlock(dimblock, 1);
//dim3 dimGrid(((*p)->n[0])/dimBlock.x,((*p)->n[1])/dimBlock.y);
dim3 dimGrid(((*p)->n[0])/dimBlock.x,((*p)->n[1])/dimBlock.y);
int numBlocks = (((*p)->n[0])*((*p)->n[1])+numThreadsPerBlock-1) / numThreadsPerBlock;
// hipMemcpy(*w, *d_w, NVAR*((*p)->n[0])* ((*p)->n[1])*sizeof(real), hipMemcpyDeviceToHost);
// if(order==0)
hipMemcpy(*d_p, *p, sizeof(struct params), hipMemcpyHostToDevice);
//__global__ void prop_parallel(struct params *p, real *b, real *w, real *wnew, real *wmod,
// real *dwn1, real *dwn2, real *dwn3, real *dwn4, real *wd)
//init_parallel(struct params *p, real *b, real *u, real *v, real *h)
hipLaunchKernelGGL(( derivcurrent1_parallel), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, *d_p,*d_w,*d_wnew, *d_wmod, *d_dwn1, *d_wd, order);
//prop_parallel<<<dimGrid,dimBlock>>>(*d_p,*d_b,*d_u,*d_v,*d_h);
//printf("called prop\n");
hipDeviceSynchronize();
//boundary_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p,*d_b,*d_w,*d_wnew);
//printf("called boundary\n");
//hipDeviceSynchronize();
//update_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p,*d_b,*d_w,*d_wnew);
//printf("called update\n");
// hipDeviceSynchronize();
// hipMemcpy(*w, *d_w, NVAR*((*p)->n[0])* ((*p)->n[1])*sizeof(real), hipMemcpyDeviceToHost);
//hipMemcpy(*wnew, *d_wnew, NVAR*((*p)->n[0])* ((*p)->n[1])*sizeof(real), hipMemcpyDeviceToHost);
//hipMemcpy(*b, *d_b, (((*p)->n[0])* ((*p)->n[1]))*sizeof(real), hipMemcpyDeviceToHost);
//checkErrors("copy data from device");
}
| 29eba1618f994b269e9207a254bcf507012192cb.cu | #include "cudapars.h"
#include "paramssteeringtest1.h"
/////////////////////////////////////
// standard imports
/////////////////////////////////////
#include <stdio.h>
#include <math.h>
#include "step.h"
/////////////////////////////////////
// kernel function (CUDA device)
/////////////////////////////////////
#include "gradops_dc1.cuh"
__device__ __host__
real transportflux (real *dw, real *wd, real *w, struct params *p,int ix, int iy,int field, int direction) {
real ddc=0;
real fi, fim1;
real fip2=0, fim2=0;
// real ddc1;
real ddcx=0,ddcy=0;
real flux=0;
//transport flux
switch(direction)
{
case 0:
flux= wd[fencode_dc1(p,ix,iy,vel1)]*w[fencode_dc1(p,ix,iy,field)];
break;
case 1:
flux= wd[fencode_dc1(p,ix,iy,vel2)]*w[fencode_dc1(p,ix,iy,field)];
break;
case 2:
;//flux= wd[fencode_dc1(p,ix,iy,vel3)]*w[fencode_dc1(p,ix,iy,field)];
break;
}
return flux;
//return ( ddc1-ddc2);
}
__device__ __host__
real fluxmom1 (real *dw, real *wd, real *w, struct params *p,int ix, int iy,int field, int direction) {
real ddc=0;
real fi, fim1;
real fip2=0, fim2=0;
// real ddc1;
real ddcx=0,ddcy=0;
real flux=0;
//transport flux
switch(direction)
{
case 0:
#ifdef USE_SAC
flux= -w[fencode_dc1(p,ix,iy,field+4)]*w[fencode_dc1(p,ix,iy,b1)]-w[fencode_dc1(p,ix,iy,field+4)]*w[fencode_dc1(p,ix,iy,b1b)]-w[fencode_dc1(p,ix,iy,field+9)]*w[fencode_dc1(p,ix,iy,b1)];
#else
flux= -w[fencode_dc1(p,ix,iy,field+4)]*w[fencode_dc1(p,ix,iy,b1)];
#endif
if((field==mom1 ) )
flux+=wd[fencode_dc1(p,ix,iy,pressuret)];
break;
case 1:
#ifdef USE_SAC
flux= -w[fencode_dc1(p,ix,iy,field+4)]*w[fencode_dc1(p,ix,iy,b2)]-w[fencode_dc1(p,ix,iy,field+4)]*w[fencode_dc1(p,ix,iy,b2b)]-w[fencode_dc1(p,ix,iy,field+9)]*w[fencode_dc1(p,ix,iy,b2)];
#else
flux= -w[fencode_dc1(p,ix,iy,field+4)]*w[fencode_dc1(p,ix,iy,b2)];
#endif
if((field==mom2 && direction==1) )
flux+=wd[fencode_dc1(p,ix,iy,pressuret)];
break;
case 2:
#ifdef USE_SAC
flux= -w[fencode_dc1(p,ix,iy,field+4)]*w[fencode_dc1(p,ix,iy,b3)]-w[fencode_dc1(p,ix,iy,field+4)]*w[fencode_dc1(p,ix,iy,b3b)]-w[fencode_dc1(p,ix,iy,field+9)]*w[fencode_dc1(p,ix,iy,b3)];
#else
flux= -w[fencode_dc1(p,ix,iy,field+4)]*w[fencode_dc1(p,ix,iy,b3)];
#endif
if((field==mom3 && direction==2))
flux+=wd[fencode_dc1(p,ix,iy,pressuret)];
break;
}
return flux;
//return ( ddc1-ddc2);
}
__device__ __host__
int computefluxrho (real *dw, real *wd, real *w, struct params *p,int ix, int iy) {
int field, direction;
int status=0;
for(direction=0;direction<3;direction++)
#ifdef USE_SAC
wd[fencode_dc1(p,ix,iy,f1+direction)]= transportflux(dw,wd,w,p,ix,iy,rho,direction)+transportflux(dw,wd,w,p,ix,iy,rhob,direction);
#else
wd[fencode_dc1(p,ix,iy,f1+direction)]= transportflux(dw,wd,w,p,ix,iy,rho,direction);
#endif
return ( status);
}
__device__ __host__
int computefluxmom (real *dw, real *wd, real *w, struct params *p,int ix, int iy, int field) {
int direction;
int status=0;
for(direction=0;direction<3;direction++)
{
switch(direction)
{
case 0:
#ifdef ADIABHYDRO
wd[fencode_dc1(p,ix,iy,f1)]= transportflux(dw,wd,w,p,ix,iy,field,direction);
#else
wd[fencode_dc1(p,ix,iy,f1)]= transportflux(dw,wd,w,p,ix,iy,field,direction)+fluxmom1(dw,wd,w,p,ix,iy,field,direction);
#endif
break;
case 1:
#ifdef ADIABHYDRO
wd[fencode_dc1(p,ix,iy,f2)]= transportflux(dw,wd,w,p,ix,iy,field,direction);
#else
wd[fencode_dc1(p,ix,iy,f2)]= transportflux(dw,wd,w,p,ix,iy,field,direction)+fluxmom1(dw,wd,w,p,ix,iy,field,direction);
#endif
break;
case 2:
#ifdef ADIABHYDRO
wd[fencode_dc1(p,ix,iy,f3)]= transportflux(dw,wd,w,p,ix,iy,field,direction);
#else
wd[fencode_dc1(p,ix,iy,f3)]= transportflux(dw,wd,w,p,ix,iy,field,direction)+fluxmom1(dw,wd,w,p,ix,iy,field,direction);
#endif
break;
}
}
return ( status);
}
__device__ __host__
int divflux1(real *dw, real *wd, real *w, struct params *p,int ix, int iy,int field) {
int direction;
int status=0;
real divflux=0;
dw[fencode_dc1(p,ix,iy,field)]= -grad_dc1(wd,p,ix,iy,f1,0)-grad_dc1(wd,p,ix,iy,f2,1);
return ( status);
}
//rho, mom1, mom2, mom3, energy, b1, b2, b3
__device__ __host__
void computeflux (real *dw, real *wd, real *w, struct params *p,int ix, int iy, int field) {
//int status=0;
switch(field)
{
case rho:
computefluxrho(dw,wd,w,p,ix,iy);
break;
case mom1:
computefluxmom(dw,wd,w,p,ix,iy,field);
break;
case mom2:
computefluxmom(dw,wd,w,p,ix,iy,field);
break;
case mom3:
computefluxmom(dw,wd,w,p,ix,iy,field);
break;
}
//return ( status);
}
__global__ void derivcurrent1_parallel(struct params *p, real *w, real *wnew, real *wmod,
real *dwn1, real *wd, int order)
{
// compute the global index in the vector from
// the number of the current block, blockIdx,
// the number of threads per block, blockDim,
// and the number of the current thread within the block, threadIdx
//int i = blockIdx.x * blockDim.x + threadIdx.x;
//int j = blockIdx.y * blockDim.y + threadIdx.y;
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j;
int index,k;
int ni=p->n[0];
int nj=p->n[1];
real dt=p->dt;
real dy=p->dx[1];
real dx=p->dx[0];
//real g=p->g;
// dt=1.0;
//dt=0.05;
//enum vars rho, mom1, mom2, mom3, energy, b1, b2, b3;
j=iindex/ni;
//i=iindex-j*(iindex/ni);
i=iindex-(j*ni);
for(int f=rho; f<=mom3; f++)
{
if(i>1 && j >1 && i<(ni-2) && j<(nj-2))
computeflux(dwn1+(NVAR*(p->n[0])*(p->n[1])*order),wd,wmod,p,i,j,f);
//might need to set boundaries correctly
__syncthreads();
//if(i>1 && j >1 && i<(ni-2) && j<(nj-2))
// divflux1(dwn1+(NVAR*(p->n[0])*(p->n[1])*order),wd,wmod,p,i,j,f);
if(i>1 && j >1 && i<(ni-2) && j<(nj-2))
divflux1(dwn1+(NVAR*(p->n[0])*(p->n[1])*order),wd,wmod,p,i,j,f);
}
}
/////////////////////////////////////
// error checking routine
/////////////////////////////////////
void checkErrors_dc1(char *label)
{
// we need to synchronise first to catch errors due to
// asynchroneous operations that would otherwise
// potentially go unnoticed
cudaError_t err;
err = cudaThreadSynchronize();
if (err != cudaSuccess)
{
char *e = (char*) cudaGetErrorString(err);
fprintf(stderr, "CUDA Error: %s (at %s)", e, label);
}
err = cudaGetLastError();
if (err != cudaSuccess)
{
char *e = (char*) cudaGetErrorString(err);
fprintf(stderr, "CUDA Error: %s (at %s)", e, label);
}
}
int cuderivcurrent1(struct params **p, real **w, real **wnew, struct params **d_p, real **d_w, real **d_wnew, real **d_wmod, real **d_dwn1, real **d_wd, int order)
{
//printf("calling propagate solution\n");
//dim3 dimBlock(blocksize, blocksize);
//dim3 dimGrid(((*p)->n[0])/dimBlock.x,((*p)->n[1])/dimBlock.y);
dim3 dimBlock(dimblock, 1);
//dim3 dimGrid(((*p)->n[0])/dimBlock.x,((*p)->n[1])/dimBlock.y);
dim3 dimGrid(((*p)->n[0])/dimBlock.x,((*p)->n[1])/dimBlock.y);
int numBlocks = (((*p)->n[0])*((*p)->n[1])+numThreadsPerBlock-1) / numThreadsPerBlock;
// cudaMemcpy(*w, *d_w, NVAR*((*p)->n[0])* ((*p)->n[1])*sizeof(real), cudaMemcpyDeviceToHost);
// if(order==0)
cudaMemcpy(*d_p, *p, sizeof(struct params), cudaMemcpyHostToDevice);
//__global__ void prop_parallel(struct params *p, real *b, real *w, real *wnew, real *wmod,
// real *dwn1, real *dwn2, real *dwn3, real *dwn4, real *wd)
//init_parallel(struct params *p, real *b, real *u, real *v, real *h)
derivcurrent1_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p,*d_w,*d_wnew, *d_wmod, *d_dwn1, *d_wd, order);
//prop_parallel<<<dimGrid,dimBlock>>>(*d_p,*d_b,*d_u,*d_v,*d_h);
//printf("called prop\n");
cudaThreadSynchronize();
//boundary_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p,*d_b,*d_w,*d_wnew);
//printf("called boundary\n");
//cudaThreadSynchronize();
//update_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p,*d_b,*d_w,*d_wnew);
//printf("called update\n");
// cudaThreadSynchronize();
// cudaMemcpy(*w, *d_w, NVAR*((*p)->n[0])* ((*p)->n[1])*sizeof(real), cudaMemcpyDeviceToHost);
//cudaMemcpy(*wnew, *d_wnew, NVAR*((*p)->n[0])* ((*p)->n[1])*sizeof(real), cudaMemcpyDeviceToHost);
//cudaMemcpy(*b, *d_b, (((*p)->n[0])* ((*p)->n[1]))*sizeof(real), cudaMemcpyDeviceToHost);
//checkErrors("copy data from device");
}
|
fixpassthrust.hip | // !!! This is a file automatically generated by hipify!!!
/*
============================================================================
Name : sorting_segments.cu
Author : Rafael Schmid
Version :
Copyright : Your copyright notice
Description : Compute sum of reciprocals using STL on CPU and Thrust on GPU
============================================================================
*/
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <thrust/copy.h>
#include <thrust/extrema.h>
#include <thrust/transform.h>
#include <iostream>
#ifndef ELAPSED_TIME
#define ELAPSED_TIME 0
#endif
template <typename T, typename Op>
struct Operation {
uint shift_val;
Operation(uint shift_val) {
this->shift_val = shift_val;
}
__host__ __device__
T operator()(const T x, const T y)
{
T fix = y << shift_val;
Op op = Op();
return op(x, fix);
}
};
void print(thrust::host_vector<uint> h_vec) {
std::cout << "\n";
for (uint i = 0; i < h_vec.size(); i++) {
std::cout << h_vec[i] << " ";
}
std::cout << "\n";
}
int main(void) {
uint num_of_segments;
uint num_of_elements;
scanf("%d", &num_of_segments);
thrust::host_vector<uint> h_seg_aux(num_of_segments + 1);
for (uint i = 0; i < num_of_segments + 1; i++)
scanf("%d", &h_seg_aux[i]);
scanf("%d", &num_of_elements);
thrust::host_vector<uint> h_vec(num_of_elements);
for (uint i = 0; i < num_of_elements; i++)
scanf("%d", &h_vec[i]);
thrust::host_vector<uint> h_seg(num_of_elements);
for (uint i = 0; i < num_of_segments; i++) {
for (uint j = h_seg_aux[i]; j < h_seg_aux[i + 1]; j++) {
h_seg[j] = i;
}
}
hipEvent_t startPre, stopPre, startPos, stopPos;
hipEventCreate(&startPre);
hipEventCreate(&stopPre);
hipEventCreate(&startPos);
hipEventCreate(&stopPos);
thrust::device_vector<uint> d_vec(num_of_elements);
thrust::device_vector<uint> d_seg = h_seg;
float averageExecutions = 0;
for (uint i = 0; i < EXECUTIONS; i++) {
thrust::copy(h_vec.begin(), h_vec.end(), d_vec.begin());
/*
* maximum element of the array.
*/
hipEventRecord(startPre);
thrust::device_vector<uint>::iterator iter = thrust::max_element(d_vec.begin(), d_vec.end());
uint max_val = *iter;
uint mostSignificantBit = (uint)log2((double)max_val) + 1;
/*
* add prefix to the elements
*/
Operation< uint, thrust::plus<uint> > op_plus(mostSignificantBit);
thrust::transform(d_vec.begin(), d_vec.end(), d_seg.begin(), d_vec.begin(), op_plus);
hipEventRecord(stopPre);
hipEventSynchronize(stopPre);
/*
* sort the segments
*/
thrust::sort(d_vec.begin(), d_vec.end());
/*
* update back the array elements
*/
hipEventRecord(startPos);
Operation< uint, thrust::minus<uint> > op_minus(mostSignificantBit);
thrust::transform(d_vec.begin(), d_vec.end(), d_seg.begin(), d_vec.begin(), op_minus);
hipEventRecord(stopPos);
hipEventSynchronize(stopPos);
if (ELAPSED_TIME == 1) {
float millisecondsPre = 0, millisecondsPos = 0;
hipEventElapsedTime(&millisecondsPre, startPre, stopPre);
hipEventElapsedTime(&millisecondsPos, startPos, stopPos);
//std::cout << millisecondsPre + millisecondsPos << "\n";
averageExecutions += millisecondsPre + millisecondsPos;
}
hipError_t errSync = hipGetLastError();
hipError_t errAsync = hipDeviceSynchronize();
if (errSync != hipSuccess)
printf("4: Sync kernel error: %s\n", hipGetErrorString(errSync));
if (errAsync != hipSuccess)
printf("4: Async kernel error: %s\n", hipGetErrorString(errAsync));
}
thrust::copy(d_vec.begin(), d_vec.end(), h_vec.begin());
if (ELAPSED_TIME != 1) {
print(h_vec);
}
else {std::cout << averageExecutions/EXECUTIONS << "\n";}
return 0;
}
| fixpassthrust.cu | /*
============================================================================
Name : sorting_segments.cu
Author : Rafael Schmid
Version :
Copyright : Your copyright notice
Description : Compute sum of reciprocals using STL on CPU and Thrust on GPU
============================================================================
*/
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <thrust/copy.h>
#include <thrust/extrema.h>
#include <thrust/transform.h>
#include <iostream>
#ifndef ELAPSED_TIME
#define ELAPSED_TIME 0
#endif
template <typename T, typename Op>
struct Operation {
uint shift_val;
Operation(uint shift_val) {
this->shift_val = shift_val;
}
__host__ __device__
T operator()(const T x, const T y)
{
T fix = y << shift_val;
Op op = Op();
return op(x, fix);
}
};
void print(thrust::host_vector<uint> h_vec) {
std::cout << "\n";
for (uint i = 0; i < h_vec.size(); i++) {
std::cout << h_vec[i] << " ";
}
std::cout << "\n";
}
int main(void) {
uint num_of_segments;
uint num_of_elements;
scanf("%d", &num_of_segments);
thrust::host_vector<uint> h_seg_aux(num_of_segments + 1);
for (uint i = 0; i < num_of_segments + 1; i++)
scanf("%d", &h_seg_aux[i]);
scanf("%d", &num_of_elements);
thrust::host_vector<uint> h_vec(num_of_elements);
for (uint i = 0; i < num_of_elements; i++)
scanf("%d", &h_vec[i]);
thrust::host_vector<uint> h_seg(num_of_elements);
for (uint i = 0; i < num_of_segments; i++) {
for (uint j = h_seg_aux[i]; j < h_seg_aux[i + 1]; j++) {
h_seg[j] = i;
}
}
cudaEvent_t startPre, stopPre, startPos, stopPos;
cudaEventCreate(&startPre);
cudaEventCreate(&stopPre);
cudaEventCreate(&startPos);
cudaEventCreate(&stopPos);
thrust::device_vector<uint> d_vec(num_of_elements);
thrust::device_vector<uint> d_seg = h_seg;
float averageExecutions = 0;
for (uint i = 0; i < EXECUTIONS; i++) {
thrust::copy(h_vec.begin(), h_vec.end(), d_vec.begin());
/*
* maximum element of the array.
*/
cudaEventRecord(startPre);
thrust::device_vector<uint>::iterator iter = thrust::max_element(d_vec.begin(), d_vec.end());
uint max_val = *iter;
uint mostSignificantBit = (uint)log2((double)max_val) + 1;
/*
* add prefix to the elements
*/
Operation< uint, thrust::plus<uint> > op_plus(mostSignificantBit);
thrust::transform(d_vec.begin(), d_vec.end(), d_seg.begin(), d_vec.begin(), op_plus);
cudaEventRecord(stopPre);
cudaEventSynchronize(stopPre);
/*
* sort the segments
*/
thrust::sort(d_vec.begin(), d_vec.end());
/*
* update back the array elements
*/
cudaEventRecord(startPos);
Operation< uint, thrust::minus<uint> > op_minus(mostSignificantBit);
thrust::transform(d_vec.begin(), d_vec.end(), d_seg.begin(), d_vec.begin(), op_minus);
cudaEventRecord(stopPos);
cudaEventSynchronize(stopPos);
if (ELAPSED_TIME == 1) {
float millisecondsPre = 0, millisecondsPos = 0;
cudaEventElapsedTime(&millisecondsPre, startPre, stopPre);
cudaEventElapsedTime(&millisecondsPos, startPos, stopPos);
//std::cout << millisecondsPre + millisecondsPos << "\n";
averageExecutions += millisecondsPre + millisecondsPos;
}
cudaError_t errSync = cudaGetLastError();
cudaError_t errAsync = cudaDeviceSynchronize();
if (errSync != cudaSuccess)
printf("4: Sync kernel error: %s\n", cudaGetErrorString(errSync));
if (errAsync != cudaSuccess)
printf("4: Async kernel error: %s\n", cudaGetErrorString(errAsync));
}
thrust::copy(d_vec.begin(), d_vec.end(), h_vec.begin());
if (ELAPSED_TIME != 1) {
print(h_vec);
}
else {std::cout << averageExecutions/EXECUTIONS << "\n";}
return 0;
}
|
dc79ff16e508e3020e663cd0f6d84e5adb7352b3.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
using namespace std;
#include <hip/hip_runtime.h>
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#define THREADS pow(2,8)// 256
#define BLOCKS pow(2,13)
#define NUM_VALS THREADS*BLOCKS //
int* dynamic_alloc(int n)
{
int* arr = new int[n];
return arr;
}
void delete_alloc(int** arr, int n)
{
for (int j = 0; j < n; j++)
{
delete[] arr[j];
}
delete[] arr;
}
void print_rt(clock_t start, clock_t stop)
{
double time = ((double)(stop - start)) / CLOCKS_PER_SEC;
printf("Run time: %.3fs\n", time);
}
__global__ void bitonic_sort_step(int* dev_values, int j, int k)
{
unsigned int i, ixj; /* Sorting partners: i and ixj */
i = threadIdx.x + blockDim.x * blockIdx.x;// thread
ixj = i ^ j;
/* The threads with the lowest ids sort the array. */
if ((ixj) > i) {
if ((i & k) == 0) {
/* Sort ascending */
if (dev_values[i] > dev_values[ixj]) {
/* exchange(i,ixj); */
int temp = dev_values[i];
dev_values[i] = dev_values[ixj];
dev_values[ixj] = temp;
}
}
if ((i & k) != 0) {
/* Sort descending */
if (dev_values[i] < dev_values[ixj]) {
/* exchange(i,ixj); */
int temp = dev_values[i];
dev_values[i] = dev_values[ixj];
dev_values[ixj] = temp;
}
}
}
}
void bitonic_sort(int* values)
{
int* dev_values;
size_t size = NUM_VALS * sizeof(int);
hipMalloc((void**)&dev_values, size); //
hipMemcpy(dev_values, values, size, hipMemcpyHostToDevice);// dev_values
// Block Thread 1
dim3 blocks(BLOCKS, 1); /* Number of blocks */
dim3 threads(THREADS, 1); /* Number of threads */
int j, k;
/* Major step */
for (k = 2; k <= NUM_VALS; k <<= 1) {
/* Minor step */
for (j = k >> 1; j > 0; j = j >> 1) {
bitonic_sort_step << <blocks, threads >> > (dev_values, j, k); //dev_values
//1 block threads thread blocks*threads
}
}
hipMemcpy(values, dev_values, size, hipMemcpyDeviceToHost); // values
hipFree(dev_values); //
}
void _swap(int arr[], int a, int b)
{
int temp = arr[b];
arr[b] = arr[a];
arr[a] = temp;
}
void InsertSort(int a[], int l, int r)
{
int i, j, temp;
for (i = l + 1; i <= r; i++)
{
temp = a[i];
for (j = i - 1; j >= l && a[j] > temp; j--)
a[j + 1] = a[j];
a[j + 1] = temp;
}
}
//Quick Sort - a : / l : / r :
void ThreeMedianInsertQS(int a[], int l, int r, int n)
{
int i, j, m, v;
if (r - l <= n) // n ,
InsertSort(a, l, r);
else if (r - l > 1) // 2
{
m = (l + r) / 2; // m
//
//a[l], a[m], a[r] m swap
if (a[l] > a[m])
_swap(a, l, m);
if (a[l] > a[r])
_swap(a, l, r);
if (a[m] > a[r])
_swap(a, m, r);
_swap(a, m, r - 1); // a[m] r
v = a[r - 1]; //
i = l; //left
j = r - 1; //right
// ( ), ( )
for (;;)
{
while (a[++i] < v)
; //left
while (a[--j] > v)
; //right
if (i >= j)
break; //left right ,
//left , right
//if(a[i] > v || a[j] < v)
_swap(a, i, j); // swap
}
_swap(a, i, r - 1); //
ThreeMedianInsertQS(a, l, i - 1, n); //
ThreeMedianInsertQS(a, i + 1, r, n); //
}
else if (a[l] > a[r]) // 2 ,
_swap(a, l, r); // swap
}
// v : key / k : gap sequence index
void ShellSort(int a[], int n)
{
int i, j, v, k;
int gap[20];
gap[0] = 1; //gap sequence 1
k = 0;
while (gap[k] < n) {
k++;
gap[k] = pow(4, k) + 3 * pow(2, k - 1) + 1;//Sedgewick gap sequence
}
for (; k >= 0; k--) {
for (i = gap[k]; i < n; i++) { //gap
v = a[i];
j = i;
while (j >= gap[k] && a[j - gap[k]] > v) {
a[j] = a[j - gap[k]];
j = j - gap[k];
}
a[j] = v;
}
}
}
// sorting check
void checkSort(int a[], int n)
{
int i, sorted;
sorted = true;
for (i = 0; i < n - 1; i++)
{
if (a[i] > a[i + 1])
{
sorted = false;
}
if (!sorted)
{
break;
}
}
if (sorted)
{
cout << "Sorting complete!" << endl;
}
else
{
cout << "Error during sorting..." << endl;
}
}
int main(void) {
clock_t start, stop;
int* a1 = dynamic_alloc(NUM_VALS);
int* a2 = dynamic_alloc(NUM_VALS);
int* a3 = dynamic_alloc(NUM_VALS);
cout << "Number of Values : " << NUM_VALS << "\n";
srand(time(NULL));
for (int i = 0; i < NUM_VALS; i++)
{
a1[i] = rand() % RAND_MAX;
a2[i] = a1[i];
a3[i] = a1[i];
}
start = clock();
bitonic_sort(a1);
stop = clock();
checkSort(a1, NUM_VALS);
cout << "Bitonic_cuda ";
print_rt(start, stop);
start = clock();
ThreeMedianInsertQS(a2, 0, NUM_VALS - 1, 15);
stop = clock();
checkSort(a2, NUM_VALS);
cout << "Quick ";
print_rt(start, stop);
start = clock();
ShellSort(a3, NUM_VALS);
stop = clock();
checkSort(a3, NUM_VALS);
cout << "Shell ";
print_rt(start, stop);
} | dc79ff16e508e3020e663cd0f6d84e5adb7352b3.cu | #include <iostream>
using namespace std;
#include <cuda_runtime.h>
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#define THREADS pow(2,8)// 256
#define BLOCKS pow(2,13)
#define NUM_VALS THREADS*BLOCKS //정렬할 원소 개수
int* dynamic_alloc(int n)
{
int* arr = new int[n];
return arr;
}
void delete_alloc(int** arr, int n)
{
for (int j = 0; j < n; j++)
{
delete[] arr[j];
}
delete[] arr;
}
void print_rt(clock_t start, clock_t stop)
{
double time = ((double)(stop - start)) / CLOCKS_PER_SEC;
printf("Run time: %.3fs\n", time);
}
__global__ void bitonic_sort_step(int* dev_values, int j, int k)
{
unsigned int i, ixj; /* Sorting partners: i and ixj */
i = threadIdx.x + blockDim.x * blockIdx.x;// thread 인덱싱
ixj = i ^ j;
/* The threads with the lowest ids sort the array. */
if ((ixj) > i) {
if ((i & k) == 0) {
/* Sort ascending */
if (dev_values[i] > dev_values[ixj]) {
/* exchange(i,ixj); */
int temp = dev_values[i];
dev_values[i] = dev_values[ixj];
dev_values[ixj] = temp;
}
}
if ((i & k) != 0) {
/* Sort descending */
if (dev_values[i] < dev_values[ixj]) {
/* exchange(i,ixj); */
int temp = dev_values[i];
dev_values[i] = dev_values[ixj];
dev_values[ixj] = temp;
}
}
}
}
void bitonic_sort(int* values)
{
int* dev_values;
size_t size = NUM_VALS * sizeof(int);
cudaMalloc((void**)&dev_values, size); //쿠다 메모리 할당
cudaMemcpy(dev_values, values, size, cudaMemcpyHostToDevice);//정렬할 배열을 dev_values에 복사
// Block과 Thread는 1차원
dim3 blocks(BLOCKS, 1); /* Number of blocks */
dim3 threads(THREADS, 1); /* Number of threads */
int j, k;
/* Major step */
for (k = 2; k <= NUM_VALS; k <<= 1) {
/* Minor step */
for (j = k >> 1; j > 0; j = j >> 1) {
bitonic_sort_step << <blocks, threads >> > (dev_values, j, k); //dev_values 배열 정렬
//1 block당 threads가 할당됨 즉 전체 thread 개수는 blocks*threads
}
}
cudaMemcpy(values, dev_values, size, cudaMemcpyDeviceToHost); //정렬 완료된 배열을 values에 저장
cudaFree(dev_values); //메모리 해제
}
void _swap(int arr[], int a, int b)
{
int temp = arr[b];
arr[b] = arr[a];
arr[a] = temp;
}
void InsertSort(int a[], int l, int r)
{
int i, j, temp;
for (i = l + 1; i <= r; i++)
{
temp = a[i];
for (j = i - 1; j >= l && a[j] > temp; j--)
a[j + 1] = a[j];
a[j + 1] = temp;
}
}
//Quick Sort - a : 배열 / l : 시작 인덱스 / r : 끝 인덱스
void ThreeMedianInsertQS(int a[], int l, int r, int n)
{
int i, j, m, v;
if (r - l <= n) //배열의 크기가 n보다 작거나 같을시, 삽입정렬 수행
InsertSort(a, l, r);
else if (r - l > 1) //배열의 크기가 2보다 크면
{
m = (l + r) / 2; //중간 인덱스값 m
//중간값을 이용하여 성능 향상
//a[l], a[m], a[r] 값중 중간값이 m 인덱스에 가도록 swap
if (a[l] > a[m])
_swap(a, l, m);
if (a[l] > a[r])
_swap(a, l, r);
if (a[m] > a[r])
_swap(a, m, r);
_swap(a, m, r - 1); //중간값 a[m]을 가장 끝 인덱스 r 로 이동
v = a[r - 1]; //피벗 값
i = l; //left 포인터
j = r - 1; //right 포인터
//피벗값을 기준으로 좌측 부분(피벗보다 작은 값), 우측 부분(피벗보다 큰 값)으로 분할
for (;;)
{
while (a[++i] < v)
; //left 포인터가 가리키는 값이 피벗보다 작으면 포인터 우측 이동
while (a[--j] > v)
; //right 포인터가 가리키는 값이 피벗보다 크면 포인터 좌측 이동
if (i >= j)
break; //left 포인터와 right 포인터가 교차하면, 반복문 탈출
//left 포인터가 가리키는 값이 피벗보다 크거나, right 포인터가 가리키는 값이 피벗보다 작으면
//if(a[i] > v || a[j] < v)
_swap(a, i, j); //두 값 swap
}
_swap(a, i, r - 1); //피벗 값을 좌측 부분과 우측 부분 사이로 이동
ThreeMedianInsertQS(a, l, i - 1, n); //좌측 부분 재귀 호출
ThreeMedianInsertQS(a, i + 1, r, n); //우측 부분 재귀 호출
}
else if (a[l] > a[r]) //배열의 크기가 2보다 작을 때, 좌측 값이 우측 값보다 크면
_swap(a, l, r); //두 값 swap
}
// v : 삽입 정렬 key값 / k : gap sequence index
void ShellSort(int a[], int n)
{
int i, j, v, k;
int gap[20];
gap[0] = 1; //gap sequence 초항은 1
k = 0;
while (gap[k] < n) {
k++;
gap[k] = pow(4, k) + 3 * pow(2, k - 1) + 1;//Sedgewick gap sequence
}
for (; k >= 0; k--) {
for (i = gap[k]; i < n; i++) { //gap만큼 점프하며 삽입 정렬
v = a[i];
j = i;
while (j >= gap[k] && a[j - gap[k]] > v) {
a[j] = a[j - gap[k]];
j = j - gap[k];
}
a[j] = v;
}
}
}
// sorting이 정순으로 되었는지 check
void checkSort(int a[], int n)
{
int i, sorted;
sorted = true;
for (i = 0; i < n - 1; i++)
{
if (a[i] > a[i + 1])
{
sorted = false;
}
if (!sorted)
{
break;
}
}
if (sorted)
{
cout << "Sorting complete!" << endl;
}
else
{
cout << "Error during sorting..." << endl;
}
}
int main(void) {
clock_t start, stop;
int* a1 = dynamic_alloc(NUM_VALS);
int* a2 = dynamic_alloc(NUM_VALS);
int* a3 = dynamic_alloc(NUM_VALS);
cout << "Number of Values : " << NUM_VALS << "\n";
srand(time(NULL));
for (int i = 0; i < NUM_VALS; i++)
{
a1[i] = rand() % RAND_MAX;
a2[i] = a1[i];
a3[i] = a1[i];
}
start = clock();
bitonic_sort(a1);
stop = clock();
checkSort(a1, NUM_VALS);
cout << "Bitonic_cuda ";
print_rt(start, stop);
start = clock();
ThreeMedianInsertQS(a2, 0, NUM_VALS - 1, 15);
stop = clock();
checkSort(a2, NUM_VALS);
cout << "Quick ";
print_rt(start, stop);
start = clock();
ShellSort(a3, NUM_VALS);
stop = clock();
checkSort(a3, NUM_VALS);
cout << "Shell ";
print_rt(start, stop);
} |
4295b2b00a8eb1f870340dcbe115a810b12762b6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ------------------------------------------------------------------
// Non-Maximum Suppression - 1D case
// Adapted from Shaoqing Ren's 2D implementation for Faster R-CNN
// By Claire Li, 2019
// ------------------------------------------------------------------
#include <stdbool.h>
#include <stdio.h>
#include <vector>
#include <iostream>
#include "nms_cuda_kernel.h"
#define CUDA_WARN(XXX) \
do { if (XXX != hipSuccess) std::cout << "CUDA Error: " << \
hipGetErrorString(XXX) << ", at line " << __LINE__ \
<< std::endl; hipDeviceSynchronize(); } while (0)
#define CUDA_CHECK(condition) \
/* Code block avoids redefinition of hipError_t error */ \
do { \
hipError_t error = condition; \
if (error != hipSuccess) { \
std::cout << hipGetErrorString(error) << std::endl; \
} \
} while (0)
#define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0)) //divide and round-up
int const threadsPerBlock = sizeof(unsigned long long) * 8; // number of threads per block = number of bits of an ULL
// This is because we will use a ULL to record the overlapping
// in each thread.
__device__ inline float devIoU(float const * const a, float const * const b) {
float left = max(a[0], b[0]), right = min(a[1], b[1]);
float interS = max(right - left + 1, 0.f);
float Sa = (a[1] - a[0] + 1);
float Sb = (b[1] - b[0] + 1);
return interS / (Sa + Sb - interS);
}
__global__ void nms_kernel(int n_boxes, float nms_overlap_thresh,float *dev_boxes, unsigned long long *dev_mask) {
/* The kernel function works INSIDE a THREAD
* Parameters
* n_boxes: number of proposals
* nms_overlap_thresh: overlapping threshold
* dev_boxes: 2d-array [n_boxes][3]
* dev_mask: currently empty [n_boxes][n_boxes/threadPerBlock=col_blocks]
*/
const int row_start = blockIdx.y; // row index of current block, 0 <= row_start < col_blocks
const int col_start = blockIdx.x; // column index of current block, 0 <= col_start < col_blocks
// if (row_start > col_start) return; // enable this to avoid redundant computation
const int row_size = min(n_boxes - row_start * threadsPerBlock, threadsPerBlock);
const int col_size = min(n_boxes - col_start * threadsPerBlock, threadsPerBlock);
__shared__ float block_boxes[threadsPerBlock * 3]; // a "__shared__" variable is shared among all threads within this block
// (my speculation is, although every thread has this line, it is only
// executed once for each block)
if (threadIdx.x < col_size) {
block_boxes[threadIdx.x * 3 + 0] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 3 + 0];
block_boxes[threadIdx.x * 3 + 1] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 3 + 1];
block_boxes[threadIdx.x * 3 + 2] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 3 + 2];
} // retrieve the box descriptor
// __syncthreads() waits until all threads in the thread block have reached this point and all global and
// shared memory accesses made by these threads prior to __syncthreads() are visible to all threads in the block.
__syncthreads();
// -->> block_boxes is filled up with valid data now
if (threadIdx.x < row_size) {
const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x; // index of current box
const float *cur_box = dev_boxes + cur_box_idx * 3;
int i = 0;
unsigned long long t = 0; // number of threads per block = number of bits in 't'
int start = 0;
if (row_start == col_start) { // to compute overlapping
start = threadIdx.x + 1;
}
for (i = start; i < col_size; i++) {
if (devIoU(cur_box, block_boxes + i * 3) > nms_overlap_thresh) {
t |= 1ULL << i; // bit i is set to 1 if 'cur_box' overlaps with 'block_boxes[i]'
}
}
const int col_blocks = DIVUP(n_boxes, threadsPerBlock);
dev_mask[cur_box_idx * col_blocks + col_start] = t; // overlapping condition between box y*threadsPerBlock+t and [x*threadsPerBlock:(x+1)*threadsPerBlock]
}
}
void nms_cuda_compute(int* keep_out, int *num_out, float* boxes_host, int boxes_num, int boxes_dim,
float nms_overlap_thresh) {
/* Input:
* boxes_host: original proposals N * 3
* boxes_num: number of proposals N
* boxes_dim: number of elements used to describe a proposal (e.g. 1+4 for 2D box, 1+2 for 1D segment)
* nms_overlap_thresh: overlapping threshold
* Output:
* keep_out: vector containing indices of kept boxes
* num_out: number of kept boxes
*/
// printf("nms threshold %f\n", nms_overlap_thresh);
float* boxes_dev = NULL;
unsigned long long* mask_dev = NULL;
const int col_blocks = DIVUP(boxes_num, threadsPerBlock); // total number of blocks needed
CUDA_CHECK(hipMalloc(&boxes_dev, boxes_num * boxes_dim * sizeof(float))); // N x 3
// copy host memory to device memory
CUDA_CHECK(hipMemcpy(boxes_dev, boxes_host, boxes_num * boxes_dim * sizeof(float), hipMemcpyHostToDevice));
// mask_dev keeps the overlapping condition between any pair of boxes
CUDA_CHECK(hipMalloc(&mask_dev, boxes_num * col_blocks * sizeof(unsigned long long))); // N x col_blocks (x threadsPerBlock bits)
dim3 blocks(col_blocks, col_blocks); // col_blocks x col_blocks x 1, we use 2D grid because NMS requires pair-wise operation
dim3 threads(threadsPerBlock); // threadsPerBlock x 1 x 1
// num of works: (col_blocks * col_blocks) x threadsPerBlock = box_num x col_blocks
hipLaunchKernelGGL(( nms_kernel), dim3(blocks), dim3(threads), 0, 0, boxes_num, nms_overlap_thresh, boxes_dev, mask_dev);
std::vector<unsigned long long> mask_host(boxes_num * col_blocks);
CUDA_CHECK(hipMemcpy(&mask_host[0], mask_dev, sizeof(unsigned long long) * boxes_num * col_blocks,
hipMemcpyDeviceToHost)); // copy device memory to host memory
std::vector<unsigned long long> remv(col_blocks); // col_blocks (x threadsPerBlock bits), recording whether a box has been removed
memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks);
// create a memory for keep_out on cpu
// the following computation is done on cpu
int* keep_out_cpu = new int[boxes_num];
int num_to_keep = 0;
int i, j;
for (i = 0; i < boxes_num; i++) { // enumerate all the boxes
int nblock = i / threadsPerBlock; // index of block
int inblock = i % threadsPerBlock; // index of thread
if (!(remv[nblock] & (1ULL << inblock))) { // if current box has not been removed before
keep_out_cpu[num_to_keep++] = i; // add it to the keep list
unsigned long long *p = &mask_host[0] + i * col_blocks; // extract the row of overlapping condition between box i and all other boxes
for (j = nblock; j < col_blocks; j++) { // blocks before current block has already been considered
remv[j] |= p[j]; // remove every box overlapping with i
}
}
}
// printf("kept boxes (should be at least 1): %d\n", num_to_keep);
// copy keep_out_cpu to keep_out on gpu
CUDA_WARN(hipMemcpy(keep_out, keep_out_cpu, boxes_num * sizeof(int), hipMemcpyHostToDevice));
// copy num_to_keep to num_out on gpu
CUDA_WARN(hipMemcpy(num_out, &num_to_keep, 1 * sizeof(int), hipMemcpyHostToDevice));
// release cuda memory
CUDA_CHECK(hipFree(boxes_dev));
CUDA_CHECK(hipFree(mask_dev));
// release cpu memory
delete []keep_out_cpu;
} | 4295b2b00a8eb1f870340dcbe115a810b12762b6.cu | // ------------------------------------------------------------------
// Non-Maximum Suppression - 1D case
// Adapted from Shaoqing Ren's 2D implementation for Faster R-CNN
// By Claire Li, 2019
// ------------------------------------------------------------------
#include <stdbool.h>
#include <stdio.h>
#include <vector>
#include <iostream>
#include "nms_cuda_kernel.h"
#define CUDA_WARN(XXX) \
do { if (XXX != cudaSuccess) std::cout << "CUDA Error: " << \
cudaGetErrorString(XXX) << ", at line " << __LINE__ \
<< std::endl; cudaDeviceSynchronize(); } while (0)
#define CUDA_CHECK(condition) \
/* Code block avoids redefinition of cudaError_t error */ \
do { \
cudaError_t error = condition; \
if (error != cudaSuccess) { \
std::cout << cudaGetErrorString(error) << std::endl; \
} \
} while (0)
#define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0)) //divide and round-up
int const threadsPerBlock = sizeof(unsigned long long) * 8; // number of threads per block = number of bits of an ULL
// This is because we will use a ULL to record the overlapping
// in each thread.
__device__ inline float devIoU(float const * const a, float const * const b) {
float left = max(a[0], b[0]), right = min(a[1], b[1]);
float interS = max(right - left + 1, 0.f);
float Sa = (a[1] - a[0] + 1);
float Sb = (b[1] - b[0] + 1);
return interS / (Sa + Sb - interS);
}
__global__ void nms_kernel(int n_boxes, float nms_overlap_thresh,float *dev_boxes, unsigned long long *dev_mask) {
/* The kernel function works INSIDE a THREAD
* Parameters
* n_boxes: number of proposals
* nms_overlap_thresh: overlapping threshold
* dev_boxes: 2d-array [n_boxes][3]
* dev_mask: currently empty [n_boxes][n_boxes/threadPerBlock=col_blocks]
*/
const int row_start = blockIdx.y; // row index of current block, 0 <= row_start < col_blocks
const int col_start = blockIdx.x; // column index of current block, 0 <= col_start < col_blocks
// if (row_start > col_start) return; // enable this to avoid redundant computation
const int row_size = min(n_boxes - row_start * threadsPerBlock, threadsPerBlock);
const int col_size = min(n_boxes - col_start * threadsPerBlock, threadsPerBlock);
__shared__ float block_boxes[threadsPerBlock * 3]; // a "__shared__" variable is shared among all threads within this block
// (my speculation is, although every thread has this line, it is only
// executed once for each block)
if (threadIdx.x < col_size) {
block_boxes[threadIdx.x * 3 + 0] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 3 + 0];
block_boxes[threadIdx.x * 3 + 1] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 3 + 1];
block_boxes[threadIdx.x * 3 + 2] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 3 + 2];
} // retrieve the box descriptor
// __syncthreads() waits until all threads in the thread block have reached this point and all global and
// shared memory accesses made by these threads prior to __syncthreads() are visible to all threads in the block.
__syncthreads();
// -->> block_boxes is filled up with valid data now
if (threadIdx.x < row_size) {
const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x; // index of current box
const float *cur_box = dev_boxes + cur_box_idx * 3;
int i = 0;
unsigned long long t = 0; // number of threads per block = number of bits in 't'
int start = 0;
if (row_start == col_start) { // to compute overlapping
start = threadIdx.x + 1;
}
for (i = start; i < col_size; i++) {
if (devIoU(cur_box, block_boxes + i * 3) > nms_overlap_thresh) {
t |= 1ULL << i; // bit i is set to 1 if 'cur_box' overlaps with 'block_boxes[i]'
}
}
const int col_blocks = DIVUP(n_boxes, threadsPerBlock);
dev_mask[cur_box_idx * col_blocks + col_start] = t; // overlapping condition between box y*threadsPerBlock+t and [x*threadsPerBlock:(x+1)*threadsPerBlock]
}
}
void nms_cuda_compute(int* keep_out, int *num_out, float* boxes_host, int boxes_num, int boxes_dim,
float nms_overlap_thresh) {
/* Input:
* boxes_host: original proposals N * 3
* boxes_num: number of proposals N
* boxes_dim: number of elements used to describe a proposal (e.g. 1+4 for 2D box, 1+2 for 1D segment)
* nms_overlap_thresh: overlapping threshold
* Output:
* keep_out: vector containing indices of kept boxes
* num_out: number of kept boxes
*/
// printf("nms threshold %f\n", nms_overlap_thresh);
float* boxes_dev = NULL;
unsigned long long* mask_dev = NULL;
const int col_blocks = DIVUP(boxes_num, threadsPerBlock); // total number of blocks needed
CUDA_CHECK(cudaMalloc(&boxes_dev, boxes_num * boxes_dim * sizeof(float))); // N x 3
// copy host memory to device memory
CUDA_CHECK(cudaMemcpy(boxes_dev, boxes_host, boxes_num * boxes_dim * sizeof(float), cudaMemcpyHostToDevice));
// mask_dev keeps the overlapping condition between any pair of boxes
CUDA_CHECK(cudaMalloc(&mask_dev, boxes_num * col_blocks * sizeof(unsigned long long))); // N x col_blocks (x threadsPerBlock bits)
dim3 blocks(col_blocks, col_blocks); // col_blocks x col_blocks x 1, we use 2D grid because NMS requires pair-wise operation
dim3 threads(threadsPerBlock); // threadsPerBlock x 1 x 1
// num of works: (col_blocks * col_blocks) x threadsPerBlock = box_num x col_blocks
nms_kernel<<<blocks, threads>>>(boxes_num, nms_overlap_thresh, boxes_dev, mask_dev);
std::vector<unsigned long long> mask_host(boxes_num * col_blocks);
CUDA_CHECK(cudaMemcpy(&mask_host[0], mask_dev, sizeof(unsigned long long) * boxes_num * col_blocks,
cudaMemcpyDeviceToHost)); // copy device memory to host memory
std::vector<unsigned long long> remv(col_blocks); // col_blocks (x threadsPerBlock bits), recording whether a box has been removed
memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks);
// create a memory for keep_out on cpu
// the following computation is done on cpu
int* keep_out_cpu = new int[boxes_num];
int num_to_keep = 0;
int i, j;
for (i = 0; i < boxes_num; i++) { // enumerate all the boxes
int nblock = i / threadsPerBlock; // index of block
int inblock = i % threadsPerBlock; // index of thread
if (!(remv[nblock] & (1ULL << inblock))) { // if current box has not been removed before
keep_out_cpu[num_to_keep++] = i; // add it to the keep list
unsigned long long *p = &mask_host[0] + i * col_blocks; // extract the row of overlapping condition between box i and all other boxes
for (j = nblock; j < col_blocks; j++) { // blocks before current block has already been considered
remv[j] |= p[j]; // remove every box overlapping with i
}
}
}
// printf("kept boxes (should be at least 1): %d\n", num_to_keep);
// copy keep_out_cpu to keep_out on gpu
CUDA_WARN(cudaMemcpy(keep_out, keep_out_cpu, boxes_num * sizeof(int), cudaMemcpyHostToDevice));
// copy num_to_keep to num_out on gpu
CUDA_WARN(cudaMemcpy(num_out, &num_to_keep, 1 * sizeof(int), cudaMemcpyHostToDevice));
// release cuda memory
CUDA_CHECK(cudaFree(boxes_dev));
CUDA_CHECK(cudaFree(mask_dev));
// release cpu memory
delete []keep_out_cpu;
} |
9a1363a86d67610ea0621a63b9fd12cbe1d777d9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#pragma once
//extern __shared__ unsigned char SMem[];
#include "../Util/ptx.cu"
#include "../Util/GlobalWrite.cu"
namespace scc4k{
extern __shared__ unsigned char SMem[];
#define PRIMEQ 2654435769u // ((sqrt(5)-1)/2) << 32
// Best-effort duplicate filtering for frontier vertices via a hash table in
// (shared) memory.  The thread hashes 'dest' (Fibonacci hashing via PRIMEQ),
// writes the packed pair (Tid, dest) into the slot, then reads the slot back:
//  - if our write survived (recover.x == Tid) or the slot now holds a
//    DIFFERENT vertex (recover.y != dest, i.e. a hash collision), 'dest' is
//    appended to the per-thread register queue.  Filtering is therefore
//    conservative: duplicates may slip through, but no vertex is ever lost;
//  - otherwise another thread already enqueued the same vertex in this round
//    and we skip it (optionally counting it in the global duplicateCounter).
// NOTE(review): correctness relies on the volatile 64-bit store/load of the
// packed pair happening as one access — confirm for the target architectures.
template<int BlockDim>
__device__ __forceinline__ void DuplicateRemove(const int dest, volatile long long int* HashTable, int* Queue, int &founds) {
    unsigned hash = dest * PRIMEQ;
    // Fold the hash into the table index range derived from the per-block
    // shared-memory budget (the two masked terms are summed, so the table
    // spans roughly SMem_Per_Block/8 64-bit slots — assumes the /12 and /24
    // quotients are powers of two).
    hash = (hash & (unsigned)(SMem_Per_Block(BlockDim)/12 - 1)) + (hash & (unsigned)(SMem_Per_Block(BlockDim)/24 - 1));
    int2 toWrite = make_int2(Tid, dest);
    HashTable[hash] = reinterpret_cast<volatile long long int&>(toWrite);
    int2 recover = reinterpret_cast<int2*>( const_cast<long long int*>(HashTable) )[hash];
    if (recover.x == Tid || recover.y != dest)
        Queue[founds++] = dest;          // we won the slot, or a collision: keep it
    else if (COUNT_DUP && recover.x != Tid && recover.y == dest)
        atomicAdd(&duplicateCounter, 1); // same vertex enqueued by another thread
}
// Visit one destination vertex 'dest' during a frontier-expansion step.
// The compile-time 'visitType' selects the visit rule:
//  - SCC_Decomposition / forward:  claim 'dest' if its color equals
//    colorToConfront, recoloring it to colorToTakeValue;
//  - SCC_Decomposition / backward: also recognizes vertices already claimed
//    by the forward pass (encoded as color offset by V) and re-marks them
//    with colorToTakeValue + V — presumably V is the vertex count used as a
//    color-space offset; confirm against the caller;
//  - Coloring:  propagate -c when c >= 0 and -c matches colorToConfront,
//    writing it into both color slots of 'dest';
//  - BFS:       classic distance visit (devDistance[dest] == INF -> level).
// Newly claimed vertices go into the per-thread register queue 'Queue'
// ('founds' valid entries), optionally after hash-based duplicate removal.
template<int BlockDim, bool DUP_REM, bool forward, VisitType visitType>
__device__ __forceinline__ void KVisit(	const int V, const int dest, dist_t* devDistance,
										int* Queue, int& founds, const int level, volatile long long int* HashTable,
										color_t* color, const int colorToConfront, const int colorToTakeValue)
{
	color_t c = color[ (dest << 1) | 1];
	switch(visitType)
	{
	case SCC_Decomposition:
	if(forward)
	{
		if (c == colorToConfront) {
			color[ (dest << 1) | 1 ] = colorToTakeValue;
			//devDistance[dest] = (dist_t) level;
			if (DUP_REM)
				DuplicateRemove<BlockDim>(dest, HashTable, Queue, founds);
			else
				Queue[founds++] = dest;
		}
	}
	else
	{
		bool upd = false;
		if (c == colorToConfront) { color[ (dest << 1) | 1 ] = colorToTakeValue; upd = true;} // not visited forward
		if (c+V == colorToTakeValue) { color[ (dest << 1) | 1 ] = colorToTakeValue+V; upd = true;} //visited forward
		if(upd ) {
			//color[ (dest << 1) | 1 ] = c + colorToTakeValue;
			//devDistance[dest] = (dist_t) level;
			if (DUP_REM)
				DuplicateRemove<BlockDim>(dest, HashTable, Queue, founds);
			else
				Queue[founds++] = dest;
		}
	}
	break;
	case Coloring:
		//if( c3.color < 0 ) break; // do not re-process already-processed elements
		if (c >= 0 && -c == colorToConfront) {
			color[ dest << 1 ] = -c;
			color[ ( dest << 1 ) | 1 ] = -c;
			if (DUP_REM)
				DuplicateRemove<BlockDim>(dest, HashTable, Queue, founds);
			else
				Queue[founds++] = dest;
		}
	// NOTE(review): no 'break' at the end of the Coloring case — control falls
	// through into the BFS case, which can enqueue 'dest' a second time when
	// devDistance[dest] == INF.  Confirm this fall-through is intentional.
	case BFS:
		if( devDistance[dest] == INF )
		{
			devDistance[dest] = level;
			Queue[founds++] = dest;
		}
		break;
	}
}
// Scan the adjacency segment devEdge[start, end) cooperatively with WARP_SZ
// lanes: lane L handles indices start+L, start+L+WARP_SZ, ... and visits each
// destination through KVisit.  (Tid & _Mod2<WARP_SZ>::VALUE is the lane index
// within the WARP_SZ-wide group — assumes _Mod2<W>::VALUE == W-1; confirm.)
// Two compile-time variants:
//  SAFE == 0: accumulate freely into the register queue (assumes it never
//             exceeds REG_QUEUE entries);
//  SAFE == 1: whenever any lane of the warp fills its register queue, the
//             whole warp flushes its queues to the global frontier devF2
//             (size counter at devF2SizePrt) and then resumes scanning.
template<int BlockDim, int WARP_SZ, bool DUP_REM, bool forward, VisitType visitType>
__device__ __forceinline__ void EdgeVisit(const int V, int* __restrict__ devEdge,
                                           dist_t* __restrict__ devDistance,
                                           int* __restrict__ devF2,
                                           int* __restrict__ devF2SizePrt,
                                           int start, int end,
                                           int* Queue, int& founds, const int level, volatile long long int* HashTable,
                                           color_t* __restrict__ color, const int colorToConfront, const int colorToTakeValue) {
#if SAFE == 0
	for (int k = start + (Tid & _Mod2<WARP_SZ>::VALUE); k < end; k += WARP_SZ) {
		const int dest = devEdge[k];
		KVisit<BlockDim, DUP_REM, forward, visitType>(V, dest, devDistance, Queue, founds, level, HashTable, color, colorToConfront, colorToTakeValue);
	}
#elif SAFE == 1
	bool flag = true;
	int k = start + (Tid & _Mod2<WARP_SZ>::VALUE);
	while (flag) {
		while (k < end && founds < REG_QUEUE) {
			const int dest = devEdge[k];
			KVisit<BlockDim, DUP_REM, forward, visitType>(V, dest, devDistance, Queue, founds, level, HashTable, color, colorToConfront, colorToTakeValue);
			k += WARP_SZ;
		}
		// Warp-wide vote: keep every lane looping until no lane overflows.
		// NOTE(review): mask-less __any is the legacy vote intrinsic; it maps
		// to HIP's warp vote here, but relies on warp-synchronous execution.
		if (__any(founds >= REG_QUEUE)) {
			FrontierWrite::Write<BlockDim, FrontierWrite::SIMPLE>(devF2, devF2SizePrt, Queue, founds);
			founds = 0;
		} else
			flag = false;
	}
#endif
}
}
| 9a1363a86d67610ea0621a63b9fd12cbe1d777d9.cu | #pragma once
//extern __shared__ unsigned char SMem[];
#include "../Util/ptx.cu"
#include "../Util/GlobalWrite.cu"
namespace scc4k{
extern __shared__ unsigned char SMem[];
#define PRIMEQ 2654435769u // ((sqrt(5)-1)/2) << 32
// Best-effort duplicate filtering for frontier vertices via a hash table in
// (shared) memory.  The thread hashes 'dest' (Fibonacci hashing via PRIMEQ),
// writes the packed pair (Tid, dest) into the slot, then reads the slot back:
//  - if our write survived (recover.x == Tid) or the slot now holds a
//    DIFFERENT vertex (recover.y != dest, i.e. a hash collision), 'dest' is
//    appended to the per-thread register queue.  Filtering is therefore
//    conservative: duplicates may slip through, but no vertex is ever lost;
//  - otherwise another thread already enqueued the same vertex in this round
//    and we skip it (optionally counting it in the global duplicateCounter).
// NOTE(review): correctness relies on the volatile 64-bit store/load of the
// packed pair happening as one access — confirm for the target architectures.
template<int BlockDim>
__device__ __forceinline__ void DuplicateRemove(const int dest, volatile long long int* HashTable, int* Queue, int &founds) {
    unsigned hash = dest * PRIMEQ;
    // Fold the hash into the table index range derived from the per-block
    // shared-memory budget (the two masked terms are summed, so the table
    // spans roughly SMem_Per_Block/8 64-bit slots — assumes the /12 and /24
    // quotients are powers of two).
    hash = (hash & (unsigned)(SMem_Per_Block(BlockDim)/12 - 1)) + (hash & (unsigned)(SMem_Per_Block(BlockDim)/24 - 1));
    int2 toWrite = make_int2(Tid, dest);
    HashTable[hash] = reinterpret_cast<volatile long long int&>(toWrite);
    int2 recover = reinterpret_cast<int2*>( const_cast<long long int*>(HashTable) )[hash];
    if (recover.x == Tid || recover.y != dest)
        Queue[founds++] = dest;          // we won the slot, or a collision: keep it
    else if (COUNT_DUP && recover.x != Tid && recover.y == dest)
        atomicAdd(&duplicateCounter, 1); // same vertex enqueued by another thread
}
// Visit one destination vertex 'dest' during a frontier-expansion step.
// The compile-time 'visitType' selects the visit rule:
//  - SCC_Decomposition / forward:  claim 'dest' if its color equals
//    colorToConfront, recoloring it to colorToTakeValue;
//  - SCC_Decomposition / backward: also recognizes vertices already claimed
//    by the forward pass (encoded as color offset by V) and re-marks them
//    with colorToTakeValue + V — presumably V is the vertex count used as a
//    color-space offset; confirm against the caller;
//  - Coloring:  propagate -c when c >= 0 and -c matches colorToConfront,
//    writing it into both color slots of 'dest';
//  - BFS:       classic distance visit (devDistance[dest] == INF -> level).
// Newly claimed vertices go into the per-thread register queue 'Queue'
// ('founds' valid entries), optionally after hash-based duplicate removal.
template<int BlockDim, bool DUP_REM, bool forward, VisitType visitType>
__device__ __forceinline__ void KVisit(	const int V, const int dest, dist_t* devDistance,
										int* Queue, int& founds, const int level, volatile long long int* HashTable,
										color_t* color, const int colorToConfront, const int colorToTakeValue)
{
	color_t c = color[ (dest << 1) | 1];
	switch(visitType)
	{
	case SCC_Decomposition:
	if(forward)
	{
		if (c == colorToConfront) {
			color[ (dest << 1) | 1 ] = colorToTakeValue;
			//devDistance[dest] = (dist_t) level;
			if (DUP_REM)
				DuplicateRemove<BlockDim>(dest, HashTable, Queue, founds);
			else
				Queue[founds++] = dest;
		}
	}
	else
	{
		bool upd = false;
		if (c == colorToConfront) { color[ (dest << 1) | 1 ] = colorToTakeValue; upd = true;} // not visited forward
		if (c+V == colorToTakeValue) { color[ (dest << 1) | 1 ] = colorToTakeValue+V; upd = true;} //visited forward
		if(upd ) {
			//color[ (dest << 1) | 1 ] = c + colorToTakeValue;
			//devDistance[dest] = (dist_t) level;
			if (DUP_REM)
				DuplicateRemove<BlockDim>(dest, HashTable, Queue, founds);
			else
				Queue[founds++] = dest;
		}
	}
	break;
	case Coloring:
		//if( c3.color < 0 ) break; // do not re-process already-processed elements
		if (c >= 0 && -c == colorToConfront) {
			color[ dest << 1 ] = -c;
			color[ ( dest << 1 ) | 1 ] = -c;
			if (DUP_REM)
				DuplicateRemove<BlockDim>(dest, HashTable, Queue, founds);
			else
				Queue[founds++] = dest;
		}
	// NOTE(review): no 'break' at the end of the Coloring case — control falls
	// through into the BFS case, which can enqueue 'dest' a second time when
	// devDistance[dest] == INF.  Confirm this fall-through is intentional.
	case BFS:
		if( devDistance[dest] == INF )
		{
			devDistance[dest] = level;
			Queue[founds++] = dest;
		}
		break;
	}
}
// Scan the adjacency segment devEdge[start, end) cooperatively with WARP_SZ
// lanes: lane L handles indices start+L, start+L+WARP_SZ, ... and visits each
// destination through KVisit.  (Tid & _Mod2<WARP_SZ>::VALUE is the lane index
// within the WARP_SZ-wide group — assumes _Mod2<W>::VALUE == W-1; confirm.)
// Two compile-time variants:
//  SAFE == 0: accumulate freely into the register queue (assumes it never
//             exceeds REG_QUEUE entries);
//  SAFE == 1: whenever any lane of the warp fills its register queue, the
//             whole warp flushes its queues to the global frontier devF2
//             (size counter at devF2SizePrt) and then resumes scanning.
template<int BlockDim, int WARP_SZ, bool DUP_REM, bool forward, VisitType visitType>
__device__ __forceinline__ void EdgeVisit(const int V, int* __restrict__ devEdge,
                                           dist_t* __restrict__ devDistance,
                                           int* __restrict__ devF2,
                                           int* __restrict__ devF2SizePrt,
                                           int start, int end,
                                           int* Queue, int& founds, const int level, volatile long long int* HashTable,
                                           color_t* __restrict__ color, const int colorToConfront, const int colorToTakeValue) {
#if SAFE == 0
	for (int k = start + (Tid & _Mod2<WARP_SZ>::VALUE); k < end; k += WARP_SZ) {
		const int dest = devEdge[k];
		KVisit<BlockDim, DUP_REM, forward, visitType>(V, dest, devDistance, Queue, founds, level, HashTable, color, colorToConfront, colorToTakeValue);
	}
#elif SAFE == 1
	bool flag = true;
	int k = start + (Tid & _Mod2<WARP_SZ>::VALUE);
	while (flag) {
		while (k < end && founds < REG_QUEUE) {
			const int dest = devEdge[k];
			KVisit<BlockDim, DUP_REM, forward, visitType>(V, dest, devDistance, Queue, founds, level, HashTable, color, colorToConfront, colorToTakeValue);
			k += WARP_SZ;
		}
		// Warp-wide vote: keep every lane looping until no lane overflows.
		// NOTE(review): mask-less __any is the legacy vote intrinsic, removed
		// for compute capability 7.0+ — migrate to
		// __any_sync(mask, founds >= REG_QUEUE) with an explicit member mask.
		if (__any(founds >= REG_QUEUE)) {
			FrontierWrite::Write<BlockDim, FrontierWrite::SIMPLE>(devF2, devF2SizePrt, Queue, founds);
			founds = 0;
		} else
			flag = false;
	}
#endif
}
}
|
a2524db6d9ffc8ac11ebf4ae61cbac539afbf114.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stddef.h>
#include <stdint.h>
#include "model_gpu_utils.h"
#include "mixed_tentusscher_myo_epi_2004.h"
// Allocates the pitched state-variable matrix on the GPU (NEQ rows x
// num_volumes columns), publishes the row pitch through the device constant
// 'pitch', copies the myo/epi mask from extra_data to the device, and launches
// the initial-conditions kernel.  Returns the host-side pitch (pitch_h).
//
// extra_data is REQUIRED for this mixed model: the kernel reads
// mapping[threadID] unconditionally, so launching with a NULL mapping would
// fault with an illegal address.  We fail fast instead, consistent with
// solve_model_odes_gpu().
extern "C" SET_ODE_INITIAL_CONDITIONS_GPU(set_model_initial_conditions_gpu)
{
    print_to_stdout_and_file("Using mixed version of TenTusscher 2004 myocardium + epicardium GPU model\n\n");

    // execution configuration (ceil-div so the last partial block is covered)
    const int GRID = (num_volumes + BLOCK_SIZE - 1)/BLOCK_SIZE;

    size_t size = num_volumes*sizeof(real);

    check_cuda_error(hipMallocPitch((void **) &(*sv), &pitch_h, size, (size_t )NEQ));
    check_cuda_error(hipMemcpyToSymbol(pitch, &pitch_h, sizeof(size_t)));

    // Get the mapping array (myocardium/epicardium mask)
    uint32_t *mapping = NULL;
    uint32_t *mapping_device = NULL;
    if(extra_data)
    {
        mapping = (uint32_t*)extra_data;
        check_cuda_error(hipMalloc((void **)&mapping_device, extra_data_bytes_size));
        check_cuda_error(hipMemcpy(mapping_device, mapping, extra_data_bytes_size, hipMemcpyHostToDevice));
    }
    else
    {
        // Without a mask the kernel would dereference a NULL device pointer.
        print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
    }

    hipLaunchKernelGGL(( kernel_set_model_inital_conditions) , dim3(GRID), dim3(BLOCK_SIZE), 0, 0, *sv, mapping_device, num_volumes);

    check_cuda_error( hipPeekAtLastError() );
    // Checked sync: surface any asynchronous kernel failure before freeing.
    check_cuda_error( hipDeviceSynchronize() );

    check_cuda_error(hipFree(mapping_device));

    return pitch_h;
}
// Host entry point: advances the ODE state of every cell by 'num_steps'
// steps of size 'dt' on the GPU.
//  - stim_currents (host, one entry per cell) is staged to the device;
//  - cells_to_solve (optional, adaptive meshes) maps thread index -> cell id;
//  - extra_data holds the myo/epi mask and is REQUIRED for this mixed model
//    (the function aborts if it is missing).
// All staged device buffers are freed before returning.
extern "C" SOLVE_MODEL_ODES_GPU(solve_model_odes_gpu)
{
    // execution configuration (ceil-div so the last partial block is covered)
    const int GRID = ((int)num_cells_to_solve + BLOCK_SIZE - 1)/BLOCK_SIZE;
    size_t stim_currents_size = sizeof(real)*num_cells_to_solve;
    size_t cells_to_solve_size = sizeof(uint32_t)*num_cells_to_solve;
    real *stims_currents_device;
    check_cuda_error(hipMalloc((void **) &stims_currents_device, stim_currents_size));
    check_cuda_error(hipMemcpy(stims_currents_device, stim_currents, stim_currents_size, hipMemcpyHostToDevice));
    // cells_to_solve is only provided when an adaptive mesh is in use
    uint32_t *cells_to_solve_device = NULL;
    if(cells_to_solve != NULL)
    {
        check_cuda_error(hipMalloc((void **) &cells_to_solve_device, cells_to_solve_size));
        check_cuda_error(hipMemcpy(cells_to_solve_device, cells_to_solve, cells_to_solve_size, hipMemcpyHostToDevice));
    }
    // Get the mapping array (myocardium/epicardium mask)
    uint32_t *mapping = NULL;
    uint32_t *mapping_device = NULL;
    if(extra_data)
    {
        mapping = (uint32_t*)extra_data;
        check_cuda_error(hipMalloc((void **)&mapping_device, extra_data_bytes_size));
        check_cuda_error(hipMemcpy(mapping_device, mapping, extra_data_bytes_size, hipMemcpyHostToDevice));
    }
    else
    {
        print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
    }
    hipLaunchKernelGGL(( solve_gpu) , dim3(GRID), dim3(BLOCK_SIZE), 0, 0, dt, sv, stims_currents_device, cells_to_solve_device, mapping_device, num_cells_to_solve, num_steps);
    check_cuda_error( hipPeekAtLastError() );
    // hipFree synchronizes implicitly, so kernel completion is awaited here.
    check_cuda_error(hipFree(stims_currents_device));
    if(cells_to_solve_device) check_cuda_error(hipFree(cells_to_solve_device));
    if(mapping_device) check_cuda_error(hipFree(mapping_device));
}
// GPU kernel: write per-cell initial conditions into the pitched
// state-variable matrix 'sv' (one row per state variable, row pitch in the
// device constant 'pitch'; one column per cell).
// mapping[threadID] == 0 selects the TenTusscher 2004 MYOCARDIUM steady
// state, any other value selects the EPICARDIUM steady state.  'mapping'
// must be a valid device array of at least num_volumes entries (it is read
// unconditionally).
__global__ void kernel_set_model_inital_conditions(real *sv, uint32_t *mapping, int num_volumes)
{
    int threadID = blockDim.x * blockIdx.x + threadIdx.x;

    if (threadID < num_volumes)
    {
        // Steady-state values (from pre-paced runs), in state order:
        // V, M, H, J, Xr1, Xr2, Xs, S, R, D, F, FCa, G, Cai, CaSR, Nai, Ki.
        real sv_myo[] = {-86.3965119057144,0.00133824305081220,0.775463576993407,0.775278393595599,0.000179499343643571,0.483303039835057,0.00297647859235379,0.999998290403642,1.98961879737287e-08,1.93486789479597e-05,0.999599147019885,1.00646342475688,0.999975178010127,5.97703651642618e-05,0.418325344820368,10.7429775420171,138.918155900633};
        real sv_epi[] = {-86.4172552153702,0.00133233093318418,0.775980725003160,0.775871451583533,0.000178484465968596,0.483518904573916,0.00297208335439809,0.999998297825169,1.98274727808946e-08,1.92952362196655e-05,0.999768268008847,1.00667048889468,0.999984854519288,5.50424977684767e-05,0.352485262813812,10.8673127043200,138.860197273148};

        // Select the steady state for this cell's type.
        const real *ic = (mapping[threadID] == 0) ? sv_myo : sv_epi;

        // 17 state variables (== NEQ); one pitched row per variable.
        for (int i = 0; i < 17; i++)
            *((real *) ((char *) sv + pitch * i) + threadID) = ic[i];
    }
}
// GPU kernel: each thread integrates one cell of the tissue for 'num_steps'
// steps of size 'dt'.  When 'cells_to_solve' is non-NULL (adaptive mesh) it
// maps the thread index to the cell id; otherwise the thread index is the
// cell id.  mapping[sv_id] selects the cell model: 0 = myocardium, anything
// else = epicardium.
//
// NOTE(review): RHS_gpu_myo/RHS_gpu_epi store ALREADY-UPDATED state values in
// rDY (e.g. rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M), a Rush-Larsen update),
// yet this loop applies sv = sv + dt*rDY as if rDY held time derivatives.
// Confirm this combination is the intended integration scheme.
__global__ void solve_gpu(real dt, real *sv, real* stim_currents,
                          uint32_t *cells_to_solve, uint32_t *mapping, uint32_t num_cells_to_solve,
                          int num_steps)
{
    int threadID = blockDim.x * blockIdx.x + threadIdx.x;
    int sv_id;

    // Each thread solves one cell model
    if(threadID < num_cells_to_solve)
    {
        if(cells_to_solve)
            sv_id = cells_to_solve[threadID];
        else
            sv_id = threadID;

        real rDY[NEQ];

        for (int n = 0; n < num_steps; ++n)
        {
            if (mapping[sv_id] == 0)
            {
                // Myocardium cell
                RHS_gpu_myo(sv, rDY, stim_currents[threadID], sv_id, dt);

                for(int i = 0; i < NEQ; i++)
                {
                    *((real *) ((char *) sv + pitch * i) + sv_id) = dt * rDY[i] + *((real *) ((char *) sv + pitch * i) + sv_id);
                }
            }
            else
            {
                // Epicardium cell
                RHS_gpu_epi(sv, rDY, stim_currents[threadID], sv_id, dt);

                for (int i = 0; i < NEQ; i++)
                {
                    *((real *) ((char *) sv + pitch * i) + sv_id) = dt * rDY[i] + *((real *) ((char *) sv + pitch * i) + sv_id);
                }
            }
        }
    }
}
// Right-hand side of the TenTusscher 2004 MYOCARDIUM cell model for one cell.
// Reads the 17 state variables of cell 'threadID_' from the pitched state
// matrix 'sv_', computes all membrane currents, advances the intracellular
// ion concentrations in place, and writes the results into rDY_.
//
// NOTE(review): rDY_[1..12] receive ALREADY-UPDATED gate values (Rush-Larsen
// exponential step) and rDY_[0] receives svolt + dt*(-sItot) — i.e. updated
// states rather than time derivatives — while the caller (solve_gpu) applies
// sv = sv + dt*rDY.  Confirm this combination is intentional.
inline __device__ void RHS_gpu_myo (real *sv_, real *rDY_, real stim_current, int threadID_, real dt)
{
    // State variables
    real svolt = *((real*)((char*)sv_ + pitch * 0) + threadID_);
    real sm    = *((real*)((char*)sv_ + pitch * 1) + threadID_);
    real sh    = *((real*)((char*)sv_ + pitch * 2) + threadID_);
    real sj    = *((real*)((char*)sv_ + pitch * 3) + threadID_);
    real sxr1  = *((real*)((char*)sv_ + pitch * 4) + threadID_);
    real sxr2  = *((real*)((char*)sv_ + pitch * 5) + threadID_);
    real sxs   = *((real*)((char*)sv_ + pitch * 6) + threadID_);
    real ss    = *((real*)((char*)sv_ + pitch * 7) + threadID_);
    real sr    = *((real*)((char*)sv_ + pitch * 8) + threadID_);
    real sd    = *((real*)((char*)sv_ + pitch * 9) + threadID_);
    real sf    = *((real*)((char*)sv_ + pitch * 10) + threadID_);
    real sfca  = *((real*)((char*)sv_ + pitch * 11) + threadID_);
    real sg    = *((real*)((char*)sv_ + pitch * 12) + threadID_);
    real Cai   = *((real*)((char*)sv_ + pitch * 13) + threadID_);
    real CaSR  = *((real*)((char*)sv_ + pitch * 14) + threadID_);
    real Nai   = *((real*)((char*)sv_ + pitch * 15) + threadID_);
    real Ki    = *((real*)((char*)sv_ + pitch * 16) + threadID_);

    //External concentrations
    real Ko=5.4;
    real Cao=2.0;
    real Nao=140.0;

    //Intracellular volumes
    real Vc=0.016404;
    real Vsr=0.001094;

    //Calcium dynamics
    real Bufc=0.15f;
    real Kbufc=0.001f;
    real Bufsr=10.f;
    real Kbufsr=0.3f;
    real taufca=2.f;
    real taug=2.f;
    real Vmaxup=0.000425f;
    real Kup=0.00025f;

    //Constants
    const real R = 8314.472f;
    const real F = 96485.3415f;
    const real T =310.0f;
    real RTONF   =(R*T)/F;

    //Cellular capacitance
    real CAPACITANCE=0.185;

    //Parameters for currents
    //Parameters for IKr
    real Gkr=0.096;
    //Parameters for Iks
    real pKNa=0.03;
    // [!] Myocardium cell
    real Gks=0.062;
    //Parameters for Ik1
    real GK1=5.405;
    //Parameters for Ito
    // [!] Myocardium cell
    real Gto=0.294;
    //Parameters for INa
    real GNa=14.838;
    //Parameters for IbNa
    real GbNa=0.00029;
    //Parameters for INaK
    real KmK=1.0;
    real KmNa=40.0;
    real knak=1.362;
    //Parameters for ICaL
    real GCaL=0.000175;
    //Parameters for IbCa
    real GbCa=0.000592;
    //Parameters for INaCa
    real knaca=1000;
    real KmNai=87.5;
    real KmCa=1.38;
    real ksat=0.1;
    real n=0.35;
    //Parameters for IpCa
    real GpCa=0.825;
    real KpCa=0.0005;
    //Parameters for IpK;
    real GpK=0.0146;

    // Working variables: membrane currents, concentration fluxes, gate
    // steady states and time constants.
    real IKr;
    real IKs;
    real IK1;
    real Ito;
    real INa;
    real IbNa;
    real ICaL;
    real IbCa;
    real INaCa;
    real IpCa;
    real IpK;
    real INaK;
    real Irel;
    real Ileak;

    real dNai;
    real dKi;
    real dCai;
    real dCaSR;

    real A;
    // real BufferFactorc;
    // real BufferFactorsr;
    real SERCA;
    real Caisquare;
    real CaSRsquare;
    real CaCurrent;
    real CaSRCurrent;

    real fcaold;
    real gold;
    real Ek;
    real Ena;
    real Eks;
    real Eca;
    real CaCSQN;
    real bjsr;
    real cjsr;
    real CaBuf;
    real bc;
    real cc;
    real Ak1;
    real Bk1;
    real rec_iK1;
    real rec_ipK;
    real rec_iNaK;
    real AM;
    real BM;
    real AH_1;
    real BH_1;
    real AH_2;
    real BH_2;
    real AJ_1;
    real BJ_1;
    real AJ_2;
    real BJ_2;
    real M_INF;
    real H_INF;
    real J_INF;
    real TAU_M;
    real TAU_H;
    real TAU_J;
    real axr1;
    real bxr1;
    real axr2;
    real bxr2;
    real Xr1_INF;
    real Xr2_INF;
    real TAU_Xr1;
    real TAU_Xr2;
    real Axs;
    real Bxs;
    real Xs_INF;
    real TAU_Xs;
    real R_INF;
    real TAU_R;
    real S_INF;
    real TAU_S;
    real Ad;
    real Bd;
    real Cd;
    real TAU_D;
    real D_INF;
    real TAU_F;
    real F_INF;
    real FCa_INF;
    real G_INF;

    real inverseVcF2=1/(2*Vc*F);
    real inverseVcF=1./(Vc*F);
    real Kupsquare=Kup*Kup;
    // real BufcKbufc=Bufc*Kbufc;
    // real Kbufcsquare=Kbufc*Kbufc;
    // real Kbufc2=2*Kbufc;
    // real BufsrKbufsr=Bufsr*Kbufsr;
    // const real Kbufsrsquare=Kbufsr*Kbufsr;
    // const real Kbufsr2=2*Kbufsr;
    const real exptaufca=exp(-dt/taufca);
    const real exptaug=exp(-dt/taug);

    real sItot;

    //Needed to compute currents (Nernst/reversal potentials)
    Ek=RTONF*(log((Ko/Ki)));
    Ena=RTONF*(log((Nao/Nai)));
    Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
    Eca=0.5*RTONF*(log((Cao/Cai)));
    Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
    Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
         exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
    rec_iK1=Ak1/(Ak1+Bk1);
    rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
    rec_ipK=1./(1.+exp((25-svolt)/5.98));

    //Compute currents
    INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
    ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
         (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
    Ito=Gto*sr*ss*(svolt-Ek);
    IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
    IKs=Gks*sxs*sxs*(svolt-Eks);
    IK1=GK1*rec_iK1*(svolt-Ek);
    INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
          (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
          (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
           exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
    INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
    IpCa=GpCa*Cai/(KpCa+Cai);
    IpK=GpK*rec_ipK*(svolt-Ek);
    IbNa=GbNa*(svolt-Ena);
    IbCa=GbCa*(svolt-Eca);

    //Determine total current
    (sItot) = IKr    +
              IKs   +
              IK1   +
              Ito   +
              INa   +
              IbNa  +
              ICaL  +
              IbCa  +
              INaK  +
              INaCa +
              IpCa  +
              IpK   +
              stim_current;

    //update concentrations (Ca release/uptake/leak and buffering; the new
    //Cai/CaSR come from solving the quadratic buffering equilibrium)
    Caisquare=Cai*Cai;
    CaSRsquare=CaSR*CaSR;
    CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
    A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
    Irel=A*sd*sg;
    Ileak=0.00008f*(CaSR-Cai);
    SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
    CaSRCurrent=SERCA-Irel-Ileak;
    CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
    dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
    bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
    cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
    CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
    CaBuf=Bufc*Cai/(Cai+Kbufc);
    dCai=dt*(CaCurrent-CaSRCurrent);
    bc=Bufc-CaBuf-dCai-Cai+Kbufc;
    cc=Kbufc*(CaBuf+dCai+Cai);
    Cai=(sqrt(bc*bc+4*cc)-bc)/2;

    dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
    Nai+=dt*dNai;

    dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
    Ki+=dt*dKi;

    //compute steady state values and time constants
    AM=1./(1.+exp((-60.-svolt)/5.));
    BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
    TAU_M=AM*BM;
    M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
    if (svolt>=-40.)
    {
        AH_1=0.;
        BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
        TAU_H= 1.0/(AH_1+BH_1);
    }
    else
    {
        AH_2=(0.057*exp(-(svolt+80.)/6.8));
        BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
        TAU_H=1.0/(AH_2+BH_2);
    }
    H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
    if(svolt>=-40.)
    {
        AJ_1=0.;
        BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
        TAU_J= 1.0/(AJ_1+BJ_1);
    }
    else
    {
        AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
               exp(-0.04391*svolt))*(svolt+37.78)/
              (1.+exp(0.311*(svolt+79.23))));
        BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
        TAU_J= 1.0/(AJ_2+BJ_2);
    }
    J_INF=H_INF;

    Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
    axr1=450./(1.+exp((-45.-svolt)/10.));
    bxr1=6./(1.+exp((svolt-(-30.))/11.5));
    TAU_Xr1=axr1*bxr1;
    Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
    axr2=3./(1.+exp((-60.-svolt)/20.));
    bxr2=1.12/(1.+exp((svolt-60.)/20.));
    TAU_Xr2=axr2*bxr2;

    Xs_INF=1./(1.+exp((-5.-svolt)/14.));
    Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
    Bxs=1./(1.+exp((svolt-60.)/20.));
    TAU_Xs=Axs*Bxs;

    // [!] Myocardium cell
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;

    D_INF=1./(1.+exp((-5-svolt)/7.5));
    Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
    Bd=1.4/(1.+exp((svolt+5)/5));
    Cd=1./(1.+exp((50-svolt)/20));
    TAU_D=Ad*Bd+Cd;
    F_INF=1./(1.+exp((svolt+20)/7));
    TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));

    FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
             0.1/(1.+exp((Cai-0.0005)/0.0001))+
             0.20/(1.+exp((Cai-0.00075)/0.0008))+
             0.23 )/1.46;
    if(Cai<0.00035)
        G_INF=1./(1.+pow((Cai/0.00035),6));
    else
        G_INF=1./(1.+pow((Cai/0.00035),16));

    //Update gates (exponential Rush-Larsen step toward the steady state)
    rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
    rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
    rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
    rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
    rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
    rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
    rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
    rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
    rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
    rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);

    // FCa and G gates may only decrease while the membrane is depolarized
    // (V > -37 mV): revert to the previous value if they increased.
    fcaold= sfca;
    sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
    if(sfca>fcaold && (svolt)>-37.0)
        sfca = fcaold;
    gold = sg;
    sg = G_INF-(G_INF-sg)*exptaug;
    if(sg>gold && (svolt)>-37.0)
        sg=gold;

    //update voltage
    rDY_[0] = svolt + dt*(-sItot);
    rDY_[11] = sfca;
    rDY_[12] = sg;
    rDY_[13] = Cai;
    rDY_[14] = CaSR;
    rDY_[15] = Nai;
    rDY_[16] = Ki;
}
inline __device__ void RHS_gpu_epi (real *sv_, real *rDY_, real stim_current, int threadID_, real dt)
{
// State variables
real svolt = *((real*)((char*)sv_ + pitch * 0) + threadID_);
real sm = *((real*)((char*)sv_ + pitch * 1) + threadID_);
real sh = *((real*)((char*)sv_ + pitch * 2) + threadID_);
real sj = *((real*)((char*)sv_ + pitch * 3) + threadID_);
real sxr1 = *((real*)((char*)sv_ + pitch * 4) + threadID_);
real sxr2 = *((real*)((char*)sv_ + pitch * 5) + threadID_);
real sxs = *((real*)((char*)sv_ + pitch * 6) + threadID_);
real ss = *((real*)((char*)sv_ + pitch * 7) + threadID_);
real sr = *((real*)((char*)sv_ + pitch * 8) + threadID_);
real sd = *((real*)((char*)sv_ + pitch * 9) + threadID_);
real sf = *((real*)((char*)sv_ + pitch * 10) + threadID_);
real sfca = *((real*)((char*)sv_ + pitch * 11) + threadID_);
real sg = *((real*)((char*)sv_ + pitch * 12) + threadID_);
real Cai = *((real*)((char*)sv_ + pitch * 13) + threadID_);
real CaSR = *((real*)((char*)sv_ + pitch * 14) + threadID_);
real Nai = *((real*)((char*)sv_ + pitch * 15) + threadID_);
real Ki = *((real*)((char*)sv_ + pitch * 16) + threadID_);
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
// [!] Epicardium cell
real Gks=0.245;
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
// [!] Epicardium cell
real Gto=0.294;
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
Irel=A*sd*sg;
Ileak=0.00008f*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
| a2524db6d9ffc8ac11ebf4ae61cbac539afbf114.cu | #include <stddef.h>
#include <stdint.h>
#include "model_gpu_utils.h"
#include "mixed_tentusscher_myo_epi_2004.h"
// Host-side entry point, declared through the SET_ODE_INITIAL_CONDITIONS_GPU
// macro (parameters such as num_volumes, sv, extra_data and
// extra_data_bytes_size come from that macro -- TODO confirm against its
// definition). Allocates the pitched device state array (NEQ rows, one per
// state variable, x num_volumes cells), publishes the row pitch to the
// device-side `pitch` symbol, optionally uploads the per-cell type mapping,
// and launches the initial-conditions kernel. Returns the pitch in bytes.
extern "C" SET_ODE_INITIAL_CONDITIONS_GPU(set_model_initial_conditions_gpu)
{
print_to_stdout_and_file("Using mixed version of TenTusscher 2004 myocardium + epicardium GPU model\n\n");
// execution configuration: one thread per cell, ceil-divided into blocks
const int GRID = (num_volumes + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t size = num_volumes*sizeof(real);
// pitched 2D allocation: NEQ rows (state variables) x num_volumes columns
check_cuda_error(cudaMallocPitch((void **) &(*sv), &pitch_h, size, (size_t )NEQ));
// make the row pitch visible to device code via the `pitch` symbol
check_cuda_error(cudaMemcpyToSymbol(pitch, &pitch_h, sizeof(size_t)));
// Get the mapping array (cell type per volume), if one was supplied
uint32_t *mapping = NULL;
uint32_t *mapping_device = NULL;
if(extra_data)
{
mapping = (uint32_t*)extra_data;
check_cuda_error(cudaMalloc((void **)&mapping_device, extra_data_bytes_size));
check_cuda_error(cudaMemcpy(mapping_device, mapping, extra_data_bytes_size, cudaMemcpyHostToDevice));
}
// NOTE(review): kernel_set_model_inital_conditions dereferences mapping
// unconditionally, so a NULL extra_data would fault on the device --
// confirm callers always provide the mask (solve_model_odes_gpu below
// treats it as mandatory).
kernel_set_model_inital_conditions <<<GRID, BLOCK_SIZE>>>(*sv, mapping_device, num_volumes);
check_cuda_error( cudaPeekAtLastError() );
cudaDeviceSynchronize();
check_cuda_error(cudaFree(mapping_device));
return pitch_h;
}
// Host-side entry point, declared through the SOLVE_MODEL_ODES_GPU macro
// (parameters such as dt, sv, stim_currents, cells_to_solve,
// num_cells_to_solve, num_steps and extra_data come from that macro --
// TODO confirm against its definition). Uploads the stimulus currents, the
// optional cells_to_solve index list (adaptive meshes) and the mandatory
// cell-type mapping, launches the time-stepping kernel, then releases the
// scratch device buffers.
extern "C" SOLVE_MODEL_ODES_GPU(solve_model_odes_gpu)
{
// execution configuration: one thread per cell to solve
const int GRID = ((int)num_cells_to_solve + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t stim_currents_size = sizeof(real)*num_cells_to_solve;
size_t cells_to_solve_size = sizeof(uint32_t)*num_cells_to_solve;
real *stims_currents_device;
check_cuda_error(cudaMalloc((void **) &stims_currents_device, stim_currents_size));
check_cuda_error(cudaMemcpy(stims_currents_device, stim_currents, stim_currents_size, cudaMemcpyHostToDevice));
// the cells_to_solve array is only passed when using an adaptive mesh
uint32_t *cells_to_solve_device = NULL;
if(cells_to_solve != NULL)
{
check_cuda_error(cudaMalloc((void **) &cells_to_solve_device, cells_to_solve_size));
check_cuda_error(cudaMemcpy(cells_to_solve_device, cells_to_solve, cells_to_solve_size, cudaMemcpyHostToDevice));
}
// Get the mapping array (cell type per volume); mandatory for the mixed model
uint32_t *mapping = NULL;
uint32_t *mapping_device = NULL;
if(extra_data)
{
mapping = (uint32_t*)extra_data;
check_cuda_error(cudaMalloc((void **)&mapping_device, extra_data_bytes_size));
check_cuda_error(cudaMemcpy(mapping_device, mapping, extra_data_bytes_size, cudaMemcpyHostToDevice));
}
else
{
// mixed myo/epi model cannot run without a per-cell mask
print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
}
solve_gpu <<<GRID, BLOCK_SIZE>>>(dt, sv, stims_currents_device, cells_to_solve_device, mapping_device, num_cells_to_solve, num_steps);
check_cuda_error( cudaPeekAtLastError() );
// release the per-call scratch buffers
check_cuda_error(cudaFree(stims_currents_device));
if(cells_to_solve_device) check_cuda_error(cudaFree(cells_to_solve_device));
if(mapping_device) check_cuda_error(cudaFree(mapping_device));
}
// One thread per cell: writes the 17 steady-state initial values for cell
// `threadID` into the pitched state array `sv` (one pitched row per state
// variable; the row pitch comes from the device `pitch` symbol).
// mapping[threadID] selects the cell type: 0 -> myocardium, otherwise
// epicardium. mapping is dereferenced without a NULL check, so the caller
// must always supply it. (Name keeps the original "inital" spelling because
// callers reference it.)
__global__ void kernel_set_model_inital_conditions(real *sv, uint32_t *mapping, int num_volumes)
{
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
if (threadID < num_volumes)
{
// Initial conditions for TenTusscher 2004 myocardium
if (mapping[threadID] == 0)
{
// Original (paper) resting-state values, kept for reference:
// *((real * )((char *) sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt
// *((real * )((char *) sv + pitch * 1) + threadID) = 0.f; //M
// *((real * )((char *) sv + pitch * 2) + threadID) = 0.75; //H
// *((real * )((char *) sv + pitch * 3) + threadID) = 0.75f; //J
// *((real * )((char *) sv + pitch * 4) + threadID) = 0.f; //Xr1
// *((real * )((char *) sv + pitch * 5) + threadID) = 1.f; //Xr2
// *((real * )((char *) sv + pitch * 6) + threadID) = 0.f; //Xs
// *((real * )((char *) sv + pitch * 7) + threadID) = 1.f; //S
// *((real * )((char *) sv + pitch * 8) + threadID) = 0.f; //R
// *((real * )((char *) sv + pitch * 9) + threadID) = 0.f; //D
// *((real * )((char *) sv + pitch * 10) + threadID) = 1.f; //F
// *((real * )((char *) sv + pitch * 11) + threadID) = 1.f; //FCa
// *((real * )((char *) sv + pitch * 12) + threadID) = 1.f; //G
// *((real * )((char *) sv + pitch * 13) + threadID) = 0.0002; //Cai
// *((real * )((char *) sv + pitch * 14) + threadID) = 0.2f; //CaSR
// *((real * )((char *) sv + pitch * 15) + threadID) = 11.6f; //Nai
// *((real * )((char *) sv + pitch * 16) + threadID) = 138.3f; //Ki
// Pre-paced steady state for the myocardium cell (17 entries, one per
// state variable, in the same row order used throughout this file)
real sv11[]={-86.3965119057144,0.00133824305081220,0.775463576993407,0.775278393595599,0.000179499343643571,0.483303039835057,0.00297647859235379,0.999998290403642,1.98961879737287e-08,1.93486789479597e-05,0.999599147019885,1.00646342475688,0.999975178010127,5.97703651642618e-05,0.418325344820368,10.7429775420171,138.918155900633};
*((real * )((char *) sv + pitch * 0) + threadID) =sv11[0]; // V; millivolt
*((real * )((char *) sv + pitch * 1) + threadID) =sv11[1]; //M
*((real * )((char *) sv + pitch * 2) + threadID) =sv11[2]; //H
*((real * )((char *) sv + pitch * 3) + threadID) = sv11[3]; //J
*((real * )((char *) sv + pitch * 4) + threadID) =sv11[4]; //Xr1
*((real * )((char *) sv + pitch * 5) + threadID) =sv11[5]; //Xr2
*((real * )((char *) sv + pitch * 6) + threadID) = sv11[6]; //Xs
*((real * )((char *) sv + pitch * 7) + threadID) =sv11[7]; //S
*((real * )((char *) sv + pitch * 8) + threadID) =sv11[8]; //R
*((real * )((char *) sv + pitch * 9) + threadID) =sv11[9]; //D
*((real * )((char *) sv + pitch * 10) + threadID) =sv11[10]; //F
*((real * )((char *) sv + pitch * 11) + threadID) =sv11[11]; //FCa
*((real * )((char *) sv + pitch * 12) + threadID) =sv11[12]; //G
*((real * )((char *) sv + pitch * 13) + threadID) = sv11[13]; //Cai
*((real * )((char *) sv + pitch * 14) + threadID) =sv11[14]; //CaSR
*((real * )((char *) sv + pitch * 15) + threadID) = sv11[15]; //Nai
*((real * )((char *) sv + pitch * 16) + threadID) = sv11[16]; //Ki
}
// Initial conditions for TenTusscher 2004 epicardium
else
{
// Original (paper) resting-state values, kept for reference:
// *((real*)((char*)sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt
// *((real*)((char*)sv + pitch * 1) + threadID) = 0.f; //M
// *((real*)((char*)sv + pitch * 2) + threadID) = 0.75; //H
// *((real*)((char*)sv + pitch * 3) + threadID) = 0.75f; //J
// *((real*)((char*)sv + pitch * 4) + threadID) = 0.f; //Xr1
// *((real*)((char*)sv + pitch * 5) + threadID) = 1.f; //Xr2
// *((real*)((char*)sv + pitch * 6) + threadID) = 0.f; //Xs
// *((real*)((char*)sv + pitch * 7) + threadID) = 1.f; //S
// *((real*)((char*)sv + pitch * 8) + threadID) = 0.f; //R
// *((real*)((char*)sv + pitch * 9) + threadID) = 0.f; //D
// *((real*)((char*)sv + pitch * 10) + threadID) = 1.f; //F
// *((real*)((char*)sv + pitch * 11) + threadID) = 1.f; //FCa
// *((real*)((char*)sv + pitch * 12) + threadID) = 1.f; //G
// *((real*)((char*)sv + pitch * 13) + threadID) = 0.0002; //Cai
// *((real*)((char*)sv + pitch * 14) + threadID) = 0.2f; //CaSR
// *((real*)((char*)sv + pitch * 15) + threadID) = 11.6f; //Nai
// *((real*)((char*)sv + pitch * 16) + threadID) = 138.3f; //Ki
/// initial condition
// Pre-paced steady state for the epicardium cell
real sv11[]={-86.4172552153702,0.00133233093318418,0.775980725003160,0.775871451583533,0.000178484465968596,0.483518904573916,0.00297208335439809,0.999998297825169,1.98274727808946e-08,1.92952362196655e-05,0.999768268008847,1.00667048889468,0.999984854519288,5.50424977684767e-05,0.352485262813812,10.8673127043200,138.860197273148};
*((real * )((char *) sv + pitch * 0) + threadID) =sv11[0]; // V; millivolt
*((real * )((char *) sv + pitch * 1) + threadID) =sv11[1]; //M
*((real * )((char *) sv + pitch * 2) + threadID) =sv11[2]; //H
*((real * )((char *) sv + pitch * 3) + threadID) = sv11[3]; //J
*((real * )((char *) sv + pitch * 4) + threadID) =sv11[4]; //Xr1
*((real * )((char *) sv + pitch * 5) + threadID) =sv11[5]; //Xr2
*((real * )((char *) sv + pitch * 6) + threadID) = sv11[6]; //Xs
*((real * )((char *) sv + pitch * 7) + threadID) =sv11[7]; //S
*((real * )((char *) sv + pitch * 8) + threadID) =sv11[8]; //R
*((real * )((char *) sv + pitch * 9) + threadID) =sv11[9]; //D
*((real * )((char *) sv + pitch * 10) + threadID) =sv11[10]; //F
*((real * )((char *) sv + pitch * 11) + threadID) =sv11[11]; //FCa
*((real * )((char *) sv + pitch * 12) + threadID) =sv11[12]; //G
*((real * )((char *) sv + pitch * 13) + threadID) = sv11[13]; //Cai
*((real * )((char *) sv + pitch * 14) + threadID) =sv11[14]; //CaSR
*((real * )((char *) sv + pitch * 15) + threadID) = sv11[15]; //Nai
*((real * )((char *) sv + pitch * 16) + threadID) = sv11[16]; //Ki
}
}
}
// Time-stepping kernel: each thread advances one cell through num_steps
// explicit-Euler steps of size dt, dispatching to the myocardium or
// epicardium right-hand side according to mapping[sv_id] (0 -> myo,
// otherwise epi). When cells_to_solve is non-NULL it maps thread index ->
// cell index (adaptive mesh); otherwise the thread index is the cell index.
__global__ void solve_gpu(real dt, real *sv, real* stim_currents,
uint32_t *cells_to_solve, uint32_t *mapping, uint32_t num_cells_to_solve,
int num_steps)
{
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
int sv_id;
// Each thread solves one cell model
if(threadID < num_cells_to_solve)
{
if(cells_to_solve)
sv_id = cells_to_solve[threadID];
else
sv_id = threadID;
real rDY[NEQ];
for (int n = 0; n < num_steps; ++n)
{
if (mapping[sv_id] == 0)
{
// myocardium cell. NOTE(review): RHS_gpu_myo writes already-updated
// state values into rDY (e.g. Rush-Larsen gate updates), yet the
// loop below applies a further "old + dt*rDY" step -- this double
// use of dt looks deliberate (both branches do it) but confirm
// against the intended integration scheme.
RHS_gpu_myo(sv, rDY, stim_currents[threadID], sv_id, dt);
for(int i = 0; i < NEQ; i++)
{
*((real *) ((char *) sv + pitch * i) + sv_id) = dt * rDY[i] + *((real *) ((char *) sv + pitch * i) + sv_id);
}
}
else
{
// epicardium cell: same update applied to the epi right-hand side
RHS_gpu_epi(sv, rDY, stim_currents[threadID], sv_id, dt);
for (int i = 0; i < NEQ; i++)
{
*((real *) ((char *) sv + pitch * i) + sv_id) = dt * rDY[i] + *((real *) ((char *) sv + pitch * i) + sv_id);
}
}
}
}
}
// Right-hand side of the TenTusscher et al. 2004 *myocardium* cell model for
// one cell. Reads the 17 state variables of cell threadID_ from the pitched
// state array sv_, computes the membrane currents and fills rDY_ with the
// next-state values: rDY_[1..12] hold gate values already advanced with an
// exponential (Rush-Larsen-style) update, rDY_[0] holds svolt + dt*(-sItot),
// and rDY_[13..16] hold the updated concentrations. The caller (solve_gpu)
// applies an additional "old + dt*rDY" step on top of these values.
inline __device__ void RHS_gpu_myo (real *sv_, real *rDY_, real stim_current, int threadID_, real dt)
{
// State variables
real svolt = *((real*)((char*)sv_ + pitch * 0) + threadID_);
real sm = *((real*)((char*)sv_ + pitch * 1) + threadID_);
real sh = *((real*)((char*)sv_ + pitch * 2) + threadID_);
real sj = *((real*)((char*)sv_ + pitch * 3) + threadID_);
real sxr1 = *((real*)((char*)sv_ + pitch * 4) + threadID_);
real sxr2 = *((real*)((char*)sv_ + pitch * 5) + threadID_);
real sxs = *((real*)((char*)sv_ + pitch * 6) + threadID_);
real ss = *((real*)((char*)sv_ + pitch * 7) + threadID_);
real sr = *((real*)((char*)sv_ + pitch * 8) + threadID_);
real sd = *((real*)((char*)sv_ + pitch * 9) + threadID_);
real sf = *((real*)((char*)sv_ + pitch * 10) + threadID_);
real sfca = *((real*)((char*)sv_ + pitch * 11) + threadID_);
real sg = *((real*)((char*)sv_ + pitch * 12) + threadID_);
real Cai = *((real*)((char*)sv_ + pitch * 13) + threadID_);
real CaSR = *((real*)((char*)sv_ + pitch * 14) + threadID_);
real Nai = *((real*)((char*)sv_ + pitch * 15) + threadID_);
real Ki = *((real*)((char*)sv_ + pitch * 16) + threadID_);
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
// [!] Myocardium cell
real Gks=0.062;
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
// [!] Myocardium cell
real Gto=0.294;
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
// Scratch variables for currents, reversal potentials, gate rates and
// steady states computed below
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents (Nernst reversal potentials)
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
Irel=A*sd*sg;
Ileak=0.00008f*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
// quadratic solve for the buffered SR calcium concentration
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
// [!] Myocardium cell
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates (exponential / Rush-Larsen-style update toward steady state)
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
// fCa and g gates may only move when they are decreasing or the cell is
// sufficiently depolarized (svolt <= -37 mV)
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
// Right-hand side of the TenTusscher et al. 2004 *epicardium* cell model for
// one cell. Structure is identical to RHS_gpu_myo above; the only parameter
// difference visible here is Gks (0.245 vs 0.062 for the myocardium cell).
// Fills rDY_ with next-state values; see RHS_gpu_myo for the rDY_ layout and
// the interaction with solve_gpu's Euler update.
inline __device__ void RHS_gpu_epi (real *sv_, real *rDY_, real stim_current, int threadID_, real dt)
{
// State variables
real svolt = *((real*)((char*)sv_ + pitch * 0) + threadID_);
real sm = *((real*)((char*)sv_ + pitch * 1) + threadID_);
real sh = *((real*)((char*)sv_ + pitch * 2) + threadID_);
real sj = *((real*)((char*)sv_ + pitch * 3) + threadID_);
real sxr1 = *((real*)((char*)sv_ + pitch * 4) + threadID_);
real sxr2 = *((real*)((char*)sv_ + pitch * 5) + threadID_);
real sxs = *((real*)((char*)sv_ + pitch * 6) + threadID_);
real ss = *((real*)((char*)sv_ + pitch * 7) + threadID_);
real sr = *((real*)((char*)sv_ + pitch * 8) + threadID_);
real sd = *((real*)((char*)sv_ + pitch * 9) + threadID_);
real sf = *((real*)((char*)sv_ + pitch * 10) + threadID_);
real sfca = *((real*)((char*)sv_ + pitch * 11) + threadID_);
real sg = *((real*)((char*)sv_ + pitch * 12) + threadID_);
real Cai = *((real*)((char*)sv_ + pitch * 13) + threadID_);
real CaSR = *((real*)((char*)sv_ + pitch * 14) + threadID_);
real Nai = *((real*)((char*)sv_ + pitch * 15) + threadID_);
real Ki = *((real*)((char*)sv_ + pitch * 16) + threadID_);
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
// [!] Epicardium cell
real Gks=0.245;
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
// [!] Epicardium cell
real Gto=0.294;
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
// Scratch variables for currents, reversal potentials, gate rates and
// steady states computed below
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents (Nernst reversal potentials)
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
Irel=A*sd*sg;
Ileak=0.00008f*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
// quadratic solve for the buffered SR calcium concentration
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates (exponential / Rush-Larsen-style update toward steady state)
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
// fCa and g gates may only move when they are decreasing or the cell is
// sufficiently depolarized (svolt <= -37 mV)
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
|
ecbedd8e8178e805dca3e20f172c89031c00325c.hip | // !!! This is a file automatically generated by hipify!!!
/*
*
* Multiplicación de Matrices en CUDA
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <cassert>
#include <time.h>
//PP#include <hip/hip_runtime.h>
/* Utilidad para checar errores de CUDA */
void checkCUDAError(const char*);
// Matrix multiplication kernel: C = A * B for square N x N row-major matrices.
// Expects a 2D launch where (blockIdx.y*blockDim.y+threadIdx.y,
// blockIdx.x*blockDim.x+threadIdx.x) covers every (row, col); threads that
// fall outside the matrix do nothing.
__global__ void matrix_multiplication(float *d_A, float *d_B, float *d_C, int N) {
    // global row and column handled by this thread
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    // Guard BEFORE any write: the previous version zeroed d_C[row*N+col]
    // unconditionally, which is an out-of-bounds global store whenever the
    // grid overshoots the matrix dimensions.
    if (row < N && col < N) {
        // accumulate the dot product in a register instead of
        // read-modify-writing global memory on every iteration
        float sum = 0.0f;
        for (int i = 0; i < N; i++) {
            sum += d_A[row * N + i] * d_B[i * N + col];
        }
        d_C[row * N + col] = sum;
    }
}
// CPU reference check: recomputes C = A * B on the host and asserts that the
// GPU result matches each element within a small tolerance. Exact float
// equality (the previous check) is unreliable because the device may order
// and fuse the floating-point operations differently from the host.
// A, B, C are row-major N x N matrices.
void verify_result(float *A, float *B, float *C, int N) {
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++) {
            float sum = 0;
            for (int k = 0; k < N; k++) {
                sum += A[i * N + k] * B[k * N + j];
            }
            // tolerance scales with the magnitude of the expected value
            float tol = 1e-3f + 1e-5f * fabsf(sum);
            assert(fabsf(sum - C[i * N + j]) <= tol);
        }
    }
}
// Main routine: allocates host/device matrices, fills A and B with random
// values in [0, 100), runs the HIP multiplication kernel and reports the
// elapsed wall-clock time. Usage: mul <p> computes with N = 2^p (p >= 5, so
// N is always a multiple of the 32-thread block edge and the grid tiles the
// matrix exactly).
int main(int argc, char *argv[]) {
float *h_A, *h_B, *h_C; // host (CPU) matrices
float *d_A, *d_B, *d_C; // device (GPU) matrices
if (argc < 2) {
printf("usage: mul <matrix-dimension-power-2>\n");
exit(-1);
}
if (atoi(argv[1]) < 5) {
printf("Please provide a dimension higher than 4\n");
exit(-1);
}
int N = 1 << atoi(argv[1]); // rows and columns
int MTX_SIZE = N * N; // number of matrix elements
size_t size = MTX_SIZE * sizeof(float); // matrix size in bytes
// Allocate host memory
// NOTE(review): malloc results are not checked; a failed allocation would
// surface as a crash in the init loop below.
h_A = (float *) malloc(size);
h_B = (float *) malloc(size);
h_C = (float *) malloc(size);
// Allocate device memory
// NOTE(review): these hipMalloc calls (and the hipMemcpy calls below) are
// unchecked; checkCUDAError is only consulted after the kernel launch and
// the final device-to-host copy.
hipMalloc((void **) &d_A, size);
hipMalloc((void **) &d_B, size);
hipMalloc((void **) &d_C, size);
// initialize matrices with pseudo-random values (rand() is unseeded, so
// runs are repeatable)
for (int i = 0; i < MTX_SIZE; i++) {
h_A[i] = (float)(rand() % 100);
h_B[i] = (float)(rand() % 100);
h_C[i] = (float)0;
}
// copy inputs from host to device
hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice);
// wall-clock timing (one-second resolution; device event timers would be
// finer-grained)
time_t t1, t2;
// launch configuration: 32x32 thread blocks tiling the N x N matrix
int n_threads = 32;
int n_blocks = N / n_threads;
dim3 dimBlock(n_threads, n_threads);
dim3 dimGrid(n_blocks, n_blocks);
t1 = time(NULL);
hipLaunchKernelGGL(( matrix_multiplication), dim3(dimGrid), dim3(dimBlock), 0, 0, d_A, d_B, d_C, N);
// wait for the kernel to finish before stopping the clock
hipDeviceSynchronize();
checkCUDAError("kernel invocation");
// timing execution
t2 = time(NULL);
printf("Execution time: %f sec\n", difftime(t2, t1));
// copy result back to host
hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost);
checkCUDAError("memcpy");
// CPU verification left disabled: verify_result uses exact float equality,
// which can fail for large N even when the GPU result is correct
// printf("Verifying result in CPU...\n");
// verify_result(h_A, h_B, h_C, N);
printf("Success!\n");
// Free host and device memory
free(h_A);
free(h_B);
free(h_C);
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
}
// Utility function to check for and report CUDA/HIP errors.
// Prints `msg` plus the runtime's error string to stderr and terminates the
// process if any error is outstanding; hipGetLastError also clears the
// sticky error state it reads.
void checkCUDAError(const char *msg)
{
hipError_t err = hipGetLastError();
if( hipSuccess != err)
{
fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString( err) );
exit(EXIT_FAILURE);
}
} | ecbedd8e8178e805dca3e20f172c89031c00325c.cu | /*
*
* Multiplicación de Matrices en CUDA
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <cassert>
#include <time.h>
//PP#include <cuda.h>
/* Utilidad para checar errores de CUDA */
void checkCUDAError(const char*);
// Kernel de multiplicación de matrices
__global__ void matrix_multiplication(float *d_A, float *d_B, float *d_C, int N) {
// calculando renglón y columna
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
// producto punto entre renglón de A y columna de B
d_C[row * N + col] = (float)0;
if (row < N && col < N) {
for (int i = 0; i < N; i++) {
d_C[row * N + col] += d_A[row * N + i] * d_B[i * N + col];
}
}
}
// Verificando resultado en el CPU
void verify_result(float *A, float *B, float *C, int N) {
for (unsigned int i = 0; i < N; i++) {
for (unsigned int j = 0; j < N; j++) {
float sum = 0;
for (unsigned int k = 0; k < N; k++) {
sum += A[i * N + k] * B[k * N + j];
}
// check against GPU result
assert(sum == C[i * N + j]);
}
}
}
// Main routine
int main(int argc, char *argv[]) {
float *h_A, *h_B, *h_C; // matrices en CPU
float *d_A, *d_B, *d_C; // matrices en GPU
if (argc < 2) {
printf("usage: mul <matrix-dimension-power-2>\n");
exit(-1);
}
if (atoi(argv[1]) < 5) {
printf("Please provide a dimension higher than 4\n");
exit(-1);
}
int N = 1 << atoi(argv[1]); // filas y renglones
int MTX_SIZE = N * N; // matriz de tamaño
size_t size = MTX_SIZE * sizeof(float); // tamaño de matriz en bytes
// Reservar memoria en CPU
h_A = (float *) malloc(size);
h_B = (float *) malloc(size);
h_C = (float *) malloc(size);
// Reservar memoria en GPU
cudaMalloc((void **) &d_A, size);
cudaMalloc((void **) &d_B, size);
cudaMalloc((void **) &d_C, size);
// inicializando matrices
for (int i = 0; i < MTX_SIZE; i++) {
h_A[i] = (float)(rand() % 100);
h_B[i] = (float)(rand() % 100);
h_C[i] = (float)0;
}
// copiando de CPU a GPU
cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
// verificando tiempo de ejecución
time_t t1, t2;
// corriendo kernel en el GPU
int n_threads = 32;
int n_blocks = N / n_threads;
dim3 dimBlock(n_threads, n_threads);
dim3 dimGrid(n_blocks, n_blocks);
t1 = time(NULL);
matrix_multiplication<<<dimGrid, dimBlock>>>(d_A, d_B, d_C, N);
// esperando a que acaben los hilos
cudaThreadSynchronize();
checkCUDAError("kernel invocation");
// timing execution
t2 = time(NULL);
printf("Execution time: %f sec\n", difftime(t2, t1));
// copiando resultado de regreso al CPU
cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
checkCUDAError("memcpy");
// verificando resultado
// printf("Verifying result in CPU...\n");
// verify_result(h_A, h_B, h_C, N);
printf("Success!\n");
// Liberar memoria
free(h_A);
free(h_B);
free(h_C);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
}
// Utility function to check for and report CUDA errors
void checkCUDAError(const char *msg)
{
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err)
{
fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) );
exit(EXIT_FAILURE);
}
} |
3b5c5f6e6d5bba3823dbd729d373eee0654f599c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* a simple test
*/
__shared__ float data1[32][32];
__shared__ float data2[32][32];
__shared__ float data3[32][32];
__device__ void mult(float d1[32][32],
float d2[32][32],
float d3[32][32],
int idx0,
int idx1,
int idx2,
int incr)
{
int i;
int iv0 = 0;
int iv1 = 0;
int iv2 = 0;
for (i = 0; i < 32; i++)
{
d1[idx0][iv2] = d2[idx1][iv0] + d3[idx2][iv1];
iv0 += incr;
iv1 += incr;
iv2 += incr;
}
}
__global__ void doit(int start, int end, int incr) {
int i;
int id0 = start;
int id1 = start;
int id2 = start;
for (i = start; i < end; i++) {
mult(data1, data1, data1, id0, id1, id2, incr);
id0 += incr;
id1 += incr;
id2 += incr;
}
}
| 3b5c5f6e6d5bba3823dbd729d373eee0654f599c.cu | /*
* a simple test
*/
__shared__ float data1[32][32];
__shared__ float data2[32][32];
__shared__ float data3[32][32];
__device__ void mult(float d1[32][32],
float d2[32][32],
float d3[32][32],
int idx0,
int idx1,
int idx2,
int incr)
{
int i;
int iv0 = 0;
int iv1 = 0;
int iv2 = 0;
for (i = 0; i < 32; i++)
{
d1[idx0][iv2] = d2[idx1][iv0] + d3[idx2][iv1];
iv0 += incr;
iv1 += incr;
iv2 += incr;
}
}
__global__ void doit(int start, int end, int incr) {
int i;
int id0 = start;
int id1 = start;
int id2 = start;
for (i = start; i < end; i++) {
mult(data1, data1, data1, id0, id1, id2, incr);
id0 += incr;
id1 += incr;
id2 += incr;
}
}
|
3f56a278abeb310febaa6394a72f29de46a37667.hip | // !!! This is a file automatically generated by hipify!!!
/***********************************************************************************
Implementing Breadth first search on CUDA using algorithm given in HiPC'07
paper "Accelerating Large Graph Algorithms on the GPU using CUDA"
Copyright (c) 2008 International Institute of Information Technology - Hyderabad.
All rights reserved.
Permission to use, copy, modify and distribute this software and its documentation for
educational purpose is hereby granted without fee, provided that the above copyright
notice and this permission notice appear in all copies of this software and that you do
not sell the software.
THE SOFTWARE IS PROVIDED "AS IS" AND WITHOUT WARRANTY OF ANY KIND,EXPRESS, IMPLIED OR
OTHERWISE.
Created by Pawan Harish.
************************************************************************************/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <hip/hip_runtime.h>
#define MAX_THREADS_PER_BLOCK 512
int no_of_nodes;
int edge_list_size;
FILE *fp;
//Structure to hold a node information
struct Node
{
int starting;
int no_of_edges;
};
#include "kernel.hip"
#include "kernel2.cu"
void BFSGraph(int argc, char** argv);
////////////////////////////////////////////////////////////////////////////////
// Main Program
////////////////////////////////////////////////////////////////////////////////
int main( int argc, char** argv)
{
no_of_nodes=0;
edge_list_size=0;
BFSGraph( argc, argv);
}
void Usage(int argc, char**argv){
fprintf(stderr,"Usage: %s <input_file>\n", argv[0]);
}
////////////////////////////////////////////////////////////////////////////////
//Apply BFS on a Graph using CUDA
////////////////////////////////////////////////////////////////////////////////
void BFSGraph( int argc, char** argv)
{
char *input_f;
if(argc!=2){
Usage(argc, argv);
exit(0);
}
input_f = argv[1];
printf("Reading File\n");
//Read in Graph from a file
fp = fopen(input_f,"r");
if(!fp)
{
printf("Error Reading graph file\n");
return;
}
int source = 0;
fscanf(fp,"%d",&no_of_nodes);
int num_of_blocks = 1;
int num_of_threads_per_block = no_of_nodes;
//Make execution Parameters according to the number of nodes
//Distribute threads across multiple Blocks if necessary
if(no_of_nodes>MAX_THREADS_PER_BLOCK)
{
num_of_blocks = (int)ceil(no_of_nodes/(double)MAX_THREADS_PER_BLOCK);
num_of_threads_per_block = MAX_THREADS_PER_BLOCK;
}
// allocate host memory
Node* h_graph_nodes = (Node*) malloc(sizeof(Node)*no_of_nodes);
bool *h_graph_mask = (bool*) malloc(sizeof(bool)*no_of_nodes);
bool *h_updating_graph_mask = (bool*) malloc(sizeof(bool)*no_of_nodes);
bool *h_graph_visited = (bool*) malloc(sizeof(bool)*no_of_nodes);
int start, edgeno;
// initalize the memory
for( unsigned int i = 0; i < no_of_nodes; i++)
{
fscanf(fp,"%d %d",&start,&edgeno);
h_graph_nodes[i].starting = start;
h_graph_nodes[i].no_of_edges = edgeno;
h_graph_mask[i]=false;
h_updating_graph_mask[i]=false;
h_graph_visited[i]=false;
}
//read the source node from the file
fscanf(fp,"%d",&source);
source=0;
//set the source node as true in the mask
h_graph_mask[source]=true;
h_graph_visited[source]=true;
fscanf(fp,"%d",&edge_list_size);
int id,cost;
int* h_graph_edges = (int*) malloc(sizeof(int)*edge_list_size);
for(int i=0; i < edge_list_size ; i++)
{
fscanf(fp,"%d",&id);
fscanf(fp,"%d",&cost);
h_graph_edges[i] = id;
}
if(fp)
fclose(fp);
printf("Read File\n");
//Copy the Node list to device memory
Node* d_graph_nodes;
hipMalloc( (void**) &d_graph_nodes, sizeof(Node)*no_of_nodes) ;
hipMemcpy( d_graph_nodes, h_graph_nodes, sizeof(Node)*no_of_nodes, hipMemcpyHostToDevice) ;
//Copy the Edge List to device Memory
int* d_graph_edges;
hipMalloc( (void**) &d_graph_edges, sizeof(int)*edge_list_size) ;
hipMemcpy( d_graph_edges, h_graph_edges, sizeof(int)*edge_list_size, hipMemcpyHostToDevice) ;
//Copy the Mask to device memory
bool* d_graph_mask;
hipMalloc( (void**) &d_graph_mask, sizeof(bool)*no_of_nodes) ;
hipMemcpy( d_graph_mask, h_graph_mask, sizeof(bool)*no_of_nodes, hipMemcpyHostToDevice) ;
bool* d_updating_graph_mask;
hipMalloc( (void**) &d_updating_graph_mask, sizeof(bool)*no_of_nodes) ;
hipMemcpy( d_updating_graph_mask, h_updating_graph_mask, sizeof(bool)*no_of_nodes, hipMemcpyHostToDevice) ;
//Copy the Visited nodes array to device memory
bool* d_graph_visited;
hipMalloc( (void**) &d_graph_visited, sizeof(bool)*no_of_nodes) ;
hipMemcpy( d_graph_visited, h_graph_visited, sizeof(bool)*no_of_nodes, hipMemcpyHostToDevice) ;
// allocate mem for the result on host side
int* h_cost = (int*) malloc( sizeof(int)*no_of_nodes);
for(int i=0;i<no_of_nodes;i++)
h_cost[i]=-1;
h_cost[source]=0;
// allocate device memory for result
int* d_cost;
hipMalloc( (void**) &d_cost, sizeof(int)*no_of_nodes);
hipMemcpy( d_cost, h_cost, sizeof(int)*no_of_nodes, hipMemcpyHostToDevice) ;
//make a bool to check if the execution is over
bool *d_over;
hipMalloc( (void**) &d_over, sizeof(bool));
printf("Copied Everything to GPU memory\n");
// setup execution parameters
dim3 grid( num_of_blocks, 1, 1);
dim3 threads( num_of_threads_per_block, 1, 1);
int k=0;
printf("Start traversing the tree\n");
bool stop;
//Call the Kernel untill all the elements of Frontier are not false
do
{
//if no thread changes this value then the loop stops
stop=false;
hipMemcpy( d_over, &stop, sizeof(bool), hipMemcpyHostToDevice) ;
hipLaunchKernelGGL(( Kernel), dim3(grid), dim3(threads), 0 , 0, d_graph_nodes, d_graph_edges, d_graph_mask, d_updating_graph_mask, d_graph_visited, d_cost, no_of_nodes);
// check if kernel execution generated and error
hipLaunchKernelGGL(( Kernel2), dim3(grid), dim3(threads), 0 , 0, d_graph_mask, d_updating_graph_mask, d_graph_visited, d_over, no_of_nodes);
// check if kernel execution generated and error
hipMemcpy( &stop, d_over, sizeof(bool), hipMemcpyDeviceToHost) ;
k++;
}
while(stop);
printf("Kernel Executed %d times\n",k);
// copy result from device to host
hipMemcpy( h_cost, d_cost, sizeof(int)*no_of_nodes, hipMemcpyDeviceToHost) ;
//Store the result into a file
FILE *fpo = fopen("result.txt","w");
for(int i=0;i<no_of_nodes;i++)
fprintf(fpo,"%d) cost:%d\n",i,h_cost[i]);
fclose(fpo);
printf("Result stored in result.txt\n");
// cleanup memory
free( h_graph_nodes);
free( h_graph_edges);
free( h_graph_mask);
free( h_updating_graph_mask);
free( h_graph_visited);
free( h_cost);
hipFree(d_graph_nodes);
hipFree(d_graph_edges);
hipFree(d_graph_mask);
hipFree(d_updating_graph_mask);
hipFree(d_graph_visited);
hipFree(d_cost);
}
| 3f56a278abeb310febaa6394a72f29de46a37667.cu | /***********************************************************************************
Implementing Breadth first search on CUDA using algorithm given in HiPC'07
paper "Accelerating Large Graph Algorithms on the GPU using CUDA"
Copyright (c) 2008 International Institute of Information Technology - Hyderabad.
All rights reserved.
Permission to use, copy, modify and distribute this software and its documentation for
educational purpose is hereby granted without fee, provided that the above copyright
notice and this permission notice appear in all copies of this software and that you do
not sell the software.
THE SOFTWARE IS PROVIDED "AS IS" AND WITHOUT WARRANTY OF ANY KIND,EXPRESS, IMPLIED OR
OTHERWISE.
Created by Pawan Harish.
************************************************************************************/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <cuda.h>
#define MAX_THREADS_PER_BLOCK 512
int no_of_nodes;
int edge_list_size;
FILE *fp;
//Structure to hold a node information
struct Node
{
int starting;
int no_of_edges;
};
#include "kernel.cu"
#include "kernel2.cu"
void BFSGraph(int argc, char** argv);
////////////////////////////////////////////////////////////////////////////////
// Main Program
////////////////////////////////////////////////////////////////////////////////
int main( int argc, char** argv)
{
no_of_nodes=0;
edge_list_size=0;
BFSGraph( argc, argv);
}
void Usage(int argc, char**argv){
fprintf(stderr,"Usage: %s <input_file>\n", argv[0]);
}
////////////////////////////////////////////////////////////////////////////////
//Apply BFS on a Graph using CUDA
////////////////////////////////////////////////////////////////////////////////
void BFSGraph( int argc, char** argv)
{
char *input_f;
if(argc!=2){
Usage(argc, argv);
exit(0);
}
input_f = argv[1];
printf("Reading File\n");
//Read in Graph from a file
fp = fopen(input_f,"r");
if(!fp)
{
printf("Error Reading graph file\n");
return;
}
int source = 0;
fscanf(fp,"%d",&no_of_nodes);
int num_of_blocks = 1;
int num_of_threads_per_block = no_of_nodes;
//Make execution Parameters according to the number of nodes
//Distribute threads across multiple Blocks if necessary
if(no_of_nodes>MAX_THREADS_PER_BLOCK)
{
num_of_blocks = (int)ceil(no_of_nodes/(double)MAX_THREADS_PER_BLOCK);
num_of_threads_per_block = MAX_THREADS_PER_BLOCK;
}
// allocate host memory
Node* h_graph_nodes = (Node*) malloc(sizeof(Node)*no_of_nodes);
bool *h_graph_mask = (bool*) malloc(sizeof(bool)*no_of_nodes);
bool *h_updating_graph_mask = (bool*) malloc(sizeof(bool)*no_of_nodes);
bool *h_graph_visited = (bool*) malloc(sizeof(bool)*no_of_nodes);
int start, edgeno;
// initalize the memory
for( unsigned int i = 0; i < no_of_nodes; i++)
{
fscanf(fp,"%d %d",&start,&edgeno);
h_graph_nodes[i].starting = start;
h_graph_nodes[i].no_of_edges = edgeno;
h_graph_mask[i]=false;
h_updating_graph_mask[i]=false;
h_graph_visited[i]=false;
}
//read the source node from the file
fscanf(fp,"%d",&source);
source=0;
//set the source node as true in the mask
h_graph_mask[source]=true;
h_graph_visited[source]=true;
fscanf(fp,"%d",&edge_list_size);
int id,cost;
int* h_graph_edges = (int*) malloc(sizeof(int)*edge_list_size);
for(int i=0; i < edge_list_size ; i++)
{
fscanf(fp,"%d",&id);
fscanf(fp,"%d",&cost);
h_graph_edges[i] = id;
}
if(fp)
fclose(fp);
printf("Read File\n");
//Copy the Node list to device memory
Node* d_graph_nodes;
cudaMalloc( (void**) &d_graph_nodes, sizeof(Node)*no_of_nodes) ;
cudaMemcpy( d_graph_nodes, h_graph_nodes, sizeof(Node)*no_of_nodes, cudaMemcpyHostToDevice) ;
//Copy the Edge List to device Memory
int* d_graph_edges;
cudaMalloc( (void**) &d_graph_edges, sizeof(int)*edge_list_size) ;
cudaMemcpy( d_graph_edges, h_graph_edges, sizeof(int)*edge_list_size, cudaMemcpyHostToDevice) ;
//Copy the Mask to device memory
bool* d_graph_mask;
cudaMalloc( (void**) &d_graph_mask, sizeof(bool)*no_of_nodes) ;
cudaMemcpy( d_graph_mask, h_graph_mask, sizeof(bool)*no_of_nodes, cudaMemcpyHostToDevice) ;
bool* d_updating_graph_mask;
cudaMalloc( (void**) &d_updating_graph_mask, sizeof(bool)*no_of_nodes) ;
cudaMemcpy( d_updating_graph_mask, h_updating_graph_mask, sizeof(bool)*no_of_nodes, cudaMemcpyHostToDevice) ;
//Copy the Visited nodes array to device memory
bool* d_graph_visited;
cudaMalloc( (void**) &d_graph_visited, sizeof(bool)*no_of_nodes) ;
cudaMemcpy( d_graph_visited, h_graph_visited, sizeof(bool)*no_of_nodes, cudaMemcpyHostToDevice) ;
// allocate mem for the result on host side
int* h_cost = (int*) malloc( sizeof(int)*no_of_nodes);
for(int i=0;i<no_of_nodes;i++)
h_cost[i]=-1;
h_cost[source]=0;
// allocate device memory for result
int* d_cost;
cudaMalloc( (void**) &d_cost, sizeof(int)*no_of_nodes);
cudaMemcpy( d_cost, h_cost, sizeof(int)*no_of_nodes, cudaMemcpyHostToDevice) ;
//make a bool to check if the execution is over
bool *d_over;
cudaMalloc( (void**) &d_over, sizeof(bool));
printf("Copied Everything to GPU memory\n");
// setup execution parameters
dim3 grid( num_of_blocks, 1, 1);
dim3 threads( num_of_threads_per_block, 1, 1);
int k=0;
printf("Start traversing the tree\n");
bool stop;
//Call the Kernel untill all the elements of Frontier are not false
do
{
//if no thread changes this value then the loop stops
stop=false;
cudaMemcpy( d_over, &stop, sizeof(bool), cudaMemcpyHostToDevice) ;
Kernel<<< grid, threads, 0 >>>( d_graph_nodes, d_graph_edges, d_graph_mask, d_updating_graph_mask, d_graph_visited, d_cost, no_of_nodes);
// check if kernel execution generated and error
Kernel2<<< grid, threads, 0 >>>( d_graph_mask, d_updating_graph_mask, d_graph_visited, d_over, no_of_nodes);
// check if kernel execution generated and error
cudaMemcpy( &stop, d_over, sizeof(bool), cudaMemcpyDeviceToHost) ;
k++;
}
while(stop);
printf("Kernel Executed %d times\n",k);
// copy result from device to host
cudaMemcpy( h_cost, d_cost, sizeof(int)*no_of_nodes, cudaMemcpyDeviceToHost) ;
//Store the result into a file
FILE *fpo = fopen("result.txt","w");
for(int i=0;i<no_of_nodes;i++)
fprintf(fpo,"%d) cost:%d\n",i,h_cost[i]);
fclose(fpo);
printf("Result stored in result.txt\n");
// cleanup memory
free( h_graph_nodes);
free( h_graph_edges);
free( h_graph_mask);
free( h_updating_graph_mask);
free( h_graph_visited);
free( h_cost);
cudaFree(d_graph_nodes);
cudaFree(d_graph_edges);
cudaFree(d_graph_mask);
cudaFree(d_updating_graph_mask);
cudaFree(d_graph_visited);
cudaFree(d_cost);
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.