hip_filename
stringlengths
5
84
hip_content
stringlengths
79
9.69M
cuda_filename
stringlengths
4
83
cuda_content
stringlengths
19
9.69M
b2a493b6a5612d15c6a7a5dd97455449e02ddef4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "externalClass.cuh" #define THREAD_X 16 #define THREAD_Y 16 #define THREAD_LUT 32 //Thread_X 16 //Thread_LUT 32 void externalClass::medianCUDAKernel(const cv::gpu::GpuMat &src, cv::gpu::GpuMat &dst, int szK) { dim3 cthreads(THREAD_X, THREAD_Y); dim3 cblocks(static_cast<int>(::ceil(src.size().width / static_cast<double>(cthreads.x))) , static_cast<int>(::ceil(src.size().height / static_cast<double>(cthreads.y)))); hipLaunchKernelGGL(( medianKernel), dim3(cblocks), dim3(cthreads), 0, 0, src.ptr(), dst.ptr(), src.cols, src.rows, szK); } void externalClass::convolutionCUDAKernel(const cv::gpu::GpuMat &src, cv::gpu::GpuMat &dst, int szK) { dim3 cthreads(THREAD_X, THREAD_Y); dim3 cblocks(static_cast<int>(::ceil(src.size().width / static_cast<double>(cthreads.x))) , static_cast<int>(::ceil(src.size().height / static_cast<double>(cthreads.y)))); /*unsigned short* arrayValue = 0; size_t num_bytes = szK * szK * sizeof(unsigned short); hipMalloc ( (void**)& arrayValue, num_bytes);*/ hipLaunchKernelGGL(( convolutionKernel), dim3(cblocks), dim3(cthreads), 0, 0, src.ptr(), dst.ptr(), src.cols, src.rows, szK); } void externalClass::convolutionThreshCUDAKernel(const cv::gpu::GpuMat &src, cv::gpu::GpuMat &dst, int szK, int thresh, int maxval) { dim3 cthreads(THREAD_X, THREAD_Y); dim3 cblocks(static_cast<int>(::ceil(src.size().width / static_cast<double>(cthreads.x))) , static_cast<int>(::ceil(src.size().height / static_cast<double>(cthreads.y)))); hipLaunchKernelGGL(( convolutionKernelThreshold), dim3(cblocks), dim3(cthreads), 0, 0, src.ptr(), dst.ptr(), src.cols, src.rows, szK, thresh, maxval); } void externalClass::fillImgCUDAKernel(const cv::gpu::GpuMat &mask, cv::gpu::GpuMat &dst, int tlX, int tlY, int brX, int brY) { dim3 cthreads(THREAD_X, THREAD_Y); dim3 cblocks(static_cast<int>(::ceil(dst.size().width / static_cast<double>(cthreads.x))) , 
static_cast<int>(::ceil(dst.size().height / static_cast<double>(cthreads.y)))); hipLaunchKernelGGL(( fillImgKernel), dim3(cblocks), dim3(cthreads), 0, 0, mask.ptr(), dst.ptr(), mask.cols, dst.cols, tlX, tlY, brX, brY); } void externalClass::LUT(cv::gpu::GpuMat& lut, const double outByteDepth, const int minValue, const int maxValue) { dim3 cthreads(THREAD_LUT, 1); dim3 cblocks(static_cast<int>(::ceil(lut.size().width / static_cast<double>(cthreads.x))) , static_cast<int>(::ceil(lut.size().height / static_cast<double>(cthreads.y)))); hipLaunchKernelGGL(( lutKernel), dim3(cblocks), dim3(cthreads), 0, 0, lut.ptr<float>(), lut.cols, outByteDepth, minValue, maxValue); } void externalClass::stretching(const cv::gpu::GpuMat& src, const cv::gpu::GpuMat& lut, cv::gpu::GpuMat& dst) { dim3 cthreads(THREAD_X, THREAD_Y); dim3 cblocks(static_cast<int>(::ceil(dst.size().width / static_cast<double>(cthreads.x))) , static_cast<int>(::ceil(dst.size().height / static_cast<double>(cthreads.y)))); hipLaunchKernelGGL(( stretchingKernel), dim3(cblocks), dim3(cthreads), 0, 0, src.ptr<ushort>(), lut.ptr<float>(), dst.ptr(), src.cols, src.rows); } void externalClass::histogram(const cv::gpu::GpuMat& src, cv::gpu::GpuMat& hist) { int threadPerBlock = 1024; int size = src.cols * src.rows; #if 1 int blockPerGrid = 1;//20; #else /*int blockPerGrid = static_cast<int>(::ceil( static_cast<double>(size)/static_cast<double>(threadPerBlock)));*/ #endif hipLaunchKernelGGL(( histKernel), dim3(blockPerGrid), dim3(threadPerBlock), 0, 0, src.ptr<ushort>(), size, hist.ptr<int>()); } void externalClass::lowerLimKernel(const cv::gpu::GpuMat& hist, const int peakPos, const double thresh, int& minValue) { int threadPerBlock = 1; int blockPerGrid = 1; int h_val = 0; int* d_val; hipMalloc(&d_val, sizeof(int)); hipLaunchKernelGGL(( lowerLimit), dim3(blockPerGrid), dim3(threadPerBlock), 0, 0, hist.ptr<int>(), peakPos, thresh, d_val); hipMemcpy(&h_val, d_val, sizeof(int), hipMemcpyDeviceToHost); hipFree(d_val); 
minValue = h_val; } void externalClass::upperLimKernel(const cv::gpu::GpuMat& hist, const int peakPos, const double thresh, int& maxValue) { int threadPerBlock = 1; int blockPerGrid = 1; int h_val = 0; int* d_val; hipMalloc(&d_val, sizeof(int)); hipLaunchKernelGGL(( upperLimit), dim3(blockPerGrid), dim3(threadPerBlock), 0, 0, hist.ptr<int>(), peakPos, hist.cols, thresh, d_val); hipMemcpy(&h_val, d_val, sizeof(int), hipMemcpyDeviceToHost); hipFree(d_val); maxValue = h_val; }
b2a493b6a5612d15c6a7a5dd97455449e02ddef4.cu
#include "externalClass.cuh" #define THREAD_X 16 #define THREAD_Y 16 #define THREAD_LUT 32 //Thread_X 16 //Thread_LUT 32 void externalClass::medianCUDAKernel(const cv::gpu::GpuMat &src, cv::gpu::GpuMat &dst, int szK) { dim3 cthreads(THREAD_X, THREAD_Y); dim3 cblocks(static_cast<int>(std::ceil(src.size().width / static_cast<double>(cthreads.x))) , static_cast<int>(std::ceil(src.size().height / static_cast<double>(cthreads.y)))); medianKernel<<<cblocks, cthreads>>>(src.ptr(), dst.ptr(), src.cols, src.rows, szK); } void externalClass::convolutionCUDAKernel(const cv::gpu::GpuMat &src, cv::gpu::GpuMat &dst, int szK) { dim3 cthreads(THREAD_X, THREAD_Y); dim3 cblocks(static_cast<int>(std::ceil(src.size().width / static_cast<double>(cthreads.x))) , static_cast<int>(std::ceil(src.size().height / static_cast<double>(cthreads.y)))); /*unsigned short* arrayValue = 0; size_t num_bytes = szK * szK * sizeof(unsigned short); cudaMalloc ( (void**)& arrayValue, num_bytes);*/ convolutionKernel<<<cblocks, cthreads>>>(src.ptr(), dst.ptr(), src.cols, src.rows, szK); } void externalClass::convolutionThreshCUDAKernel(const cv::gpu::GpuMat &src, cv::gpu::GpuMat &dst, int szK, int thresh, int maxval) { dim3 cthreads(THREAD_X, THREAD_Y); dim3 cblocks(static_cast<int>(std::ceil(src.size().width / static_cast<double>(cthreads.x))) , static_cast<int>(std::ceil(src.size().height / static_cast<double>(cthreads.y)))); convolutionKernelThreshold<<<cblocks, cthreads>>>(src.ptr(), dst.ptr(), src.cols, src.rows, szK, thresh, maxval); } void externalClass::fillImgCUDAKernel(const cv::gpu::GpuMat &mask, cv::gpu::GpuMat &dst, int tlX, int tlY, int brX, int brY) { dim3 cthreads(THREAD_X, THREAD_Y); dim3 cblocks(static_cast<int>(std::ceil(dst.size().width / static_cast<double>(cthreads.x))) , static_cast<int>(std::ceil(dst.size().height / static_cast<double>(cthreads.y)))); fillImgKernel<<<cblocks, cthreads>>>(mask.ptr(), dst.ptr(), mask.cols, dst.cols, tlX, tlY, brX, brY); } void 
externalClass::LUT(cv::gpu::GpuMat& lut, const double outByteDepth, const int minValue, const int maxValue) { dim3 cthreads(THREAD_LUT, 1); dim3 cblocks(static_cast<int>(std::ceil(lut.size().width / static_cast<double>(cthreads.x))) , static_cast<int>(std::ceil(lut.size().height / static_cast<double>(cthreads.y)))); lutKernel<<<cblocks, cthreads>>>(lut.ptr<float>(), lut.cols, outByteDepth, minValue, maxValue); } void externalClass::stretching(const cv::gpu::GpuMat& src, const cv::gpu::GpuMat& lut, cv::gpu::GpuMat& dst) { dim3 cthreads(THREAD_X, THREAD_Y); dim3 cblocks(static_cast<int>(std::ceil(dst.size().width / static_cast<double>(cthreads.x))) , static_cast<int>(std::ceil(dst.size().height / static_cast<double>(cthreads.y)))); stretchingKernel<<<cblocks, cthreads>>>(src.ptr<ushort>(), lut.ptr<float>(), dst.ptr(), src.cols, src.rows); } void externalClass::histogram(const cv::gpu::GpuMat& src, cv::gpu::GpuMat& hist) { int threadPerBlock = 1024; int size = src.cols * src.rows; #if 1 int blockPerGrid = 1;//20; #else /*int blockPerGrid = static_cast<int>(std::ceil( static_cast<double>(size)/static_cast<double>(threadPerBlock)));*/ #endif histKernel<<<blockPerGrid, threadPerBlock>>>(src.ptr<ushort>(), size, hist.ptr<int>()); } void externalClass::lowerLimKernel(const cv::gpu::GpuMat& hist, const int peakPos, const double thresh, int& minValue) { int threadPerBlock = 1; int blockPerGrid = 1; int h_val = 0; int* d_val; cudaMalloc(&d_val, sizeof(int)); lowerLimit<<<blockPerGrid, threadPerBlock>>>(hist.ptr<int>(), peakPos, thresh, d_val); cudaMemcpy(&h_val, d_val, sizeof(int), cudaMemcpyDeviceToHost); cudaFree(d_val); minValue = h_val; } void externalClass::upperLimKernel(const cv::gpu::GpuMat& hist, const int peakPos, const double thresh, int& maxValue) { int threadPerBlock = 1; int blockPerGrid = 1; int h_val = 0; int* d_val; cudaMalloc(&d_val, sizeof(int)); upperLimit<<<blockPerGrid, threadPerBlock>>>(hist.ptr<int>(), peakPos, hist.cols, thresh, d_val); 
cudaMemcpy(&h_val, d_val, sizeof(int), cudaMemcpyDeviceToHost); cudaFree(d_val); maxValue = h_val; }
b376ffd35b85fe95bf06d66e75a39f13f672736d.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. * * NVIDIA CORPORATION and its licensors retain all intellectual property * and proprietary rights in and to this software, related documentation * and any modifications thereto. Any use, reproduction, disclosure or * distribution of this software and related documentation without an express * license agreement from NVIDIA CORPORATION is strictly prohibited. * */ // Graph analytics features // Author: Alex Fender afender@nvidia.com #include <cugraph.h> #include "graph_utils.cuh" #include "pagerank.cuh" #include "COOtoCSR.cuh" #include "utilities/error_utils.h" #include "bfs.cuh" #include <rmm_utils.h> void gdf_col_delete(gdf_column* col) { if (col) { col->size = 0; if(col->data) { ALLOC_FREE_TRY(col->data, nullptr); } #if 1 // If delete col is executed, the memory pointed by col is no longer valid and // can be used in another memory allocation, so executing col->data = nullptr // after delete col is dangerous, also, col = nullptr has no effect here (the // address is passed by value, for col = nullptr should work, the input // parameter should be gdf_column*& col (or alternatively, gdf_column** col and // *col = nullptr also work) col->data = nullptr; delete col; #else delete col; col->data = nullptr; col = nullptr; #endif } } void gdf_col_release(gdf_column* col) { delete col; } void cpy_column_view(const gdf_column *in, gdf_column *out) { if (in != nullptr && out !=nullptr) { gdf_column_view(out, in->data, in->valid, in->size, in->dtype); } } gdf_error gdf_adj_list_view(gdf_graph *graph, const gdf_column *offsets, const gdf_column *indices, const gdf_column *edge_data) { GDF_REQUIRE( offsets->null_count == 0 , GDF_VALIDITY_UNSUPPORTED ); GDF_REQUIRE( indices->null_count == 0 , GDF_VALIDITY_UNSUPPORTED ); GDF_REQUIRE( (offsets->dtype == indices->dtype), GDF_UNSUPPORTED_DTYPE ); GDF_REQUIRE( ((offsets->dtype == GDF_INT32) || (offsets->dtype 
== GDF_INT64)), GDF_UNSUPPORTED_DTYPE ); GDF_REQUIRE( (offsets->size > 0), GDF_DATASET_EMPTY ); GDF_REQUIRE( (graph->adjList == nullptr) , GDF_INVALID_API_CALL); graph->adjList = new gdf_adj_list; graph->adjList->offsets = new gdf_column; graph->adjList->indices = new gdf_column; graph->adjList->ownership = 0; cpy_column_view(offsets, graph->adjList->offsets); cpy_column_view(indices, graph->adjList->indices); if (edge_data) { GDF_REQUIRE( indices->size == edge_data->size, GDF_COLUMN_SIZE_MISMATCH ); graph->adjList->edge_data = new gdf_column; cpy_column_view(edge_data, graph->adjList->edge_data); } else { graph->adjList->edge_data = nullptr; } return GDF_SUCCESS; } gdf_error gdf_adj_list::get_vertex_identifiers(gdf_column *identifiers) { GDF_REQUIRE( offsets != nullptr , GDF_INVALID_API_CALL); GDF_REQUIRE( offsets->data != nullptr , GDF_INVALID_API_CALL); cugraph::sequence<int>((int)offsets->size-1, (int*)identifiers->data); return GDF_SUCCESS; } gdf_error gdf_adj_list::get_source_indices (gdf_column *src_indices) { GDF_REQUIRE( offsets != nullptr , GDF_INVALID_API_CALL); GDF_REQUIRE( offsets->data != nullptr , GDF_INVALID_API_CALL); GDF_REQUIRE( src_indices->size == indices->size, GDF_COLUMN_SIZE_MISMATCH ); GDF_REQUIRE( src_indices->dtype == indices->dtype, GDF_UNSUPPORTED_DTYPE ); GDF_REQUIRE( src_indices->size > 0, GDF_DATASET_EMPTY ); cugraph::offsets_to_indices<int>((int*)offsets->data, offsets->size-1, (int*)src_indices->data); return GDF_SUCCESS; } gdf_error gdf_edge_list_view(gdf_graph *graph, const gdf_column *src_indices, const gdf_column *dest_indices, const gdf_column *edge_data) { GDF_REQUIRE( src_indices->size == dest_indices->size, GDF_COLUMN_SIZE_MISMATCH ); GDF_REQUIRE( src_indices->dtype == dest_indices->dtype, GDF_UNSUPPORTED_DTYPE ); GDF_REQUIRE( ((src_indices->dtype == GDF_INT32) || (src_indices->dtype == GDF_INT64)), GDF_UNSUPPORTED_DTYPE ); GDF_REQUIRE( src_indices->size > 0, GDF_DATASET_EMPTY ); GDF_REQUIRE( src_indices->null_count == 0 , 
GDF_VALIDITY_UNSUPPORTED ); GDF_REQUIRE( dest_indices->null_count == 0 , GDF_VALIDITY_UNSUPPORTED ); GDF_REQUIRE( graph->edgeList == nullptr , GDF_INVALID_API_CALL); graph->edgeList = new gdf_edge_list; graph->edgeList->src_indices = new gdf_column; graph->edgeList->dest_indices = new gdf_column; graph->edgeList->ownership = 0; cpy_column_view(src_indices, graph->edgeList->src_indices); cpy_column_view(dest_indices, graph->edgeList->dest_indices); if (edge_data) { GDF_REQUIRE( src_indices->size == edge_data->size, GDF_COLUMN_SIZE_MISMATCH ); graph->edgeList->edge_data = new gdf_column; cpy_column_view(edge_data, graph->edgeList->edge_data); } else { graph->edgeList->edge_data = nullptr; } return GDF_SUCCESS; } template <typename WT> gdf_error gdf_add_adj_list_impl (gdf_graph *graph) { if (graph->adjList == nullptr) { GDF_REQUIRE( graph->edgeList != nullptr , GDF_INVALID_API_CALL); int nnz = graph->edgeList->src_indices->size, status = 0; graph->adjList = new gdf_adj_list; graph->adjList->offsets = new gdf_column; graph->adjList->indices = new gdf_column; graph->adjList->ownership = 1; if (graph->edgeList->edge_data!= nullptr) { graph->adjList->edge_data = new gdf_column; CSR_Result_Weighted<int,WT> adj_list; status = ConvertCOOtoCSR_weighted((int*)graph->edgeList->src_indices->data, (int*)graph->edgeList->dest_indices->data, (WT*)graph->edgeList->edge_data->data, nnz, adj_list); gdf_column_view(graph->adjList->offsets, adj_list.rowOffsets, nullptr, adj_list.size+1, graph->edgeList->src_indices->dtype); gdf_column_view(graph->adjList->indices, adj_list.colIndices, nullptr, adj_list.nnz, graph->edgeList->src_indices->dtype); gdf_column_view(graph->adjList->edge_data, adj_list.edgeWeights, nullptr, adj_list.nnz, graph->edgeList->edge_data->dtype); } else { CSR_Result<int> adj_list; status = ConvertCOOtoCSR((int*)graph->edgeList->src_indices->data,(int*)graph->edgeList->dest_indices->data, nnz, adj_list); gdf_column_view(graph->adjList->offsets, adj_list.rowOffsets, 
nullptr, adj_list.size+1, graph->edgeList->src_indices->dtype); gdf_column_view(graph->adjList->indices, adj_list.colIndices, nullptr, adj_list.nnz, graph->edgeList->src_indices->dtype); } if (status !=0) { std::cerr << "Could not generate the adj_list" << std::endl; return GDF_CUDA_ERROR; } } return GDF_SUCCESS; } gdf_error gdf_add_edge_list (gdf_graph *graph) { if (graph->edgeList == nullptr) { GDF_REQUIRE( graph->adjList != nullptr , GDF_INVALID_API_CALL); int *d_src; graph->edgeList = new gdf_edge_list; graph->edgeList->src_indices = new gdf_column; graph->edgeList->dest_indices = new gdf_column; graph->edgeList->ownership = 2; CUDA_TRY(hipMallocManaged ((void**)&d_src, sizeof(int) * graph->adjList->indices->size)); cugraph::offsets_to_indices<int>((int*)graph->adjList->offsets->data, graph->adjList->offsets->size-1, (int*)d_src); gdf_column_view(graph->edgeList->src_indices, d_src, nullptr, graph->adjList->indices->size, graph->adjList->indices->dtype); cpy_column_view(graph->adjList->indices, graph->edgeList->dest_indices); if (graph->adjList->edge_data != nullptr) { graph->edgeList->edge_data = new gdf_column; cpy_column_view(graph->adjList->edge_data, graph->edgeList->edge_data); } } return GDF_SUCCESS; } template <typename WT> gdf_error gdf_add_transpose_impl (gdf_graph *graph) { if (graph->transposedAdjList == nullptr ) { GDF_REQUIRE( graph->edgeList != nullptr , GDF_INVALID_API_CALL); int nnz = graph->edgeList->src_indices->size, status = 0; graph->transposedAdjList = new gdf_adj_list; graph->transposedAdjList->offsets = new gdf_column; graph->transposedAdjList->indices = new gdf_column; graph->transposedAdjList->ownership = 1; if (graph->edgeList->edge_data) { graph->transposedAdjList->edge_data = new gdf_column; CSR_Result_Weighted<int,WT> adj_list; status = ConvertCOOtoCSR_weighted( (int*)graph->edgeList->dest_indices->data, (int*)graph->edgeList->src_indices->data, (WT*)graph->edgeList->edge_data->data, nnz, adj_list); 
gdf_column_view(graph->transposedAdjList->offsets, adj_list.rowOffsets, nullptr, adj_list.size+1, graph->edgeList->src_indices->dtype); gdf_column_view(graph->transposedAdjList->indices, adj_list.colIndices, nullptr, adj_list.nnz, graph->edgeList->src_indices->dtype); gdf_column_view(graph->transposedAdjList->edge_data, adj_list.edgeWeights, nullptr, adj_list.nnz, graph->edgeList->edge_data->dtype); } else { CSR_Result<int> adj_list; status = ConvertCOOtoCSR((int*)graph->edgeList->dest_indices->data, (int*)graph->edgeList->src_indices->data, nnz, adj_list); gdf_column_view(graph->transposedAdjList->offsets, adj_list.rowOffsets, nullptr, adj_list.size+1, graph->edgeList->src_indices->dtype); gdf_column_view(graph->transposedAdjList->indices, adj_list.colIndices, nullptr, adj_list.nnz, graph->edgeList->src_indices->dtype); } if (status !=0) { std::cerr << "Could not generate the adj_list" << std::endl; return GDF_CUDA_ERROR; } } return GDF_SUCCESS; } template <typename WT> gdf_error gdf_pagerank_impl (gdf_graph *graph, gdf_column *pagerank, float alpha = 0.85, float tolerance = 1e-4, int max_iter = 200, bool has_guess = false) { GDF_REQUIRE( graph->edgeList != nullptr, GDF_VALIDITY_UNSUPPORTED ); GDF_REQUIRE( graph->edgeList->src_indices->size == graph->edgeList->dest_indices->size, GDF_COLUMN_SIZE_MISMATCH ); GDF_REQUIRE( graph->edgeList->src_indices->dtype == graph->edgeList->dest_indices->dtype, GDF_UNSUPPORTED_DTYPE ); GDF_REQUIRE( graph->edgeList->src_indices->null_count == 0 , GDF_VALIDITY_UNSUPPORTED ); GDF_REQUIRE( graph->edgeList->dest_indices->null_count == 0 , GDF_VALIDITY_UNSUPPORTED ); GDF_REQUIRE( pagerank != nullptr , GDF_INVALID_API_CALL ); GDF_REQUIRE( pagerank->data != nullptr , GDF_INVALID_API_CALL ); GDF_REQUIRE( pagerank->null_count == 0 , GDF_VALIDITY_UNSUPPORTED ); GDF_REQUIRE( pagerank->size > 0 , GDF_INVALID_API_CALL ); int m=pagerank->size, nnz = graph->edgeList->src_indices->size, status = 0; WT *d_pr, *d_val = nullptr, *d_leaf_vector = 
nullptr; WT res = 1.0; WT *residual = &res; if (graph->transposedAdjList == nullptr) { gdf_add_transpose(graph); } hipStream_t stream{nullptr}; ALLOC_MANAGED_TRY((void**)&d_leaf_vector, sizeof(WT) * m, stream); ALLOC_MANAGED_TRY((void**)&d_val, sizeof(WT) * nnz , stream); ALLOC_MANAGED_TRY((void**)&d_pr, sizeof(WT) * m, stream); cugraph::HT_matrix_csc_coo(m, nnz, (int*)graph->transposedAdjList->offsets->data, (int*)graph->transposedAdjList->indices->data, d_val, d_leaf_vector); if (has_guess) { GDF_REQUIRE( pagerank->data != nullptr, GDF_VALIDITY_UNSUPPORTED ); cugraph::copy<WT>(m, (WT*)pagerank->data, d_pr); } status = cugraph::pagerank<int,WT>( m,nnz, (int*)graph->transposedAdjList->offsets->data, (int*)graph->transposedAdjList->indices->data, d_val, alpha, d_leaf_vector, false, tolerance, max_iter, d_pr, residual); if (status !=0) switch ( status ) { case -1: std::cerr<< "Error : bad parameters in Pagerank"<<std::endl; return GDF_CUDA_ERROR; case 1: std::cerr<< "Warning : Pagerank did not reached the desired tolerance"<<std::endl; return GDF_CUDA_ERROR; default: std::cerr<< "Pagerank failed"<<std::endl; return GDF_CUDA_ERROR; } cugraph::copy<WT>(m, d_pr, (WT*)pagerank->data); ALLOC_FREE_TRY(d_val, stream); ALLOC_FREE_TRY(d_pr, stream); ALLOC_FREE_TRY(d_leaf_vector, stream); return GDF_SUCCESS; } gdf_error gdf_add_adj_list(gdf_graph *graph) { if (graph->adjList != nullptr) return GDF_SUCCESS; GDF_REQUIRE( graph->edgeList != nullptr , GDF_INVALID_API_CALL); GDF_REQUIRE( graph->adjList == nullptr , GDF_INVALID_API_CALL); if (graph->edgeList->edge_data != nullptr) { switch (graph->edgeList->edge_data->dtype) { case GDF_FLOAT32: return gdf_add_adj_list_impl<float>(graph); case GDF_FLOAT64: return gdf_add_adj_list_impl<double>(graph); default: return GDF_UNSUPPORTED_DTYPE; } } else { return gdf_add_adj_list_impl<float>(graph); } } gdf_error gdf_add_transpose(gdf_graph *graph) { if (graph->edgeList == nullptr) gdf_add_edge_list(graph); if (graph->edgeList->edge_data != 
nullptr) { switch (graph->edgeList->edge_data->dtype) { case GDF_FLOAT32: return gdf_add_transpose_impl<float>(graph); case GDF_FLOAT64: return gdf_add_transpose_impl<double>(graph); default: return GDF_UNSUPPORTED_DTYPE; } } else { return gdf_add_transpose_impl<float>(graph); } } gdf_error gdf_delete_adj_list(gdf_graph *graph) { if (graph->adjList) { delete graph->adjList; } graph->adjList = nullptr; return GDF_SUCCESS; } gdf_error gdf_delete_edge_list(gdf_graph *graph) { if (graph->edgeList) { delete graph->edgeList; } graph->edgeList = nullptr; return GDF_SUCCESS; } gdf_error gdf_delete_transpose(gdf_graph *graph) { if (graph->transposedAdjList) { delete graph->transposedAdjList; } graph->transposedAdjList = nullptr; return GDF_SUCCESS; } gdf_error gdf_pagerank(gdf_graph *graph, gdf_column *pagerank, float alpha, float tolerance, int max_iter, bool has_guess) { switch (pagerank->dtype) { case GDF_FLOAT32: return gdf_pagerank_impl<float>(graph, pagerank, alpha, tolerance, max_iter, has_guess); case GDF_FLOAT64: return gdf_pagerank_impl<double>(graph, pagerank, alpha, tolerance, max_iter, has_guess); default: return GDF_UNSUPPORTED_DTYPE; } } gdf_error gdf_bfs(gdf_graph *graph, gdf_column *distances, gdf_column *predecessors, int start_node, bool directed) { GDF_REQUIRE(graph->adjList != nullptr || graph->edgeList != nullptr, GDF_INVALID_API_CALL); gdf_error err = gdf_add_adj_list(graph); if (err != GDF_SUCCESS) return err; GDF_REQUIRE(graph->adjList->offsets->dtype == GDF_INT32, GDF_UNSUPPORTED_DTYPE); GDF_REQUIRE(graph->adjList->indices->dtype == GDF_INT32, GDF_UNSUPPORTED_DTYPE); GDF_REQUIRE(distances->dtype == GDF_INT32, GDF_UNSUPPORTED_DTYPE); GDF_REQUIRE(predecessors->dtype == GDF_INT32, GDF_UNSUPPORTED_DTYPE); int n = graph->adjList->offsets->size - 1; int e = graph->adjList->indices->size; int* offsets_ptr = (int*)graph->adjList->offsets->data; int* indices_ptr = (int*)graph->adjList->indices->data; int* distances_ptr = (int*)distances->data; int* 
predecessors_ptr = (int*)predecessors->data; int alpha = 15; int beta = 18; cugraph::Bfs<int> bfs(n, e, offsets_ptr, indices_ptr, directed, alpha, beta); bfs.configure(distances_ptr, predecessors_ptr, nullptr); bfs.traverse(start_node); return GDF_SUCCESS; }
b376ffd35b85fe95bf06d66e75a39f13f672736d.cu
/* * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. * * NVIDIA CORPORATION and its licensors retain all intellectual property * and proprietary rights in and to this software, related documentation * and any modifications thereto. Any use, reproduction, disclosure or * distribution of this software and related documentation without an express * license agreement from NVIDIA CORPORATION is strictly prohibited. * */ // Graph analytics features // Author: Alex Fender afender@nvidia.com #include <cugraph.h> #include "graph_utils.cuh" #include "pagerank.cuh" #include "COOtoCSR.cuh" #include "utilities/error_utils.h" #include "bfs.cuh" #include <rmm_utils.h> void gdf_col_delete(gdf_column* col) { if (col) { col->size = 0; if(col->data) { ALLOC_FREE_TRY(col->data, nullptr); } #if 1 // If delete col is executed, the memory pointed by col is no longer valid and // can be used in another memory allocation, so executing col->data = nullptr // after delete col is dangerous, also, col = nullptr has no effect here (the // address is passed by value, for col = nullptr should work, the input // parameter should be gdf_column*& col (or alternatively, gdf_column** col and // *col = nullptr also work) col->data = nullptr; delete col; #else delete col; col->data = nullptr; col = nullptr; #endif } } void gdf_col_release(gdf_column* col) { delete col; } void cpy_column_view(const gdf_column *in, gdf_column *out) { if (in != nullptr && out !=nullptr) { gdf_column_view(out, in->data, in->valid, in->size, in->dtype); } } gdf_error gdf_adj_list_view(gdf_graph *graph, const gdf_column *offsets, const gdf_column *indices, const gdf_column *edge_data) { GDF_REQUIRE( offsets->null_count == 0 , GDF_VALIDITY_UNSUPPORTED ); GDF_REQUIRE( indices->null_count == 0 , GDF_VALIDITY_UNSUPPORTED ); GDF_REQUIRE( (offsets->dtype == indices->dtype), GDF_UNSUPPORTED_DTYPE ); GDF_REQUIRE( ((offsets->dtype == GDF_INT32) || (offsets->dtype == GDF_INT64)), GDF_UNSUPPORTED_DTYPE ); GDF_REQUIRE( 
(offsets->size > 0), GDF_DATASET_EMPTY ); GDF_REQUIRE( (graph->adjList == nullptr) , GDF_INVALID_API_CALL); graph->adjList = new gdf_adj_list; graph->adjList->offsets = new gdf_column; graph->adjList->indices = new gdf_column; graph->adjList->ownership = 0; cpy_column_view(offsets, graph->adjList->offsets); cpy_column_view(indices, graph->adjList->indices); if (edge_data) { GDF_REQUIRE( indices->size == edge_data->size, GDF_COLUMN_SIZE_MISMATCH ); graph->adjList->edge_data = new gdf_column; cpy_column_view(edge_data, graph->adjList->edge_data); } else { graph->adjList->edge_data = nullptr; } return GDF_SUCCESS; } gdf_error gdf_adj_list::get_vertex_identifiers(gdf_column *identifiers) { GDF_REQUIRE( offsets != nullptr , GDF_INVALID_API_CALL); GDF_REQUIRE( offsets->data != nullptr , GDF_INVALID_API_CALL); cugraph::sequence<int>((int)offsets->size-1, (int*)identifiers->data); return GDF_SUCCESS; } gdf_error gdf_adj_list::get_source_indices (gdf_column *src_indices) { GDF_REQUIRE( offsets != nullptr , GDF_INVALID_API_CALL); GDF_REQUIRE( offsets->data != nullptr , GDF_INVALID_API_CALL); GDF_REQUIRE( src_indices->size == indices->size, GDF_COLUMN_SIZE_MISMATCH ); GDF_REQUIRE( src_indices->dtype == indices->dtype, GDF_UNSUPPORTED_DTYPE ); GDF_REQUIRE( src_indices->size > 0, GDF_DATASET_EMPTY ); cugraph::offsets_to_indices<int>((int*)offsets->data, offsets->size-1, (int*)src_indices->data); return GDF_SUCCESS; } gdf_error gdf_edge_list_view(gdf_graph *graph, const gdf_column *src_indices, const gdf_column *dest_indices, const gdf_column *edge_data) { GDF_REQUIRE( src_indices->size == dest_indices->size, GDF_COLUMN_SIZE_MISMATCH ); GDF_REQUIRE( src_indices->dtype == dest_indices->dtype, GDF_UNSUPPORTED_DTYPE ); GDF_REQUIRE( ((src_indices->dtype == GDF_INT32) || (src_indices->dtype == GDF_INT64)), GDF_UNSUPPORTED_DTYPE ); GDF_REQUIRE( src_indices->size > 0, GDF_DATASET_EMPTY ); GDF_REQUIRE( src_indices->null_count == 0 , GDF_VALIDITY_UNSUPPORTED ); GDF_REQUIRE( 
dest_indices->null_count == 0 , GDF_VALIDITY_UNSUPPORTED ); GDF_REQUIRE( graph->edgeList == nullptr , GDF_INVALID_API_CALL); graph->edgeList = new gdf_edge_list; graph->edgeList->src_indices = new gdf_column; graph->edgeList->dest_indices = new gdf_column; graph->edgeList->ownership = 0; cpy_column_view(src_indices, graph->edgeList->src_indices); cpy_column_view(dest_indices, graph->edgeList->dest_indices); if (edge_data) { GDF_REQUIRE( src_indices->size == edge_data->size, GDF_COLUMN_SIZE_MISMATCH ); graph->edgeList->edge_data = new gdf_column; cpy_column_view(edge_data, graph->edgeList->edge_data); } else { graph->edgeList->edge_data = nullptr; } return GDF_SUCCESS; } template <typename WT> gdf_error gdf_add_adj_list_impl (gdf_graph *graph) { if (graph->adjList == nullptr) { GDF_REQUIRE( graph->edgeList != nullptr , GDF_INVALID_API_CALL); int nnz = graph->edgeList->src_indices->size, status = 0; graph->adjList = new gdf_adj_list; graph->adjList->offsets = new gdf_column; graph->adjList->indices = new gdf_column; graph->adjList->ownership = 1; if (graph->edgeList->edge_data!= nullptr) { graph->adjList->edge_data = new gdf_column; CSR_Result_Weighted<int,WT> adj_list; status = ConvertCOOtoCSR_weighted((int*)graph->edgeList->src_indices->data, (int*)graph->edgeList->dest_indices->data, (WT*)graph->edgeList->edge_data->data, nnz, adj_list); gdf_column_view(graph->adjList->offsets, adj_list.rowOffsets, nullptr, adj_list.size+1, graph->edgeList->src_indices->dtype); gdf_column_view(graph->adjList->indices, adj_list.colIndices, nullptr, adj_list.nnz, graph->edgeList->src_indices->dtype); gdf_column_view(graph->adjList->edge_data, adj_list.edgeWeights, nullptr, adj_list.nnz, graph->edgeList->edge_data->dtype); } else { CSR_Result<int> adj_list; status = ConvertCOOtoCSR((int*)graph->edgeList->src_indices->data,(int*)graph->edgeList->dest_indices->data, nnz, adj_list); gdf_column_view(graph->adjList->offsets, adj_list.rowOffsets, nullptr, adj_list.size+1, 
graph->edgeList->src_indices->dtype); gdf_column_view(graph->adjList->indices, adj_list.colIndices, nullptr, adj_list.nnz, graph->edgeList->src_indices->dtype); } if (status !=0) { std::cerr << "Could not generate the adj_list" << std::endl; return GDF_CUDA_ERROR; } } return GDF_SUCCESS; } gdf_error gdf_add_edge_list (gdf_graph *graph) { if (graph->edgeList == nullptr) { GDF_REQUIRE( graph->adjList != nullptr , GDF_INVALID_API_CALL); int *d_src; graph->edgeList = new gdf_edge_list; graph->edgeList->src_indices = new gdf_column; graph->edgeList->dest_indices = new gdf_column; graph->edgeList->ownership = 2; CUDA_TRY(cudaMallocManaged ((void**)&d_src, sizeof(int) * graph->adjList->indices->size)); cugraph::offsets_to_indices<int>((int*)graph->adjList->offsets->data, graph->adjList->offsets->size-1, (int*)d_src); gdf_column_view(graph->edgeList->src_indices, d_src, nullptr, graph->adjList->indices->size, graph->adjList->indices->dtype); cpy_column_view(graph->adjList->indices, graph->edgeList->dest_indices); if (graph->adjList->edge_data != nullptr) { graph->edgeList->edge_data = new gdf_column; cpy_column_view(graph->adjList->edge_data, graph->edgeList->edge_data); } } return GDF_SUCCESS; } template <typename WT> gdf_error gdf_add_transpose_impl (gdf_graph *graph) { if (graph->transposedAdjList == nullptr ) { GDF_REQUIRE( graph->edgeList != nullptr , GDF_INVALID_API_CALL); int nnz = graph->edgeList->src_indices->size, status = 0; graph->transposedAdjList = new gdf_adj_list; graph->transposedAdjList->offsets = new gdf_column; graph->transposedAdjList->indices = new gdf_column; graph->transposedAdjList->ownership = 1; if (graph->edgeList->edge_data) { graph->transposedAdjList->edge_data = new gdf_column; CSR_Result_Weighted<int,WT> adj_list; status = ConvertCOOtoCSR_weighted( (int*)graph->edgeList->dest_indices->data, (int*)graph->edgeList->src_indices->data, (WT*)graph->edgeList->edge_data->data, nnz, adj_list); gdf_column_view(graph->transposedAdjList->offsets, 
adj_list.rowOffsets, nullptr, adj_list.size+1, graph->edgeList->src_indices->dtype); gdf_column_view(graph->transposedAdjList->indices, adj_list.colIndices, nullptr, adj_list.nnz, graph->edgeList->src_indices->dtype); gdf_column_view(graph->transposedAdjList->edge_data, adj_list.edgeWeights, nullptr, adj_list.nnz, graph->edgeList->edge_data->dtype); } else { CSR_Result<int> adj_list; status = ConvertCOOtoCSR((int*)graph->edgeList->dest_indices->data, (int*)graph->edgeList->src_indices->data, nnz, adj_list); gdf_column_view(graph->transposedAdjList->offsets, adj_list.rowOffsets, nullptr, adj_list.size+1, graph->edgeList->src_indices->dtype); gdf_column_view(graph->transposedAdjList->indices, adj_list.colIndices, nullptr, adj_list.nnz, graph->edgeList->src_indices->dtype); } if (status !=0) { std::cerr << "Could not generate the adj_list" << std::endl; return GDF_CUDA_ERROR; } } return GDF_SUCCESS; } template <typename WT> gdf_error gdf_pagerank_impl (gdf_graph *graph, gdf_column *pagerank, float alpha = 0.85, float tolerance = 1e-4, int max_iter = 200, bool has_guess = false) { GDF_REQUIRE( graph->edgeList != nullptr, GDF_VALIDITY_UNSUPPORTED ); GDF_REQUIRE( graph->edgeList->src_indices->size == graph->edgeList->dest_indices->size, GDF_COLUMN_SIZE_MISMATCH ); GDF_REQUIRE( graph->edgeList->src_indices->dtype == graph->edgeList->dest_indices->dtype, GDF_UNSUPPORTED_DTYPE ); GDF_REQUIRE( graph->edgeList->src_indices->null_count == 0 , GDF_VALIDITY_UNSUPPORTED ); GDF_REQUIRE( graph->edgeList->dest_indices->null_count == 0 , GDF_VALIDITY_UNSUPPORTED ); GDF_REQUIRE( pagerank != nullptr , GDF_INVALID_API_CALL ); GDF_REQUIRE( pagerank->data != nullptr , GDF_INVALID_API_CALL ); GDF_REQUIRE( pagerank->null_count == 0 , GDF_VALIDITY_UNSUPPORTED ); GDF_REQUIRE( pagerank->size > 0 , GDF_INVALID_API_CALL ); int m=pagerank->size, nnz = graph->edgeList->src_indices->size, status = 0; WT *d_pr, *d_val = nullptr, *d_leaf_vector = nullptr; WT res = 1.0; WT *residual = &res; if 
(graph->transposedAdjList == nullptr) { gdf_add_transpose(graph); } cudaStream_t stream{nullptr}; ALLOC_MANAGED_TRY((void**)&d_leaf_vector, sizeof(WT) * m, stream); ALLOC_MANAGED_TRY((void**)&d_val, sizeof(WT) * nnz , stream); ALLOC_MANAGED_TRY((void**)&d_pr, sizeof(WT) * m, stream); cugraph::HT_matrix_csc_coo(m, nnz, (int*)graph->transposedAdjList->offsets->data, (int*)graph->transposedAdjList->indices->data, d_val, d_leaf_vector); if (has_guess) { GDF_REQUIRE( pagerank->data != nullptr, GDF_VALIDITY_UNSUPPORTED ); cugraph::copy<WT>(m, (WT*)pagerank->data, d_pr); } status = cugraph::pagerank<int,WT>( m,nnz, (int*)graph->transposedAdjList->offsets->data, (int*)graph->transposedAdjList->indices->data, d_val, alpha, d_leaf_vector, false, tolerance, max_iter, d_pr, residual); if (status !=0) switch ( status ) { case -1: std::cerr<< "Error : bad parameters in Pagerank"<<std::endl; return GDF_CUDA_ERROR; case 1: std::cerr<< "Warning : Pagerank did not reached the desired tolerance"<<std::endl; return GDF_CUDA_ERROR; default: std::cerr<< "Pagerank failed"<<std::endl; return GDF_CUDA_ERROR; } cugraph::copy<WT>(m, d_pr, (WT*)pagerank->data); ALLOC_FREE_TRY(d_val, stream); ALLOC_FREE_TRY(d_pr, stream); ALLOC_FREE_TRY(d_leaf_vector, stream); return GDF_SUCCESS; } gdf_error gdf_add_adj_list(gdf_graph *graph) { if (graph->adjList != nullptr) return GDF_SUCCESS; GDF_REQUIRE( graph->edgeList != nullptr , GDF_INVALID_API_CALL); GDF_REQUIRE( graph->adjList == nullptr , GDF_INVALID_API_CALL); if (graph->edgeList->edge_data != nullptr) { switch (graph->edgeList->edge_data->dtype) { case GDF_FLOAT32: return gdf_add_adj_list_impl<float>(graph); case GDF_FLOAT64: return gdf_add_adj_list_impl<double>(graph); default: return GDF_UNSUPPORTED_DTYPE; } } else { return gdf_add_adj_list_impl<float>(graph); } } gdf_error gdf_add_transpose(gdf_graph *graph) { if (graph->edgeList == nullptr) gdf_add_edge_list(graph); if (graph->edgeList->edge_data != nullptr) { switch 
(graph->edgeList->edge_data->dtype) { case GDF_FLOAT32: return gdf_add_transpose_impl<float>(graph); case GDF_FLOAT64: return gdf_add_transpose_impl<double>(graph); default: return GDF_UNSUPPORTED_DTYPE; } } else { return gdf_add_transpose_impl<float>(graph); } } gdf_error gdf_delete_adj_list(gdf_graph *graph) { if (graph->adjList) { delete graph->adjList; } graph->adjList = nullptr; return GDF_SUCCESS; } gdf_error gdf_delete_edge_list(gdf_graph *graph) { if (graph->edgeList) { delete graph->edgeList; } graph->edgeList = nullptr; return GDF_SUCCESS; } gdf_error gdf_delete_transpose(gdf_graph *graph) { if (graph->transposedAdjList) { delete graph->transposedAdjList; } graph->transposedAdjList = nullptr; return GDF_SUCCESS; } gdf_error gdf_pagerank(gdf_graph *graph, gdf_column *pagerank, float alpha, float tolerance, int max_iter, bool has_guess) { switch (pagerank->dtype) { case GDF_FLOAT32: return gdf_pagerank_impl<float>(graph, pagerank, alpha, tolerance, max_iter, has_guess); case GDF_FLOAT64: return gdf_pagerank_impl<double>(graph, pagerank, alpha, tolerance, max_iter, has_guess); default: return GDF_UNSUPPORTED_DTYPE; } } gdf_error gdf_bfs(gdf_graph *graph, gdf_column *distances, gdf_column *predecessors, int start_node, bool directed) { GDF_REQUIRE(graph->adjList != nullptr || graph->edgeList != nullptr, GDF_INVALID_API_CALL); gdf_error err = gdf_add_adj_list(graph); if (err != GDF_SUCCESS) return err; GDF_REQUIRE(graph->adjList->offsets->dtype == GDF_INT32, GDF_UNSUPPORTED_DTYPE); GDF_REQUIRE(graph->adjList->indices->dtype == GDF_INT32, GDF_UNSUPPORTED_DTYPE); GDF_REQUIRE(distances->dtype == GDF_INT32, GDF_UNSUPPORTED_DTYPE); GDF_REQUIRE(predecessors->dtype == GDF_INT32, GDF_UNSUPPORTED_DTYPE); int n = graph->adjList->offsets->size - 1; int e = graph->adjList->indices->size; int* offsets_ptr = (int*)graph->adjList->offsets->data; int* indices_ptr = (int*)graph->adjList->indices->data; int* distances_ptr = (int*)distances->data; int* predecessors_ptr = 
(int*)predecessors->data; int alpha = 15; int beta = 18; cugraph::Bfs<int> bfs(n, e, offsets_ptr, indices_ptr, directed, alpha, beta); bfs.configure(distances_ptr, predecessors_ptr, nullptr); bfs.traverse(start_node); return GDF_SUCCESS; }
c1b4da10991fcb83ca34141340c9c0e0402a7d9b.hip
// !!! This is a file automatically generated by hipify!!! // random generator includes #include <thrust/random/linear_congruential_engine.h> #include <thrust/random/xor_combine_engine.h> #include <thrust/random.h> #include <hiprand/hiprand_kernel.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/for_each.h> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/transform.h> #include <thrust/sequence.h> #include <thrust/copy.h> #include <thrust/fill.h> #include <thrust/replace.h> #include <thrust/functional.h> #include <thrust/execution_policy.h> #include <thrust/device_ptr.h> #include <thrust/transform_reduce.h> #include <thrust/binary_search.h> #include <thrust/adjacent_difference.h> #include <thrust/iterator/constant_iterator.h> #include <thrust/iterator/counting_iterator.h> #include <map> #include <iostream> #include <cstdlib> #include <cmath> #include <math.h> #include <string> #include <boost/math/tools/roots.hpp> #include <thrust/tuple.h> #include "STE_DataStructures_double.cuh" #include <vector> // to write 6-vector to screen // __host__ std::ostream& operator<< (std::ostream& os, const float6& p) // { // os << std::setw(15) << "x" << std::setw(15) << "y" << std::setw(15) << "z" << std::endl; // os << std::setw(15) << p.x << std::setw(15) << p.px << std::setw(15) << p.y << std::endl; // os << std::setw(15) << p.py << std::setw(15) << p.t <<std::setw(15) << p.delta << std::endl;; // return os; // }; // // to write 2-vector to screen // __host__ std::ostream& operator<< (std::ostream& os, const float2& p) // { // os << std::setw(21) << p.x << std::setw(21) << p.y; // // os << printf("%.16f",p.x) << "\t" << printf("%.16f",p.y) << std::endl; // // os << printf("%.16f \t %.16f\n",p.x,p.y); // return os; // };
c1b4da10991fcb83ca34141340c9c0e0402a7d9b.cu
// random generator includes #include <thrust/random/linear_congruential_engine.h> #include <thrust/random/xor_combine_engine.h> #include <thrust/random.h> #include <curand_kernel.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/for_each.h> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/transform.h> #include <thrust/sequence.h> #include <thrust/copy.h> #include <thrust/fill.h> #include <thrust/replace.h> #include <thrust/functional.h> #include <thrust/execution_policy.h> #include <thrust/device_ptr.h> #include <thrust/transform_reduce.h> #include <thrust/binary_search.h> #include <thrust/adjacent_difference.h> #include <thrust/iterator/constant_iterator.h> #include <thrust/iterator/counting_iterator.h> #include <map> #include <iostream> #include <cstdlib> #include <cmath> #include <math.h> #include <string> #include <boost/math/tools/roots.hpp> #include <thrust/tuple.h> #include "STE_DataStructures_double.cuh" #include <vector> // to write 6-vector to screen // __host__ std::ostream& operator<< (std::ostream& os, const float6& p) // { // os << std::setw(15) << "x" << std::setw(15) << "y" << std::setw(15) << "z" << std::endl; // os << std::setw(15) << p.x << std::setw(15) << p.px << std::setw(15) << p.y << std::endl; // os << std::setw(15) << p.py << std::setw(15) << p.t <<std::setw(15) << p.delta << std::endl;; // return os; // }; // // to write 2-vector to screen // __host__ std::ostream& operator<< (std::ostream& os, const float2& p) // { // os << std::setw(21) << p.x << std::setw(21) << p.y; // // os << printf("%.16f",p.x) << "\t" << printf("%.16f",p.y) << std::endl; // // os << printf("%.16f \t %.16f\n",p.x,p.y); // return os; // };
d1e0230798fc500f19d9e6fb09128eb2a6efadbd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<stdio.h> #include<stdlib.h> #include<omp.h> #include<cuda_runtime.h> #include<hiprand/hiprand.h> #include<hiprand/hiprand_kernel.h> #include<unistd.h> float* h_mean; // Store vector from d_mean float* h_sigma; // Store vector from d_sigma float* hh_mean; // to store the sum up h_mean per thread float* hh_sigma; // to store the sum up h_sigma per thread // Host code double f(double*); double W(double*); // weight function parameter double a = 0.25; // Device code __global__ void Metropolis(int N, float a, float* mean, float* sigma, unsigned int seed); int main(void){ // Setting up random number generator srand(101); // Settings // Sampling N points // N = pow(2, n); // n = 1, 2 , ..., 16 // array x --> holds the random number for coordinate. // Store temperary random number in r, y. double mean, sigma; int N; double x[10], x_old[10]; double r; FILE *output; float gputime_tot, cputime; // Settings for omp int cpu_thread_id = 0; // Settings for GPU int NGPU; // numbers of GPU int *Dev; // Store the GPU id int m, threadsPerBlock, blocksPerGrid; int sb; // Size of output array from individual GPU int sm; // Size of the shared memory per block hipEvent_t start, stop; // Get the settings of GPU printf("\n* Initial parameters for GPU:\n"); printf(" Enter the number of GPUs: "); scanf("%d", &NGPU); printf("%d\n", NGPU); Dev = (int *)malloc(sizeof(int) * NGPU); for (int i = 0; i < NGPU; i = i+1) { printf(" Enter the GPU ID (0/1/...): "); scanf("%d",&(Dev[i])); printf("%d\n", Dev[i]); } printf("\n* Solve Monte Carlo Integration in 10-dim:\n"); printf(" Enter the number of sample points: "); scanf("%d",&N); printf("%d\n",N); // Check can N be divided by NGPU, for saving my time on how to distribute workload!!! if (N % NGPU != 0) { printf("!!! 
Invalid partition of lattice: N %% NGPU != 0\n"); exit(1); } // Set the number of threads per block // Since I would use parallel reduction , threads per block should be 2^m printf(" Enter the power (m) of threads per block (2^m): "); scanf("%d", &m); printf("%d\n", m); threadsPerBlock = pow(2, m); if( threadsPerBlock > 1024 ) { printf("!!! The number of threads per block must be less than 1024.\n"); exit(0); } printf("threads per block = %d\n", threadsPerBlock); printf(" Enter the number of blocks per grid: "); scanf("%d", &blocksPerGrid); printf("%d\n", blocksPerGrid); if( blocksPerGrid > 2147483647 ) { printf("!!! The number of blocks per grid must be less than 2147483647.\n"); exit(0); } // Since in GPU, I calculate blockDim.x * GridDim.x of x first , // then go to next round , so N > blockDim.x * GridDim.x // The most ideal number of threads per grid for the integral to be more accurate, // is to make it run more turns. if( (N / NGPU) < blocksPerGrid * threadsPerBlock){ printf("!!! 
The number of threads per grid must be less than number of sample points N / NGPU.\n"); exit(0); } // Output to a file output = fopen("integration_result_MA.txt", "a"); fprintf(output, "N BlockSize GridSize MAGPU MAGPUerror MACPU MACPUerror SpeedUp\n"); fclose(output); /* Monte Carlo integration with GPU */ sb = blocksPerGrid * sizeof(float); sm = threadsPerBlock * sizeof(float); h_mean = (float*)malloc(sb * NGPU); h_sigma = (float*)malloc(sb * NGPU); hh_mean = (float*)malloc(sizeof(float) * NGPU); hh_sigma = (float*)malloc(sizeof(float) * NGPU); for(int i = 0; i < NGPU; i = i+1){ hh_mean[i] = 0.0; hh_sigma[i] = 0.0; } // Set numbers of threads = numbers of GPU omp_set_num_threads(NGPU); #pragma omp parallel private(cpu_thread_id) { // Declare private pointer float* d_mean; float* d_sigma; // Get thread num, and set the GPU accordingly cpu_thread_id = omp_get_thread_num(); hipSetDevice(Dev[cpu_thread_id]); // Create the timer and start it at thread = 0 only if(cpu_thread_id == 0) { hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); } // Allocate device memory hipMalloc((void**)&d_mean, sb); hipMalloc((void**)&d_sigma, sb); // Since I need to stored mean and sigma seperately, // so shared memory must x2 // set the seed as time(NULL) + cpu_thread_id * 10.0 so that different gpu has different seed hipLaunchKernelGGL(( Metropolis), dim3(blocksPerGrid), dim3(threadsPerBlock), 2 * sm, 0, N / NGPU, a, d_mean, d_sigma, time(NULL) + cpu_thread_id * 10.0); // Copy d_mean and d_sigma from device to host hipMemcpy(h_mean + blocksPerGrid * cpu_thread_id, d_mean, sb, hipMemcpyDeviceToHost); hipMemcpy(h_sigma + blocksPerGrid * cpu_thread_id, d_sigma, sb, hipMemcpyDeviceToHost); hipFree(d_mean); hipFree(d_sigma); for(int i = blocksPerGrid * cpu_thread_id; i < (blocksPerGrid * cpu_thread_id) + blocksPerGrid; i = i+1) { hh_mean[cpu_thread_id] = hh_mean[cpu_thread_id] + h_mean[i]; hh_sigma[cpu_thread_id] = hh_sigma[cpu_thread_id] + h_sigma[i]; } // Wait till 
OpenMP threads are finish! #pragma omp barrier } // Calculate the final result mean = 0.0; sigma = 0.0; for(int i = 0; i < NGPU; i = i+1){ mean = mean + (double)hh_mean[i]; sigma = sigma + (double)hh_sigma[i]; } mean = mean / (double) N; sigma = sqrt(((1.0 / (double) N) * sigma + pow(mean, 2)) / (double) N); // stop the timer hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&gputime_tot, start, stop); // Write result to file output = fopen("integration_result_MA.txt", "a"); fprintf(output, "%d %d %d %.5e %.5e ", N, threadsPerBlock, blocksPerGrid, mean, sigma); fclose(output); // Print the result from GPU printf("------GPU result------\n"); printf("Mean = %lf\n", mean); printf("Sigma = %lf\n", sigma); printf("GPU total time used = %.3lf (ms)\n\n", gputime_tot); /* Monte Carlo integration with CPU */ /*-----Metropolis Algorithm-----*/ mean = 0.0; sigma = 0.0; hipEventRecord(start, 0); // Get initial x --> x_old (N = 1) for(int j = 0; j < 10; j = j+1){ x_old[j] = (double) rand() / (double) RAND_MAX; } mean = mean + f(x_old) / W(x_old); sigma = sigma + pow(f(x_old) / W(x_old), 2); // Get the other (N-1) sample points for(int i = 2; i <= N; i = i+1){ // Get new x --> x for(int j = 0; j < 10; j = j+1){ x[j] = (double) rand() / (double) RAND_MAX; } // Check acceptance if(W(x) >= W(x_old)){ // Accept x, and to avoid overflow memcpy(x_old, x, sizeof(x_old)); } else{ r = (double) rand() / (double) RAND_MAX; if(r < (W(x) / W(x_old))){ // Accept x, and to avoid overflow memcpy(x_old, x, sizeof(x_old)); } } mean = mean + f(x_old) / W(x_old); sigma = sigma + pow(f(x_old) / W(x_old), 2); } mean = mean / (double) N; sigma = sqrt(((1.0 / (double) N) * sigma + pow(mean, 2)) / (double) N); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&cputime, start, stop); // Write result to file output = fopen("integration_result_MA.txt", "a"); fprintf(output, "%.5e +- %.5e %.3f\n", mean, sigma, cputime / gputime_tot); fclose(output); // Print the 
result from CPU printf("------CPU result------\n"); printf("Mean = %lf\n", mean); printf("Sigma = %lf\n", sigma); printf("GPU total time used = %.3lf (ms)\n\n", cputime); // All done , reset and free source free(h_mean); free(h_sigma); free(hh_mean); free(hh_sigma); // Destroy timer hipEventDestroy(start); hipEventDestroy(stop); // Reset GPU for(int i = 0; i < NGPU; i = i+1){ hipSetDevice(Dev[i]); hipDeviceReset(); } return 0; } // function to be integrated double f(double *x){ double result = 1.0; for(int i = 0; i <= 9; i = i+1){ result = result + pow(x[i], 2); } result = 1.0 / result; return result; } // weight function double W(double *x){ double weight = 1.0; double c; // Find c, so that integral c*exp(-ax) between [0,1] = 1 c = a / (1.0 - exp(-a)); // Calculate the weight function for(int i = 0; i <= 9; i = i+1){ weight = weight * c * exp(-a * x[i]); } return weight; } __global__ void Metropolis(int N, float a, float* mean, float* sigma, unsigned int seed) { /* The mean and sigma here just sum them up with x and x^2 Divided them and further calculate the real mean and sigma at host. For Metropolis x series, each threads generate their own series. */ extern __shared__ float cache[]; float *meanCache = &cache[0]; float *sigmaCache = &cache[blockDim.x]; int i = blockDim.x * blockIdx.x + threadIdx.x; int cacheIndex = threadIdx.x; float f_old, W_old; float f_new, W_new; float c = a / (1.0 - expf(-a)); float temp_mean = 0.0; float temp_sigma = 0.0; float temp_rand; // initialize cuRAND hiprandState_t state; seed = seed + i; hiprand_init(seed, i, 0, &state); // For each threads, generate the first {x}. // Get initial x-->x_old (N = 1), and calculate f and W directly. 
f_old = 0.0; W_old = 1.0; for(int j = 0; j < 10; j = j+1){ temp_rand = hiprand_uniform(&state); f_old = f_old + powf(temp_rand, 2); W_old = W_old * c * expf(-a * temp_rand); } f_old = f_old + 1.0; f_old = 1.0 / f_old; temp_mean = temp_mean + f_old / W_old; temp_sigma = temp_sigma + powf(f_old / W_old, 2); // Get the other (N-1) sample points while (i < (N - blockDim.x * gridDim.x)){ // Get new x --> x , and calculate f and W directly // so that I don't have to store addition x[10] array. f_new = 0.0; W_new = 1.0; for(int j = 0; j < 10; j = j+1){ temp_rand = hiprand_uniform(&state); f_new = f_new + powf(temp_rand, 2); W_new = W_new * c * expf(-a * temp_rand); } f_new = f_new + 1.0; f_new = 1.0 / f_new; // Check acceptance if(W_new > W_old){ // Accept x, and record it. f_old = f_new; W_old = W_new; } else{ temp_rand = hiprand_uniform(&state); if(temp_rand < (W_new / W_old)){ // Accept x, and record it. f_old = f_new; W_old = W_new; } } temp_mean = temp_mean + f_old / W_old; temp_sigma = temp_sigma + powf(f_old / W_old, 2); // Go to next round i = i + blockDim.x * gridDim.x; } meanCache[cacheIndex] = temp_mean; sigmaCache[cacheIndex] = temp_sigma; __syncthreads(); // perform parallel reduction, threadsPerBlock must be 2^m int ib = blockDim.x / 2; while (ib != 0) { if(cacheIndex < ib){ meanCache[cacheIndex] += meanCache[cacheIndex + ib]; sigmaCache[cacheIndex] += sigmaCache[cacheIndex + ib]; } __syncthreads(); ib /=2; } if(cacheIndex == 0){ mean[blockIdx.x] = meanCache[0]; sigma[blockIdx.x] = sigmaCache[0]; } }
d1e0230798fc500f19d9e6fb09128eb2a6efadbd.cu
#include<stdio.h> #include<stdlib.h> #include<omp.h> #include<cuda_runtime.h> #include<curand.h> #include<curand_kernel.h> #include<unistd.h> float* h_mean; // Store vector from d_mean float* h_sigma; // Store vector from d_sigma float* hh_mean; // to store the sum up h_mean per thread float* hh_sigma; // to store the sum up h_sigma per thread // Host code double f(double*); double W(double*); // weight function parameter double a = 0.25; // Device code __global__ void Metropolis(int N, float a, float* mean, float* sigma, unsigned int seed); int main(void){ // Setting up random number generator srand(101); // Settings // Sampling N points // N = pow(2, n); // n = 1, 2 , ..., 16 // array x --> holds the random number for coordinate. // Store temperary random number in r, y. double mean, sigma; int N; double x[10], x_old[10]; double r; FILE *output; float gputime_tot, cputime; // Settings for omp int cpu_thread_id = 0; // Settings for GPU int NGPU; // numbers of GPU int *Dev; // Store the GPU id int m, threadsPerBlock, blocksPerGrid; int sb; // Size of output array from individual GPU int sm; // Size of the shared memory per block cudaEvent_t start, stop; // Get the settings of GPU printf("\n* Initial parameters for GPU:\n"); printf(" Enter the number of GPUs: "); scanf("%d", &NGPU); printf("%d\n", NGPU); Dev = (int *)malloc(sizeof(int) * NGPU); for (int i = 0; i < NGPU; i = i+1) { printf(" Enter the GPU ID (0/1/...): "); scanf("%d",&(Dev[i])); printf("%d\n", Dev[i]); } printf("\n* Solve Monte Carlo Integration in 10-dim:\n"); printf(" Enter the number of sample points: "); scanf("%d",&N); printf("%d\n",N); // Check can N be divided by NGPU, for saving my time on how to distribute workload!!! if (N % NGPU != 0) { printf("!!! 
Invalid partition of lattice: N %% NGPU != 0\n"); exit(1); } // Set the number of threads per block // Since I would use parallel reduction , threads per block should be 2^m printf(" Enter the power (m) of threads per block (2^m): "); scanf("%d", &m); printf("%d\n", m); threadsPerBlock = pow(2, m); if( threadsPerBlock > 1024 ) { printf("!!! The number of threads per block must be less than 1024.\n"); exit(0); } printf("threads per block = %d\n", threadsPerBlock); printf(" Enter the number of blocks per grid: "); scanf("%d", &blocksPerGrid); printf("%d\n", blocksPerGrid); if( blocksPerGrid > 2147483647 ) { printf("!!! The number of blocks per grid must be less than 2147483647.\n"); exit(0); } // Since in GPU, I calculate blockDim.x * GridDim.x of x first , // then go to next round , so N > blockDim.x * GridDim.x // The most ideal number of threads per grid for the integral to be more accurate, // is to make it run more turns. if( (N / NGPU) < blocksPerGrid * threadsPerBlock){ printf("!!! 
The number of threads per grid must be less than number of sample points N / NGPU.\n"); exit(0); } // Output to a file output = fopen("integration_result_MA.txt", "a"); fprintf(output, "N BlockSize GridSize MAGPU MAGPUerror MACPU MACPUerror SpeedUp\n"); fclose(output); /* Monte Carlo integration with GPU */ sb = blocksPerGrid * sizeof(float); sm = threadsPerBlock * sizeof(float); h_mean = (float*)malloc(sb * NGPU); h_sigma = (float*)malloc(sb * NGPU); hh_mean = (float*)malloc(sizeof(float) * NGPU); hh_sigma = (float*)malloc(sizeof(float) * NGPU); for(int i = 0; i < NGPU; i = i+1){ hh_mean[i] = 0.0; hh_sigma[i] = 0.0; } // Set numbers of threads = numbers of GPU omp_set_num_threads(NGPU); #pragma omp parallel private(cpu_thread_id) { // Declare private pointer float* d_mean; float* d_sigma; // Get thread num, and set the GPU accordingly cpu_thread_id = omp_get_thread_num(); cudaSetDevice(Dev[cpu_thread_id]); // Create the timer and start it at thread = 0 only if(cpu_thread_id == 0) { cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); } // Allocate device memory cudaMalloc((void**)&d_mean, sb); cudaMalloc((void**)&d_sigma, sb); // Since I need to stored mean and sigma seperately, // so shared memory must x2 // set the seed as time(NULL) + cpu_thread_id * 10.0 so that different gpu has different seed Metropolis<<<blocksPerGrid, threadsPerBlock, 2 * sm>>>(N / NGPU, a, d_mean, d_sigma, time(NULL) + cpu_thread_id * 10.0); // Copy d_mean and d_sigma from device to host cudaMemcpy(h_mean + blocksPerGrid * cpu_thread_id, d_mean, sb, cudaMemcpyDeviceToHost); cudaMemcpy(h_sigma + blocksPerGrid * cpu_thread_id, d_sigma, sb, cudaMemcpyDeviceToHost); cudaFree(d_mean); cudaFree(d_sigma); for(int i = blocksPerGrid * cpu_thread_id; i < (blocksPerGrid * cpu_thread_id) + blocksPerGrid; i = i+1) { hh_mean[cpu_thread_id] = hh_mean[cpu_thread_id] + h_mean[i]; hh_sigma[cpu_thread_id] = hh_sigma[cpu_thread_id] + h_sigma[i]; } // Wait till OpenMP threads are 
finish! #pragma omp barrier } // Calculate the final result mean = 0.0; sigma = 0.0; for(int i = 0; i < NGPU; i = i+1){ mean = mean + (double)hh_mean[i]; sigma = sigma + (double)hh_sigma[i]; } mean = mean / (double) N; sigma = sqrt(((1.0 / (double) N) * sigma + pow(mean, 2)) / (double) N); // stop the timer cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&gputime_tot, start, stop); // Write result to file output = fopen("integration_result_MA.txt", "a"); fprintf(output, "%d %d %d %.5e %.5e ", N, threadsPerBlock, blocksPerGrid, mean, sigma); fclose(output); // Print the result from GPU printf("------GPU result------\n"); printf("Mean = %lf\n", mean); printf("Sigma = %lf\n", sigma); printf("GPU total time used = %.3lf (ms)\n\n", gputime_tot); /* Monte Carlo integration with CPU */ /*-----Metropolis Algorithm-----*/ mean = 0.0; sigma = 0.0; cudaEventRecord(start, 0); // Get initial x --> x_old (N = 1) for(int j = 0; j < 10; j = j+1){ x_old[j] = (double) rand() / (double) RAND_MAX; } mean = mean + f(x_old) / W(x_old); sigma = sigma + pow(f(x_old) / W(x_old), 2); // Get the other (N-1) sample points for(int i = 2; i <= N; i = i+1){ // Get new x --> x for(int j = 0; j < 10; j = j+1){ x[j] = (double) rand() / (double) RAND_MAX; } // Check acceptance if(W(x) >= W(x_old)){ // Accept x, and to avoid overflow memcpy(x_old, x, sizeof(x_old)); } else{ r = (double) rand() / (double) RAND_MAX; if(r < (W(x) / W(x_old))){ // Accept x, and to avoid overflow memcpy(x_old, x, sizeof(x_old)); } } mean = mean + f(x_old) / W(x_old); sigma = sigma + pow(f(x_old) / W(x_old), 2); } mean = mean / (double) N; sigma = sqrt(((1.0 / (double) N) * sigma + pow(mean, 2)) / (double) N); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&cputime, start, stop); // Write result to file output = fopen("integration_result_MA.txt", "a"); fprintf(output, "%.5e +- %.5e %.3f\n", mean, sigma, cputime / gputime_tot); fclose(output); // Print the result from 
CPU printf("------CPU result------\n"); printf("Mean = %lf\n", mean); printf("Sigma = %lf\n", sigma); printf("GPU total time used = %.3lf (ms)\n\n", cputime); // All done , reset and free source free(h_mean); free(h_sigma); free(hh_mean); free(hh_sigma); // Destroy timer cudaEventDestroy(start); cudaEventDestroy(stop); // Reset GPU for(int i = 0; i < NGPU; i = i+1){ cudaSetDevice(Dev[i]); cudaDeviceReset(); } return 0; } // function to be integrated double f(double *x){ double result = 1.0; for(int i = 0; i <= 9; i = i+1){ result = result + pow(x[i], 2); } result = 1.0 / result; return result; } // weight function double W(double *x){ double weight = 1.0; double c; // Find c, so that integral c*exp(-ax) between [0,1] = 1 c = a / (1.0 - exp(-a)); // Calculate the weight function for(int i = 0; i <= 9; i = i+1){ weight = weight * c * exp(-a * x[i]); } return weight; } __global__ void Metropolis(int N, float a, float* mean, float* sigma, unsigned int seed) { /* The mean and sigma here just sum them up with x and x^2 Divided them and further calculate the real mean and sigma at host. For Metropolis x series, each threads generate their own series. */ extern __shared__ float cache[]; float *meanCache = &cache[0]; float *sigmaCache = &cache[blockDim.x]; int i = blockDim.x * blockIdx.x + threadIdx.x; int cacheIndex = threadIdx.x; float f_old, W_old; float f_new, W_new; float c = a / (1.0 - expf(-a)); float temp_mean = 0.0; float temp_sigma = 0.0; float temp_rand; // initialize cuRAND curandState_t state; seed = seed + i; curand_init(seed, i, 0, &state); // For each threads, generate the first {x}. // Get initial x-->x_old (N = 1), and calculate f and W directly. 
f_old = 0.0; W_old = 1.0; for(int j = 0; j < 10; j = j+1){ temp_rand = curand_uniform(&state); f_old = f_old + powf(temp_rand, 2); W_old = W_old * c * expf(-a * temp_rand); } f_old = f_old + 1.0; f_old = 1.0 / f_old; temp_mean = temp_mean + f_old / W_old; temp_sigma = temp_sigma + powf(f_old / W_old, 2); // Get the other (N-1) sample points while (i < (N - blockDim.x * gridDim.x)){ // Get new x --> x , and calculate f and W directly // so that I don't have to store addition x[10] array. f_new = 0.0; W_new = 1.0; for(int j = 0; j < 10; j = j+1){ temp_rand = curand_uniform(&state); f_new = f_new + powf(temp_rand, 2); W_new = W_new * c * expf(-a * temp_rand); } f_new = f_new + 1.0; f_new = 1.0 / f_new; // Check acceptance if(W_new > W_old){ // Accept x, and record it. f_old = f_new; W_old = W_new; } else{ temp_rand = curand_uniform(&state); if(temp_rand < (W_new / W_old)){ // Accept x, and record it. f_old = f_new; W_old = W_new; } } temp_mean = temp_mean + f_old / W_old; temp_sigma = temp_sigma + powf(f_old / W_old, 2); // Go to next round i = i + blockDim.x * gridDim.x; } meanCache[cacheIndex] = temp_mean; sigmaCache[cacheIndex] = temp_sigma; __syncthreads(); // perform parallel reduction, threadsPerBlock must be 2^m int ib = blockDim.x / 2; while (ib != 0) { if(cacheIndex < ib){ meanCache[cacheIndex] += meanCache[cacheIndex + ib]; sigmaCache[cacheIndex] += sigmaCache[cacheIndex + ib]; } __syncthreads(); ib /=2; } if(cacheIndex == 0){ mean[blockIdx.x] = meanCache[0]; sigma[blockIdx.x] = sigmaCache[0]; } }
8a43e34fbce58f548ac95513c502f19d028a8f0e.hip
// !!! This is a file automatically generated by hipify!!! /*! * Copyright 2021 by XGBoost Contributors */ #include <thrust/scan.h> #include <hipcub/hipcub.hpp> #include <cassert> #include <limits> #include <memory> #include <utility> #include <tuple> #include "rabit/rabit.h" #include "xgboost/span.h" #include "xgboost/data.h" #include "auc.h" #include "../common/device_helpers.cuh" #include "../common/ranking_utils.cuh" namespace xgboost { namespace metric { namespace { template <typename T> using Discard = thrust::discard_iterator<T>; struct GetWeightOp { common::Span<float const> weights; common::Span<size_t const> sorted_idx; __device__ float operator()(size_t i) const { return weights.empty() ? 1.0f : weights[sorted_idx[i]]; } }; } // namespace /** * A cache to GPU data to avoid reallocating memory. */ struct DeviceAUCCache { // Pair of FP/TP using Pair = thrust::pair<float, float>; // index sorted by prediction value dh::device_vector<size_t> sorted_idx; // track FP/TP for computation on trapesoid area dh::device_vector<Pair> fptp; // track FP_PREV/TP_PREV for computation on trapesoid area dh::device_vector<Pair> neg_pos; // index of unique prediction values. dh::device_vector<size_t> unique_idx; // p^T: transposed prediction matrix, used by MultiClassAUC dh::device_vector<float> predts_t; std::unique_ptr<dh::AllReducer> reducer; void Init(common::Span<float const> predts, bool is_multi, int32_t device) { if (sorted_idx.size() != predts.size()) { sorted_idx.resize(predts.size()); fptp.resize(sorted_idx.size()); unique_idx.resize(sorted_idx.size()); neg_pos.resize(sorted_idx.size()); if (is_multi) { predts_t.resize(sorted_idx.size()); } } if (is_multi && !reducer) { reducer.reset(new dh::AllReducer); reducer->Init(device); } } }; /** * The GPU implementation uses same calculation as CPU with a few more steps to distribute * work across threads: * * - Run scan to obtain TP/FP values, which are right coordinates of trapesoid. 
* - Find distinct prediction values and get the corresponding FP_PREV/TP_PREV value, * which are left coordinates of trapesoids. * - Reduce the scan array into 1 AUC value. */ std::tuple<float, float, float> GPUBinaryAUC(common::Span<float const> predts, MetaInfo const &info, int32_t device, std::shared_ptr<DeviceAUCCache> *p_cache) { auto& cache = *p_cache; if (!cache) { cache.reset(new DeviceAUCCache); } cache->Init(predts, false, device); auto labels = info.labels_.ConstDeviceSpan(); auto weights = info.weights_.ConstDeviceSpan(); dh::safe_cuda(hipSetDevice(device)); CHECK(!labels.empty()); CHECK_EQ(labels.size(), predts.size()); /** * Create sorted index for each class */ auto d_sorted_idx = dh::ToSpan(cache->sorted_idx); dh::ArgSort<false>(predts, d_sorted_idx); /** * Linear scan */ auto get_weight = GetWeightOp{weights, d_sorted_idx}; using Pair = thrust::pair<float, float>; auto get_fp_tp = [=]__device__(size_t i) { size_t idx = d_sorted_idx[i]; float label = labels[idx]; float w = get_weight(i); float fp = (1.0 - label) * w; float tp = label * w; return thrust::make_pair(fp, tp); }; // NOLINT auto d_fptp = dh::ToSpan(cache->fptp); dh::LaunchN(d_sorted_idx.size(), [=] __device__(size_t i) { d_fptp[i] = get_fp_tp(i); }); dh::XGBDeviceAllocator<char> alloc; auto d_unique_idx = dh::ToSpan(cache->unique_idx); dh::Iota(d_unique_idx); auto uni_key = dh::MakeTransformIterator<float>( thrust::make_counting_iterator(0), [=] __device__(size_t i) { return predts[d_sorted_idx[i]]; }); auto end_unique = thrust::unique_by_key_copy( thrust::hip::par(alloc), uni_key, uni_key + d_sorted_idx.size(), dh::tbegin(d_unique_idx), thrust::make_discard_iterator(), dh::tbegin(d_unique_idx)); d_unique_idx = d_unique_idx.subspan(0, end_unique.second - dh::tbegin(d_unique_idx)); dh::InclusiveScan( dh::tbegin(d_fptp), dh::tbegin(d_fptp), [=] __device__(Pair const &l, Pair const &r) { return thrust::make_pair(l.first + r.first, l.second + r.second); }, d_fptp.size()); auto d_neg_pos = 
dh::ToSpan(cache->neg_pos); // scatter unique negaive/positive values // shift to right by 1 with initial value being 0 dh::LaunchN(d_unique_idx.size(), [=] __device__(size_t i) { if (d_unique_idx[i] == 0) { // first unique index is 0 assert(i == 0); d_neg_pos[0] = {0, 0}; return; } d_neg_pos[d_unique_idx[i]] = d_fptp[d_unique_idx[i] - 1]; if (i == d_unique_idx.size() - 1) { // last one needs to be included, may override above assignment if the last // prediction value is distinct from previous one. d_neg_pos.back() = d_fptp[d_unique_idx[i] - 1]; return; } }); auto in = dh::MakeTransformIterator<float>( thrust::make_counting_iterator(0), [=] __device__(size_t i) { float fp, tp; float fp_prev, tp_prev; if (i == 0) { // handle the last element thrust::tie(fp, tp) = d_fptp.back(); thrust::tie(fp_prev, tp_prev) = d_neg_pos[d_unique_idx.back()]; } else { thrust::tie(fp, tp) = d_fptp[d_unique_idx[i] - 1]; thrust::tie(fp_prev, tp_prev) = d_neg_pos[d_unique_idx[i - 1]]; } return TrapesoidArea(fp_prev, fp, tp_prev, tp); }); Pair last = cache->fptp.back(); float auc = thrust::reduce(thrust::hip::par(alloc), in, in + d_unique_idx.size()); return std::make_tuple(last.first, last.second, auc); } void Transpose(common::Span<float const> in, common::Span<float> out, size_t m, size_t n, int32_t device) { CHECK_EQ(in.size(), out.size()); CHECK_EQ(in.size(), m * n); dh::LaunchN(in.size(), [=] __device__(size_t i) { size_t col = i / m; size_t row = i % m; size_t idx = row * n + col; out[i] = in[idx]; }); } /** * Last index of a group in a CSR style of index pointer. 
*/ template <typename Idx> XGBOOST_DEVICE size_t LastOf(size_t group, common::Span<Idx> indptr) { return indptr[group + 1] - 1; } float ScaleClasses(common::Span<float> results, common::Span<float> local_area, common::Span<float> fp, common::Span<float> tp, common::Span<float> auc, std::shared_ptr<DeviceAUCCache> cache, size_t n_classes) { dh::XGBDeviceAllocator<char> alloc; if (rabit::IsDistributed()) { CHECK_EQ(dh::CudaGetPointerDevice(results.data()), dh::CurrentDevice()); cache->reducer->AllReduceSum(results.data(), results.data(), results.size()); } auto reduce_in = dh::MakeTransformIterator<thrust::pair<float, float>>( thrust::make_counting_iterator(0), [=] __device__(size_t i) { if (local_area[i] > 0) { return thrust::make_pair(auc[i] / local_area[i] * tp[i], tp[i]); } return thrust::make_pair(std::numeric_limits<float>::quiet_NaN(), 0.0f); }); float tp_sum; float auc_sum; thrust::tie(auc_sum, tp_sum) = thrust::reduce( thrust::hip::par(alloc), reduce_in, reduce_in + n_classes, thrust::make_pair(0.0f, 0.0f), [=] __device__(auto const &l, auto const &r) { return thrust::make_pair(l.first + r.first, l.second + r.second); }); if (tp_sum != 0 && !std::isnan(auc_sum)) { auc_sum /= tp_sum; } else { return std::numeric_limits<float>::quiet_NaN(); } return auc_sum; } /** * MultiClass implementation is similar to binary classification, except we need to split * up each class in all kernels. 
*/ float GPUMultiClassAUCOVR(common::Span<float const> predts, MetaInfo const &info, int32_t device, std::shared_ptr<DeviceAUCCache>* p_cache, size_t n_classes) { dh::safe_cuda(hipSetDevice(device)); auto& cache = *p_cache; if (!cache) { cache.reset(new DeviceAUCCache); } cache->Init(predts, true, device); auto labels = info.labels_.ConstDeviceSpan(); auto weights = info.weights_.ConstDeviceSpan(); size_t n_samples = labels.size(); if (n_samples == 0) { dh::TemporaryArray<float> resutls(n_classes * 4, 0.0f); auto d_results = dh::ToSpan(resutls); dh::LaunchN(n_classes * 4, [=] __device__(size_t i) { d_results[i] = 0.0f; }); auto local_area = d_results.subspan(0, n_classes); auto fp = d_results.subspan(n_classes, n_classes); auto tp = d_results.subspan(2 * n_classes, n_classes); auto auc = d_results.subspan(3 * n_classes, n_classes); return ScaleClasses(d_results, local_area, fp, tp, auc, cache, n_classes); } /** * Create sorted index for each class */ auto d_predts_t = dh::ToSpan(cache->predts_t); Transpose(predts, d_predts_t, n_samples, n_classes, device); dh::TemporaryArray<uint32_t> class_ptr(n_classes + 1, 0); auto d_class_ptr = dh::ToSpan(class_ptr); dh::LaunchN(n_classes + 1, [=] __device__(size_t i) { d_class_ptr[i] = i * n_samples; }); // no out-of-place sort for thrust, cub sort doesn't accept general iterator. So can't // use transform iterator in sorting. auto d_sorted_idx = dh::ToSpan(cache->sorted_idx); dh::SegmentedArgSort<false>(d_predts_t, d_class_ptr, d_sorted_idx); /** * Linear scan */ dh::caching_device_vector<float> d_auc(n_classes, 0); auto s_d_auc = dh::ToSpan(d_auc); auto get_weight = GetWeightOp{weights, d_sorted_idx}; using Pair = thrust::pair<float, float>; auto d_fptp = dh::ToSpan(cache->fptp); auto get_fp_tp = [=]__device__(size_t i) { size_t idx = d_sorted_idx[i]; size_t class_id = i / n_samples; // labels is a vector of size n_samples. 
float label = labels[idx % n_samples] == class_id; float w = get_weight(i % n_samples); float fp = (1.0 - label) * w; float tp = label * w; return thrust::make_pair(fp, tp); }; // NOLINT dh::LaunchN(d_sorted_idx.size(), [=] __device__(size_t i) { d_fptp[i] = get_fp_tp(i); }); /** * Handle duplicated predictions */ dh::XGBDeviceAllocator<char> alloc; auto d_unique_idx = dh::ToSpan(cache->unique_idx); dh::Iota(d_unique_idx); auto uni_key = dh::MakeTransformIterator<thrust::pair<uint32_t, float>>( thrust::make_counting_iterator(0), [=] __device__(size_t i) { uint32_t class_id = i / n_samples; float predt = d_predts_t[d_sorted_idx[i]]; return thrust::make_pair(class_id, predt); }); // unique values are sparse, so we need a CSR style indptr dh::TemporaryArray<uint32_t> unique_class_ptr(class_ptr.size()); auto d_unique_class_ptr = dh::ToSpan(unique_class_ptr); auto n_uniques = dh::SegmentedUniqueByKey( thrust::hip::par(alloc), dh::tbegin(d_class_ptr), dh::tend(d_class_ptr), uni_key, uni_key + d_sorted_idx.size(), dh::tbegin(d_unique_idx), d_unique_class_ptr.data(), dh::tbegin(d_unique_idx), thrust::equal_to<thrust::pair<uint32_t, float>>{}); d_unique_idx = d_unique_idx.subspan(0, n_uniques); using Triple = thrust::tuple<uint32_t, float, float>; // expand to tuple to include class id auto fptp_it_in = dh::MakeTransformIterator<Triple>( thrust::make_counting_iterator(0), [=] __device__(size_t i) { return thrust::make_tuple(i, d_fptp[i].first, d_fptp[i].second); }); // shrink down to pair auto fptp_it_out = thrust::make_transform_output_iterator( dh::TypedDiscard<Triple>{}, [d_fptp] __device__(Triple const &t) { d_fptp[thrust::get<0>(t)] = thrust::make_pair(thrust::get<1>(t), thrust::get<2>(t)); return t; }); dh::InclusiveScan( fptp_it_in, fptp_it_out, [=] __device__(Triple const &l, Triple const &r) { uint32_t l_cid = thrust::get<0>(l) / n_samples; uint32_t r_cid = thrust::get<0>(r) / n_samples; if (l_cid != r_cid) { return r; } return Triple(thrust::get<0>(r), 
thrust::get<1>(l) + thrust::get<1>(r), // fp thrust::get<2>(l) + thrust::get<2>(r)); // tp }, d_fptp.size()); // scatter unique FP_PREV/TP_PREV values auto d_neg_pos = dh::ToSpan(cache->neg_pos); // When dataset is not empty, each class must have at least 1 (unique) sample // prediction, so no need to handle special case. dh::LaunchN(d_unique_idx.size(), [=] __device__(size_t i) { if (d_unique_idx[i] % n_samples == 0) { // first unique index is 0 assert(d_unique_idx[i] % n_samples == 0); d_neg_pos[d_unique_idx[i]] = {0, 0}; // class_id * n_samples = i return; } uint32_t class_id = d_unique_idx[i] / n_samples; d_neg_pos[d_unique_idx[i]] = d_fptp[d_unique_idx[i] - 1]; if (i == LastOf(class_id, d_unique_class_ptr)) { // last one needs to be included. size_t last = d_unique_idx[LastOf(class_id, d_unique_class_ptr)]; d_neg_pos[LastOf(class_id, d_class_ptr)] = d_fptp[last - 1]; return; } }); /** * Reduce the result for each class */ auto key_in = dh::MakeTransformIterator<uint32_t>( thrust::make_counting_iterator(0), [=] __device__(size_t i) { size_t class_id = d_unique_idx[i] / n_samples; return class_id; }); auto val_in = dh::MakeTransformIterator<float>( thrust::make_counting_iterator(0), [=] __device__(size_t i) { size_t class_id = d_unique_idx[i] / n_samples; float fp, tp; float fp_prev, tp_prev; if (i == d_unique_class_ptr[class_id]) { // first item is ignored, we use this thread to calculate the last item thrust::tie(fp, tp) = d_fptp[class_id * n_samples + (n_samples - 1)]; thrust::tie(fp_prev, tp_prev) = d_neg_pos[d_unique_idx[LastOf(class_id, d_unique_class_ptr)]]; } else { thrust::tie(fp, tp) = d_fptp[d_unique_idx[i] - 1]; thrust::tie(fp_prev, tp_prev) = d_neg_pos[d_unique_idx[i - 1]]; } float auc = TrapesoidArea(fp_prev, fp, tp_prev, tp); return auc; }); thrust::reduce_by_key(thrust::hip::par(alloc), key_in, key_in + d_unique_idx.size(), val_in, thrust::make_discard_iterator(), d_auc.begin()); /** * Scale the classes with number of samples for each class. 
*/ dh::TemporaryArray<float> resutls(n_classes * 4); auto d_results = dh::ToSpan(resutls); auto local_area = d_results.subspan(0, n_classes); auto fp = d_results.subspan(n_classes, n_classes); auto tp = d_results.subspan(2 * n_classes, n_classes); auto auc = d_results.subspan(3 * n_classes, n_classes); dh::LaunchN(n_classes, [=] __device__(size_t c) { auc[c] = s_d_auc[c]; auto last = d_fptp[n_samples * c + (n_samples - 1)]; fp[c] = last.first; tp[c] = last.second; local_area[c] = last.first * last.second; }); return ScaleClasses(d_results, local_area, fp, tp, auc, cache, n_classes); } namespace { struct RankScanItem { size_t idx; float predt; float w; bst_group_t group_id; }; } // anonymous namespace std::pair<float, uint32_t> GPURankingAUC(common::Span<float const> predts, MetaInfo const &info, int32_t device, std::shared_ptr<DeviceAUCCache> *p_cache) { auto& cache = *p_cache; if (!cache) { cache.reset(new DeviceAUCCache); } cache->Init(predts, false, device); dh::caching_device_vector<bst_group_t> group_ptr(info.group_ptr_); dh::XGBCachingDeviceAllocator<char> alloc; auto d_group_ptr = dh::ToSpan(group_ptr); /** * Validate the dataset */ auto check_it = dh::MakeTransformIterator<size_t>( thrust::make_counting_iterator(0), [=] __device__(size_t i) { return d_group_ptr[i + 1] - d_group_ptr[i]; }); size_t n_valid = thrust::count_if( thrust::hip::par(alloc), check_it, check_it + group_ptr.size() - 1, [=] __device__(size_t len) { return len >= 3; }); if (n_valid < info.group_ptr_.size() - 1) { InvalidGroupAUC(); } if (n_valid == 0) { return std::make_pair(0.0f, 0); } /** * Sort the labels */ auto d_labels = info.labels_.ConstDeviceSpan(); auto d_sorted_idx = dh::ToSpan(cache->sorted_idx); dh::SegmentedArgSort<false>(d_labels, d_group_ptr, d_sorted_idx); auto d_weights = info.weights_.ConstDeviceSpan(); dh::caching_device_vector<size_t> threads_group_ptr(group_ptr.size(), 0); auto d_threads_group_ptr = dh::ToSpan(threads_group_ptr); // Use max to represent triangle 
auto n_threads = common::SegmentedTrapezoidThreads( d_group_ptr, d_threads_group_ptr, std::numeric_limits<size_t>::max()); // get the coordinate in nested summation auto get_i_j = [=]__device__(size_t idx, size_t query_group_idx) { auto data_group_begin = d_group_ptr[query_group_idx]; size_t n_samples = d_group_ptr[query_group_idx + 1] - data_group_begin; auto thread_group_begin = d_threads_group_ptr[query_group_idx]; auto idx_in_thread_group = idx - thread_group_begin; size_t i, j; common::UnravelTrapeziodIdx(idx_in_thread_group, n_samples, &i, &j); // we use global index among all groups for sorted idx, so i, j should also be global // index. i += data_group_begin; j += data_group_begin; return thrust::make_pair(i, j); }; // NOLINT auto in = dh::MakeTransformIterator<RankScanItem>( thrust::make_counting_iterator(0), [=] __device__(size_t idx) { bst_group_t query_group_idx = dh::SegmentId(d_threads_group_ptr, idx); auto data_group_begin = d_group_ptr[query_group_idx]; size_t n_samples = d_group_ptr[query_group_idx + 1] - data_group_begin; if (n_samples < 3) { // at least 3 documents are required. return RankScanItem{idx, 0, 0, query_group_idx}; } size_t i, j; thrust::tie(i, j) = get_i_j(idx, query_group_idx); float predt = predts[d_sorted_idx[i]] - predts[d_sorted_idx[j]]; float w = common::Sqr(d_weights.empty() ? 
1.0f : d_weights[query_group_idx]); if (predt > 0) { predt = 1.0; } else if (predt == 0) { predt = 0.5; } else { predt = 0; } predt *= w; return RankScanItem{idx, predt, w, query_group_idx}; }); dh::TemporaryArray<float> d_auc(group_ptr.size() - 1); auto s_d_auc = dh::ToSpan(d_auc); auto out = thrust::make_transform_output_iterator( dh::TypedDiscard<RankScanItem>{}, [=] __device__(RankScanItem const &item) -> RankScanItem { auto group_id = item.group_id; assert(group_id < d_group_ptr.size()); auto data_group_begin = d_group_ptr[group_id]; size_t n_samples = d_group_ptr[group_id + 1] - data_group_begin; // last item of current group if (item.idx == LastOf(group_id, d_threads_group_ptr)) { if (item.w > 0) { s_d_auc[group_id] = item.predt / item.w; } else { s_d_auc[group_id] = 0; } } return {}; // discard }); dh::InclusiveScan( in, out, [] __device__(RankScanItem const &l, RankScanItem const &r) { if (l.group_id != r.group_id) { return r; } return RankScanItem{r.idx, l.predt + r.predt, l.w + r.w, l.group_id}; }, n_threads); /** * Scale the AUC with number of items in each group. */ float auc = thrust::reduce(thrust::hip::par(alloc), dh::tbegin(s_d_auc), dh::tend(s_d_auc), 0.0f); return std::make_pair(auc, n_valid); } } // namespace metric } // namespace xgboost
8a43e34fbce58f548ac95513c502f19d028a8f0e.cu
/*! * Copyright 2021 by XGBoost Contributors */ #include <thrust/scan.h> #include <cub/cub.cuh> #include <cassert> #include <limits> #include <memory> #include <utility> #include <tuple> #include "rabit/rabit.h" #include "xgboost/span.h" #include "xgboost/data.h" #include "auc.h" #include "../common/device_helpers.cuh" #include "../common/ranking_utils.cuh" namespace xgboost { namespace metric { namespace { template <typename T> using Discard = thrust::discard_iterator<T>; struct GetWeightOp { common::Span<float const> weights; common::Span<size_t const> sorted_idx; __device__ float operator()(size_t i) const { return weights.empty() ? 1.0f : weights[sorted_idx[i]]; } }; } // namespace /** * A cache to GPU data to avoid reallocating memory. */ struct DeviceAUCCache { // Pair of FP/TP using Pair = thrust::pair<float, float>; // index sorted by prediction value dh::device_vector<size_t> sorted_idx; // track FP/TP for computation on trapesoid area dh::device_vector<Pair> fptp; // track FP_PREV/TP_PREV for computation on trapesoid area dh::device_vector<Pair> neg_pos; // index of unique prediction values. dh::device_vector<size_t> unique_idx; // p^T: transposed prediction matrix, used by MultiClassAUC dh::device_vector<float> predts_t; std::unique_ptr<dh::AllReducer> reducer; void Init(common::Span<float const> predts, bool is_multi, int32_t device) { if (sorted_idx.size() != predts.size()) { sorted_idx.resize(predts.size()); fptp.resize(sorted_idx.size()); unique_idx.resize(sorted_idx.size()); neg_pos.resize(sorted_idx.size()); if (is_multi) { predts_t.resize(sorted_idx.size()); } } if (is_multi && !reducer) { reducer.reset(new dh::AllReducer); reducer->Init(device); } } }; /** * The GPU implementation uses same calculation as CPU with a few more steps to distribute * work across threads: * * - Run scan to obtain TP/FP values, which are right coordinates of trapesoid. 
* - Find distinct prediction values and get the corresponding FP_PREV/TP_PREV value, * which are left coordinates of trapesoids. * - Reduce the scan array into 1 AUC value. */ std::tuple<float, float, float> GPUBinaryAUC(common::Span<float const> predts, MetaInfo const &info, int32_t device, std::shared_ptr<DeviceAUCCache> *p_cache) { auto& cache = *p_cache; if (!cache) { cache.reset(new DeviceAUCCache); } cache->Init(predts, false, device); auto labels = info.labels_.ConstDeviceSpan(); auto weights = info.weights_.ConstDeviceSpan(); dh::safe_cuda(cudaSetDevice(device)); CHECK(!labels.empty()); CHECK_EQ(labels.size(), predts.size()); /** * Create sorted index for each class */ auto d_sorted_idx = dh::ToSpan(cache->sorted_idx); dh::ArgSort<false>(predts, d_sorted_idx); /** * Linear scan */ auto get_weight = GetWeightOp{weights, d_sorted_idx}; using Pair = thrust::pair<float, float>; auto get_fp_tp = [=]__device__(size_t i) { size_t idx = d_sorted_idx[i]; float label = labels[idx]; float w = get_weight(i); float fp = (1.0 - label) * w; float tp = label * w; return thrust::make_pair(fp, tp); }; // NOLINT auto d_fptp = dh::ToSpan(cache->fptp); dh::LaunchN(d_sorted_idx.size(), [=] __device__(size_t i) { d_fptp[i] = get_fp_tp(i); }); dh::XGBDeviceAllocator<char> alloc; auto d_unique_idx = dh::ToSpan(cache->unique_idx); dh::Iota(d_unique_idx); auto uni_key = dh::MakeTransformIterator<float>( thrust::make_counting_iterator(0), [=] __device__(size_t i) { return predts[d_sorted_idx[i]]; }); auto end_unique = thrust::unique_by_key_copy( thrust::cuda::par(alloc), uni_key, uni_key + d_sorted_idx.size(), dh::tbegin(d_unique_idx), thrust::make_discard_iterator(), dh::tbegin(d_unique_idx)); d_unique_idx = d_unique_idx.subspan(0, end_unique.second - dh::tbegin(d_unique_idx)); dh::InclusiveScan( dh::tbegin(d_fptp), dh::tbegin(d_fptp), [=] __device__(Pair const &l, Pair const &r) { return thrust::make_pair(l.first + r.first, l.second + r.second); }, d_fptp.size()); auto d_neg_pos = 
dh::ToSpan(cache->neg_pos); // scatter unique negaive/positive values // shift to right by 1 with initial value being 0 dh::LaunchN(d_unique_idx.size(), [=] __device__(size_t i) { if (d_unique_idx[i] == 0) { // first unique index is 0 assert(i == 0); d_neg_pos[0] = {0, 0}; return; } d_neg_pos[d_unique_idx[i]] = d_fptp[d_unique_idx[i] - 1]; if (i == d_unique_idx.size() - 1) { // last one needs to be included, may override above assignment if the last // prediction value is distinct from previous one. d_neg_pos.back() = d_fptp[d_unique_idx[i] - 1]; return; } }); auto in = dh::MakeTransformIterator<float>( thrust::make_counting_iterator(0), [=] __device__(size_t i) { float fp, tp; float fp_prev, tp_prev; if (i == 0) { // handle the last element thrust::tie(fp, tp) = d_fptp.back(); thrust::tie(fp_prev, tp_prev) = d_neg_pos[d_unique_idx.back()]; } else { thrust::tie(fp, tp) = d_fptp[d_unique_idx[i] - 1]; thrust::tie(fp_prev, tp_prev) = d_neg_pos[d_unique_idx[i - 1]]; } return TrapesoidArea(fp_prev, fp, tp_prev, tp); }); Pair last = cache->fptp.back(); float auc = thrust::reduce(thrust::cuda::par(alloc), in, in + d_unique_idx.size()); return std::make_tuple(last.first, last.second, auc); } void Transpose(common::Span<float const> in, common::Span<float> out, size_t m, size_t n, int32_t device) { CHECK_EQ(in.size(), out.size()); CHECK_EQ(in.size(), m * n); dh::LaunchN(in.size(), [=] __device__(size_t i) { size_t col = i / m; size_t row = i % m; size_t idx = row * n + col; out[i] = in[idx]; }); } /** * Last index of a group in a CSR style of index pointer. 
*/ template <typename Idx> XGBOOST_DEVICE size_t LastOf(size_t group, common::Span<Idx> indptr) { return indptr[group + 1] - 1; } float ScaleClasses(common::Span<float> results, common::Span<float> local_area, common::Span<float> fp, common::Span<float> tp, common::Span<float> auc, std::shared_ptr<DeviceAUCCache> cache, size_t n_classes) { dh::XGBDeviceAllocator<char> alloc; if (rabit::IsDistributed()) { CHECK_EQ(dh::CudaGetPointerDevice(results.data()), dh::CurrentDevice()); cache->reducer->AllReduceSum(results.data(), results.data(), results.size()); } auto reduce_in = dh::MakeTransformIterator<thrust::pair<float, float>>( thrust::make_counting_iterator(0), [=] __device__(size_t i) { if (local_area[i] > 0) { return thrust::make_pair(auc[i] / local_area[i] * tp[i], tp[i]); } return thrust::make_pair(std::numeric_limits<float>::quiet_NaN(), 0.0f); }); float tp_sum; float auc_sum; thrust::tie(auc_sum, tp_sum) = thrust::reduce( thrust::cuda::par(alloc), reduce_in, reduce_in + n_classes, thrust::make_pair(0.0f, 0.0f), [=] __device__(auto const &l, auto const &r) { return thrust::make_pair(l.first + r.first, l.second + r.second); }); if (tp_sum != 0 && !std::isnan(auc_sum)) { auc_sum /= tp_sum; } else { return std::numeric_limits<float>::quiet_NaN(); } return auc_sum; } /** * MultiClass implementation is similar to binary classification, except we need to split * up each class in all kernels. 
*/ float GPUMultiClassAUCOVR(common::Span<float const> predts, MetaInfo const &info, int32_t device, std::shared_ptr<DeviceAUCCache>* p_cache, size_t n_classes) { dh::safe_cuda(cudaSetDevice(device)); auto& cache = *p_cache; if (!cache) { cache.reset(new DeviceAUCCache); } cache->Init(predts, true, device); auto labels = info.labels_.ConstDeviceSpan(); auto weights = info.weights_.ConstDeviceSpan(); size_t n_samples = labels.size(); if (n_samples == 0) { dh::TemporaryArray<float> resutls(n_classes * 4, 0.0f); auto d_results = dh::ToSpan(resutls); dh::LaunchN(n_classes * 4, [=] __device__(size_t i) { d_results[i] = 0.0f; }); auto local_area = d_results.subspan(0, n_classes); auto fp = d_results.subspan(n_classes, n_classes); auto tp = d_results.subspan(2 * n_classes, n_classes); auto auc = d_results.subspan(3 * n_classes, n_classes); return ScaleClasses(d_results, local_area, fp, tp, auc, cache, n_classes); } /** * Create sorted index for each class */ auto d_predts_t = dh::ToSpan(cache->predts_t); Transpose(predts, d_predts_t, n_samples, n_classes, device); dh::TemporaryArray<uint32_t> class_ptr(n_classes + 1, 0); auto d_class_ptr = dh::ToSpan(class_ptr); dh::LaunchN(n_classes + 1, [=] __device__(size_t i) { d_class_ptr[i] = i * n_samples; }); // no out-of-place sort for thrust, cub sort doesn't accept general iterator. So can't // use transform iterator in sorting. auto d_sorted_idx = dh::ToSpan(cache->sorted_idx); dh::SegmentedArgSort<false>(d_predts_t, d_class_ptr, d_sorted_idx); /** * Linear scan */ dh::caching_device_vector<float> d_auc(n_classes, 0); auto s_d_auc = dh::ToSpan(d_auc); auto get_weight = GetWeightOp{weights, d_sorted_idx}; using Pair = thrust::pair<float, float>; auto d_fptp = dh::ToSpan(cache->fptp); auto get_fp_tp = [=]__device__(size_t i) { size_t idx = d_sorted_idx[i]; size_t class_id = i / n_samples; // labels is a vector of size n_samples. 
float label = labels[idx % n_samples] == class_id; float w = get_weight(i % n_samples); float fp = (1.0 - label) * w; float tp = label * w; return thrust::make_pair(fp, tp); }; // NOLINT dh::LaunchN(d_sorted_idx.size(), [=] __device__(size_t i) { d_fptp[i] = get_fp_tp(i); }); /** * Handle duplicated predictions */ dh::XGBDeviceAllocator<char> alloc; auto d_unique_idx = dh::ToSpan(cache->unique_idx); dh::Iota(d_unique_idx); auto uni_key = dh::MakeTransformIterator<thrust::pair<uint32_t, float>>( thrust::make_counting_iterator(0), [=] __device__(size_t i) { uint32_t class_id = i / n_samples; float predt = d_predts_t[d_sorted_idx[i]]; return thrust::make_pair(class_id, predt); }); // unique values are sparse, so we need a CSR style indptr dh::TemporaryArray<uint32_t> unique_class_ptr(class_ptr.size()); auto d_unique_class_ptr = dh::ToSpan(unique_class_ptr); auto n_uniques = dh::SegmentedUniqueByKey( thrust::cuda::par(alloc), dh::tbegin(d_class_ptr), dh::tend(d_class_ptr), uni_key, uni_key + d_sorted_idx.size(), dh::tbegin(d_unique_idx), d_unique_class_ptr.data(), dh::tbegin(d_unique_idx), thrust::equal_to<thrust::pair<uint32_t, float>>{}); d_unique_idx = d_unique_idx.subspan(0, n_uniques); using Triple = thrust::tuple<uint32_t, float, float>; // expand to tuple to include class id auto fptp_it_in = dh::MakeTransformIterator<Triple>( thrust::make_counting_iterator(0), [=] __device__(size_t i) { return thrust::make_tuple(i, d_fptp[i].first, d_fptp[i].second); }); // shrink down to pair auto fptp_it_out = thrust::make_transform_output_iterator( dh::TypedDiscard<Triple>{}, [d_fptp] __device__(Triple const &t) { d_fptp[thrust::get<0>(t)] = thrust::make_pair(thrust::get<1>(t), thrust::get<2>(t)); return t; }); dh::InclusiveScan( fptp_it_in, fptp_it_out, [=] __device__(Triple const &l, Triple const &r) { uint32_t l_cid = thrust::get<0>(l) / n_samples; uint32_t r_cid = thrust::get<0>(r) / n_samples; if (l_cid != r_cid) { return r; } return Triple(thrust::get<0>(r), 
thrust::get<1>(l) + thrust::get<1>(r), // fp thrust::get<2>(l) + thrust::get<2>(r)); // tp }, d_fptp.size()); // scatter unique FP_PREV/TP_PREV values auto d_neg_pos = dh::ToSpan(cache->neg_pos); // When dataset is not empty, each class must have at least 1 (unique) sample // prediction, so no need to handle special case. dh::LaunchN(d_unique_idx.size(), [=] __device__(size_t i) { if (d_unique_idx[i] % n_samples == 0) { // first unique index is 0 assert(d_unique_idx[i] % n_samples == 0); d_neg_pos[d_unique_idx[i]] = {0, 0}; // class_id * n_samples = i return; } uint32_t class_id = d_unique_idx[i] / n_samples; d_neg_pos[d_unique_idx[i]] = d_fptp[d_unique_idx[i] - 1]; if (i == LastOf(class_id, d_unique_class_ptr)) { // last one needs to be included. size_t last = d_unique_idx[LastOf(class_id, d_unique_class_ptr)]; d_neg_pos[LastOf(class_id, d_class_ptr)] = d_fptp[last - 1]; return; } }); /** * Reduce the result for each class */ auto key_in = dh::MakeTransformIterator<uint32_t>( thrust::make_counting_iterator(0), [=] __device__(size_t i) { size_t class_id = d_unique_idx[i] / n_samples; return class_id; }); auto val_in = dh::MakeTransformIterator<float>( thrust::make_counting_iterator(0), [=] __device__(size_t i) { size_t class_id = d_unique_idx[i] / n_samples; float fp, tp; float fp_prev, tp_prev; if (i == d_unique_class_ptr[class_id]) { // first item is ignored, we use this thread to calculate the last item thrust::tie(fp, tp) = d_fptp[class_id * n_samples + (n_samples - 1)]; thrust::tie(fp_prev, tp_prev) = d_neg_pos[d_unique_idx[LastOf(class_id, d_unique_class_ptr)]]; } else { thrust::tie(fp, tp) = d_fptp[d_unique_idx[i] - 1]; thrust::tie(fp_prev, tp_prev) = d_neg_pos[d_unique_idx[i - 1]]; } float auc = TrapesoidArea(fp_prev, fp, tp_prev, tp); return auc; }); thrust::reduce_by_key(thrust::cuda::par(alloc), key_in, key_in + d_unique_idx.size(), val_in, thrust::make_discard_iterator(), d_auc.begin()); /** * Scale the classes with number of samples for each class. 
*/ dh::TemporaryArray<float> resutls(n_classes * 4); auto d_results = dh::ToSpan(resutls); auto local_area = d_results.subspan(0, n_classes); auto fp = d_results.subspan(n_classes, n_classes); auto tp = d_results.subspan(2 * n_classes, n_classes); auto auc = d_results.subspan(3 * n_classes, n_classes); dh::LaunchN(n_classes, [=] __device__(size_t c) { auc[c] = s_d_auc[c]; auto last = d_fptp[n_samples * c + (n_samples - 1)]; fp[c] = last.first; tp[c] = last.second; local_area[c] = last.first * last.second; }); return ScaleClasses(d_results, local_area, fp, tp, auc, cache, n_classes); } namespace { struct RankScanItem { size_t idx; float predt; float w; bst_group_t group_id; }; } // anonymous namespace std::pair<float, uint32_t> GPURankingAUC(common::Span<float const> predts, MetaInfo const &info, int32_t device, std::shared_ptr<DeviceAUCCache> *p_cache) { auto& cache = *p_cache; if (!cache) { cache.reset(new DeviceAUCCache); } cache->Init(predts, false, device); dh::caching_device_vector<bst_group_t> group_ptr(info.group_ptr_); dh::XGBCachingDeviceAllocator<char> alloc; auto d_group_ptr = dh::ToSpan(group_ptr); /** * Validate the dataset */ auto check_it = dh::MakeTransformIterator<size_t>( thrust::make_counting_iterator(0), [=] __device__(size_t i) { return d_group_ptr[i + 1] - d_group_ptr[i]; }); size_t n_valid = thrust::count_if( thrust::cuda::par(alloc), check_it, check_it + group_ptr.size() - 1, [=] __device__(size_t len) { return len >= 3; }); if (n_valid < info.group_ptr_.size() - 1) { InvalidGroupAUC(); } if (n_valid == 0) { return std::make_pair(0.0f, 0); } /** * Sort the labels */ auto d_labels = info.labels_.ConstDeviceSpan(); auto d_sorted_idx = dh::ToSpan(cache->sorted_idx); dh::SegmentedArgSort<false>(d_labels, d_group_ptr, d_sorted_idx); auto d_weights = info.weights_.ConstDeviceSpan(); dh::caching_device_vector<size_t> threads_group_ptr(group_ptr.size(), 0); auto d_threads_group_ptr = dh::ToSpan(threads_group_ptr); // Use max to represent triangle 
auto n_threads = common::SegmentedTrapezoidThreads( d_group_ptr, d_threads_group_ptr, std::numeric_limits<size_t>::max()); // get the coordinate in nested summation auto get_i_j = [=]__device__(size_t idx, size_t query_group_idx) { auto data_group_begin = d_group_ptr[query_group_idx]; size_t n_samples = d_group_ptr[query_group_idx + 1] - data_group_begin; auto thread_group_begin = d_threads_group_ptr[query_group_idx]; auto idx_in_thread_group = idx - thread_group_begin; size_t i, j; common::UnravelTrapeziodIdx(idx_in_thread_group, n_samples, &i, &j); // we use global index among all groups for sorted idx, so i, j should also be global // index. i += data_group_begin; j += data_group_begin; return thrust::make_pair(i, j); }; // NOLINT auto in = dh::MakeTransformIterator<RankScanItem>( thrust::make_counting_iterator(0), [=] __device__(size_t idx) { bst_group_t query_group_idx = dh::SegmentId(d_threads_group_ptr, idx); auto data_group_begin = d_group_ptr[query_group_idx]; size_t n_samples = d_group_ptr[query_group_idx + 1] - data_group_begin; if (n_samples < 3) { // at least 3 documents are required. return RankScanItem{idx, 0, 0, query_group_idx}; } size_t i, j; thrust::tie(i, j) = get_i_j(idx, query_group_idx); float predt = predts[d_sorted_idx[i]] - predts[d_sorted_idx[j]]; float w = common::Sqr(d_weights.empty() ? 
1.0f : d_weights[query_group_idx]); if (predt > 0) { predt = 1.0; } else if (predt == 0) { predt = 0.5; } else { predt = 0; } predt *= w; return RankScanItem{idx, predt, w, query_group_idx}; }); dh::TemporaryArray<float> d_auc(group_ptr.size() - 1); auto s_d_auc = dh::ToSpan(d_auc); auto out = thrust::make_transform_output_iterator( dh::TypedDiscard<RankScanItem>{}, [=] __device__(RankScanItem const &item) -> RankScanItem { auto group_id = item.group_id; assert(group_id < d_group_ptr.size()); auto data_group_begin = d_group_ptr[group_id]; size_t n_samples = d_group_ptr[group_id + 1] - data_group_begin; // last item of current group if (item.idx == LastOf(group_id, d_threads_group_ptr)) { if (item.w > 0) { s_d_auc[group_id] = item.predt / item.w; } else { s_d_auc[group_id] = 0; } } return {}; // discard }); dh::InclusiveScan( in, out, [] __device__(RankScanItem const &l, RankScanItem const &r) { if (l.group_id != r.group_id) { return r; } return RankScanItem{r.idx, l.predt + r.predt, l.w + r.w, l.group_id}; }, n_threads); /** * Scale the AUC with number of items in each group. */ float auc = thrust::reduce(thrust::cuda::par(alloc), dh::tbegin(s_d_auc), dh::tend(s_d_auc), 0.0f); return std::make_pair(auc, n_valid); } } // namespace metric } // namespace xgboost
538df99f19c3d1a08ccec28369646e75d223224d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * ldcp_decoder.h * ldpc3 * * Created by legal on 02/04/11. * Copyright 2011 ENSEIRB. All rights reserved. * */ /*----------------------------------------------------------------------------*/ #include "CGPU_Decoder_2NMS_SIMD.h" #include "../transpose/GPU_Transpose.h" #include "../transpose/GPU_Transpose_uint8.h" #include "../tools/debug_fx.h" static const size_t BLOCK_SIZE = 128; // 96 for exp. CGPU_Decoder_2NMS_SIMD::CGPU_Decoder_2NMS_SIMD(size_t _nb_frames, size_t n, size_t k, size_t m): CGPUDecoder(_nb_frames, n, k, m) { size_t nb_blocks = nb_frames / BLOCK_SIZE; printf("(II) Decoder configuration: BLOCK_SIZE = %ld, nb_frames = %ld, nb_blocks = %ld\n", BLOCK_SIZE, nb_frames, nb_blocks); struct hipDeviceProp_t devProp; hipGetDeviceProperties(&devProp, 0); printf("(II) Identifiant du GPU (CUDA) : %s\n", devProp.name); printf("(II) Nombre de Multi-Processor : %d\n", devProp.multiProcessorCount); printf("(II) Taille de memoire globale : %ld\n", devProp.totalGlobalMem); printf("(II) Taille de sharedMemPerBlock : %ld\n", devProp.sharedMemPerBlock); /* int regsPerBlock; int warpSize; size_t memPitch; int maxThreadsPerBlock; int clockRate; size_t totalConstMem; int major; int minor; int memoryClockRate; int memoryBusWidth; */ struct hipFuncAttributes attr; hipFuncGetAttributes(&attr, LDPC_Sched_Stage_1_2NMS_SIMD); int nMP = devProp.multiProcessorCount; // NOMBRE DE STREAM PROCESSOR int nWarp = attr.maxThreadsPerBlock/32; // PACKET DE THREADs EXECUTABLES EN PARALLELE int nThreads = nWarp * 32; // NOMBRE DE THREAD MAXI PAR SP int nDOF = nb_frames; int nBperMP = 65536 / (attr.numRegs); // Nr of blocks per MP int minB = min(nBperMP*nThreads,1024); int nBlocks = max(minB/nThreads * nMP, nDOF/nThreads); //Total number of blocks printf("(II) Nombre de Warp : %d\n", nWarp); printf("(II) Nombre de Threads : %d\n", nThreads); printf("(II) LDPC_Sched_Stage_1_MS_SIMD :\n"); printf("(II) - 
Nombre de regist/thr : %d\n", attr.numRegs); printf("(II) - Nombre de local/thr : %ld\n", attr.localSizeBytes); printf("(II) - Nombre de shared/thr : %ld\n", attr.sharedSizeBytes); printf("(II) Nombre de nDOF : %d\n", nDOF); printf("(II) Nombre de nBperMP : %d\n", nBperMP); printf("(II) Nombre de nBperMP : %d\n", minB); printf("(II) Nombre de nBperMP : %d\n", nBlocks); printf("(II) Best BLOCK_SIZE : %d\n", nThreads * nBperMP); printf("(II) Best #codewords : %d\n", 0); if( attr.numRegs <= 32 ){ printf("(II) Best BLOCK_SIZE : %d\n", 128); printf("(II) Best BLOCK_SIZE : %d\n", nBperMP/256); }else if( attr.numRegs <= 40 ){ printf("(II) Best BLOCK_SIZE : %d\n", 96); printf("(II) Best BLOCK_SIZE : %d\n", nBperMP/256); }else if( attr.numRegs <= 48 ){ printf("(II) Best BLOCK_SIZE : %d\n", 128); printf("(II) Best BLOCK_SIZE : %d\n", nBperMP/256); }else if( attr.numRegs < 64 ){ printf("(II) Best BLOCK_SIZE : %d\n", 96); printf("(II) Best BLOCK_SIZE : %d\n", nBperMP/256); }else{ printf("(II) Best BLOCK_SIZE : ???\n"); exit( 0 ); } } CGPU_Decoder_2NMS_SIMD::~CGPU_Decoder_2NMS_SIMD() { } void CGPU_Decoder_2NMS_SIMD::initialize() { } void CGPU_Decoder_2NMS_SIMD::decode(float Intrinsic_fix[_N], int Rprime_fix[_N], int nombre_iterations) { hipError_t Status; size_t nb_blocks = nb_frames / BLOCK_SIZE; if( nb_frames % BLOCK_SIZE != 0 ){ printf("(%ld - %ld) (%ld - %ld)\n", nb_frames, BLOCK_SIZE, nb_frames/BLOCK_SIZE, nb_frames%BLOCK_SIZE); exit( 0 ); } // // ON COPIE LES DONNEES DANS => device_V // Status = hipMemcpy/*Async*/(d_MSG_C_2_V, Intrinsic_fix, sz_nodes * sizeof(float), hipMemcpyHostToDevice); ERROR_CHECK(Status, __FILE__, __LINE__); { dim3 grid(1, nb_frames/32); dim3 threads(32, 32); hipLaunchKernelGGL(( Interleaver_uint8), dim3(grid), dim3(threads), 0, 0, (int*)d_MSG_C_2_V, (int*)device_V, _N, nb_frames); } hipLaunchKernelGGL(( LDPC_Sched_Stage_1_2NMS_SIMD), dim3(nb_blocks), dim3(BLOCK_SIZE), 0, 0, (unsigned int*)device_V, (unsigned int*)d_MSG_C_2_V, d_transpose, 
nombre_iterations); // // DESENTRELACEMENT DES DONNEES POST-DECODAGE (device_V => device_R) // #define NORMAL 1 #if NORMAL == 1 { // printf("(II) NB_TRAMES = %d;\n", nb_frames); // printf("(II) FRAME_LENGTH = %d;\n", _N); dim3 grid(1, nb_frames/32); dim3 threads(32, 32); // printf("(II) Processing grid = %d, %d, %d;\n", grid.x, grid.y, grid.z); // printf("(II) Thread grid = %d, %d, %d;\n", threads.x, threads.y, threads.z); hipLaunchKernelGGL(( InvInterleaver_uint8), dim3(grid), dim3(threads), 0, 0, (int*)device_V, (int*)d_MSG_C_2_V, _N, nb_frames); } #else { unsigned int NB_TRAMES = nb_frames; unsigned int FRAME_LENGTH = _N; dim3 grid(NB_TRAMES/TILE_DIM, FRAME_LENGTH/TILE_DIM), threads(TILE_DIM,BLOCK_ROWS); hipLaunchKernelGGL(( transposeDiagonal_and_hard_decision), dim3(grid), dim3(threads), 0, 0, (unsigned int*)d_MSG_C_2_V, (unsigned int*)device_V, NB_TRAMES, FRAME_LENGTH); } #endif // // // Status = hipMemcpy(Rprime_fix, d_MSG_C_2_V, sz_nodes * sizeof(float), hipMemcpyDeviceToHost); ERROR_CHECK(Status, __FILE__, __LINE__); }
538df99f19c3d1a08ccec28369646e75d223224d.cu
/* * ldcp_decoder.h * ldpc3 * * Created by legal on 02/04/11. * Copyright 2011 ENSEIRB. All rights reserved. * */ /*----------------------------------------------------------------------------*/ #include "CGPU_Decoder_2NMS_SIMD.h" #include "../transpose/GPU_Transpose.h" #include "../transpose/GPU_Transpose_uint8.h" #include "../tools/debug_fx.h" static const size_t BLOCK_SIZE = 128; // 96 for exp. CGPU_Decoder_2NMS_SIMD::CGPU_Decoder_2NMS_SIMD(size_t _nb_frames, size_t n, size_t k, size_t m): CGPUDecoder(_nb_frames, n, k, m) { size_t nb_blocks = nb_frames / BLOCK_SIZE; printf("(II) Decoder configuration: BLOCK_SIZE = %ld, nb_frames = %ld, nb_blocks = %ld\n", BLOCK_SIZE, nb_frames, nb_blocks); struct cudaDeviceProp devProp; cudaGetDeviceProperties(&devProp, 0); printf("(II) Identifiant du GPU (CUDA) : %s\n", devProp.name); printf("(II) Nombre de Multi-Processor : %d\n", devProp.multiProcessorCount); printf("(II) Taille de memoire globale : %ld\n", devProp.totalGlobalMem); printf("(II) Taille de sharedMemPerBlock : %ld\n", devProp.sharedMemPerBlock); /* int regsPerBlock; int warpSize; size_t memPitch; int maxThreadsPerBlock; int clockRate; size_t totalConstMem; int major; int minor; int memoryClockRate; int memoryBusWidth; */ struct cudaFuncAttributes attr; cudaFuncGetAttributes(&attr, LDPC_Sched_Stage_1_2NMS_SIMD); int nMP = devProp.multiProcessorCount; // NOMBRE DE STREAM PROCESSOR int nWarp = attr.maxThreadsPerBlock/32; // PACKET DE THREADs EXECUTABLES EN PARALLELE int nThreads = nWarp * 32; // NOMBRE DE THREAD MAXI PAR SP int nDOF = nb_frames; int nBperMP = 65536 / (attr.numRegs); // Nr of blocks per MP int minB = min(nBperMP*nThreads,1024); int nBlocks = max(minB/nThreads * nMP, nDOF/nThreads); //Total number of blocks printf("(II) Nombre de Warp : %d\n", nWarp); printf("(II) Nombre de Threads : %d\n", nThreads); printf("(II) LDPC_Sched_Stage_1_MS_SIMD :\n"); printf("(II) - Nombre de regist/thr : %d\n", attr.numRegs); printf("(II) - Nombre de local/thr : %ld\n", 
attr.localSizeBytes); printf("(II) - Nombre de shared/thr : %ld\n", attr.sharedSizeBytes); printf("(II) Nombre de nDOF : %d\n", nDOF); printf("(II) Nombre de nBperMP : %d\n", nBperMP); printf("(II) Nombre de nBperMP : %d\n", minB); printf("(II) Nombre de nBperMP : %d\n", nBlocks); printf("(II) Best BLOCK_SIZE : %d\n", nThreads * nBperMP); printf("(II) Best #codewords : %d\n", 0); if( attr.numRegs <= 32 ){ printf("(II) Best BLOCK_SIZE : %d\n", 128); printf("(II) Best BLOCK_SIZE : %d\n", nBperMP/256); }else if( attr.numRegs <= 40 ){ printf("(II) Best BLOCK_SIZE : %d\n", 96); printf("(II) Best BLOCK_SIZE : %d\n", nBperMP/256); }else if( attr.numRegs <= 48 ){ printf("(II) Best BLOCK_SIZE : %d\n", 128); printf("(II) Best BLOCK_SIZE : %d\n", nBperMP/256); }else if( attr.numRegs < 64 ){ printf("(II) Best BLOCK_SIZE : %d\n", 96); printf("(II) Best BLOCK_SIZE : %d\n", nBperMP/256); }else{ printf("(II) Best BLOCK_SIZE : ???\n"); exit( 0 ); } } CGPU_Decoder_2NMS_SIMD::~CGPU_Decoder_2NMS_SIMD() { } void CGPU_Decoder_2NMS_SIMD::initialize() { } void CGPU_Decoder_2NMS_SIMD::decode(float Intrinsic_fix[_N], int Rprime_fix[_N], int nombre_iterations) { cudaError_t Status; size_t nb_blocks = nb_frames / BLOCK_SIZE; if( nb_frames % BLOCK_SIZE != 0 ){ printf("(%ld - %ld) (%ld - %ld)\n", nb_frames, BLOCK_SIZE, nb_frames/BLOCK_SIZE, nb_frames%BLOCK_SIZE); exit( 0 ); } // // ON COPIE LES DONNEES DANS => device_V // Status = cudaMemcpy/*Async*/(d_MSG_C_2_V, Intrinsic_fix, sz_nodes * sizeof(float), cudaMemcpyHostToDevice); ERROR_CHECK(Status, __FILE__, __LINE__); { dim3 grid(1, nb_frames/32); dim3 threads(32, 32); Interleaver_uint8<<<grid, threads>>>((int*)d_MSG_C_2_V, (int*)device_V, _N, nb_frames); } LDPC_Sched_Stage_1_2NMS_SIMD<<<nb_blocks, BLOCK_SIZE>>>((unsigned int*)device_V, (unsigned int*)d_MSG_C_2_V, d_transpose, nombre_iterations); // // DESENTRELACEMENT DES DONNEES POST-DECODAGE (device_V => device_R) // #define NORMAL 1 #if NORMAL == 1 { // printf("(II) NB_TRAMES = %d;\n", 
nb_frames); // printf("(II) FRAME_LENGTH = %d;\n", _N); dim3 grid(1, nb_frames/32); dim3 threads(32, 32); // printf("(II) Processing grid = %d, %d, %d;\n", grid.x, grid.y, grid.z); // printf("(II) Thread grid = %d, %d, %d;\n", threads.x, threads.y, threads.z); InvInterleaver_uint8<<<grid, threads>>>((int*)device_V, (int*)d_MSG_C_2_V, _N, nb_frames); } #else { unsigned int NB_TRAMES = nb_frames; unsigned int FRAME_LENGTH = _N; dim3 grid(NB_TRAMES/TILE_DIM, FRAME_LENGTH/TILE_DIM), threads(TILE_DIM,BLOCK_ROWS); transposeDiagonal_and_hard_decision<<<grid, threads>>>((unsigned int*)d_MSG_C_2_V, (unsigned int*)device_V, NB_TRAMES, FRAME_LENGTH); } #endif // // // Status = cudaMemcpy(Rprime_fix, d_MSG_C_2_V, sz_nodes * sizeof(float), cudaMemcpyDeviceToHost); ERROR_CHECK(Status, __FILE__, __LINE__); }
dba61d1efce939a9e764977f5b15175efdefb4be.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // ### // ### // ### Practical Course: GPU Programming in Computer Vision // ### // ### // ### Technical University Munich, Computer Vision Group // ### Summer Semester 2017, September 11 - October 9 // ### #include "helper.h" #include <iostream> #include "stdio.h" using namespace std; // uncomment to use the camera //#define CAMERA //Exercise 7 float* getKernel(float sigma, int radius) { int len = (size_t) radius * 2 + 1; float *k = new float[len * len]; float sum = 0; float tmp = 0; for (int j = 0; j < len; j++) { for (int i = 0; i < len; i++) { tmp = expf(-(powf((i - radius), 2.f) + powf(j - radius, 2.f)) / (2.f * powf(sigma, 2.f))) / (2.f * 3.1416f * powf(sigma, 2.f)); sum = sum + tmp; k[i + (size_t) j * len] = tmp; } } float upscale = 1.f / sum; for (int ind = 0; ind < len * len; ind++) k[ind] = k[ind] * upscale; return k; } void getkx(float *kx) { kx[0] = -3.f / 32.f; kx[1] = 0.f; kx[2] = 3.f / 32.f; kx[3] = -10.f / 32.f; kx[4] = 0.f; kx[5] = 10.f / 32.f; kx[6] = -3.f / 32.f; kx[7] = 0.f; kx[8] = 3.f / 32.f; } void getky(float *ky) { ky[0] = -3.f/32.f; ky[3] = 0.f; ky[6] = 3.f/32.f; ky[1] = -10.f/32.f; ky[4] = 0.f; ky[7] = 10.f/32.f; ky[2] = -3.f/32.f; ky[5] = 0.f; ky[8] = 3.f/32.f; } __global__ void Gconv2(float *imgO, float *imgI, float *kernel, int w, int h, int nc, int r) { int x = threadIdx.x + blockDim.x * blockIdx.x; int y = threadIdx.y + blockDim.y * blockIdx.y; int kw = (size_t)2*r + 1; float value = 0.f; if(x<w && y<h) { for (int c = 0; c < nc; c++) { float sum = 0.f; size_t ind = x + (size_t) y * w + (size_t) w * h * c; for (int kj = -r; kj < r; kj++) { for (int ki = -r; ki < r; ki++) { int kii = min(max(0, x+ki),w-1); int kjj = min(max(0, y+kj),h-1); value = imgI[kii + (size_t)(kjj*w) + (size_t)w*h*c]; sum += value * kernel[(r - ki) + (r - kj) * kw]; } } imgO[ind] = sum; } } } __global__ void robGrad(float *gradientx, float *gradienty, float *imgI, float 
*kernelx, float *kernely, int w, int h, int nc) { int x = threadIdx.x + blockDim.x * blockIdx.x; int y = threadIdx.y + blockDim.y * blockIdx.y; int r = 1; int kw = (size_t)2*r + 1; float value = 0.f; if(x<w && y<h) { for (int c = 0; c < nc; c++) { float sumx = 0.f; float sumy = 0.f; size_t ind = x + (size_t) y * w + (size_t) w * h * c; for (int kj = -r; kj < r+1; kj++) { for (int ki = -r; ki < r+1; ki++) { int kii = min(max(0, x+ki), w-1); int kjj = min(max(0, y+kj), h-1); value = imgI[kii + (size_t)(kjj*w) + (size_t)w*h*c]; sumx += value * kernelx[(r + ki) + (r + kj) * kw]; sumy += value * kernely[(r + ki) + (r + kj) * kw]; } } gradientx[ind] = sumx; gradienty[ind] = sumy; } } } __global__ void getM(float *m11, float *m12, float *m22, float *gradientx, float *gradienty, int w, int h, int nc) { int x = threadIdx.x + blockDim.x * blockIdx.x; int y = threadIdx.y + blockDim.y * blockIdx.y; if(x<w && y<h) { size_t ind = x + (size_t) y * w; for (int c = 0; c < nc; c++) { size_t indc = x + (size_t) y * w + (size_t)w*h*c; m11[ind] += gradientx[indc] * gradientx[indc]; m12[ind] += gradientx[indc] * gradienty[indc]; m22[ind] += gradienty[indc] * gradienty[indc]; } } } __device__ void eigenValue(float *e1, float *e2, float a, float b, float c, float d) { float T = a+d; float det = a*d-b*c; *e1 = T/2.f - sqrt(T*T/4.f - det); *e2 = T/2.f + sqrt(T*T/4.f - det); } __global__ void getFeature(float *feature, float *m11, float *m12, float *m22, float *imgI, int w, int h, float alpha, float belta) { int x = threadIdx.x + blockDim.x * blockIdx.x; int y = threadIdx.y + blockDim.y * blockIdx.y; float e1 = 0.f; float e2 = 0.f; if(x<w && y<h) { size_t ind = x + (size_t) y * w; size_t indg = x + (size_t) y * w + (size_t)w*h*1; size_t indb = x + (size_t) y * w + (size_t)w*h*2; eigenValue(&e1, &e2, m11[ind], m12[ind], m12[ind], m22[ind]); if(x >= 5 && x<=w-1 && y == 150) printf("%f %f %f %f %f %f,\n", e1, e2,m11[ind], m12[ind], m12[ind], m22[ind]); if(e2 >= e1 && e1>= alpha){ feature[ind] = 
1.f; feature[indg] = 0.f; feature[indb] = 0.f; }else if(e1 <= belta && alpha <= e2){ feature[ind] = 1.f; feature[indg] = 1.f; feature[indb] = 0.f; }else{ feature[ind] = imgI[ind]*0.5f; feature[indg] = imgI[ind]*0.5f; feature[indb] = imgI[ind]*0.5f; } } } int main(int argc, char **argv) { // Before the GPU can process your kernels, a so called "CUDA context" must be initialized // This happens on the very first call to a CUDA function, and takes some time (around half a second) // We will do it right here, so that the run time measurements are accurate hipDeviceSynchronize(); CUDA_CHECK; // Reading command line parameters: // getParam("param", var, argc, argv) looks whether "-param xyz" is specified, and if so stores the value "xyz" in "var" // If "-param" is not specified, the value of "var" remains unchanged // // return value: getParam("param", ...) returns true if "-param" is specified, and false otherwise #ifdef CAMERA #else // input image string image = ""; bool ret = getParam("i", image, argc, argv); if (!ret) cerr << "ERROR: no image specified" << endl; if (argc <= 1) { cout << "Usage: " << argv[0] << " -i <image> [-repeats <repeats>] [-gray]" << endl; return 1; } #endif // number of computation repetitions to get a better run time measurement int repeats = 1; getParam("repeats", repeats, argc, argv); cout << "repeats: " << repeats << endl; // load the input image as grayscale if "-gray" is specifed bool gray = false; getParam("gray", gray, argc, argv); cout << "gray: " << gray << endl; // ### Define your own parameters here as needed // Init camera / Load input image #ifdef CAMERA // Init camera cv::VideoCapture camera(0); if(!camera.isOpened()) { cerr << "ERROR: Could not open camera" << endl; return 1; } int camW = 640; int camH = 480; camera.set(CV_CAP_PROP_FRAME_WIDTH,camW); camera.set(CV_CAP_PROP_FRAME_HEIGHT,camH); // read in first frame to get the dimensions cv::Mat mIn; camera >> mIn; #else // Load the input image using opencv (load as grayscale if 
"gray==true", otherwise as is (may be color or grayscale)) cv::Mat mIn = cv::imread(image.c_str(), (gray? CV_LOAD_IMAGE_GRAYSCALE : -1)); // check if (mIn.data == NULL) { cerr << "ERROR: Could not load image " << image << endl; return 1; } #endif // convert to float representation (opencv loads image values as single bytes by default) mIn.convertTo(mIn,CV_32F); // convert range of each channel to [0,1] (opencv default is [0,255]) mIn /= 255.f; // get image dimensions int w = mIn.cols; // width int h = mIn.rows; // height int nc = mIn.channels(); // number of channels cout << "image: " << w << " x " << h << endl; // Set the output image format // ### // ### // ### TODO: Change the output image format as needed // ### // ### cv::Mat mOut(h,w,mIn.type()); // mOut will have the same number of channels as the input image, nc layers //cv::Mat mOut(h,w,CV_32FC3); // mOut will be a color image, 3 layers //cv::Mat mOut(h,w,CV_32FC1); // mOut will be a grayscale image, 1 layer // ### Define your own output images here as needed // Allocate arrays // input/output image width: w // input/output image height: h // input image number of channels: nc // output image number of channels: mOut.channels(), as defined above (nc, 3, or 1) // allocate raw input image array float *imgIn = new float[(size_t)w*h*nc]; // allocate raw output array (the computation result will be stored in this array, then later converted to mOut for displaying) // For camera mode: Make a loop to read in camera frames #ifdef CAMERA // Read a camera image frame every 30 milliseconds: // cv::waitKey(30) waits 30 milliseconds for a keyboard input, // returns a value <0 if no key is pressed during this time, returns immediately with a value >=0 if a key is pressed while (cv::waitKey(30) < 0) { // Get camera image camera >> mIn; // convert to float representation (opencv loads image values as single bytes by default) mIn.convertTo(mIn,CV_32F); // convert range of each channel to [0,1] (opencv default is [0,255]) 
mIn /= 255.f; #endif // Init raw input image array // opencv images are interleaved: rgb rgb rgb... (actually bgr bgr bgr...) // But for CUDA it's better to work with layered images: rrr... ggg... bbb... // So we will convert as necessary, using interleaved "cv::Mat" for loading/saving/displaying, and layered "float*" for CUDA computations convert_mat_to_layered (imgIn, mIn); Timer timer; timer.start(); // ### // ### // ### TODO: Main computation // ### // ### float sigma = 0.5f; int r = ceil(3.f * sigma); int len = w*h*nc; int lenofK = 2 * r + 1; float alpha = 0.001f; float belta = 1e-4f; //## float *k = getKernel(sigma, r); float *dkx = new float[9]; getkx(dkx); float *dky = new float[9]; getky(dky); float *convimO = new float[(size_t)w*h*nc]; float *m11 = new float[(size_t)w*h]; float *m12 = new float[(size_t)w*h]; float *m22 = new float[(size_t)w*h]; float *vx = new float[(size_t)w*h*nc]; float *vy = new float[(size_t)w*h*nc]; float *feature = new float[(size_t)w*h*3]; cv::Mat convImg(h,w,mIn.type()); cv::Mat Im11(h,w,CV_32FC1); cv::Mat Im12(h,w,CV_32FC1); cv::Mat Im22(h,w,CV_32FC1); cv::Mat Ivx(h,w,mIn.type()); cv::Mat Ivy(h,w,mIn.type()); cv::Mat Ifeature(h,w,CV_32FC3); cout<< "GPU is running!!!!"; float *d_k; float *d_imgIn; float *d_kx; float *d_ky; float *d_m11; float *d_m12; float *d_m22; float *d_convimO; float *d_gradientx; float *d_gradienty; float *d_feature; size_t nbytes = (size_t)(len)*sizeof(float); hipMalloc(&d_k, (size_t)lenofK*lenofK*sizeof(float)); CUDA_CHECK; hipMalloc(&d_kx, (size_t)9*sizeof(float)); CUDA_CHECK; hipMalloc(&d_ky, (size_t)9*sizeof(float)); CUDA_CHECK; hipMalloc(&d_imgIn, nbytes); CUDA_CHECK; hipMalloc(&d_convimO, nbytes); CUDA_CHECK; hipMalloc(&d_gradientx, nbytes); CUDA_CHECK; hipMalloc(&d_gradienty, nbytes); CUDA_CHECK; hipMalloc(&d_m11, (size_t)w*h * sizeof(float)); CUDA_CHECK; hipMalloc(&d_m12, (size_t)w*h * sizeof(float)); CUDA_CHECK; hipMalloc(&d_m22, (size_t)w*h * sizeof(float)); CUDA_CHECK; hipMalloc(&d_feature, 
(size_t)w*h*3 * sizeof(float)); CUDA_CHECK; hipMemset(d_convimO, 0, nbytes);CUDA_CHECK; hipMemset(d_gradientx, 0, nbytes);CUDA_CHECK; hipMemset(d_gradienty, 0, nbytes);CUDA_CHECK; hipMemset(d_m11, 0, (size_t)w*h * sizeof(float));CUDA_CHECK; hipMemset(d_m12, 0, (size_t)w*h * sizeof(float));CUDA_CHECK; hipMemset(d_m22, 0, (size_t)w*h * sizeof(float));CUDA_CHECK; hipMemset(d_feature, 0, (size_t)w*h*3 * sizeof(float));CUDA_CHECK; hipMemcpy(d_k, k, (size_t)lenofK*lenofK*sizeof(float), hipMemcpyHostToDevice);CUDA_CHECK; hipMemcpy(d_kx, dkx, (size_t)9*sizeof(float), hipMemcpyHostToDevice);CUDA_CHECK; hipMemcpy(d_ky, dky, (size_t)9*sizeof(float), hipMemcpyHostToDevice);CUDA_CHECK; hipMemcpy(d_imgIn, imgIn, nbytes, hipMemcpyHostToDevice);CUDA_CHECK; dim3 block = dim3(32,32,1); dim3 grid = dim3((block.x + w-1)/block.x, (block.y + h-1)/block.y, 1); hipLaunchKernelGGL(( Gconv2), dim3(grid), dim3(block), 0, 0, d_convimO, d_imgIn, d_k, w, h, nc, r);CUDA_CHECK; cout << "Convolution with Gauss kernel"<< endl; hipLaunchKernelGGL(( robGrad), dim3(grid), dim3(block), 0, 0, d_gradientx, d_gradienty, d_convimO, d_kx, d_ky, w, h, nc);CUDA_CHECK; cout << "rototianally gradient"<< endl; hipLaunchKernelGGL(( getM), dim3(grid), dim3(block), 0, 0, d_m11, d_m12, d_m22, d_gradientx, d_gradienty, w,h,nc);CUDA_CHECK; hipLaunchKernelGGL(( Gconv2), dim3(grid), dim3(block), 0, 0, d_m11, d_m11, d_k, w, h, 1, r);CUDA_CHECK; hipLaunchKernelGGL(( Gconv2), dim3(grid), dim3(block), 0, 0, d_m12, d_m12, d_k, w, h, 1, r);CUDA_CHECK; hipLaunchKernelGGL(( Gconv2), dim3(grid), dim3(block), 0, 0, d_m22, d_m22, d_k, w, h, 1, r);CUDA_CHECK; hipLaunchKernelGGL(( getFeature), dim3(grid), dim3(block), 0, 0, d_feature, d_m11, d_m12, d_m22, d_imgIn, w, h, alpha, belta);CUDA_CHECK; hipMemcpy(convimO, d_convimO, nbytes, hipMemcpyDeviceToHost);CUDA_CHECK; hipMemcpy(vx, d_gradientx, nbytes, hipMemcpyDeviceToHost);CUDA_CHECK; hipMemcpy(vy, d_gradienty, nbytes, hipMemcpyDeviceToHost);CUDA_CHECK; // for(int xi = 
(size_t)(h-1)*w; xi < (size_t)h*w; xi++){ // printf("%f \n", vy[xi]); // } hipMemcpy(m11, d_m11, (size_t)w*h * sizeof(float), hipMemcpyDeviceToHost);CUDA_CHECK; hipMemcpy(m12, d_m12, (size_t)w*h * sizeof(float), hipMemcpyDeviceToHost);CUDA_CHECK; hipMemcpy(m22, d_m22, (size_t)w*h * sizeof(float), hipMemcpyDeviceToHost);CUDA_CHECK; hipMemcpy(feature, d_feature, (size_t)w*h*3 * sizeof(float), hipMemcpyDeviceToHost);CUDA_CHECK; hipFree(d_k);CUDA_CHECK; hipFree(d_kx);CUDA_CHECK; hipFree(d_ky);CUDA_CHECK; hipFree(d_convimO);CUDA_CHECK; hipFree(d_m11);CUDA_CHECK; hipFree(d_m12);CUDA_CHECK; hipFree(d_m22);CUDA_CHECK; hipFree(d_gradientx);CUDA_CHECK; hipFree(d_gradienty);CUDA_CHECK; hipFree(d_imgIn);CUDA_CHECK; hipFree(d_feature);CUDA_CHECK; // show input image showImage("Input", mIn, 100, 100); // show at position (x_from_left=100,y_from_above=100) // show output image: first convert to interleaved opencv format from the layered raw array // ### Display your own output images here as needed convert_layered_to_mat(convImg, convimO); convert_layered_to_mat(Im11, m11); convert_layered_to_mat(Im12, m12); convert_layered_to_mat(Im22, m22); convert_layered_to_mat(Ivx, vx); convert_layered_to_mat(Ivy, vy); convert_layered_to_mat(Ifeature, feature); showImage("convolution GPU", convImg, 100, 100); int scaleup = 100.f; Im11 *= scaleup; Im12 *= scaleup; Im22 *= scaleup; showImage("m11", Im11, 100, 100); showImage("m12", Im12, 100, 100); showImage("m22", Im22, 100, 100); Ivx *= 10.f; Ivy *= 10.f; showImage("vx", Ivx, 100, 100); showImage("vy", Ivy, 100, 100); showImage("feature", Ifeature, 100, 100); #ifdef CAMERA // end of camera loop } #else // wait for key inputs cv::waitKey(0); #endif // save input and result // cv::imwrite("image_input.png",mIn*255.f); // "imwrite" assumes channel range [0,255] // cv::imwrite("image_result.png",mOut*255.f); // free allocated arrays delete[] imgIn; // delete[] imgOut; delete[] k; delete[] dkx; delete[] dky; delete[] convimO; // close all opencv 
windows cvDestroyAllWindows(); return 0; }
dba61d1efce939a9e764977f5b15175efdefb4be.cu
// ### // ### // ### Practical Course: GPU Programming in Computer Vision // ### // ### // ### Technical University Munich, Computer Vision Group // ### Summer Semester 2017, September 11 - October 9 // ### #include "helper.h" #include <iostream> #include "stdio.h" using namespace std; // uncomment to use the camera //#define CAMERA //Exercise 7 float* getKernel(float sigma, int radius) { int len = (size_t) radius * 2 + 1; float *k = new float[len * len]; float sum = 0; float tmp = 0; for (int j = 0; j < len; j++) { for (int i = 0; i < len; i++) { tmp = expf(-(powf((i - radius), 2.f) + powf(j - radius, 2.f)) / (2.f * powf(sigma, 2.f))) / (2.f * 3.1416f * powf(sigma, 2.f)); sum = sum + tmp; k[i + (size_t) j * len] = tmp; } } float upscale = 1.f / sum; for (int ind = 0; ind < len * len; ind++) k[ind] = k[ind] * upscale; return k; } void getkx(float *kx) { kx[0] = -3.f / 32.f; kx[1] = 0.f; kx[2] = 3.f / 32.f; kx[3] = -10.f / 32.f; kx[4] = 0.f; kx[5] = 10.f / 32.f; kx[6] = -3.f / 32.f; kx[7] = 0.f; kx[8] = 3.f / 32.f; } void getky(float *ky) { ky[0] = -3.f/32.f; ky[3] = 0.f; ky[6] = 3.f/32.f; ky[1] = -10.f/32.f; ky[4] = 0.f; ky[7] = 10.f/32.f; ky[2] = -3.f/32.f; ky[5] = 0.f; ky[8] = 3.f/32.f; } __global__ void Gconv2(float *imgO, float *imgI, float *kernel, int w, int h, int nc, int r) { int x = threadIdx.x + blockDim.x * blockIdx.x; int y = threadIdx.y + blockDim.y * blockIdx.y; int kw = (size_t)2*r + 1; float value = 0.f; if(x<w && y<h) { for (int c = 0; c < nc; c++) { float sum = 0.f; size_t ind = x + (size_t) y * w + (size_t) w * h * c; for (int kj = -r; kj < r; kj++) { for (int ki = -r; ki < r; ki++) { int kii = min(max(0, x+ki),w-1); int kjj = min(max(0, y+kj),h-1); value = imgI[kii + (size_t)(kjj*w) + (size_t)w*h*c]; sum += value * kernel[(r - ki) + (r - kj) * kw]; } } imgO[ind] = sum; } } } __global__ void robGrad(float *gradientx, float *gradienty, float *imgI, float *kernelx, float *kernely, int w, int h, int nc) { int x = threadIdx.x + blockDim.x * 
blockIdx.x; int y = threadIdx.y + blockDim.y * blockIdx.y; int r = 1; int kw = (size_t)2*r + 1; float value = 0.f; if(x<w && y<h) { for (int c = 0; c < nc; c++) { float sumx = 0.f; float sumy = 0.f; size_t ind = x + (size_t) y * w + (size_t) w * h * c; for (int kj = -r; kj < r+1; kj++) { for (int ki = -r; ki < r+1; ki++) { int kii = min(max(0, x+ki), w-1); int kjj = min(max(0, y+kj), h-1); value = imgI[kii + (size_t)(kjj*w) + (size_t)w*h*c]; sumx += value * kernelx[(r + ki) + (r + kj) * kw]; sumy += value * kernely[(r + ki) + (r + kj) * kw]; } } gradientx[ind] = sumx; gradienty[ind] = sumy; } } } __global__ void getM(float *m11, float *m12, float *m22, float *gradientx, float *gradienty, int w, int h, int nc) { int x = threadIdx.x + blockDim.x * blockIdx.x; int y = threadIdx.y + blockDim.y * blockIdx.y; if(x<w && y<h) { size_t ind = x + (size_t) y * w; for (int c = 0; c < nc; c++) { size_t indc = x + (size_t) y * w + (size_t)w*h*c; m11[ind] += gradientx[indc] * gradientx[indc]; m12[ind] += gradientx[indc] * gradienty[indc]; m22[ind] += gradienty[indc] * gradienty[indc]; } } } __device__ void eigenValue(float *e1, float *e2, float a, float b, float c, float d) { float T = a+d; float det = a*d-b*c; *e1 = T/2.f - sqrt(T*T/4.f - det); *e2 = T/2.f + sqrt(T*T/4.f - det); } __global__ void getFeature(float *feature, float *m11, float *m12, float *m22, float *imgI, int w, int h, float alpha, float belta) { int x = threadIdx.x + blockDim.x * blockIdx.x; int y = threadIdx.y + blockDim.y * blockIdx.y; float e1 = 0.f; float e2 = 0.f; if(x<w && y<h) { size_t ind = x + (size_t) y * w; size_t indg = x + (size_t) y * w + (size_t)w*h*1; size_t indb = x + (size_t) y * w + (size_t)w*h*2; eigenValue(&e1, &e2, m11[ind], m12[ind], m12[ind], m22[ind]); if(x >= 5 && x<=w-1 && y == 150) printf("%f %f %f %f %f %f,\n", e1, e2,m11[ind], m12[ind], m12[ind], m22[ind]); if(e2 >= e1 && e1>= alpha){ feature[ind] = 1.f; feature[indg] = 0.f; feature[indb] = 0.f; }else if(e1 <= belta && alpha <= e2){ 
feature[ind] = 1.f; feature[indg] = 1.f; feature[indb] = 0.f; }else{ feature[ind] = imgI[ind]*0.5f; feature[indg] = imgI[ind]*0.5f; feature[indb] = imgI[ind]*0.5f; } } } int main(int argc, char **argv) { // Before the GPU can process your kernels, a so called "CUDA context" must be initialized // This happens on the very first call to a CUDA function, and takes some time (around half a second) // We will do it right here, so that the run time measurements are accurate cudaDeviceSynchronize(); CUDA_CHECK; // Reading command line parameters: // getParam("param", var, argc, argv) looks whether "-param xyz" is specified, and if so stores the value "xyz" in "var" // If "-param" is not specified, the value of "var" remains unchanged // // return value: getParam("param", ...) returns true if "-param" is specified, and false otherwise #ifdef CAMERA #else // input image string image = ""; bool ret = getParam("i", image, argc, argv); if (!ret) cerr << "ERROR: no image specified" << endl; if (argc <= 1) { cout << "Usage: " << argv[0] << " -i <image> [-repeats <repeats>] [-gray]" << endl; return 1; } #endif // number of computation repetitions to get a better run time measurement int repeats = 1; getParam("repeats", repeats, argc, argv); cout << "repeats: " << repeats << endl; // load the input image as grayscale if "-gray" is specifed bool gray = false; getParam("gray", gray, argc, argv); cout << "gray: " << gray << endl; // ### Define your own parameters here as needed // Init camera / Load input image #ifdef CAMERA // Init camera cv::VideoCapture camera(0); if(!camera.isOpened()) { cerr << "ERROR: Could not open camera" << endl; return 1; } int camW = 640; int camH = 480; camera.set(CV_CAP_PROP_FRAME_WIDTH,camW); camera.set(CV_CAP_PROP_FRAME_HEIGHT,camH); // read in first frame to get the dimensions cv::Mat mIn; camera >> mIn; #else // Load the input image using opencv (load as grayscale if "gray==true", otherwise as is (may be color or grayscale)) cv::Mat mIn = 
cv::imread(image.c_str(), (gray? CV_LOAD_IMAGE_GRAYSCALE : -1)); // check if (mIn.data == NULL) { cerr << "ERROR: Could not load image " << image << endl; return 1; } #endif // convert to float representation (opencv loads image values as single bytes by default) mIn.convertTo(mIn,CV_32F); // convert range of each channel to [0,1] (opencv default is [0,255]) mIn /= 255.f; // get image dimensions int w = mIn.cols; // width int h = mIn.rows; // height int nc = mIn.channels(); // number of channels cout << "image: " << w << " x " << h << endl; // Set the output image format // ### // ### // ### TODO: Change the output image format as needed // ### // ### cv::Mat mOut(h,w,mIn.type()); // mOut will have the same number of channels as the input image, nc layers //cv::Mat mOut(h,w,CV_32FC3); // mOut will be a color image, 3 layers //cv::Mat mOut(h,w,CV_32FC1); // mOut will be a grayscale image, 1 layer // ### Define your own output images here as needed // Allocate arrays // input/output image width: w // input/output image height: h // input image number of channels: nc // output image number of channels: mOut.channels(), as defined above (nc, 3, or 1) // allocate raw input image array float *imgIn = new float[(size_t)w*h*nc]; // allocate raw output array (the computation result will be stored in this array, then later converted to mOut for displaying) // For camera mode: Make a loop to read in camera frames #ifdef CAMERA // Read a camera image frame every 30 milliseconds: // cv::waitKey(30) waits 30 milliseconds for a keyboard input, // returns a value <0 if no key is pressed during this time, returns immediately with a value >=0 if a key is pressed while (cv::waitKey(30) < 0) { // Get camera image camera >> mIn; // convert to float representation (opencv loads image values as single bytes by default) mIn.convertTo(mIn,CV_32F); // convert range of each channel to [0,1] (opencv default is [0,255]) mIn /= 255.f; #endif // Init raw input image array // opencv images are 
interleaved: rgb rgb rgb... (actually bgr bgr bgr...) // But for CUDA it's better to work with layered images: rrr... ggg... bbb... // So we will convert as necessary, using interleaved "cv::Mat" for loading/saving/displaying, and layered "float*" for CUDA computations convert_mat_to_layered (imgIn, mIn); Timer timer; timer.start(); // ### // ### // ### TODO: Main computation // ### // ### float sigma = 0.5f; int r = ceil(3.f * sigma); int len = w*h*nc; int lenofK = 2 * r + 1; float alpha = 0.001f; float belta = 1e-4f; //## float *k = getKernel(sigma, r); float *dkx = new float[9]; getkx(dkx); float *dky = new float[9]; getky(dky); float *convimO = new float[(size_t)w*h*nc]; float *m11 = new float[(size_t)w*h]; float *m12 = new float[(size_t)w*h]; float *m22 = new float[(size_t)w*h]; float *vx = new float[(size_t)w*h*nc]; float *vy = new float[(size_t)w*h*nc]; float *feature = new float[(size_t)w*h*3]; cv::Mat convImg(h,w,mIn.type()); cv::Mat Im11(h,w,CV_32FC1); cv::Mat Im12(h,w,CV_32FC1); cv::Mat Im22(h,w,CV_32FC1); cv::Mat Ivx(h,w,mIn.type()); cv::Mat Ivy(h,w,mIn.type()); cv::Mat Ifeature(h,w,CV_32FC3); cout<< "GPU is running!!!!"; float *d_k; float *d_imgIn; float *d_kx; float *d_ky; float *d_m11; float *d_m12; float *d_m22; float *d_convimO; float *d_gradientx; float *d_gradienty; float *d_feature; size_t nbytes = (size_t)(len)*sizeof(float); cudaMalloc(&d_k, (size_t)lenofK*lenofK*sizeof(float)); CUDA_CHECK; cudaMalloc(&d_kx, (size_t)9*sizeof(float)); CUDA_CHECK; cudaMalloc(&d_ky, (size_t)9*sizeof(float)); CUDA_CHECK; cudaMalloc(&d_imgIn, nbytes); CUDA_CHECK; cudaMalloc(&d_convimO, nbytes); CUDA_CHECK; cudaMalloc(&d_gradientx, nbytes); CUDA_CHECK; cudaMalloc(&d_gradienty, nbytes); CUDA_CHECK; cudaMalloc(&d_m11, (size_t)w*h * sizeof(float)); CUDA_CHECK; cudaMalloc(&d_m12, (size_t)w*h * sizeof(float)); CUDA_CHECK; cudaMalloc(&d_m22, (size_t)w*h * sizeof(float)); CUDA_CHECK; cudaMalloc(&d_feature, (size_t)w*h*3 * sizeof(float)); CUDA_CHECK; cudaMemset(d_convimO, 
0, nbytes);CUDA_CHECK; cudaMemset(d_gradientx, 0, nbytes);CUDA_CHECK; cudaMemset(d_gradienty, 0, nbytes);CUDA_CHECK; cudaMemset(d_m11, 0, (size_t)w*h * sizeof(float));CUDA_CHECK; cudaMemset(d_m12, 0, (size_t)w*h * sizeof(float));CUDA_CHECK; cudaMemset(d_m22, 0, (size_t)w*h * sizeof(float));CUDA_CHECK; cudaMemset(d_feature, 0, (size_t)w*h*3 * sizeof(float));CUDA_CHECK; cudaMemcpy(d_k, k, (size_t)lenofK*lenofK*sizeof(float), cudaMemcpyHostToDevice);CUDA_CHECK; cudaMemcpy(d_kx, dkx, (size_t)9*sizeof(float), cudaMemcpyHostToDevice);CUDA_CHECK; cudaMemcpy(d_ky, dky, (size_t)9*sizeof(float), cudaMemcpyHostToDevice);CUDA_CHECK; cudaMemcpy(d_imgIn, imgIn, nbytes, cudaMemcpyHostToDevice);CUDA_CHECK; dim3 block = dim3(32,32,1); dim3 grid = dim3((block.x + w-1)/block.x, (block.y + h-1)/block.y, 1); Gconv2<<<grid, block>>>(d_convimO, d_imgIn, d_k, w, h, nc, r);CUDA_CHECK; cout << "Convolution with Gauss kernel"<< endl; robGrad<<<grid, block>>>(d_gradientx, d_gradienty, d_convimO, d_kx, d_ky, w, h, nc);CUDA_CHECK; cout << "rototianally gradient"<< endl; getM<<<grid, block>>>(d_m11, d_m12, d_m22, d_gradientx, d_gradienty, w,h,nc);CUDA_CHECK; Gconv2<<<grid, block>>>(d_m11, d_m11, d_k, w, h, 1, r);CUDA_CHECK; Gconv2<<<grid, block>>>(d_m12, d_m12, d_k, w, h, 1, r);CUDA_CHECK; Gconv2<<<grid, block>>>(d_m22, d_m22, d_k, w, h, 1, r);CUDA_CHECK; getFeature<<<grid, block>>>(d_feature, d_m11, d_m12, d_m22, d_imgIn, w, h, alpha, belta);CUDA_CHECK; cudaMemcpy(convimO, d_convimO, nbytes, cudaMemcpyDeviceToHost);CUDA_CHECK; cudaMemcpy(vx, d_gradientx, nbytes, cudaMemcpyDeviceToHost);CUDA_CHECK; cudaMemcpy(vy, d_gradienty, nbytes, cudaMemcpyDeviceToHost);CUDA_CHECK; // for(int xi = (size_t)(h-1)*w; xi < (size_t)h*w; xi++){ // printf("%f \n", vy[xi]); // } cudaMemcpy(m11, d_m11, (size_t)w*h * sizeof(float), cudaMemcpyDeviceToHost);CUDA_CHECK; cudaMemcpy(m12, d_m12, (size_t)w*h * sizeof(float), cudaMemcpyDeviceToHost);CUDA_CHECK; cudaMemcpy(m22, d_m22, (size_t)w*h * sizeof(float), 
cudaMemcpyDeviceToHost);CUDA_CHECK; cudaMemcpy(feature, d_feature, (size_t)w*h*3 * sizeof(float), cudaMemcpyDeviceToHost);CUDA_CHECK; cudaFree(d_k);CUDA_CHECK; cudaFree(d_kx);CUDA_CHECK; cudaFree(d_ky);CUDA_CHECK; cudaFree(d_convimO);CUDA_CHECK; cudaFree(d_m11);CUDA_CHECK; cudaFree(d_m12);CUDA_CHECK; cudaFree(d_m22);CUDA_CHECK; cudaFree(d_gradientx);CUDA_CHECK; cudaFree(d_gradienty);CUDA_CHECK; cudaFree(d_imgIn);CUDA_CHECK; cudaFree(d_feature);CUDA_CHECK; // show input image showImage("Input", mIn, 100, 100); // show at position (x_from_left=100,y_from_above=100) // show output image: first convert to interleaved opencv format from the layered raw array // ### Display your own output images here as needed convert_layered_to_mat(convImg, convimO); convert_layered_to_mat(Im11, m11); convert_layered_to_mat(Im12, m12); convert_layered_to_mat(Im22, m22); convert_layered_to_mat(Ivx, vx); convert_layered_to_mat(Ivy, vy); convert_layered_to_mat(Ifeature, feature); showImage("convolution GPU", convImg, 100, 100); int scaleup = 100.f; Im11 *= scaleup; Im12 *= scaleup; Im22 *= scaleup; showImage("m11", Im11, 100, 100); showImage("m12", Im12, 100, 100); showImage("m22", Im22, 100, 100); Ivx *= 10.f; Ivy *= 10.f; showImage("vx", Ivx, 100, 100); showImage("vy", Ivy, 100, 100); showImage("feature", Ifeature, 100, 100); #ifdef CAMERA // end of camera loop } #else // wait for key inputs cv::waitKey(0); #endif // save input and result // cv::imwrite("image_input.png",mIn*255.f); // "imwrite" assumes channel range [0,255] // cv::imwrite("image_result.png",mOut*255.f); // free allocated arrays delete[] imgIn; // delete[] imgOut; delete[] k; delete[] dkx; delete[] dky; delete[] convimO; // close all opencv windows cvDestroyAllWindows(); return 0; }
54d7724226dbc66b79d3fb14a3152c06186f7fd8.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "sqr_mag_kernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *data = NULL; hipMalloc(&data, XSIZE*YSIZE); float *result = NULL; hipMalloc(&result, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( sqr_mag_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, data,result); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( sqr_mag_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, data,result); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( sqr_mag_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, data,result); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << 
','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
54d7724226dbc66b79d3fb14a3152c06186f7fd8.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "sqr_mag_kernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *data = NULL; cudaMalloc(&data, XSIZE*YSIZE); float *result = NULL; cudaMalloc(&result, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); sqr_mag_kernel<<<gridBlock,threadBlock>>>(data,result); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { sqr_mag_kernel<<<gridBlock,threadBlock>>>(data,result); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { sqr_mag_kernel<<<gridBlock,threadBlock>>>(data,result); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
19987e75d565c57768eeffe5e0737b0f97fa61ba.hip
// !!! This is a file automatically generated by hipify!!! /******************************************* ternarytest.cu **************************************/ /*mostra 0 no ndice 0, "c" no ndice 1 e nos ndice pares, mostra valor lixo nos demais ndices */ #include <stdio.h> #include "hip/hip_runtime.h" #include "hip/hip_runtime_api.h" #include <assert.h> #define N 2//64 __global__ void foo(float* A, float c) { A[threadIdx.x == 0 ? 1 : 2*threadIdx.x] = c; } int main() { float* dev_b; float* b; float c = 2.0f; b = (float*)malloc(2*N*sizeof(float)); /* acessvel apenas pela CPU funo main e funes __host__ */ hipMalloc((void**)&dev_b, 2*N*sizeof(float)); /* acessvel apenas pela GPU funes __global__ */ hipLaunchKernelGGL(( foo), dim3(1), dim3(N), 0, 0, dev_b, c); //ESBMC_verify_kernel_f(foo,1,N,dev_b,c); hipMemcpy(b, dev_b, 2*N*sizeof(float), hipMemcpyDeviceToHost); // printf("\n"); for (int i = 0; i < 2*N; ++i) { // printf("%f : ", b[i]); if((i>0)&&(i%2==0)) assert(b[i] == c); } free(b); hipFree(dev_b); return 0; }
19987e75d565c57768eeffe5e0737b0f97fa61ba.cu
/******************************************* ternarytest.cu **************************************/ /*mostra 0 no índice 0, "c" no índice 1 e nos índice pares, mostra valor lixo nos demais índices */ #include <stdio.h> #include "cuda.h" #include "cuda_runtime_api.h" #include <assert.h> #define N 2//64 __global__ void foo(float* A, float c) { A[threadIdx.x == 0 ? 1 : 2*threadIdx.x] = c; } int main() { float* dev_b; float* b; float c = 2.0f; b = (float*)malloc(2*N*sizeof(float)); /* acessível apenas pela CPU função main e funções __host__ */ cudaMalloc((void**)&dev_b, 2*N*sizeof(float)); /* acessível apenas pela GPU funções __global__ */ foo<<<1, N>>>(dev_b, c); //ESBMC_verify_kernel_f(foo,1,N,dev_b,c); cudaMemcpy(b, dev_b, 2*N*sizeof(float), cudaMemcpyDeviceToHost); // printf("\n"); for (int i = 0; i < 2*N; ++i) { // printf("%f : ", b[i]); if((i>0)&&(i%2==0)) assert(b[i] == c); } free(b); cudaFree(dev_b); return 0; }
8a8a84afaff033a9e0943516eab26a87b4e1c866.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <string.h> #include <math.h> #include <time.h> #include <hiprand/hiprand.h> #include "test.h" #include "quicksort.h" #include "gpu_utility.cuh" /* Questa fuzione viene invocata nel caso in cui i parametri passati da console sono errati. Viene stampato a schermo che c' stato un errore e la giusta sintassi con cui lanciare il porgramma. Infine viene terminato il programma con il codice EXIT_FAILURE. */ void wrongParameter() { printf("Errore: parametri errari!\n"); printf("Lanciare: QuickSort_Mantovani.exe{ -f \"path\"| -t N | -i N | -r N} [-g] [-p S] [-c] [-o \"path\" | -s]\n"); printf("\t-f path \t se l'array da ordinare e' nel file \"path\"\n"); printf("\t-t N \t se l'array da ordinare deve essere di dimensione N (con N > 0) e deve essere generato randomicamente\n"); printf("\t-i N \t se l'array da ordinare deve essere di dimensione N (con N > 0) e deve essere letto da standard input\n"); printf("\t-r N \t se l'array da ordinare deve essere di dimensione N (con N > 0) e deve essere generato randomicamente (i numeri generati verranno stampati su standard output)\n"); printf("\t-g \t se si vogliono effettuare i controlli sull'ordine e sulla presenza di tutti e soli gli elementi dell'array iniziale nell'array ordinato (queste operazioni possono essere molto lunghe)\n"); printf("\t-p S \t se si vuole usare il seme S per i generatori pseudo-randomici\n"); printf("\t-c \t se l'ordinamento deve essere decrescente. Se questa opzione manca l'ordinamento e' crescente\n"); printf("\t-o path \t se si vuole salvare l'array ordinato sul file \"path\" (non avvengono controlli: questa opzione puo' portare alla sovrascrittura di file esistenti)\n"); printf("\t-s \t se si vuole stampare l'array ordinato su standard output\n"); exit(EXIT_FAILURE); } /* Main del programma. 
Viene lanciato con i seguenti parametri: WarpSort_Mantovani.exe {-f "path"| -t N | -i N | -r N} [-g] [-p S] [-c] [-o "path" | -s] dove: -f path se l'array da ordinare e' nel file path -t N se l'array da ordinare deve essere di dimensione N (con N > 0) e deve essere generato randomicamente -i N se l'array da ordinare deve essere di dimensione N (con N > 0) e deve essere letto da standard input -r N se l'array da ordinare deve essere di dimensione N (con N > 0) e deve essere generato randomicamente (i numeri generati verranno stampati su standard output) -g se si vogliono effettuare i controlli sull'ordine e sulla presenza di tutti e soli gli elementi dell'array iniziale nell'array ordinato (queste operazioni possono essere molto lunghe) -p S se si vuole usare il seme S per i generatori pseudo-randomici -c se l'ordinamento deve essere decrescente. Se questa opzione manca l'ordinamento e' crescente -o path se si vuole salvare l'array ordinato sul file path (non avvengono controlli: questa opzione puo' portare alla sovrascrittura di file esistenti) -s se si vuole stampare l'array ordinato su standard output */ int main(int argc, char* argv[]) { //Inizializzo i parametri necessari a gestire l'input letto da console bool f = false, t = false, i = false, r = false, c = false, o = false, s = false, g = false; char* path_input = NULL, * path_output = NULL; int N = 0; long S = (long)time(NULL); //Lettura dei parametri passati da console for (int j = 1; j < argc; ++j) { if (strcmp(argv[j], "-f") == 0) { ++j; if (j < argc) { f = true; path_input = argv[j]; } } else if (strcmp(argv[j], "-t") == 0) { ++j; if (j < argc) { t = true; N = atoi(argv[j]); } } else if (strcmp(argv[j], "-i") == 0) { ++j; if (j < argc) { i = true; N = atoi(argv[j]); } } else if (strcmp(argv[j], "-r") == 0) { ++j; if (j < argc) { r = true; N = atoi(argv[j]); } } else if (strcmp(argv[j], "-g") == 0) { g = true; } else if (strcmp(argv[j], "-p") == 0) { ++j; if (j < argc) { S = atoi(argv[j]); } } else if 
(strcmp(argv[j], "-c") == 0) { c = true; } else if (strcmp(argv[j], "-o") == 0) { ++j; if (j < argc) { o = true; path_output = argv[j]; } } else if (strcmp(argv[j], "-s") == 0) { s = true; } else { wrongParameter(); } } //Controllo correttezza dei paremetri letti da console if (!((f && !t && !i && !r) || (!f && t && !i && !r) || (!f && !t && i && !r) || (!f && !t && !i && r))) { wrongParameter(); } if (o && s) { wrongParameter(); } if (f) { FILE* file = fopen(path_input, "r"); if (!file) { wrongParameter(); } N = 0; while (!feof(file)) { float temp; fscanf(file, "%f\n", &temp); N++; } fclose(file); } if (N <= 0 || S < 0) { wrongParameter(); } // Creazione puntatori per array in input e per array copia(serve nel caso si vogliano effettuare i test di correttezza con il parametro - g) float* array_in; float* array_in_check; CHECK(hipHostMalloc(&array_in, sizeof(float) * N, hipHostMallocDefault)); if (g) { array_in_check = (float*)malloc(sizeof(float) * N); } printf("Inizio riempimento array... "); //Inizializzazione dell'array di input a seconda del parametro letto da console int j = 0; if (f) { FILE* file = fopen(path_input, "r"); while (!feof(file)) { fscanf(file, "%f\n", &array_in[j]); j++; } fclose(file); } else if (i) { for (j = 0; j < N; ++j) { printf("Numero %d: ", j); scanf("%f", &array_in[j]); } } else { //Generazione attraverso cuRAND dell'array di input randomico hiprandGenerator_t gen; CHECK_CURAND(hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_DEFAULT)); CHECK_CURAND(hiprandSetPseudoRandomGeneratorSeed(gen, S)); CHECK_CURAND(hiprandGenerateUniform(gen, array_in, N)); CHECK(hipDeviceSynchronize()); CHECK_CURAND(hiprandDestroyGenerator(gen)); //Stampa dei valori generati (se presente parametro -r) if (r) { for (j = 0; j < N; ++j) { printf("%f\n", array_in[j]); } } } if (g) { memcpy(array_in_check, array_in, sizeof(float) * N); } printf("Fatto!\nInizio ordinamento... 
"); //Creazione eventi su stream 0 per calcolare il tempo impiegato dal warpsort hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); hipEventSynchronize(start); // Chiamata al quicksort. Il risultato viene salvato nell'array di input. //L'ordine sempre crescente quickSort(array_in, N); ///Registrazione evento di fine ordinamento e calcolo del tempo impiegato hipEventRecord(stop, 0); hipEventSynchronize(stop); float elapsedTime; hipEventElapsedTime(&elapsedTime, start, stop); printf("\nCPU quicksort ha ordinato %d elementi (%.2f MB) in %.2f millisecondi\n\n", N, ((sizeof(float) * N) / (1024.0 * 1024.0)), elapsedTime); //Allocazione dell'array ordinato float* array_ord = (float*)malloc(N * sizeof(float)); //Copia dall'array di putput all'array ordinato togliendo il padding. //Se viene passato il parametro -c allora la copia avviene al contrario in modo ra dovesciare gli elementi dell'array di output e ritrovarsi un array ordinato in modo decrescente if (c) { for (int j = 0; j < N; ++j) { array_ord[N - 1 - j] = array_in[j]; } } else { for (int j = 0; j < N; ++j) { array_ord[j] = array_in[j]; } } //Rilascio aarray di output CHECK(hipHostFree(array_in)); //Esecuzione dei test di correttezza (se presente il parametro -g) if (g) { do_tests(array_in_check, array_ord, N, c); free(array_in_check); } //Stampa dell'array ordinato (su console se presente il parametro -s o su file se presente il parametro -o) if (s) { printf("\nArray ordinato:\n"); for (int j = 0; j < N; ++j) { printf("%f\n", array_ord[j]); } } else if (o) { FILE* file = fopen(path_output, "w"); for (int j = 0; j < N; ++j) { fprintf(file, "%f\n", array_ord[j]); } fclose(file); } //Rilascio memeoria array ordinato free(array_ord); //Reset della GPU CHECK(hipDeviceReset()); return EXIT_SUCCESS; }
8a8a84afaff033a9e0943516eab26a87b4e1c866.cu
#include <stdio.h> #include <string.h> #include <math.h> #include <time.h> #include <curand.h> #include "test.h" #include "quicksort.h" #include "gpu_utility.cuh" /* Questa fuzione viene invocata nel caso in cui i parametri passati da console sono errati. Viene stampato a schermo che c'è stato un errore e la giusta sintassi con cui lanciare il porgramma. Infine viene terminato il programma con il codice EXIT_FAILURE. */ void wrongParameter() { printf("Errore: parametri errari!\n"); printf("Lanciare: QuickSort_Mantovani.exe{ -f \"path\"| -t N | -i N | -r N} [-g] [-p S] [-c] [-o \"path\" | -s]\n"); printf("\t-f path \t se l'array da ordinare e' nel file \"path\"\n"); printf("\t-t N \t se l'array da ordinare deve essere di dimensione N (con N > 0) e deve essere generato randomicamente\n"); printf("\t-i N \t se l'array da ordinare deve essere di dimensione N (con N > 0) e deve essere letto da standard input\n"); printf("\t-r N \t se l'array da ordinare deve essere di dimensione N (con N > 0) e deve essere generato randomicamente (i numeri generati verranno stampati su standard output)\n"); printf("\t-g \t se si vogliono effettuare i controlli sull'ordine e sulla presenza di tutti e soli gli elementi dell'array iniziale nell'array ordinato (queste operazioni possono essere molto lunghe)\n"); printf("\t-p S \t se si vuole usare il seme S per i generatori pseudo-randomici\n"); printf("\t-c \t se l'ordinamento deve essere decrescente. Se questa opzione manca l'ordinamento e' crescente\n"); printf("\t-o path \t se si vuole salvare l'array ordinato sul file \"path\" (non avvengono controlli: questa opzione puo' portare alla sovrascrittura di file esistenti)\n"); printf("\t-s \t se si vuole stampare l'array ordinato su standard output\n"); exit(EXIT_FAILURE); } /* Main del programma. 
Viene lanciato con i seguenti parametri: WarpSort_Mantovani.exe {-f "path"| -t N | -i N | -r N} [-g] [-p S] [-c] [-o "path" | -s] dove: -f path se l'array da ordinare e' nel file path -t N se l'array da ordinare deve essere di dimensione N (con N > 0) e deve essere generato randomicamente -i N se l'array da ordinare deve essere di dimensione N (con N > 0) e deve essere letto da standard input -r N se l'array da ordinare deve essere di dimensione N (con N > 0) e deve essere generato randomicamente (i numeri generati verranno stampati su standard output) -g se si vogliono effettuare i controlli sull'ordine e sulla presenza di tutti e soli gli elementi dell'array iniziale nell'array ordinato (queste operazioni possono essere molto lunghe) -p S se si vuole usare il seme S per i generatori pseudo-randomici -c se l'ordinamento deve essere decrescente. Se questa opzione manca l'ordinamento e' crescente -o path se si vuole salvare l'array ordinato sul file path (non avvengono controlli: questa opzione puo' portare alla sovrascrittura di file esistenti) -s se si vuole stampare l'array ordinato su standard output */ int main(int argc, char* argv[]) { //Inizializzo i parametri necessari a gestire l'input letto da console bool f = false, t = false, i = false, r = false, c = false, o = false, s = false, g = false; char* path_input = NULL, * path_output = NULL; int N = 0; long S = (long)time(NULL); //Lettura dei parametri passati da console for (int j = 1; j < argc; ++j) { if (strcmp(argv[j], "-f") == 0) { ++j; if (j < argc) { f = true; path_input = argv[j]; } } else if (strcmp(argv[j], "-t") == 0) { ++j; if (j < argc) { t = true; N = atoi(argv[j]); } } else if (strcmp(argv[j], "-i") == 0) { ++j; if (j < argc) { i = true; N = atoi(argv[j]); } } else if (strcmp(argv[j], "-r") == 0) { ++j; if (j < argc) { r = true; N = atoi(argv[j]); } } else if (strcmp(argv[j], "-g") == 0) { g = true; } else if (strcmp(argv[j], "-p") == 0) { ++j; if (j < argc) { S = atoi(argv[j]); } } else if 
(strcmp(argv[j], "-c") == 0) { c = true; } else if (strcmp(argv[j], "-o") == 0) { ++j; if (j < argc) { o = true; path_output = argv[j]; } } else if (strcmp(argv[j], "-s") == 0) { s = true; } else { wrongParameter(); } } //Controllo correttezza dei paremetri letti da console if (!((f && !t && !i && !r) || (!f && t && !i && !r) || (!f && !t && i && !r) || (!f && !t && !i && r))) { wrongParameter(); } if (o && s) { wrongParameter(); } if (f) { FILE* file = fopen(path_input, "r"); if (!file) { wrongParameter(); } N = 0; while (!feof(file)) { float temp; fscanf(file, "%f\n", &temp); N++; } fclose(file); } if (N <= 0 || S < 0) { wrongParameter(); } // Creazione puntatori per array in input e per array copia(serve nel caso si vogliano effettuare i test di correttezza con il parametro - g) float* array_in; float* array_in_check; CHECK(cudaHostAlloc(&array_in, sizeof(float) * N, cudaHostAllocDefault)); if (g) { array_in_check = (float*)malloc(sizeof(float) * N); } printf("Inizio riempimento array... "); //Inizializzazione dell'array di input a seconda del parametro letto da console int j = 0; if (f) { FILE* file = fopen(path_input, "r"); while (!feof(file)) { fscanf(file, "%f\n", &array_in[j]); j++; } fclose(file); } else if (i) { for (j = 0; j < N; ++j) { printf("Numero %d: ", j); scanf("%f", &array_in[j]); } } else { //Generazione attraverso cuRAND dell'array di input randomico curandGenerator_t gen; CHECK_CURAND(curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT)); CHECK_CURAND(curandSetPseudoRandomGeneratorSeed(gen, S)); CHECK_CURAND(curandGenerateUniform(gen, array_in, N)); CHECK(cudaDeviceSynchronize()); CHECK_CURAND(curandDestroyGenerator(gen)); //Stampa dei valori generati (se presente parametro -r) if (r) { for (j = 0; j < N; ++j) { printf("%f\n", array_in[j]); } } } if (g) { memcpy(array_in_check, array_in, sizeof(float) * N); } printf("Fatto!\nInizio ordinamento... 
"); //Creazione eventi su stream 0 per calcolare il tempo impiegato dal warpsort cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); cudaEventSynchronize(start); // Chiamata al quicksort. Il risultato viene salvato nell'array di input. //L'ordine è sempre crescente quickSort(array_in, N); ///Registrazione evento di fine ordinamento e calcolo del tempo impiegato cudaEventRecord(stop, 0); cudaEventSynchronize(stop); float elapsedTime; cudaEventElapsedTime(&elapsedTime, start, stop); printf("\nCPU quicksort ha ordinato %d elementi (%.2f MB) in %.2f millisecondi\n\n", N, ((sizeof(float) * N) / (1024.0 * 1024.0)), elapsedTime); //Allocazione dell'array ordinato float* array_ord = (float*)malloc(N * sizeof(float)); //Copia dall'array di putput all'array ordinato togliendo il padding. //Se viene passato il parametro -c allora la copia avviene al contrario in modo ra dovesciare gli elementi dell'array di output e ritrovarsi un array ordinato in modo decrescente if (c) { for (int j = 0; j < N; ++j) { array_ord[N - 1 - j] = array_in[j]; } } else { for (int j = 0; j < N; ++j) { array_ord[j] = array_in[j]; } } //Rilascio aarray di output CHECK(cudaFreeHost(array_in)); //Esecuzione dei test di correttezza (se presente il parametro -g) if (g) { do_tests(array_in_check, array_ord, N, c); free(array_in_check); } //Stampa dell'array ordinato (su console se presente il parametro -s o su file se presente il parametro -o) if (s) { printf("\nArray ordinato:\n"); for (int j = 0; j < N; ++j) { printf("%f\n", array_ord[j]); } } else if (o) { FILE* file = fopen(path_output, "w"); for (int j = 0; j < N; ++j) { fprintf(file, "%f\n", array_ord[j]); } fclose(file); } //Rilascio memeoria array ordinato free(array_ord); //Reset della GPU CHECK(cudaDeviceReset()); return EXIT_SUCCESS; }
3354a40dd5713446ee54d278729e7f9fda7582fa.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <math.h> #include <hip/hip_runtime.h> #include <helper_cuda.h> #include <helper_functions.h> // 80 x 80 const int N = 80 * 80; // 160 x 160 //const int N = 160 * 160; // 320 x 320 //const int N = 320 * 320; // 640 x 640 //const int N = 640 * 640; // 1k x 4 //const int N = 1000; // 10k x 4 //const int N = 1000 * 10; // 100k x 4 //const int N = 1000 * 100; // 1M x 4 //const int N = 1000 * 1000; // 10M x 4 //const int N = 1000 * 1000 * 10; //const int N = 1 << 20; #define FLTSIZE sizeof(float) inline int BLK(int data, int blocksize) { return (data + blocksize - 1) / blocksize; } __global__ void kernel_vectorAdd (const float* __restrict__ a_d, const float* __restrict__ b_d, const int N, const int offset, float *c_d) { int tid = threadIdx.x + __mul24(blockIdx.x, blockDim.x); if(tid < N) { c_d[tid + offset] = a_d[tid + offset] + b_d[tid + offset]; } } int main( int argc, char **argv) { int devid = 0 ; int num_streams = 1; if(argc >= 2) devid = atoi(argv[1]); hipSetDevice(devid); /* printf("\nrunning %d cuda streams on device %d\n", num_streams, devid); hipDeviceProp_t prop; hipGetDeviceProperties(&prop, devid); printf("Device Number: %d\n", devid); printf(" Device name: %s\n", prop.name); printf(" Memory Clock Rate (KHz): %d\n", prop.memoryClockRate); printf(" Memory Bus Width (bits): %d\n", prop.memoryBusWidth); printf(" Peak Memory Bandwidth (GB/s): %f\n\n", 2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6); printf(" Concurrent copy and execution: %s\n", (prop.deviceOverlap ? "Yes" : "No")); printf(" Concurrent kernels: %d\n", (prop.concurrentKernels)); printf(" Concurrent copy and kernel execution: %s with %d copy engine(s)\n", (prop.deviceOverlap ? 
"Yes" : "No"), prop.asyncEngineCount); */ //------------------------------------------------------------------------// // allocate data on the host //------------------------------------------------------------------------// size_t databytes = N * FLTSIZE; //float *a_h = (float*) malloc ( N * num_streams * FLTSIZE); //float *b_h = (float*) malloc ( N * num_streams * FLTSIZE); //float *c_h = (float*) malloc ( N * num_streams * FLTSIZE); float *a_h = NULL; checkCudaErrors(hipHostMalloc((void **)&a_h, N * num_streams * FLTSIZE)); float *b_h = NULL; checkCudaErrors(hipHostMalloc((void **)&b_h, N * num_streams * FLTSIZE)); float *c_h = NULL; checkCudaErrors(hipHostMalloc((void **)&c_h, N * num_streams * FLTSIZE)); for(int i=0; i< N * num_streams; i++) { a_h[i] = 1.1f; b_h[i] = 2.2f; } //------------------------------------------------------------------------// // allocate data on the device //------------------------------------------------------------------------// float *a_d; float *b_d; float *c_d; hipMalloc((void**)&a_d, N * num_streams * FLTSIZE); hipMalloc((void**)&b_d, N * num_streams * FLTSIZE); hipMalloc((void**)&c_d, N * num_streams * FLTSIZE); // kernel configuration dim3 threads = dim3(256, 1, 1); dim3 blocks = dim3(BLK(N, threads.x), 1, 1); // create cuda event handles hipEvent_t start, stop; checkCudaErrors(hipEventCreate(&start)); checkCudaErrors(hipEventCreate(&stop)); hipEventRecord(start,0); // copy data to deivce hipMemcpyAsync(&a_d[0], &a_h[0], databytes, hipMemcpyHostToDevice); hipMemcpyAsync(&b_d[0], &b_h[0], databytes, hipMemcpyHostToDevice); // launch one worker kernel per stream hipLaunchKernelGGL(( kernel_vectorAdd) , dim3(blocks), dim3(threads) , 0, 0, a_d, b_d, N, 0, c_d); // copy data back to host hipMemcpyAsync(&c_h[0], &c_d[0], databytes, hipMemcpyDeviceToHost); hipEventRecord(stop, 0); // required for async copy hipDeviceSynchronize(); float gpuTime_ms= 0; hipEventElapsedTime(&gpuTime_ms, start, stop); printf("runtime (ms) : %f\n", 
gpuTime_ms); // check data bool success = 1; for(int i=0; i< N * num_streams; i++) { if (abs(c_h[i] - 3.3f) > 1e-6) { fprintf(stderr, "%d : %f (error)!\n", i, c_h[i]); success = 0; break; } } if(success) { printf("\nSuccess! Exit.\n"); } //------------------------------------------------------------------------// // free //------------------------------------------------------------------------// checkCudaErrors(hipEventDestroy(start)); checkCudaErrors(hipEventDestroy(stop)); hipHostFree(a_h); hipHostFree(b_h); hipHostFree(c_h); hipFree(a_d); hipFree(b_d); hipFree(c_d); hipDeviceReset(); return 0; }
3354a40dd5713446ee54d278729e7f9fda7582fa.cu
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <cuda_runtime.h> #include <helper_cuda.h> #include <helper_functions.h> // 80 x 80 const int N = 80 * 80; // 160 x 160 //const int N = 160 * 160; // 320 x 320 //const int N = 320 * 320; // 640 x 640 //const int N = 640 * 640; // 1k x 4 //const int N = 1000; // 10k x 4 //const int N = 1000 * 10; // 100k x 4 //const int N = 1000 * 100; // 1M x 4 //const int N = 1000 * 1000; // 10M x 4 //const int N = 1000 * 1000 * 10; //const int N = 1 << 20; #define FLTSIZE sizeof(float) inline int BLK(int data, int blocksize) { return (data + blocksize - 1) / blocksize; } __global__ void kernel_vectorAdd (const float* __restrict__ a_d, const float* __restrict__ b_d, const int N, const int offset, float *c_d) { int tid = threadIdx.x + __mul24(blockIdx.x, blockDim.x); if(tid < N) { c_d[tid + offset] = a_d[tid + offset] + b_d[tid + offset]; } } int main( int argc, char **argv) { int devid = 0 ; int num_streams = 1; if(argc >= 2) devid = atoi(argv[1]); cudaSetDevice(devid); /* printf("\nrunning %d cuda streams on device %d\n", num_streams, devid); cudaDeviceProp prop; cudaGetDeviceProperties(&prop, devid); printf("Device Number: %d\n", devid); printf(" Device name: %s\n", prop.name); printf(" Memory Clock Rate (KHz): %d\n", prop.memoryClockRate); printf(" Memory Bus Width (bits): %d\n", prop.memoryBusWidth); printf(" Peak Memory Bandwidth (GB/s): %f\n\n", 2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6); printf(" Concurrent copy and execution: %s\n", (prop.deviceOverlap ? "Yes" : "No")); printf(" Concurrent kernels: %d\n", (prop.concurrentKernels)); printf(" Concurrent copy and kernel execution: %s with %d copy engine(s)\n", (prop.deviceOverlap ? 
"Yes" : "No"), prop.asyncEngineCount); */ //------------------------------------------------------------------------// // allocate data on the host //------------------------------------------------------------------------// size_t databytes = N * FLTSIZE; //float *a_h = (float*) malloc ( N * num_streams * FLTSIZE); //float *b_h = (float*) malloc ( N * num_streams * FLTSIZE); //float *c_h = (float*) malloc ( N * num_streams * FLTSIZE); float *a_h = NULL; checkCudaErrors(cudaMallocHost((void **)&a_h, N * num_streams * FLTSIZE)); float *b_h = NULL; checkCudaErrors(cudaMallocHost((void **)&b_h, N * num_streams * FLTSIZE)); float *c_h = NULL; checkCudaErrors(cudaMallocHost((void **)&c_h, N * num_streams * FLTSIZE)); for(int i=0; i< N * num_streams; i++) { a_h[i] = 1.1f; b_h[i] = 2.2f; } //------------------------------------------------------------------------// // allocate data on the device //------------------------------------------------------------------------// float *a_d; float *b_d; float *c_d; cudaMalloc((void**)&a_d, N * num_streams * FLTSIZE); cudaMalloc((void**)&b_d, N * num_streams * FLTSIZE); cudaMalloc((void**)&c_d, N * num_streams * FLTSIZE); // kernel configuration dim3 threads = dim3(256, 1, 1); dim3 blocks = dim3(BLK(N, threads.x), 1, 1); // create cuda event handles cudaEvent_t start, stop; checkCudaErrors(cudaEventCreate(&start)); checkCudaErrors(cudaEventCreate(&stop)); cudaEventRecord(start,0); // copy data to deivce cudaMemcpyAsync(&a_d[0], &a_h[0], databytes, cudaMemcpyHostToDevice); cudaMemcpyAsync(&b_d[0], &b_h[0], databytes, cudaMemcpyHostToDevice); // launch one worker kernel per stream kernel_vectorAdd <<< blocks, threads >>> (a_d, b_d, N, 0, c_d); // copy data back to host cudaMemcpyAsync(&c_h[0], &c_d[0], databytes, cudaMemcpyDeviceToHost); cudaEventRecord(stop, 0); // required for async copy cudaDeviceSynchronize(); float gpuTime_ms= 0; cudaEventElapsedTime(&gpuTime_ms, start, stop); printf("runtime (ms) : %f\n", gpuTime_ms); // check 
data bool success = 1; for(int i=0; i< N * num_streams; i++) { if (abs(c_h[i] - 3.3f) > 1e-6) { fprintf(stderr, "%d : %f (error)!\n", i, c_h[i]); success = 0; break; } } if(success) { printf("\nSuccess! Exit.\n"); } //------------------------------------------------------------------------// // free //------------------------------------------------------------------------// checkCudaErrors(cudaEventDestroy(start)); checkCudaErrors(cudaEventDestroy(stop)); cudaFreeHost(a_h); cudaFreeHost(b_h); cudaFreeHost(c_h); cudaFree(a_d); cudaFree(b_d); cudaFree(c_d); cudaDeviceReset(); return 0; }
dcc1d3ae5aaace1011a370b8eddab59d0e93e438.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/hip/HIPApplyUtils.cuh> #include <ATen/CUDAGenerator.h> #include <ATen/hip/detail/IndexUtils.cuh> #include <ATen/hip/detail/TensorInfo.cuh> #include <c10/macros/Macros.h> #include <hiprand/hiprand_kernel.h> #include <THH/THHGeneral.h> #include <ATen/native/hip/arc_flag.h> namespace at{ namespace native{ namespace { // philox generates 128 bits of randomness at a time. Kernel uses this explicitly by putting suitably transformed result into float4 // for all members of float4 to be consumed UNROLL has to be 4. Don't change! const int UNROLL = 4; template < typename scalar_t, typename accscalar_t, typename IndexType, int ADims> #if __CUDA_ARCH__ >= 350 C10_LAUNCH_BOUNDS_2(256, 8) #elif defined (__HIP_PLATFORM_HCC__) C10_LAUNCH_BOUNDS_2(256, 4) #endif __global__ void fused_dropout_kernel(cuda::detail::TensorInfo<scalar_t, IndexType> a, cuda::detail::TensorInfo<scalar_t, IndexType> b, cuda::detail::TensorInfo<uint8_t, IndexType> c, IndexType totalElements, accscalar_t p, std::pair<uint64_t, uint64_t> seeds ) { accscalar_t pinv = accscalar_t(1)/p; IndexType idx = blockIdx.x * blockDim.x + threadIdx.x; hiprandStatePhilox4_32_10_t state; hiprand_init( seeds.first, idx, seeds.second, &state); IndexType rounded_size = ((totalElements - 1)/(blockDim.x * gridDim.x * UNROLL)+1) * blockDim.x * gridDim.x * UNROLL; for (IndexType linearIndex = idx; linearIndex < rounded_size; linearIndex += gridDim.x * blockDim.x*UNROLL) { //hiprand_uniform_double was pure evil anyway, not doing what it promises, and there's nothing for halfs, so generate float for everything float4 rand = hiprand_uniform4(&state); scalar_t src[UNROLL]; rand.x = rand.x < p; rand.y = rand.y < p; rand.z = rand.z < p; rand.w = rand.w < p; for (int ii = 0; ii < UNROLL; ii++) { IndexType li = linearIndex + blockDim.x * gridDim.x * ii; if (li < totalElements) { // 
Convert `linearIndex` into an offset of `a` const IndexType aOffset = cuda::detail::IndexToOffset<scalar_t, IndexType, ADims>::get(li, a); src[ii] = a.data[aOffset]; } } for (int ii = 0; ii < UNROLL; ii++) { IndexType li = linearIndex + blockDim.x * gridDim.x * ii; if (li < totalElements) { // Convert `linearIndex` into an offset of `b` const IndexType bOffset = cuda::detail::IndexToOffset<scalar_t, IndexType, 1>::get(li, b); b.data[bOffset] = src[ii]*(&rand.x)[ii]*pinv; c.data[bOffset] = (uint8_t)(&rand.x)[ii]; } } __syncthreads(); } } template<typename scalar_t, typename accscalar_t> void masked_scale_kernel(at::Tensor& ret, const at::Tensor src, const at::Tensor mask, accscalar_t scale){ at::cuda::CUDA_tensor_apply3<scalar_t, scalar_t, uint8_t>(ret, src, mask, [scale]__device__(scalar_t& ret_val, const scalar_t& src_val, const uint8_t mask_val){ ret_val = (float)mask_val * src_val * scale; }); } } //anonymous namespace std::tuple<Tensor,Tensor> fused_dropout_cuda(const Tensor& self, double p, Generator * gen_){ auto gen = get_generator_or_default<CUDAGenerator>(gen_, cuda::detail::getDefaultCUDAGenerator()); int newTid = ++arc_vm.global_tensor_id_; Tensor ret = at::empty_like(self); ret.unsafeGetTensorImpl()->tensor_id = newTid; // std::cout << "dropout ret newTid: " << newTid << std::endl; newTid = ++arc_vm.global_tensor_id_; Tensor mask = arc_vm.liveness_result[arc_vm.cur_back_num][newTid] ? 
at::ARCempty(self.sizes(), self.options().dtype(kByte)) : at::empty(self.sizes(), self.options().dtype(kByte)); mask.unsafeGetTensorImpl()->tensor_id = newTid; // std::cout << "dropout mask newTid: " << newTid << std::endl; // std::cout << "dropout self newTid: " << self.unsafeGetTensorImpl()->tensor_id << std::endl; const int64_t nelem = self.numel(); //empty tensors should not get here, but just in case, avoid FPE if (nelem==0) return std::tuple<Tensor,Tensor>(self, mask); const int64_t block_size = 256; unsigned int blocks_per_sm = at::cuda::getCurrentDeviceProperties()->maxThreadsPerMultiProcessor/block_size; dim3 dim_block(block_size); dim3 grid((nelem + block_size -1)/block_size); grid.x = ::min((unsigned int)at::cuda::getCurrentDeviceProperties()->multiProcessorCount * blocks_per_sm, grid.x); //number of times random will be generated per thread, to offset philox counter in thc random state int64_t counter_offset = ((nelem - 1)/(block_size*grid.x*UNROLL)+1)*UNROLL; std::pair<uint64_t, uint64_t> rng_engine_inputs; { // See Note [Acquire lock when using random generators] std::lock_guard<std::mutex> lock(gen->mutex_); rng_engine_inputs = gen->philox_engine_inputs(counter_offset); } if (cuda::detail::canUse32BitIndexMath(self)){ AT_DISPATCH_FLOATING_TYPES_AND_HALF(self.scalar_type(), "fused_dropout", [&] { using accscalar_t = acc_type<scalar_t, true>; accscalar_t pa = (accscalar_t)(p); auto self_info = cuda::detail::getTensorInfo<scalar_t, unsigned int>(self); auto ret_info = cuda::detail::getTensorInfo<scalar_t, unsigned int>(ret); auto mask_info = cuda::detail::getTensorInfo<uint8_t, unsigned int>(mask); self_info.collapseDims(); ret_info.collapseDims(); mask_info.collapseDims(); //ret and mask are collapsed to 1d contiguous tensor switch (self_info.dims) { case 1: hipLaunchKernelGGL(( fused_dropout_kernel<scalar_t, accscalar_t, unsigned int, 1>), dim3(grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), self_info, ret_info, mask_info, 
nelem, pa, rng_engine_inputs); break; default: hipLaunchKernelGGL(( fused_dropout_kernel<scalar_t, accscalar_t, unsigned int, -1>), dim3(grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), self_info, ret_info, mask_info, nelem, pa, rng_engine_inputs); } }); } else { AT_DISPATCH_FLOATING_TYPES_AND_HALF(self.scalar_type(), "fused_dropout", [&] { using accscalar_t = acc_type<scalar_t, true>; accscalar_t pa = (accscalar_t)(p); auto self_info = cuda::detail::getTensorInfo<scalar_t, uint64_t>(self); auto ret_info = cuda::detail::getTensorInfo<scalar_t, uint64_t>(ret); auto mask_info = cuda::detail::getTensorInfo<uint8_t, uint64_t>(mask); self_info.collapseDims(); ret_info.collapseDims(); mask_info.collapseDims(); //ret and mask are collapsed to 1d contiguous tensor switch (self_info.dims) { case 1: hipLaunchKernelGGL(( fused_dropout_kernel<scalar_t, accscalar_t, uint64_t, 1>), dim3(grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), self_info, ret_info, mask_info, nelem, pa, rng_engine_inputs); break; default: hipLaunchKernelGGL(( fused_dropout_kernel<scalar_t, accscalar_t, uint64_t, -1>), dim3(grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), self_info, ret_info, mask_info, nelem, pa, rng_engine_inputs); } }); } // THCudaCheck(hipGetLastError()); return std::tuple<Tensor,Tensor>(ret, mask); } Tensor masked_scale_cuda(const Tensor& self, const Tensor& mask, double scale){ Tensor ret = at::empty_like(self); TORCH_CHECK(mask.scalar_type() == at::ScalarType::Byte, "mask should be torch.uint8 dtype"); AT_DISPATCH_FLOATING_TYPES_AND_HALF(ret.scalar_type(), "masked_scale", [&] { using accscalar_t = acc_type<scalar_t, true>; accscalar_t pa = (accscalar_t)(scale); masked_scale_kernel<scalar_t>(ret, self, mask, pa); }); return ret; } } }
dcc1d3ae5aaace1011a370b8eddab59d0e93e438.cu
#include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #include <ATen/CUDAGenerator.h> #include <ATen/cuda/detail/IndexUtils.cuh> #include <ATen/cuda/detail/TensorInfo.cuh> #include <c10/macros/Macros.h> #include <curand_kernel.h> #include <THC/THCGeneral.h> #include <ATen/native/cuda/arc_flag.h> namespace at{ namespace native{ namespace { // philox generates 128 bits of randomness at a time. Kernel uses this explicitly by putting suitably transformed result into float4 // for all members of float4 to be consumed UNROLL has to be 4. Don't change! const int UNROLL = 4; template < typename scalar_t, typename accscalar_t, typename IndexType, int ADims> #if __CUDA_ARCH__ >= 350 C10_LAUNCH_BOUNDS_2(256, 8) #elif defined (__HIP_PLATFORM_HCC__) C10_LAUNCH_BOUNDS_2(256, 4) #endif __global__ void fused_dropout_kernel(cuda::detail::TensorInfo<scalar_t, IndexType> a, cuda::detail::TensorInfo<scalar_t, IndexType> b, cuda::detail::TensorInfo<uint8_t, IndexType> c, IndexType totalElements, accscalar_t p, std::pair<uint64_t, uint64_t> seeds ) { accscalar_t pinv = accscalar_t(1)/p; IndexType idx = blockIdx.x * blockDim.x + threadIdx.x; curandStatePhilox4_32_10_t state; curand_init( seeds.first, idx, seeds.second, &state); IndexType rounded_size = ((totalElements - 1)/(blockDim.x * gridDim.x * UNROLL)+1) * blockDim.x * gridDim.x * UNROLL; for (IndexType linearIndex = idx; linearIndex < rounded_size; linearIndex += gridDim.x * blockDim.x*UNROLL) { //curand_uniform_double was pure evil anyway, not doing what it promises, and there's nothing for halfs, so generate float for everything float4 rand = curand_uniform4(&state); scalar_t src[UNROLL]; rand.x = rand.x < p; rand.y = rand.y < p; rand.z = rand.z < p; rand.w = rand.w < p; for (int ii = 0; ii < UNROLL; ii++) { IndexType li = linearIndex + blockDim.x * gridDim.x * ii; if (li < totalElements) { // Convert `linearIndex` into an offset of `a` const IndexType aOffset = 
cuda::detail::IndexToOffset<scalar_t, IndexType, ADims>::get(li, a); src[ii] = a.data[aOffset]; } } for (int ii = 0; ii < UNROLL; ii++) { IndexType li = linearIndex + blockDim.x * gridDim.x * ii; if (li < totalElements) { // Convert `linearIndex` into an offset of `b` const IndexType bOffset = cuda::detail::IndexToOffset<scalar_t, IndexType, 1>::get(li, b); b.data[bOffset] = src[ii]*(&rand.x)[ii]*pinv; c.data[bOffset] = (uint8_t)(&rand.x)[ii]; } } __syncthreads(); } } template<typename scalar_t, typename accscalar_t> void masked_scale_kernel(at::Tensor& ret, const at::Tensor src, const at::Tensor mask, accscalar_t scale){ at::cuda::CUDA_tensor_apply3<scalar_t, scalar_t, uint8_t>(ret, src, mask, [scale]__device__(scalar_t& ret_val, const scalar_t& src_val, const uint8_t mask_val){ ret_val = (float)mask_val * src_val * scale; }); } } //anonymous namespace std::tuple<Tensor,Tensor> fused_dropout_cuda(const Tensor& self, double p, Generator * gen_){ auto gen = get_generator_or_default<CUDAGenerator>(gen_, cuda::detail::getDefaultCUDAGenerator()); int newTid = ++arc_vm.global_tensor_id_; Tensor ret = at::empty_like(self); ret.unsafeGetTensorImpl()->tensor_id = newTid; // std::cout << "dropout ret newTid: " << newTid << std::endl; newTid = ++arc_vm.global_tensor_id_; Tensor mask = arc_vm.liveness_result[arc_vm.cur_back_num][newTid] ? 
at::ARCempty(self.sizes(), self.options().dtype(kByte)) : at::empty(self.sizes(), self.options().dtype(kByte)); mask.unsafeGetTensorImpl()->tensor_id = newTid; // std::cout << "dropout mask newTid: " << newTid << std::endl; // std::cout << "dropout self newTid: " << self.unsafeGetTensorImpl()->tensor_id << std::endl; const int64_t nelem = self.numel(); //empty tensors should not get here, but just in case, avoid FPE if (nelem==0) return std::tuple<Tensor,Tensor>(self, mask); const int64_t block_size = 256; unsigned int blocks_per_sm = at::cuda::getCurrentDeviceProperties()->maxThreadsPerMultiProcessor/block_size; dim3 dim_block(block_size); dim3 grid((nelem + block_size -1)/block_size); grid.x = std::min((unsigned int)at::cuda::getCurrentDeviceProperties()->multiProcessorCount * blocks_per_sm, grid.x); //number of times random will be generated per thread, to offset philox counter in thc random state int64_t counter_offset = ((nelem - 1)/(block_size*grid.x*UNROLL)+1)*UNROLL; std::pair<uint64_t, uint64_t> rng_engine_inputs; { // See Note [Acquire lock when using random generators] std::lock_guard<std::mutex> lock(gen->mutex_); rng_engine_inputs = gen->philox_engine_inputs(counter_offset); } if (cuda::detail::canUse32BitIndexMath(self)){ AT_DISPATCH_FLOATING_TYPES_AND_HALF(self.scalar_type(), "fused_dropout", [&] { using accscalar_t = acc_type<scalar_t, true>; accscalar_t pa = (accscalar_t)(p); auto self_info = cuda::detail::getTensorInfo<scalar_t, unsigned int>(self); auto ret_info = cuda::detail::getTensorInfo<scalar_t, unsigned int>(ret); auto mask_info = cuda::detail::getTensorInfo<uint8_t, unsigned int>(mask); self_info.collapseDims(); ret_info.collapseDims(); mask_info.collapseDims(); //ret and mask are collapsed to 1d contiguous tensor switch (self_info.dims) { case 1: fused_dropout_kernel<scalar_t, accscalar_t, unsigned int, 1><<<grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>(self_info, ret_info, mask_info, nelem, pa, rng_engine_inputs); break; 
default: fused_dropout_kernel<scalar_t, accscalar_t, unsigned int, -1><<<grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>(self_info, ret_info, mask_info, nelem, pa, rng_engine_inputs); } }); } else { AT_DISPATCH_FLOATING_TYPES_AND_HALF(self.scalar_type(), "fused_dropout", [&] { using accscalar_t = acc_type<scalar_t, true>; accscalar_t pa = (accscalar_t)(p); auto self_info = cuda::detail::getTensorInfo<scalar_t, uint64_t>(self); auto ret_info = cuda::detail::getTensorInfo<scalar_t, uint64_t>(ret); auto mask_info = cuda::detail::getTensorInfo<uint8_t, uint64_t>(mask); self_info.collapseDims(); ret_info.collapseDims(); mask_info.collapseDims(); //ret and mask are collapsed to 1d contiguous tensor switch (self_info.dims) { case 1: fused_dropout_kernel<scalar_t, accscalar_t, uint64_t, 1><<<grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>(self_info, ret_info, mask_info, nelem, pa, rng_engine_inputs); break; default: fused_dropout_kernel<scalar_t, accscalar_t, uint64_t, -1><<<grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>(self_info, ret_info, mask_info, nelem, pa, rng_engine_inputs); } }); } // THCudaCheck(cudaGetLastError()); return std::tuple<Tensor,Tensor>(ret, mask); } Tensor masked_scale_cuda(const Tensor& self, const Tensor& mask, double scale){ Tensor ret = at::empty_like(self); TORCH_CHECK(mask.scalar_type() == at::ScalarType::Byte, "mask should be torch.uint8 dtype"); AT_DISPATCH_FLOATING_TYPES_AND_HALF(ret.scalar_type(), "masked_scale", [&] { using accscalar_t = acc_type<scalar_t, true>; accscalar_t pa = (accscalar_t)(scale); masked_scale_kernel<scalar_t>(ret, self, mask, pa); }); return ret; } } }
e5a082e5169b670f75852bf6764181038e95996d.hip
// !!! This is a file automatically generated by hipify!!! // Copyright (C) 2018 ETH Zurich // Copyright (C) 2018 UT-Battelle, LLC // All rights reserved. // // See LICENSE.txt for terms of usage. // See CITATION.txt for citation guidelines if you use this code for scientific publications. // // Author: Giovanni Balduzzi (gbalduzz@itp.phys.ethz.ch) // // This file implements G4Helper::set. #include "dca/phys/dca_step/cluster_solver/shared_tools/accumulation/tp/g4_helper.cuh" #include <algorithm> #include <array> #include <mutex> #include <stdexcept> namespace dca { namespace phys { namespace solver { namespace accumulator { namespace details { // dca::phys::solver::accumulator::details:: __device__ __constant__ G4Helper g4_helper; void G4Helper::set(int nb, int nk, int nw_pos, const std::vector<int>& delta_k, const std::vector<int>& delta_w, const int* add_k, int lda, const int* sub_k, int lds, int k0) { static std::once_flag flag; std::call_once(flag, [=]() { // Initialize the reciprocal cluster if not done already. 
solver::details::ClusterHelper::set(nk, add_k, lda, sub_k, lds, true); G4Helper host_helper; host_helper.nw_pos_ = nw_pos; host_helper.ext_size_ = 0; for (const int idx : delta_w) host_helper.ext_size_ = ::max(host_helper.ext_size_, std::abs(idx)); const int nw = 2 * nw_pos; const std::array<int, 10> sizes{nb, nb, nb, nb, nk, nw, nk, nw, static_cast<int>(delta_k.size()), static_cast<int>(delta_w.size())}; std::array<int, 10> steps; steps[0] = 1; for (std::size_t i = 1; i < steps.size(); ++i) steps[i] = steps[i - 1] * sizes[i - 1]; std::copy_n(steps.data(), steps.size(), host_helper.sbdm_steps_); hipMalloc(&host_helper.w_ex_indices_, sizeof(int) * delta_w.size()); hipMemcpy(const_cast<int*>(host_helper.w_ex_indices_), const_cast<int*>(delta_w.data()), sizeof(int) * delta_w.size(), hipMemcpyHostToDevice); hipMalloc(&host_helper.k_ex_indices_, sizeof(int) * delta_k.size()); hipMemcpy(const_cast<int*>(host_helper.k_ex_indices_), const_cast<int*>(delta_k.data()), sizeof(int) * delta_k.size(), hipMemcpyHostToDevice); hipMemcpyToSymbol(g4_helper, &host_helper, sizeof(G4Helper)); }); } } // namespace details } // namespace accumulator } // namespace solver } // namespace phys } // namespace dca
e5a082e5169b670f75852bf6764181038e95996d.cu
// Copyright (C) 2018 ETH Zurich // Copyright (C) 2018 UT-Battelle, LLC // All rights reserved. // // See LICENSE.txt for terms of usage. // See CITATION.txt for citation guidelines if you use this code for scientific publications. // // Author: Giovanni Balduzzi (gbalduzz@itp.phys.ethz.ch) // // This file implements G4Helper::set. #include "dca/phys/dca_step/cluster_solver/shared_tools/accumulation/tp/g4_helper.cuh" #include <algorithm> #include <array> #include <mutex> #include <stdexcept> namespace dca { namespace phys { namespace solver { namespace accumulator { namespace details { // dca::phys::solver::accumulator::details:: __device__ __constant__ G4Helper g4_helper; void G4Helper::set(int nb, int nk, int nw_pos, const std::vector<int>& delta_k, const std::vector<int>& delta_w, const int* add_k, int lda, const int* sub_k, int lds, int k0) { static std::once_flag flag; std::call_once(flag, [=]() { // Initialize the reciprocal cluster if not done already. solver::details::ClusterHelper::set(nk, add_k, lda, sub_k, lds, true); G4Helper host_helper; host_helper.nw_pos_ = nw_pos; host_helper.ext_size_ = 0; for (const int idx : delta_w) host_helper.ext_size_ = std::max(host_helper.ext_size_, std::abs(idx)); const int nw = 2 * nw_pos; const std::array<int, 10> sizes{nb, nb, nb, nb, nk, nw, nk, nw, static_cast<int>(delta_k.size()), static_cast<int>(delta_w.size())}; std::array<int, 10> steps; steps[0] = 1; for (std::size_t i = 1; i < steps.size(); ++i) steps[i] = steps[i - 1] * sizes[i - 1]; std::copy_n(steps.data(), steps.size(), host_helper.sbdm_steps_); cudaMalloc(&host_helper.w_ex_indices_, sizeof(int) * delta_w.size()); cudaMemcpy(const_cast<int*>(host_helper.w_ex_indices_), const_cast<int*>(delta_w.data()), sizeof(int) * delta_w.size(), cudaMemcpyHostToDevice); cudaMalloc(&host_helper.k_ex_indices_, sizeof(int) * delta_k.size()); cudaMemcpy(const_cast<int*>(host_helper.k_ex_indices_), const_cast<int*>(delta_k.data()), sizeof(int) * delta_k.size(), 
cudaMemcpyHostToDevice); cudaMemcpyToSymbol(g4_helper, &host_helper, sizeof(G4Helper)); }); } } // namespace details } // namespace accumulator } // namespace solver } // namespace phys } // namespace dca
3814d0647fcb293567340924f86714f45de13fb8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/VolumetricFractionalMaxPooling.cu" #else void THNN_(VolumetricFractionalMaxPooling_updateOutput)( THCState *state, THCTensor *input, THCTensor *output, int outputT, int outputW, int outputH, int poolSizeT, int poolSizeW, int poolSizeH, THCIndexTensor *indices, THCTensor *randomSamples) { int planeDim = 0; int dimh = 1; int dimw = 2; int dimt = 3; int64_t numBatch = 1; int64_t numInputDims = THCTensor_(nDimensionLegacyNoScalars)(state, input); THCUNN_argCheck(state, !input->is_empty() && (numInputDims == 4 || numInputDims == 5), 2, input, "non-empty 4D or 5D (batch mode) tensor expected for input, but got: %s"); if (numInputDims == 5) { numBatch = THCTensor_(size)(state, input, 0); planeDim++; dimh++; dimw++; dimt++; } /* sizes */ int64_t numPlanes = THCTensor_(size)(state, input, planeDim); int64_t inputH = THCTensor_(size)(state, input, dimh); int64_t inputW = THCTensor_(size)(state, input, dimw); int64_t inputT = THCTensor_(size)(state, input, dimt); THArgCheck(outputH + poolSizeH - 1 < inputH, 7, "poolSizeH (%d) too large relative to input height (%d)", poolSizeH, inputH); THArgCheck(outputW + poolSizeW - 1 < inputW, 6, "poolSizeW (%d) too large relative to input width (%d)", poolSizeW, inputW); THArgCheck(outputT + poolSizeT - 1 < inputW, 5, "poolSizeT (%d) too large relative to input time (%d)", poolSizeT, inputT); THCDeviceTensor<real, 5> devInput; THCDeviceTensor<real, 5> devOutput; THCDeviceTensor<THCIndex_t, 5> devIndices; THCDeviceTensor<real, 3> devSamples = toDeviceTensor<real, 3>(state, randomSamples); if (numInputDims == 4) { /* resize output */ THCTensor_(resize4d)(state, output, numPlanes, outputH, outputW, outputT); /* indices will contain the locations for each output point */ THCIndexTensor_(resize4d)(state, indices, numPlanes, outputH, outputW, outputT); devInput = toDeviceTensor<real, 4>(state, 
input).upcastOuter<5>(); devOutput = toDeviceTensor<real, 4>(state, output).upcastOuter<5>(); devIndices = toDeviceTensor<THCIndex_t, 4>(state, indices).upcastOuter<5>(); } else { THCTensor_(resize5d)(state, output, numBatch, numPlanes, outputH, outputW, outputT); /* indices will contain the locations for each output point */ THCIndexTensor_(resize5d)(state, indices, numBatch, numPlanes, outputH, outputW, outputT); devInput = toDeviceTensor<real, 5>(state, input); devOutput = toDeviceTensor<real, 5>(state, output); devIndices = toDeviceTensor<THCIndex_t, 5>(state, indices); } // block is limited to 4 warps // grid handles overflow per each plane int outputPlaneSize = devOutput.getSize(2) * devOutput.getSize(3) * devOutput.getSize(4); dim3 grid(THCCeilDiv(outputPlaneSize, 128), devInput.getSize(1), devInput.getSize(0)); dim3 block(outputPlaneSize > 128 ? 128 : outputPlaneSize); #define SFMP_UPDATE_OUTPUT(POOL_W) \ hipLaunchKernelGGL(( VolumetricFractionalMaxPooling_updateOutput<POOL_W, real, accreal>) \ , dim3(grid), dim3(block), 0, THCState_getCurrentStream(state), \ devInput, devOutput, devIndices, devSamples, poolSizeT, poolSizeW, poolSizeH); #define SFMP_UPDATE_OUTPUT_CASE(POOL_W) \ case POOL_W: SFMP_UPDATE_OUTPUT(POOL_W); break switch (poolSizeW) { SFMP_UPDATE_OUTPUT_CASE(2); SFMP_UPDATE_OUTPUT_CASE(3); SFMP_UPDATE_OUTPUT_CASE(4); SFMP_UPDATE_OUTPUT_CASE(5); SFMP_UPDATE_OUTPUT_CASE(6); SFMP_UPDATE_OUTPUT_CASE(7); default: // dynamic pool width SFMP_UPDATE_OUTPUT_CASE(-1); } THCudaCheck(hipGetLastError()); } void THNN_(VolumetricFractionalMaxPooling_updateGradInput)( THCState *state, THCTensor *input, THCTensor *gradOutput, THCTensor *gradInput, int outputT, int outputW, int outputH, int poolSizeT, int poolSizeW, int poolSizeH, THCIndexTensor *indices) { int dimh = 1; int dimw = 2; int dimt = 3; int64_t numInputDims = THCTensor_(nDimensionLegacyNoScalars)(state, input); if (numInputDims == 5) { dimh++; dimw++; dimt++; } /* sizes */ int64_t inputH = 
THCTensor_(size)(state, input, dimh); int64_t inputW = THCTensor_(size)(state, input, dimw); int64_t inputT = THCTensor_(size)(state, input, dimt); THArgCheck(outputH == THCTensor_(size)(state, gradOutput, dimh), 3, "gradOutput height unexpected"); THArgCheck(outputW == THCTensor_(size)(state, gradOutput, dimw), 3, "gradOutput width unexpected"); THArgCheck(outputT == THCTensor_(size)(state, gradOutput, dimt), 3, "gradOutput time unexpected"); /* resize */ THCTensor_(resizeAs)(state, gradInput, input); THCTensor_(zero)(state, gradInput); THCDeviceTensor<real, 5> devGradInput; THCDeviceTensor<real, 5> devGradOutput; THCDeviceTensor<THCIndex_t, 5> devIndices; /* backprop */ if (numInputDims == 4) { devGradInput = toDeviceTensor<real, 4>(state, gradInput).upcastOuter<5>(); devGradOutput = toDeviceTensor<real, 4>(state, gradOutput).upcastOuter<5>(); devIndices = toDeviceTensor<THCIndex_t, 4>(state, indices).upcastOuter<5>(); } else { devGradInput = toDeviceTensor<real, 5>(state, gradInput); devGradOutput = toDeviceTensor<real, 5>(state, gradOutput); devIndices = toDeviceTensor<THCIndex_t, 5>(state, indices); } // block is limited to 4 warps // grid handles overflow per each plane int outputPlaneSize = devGradOutput.getSize(2) * devGradOutput.getSize(3) * devGradOutput.getSize(4); dim3 grid(THCCeilDiv(outputPlaneSize, 128), devGradInput.getSize(1), devGradInput.getSize(0)); dim3 block(outputPlaneSize > 128 ? 128 : outputPlaneSize); hipLaunchKernelGGL(( VolumetricFractionalMaxPooling_updateGradInput) , dim3(grid), dim3(block), 0, THCState_getCurrentStream(state), devGradInput, devGradOutput, devIndices); THCudaCheck(hipGetLastError()); } #endif
3814d0647fcb293567340924f86714f45de13fb8.cu
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/VolumetricFractionalMaxPooling.cu"
#else

// Forward pass of volumetric (3D) fractional max pooling.
// Accepts a non-empty 4D (C x H x W x T) or 5D (N x C x H x W x T) input,
// resizes `output` and `indices` to match, and launches one kernel
// specialization per supported pool width (2..7, or -1 for dynamic width).
// `indices` records, per output element, the input location of the max so
// the backward pass can scatter gradients.
void THNN_(VolumetricFractionalMaxPooling_updateOutput)(
           THCState *state,
           THCTensor *input,
           THCTensor *output,
           int outputT, int outputW, int outputH,
           int poolSizeT, int poolSizeW, int poolSizeH,
           THCIndexTensor *indices,
           THCTensor *randomSamples)
{
  int planeDim = 0;
  int dimh = 1;
  int dimw = 2;
  int dimt = 3;
  int64_t numBatch = 1;

  int64_t numInputDims = THCTensor_(nDimensionLegacyNoScalars)(state, input);
  THCUNN_argCheck(state, !input->is_empty() && (numInputDims == 4 || numInputDims == 5), 2, input,
                  "non-empty 4D or 5D (batch mode) tensor expected for input, but got: %s");

  if (numInputDims == 5) {
    /* batched input: shift all spatial dims right by one */
    numBatch = THCTensor_(size)(state, input, 0);
    planeDim++;
    dimh++;
    dimw++;
    dimt++;
  }

  /* sizes */
  int64_t numPlanes = THCTensor_(size)(state, input, planeDim);
  int64_t inputH = THCTensor_(size)(state, input, dimh);
  int64_t inputW = THCTensor_(size)(state, input, dimw);
  int64_t inputT = THCTensor_(size)(state, input, dimt);

  THArgCheck(outputH + poolSizeH - 1 < inputH, 7,
             "poolSizeH (%d) too large relative to input height (%d)",
             poolSizeH, inputH);
  THArgCheck(outputW + poolSizeW - 1 < inputW, 6,
             "poolSizeW (%d) too large relative to input width (%d)",
             poolSizeW, inputW);
  /* BUG FIX: this check previously compared against inputW; the time
     dimension must be validated against the input time extent (inputT). */
  THArgCheck(outputT + poolSizeT - 1 < inputT, 5,
             "poolSizeT (%d) too large relative to input time (%d)",
             poolSizeT, inputT);

  THCDeviceTensor<real, 5> devInput;
  THCDeviceTensor<real, 5> devOutput;
  THCDeviceTensor<THCIndex_t, 5> devIndices;
  THCDeviceTensor<real, 3> devSamples =
    toDeviceTensor<real, 3>(state, randomSamples);

  if (numInputDims == 4) {
    /* resize output */
    THCTensor_(resize4d)(state, output, numPlanes, outputH, outputW, outputT);
    /* indices will contain the locations for each output point */
    THCIndexTensor_(resize4d)(state, indices, numPlanes, outputH, outputW, outputT);

    /* fake a batch dimension of 1 so one kernel handles both layouts */
    devInput = toDeviceTensor<real, 4>(state, input).upcastOuter<5>();
    devOutput = toDeviceTensor<real, 4>(state, output).upcastOuter<5>();
    devIndices = toDeviceTensor<THCIndex_t, 4>(state, indices).upcastOuter<5>();
  } else {
    THCTensor_(resize5d)(state, output, numBatch, numPlanes, outputH, outputW, outputT);
    /* indices will contain the locations for each output point */
    THCIndexTensor_(resize5d)(state, indices, numBatch, numPlanes, outputH, outputW, outputT);

    devInput = toDeviceTensor<real, 5>(state, input);
    devOutput = toDeviceTensor<real, 5>(state, output);
    devIndices = toDeviceTensor<THCIndex_t, 5>(state, indices);
  }

  // block is limited to 4 warps
  // grid handles overflow per each plane
  int outputPlaneSize = devOutput.getSize(2) * devOutput.getSize(3) * devOutput.getSize(4);
  dim3 grid(THCCeilDiv(outputPlaneSize, 128),
            devInput.getSize(1),
            devInput.getSize(0));
  dim3 block(outputPlaneSize > 128 ? 128 : outputPlaneSize);

#define SFMP_UPDATE_OUTPUT(POOL_W)                                      \
  VolumetricFractionalMaxPooling_updateOutput<POOL_W, real, accreal>    \
    <<<grid, block, 0, THCState_getCurrentStream(state)>>>(             \
      devInput, devOutput, devIndices, devSamples, poolSizeT, poolSizeW, poolSizeH);

#define SFMP_UPDATE_OUTPUT_CASE(POOL_W)                 \
  case POOL_W: SFMP_UPDATE_OUTPUT(POOL_W); break

  switch (poolSizeW) {
    SFMP_UPDATE_OUTPUT_CASE(2);
    SFMP_UPDATE_OUTPUT_CASE(3);
    SFMP_UPDATE_OUTPUT_CASE(4);
    SFMP_UPDATE_OUTPUT_CASE(5);
    SFMP_UPDATE_OUTPUT_CASE(6);
    SFMP_UPDATE_OUTPUT_CASE(7);
    default:
      // dynamic pool width
      SFMP_UPDATE_OUTPUT_CASE(-1);
  }
  THCudaCheck(cudaGetLastError());
}

// Backward pass: zeroes gradInput and scatters gradOutput values back to
// the input locations recorded in `indices` during the forward pass.
void THNN_(VolumetricFractionalMaxPooling_updateGradInput)(
           THCState *state,
           THCTensor *input,
           THCTensor *gradOutput,
           THCTensor *gradInput,
           int outputT, int outputW, int outputH,
           int poolSizeT, int poolSizeW, int poolSizeH,
           THCIndexTensor *indices)
{
  int dimh = 1;
  int dimw = 2;
  int dimt = 3;

  int64_t numInputDims = THCTensor_(nDimensionLegacyNoScalars)(state, input);
  if (numInputDims == 5) {
    dimh++;
    dimw++;
    dimt++;
  }

  /* sizes */
  int64_t inputH = THCTensor_(size)(state, input, dimh);
  int64_t inputW = THCTensor_(size)(state, input, dimw);
  int64_t inputT = THCTensor_(size)(state, input, dimt);

  /* gradOutput must match the forward output extents */
  THArgCheck(outputH == THCTensor_(size)(state, gradOutput, dimh), 3,
             "gradOutput height unexpected");
  THArgCheck(outputW == THCTensor_(size)(state, gradOutput, dimw), 3,
             "gradOutput width unexpected");
  THArgCheck(outputT == THCTensor_(size)(state, gradOutput, dimt), 3,
             "gradOutput time unexpected");

  /* resize */
  THCTensor_(resizeAs)(state, gradInput, input);
  THCTensor_(zero)(state, gradInput);

  THCDeviceTensor<real, 5> devGradInput;
  THCDeviceTensor<real, 5> devGradOutput;
  THCDeviceTensor<THCIndex_t, 5> devIndices;

  /* backprop */
  if (numInputDims == 4) {
    devGradInput = toDeviceTensor<real, 4>(state, gradInput).upcastOuter<5>();
    devGradOutput = toDeviceTensor<real, 4>(state, gradOutput).upcastOuter<5>();
    devIndices = toDeviceTensor<THCIndex_t, 4>(state, indices).upcastOuter<5>();
  } else {
    devGradInput = toDeviceTensor<real, 5>(state, gradInput);
    devGradOutput = toDeviceTensor<real, 5>(state, gradOutput);
    devIndices = toDeviceTensor<THCIndex_t, 5>(state, indices);
  }

  // block is limited to 4 warps
  // grid handles overflow per each plane
  int outputPlaneSize = devGradOutput.getSize(2) * devGradOutput.getSize(3) * devGradOutput.getSize(4);
  dim3 grid(THCCeilDiv(outputPlaneSize, 128),
            devGradInput.getSize(1),
            devGradInput.getSize(0));
  dim3 block(outputPlaneSize > 128 ? 128 : outputPlaneSize);

  VolumetricFractionalMaxPooling_updateGradInput
    <<<grid, block, 0, THCState_getCurrentStream(state)>>>(
      devGradInput, devGradOutput, devIndices);
  THCudaCheck(cudaGetLastError());
}

#endif
e834fc651949d93a140d454761a93c6aad1ae9e2.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>

#include "cuda_utils.h"

// input: points(b, c, n) idx(b, m)
// output: out(b, c, m)
// For every batch/channel, gathers the m points selected by `idx`.
// Grid-stride over (batch, channel) via blockIdx.x/y; threads stride over m.
__global__ void gather_points_kernel(int b, int c, int n, int m,
                                     const float *__restrict__ points,
                                     const int *__restrict__ idx,
                                     float *__restrict__ out) {
  for (int i = blockIdx.x; i < b; i += gridDim.x) {
    for (int l = blockIdx.y; l < c; l += gridDim.y) {
      for (int j = threadIdx.x; j < m; j += blockDim.x) {
        int a = idx[i * m + j];
        out[(i * c + l) * m + j] = points[(i * c + l) * n + a];
      }
    }
  }
}

// Host wrapper: one block per (batch, channel) pair on the current stream.
void gather_points_kernel_wrapper(int b, int c, int n, int npoints,
                                  const float *points, const int *idx,
                                  float *out) {
  hipLaunchKernelGGL(( gather_points_kernel), dim3(dim3(b, c, 1)), dim3(opt_n_threads(npoints)), 0,
                     at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
      b, c, n, npoints, points, idx, out);

  CUDA_CHECK_ERRORS();
}

// input: grad_out(b, c, m) idx(b, m)
// output: grad_points(b, c, n)
// Scatter-add of output gradients back to the gathered source points.
// atomicAdd is required because several output slots may share one index.
__global__ void gather_points_grad_kernel(int b, int c, int n, int m,
                                          const float *__restrict__ grad_out,
                                          const int *__restrict__ idx,
                                          float *__restrict__ grad_points) {
  for (int i = blockIdx.x; i < b; i += gridDim.x) {
    for (int l = blockIdx.y; l < c; l += gridDim.y) {
      for (int j = threadIdx.x; j < m; j += blockDim.x) {
        int a = idx[i * m + j];
        atomicAdd(grad_points + (i * c + l) * n + a,
                  grad_out[(i * c + l) * m + j]);
      }
    }
  }
}

// Host wrapper for the gather backward pass.
void gather_points_grad_kernel_wrapper(int b, int c, int n, int npoints,
                                       const float *grad_out, const int *idx,
                                       float *grad_points) {
  hipLaunchKernelGGL(( gather_points_grad_kernel), dim3(dim3(b, c, 1)), dim3(opt_n_threads(npoints)), 0,
                     at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
      b, c, n, npoints, grad_out, idx, grad_points);

  CUDA_CHECK_ERRORS();
}

// Combine the (value, index) pairs at idx1 and idx2 into idx1, keeping the
// pair with the larger value.  Building block of the shared-memory arg-max.
__device__ void __update(float *__restrict__ dists, int *__restrict__ dists_i,
                         int idx1, int idx2) {
  const float v1 = dists[idx1], v2 = dists[idx2];
  const int i1 = dists_i[idx1], i2 = dists_i[idx2];
  dists[idx1] = max(v1, v2);
  dists_i[idx1] = v2 > v1 ? i2 : i1;
}

// Input dataset: (b, n, c), temp: (b, n)
// Output idxs: (b, m)
// Iterative farthest point sampling: one block per batch element; each of the
// m-1 iterations updates per-point distances to the selected set in `temp`
// (which must be pre-filled with a large value) and arg-max-reduces them in
// shared memory to pick the next point.
template <unsigned int block_size>
__global__ void furthest_point_sampling_kernel(
    int b, int n, int c, int m, const float *__restrict__ dataset,
    float *__restrict__ temp, int *__restrict__ idxs) {
  if (m <= 0) return;
  __shared__ float dists[block_size];
  __shared__ int dists_i[block_size];

  int batch_index = blockIdx.x;
  dataset += batch_index * n * c;
  temp += batch_index * n;
  idxs += batch_index * m;

  int tid = threadIdx.x;
  const int stride = block_size;

  int old = 0;
  if (threadIdx.x == 0) idxs[0] = old;

  __syncthreads();
  for (int j = 1; j < m; j++) {
    int besti = 0;
    float best = -1;
    for (int k = tid; k < n; k += stride) {
      // squared Euclidean distance between point k and the last chosen point
      float dis, d = 0.0f;
      for (int ci = 0; ci < c; ci++) {
        dis = dataset[old * c + ci] - dataset[k * c + ci];
        d += dis * dis;
      }
      float d2 = min(d, temp[k]);
      temp[k] = d2;
      besti = d2 > best ? k : besti;
      best = d2 > best ? d2 : best;
    }
    dists[tid] = best;
    dists_i[tid] = besti;
    __syncthreads();

    // tree reduction in shared memory; dead branches compile away per
    // block_size specialization
    if (block_size >= 512) {
      if (tid < 256) {
        __update(dists, dists_i, tid, tid + 256);
      }
      __syncthreads();
    }
    if (block_size >= 256) {
      if (tid < 128) {
        __update(dists, dists_i, tid, tid + 128);
      }
      __syncthreads();
    }
    if (block_size >= 128) {
      if (tid < 64) {
        __update(dists, dists_i, tid, tid + 64);
      }
      __syncthreads();
    }
    if (block_size >= 64) {
      if (tid < 32) {
        __update(dists, dists_i, tid, tid + 32);
      }
      __syncthreads();
    }
    if (block_size >= 32) {
      if (tid < 16) {
        __update(dists, dists_i, tid, tid + 16);
      }
      __syncthreads();
    }
    if (block_size >= 16) {
      if (tid < 8) {
        __update(dists, dists_i, tid, tid + 8);
      }
      __syncthreads();
    }
    if (block_size >= 8) {
      if (tid < 4) {
        __update(dists, dists_i, tid, tid + 4);
      }
      __syncthreads();
    }
    if (block_size >= 4) {
      if (tid < 2) {
        __update(dists, dists_i, tid, tid + 2);
      }
      __syncthreads();
    }
    if (block_size >= 2) {
      if (tid < 1) {
        __update(dists, dists_i, tid, tid + 1);
      }
      __syncthreads();
    }

    old = dists_i[0];
    if (tid == 0) idxs[j] = old;
    // BUG FIX: barrier so no thread starts the next iteration and overwrites
    // dists/dists_i before every thread has read dists_i[0] above.
    __syncthreads();
  }
}

// Host wrapper: dispatches to the nearest power-of-two block-size
// specialization returned by opt_n_threads(n).
void furthest_point_sampling_kernel_wrapper(int b, int n, int c, int m,
                                            const float *dataset, float *temp,
                                            int *idxs) {
  unsigned int n_threads = opt_n_threads(n);

  hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();

  switch (n_threads) {
  case 512:
    hipLaunchKernelGGL(( furthest_point_sampling_kernel<512>)
        , dim3(b), dim3(n_threads), 0, stream, b, n, c, m, dataset, temp, idxs);
    break;
  case 256:
    hipLaunchKernelGGL(( furthest_point_sampling_kernel<256>)
        , dim3(b), dim3(n_threads), 0, stream, b, n, c, m, dataset, temp, idxs);
    break;
  case 128:
    hipLaunchKernelGGL(( furthest_point_sampling_kernel<128>)
        , dim3(b), dim3(n_threads), 0, stream, b, n, c, m, dataset, temp, idxs);
    break;
  case 64:
    hipLaunchKernelGGL(( furthest_point_sampling_kernel<64>)
        , dim3(b), dim3(n_threads), 0, stream, b, n, c, m, dataset, temp, idxs);
    break;
  case 32:
    hipLaunchKernelGGL(( furthest_point_sampling_kernel<32>)
        , dim3(b), dim3(n_threads), 0, stream, b, n, c, m, dataset, temp, idxs);
    break;
  case 16:
    hipLaunchKernelGGL(( furthest_point_sampling_kernel<16>)
        , dim3(b), dim3(n_threads), 0, stream, b, n, c, m, dataset, temp, idxs);
    break;
  case 8:
    hipLaunchKernelGGL(( furthest_point_sampling_kernel<8>)
        , dim3(b), dim3(n_threads), 0, stream, b, n, c, m, dataset, temp, idxs);
    break;
  case 4:
    hipLaunchKernelGGL(( furthest_point_sampling_kernel<4>)
        , dim3(b), dim3(n_threads), 0, stream, b, n, c, m, dataset, temp, idxs);
    break;
  case 2:
    hipLaunchKernelGGL(( furthest_point_sampling_kernel<2>)
        , dim3(b), dim3(n_threads), 0, stream, b, n, c, m, dataset, temp, idxs);
    break;
  case 1:
    hipLaunchKernelGGL(( furthest_point_sampling_kernel<1>)
        , dim3(b), dim3(n_threads), 0, stream, b, n, c, m, dataset, temp, idxs);
    break;
  default:
    hipLaunchKernelGGL(( furthest_point_sampling_kernel<512>)
        , dim3(b), dim3(n_threads), 0, stream, b, n, c, m, dataset, temp, idxs);
  }

  CUDA_CHECK_ERRORS();
}
e834fc651949d93a140d454761a93c6aad1ae9e2.cu
#include <stdio.h>
#include <stdlib.h>

#include "cuda_utils.h"

// input: points(b, c, n) idx(b, m)
// output: out(b, c, m)
// For every batch/channel, gathers the m points selected by `idx`.
// Grid-stride over (batch, channel) via blockIdx.x/y; threads stride over m.
__global__ void gather_points_kernel(int b, int c, int n, int m,
                                     const float *__restrict__ points,
                                     const int *__restrict__ idx,
                                     float *__restrict__ out) {
  for (int i = blockIdx.x; i < b; i += gridDim.x) {
    for (int l = blockIdx.y; l < c; l += gridDim.y) {
      for (int j = threadIdx.x; j < m; j += blockDim.x) {
        int a = idx[i * m + j];
        out[(i * c + l) * m + j] = points[(i * c + l) * n + a];
      }
    }
  }
}

// Host wrapper: one block per (batch, channel) pair on the current stream.
void gather_points_kernel_wrapper(int b, int c, int n, int npoints,
                                  const float *points, const int *idx,
                                  float *out) {
  gather_points_kernel<<<dim3(b, c, 1), opt_n_threads(npoints), 0,
                         at::cuda::getCurrentCUDAStream()>>>(b, c, n, npoints,
                                                             points, idx, out);

  CUDA_CHECK_ERRORS();
}

// input: grad_out(b, c, m) idx(b, m)
// output: grad_points(b, c, n)
// Scatter-add of output gradients back to the gathered source points.
// atomicAdd is required because several output slots may share one index.
__global__ void gather_points_grad_kernel(int b, int c, int n, int m,
                                          const float *__restrict__ grad_out,
                                          const int *__restrict__ idx,
                                          float *__restrict__ grad_points) {
  for (int i = blockIdx.x; i < b; i += gridDim.x) {
    for (int l = blockIdx.y; l < c; l += gridDim.y) {
      for (int j = threadIdx.x; j < m; j += blockDim.x) {
        int a = idx[i * m + j];
        atomicAdd(grad_points + (i * c + l) * n + a,
                  grad_out[(i * c + l) * m + j]);
      }
    }
  }
}

// Host wrapper for the gather backward pass.
void gather_points_grad_kernel_wrapper(int b, int c, int n, int npoints,
                                       const float *grad_out, const int *idx,
                                       float *grad_points) {
  gather_points_grad_kernel<<<dim3(b, c, 1), opt_n_threads(npoints), 0,
                              at::cuda::getCurrentCUDAStream()>>>(
      b, c, n, npoints, grad_out, idx, grad_points);

  CUDA_CHECK_ERRORS();
}

// Combine the (value, index) pairs at idx1 and idx2 into idx1, keeping the
// pair with the larger value.  Building block of the shared-memory arg-max.
__device__ void __update(float *__restrict__ dists, int *__restrict__ dists_i,
                         int idx1, int idx2) {
  const float v1 = dists[idx1], v2 = dists[idx2];
  const int i1 = dists_i[idx1], i2 = dists_i[idx2];
  dists[idx1] = max(v1, v2);
  dists_i[idx1] = v2 > v1 ? i2 : i1;
}

// Input dataset: (b, n, c), temp: (b, n)
// Output idxs: (b, m)
// Iterative farthest point sampling: one block per batch element; each of the
// m-1 iterations updates per-point distances to the selected set in `temp`
// (which must be pre-filled with a large value) and arg-max-reduces them in
// shared memory to pick the next point.
template <unsigned int block_size>
__global__ void furthest_point_sampling_kernel(
    int b, int n, int c, int m, const float *__restrict__ dataset,
    float *__restrict__ temp, int *__restrict__ idxs) {
  if (m <= 0) return;
  __shared__ float dists[block_size];
  __shared__ int dists_i[block_size];

  int batch_index = blockIdx.x;
  dataset += batch_index * n * c;
  temp += batch_index * n;
  idxs += batch_index * m;

  int tid = threadIdx.x;
  const int stride = block_size;

  int old = 0;
  if (threadIdx.x == 0) idxs[0] = old;

  __syncthreads();
  for (int j = 1; j < m; j++) {
    int besti = 0;
    float best = -1;
    for (int k = tid; k < n; k += stride) {
      // squared Euclidean distance between point k and the last chosen point
      float dis, d = 0.0f;
      for (int ci = 0; ci < c; ci++) {
        dis = dataset[old * c + ci] - dataset[k * c + ci];
        d += dis * dis;
      }
      float d2 = min(d, temp[k]);
      temp[k] = d2;
      besti = d2 > best ? k : besti;
      best = d2 > best ? d2 : best;
    }
    dists[tid] = best;
    dists_i[tid] = besti;
    __syncthreads();

    // tree reduction in shared memory; dead branches compile away per
    // block_size specialization
    if (block_size >= 512) {
      if (tid < 256) {
        __update(dists, dists_i, tid, tid + 256);
      }
      __syncthreads();
    }
    if (block_size >= 256) {
      if (tid < 128) {
        __update(dists, dists_i, tid, tid + 128);
      }
      __syncthreads();
    }
    if (block_size >= 128) {
      if (tid < 64) {
        __update(dists, dists_i, tid, tid + 64);
      }
      __syncthreads();
    }
    if (block_size >= 64) {
      if (tid < 32) {
        __update(dists, dists_i, tid, tid + 32);
      }
      __syncthreads();
    }
    if (block_size >= 32) {
      if (tid < 16) {
        __update(dists, dists_i, tid, tid + 16);
      }
      __syncthreads();
    }
    if (block_size >= 16) {
      if (tid < 8) {
        __update(dists, dists_i, tid, tid + 8);
      }
      __syncthreads();
    }
    if (block_size >= 8) {
      if (tid < 4) {
        __update(dists, dists_i, tid, tid + 4);
      }
      __syncthreads();
    }
    if (block_size >= 4) {
      if (tid < 2) {
        __update(dists, dists_i, tid, tid + 2);
      }
      __syncthreads();
    }
    if (block_size >= 2) {
      if (tid < 1) {
        __update(dists, dists_i, tid, tid + 1);
      }
      __syncthreads();
    }

    old = dists_i[0];
    if (tid == 0) idxs[j] = old;
    // BUG FIX: barrier so no thread starts the next iteration and overwrites
    // dists/dists_i before every thread has read dists_i[0] above.
    __syncthreads();
  }
}

// Host wrapper: dispatches to the nearest power-of-two block-size
// specialization returned by opt_n_threads(n).
void furthest_point_sampling_kernel_wrapper(int b, int n, int c, int m,
                                            const float *dataset, float *temp,
                                            int *idxs) {
  unsigned int n_threads = opt_n_threads(n);

  cudaStream_t stream = at::cuda::getCurrentCUDAStream();

  switch (n_threads) {
  case 512:
    furthest_point_sampling_kernel<512>
        <<<b, n_threads, 0, stream>>>(b, n, c, m, dataset, temp, idxs);
    break;
  case 256:
    furthest_point_sampling_kernel<256>
        <<<b, n_threads, 0, stream>>>(b, n, c, m, dataset, temp, idxs);
    break;
  case 128:
    furthest_point_sampling_kernel<128>
        <<<b, n_threads, 0, stream>>>(b, n, c, m, dataset, temp, idxs);
    break;
  case 64:
    furthest_point_sampling_kernel<64>
        <<<b, n_threads, 0, stream>>>(b, n, c, m, dataset, temp, idxs);
    break;
  case 32:
    furthest_point_sampling_kernel<32>
        <<<b, n_threads, 0, stream>>>(b, n, c, m, dataset, temp, idxs);
    break;
  case 16:
    furthest_point_sampling_kernel<16>
        <<<b, n_threads, 0, stream>>>(b, n, c, m, dataset, temp, idxs);
    break;
  case 8:
    furthest_point_sampling_kernel<8>
        <<<b, n_threads, 0, stream>>>(b, n, c, m, dataset, temp, idxs);
    break;
  case 4:
    furthest_point_sampling_kernel<4>
        <<<b, n_threads, 0, stream>>>(b, n, c, m, dataset, temp, idxs);
    break;
  case 2:
    furthest_point_sampling_kernel<2>
        <<<b, n_threads, 0, stream>>>(b, n, c, m, dataset, temp, idxs);
    break;
  case 1:
    furthest_point_sampling_kernel<1>
        <<<b, n_threads, 0, stream>>>(b, n, c, m, dataset, temp, idxs);
    break;
  default:
    furthest_point_sampling_kernel<512>
        <<<b, n_threads, 0, stream>>>(b, n, c, m, dataset, temp, idxs);
  }

  CUDA_CHECK_ERRORS();
}
424d209962984b3f1bc6f03fdf4023c92b5aed4d.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include "string.h"
// #include<time.h>
#include<float.h>

// First-layer filter bank (32 filters of 5x5) in constant memory;
// filled from the host with hipMemcpyToSymbol.
__constant__ int FIL[32*5*5];

// First convolution layer: 28x28 input, 32 5x5 filters -> 32 maps of 24x24.
// Launch: grid.x = 32 (one block per filter), block covers the 24x24 output;
// threads outside the valid region are masked off by the bounds check.
__global__ void conv1(unsigned int *picd, int *resultd){
    int i = threadIdx.y;
    int j = threadIdx.x;
    int l = blockIdx.x;               // filter index
    int offset = l*25;                // start of this filter in FIL
    const int xsize = 28;
    const int filterdim = 5;
    const int osize = xsize - filterdim + 1;   // 24

    if(i < osize && j < osize){
        // 5x5 correlation.  Replaces the hand-unrolled sum, whose row-4 term
        // (FIL[offset+21]) erroneously read input row i+3 instead of i+4.
        int sum = 0;
        for(int fi = 0; fi < filterdim; fi++){
            for(int fj = 0; fj < filterdim; fj++){
                sum += FIL[offset + fi*filterdim + fj] * picd[xsize*(i+fi) + (j+fj)];
            }
        }
        resultd[l*osize*osize + i*osize + j] = sum;
    }
}

// 2x2 max pooling over the 32 conv1 maps: 24x24 -> 12x12 per map.
// Launch: grid.x = 32 (one block per map), block at least 12x12.
__global__ void maxpooling(int *maxip1d, int *maxop1d){
    int i = threadIdx.y;
    int j = threadIdx.x;
    int l = blockIdx.x;
    const int xsize = 28;
    const int filterdim = 5;
    const int isize = xsize - filterdim + 1;   // 24: pooled-input map side
    if(i < isize/2 && j < isize/2){
        // top-left element of this thread's 2x2 window
        int index = l*isize*isize + j*2 + i*2*isize;
        int a = maxip1d[index];
        int b = maxip1d[index + 1];
        int c = maxip1d[index + isize];
        int d = maxip1d[index + isize + 1];
        int max1 = a > b ? a : b;
        int max2 = c > d ? c : d;
        maxop1d[l*isize*isize/4 + i*isize/2 + j] = max1 > max2 ? max1 : max2;
    }
}

// Second convolution layer: 32 maps of 12x12, 64 filters of 32x5x5 -> 64
// maps of 8x8.  Launch: grid.x = 64 (one block per output filter),
// block at least 8x8.
__global__ void conv2(int *cip2d, int *filter2d, int *cop2d){
    int i = threadIdx.y;
    int j = threadIdx.x;
    int l = blockIdx.x;               // output filter index
    int lstar = l*800;                // 32*5*5 weights per output filter
    const int di = 12;                // input map side
    const int disquare = di*di;

    if(i < 8 && j < 8){
        // Accumulate the 5x5 correlation over all 32 input channels.
        // Replaces the hand-unrolled sum, whose k+21 term erroneously read
        // input row i+3 instead of i+4.
        int sum = 0;
        for(int mch = 0; mch < 32; mch++){
            int foff = lstar + mch*25;
            for(int fi = 0; fi < 5; fi++){
                for(int fj = 0; fj < 5; fj++){
                    sum += filter2d[foff + fi*5 + fj] * cip2d[mch*disquare + di*(i+fi) + (j+fj)];
                }
            }
        }
        cop2d[l*64 + i*8 + j] = sum;
    }
}

// 2x2 max pooling over the 64 conv2 maps: 8x8 -> 4x4 per map.
// Launch: grid.x = 64 (one block per map), block at least 4x4.
__global__ void maxpool(int *maxip2d, int *maxop2d){
    int i = threadIdx.y;
    int j = threadIdx.x;
    int l = blockIdx.x;
    const int xsize = 12;
    const int filterdim = 5;
    const int isize = xsize - filterdim + 1;   // 8: pooled-input map side
    if(i < isize/2 && j < isize/2){
        int index = l*isize*isize + j*2 + i*2*isize;
        int a = maxip2d[index];
        int b = maxip2d[index + 1];
        int c = maxip2d[index + isize];
        int d = maxip2d[index + isize + 1];
        int max1 = a > b ? a : b;
        int max2 = c > d ? c : d;
        maxop2d[l*isize*isize/4 + i*isize/2 + j] = max1 > max2 ? max1 : max2;
    }
}

// Driver: runs conv1 -> maxpool -> conv2 -> maxpool on an all-ones image
// with all-ones filters and prints the final 64 pooled 4x4 maps.
int main(int argc, char **argv){
    const int xsize = 28;        // input image side
    const int filterdim = 5;     // conv filter side (both layers)
    const int numfilters = 32;   // conv1 filter count
    const int numfilters1 = 64;  // conv2 filter count

    /* Bytes for the input image */
    int numbytes = xsize*xsize*sizeof(int);
    /* Bytes per conv1 output map (24x24) */
    int numbytes2 = (xsize-filterdim+1)*(xsize-filterdim+1)*sizeof(int);
    /* Bytes per maxpool1 output map (12x12) */
    int numbytes3 = ((xsize-filterdim+1)*(xsize-filterdim+1)/4)*sizeof(int);
    /* Bytes per conv2 output map (8x8) */
    int numbytes4 = ((xsize-filterdim+1)/2 - filterdim + 1)*((xsize-filterdim+1)/2 - filterdim + 1)*sizeof(int);
    /* Bytes per maxpool2 output map (4x4).
       BUG FIX: numbytes4 is already a byte count, so the original
       (numbytes4/4)*sizeof(int) over-counted by a factor of sizeof(int). */
    int numbytes5 = numbytes4/4;

    /* Host buffers (unused maxip1/maxip2/cip2 host copies removed) */
    unsigned int *pic = (unsigned int *)malloc(numbytes);
    int *result  = (int *)malloc(numfilters*numbytes2);
    int *maxop1  = (int *)malloc(numfilters*numbytes3);
    int *cop2    = (int *)malloc(numfilters1*numbytes4);
    int *filter2 = (int *)malloc(numfilters1*numfilters*filterdim*filterdim*sizeof(int));
    int *maxop2  = (int *)malloc(numfilters1*numbytes5);
    int filter[numfilters*filterdim*filterdim];

    /* Device buffers */
    unsigned int *picd;
    int *resultd;
    int *maxip1d;
    int *maxop1d;
    int *cip2d;
    int *cop2d;
    int *filter2d;
    int *maxip2d;
    int *maxop2d;

    hipMalloc(&picd, numbytes);
    hipMalloc(&resultd, numfilters*numbytes2);
    hipMalloc(&maxip1d, numfilters*numbytes2);
    hipMalloc(&maxop1d, numfilters*numbytes3);
    hipMalloc(&cip2d, numfilters*numbytes3);
    hipMalloc(&cop2d, numfilters1*numbytes4);
    hipMalloc(&filter2d, numfilters1*numfilters*filterdim*filterdim*sizeof(int));
    hipMalloc(&maxip2d, numfilters1*numbytes4);
    hipMalloc(&maxop2d, numfilters1*numbytes5);

    /* Initialize the image to all ones. TODO: read a real image instead. */
    for (int i = 0; i < xsize; i++) {
        for (int j = 0; j < xsize; j++) {
            pic[i*xsize + j] = 1;
        }
    }

    /* Initialize the conv1 filters to all ones. TODO: read from a file. */
    for(int k = 0; k < numfilters; k++){
        for (int i = 0; i < filterdim; i++) {
            for (int j = 0; j < filterdim; j++){
                filter[k*(filterdim*filterdim) + i*filterdim + j] = 1;
            }
        }
    }

    /* Initialize the conv2 filters to all ones. TODO: read from a file. */
    for(int k = 0; k < numfilters1; k++){
        for(int m = 0; m < numfilters; m++){
            for (int i = 0; i < filterdim; i++) {
                for (int j = 0; j < filterdim; j++){
                    filter2[k*(numfilters*filterdim*filterdim) + m*filterdim*filterdim + i*filterdim + j] = 1;
                }
            }
        }
    }

    /* Layer 1: conv1 */
    dim3 dimGrid(32);
    dim3 dimBlock(32, 32);
    hipMemcpy(picd, pic, numbytes, hipMemcpyHostToDevice);
    hipMemcpyToSymbol(FIL, filter, numfilters*filterdim*filterdim*sizeof(int));
    hipLaunchKernelGGL(( conv1), dim3(dimGrid), dim3(dimBlock), 0, 0, picd, resultd);
    hipMemcpy(result, resultd, numfilters*numbytes2, hipMemcpyDeviceToHost);

    /* Layer 2: maxpool1.
       NOTE(review): the device->host->device round trips here and below are
       redundant; kept to preserve the original data flow. */
    dim3 dimBlock1(16, 16);
    hipMemcpy(maxip1d, result, numfilters*numbytes2, hipMemcpyHostToDevice);
    hipLaunchKernelGGL(( maxpooling), dim3(dimGrid), dim3(dimBlock1), 0, 0, maxip1d, maxop1d);
    hipMemcpy(maxop1, maxop1d, numfilters*numbytes3, hipMemcpyDeviceToHost);

    /* Layer 3: conv2 */
    hipMemcpy(cip2d, maxop1, numfilters*numbytes3, hipMemcpyHostToDevice);
    hipMemcpy(filter2d, filter2, numfilters1*numfilters*filterdim*filterdim*sizeof(int), hipMemcpyHostToDevice);
    dim3 dimGrid2(64);
    dim3 dimBlock2(8, 8);
    hipLaunchKernelGGL(( conv2), dim3(dimGrid2), dim3(dimBlock2), 0, 0, cip2d, filter2d, cop2d);
    hipMemcpy(cop2, cop2d, numfilters1*numbytes4, hipMemcpyDeviceToHost);

    /* Layer 4: maxpool2 */
    hipMemcpy(maxip2d, cop2, numfilters1*numbytes4, hipMemcpyHostToDevice);
    hipLaunchKernelGGL(( maxpool), dim3(dimGrid2), dim3(dimBlock2), 0, 0, maxip2d, maxop2d);
    /* BUG FIX: copy was sized with numfilters (32) instead of numfilters1
       (64), so half of the pooled maps were never copied back. */
    hipMemcpy(maxop2, maxop2d, numfilters1*numbytes5, hipMemcpyDeviceToHost);

    for(int k = 0; k < numfilters1; k++){
        for(int i = 0; i < 4; i++){
            for(int j = 0; j < 4; j++){
                printf("maxpool[%d][%d]:%d\t", k, i*4+j, maxop2[k*16 + i*4 + j]);
            }
            printf("\n");
        }
        printf("\n\n");
    }

    /* Cleanup (absent in the original) */
    hipFree(picd);
    hipFree(resultd);
    hipFree(maxip1d);
    hipFree(maxop1d);
    hipFree(cip2d);
    hipFree(cop2d);
    hipFree(filter2d);
    hipFree(maxip2d);
    hipFree(maxop2d);
    free(pic);
    free(result);
    free(maxop1);
    free(cop2);
    free(filter2);
    free(maxop2);

    return 0;
}
424d209962984b3f1bc6f03fdf4023c92b5aed4d.cu
#include <stdio.h> #include <stdlib.h> #include <fcntl.h> #include "string.h" // #include<time.h> #include<float.h> __constant__ int FIL[32*5*5]; __global__ void conv1(unsigned int *picd, int *resultd){ int i,j,k,l; int sum, offset; i = threadIdx.y; j = threadIdx.x; l = blockIdx.x; offset = l*25; int xsize = 28; int filterdim = 5; k=0; sum =0; if(i<(xsize -filterdim +1)&& j<(xsize -filterdim +1)){ sum = FIL[offset + k]*picd[ xsize * (i) + j ] + FIL[offset+ k+1]*picd[ xsize*(i) + (j+1) ] + FIL[offset+ k+2]*picd[ xsize * (i)+(j+2)] + FIL[offset+k+3]*picd[xsize * (i)+(j+3)] + FIL[offset+k+4]*picd[ xsize * (i)+(j+4)]+ FIL[offset+ k+5]*picd[ xsize*(i+1)+(j) ] + FIL[offset+k+6]*picd[ xsize * (i+1) + (j+1) ] + FIL[offset+ k+7]*picd[ xsize*(i+1) + (j+2) ] + FIL[offset+k+8]*picd[ xsize*(i+1) + (j+3) ] + FIL[offset+k+9]*picd[ xsize*(i+1) + (j+4) ] + FIL[offset+k+10]*picd[ xsize*(i+2) + (j) ] + FIL[offset+k+11]*picd[ xsize * (i+2) + (j+1) ] + FIL[offset+k+12]*picd[ xsize*(i+2) + (j+2)] + FIL[offset+k+13]*picd[ xsize*(i+2) + (j+3)] +FIL[offset+k+14]*picd[ xsize*(i+2) + (j+4)] + FIL[offset +k+15]*picd[ xsize*(i+3) + (j)] + FIL[offset+k+16]*picd[ xsize*(i+3) + (j+1)] + FIL[offset+k+17]*picd[ xsize*(i+3) + (j+2)] + FIL[offset+k+18]*picd[ xsize*(i+3) + (j+3)] + FIL[offset+k+19]*picd[ xsize*(i+3) + (j+4)] + FIL[offset+k+20]*picd[ xsize*(i+4) + (j)] +FIL[offset+k+21]*picd[ xsize*(i+3) + (j+1)] + FIL[offset+k+22]*picd[ xsize*(i+4) + (j+2)] + FIL[offset+k+23]*picd[ xsize*(i+4) + (j+3)] + FIL[offset+ k+24]*picd[ xsize*(i+4) + (j+4)]; resultd[l*(xsize -filterdim +1)*(xsize -filterdim +1) + i*(xsize - filterdim +1)+j] = sum; //printf("resultgpu[%d][%d]=%d\n",l,i*(xsize - filterdim +1)+j,resulth[l*(xsize -filterdim +1)*(xsize -filterdim +1) + i*(xsize - filterdim +1)+j]); } } __global__ void maxpooling(int *maxip1d, int *maxop1d){ int i,j,l; i = threadIdx.y; j = threadIdx.x; l = blockIdx.x; int xsize = 28; int filterdim = 5; if(i<((xsize-filterdim+1)/2)&&(j<((xsize-filterdim+1)/2))){ int 
a,b,c,d,index, max1, max2; index = l*((xsize -filterdim +1)*(xsize -filterdim +1))+ threadIdx.x*2 + threadIdx.y*2*(xsize -filterdim +1); a = maxip1d[index]; b = maxip1d[index +1]; c = maxip1d[index+(xsize-filterdim+1)]; d = maxip1d[index + (xsize-filterdim+2)]; if(a>b){ max1 = a; } else{ max1 = b; } if(c>d){ max2 = c; } else{ max2 = d; } if(max1>max2){ maxop1d[l*(xsize -filterdim +1)*(xsize -filterdim +1)/4 + i*(xsize - filterdim +1)/2+j]=max1; } else{ maxop1d[l*(xsize -filterdim +1)*(xsize -filterdim +1)/4 + i*(xsize - filterdim +1)/2+j] = max2; } } } __global__ void conv2(int *cip2d, int *filter2d, int *cop2d){ int i,j,l,sum; i = threadIdx.y; j = threadIdx.x; l = blockIdx.x; int lstar; lstar = l*800; sum = 0; int k =0; int di = 12; int disquare = di*di; int m; if(i<8 && j<8){ for(m = 0; m<32; m++){ sum = sum + filter2d[lstar + k]*cip2d[(m*disquare)+ (di*i) + j] + filter2d[lstar + k+1]*cip2d[(m*disquare)+ di*(i) + (j+1)] + filter2d[lstar+ k+2]*cip2d[(m*disquare)+ di*(i)+(j+2)] + filter2d[lstar +k+3]*cip2d[(m*disquare)+ di*(i)+(j+3)] + filter2d[lstar+k+4]*cip2d[(m*disquare)+ di*(i)+(j+4)]+ filter2d[lstar+ k+5]*cip2d[(m*disquare)+ di*(i+1)+(j)] + filter2d[lstar +k+6]*cip2d[(m*disquare)+ di* (i+1) + (j+1) ] + filter2d[lstar+ k+7]*cip2d[(m*disquare)+ di*(i+1)+(j+2)] + filter2d[lstar+k+8]*cip2d[(m*disquare)+ di*(i+1) + (j+3) ] + filter2d[lstar +k+9]*cip2d[(m*disquare)+ di*(i+1) +(j+4)] + filter2d[lstar+k+10]*cip2d[(m*disquare)+ di*(i+2) +(j)] + filter2d[lstar+k+11]*cip2d[(m*disquare)+ di* (i+2) + (j+1)] + filter2d[lstar+k+12]*cip2d[(m*disquare)+ di*(i+2) + (j+2)] +filter2d[lstar+k+13]*cip2d[(m*disquare)+ di*(i+2)+(j+3)] + filter2d[lstar+k+14]*cip2d[(m*disquare)+ di*(i+2)+(j+4)]+filter2d[lstar+k+15]*cip2d[(m*disquare)+ di*(i+3)+(j)] + filter2d[lstar+k+16]*cip2d[(m*disquare)+ di*(i+3)+(j+1)]+filter2d[lstar+k+17]*cip2d[(m*disquare)+ di*(i+3)+(j+2)] + filter2d[lstar+k+18]*cip2d[(m*disquare)+ di*(i+3)+(j+3)] + filter2d[lstar+k+19]*cip2d[(m*disquare)+di*(i+3)+(j+4)] + 
filter2d[lstar+k+20]*cip2d[(m*disquare)+ di*(i+4)+(j)] +filter2d[lstar+k+21]*cip2d[(m*disquare)+ di*(i+3)+(j+1)] + filter2d[lstar +k+22]*cip2d[(m*disquare)+ di*(i+4)+(j+2)] + filter2d[lstar+k+23]*cip2d[(m*disquare)+ di*(i+4)+(j+3)] + filter2d[lstar+ k+24]*cip2d[(m*disquare)+ di*(i+4) + (j+4)]; k+=25; } cop2d[l*64+i*8+j] = sum; // printf("resultdevice[%d][%d]:%d\n",l,i*8+j,cop2d[l*64+i*8+j]); } } __global__ void maxpool(int *maxip2d, int *maxop2d){ int i,j,l; i = threadIdx.y; j = threadIdx.x; l = blockIdx.x; int xsize = 12; int filterdim = 5; if(i<((xsize-filterdim+1)/2)&&(j<((xsize-filterdim+1)/2))){ int a,b,c,d,index, max1, max2; index = l*((xsize -filterdim +1)*(xsize -filterdim +1))+ threadIdx.x*2 + threadIdx.y*2*(xsize -filterdim +1); a = maxip2d[index]; b = maxip2d[index +1]; c = maxip2d[index+(xsize-filterdim+1)]; d = maxip2d[index + (xsize-filterdim+2)]; if(a>b){ max1 = a; } else{ max1 = b; } if(c>d){ max2 = c; } else{ max2 = d; } if(max1>max2){ maxop2d[l*(xsize -filterdim +1)*(xsize -filterdim +1)/4 + i*(xsize - filterdim +1)/2+j]=max1; } else{ maxop2d[l*(xsize -filterdim +1)*(xsize -filterdim +1)/4 + i*(xsize - filterdim +1)/2+j] = max2; } } } int main(int argc, char **argv){ int xsize; int filterdim; int numfilters; int numfilters1; xsize = 28; filterdim = 5; numfilters = 32; numfilters1 = 64; /*Numbytes required for initial image*/ int numbytes = xsize*xsize*sizeof(int); /*Numbytes require for the output of first convolution layer*/ int numbytes2 = (xsize-filterdim+1)*(xsize-filterdim+1)*sizeof(int); //24x24 /**Numbytes required for output of first maxpool layer**/ int numbytes3 = ((xsize-filterdim+1)*(xsize-filterdim+1)/4)*sizeof(int); //12x12 /*Numbytes required for the output of second convolution layer*/ int numbytes4 = ((xsize-filterdim+1)/2 - filterdim + 1)*((xsize-filterdim+1)/2 - filterdim + 1)*sizeof(int);//8x8 /*Numbytes required for the output of second maxpool layer*/ int numbytes5 = (numbytes4/4)*sizeof(int);//4x4 /*Image on host side*/ /*Ip 
and op to first conv layer*/ unsigned int *pic = (unsigned int *)malloc(numbytes); int *result; int filter[numfilters*filterdim*filterdim]; /*Ip and op to first maxpool layer*/ int *maxip1; int *maxop1; /*Ip and op of second conv layer*/ int *cip2; int *cop2; int *filter2; /*ip and op to second maxpool layer*/ int *maxip2; int *maxop2; /*Device side variables*/ unsigned int *picd; int *resultd; /*Ip and op to first maxpool layer*/ int *maxip1d; int *maxop1d; /*Ip and op of second conv layer*/ int *cip2d; int *cop2d; int *filter2d; /*ip and op to second maxpool layer*/ int *maxip2d; int *maxop2d; result = (int *)malloc(numfilters*numbytes2); maxip1 = (int *)malloc(numfilters*numbytes2); maxop1 = (int *)malloc(numfilters*numbytes3); cip2 = (int *)malloc(numfilters*numbytes3); cop2 = (int *)malloc(numfilters1*numbytes4); filter2 = (int *)malloc(numfilters1*numfilters*filterdim*filterdim*sizeof(int)); maxip2 = (int *)malloc(numfilters1*numbytes4); maxop2 = (int *)malloc(numfilters1*numbytes5); cudaMalloc(&picd, numbytes); cudaMalloc(&resultd, numfilters*numbytes2); cudaMalloc(&maxip1d, numfilters*numbytes2); cudaMalloc(&maxop1d, numfilters*numbytes3); cudaMalloc(&cip2d, numfilters*numbytes3); cudaMalloc(&cop2d, numfilters1*numbytes4); cudaMalloc(&filter2d, numfilters1*numfilters*filterdim*filterdim*sizeof(int)); cudaMalloc(&maxip2d, numfilters1*numbytes4); cudaMalloc(&maxop2d, numfilters1*numbytes5); /*Initializing the image on host side*/ /*Should modify to later on read in image*/ int i,j,k,l,count,dimx; for (i=0; i<xsize; i++) { for (j=0; j<xsize; j++) { pic[i*xsize + j] = 1; //printf("pic[%d][%d] : %d\t",i,j,pic[i*xsize + j]); } // printf("\n"); } /*Initializing the filter for first conv layer to a value*/ /*TO DO : Read in filter from a file */ for(int k=0;k<numfilters;k++){ for (int i=0; i<filterdim; i++) { for (int j=0; j<filterdim; j++){ filter[k*(filterdim*filterdim) + i*filterdim + j] = 1; // printf("filter[%d][%d]: %d\n",k, i*filterdim + j, 
filter[k*(filterdim*filterdim) + i*filterdim + j]); } } } /*Initializing the filter for second conv layer to a value*/ /*TO DO : Read in filter from a file */ for(int k=0;k<numfilters1;k++){ for(int m= 0; m<numfilters;m++){ for (int i=0; i<filterdim; i++) { for (int j=0; j<filterdim; j++){ filter2[k*(numfilters*filterdim*filterdim)+ m*filterdim*filterdim + i*filterdim + j] = 1; // printf("filter2[%d][%d]: %d\t",k, m*filterdim*filterdim+i*filterdim + j, filter2[k*(numfilters*filterdim*filterdim)+ m*filterdim*filterdim + i*filterdim + j]); } } } // printf("\n"); } dim3 dimGrid (32); dim3 dimBlock (32,32); cudaMemcpy(picd,pic,numbytes, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(FIL, filter, numfilters*filterdim*filterdim*sizeof(int)); conv1<<<dimGrid, dimBlock>>>(picd,resultd); cudaMemcpy(result,resultd,numfilters*numbytes2,cudaMemcpyDeviceToHost); dim3 dimBlock1 (16,16); cudaMemcpy(maxip1d, result,numfilters*numbytes2, cudaMemcpyHostToDevice); maxpooling<<<dimGrid, dimBlock1>>>(maxip1d, maxop1d); cudaMemcpy(maxop1, maxop1d, numfilters*numbytes3, cudaMemcpyDeviceToHost); cudaMemcpy(cip2d, maxop1,numfilters*numbytes3,cudaMemcpyHostToDevice); cudaMemcpy(filter2d, filter2,numfilters1*numfilters*filterdim*filterdim*sizeof(int), cudaMemcpyHostToDevice); dim3 dimGrid2(64); dim3 dimBlock2(8,8); conv2<<<dimGrid2, dimBlock2>>>(cip2d, filter2d, cop2d); cudaMemcpy(cop2, cop2d,numfilters1*numbytes4,cudaMemcpyDeviceToHost); cudaMemcpy(maxip2d, cop2,numfilters1*numbytes4,cudaMemcpyHostToDevice); maxpool<<<dimGrid2, dimBlock2>>>(maxip2d, maxop2d); cudaMemcpy(maxop2, maxop2d, numfilters*numbytes5, cudaMemcpyDeviceToHost); for(k=0;k<numfilters1;k++){ for(i=0;i<4;i++){ for(j=0;j<4;j++){ printf("maxpool[%d][%d]:%d\t",k,i*4+j, maxop2[k*16+i*4+j]); } printf("\n"); } printf("\n\n"); } }
a2458e0562f748682bb2e3c2fe36b713624e05b4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/ATen.h> #include <ATen/NamedTensorUtils.h> #include <ATen/native/SortingUtils.h> #include <c10/macros/Macros.h> #include <ATen/hip/HIPApplyUtils.cuh> #include <ATen/hip/detail/TensorInfo.cuh> #include <ATen/native/hip/SortingCommon.cuh> #include <ATen/native/hip/SortingRadixSelect.cuh> #include <THH/THHDeviceUtils.cuh> // only for THCRoundUp? #include <THH/THHNumerics.cuh> #include <THH/THHScanUtils.cuh> #include <THH/THHTensorMathReduce.cuh> // AddOp #include <cassert> #include <cstdlib> namespace at { namespace native { namespace { // Finds the rank k element, and its index, of the values along dimension dim template <typename scalar_t, typename index_t, int Dim> __global__ void gatherKthValue( cuda::detail::TensorInfo<scalar_t, index_t> input, index_t inputSliceSize, index_t k, index_t numInputSlices, index_t inputWithinSliceStride, cuda::detail::TensorInfo<scalar_t, index_t> kthValue, cuda::detail::TensorInfo<int64_t, index_t> indices) { // Indices are limited to integer fp precision, so counts can fit in // int32, regardless of index_t __shared__ int smem[C10_WARP_SIZE]; // one per each warp, up to warp limit index_t slice = getLinearBlockId<index_t>(); if (slice >= numInputSlices) { return; } // Find the start offset for our slice index_t sliceStartIndex = cuda::detail::IndexToOffset<scalar_t, index_t, Dim>::get(slice, input); index_t kthValueSliceStartIndex = cuda::detail::IndexToOffset<scalar_t, index_t, Dim>::get(slice, kthValue); index_t indicesSliceStartIndex = cuda::detail::IndexToOffset<int64_t, index_t, Dim>::get(slice, indices); scalar_t* inputSliceStart = &input.data[sliceStartIndex]; scalar_t* kthValueSliceStart = &kthValue.data[kthValueSliceStartIndex]; int64_t* indicesSliceStart = &indices.data[indicesSliceStartIndex]; // Find the k-th highest element in our input scalar_t kValue = static_cast<scalar_t>(0); radixSelect< scalar_t, typename 
TopKTypeConfig<scalar_t>::RadixType, index_t, false>( inputSliceStart, k, inputSliceSize, inputWithinSliceStride, smem, &kValue); // Find the index of the k-th highest element index_t kValueIndex = 0; bool foundKValue = false; for (index_t i = threadIdx.x; i < inputSliceSize; i += blockDim.x) { bool inRange = (i < inputSliceSize); scalar_t v = inRange ? doLdg(&inputSliceStart[i * inputWithinSliceStride]) : static_cast<scalar_t>(0); bool isKValue = inRange && ((v == kValue) || (THCNumerics<scalar_t>::isnan(v) && THCNumerics<scalar_t>::isnan(kValue))); if (isKValue) { kValueIndex = i; foundKValue = true; break; } } if (foundKValue) { kthValueSliceStart[0] = kValue; indicesSliceStart[0] = kValueIndex; } } // CUDA kernel to find the median, and its index, of the values along dimension dim template <typename scalar_t, typename index_t, int Dim> __global__ void gatherMedian( cuda::detail::TensorInfo<scalar_t, index_t> values, cuda::detail::TensorInfo<int64_t, index_t> indices, cuda::detail::TensorInfo<scalar_t, index_t> input, index_t inputSliceSize, index_t numInputSlices, index_t inputWithinSliceStride, bool ignore_nan) { // Shared memory for the subroutine RadixSelect. Note that RadixSelect converts the // floating point type to int with the same relative ordering. 
__shared__ int smem[C10_WARP_SIZE]; // one per each warp, up to warp limit index_t slice = getLinearBlockId<index_t>(); if (slice >= numInputSlices) { return; } // Finds the start offset for our slice index_t valuesSliceStartIndex = cuda::detail::IndexToOffset<scalar_t, index_t, Dim>::get(slice, values); index_t indicesSliceStartIndex = cuda::detail::IndexToOffset<int64_t, index_t, Dim>::get(slice, indices); index_t inputSliceStartIndex = cuda::detail::IndexToOffset<scalar_t, index_t, Dim>::get(slice, input); scalar_t* valuesSliceStart = &values.data[valuesSliceStartIndex]; int64_t* indicesSliceStart = &indices.data[indicesSliceStartIndex]; scalar_t* inputSliceStart = &input.data[inputSliceStartIndex]; index_t nan_count = 0; for (index_t i = threadIdx.x; i < inputSliceSize; i += blockDim.x) { scalar_t val = doLdg(&inputSliceStart[i * inputWithinSliceStride]); nan_count += THCNumerics<scalar_t>::isnan(val) ? 1 : 0; } // Counts number of nan values // This code performs a parallel sum reduction (not the most efficient code) __shared__ int64_t num_nan; if (threadIdx.x == 0) { num_nan = 0; } __syncthreads(); if (nan_count > 0) { atomicAdd(&num_nan, nan_count); } __syncthreads(); // For torch.median, if we found nan set k to last index so the computed value // is nan, otherwise set k to the middle element of the non-nan values index_t k = (!ignore_nan && num_nan > 0) ? 
inputSliceSize - 1 : (inputSliceSize - num_nan - 1) / 2; // Find the median scalar_t median = static_cast<scalar_t>(0); radixSelect< scalar_t, typename TopKTypeConfig<scalar_t>::RadixType, index_t, false>( inputSliceStart, k + 1, inputSliceSize, inputWithinSliceStride, smem, &median); valuesSliceStart[0] = median; // Find the index of the median value in the slice for (index_t i = threadIdx.x; i < inputSliceSize; i += blockDim.x) { scalar_t val = doLdg(&inputSliceStart[i * inputWithinSliceStride]); if (val == median || (THCNumerics<scalar_t>::isnan(val) && THCNumerics<scalar_t>::isnan(median))) { indicesSliceStart[0] = i; break; } } } struct KthValueLauncher { int64_t k; KthValueLauncher(int64_t k) : k(k) {} template <typename scalar_t, typename index_t, int all_dims> inline void launch( cuda::detail::TensorInfo<scalar_t, index_t> values_info, int collapse_values_dim, cuda::detail::TensorInfo<int64_t, index_t> indices_info, int collapse_indices_dim, cuda::detail::TensorInfo<scalar_t, index_t> self_info, int collapse_self_dim, int64_t num_slices, int64_t slice_size) { dim3 grid; if (!getGridFromTiles(num_slices, grid)) { AT_ERROR("slices are too many"); } dim3 block(::min( THCRoundUp(slice_size, (int64_t)C10_WARP_SIZE), (int64_t)1024)); auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); hipLaunchKernelGGL(( gatherKthValue<scalar_t, index_t, all_dims>), dim3(grid), dim3(block), 0, stream, self_info, slice_size, k, num_slices, /* The actual dimension that the k-selection is running in */ /* may have changed from collapseDims() */ self_info.strides[collapse_self_dim], values_info, indices_info); } }; struct MedianLauncher { bool ignore_nan; MedianLauncher(bool ignore_nan) : ignore_nan(ignore_nan) {} template <typename scalar_t, typename index_t, int all_dims> inline void launch( cuda::detail::TensorInfo<scalar_t, index_t> values_info, int collapse_values_dim, cuda::detail::TensorInfo<int64_t, index_t> indices_info, int collapse_indices_dim, 
cuda::detail::TensorInfo<scalar_t, index_t> self_info, int collapse_self_dim, int64_t num_slices, int64_t slice_size) { dim3 grid; if (!getGridFromTiles(num_slices, grid)) { AT_ERROR("slices are too many"); } dim3 block(::min( THCRoundUp(slice_size, (int64_t)C10_WARP_SIZE), (int64_t)1024)); auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); hipLaunchKernelGGL(( gatherMedian<scalar_t, index_t, all_dims>), dim3(grid), dim3(block), 0, stream, values_info, indices_info, self_info, slice_size, num_slices, self_info.strides[collapse_self_dim], ignore_nan); } }; template <typename scalar_t> void kthvalue_cuda_template( Tensor& values, Tensor& indices, const Tensor& self, int64_t k, int64_t dim_, bool keepdim) { int64_t dim = maybe_wrap_dim(dim_, self.dim()); int64_t slicesize = self.size(dim); // FIXME: This seems bogus, I only do this because it was the old behaviour. // The reductions are fine, as long as the axis being reduced along // isn't of 0 elements (and the output has elements). 
TORCH_CHECK( self.numel() > 0, "cannot perform reduction function kthvalue", " on tensor with no elements because the operation does not have an identity"); TORCH_CHECK(k >= 1 && k <= slicesize, "selected number k out of range"); _reduction_with_indices_allocate_or_resize_output( values, indices, self, dim, keepdim); if (self.dim() == 0 && self.numel() == 1) { values.copy_(self); indices.zero_(); return; } TORCH_CHECK( self.dim() <= MAX_TENSORINFO_DIMS, "cannot operate on more than ", MAX_TENSORINFO_DIMS, " dimensions"); // Based on required index size, run the algorithm with the // appropriate index type if (cuda::detail::canUse32BitIndexMath(self) && cuda::detail::canUse32BitIndexMath(values) && cuda::detail::canUse32BitIndexMath(indices)) { run_launcher<scalar_t, uint32_t>( values, indices, self, dim, KthValueLauncher(k)); } else { run_launcher<scalar_t, uint64_t>( values, indices, self, dim, KthValueLauncher(k)); } if (!keepdim) { values.squeeze_(dim); indices.squeeze_(dim); } AT_CUDA_CHECK(hipGetLastError()); } std::tuple<Tensor&, Tensor&> kthvalue_out_impl_cuda( Tensor& values, Tensor& indices, const Tensor& self, int64_t k, int64_t dim, bool keepdim) { AT_DISPATCH_ALL_TYPES_AND( at::ScalarType::Half, self.scalar_type(), "kthvalue_cuda", [&] { kthvalue_cuda_template<scalar_t>( values, indices, self, k, dim, keepdim); }); return std::forward_as_tuple(values, indices); } std::tuple<Tensor&, Tensor&> median_with_indices_impl( Tensor& values, Tensor& indices, const Tensor& self, int64_t dim, bool keepdim, bool ignore_nan) { NoNamesGuard guard; dim = at::maybe_wrap_dim(dim, self.dim()); Tensor in = self.dim() > 0 ? 
self.contiguous() : self.unsqueeze(0); int64_t size = in.size(dim); TORCH_CHECK( size > 0, "median() cannot compute median for a dimension of size 0 because ", "the operation does not have an identity"); checkDeviceType("median", {values, indices}, self.device().type()); checkScalarType("median", {indices, "indices", 1}, kLong); checkSameType("median", {values, "values", 0}, {self, "self", 2}); TORCH_CHECK( self.dim() <= MAX_TENSORINFO_DIMS, "median() cannot operate on more than ", MAX_TENSORINFO_DIMS, " dimensions"); std::vector<int64_t> out_shape = self.sizes().vec(); if (self.dim() > 0) { if (keepdim) { out_shape[dim] = 1; } else { out_shape.erase(out_shape.begin() + dim); } } values.resize_(out_shape); indices.resize_(out_shape); // Only launch kernel for non-empty tensors if (self.numel() > 0) { // Ensure #dim is the same for all tensors required for reduction Tensor vals = keepdim && self.dim() > 0 ? values : values.unsqueeze(dim); Tensor inds = keepdim && self.dim() > 0 ? indices : indices.unsqueeze(dim); AT_DISPATCH_ALL_TYPES_AND( at::ScalarType::Half, self.scalar_type(), "median_out_impl", [&] { if (cuda::detail::canUse32BitIndexMath(vals) && cuda::detail::canUse32BitIndexMath(inds) && cuda::detail::canUse32BitIndexMath(in)) { run_launcher<scalar_t, uint32_t>( vals, inds, in, dim, MedianLauncher(ignore_nan)); } else { run_launcher<scalar_t, uint64_t>( vals, inds, in, dim, MedianLauncher(ignore_nan)); } }); AT_CUDA_CHECK(hipGetLastError()); } guard.reset(); namedinference::propagate_names_for_reduction(values, self, dim, keepdim); namedinference::propagate_names_for_reduction(indices, self, dim, keepdim); return std::forward_as_tuple(values, indices); } Tensor median_impl(const Tensor& self, bool ignore_nan) { NoNamesGuard guard; int64_t size = self.numel(); TORCH_CHECK(size > 0, "median() input tensor cannot be empty"); // Sort input tensor to efficiently query for median element Tensor sorted = std::get<0>(self.flatten().sort()); if (!ignore_nan) { // For 
torch.median return either the middle element or nan (sorted as // largest) if there are any int64_t k = (size - 1) / 2; return at::where(sorted[-1].isnan(), sorted[-1], sorted[k]); } else { // For torch.nanmedian return the middle element among the non-nan values Tensor k = ((size - 1) - sorted.isnan().sum()) / 2; return sorted[k.toType(kLong)]; } } } // namespace // Mark: kthvalue std::tuple<Tensor&, Tensor&> kthvalue_out_cuda( Tensor& values, Tensor& indices, const Tensor& self, int64_t k, int64_t dim, bool keepdim) { auto result = [&]() { NoNamesGuard guard; // `kthvalue_out_impl_cuda` expects contiguous in input `self`. return kthvalue_out_impl_cuda(values, indices, self.contiguous(), k, dim, keepdim); }(); namedinference::propagate_names_for_reduction(values, self, dim, keepdim); namedinference::propagate_names_for_reduction(indices, self, dim, keepdim); return result; } // Mark: median std::tuple<Tensor&, Tensor&> median_out_cuda( Tensor& values, Tensor& indices, const Tensor& self, int64_t dim, bool keepdim) { return median_with_indices_impl( values, indices, self, dim, keepdim, /*ignore_nan=*/false); } Tensor median_cuda(const Tensor& self) { return median_impl(self, /*ignore_nan=*/false); } std::tuple<Tensor&, Tensor&> nanmedian_out_cuda( Tensor& values, Tensor& indices, const Tensor& self, int64_t dim, bool keepdim) { return median_with_indices_impl( values, indices, self, dim, keepdim, /*ignore_nan=*/true); } Tensor nanmedian_cuda(const Tensor& self) { return median_impl(self, /*ignore_nan=*/true); } } // namespace native } // namespace at
a2458e0562f748682bb2e3c2fe36b713624e05b4.cu
#include <ATen/ATen.h> #include <ATen/NamedTensorUtils.h> #include <ATen/native/SortingUtils.h> #include <c10/macros/Macros.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #include <ATen/cuda/detail/TensorInfo.cuh> #include <ATen/native/cuda/SortingCommon.cuh> #include <ATen/native/cuda/SortingRadixSelect.cuh> #include <THC/THCDeviceUtils.cuh> // only for THCRoundUp? #include <THC/THCNumerics.cuh> #include <THC/THCScanUtils.cuh> #include <THC/THCTensorMathReduce.cuh> // AddOp #include <cassert> #include <cstdlib> namespace at { namespace native { namespace { // Finds the rank k element, and its index, of the values along dimension dim template <typename scalar_t, typename index_t, int Dim> __global__ void gatherKthValue( cuda::detail::TensorInfo<scalar_t, index_t> input, index_t inputSliceSize, index_t k, index_t numInputSlices, index_t inputWithinSliceStride, cuda::detail::TensorInfo<scalar_t, index_t> kthValue, cuda::detail::TensorInfo<int64_t, index_t> indices) { // Indices are limited to integer fp precision, so counts can fit in // int32, regardless of index_t __shared__ int smem[C10_WARP_SIZE]; // one per each warp, up to warp limit index_t slice = getLinearBlockId<index_t>(); if (slice >= numInputSlices) { return; } // Find the start offset for our slice index_t sliceStartIndex = cuda::detail::IndexToOffset<scalar_t, index_t, Dim>::get(slice, input); index_t kthValueSliceStartIndex = cuda::detail::IndexToOffset<scalar_t, index_t, Dim>::get(slice, kthValue); index_t indicesSliceStartIndex = cuda::detail::IndexToOffset<int64_t, index_t, Dim>::get(slice, indices); scalar_t* inputSliceStart = &input.data[sliceStartIndex]; scalar_t* kthValueSliceStart = &kthValue.data[kthValueSliceStartIndex]; int64_t* indicesSliceStart = &indices.data[indicesSliceStartIndex]; // Find the k-th highest element in our input scalar_t kValue = static_cast<scalar_t>(0); radixSelect< scalar_t, typename TopKTypeConfig<scalar_t>::RadixType, index_t, false>( inputSliceStart, k, inputSliceSize, 
inputWithinSliceStride, smem, &kValue); // Find the index of the k-th highest element index_t kValueIndex = 0; bool foundKValue = false; for (index_t i = threadIdx.x; i < inputSliceSize; i += blockDim.x) { bool inRange = (i < inputSliceSize); scalar_t v = inRange ? doLdg(&inputSliceStart[i * inputWithinSliceStride]) : static_cast<scalar_t>(0); bool isKValue = inRange && ((v == kValue) || (THCNumerics<scalar_t>::isnan(v) && THCNumerics<scalar_t>::isnan(kValue))); if (isKValue) { kValueIndex = i; foundKValue = true; break; } } if (foundKValue) { kthValueSliceStart[0] = kValue; indicesSliceStart[0] = kValueIndex; } } // CUDA kernel to find the median, and its index, of the values along dimension dim template <typename scalar_t, typename index_t, int Dim> __global__ void gatherMedian( cuda::detail::TensorInfo<scalar_t, index_t> values, cuda::detail::TensorInfo<int64_t, index_t> indices, cuda::detail::TensorInfo<scalar_t, index_t> input, index_t inputSliceSize, index_t numInputSlices, index_t inputWithinSliceStride, bool ignore_nan) { // Shared memory for the subroutine RadixSelect. Note that RadixSelect converts the // floating point type to int with the same relative ordering. 
__shared__ int smem[C10_WARP_SIZE]; // one per each warp, up to warp limit index_t slice = getLinearBlockId<index_t>(); if (slice >= numInputSlices) { return; } // Finds the start offset for our slice index_t valuesSliceStartIndex = cuda::detail::IndexToOffset<scalar_t, index_t, Dim>::get(slice, values); index_t indicesSliceStartIndex = cuda::detail::IndexToOffset<int64_t, index_t, Dim>::get(slice, indices); index_t inputSliceStartIndex = cuda::detail::IndexToOffset<scalar_t, index_t, Dim>::get(slice, input); scalar_t* valuesSliceStart = &values.data[valuesSliceStartIndex]; int64_t* indicesSliceStart = &indices.data[indicesSliceStartIndex]; scalar_t* inputSliceStart = &input.data[inputSliceStartIndex]; index_t nan_count = 0; for (index_t i = threadIdx.x; i < inputSliceSize; i += blockDim.x) { scalar_t val = doLdg(&inputSliceStart[i * inputWithinSliceStride]); nan_count += THCNumerics<scalar_t>::isnan(val) ? 1 : 0; } // Counts number of nan values // This code performs a parallel sum reduction (not the most efficient code) __shared__ int64_t num_nan; if (threadIdx.x == 0) { num_nan = 0; } __syncthreads(); if (nan_count > 0) { atomicAdd(&num_nan, nan_count); } __syncthreads(); // For torch.median, if we found nan set k to last index so the computed value // is nan, otherwise set k to the middle element of the non-nan values index_t k = (!ignore_nan && num_nan > 0) ? 
inputSliceSize - 1 : (inputSliceSize - num_nan - 1) / 2; // Find the median scalar_t median = static_cast<scalar_t>(0); radixSelect< scalar_t, typename TopKTypeConfig<scalar_t>::RadixType, index_t, false>( inputSliceStart, k + 1, inputSliceSize, inputWithinSliceStride, smem, &median); valuesSliceStart[0] = median; // Find the index of the median value in the slice for (index_t i = threadIdx.x; i < inputSliceSize; i += blockDim.x) { scalar_t val = doLdg(&inputSliceStart[i * inputWithinSliceStride]); if (val == median || (THCNumerics<scalar_t>::isnan(val) && THCNumerics<scalar_t>::isnan(median))) { indicesSliceStart[0] = i; break; } } } struct KthValueLauncher { int64_t k; KthValueLauncher(int64_t k) : k(k) {} template <typename scalar_t, typename index_t, int all_dims> inline void launch( cuda::detail::TensorInfo<scalar_t, index_t> values_info, int collapse_values_dim, cuda::detail::TensorInfo<int64_t, index_t> indices_info, int collapse_indices_dim, cuda::detail::TensorInfo<scalar_t, index_t> self_info, int collapse_self_dim, int64_t num_slices, int64_t slice_size) { dim3 grid; if (!getGridFromTiles(num_slices, grid)) { AT_ERROR("slices are too many"); } dim3 block(std::min( THCRoundUp(slice_size, (int64_t)C10_WARP_SIZE), (int64_t)1024)); auto stream = at::cuda::getCurrentCUDAStream(); gatherKthValue<scalar_t, index_t, all_dims><<<grid, block, 0, stream>>>( self_info, slice_size, k, num_slices, /* The actual dimension that the k-selection is running in */ /* may have changed from collapseDims() */ self_info.strides[collapse_self_dim], values_info, indices_info); } }; struct MedianLauncher { bool ignore_nan; MedianLauncher(bool ignore_nan) : ignore_nan(ignore_nan) {} template <typename scalar_t, typename index_t, int all_dims> inline void launch( cuda::detail::TensorInfo<scalar_t, index_t> values_info, int collapse_values_dim, cuda::detail::TensorInfo<int64_t, index_t> indices_info, int collapse_indices_dim, cuda::detail::TensorInfo<scalar_t, index_t> self_info, int 
collapse_self_dim, int64_t num_slices, int64_t slice_size) { dim3 grid; if (!getGridFromTiles(num_slices, grid)) { AT_ERROR("slices are too many"); } dim3 block(std::min( THCRoundUp(slice_size, (int64_t)C10_WARP_SIZE), (int64_t)1024)); auto stream = at::cuda::getCurrentCUDAStream(); gatherMedian<scalar_t, index_t, all_dims><<<grid, block, 0, stream>>>( values_info, indices_info, self_info, slice_size, num_slices, self_info.strides[collapse_self_dim], ignore_nan); } }; template <typename scalar_t> void kthvalue_cuda_template( Tensor& values, Tensor& indices, const Tensor& self, int64_t k, int64_t dim_, bool keepdim) { int64_t dim = maybe_wrap_dim(dim_, self.dim()); int64_t slicesize = self.size(dim); // FIXME: This seems bogus, I only do this because it was the old behaviour. // The reductions are fine, as long as the axis being reduced along // isn't of 0 elements (and the output has elements). TORCH_CHECK( self.numel() > 0, "cannot perform reduction function kthvalue", " on tensor with no elements because the operation does not have an identity"); TORCH_CHECK(k >= 1 && k <= slicesize, "selected number k out of range"); _reduction_with_indices_allocate_or_resize_output( values, indices, self, dim, keepdim); if (self.dim() == 0 && self.numel() == 1) { values.copy_(self); indices.zero_(); return; } TORCH_CHECK( self.dim() <= MAX_TENSORINFO_DIMS, "cannot operate on more than ", MAX_TENSORINFO_DIMS, " dimensions"); // Based on required index size, run the algorithm with the // appropriate index type if (cuda::detail::canUse32BitIndexMath(self) && cuda::detail::canUse32BitIndexMath(values) && cuda::detail::canUse32BitIndexMath(indices)) { run_launcher<scalar_t, uint32_t>( values, indices, self, dim, KthValueLauncher(k)); } else { run_launcher<scalar_t, uint64_t>( values, indices, self, dim, KthValueLauncher(k)); } if (!keepdim) { values.squeeze_(dim); indices.squeeze_(dim); } AT_CUDA_CHECK(cudaGetLastError()); } std::tuple<Tensor&, Tensor&> kthvalue_out_impl_cuda( 
Tensor& values, Tensor& indices, const Tensor& self, int64_t k, int64_t dim, bool keepdim) { AT_DISPATCH_ALL_TYPES_AND( at::ScalarType::Half, self.scalar_type(), "kthvalue_cuda", [&] { kthvalue_cuda_template<scalar_t>( values, indices, self, k, dim, keepdim); }); return std::forward_as_tuple(values, indices); } std::tuple<Tensor&, Tensor&> median_with_indices_impl( Tensor& values, Tensor& indices, const Tensor& self, int64_t dim, bool keepdim, bool ignore_nan) { NoNamesGuard guard; dim = at::maybe_wrap_dim(dim, self.dim()); Tensor in = self.dim() > 0 ? self.contiguous() : self.unsqueeze(0); int64_t size = in.size(dim); TORCH_CHECK( size > 0, "median() cannot compute median for a dimension of size 0 because ", "the operation does not have an identity"); checkDeviceType("median", {values, indices}, self.device().type()); checkScalarType("median", {indices, "indices", 1}, kLong); checkSameType("median", {values, "values", 0}, {self, "self", 2}); TORCH_CHECK( self.dim() <= MAX_TENSORINFO_DIMS, "median() cannot operate on more than ", MAX_TENSORINFO_DIMS, " dimensions"); std::vector<int64_t> out_shape = self.sizes().vec(); if (self.dim() > 0) { if (keepdim) { out_shape[dim] = 1; } else { out_shape.erase(out_shape.begin() + dim); } } values.resize_(out_shape); indices.resize_(out_shape); // Only launch kernel for non-empty tensors if (self.numel() > 0) { // Ensure #dim is the same for all tensors required for reduction Tensor vals = keepdim && self.dim() > 0 ? values : values.unsqueeze(dim); Tensor inds = keepdim && self.dim() > 0 ? 
indices : indices.unsqueeze(dim); AT_DISPATCH_ALL_TYPES_AND( at::ScalarType::Half, self.scalar_type(), "median_out_impl", [&] { if (cuda::detail::canUse32BitIndexMath(vals) && cuda::detail::canUse32BitIndexMath(inds) && cuda::detail::canUse32BitIndexMath(in)) { run_launcher<scalar_t, uint32_t>( vals, inds, in, dim, MedianLauncher(ignore_nan)); } else { run_launcher<scalar_t, uint64_t>( vals, inds, in, dim, MedianLauncher(ignore_nan)); } }); AT_CUDA_CHECK(cudaGetLastError()); } guard.reset(); namedinference::propagate_names_for_reduction(values, self, dim, keepdim); namedinference::propagate_names_for_reduction(indices, self, dim, keepdim); return std::forward_as_tuple(values, indices); } Tensor median_impl(const Tensor& self, bool ignore_nan) { NoNamesGuard guard; int64_t size = self.numel(); TORCH_CHECK(size > 0, "median() input tensor cannot be empty"); // Sort input tensor to efficiently query for median element Tensor sorted = std::get<0>(self.flatten().sort()); if (!ignore_nan) { // For torch.median return either the middle element or nan (sorted as // largest) if there are any int64_t k = (size - 1) / 2; return at::where(sorted[-1].isnan(), sorted[-1], sorted[k]); } else { // For torch.nanmedian return the middle element among the non-nan values Tensor k = ((size - 1) - sorted.isnan().sum()) / 2; return sorted[k.toType(kLong)]; } } } // namespace // Mark: kthvalue std::tuple<Tensor&, Tensor&> kthvalue_out_cuda( Tensor& values, Tensor& indices, const Tensor& self, int64_t k, int64_t dim, bool keepdim) { auto result = [&]() { NoNamesGuard guard; // `kthvalue_out_impl_cuda` expects contiguous in input `self`. 
return kthvalue_out_impl_cuda(values, indices, self.contiguous(), k, dim, keepdim); }(); namedinference::propagate_names_for_reduction(values, self, dim, keepdim); namedinference::propagate_names_for_reduction(indices, self, dim, keepdim); return result; } // Mark: median std::tuple<Tensor&, Tensor&> median_out_cuda( Tensor& values, Tensor& indices, const Tensor& self, int64_t dim, bool keepdim) { return median_with_indices_impl( values, indices, self, dim, keepdim, /*ignore_nan=*/false); } Tensor median_cuda(const Tensor& self) { return median_impl(self, /*ignore_nan=*/false); } std::tuple<Tensor&, Tensor&> nanmedian_out_cuda( Tensor& values, Tensor& indices, const Tensor& self, int64_t dim, bool keepdim) { return median_with_indices_impl( values, indices, self, dim, keepdim, /*ignore_nan=*/true); } Tensor nanmedian_cuda(const Tensor& self) { return median_impl(self, /*ignore_nan=*/true); } } // namespace native } // namespace at
e21513db6c9317c7d7e3236880a2e1a7b5f78f3f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "resampling.h" #include <iostream> #include <vector_functions.h> #include "helper_math.h" namespace dart { // -=-=-=-=-=-=-=-=-=- kernels -=-=-=-=-=-=-=-=-=- template <typename T, int factor> __global__ void gpu_downsampleNearest(const T * imgIn, const uint2 dimIn, T * imgOut) { const int x = blockIdx.x*blockDim.x + threadIdx.x; const int y = blockIdx.y*blockDim.y + threadIdx.y; if (x >= dimIn.x/factor || y >= dimIn.y/factor) { return; } imgOut[x + y*dimIn.x/factor] = imgIn[x*factor + y*factor*dimIn.x]; } template <int factor> __global__ void gpu_downsampleAreaAverage(const float * imgIn, const uint2 dimIn, float * imgOut) { const int x = blockIdx.x*blockDim.x + threadIdx.x; const int y = blockIdx.y*blockDim.y + threadIdx.y; if (x >= dimIn.x/factor || y >= dimIn.y/factor) { return; } float val = 0; #pragma unroll for (int dy=0; dy < factor; ++dy) { for (int dx=0; dx < factor; ++dx) { const float &d = imgIn[factor*x+dx + (factor*y+dy)*dimIn.x]; val += d; } } imgOut[x + y*dimIn.x/factor] = val/(factor*factor); } template <int factor> __global__ void gpu_downsampleAreaAverage(const uchar3 * imgIn, const uint2 dimIn, uchar3 * imgOut) { const int x = blockIdx.x*blockDim.x + threadIdx.x; const int y = blockIdx.y*blockDim.y + threadIdx.y; if (x >= dimIn.x/factor || y >= dimIn.y/factor) { return; } int3 val = make_int3(0); #pragma unroll for (int dy=0; dy < factor; ++dy) { for (int dx=0; dx < factor; ++dx) { const uchar3 &d = imgIn[factor*x+dx + (factor*y+dy)*dimIn.x]; val.x += d.x; val.y += d.y; val.z += d.z; } } imgOut[x + y*dimIn.x/factor] = make_uchar3(val.x/(factor*factor),val.y/(factor*factor),val.z/(factor*factor)); } template <int factor> __global__ void gpu_downsampleAreaAverage(const uchar4 * imgIn, const uint2 dimIn, uchar4 * imgOut) { const int x = blockIdx.x*blockDim.x + threadIdx.x; const int y = blockIdx.y*blockDim.y + threadIdx.y; if (x >= dimIn.x/factor || y >= 
dimIn.y/factor) { return; } int3 val = make_int3(0); #pragma unroll for (int dy=0; dy < factor; ++dy) { for (int dx=0; dx < factor; ++dx) { const uchar4 &d = imgIn[factor*x+dx + (factor*y+dy)*dimIn.x]; val.x += d.x; val.y += d.y; val.z += d.z; } } imgOut[x + y*dimIn.x/factor] = make_uchar4(val.x/(factor*factor),val.y/(factor*factor),val.z/(factor*factor),255); } template <int factor, bool ignoreZero> __global__ void gpu_downsampleMin(const float * imgIn, const uint2 dimIn, float * imgOut) { const int x = blockIdx.x*blockDim.x + threadIdx.x; const int y = blockIdx.y*blockDim.y + threadIdx.y; if (x >= dimIn.x/factor || y >= dimIn.y/factor) { return; } float minVal = 0; for (int dy=0; dy < factor; ++dy) { for (int dx=0; dx < factor; ++dx) { const float &val = imgIn[factor*x+dx + (factor*y+dy)*dimIn.x]; if (ignoreZero) { if (val != 0 && val < minVal) { minVal = val; } } else if (val < minVal) { minVal = val; } } } imgOut[x + y*dimIn.x/factor] = minVal; } // -=-=-=-=-=-=-=-=-=- interface -=-=-=-=-=-=-=-=-=- void downsampleAreaAverage(const float * imgIn, const uint2 dimIn, float * imgOut, const int factor) { dim3 block(16,8,1); dim3 grid( ceil( (dimIn.x/factor) / (float)block.x), ceil( (dimIn.y/factor) / (float)block.y )); switch (factor) { case 2: hipLaunchKernelGGL(( gpu_downsampleAreaAverage<2>), dim3(grid),dim3(block), 0, 0, imgIn,dimIn,imgOut); break; case 4: hipLaunchKernelGGL(( gpu_downsampleAreaAverage<4>), dim3(grid),dim3(block), 0, 0, imgIn,dimIn,imgOut); break; case 8: hipLaunchKernelGGL(( gpu_downsampleAreaAverage<8>), dim3(grid),dim3(block), 0, 0, imgIn,dimIn,imgOut); break; case 16: hipLaunchKernelGGL(( gpu_downsampleAreaAverage<16>), dim3(grid),dim3(block), 0, 0, imgIn,dimIn,imgOut); break; default: std::cout << "downsampling factor " << factor << " not supported" << std::endl; break; } } void downsampleAreaAverage(const uchar3 * imgIn, const uint2 dimIn, uchar3 * imgOut, const int factor) { dim3 block(16,8,1); dim3 grid( ceil( (dimIn.x/factor) / 
(float)block.x), ceil( (dimIn.y/factor) / (float)block.y )); switch (factor) { case 2: hipLaunchKernelGGL(( gpu_downsampleAreaAverage<2>), dim3(grid),dim3(block), 0, 0, imgIn,dimIn,imgOut); break; case 4: hipLaunchKernelGGL(( gpu_downsampleAreaAverage<4>), dim3(grid),dim3(block), 0, 0, imgIn,dimIn,imgOut); break; case 8: hipLaunchKernelGGL(( gpu_downsampleAreaAverage<8>), dim3(grid),dim3(block), 0, 0, imgIn,dimIn,imgOut); break; case 16: hipLaunchKernelGGL(( gpu_downsampleAreaAverage<16>), dim3(grid),dim3(block), 0, 0, imgIn,dimIn,imgOut); break; default: std::cout << "downsampling factor " << factor << " not supported" << std::endl; break; } } void downsampleAreaAverage(const uchar4 * imgIn, const uint2 dimIn, uchar4 * imgOut, const int factor) { dim3 block(16,8,1); dim3 grid( ceil( (dimIn.x/factor) / (float)block.x), ceil( (dimIn.y/factor) / (float)block.y )); switch (factor) { case 2: hipLaunchKernelGGL(( gpu_downsampleAreaAverage<2>), dim3(grid),dim3(block), 0, 0, imgIn,dimIn,imgOut); break; case 4: hipLaunchKernelGGL(( gpu_downsampleAreaAverage<4>), dim3(grid),dim3(block), 0, 0, imgIn,dimIn,imgOut); break; case 8: hipLaunchKernelGGL(( gpu_downsampleAreaAverage<8>), dim3(grid),dim3(block), 0, 0, imgIn,dimIn,imgOut); break; case 16: hipLaunchKernelGGL(( gpu_downsampleAreaAverage<16>), dim3(grid),dim3(block), 0, 0, imgIn,dimIn,imgOut); break; default: std::cout << "downsampling factor " << factor << " not supported" << std::endl; break; } } void downsampleNearest(const float * imgIn, const uint2 dimIn, float * imgOut, const int factor) { dim3 block(16,8,1); dim3 grid( ceil( (dimIn.x/factor) / (float)block.x), ceil( (dimIn.y/factor) / (float)block.y )); switch (factor) { case 2: hipLaunchKernelGGL(( gpu_downsampleNearest<float,2>), dim3(grid),dim3(block), 0, 0, imgIn,dimIn,imgOut); break; case 4: hipLaunchKernelGGL(( gpu_downsampleNearest<float,4>), dim3(grid),dim3(block), 0, 0, imgIn,dimIn,imgOut); break; case 8: hipLaunchKernelGGL(( 
gpu_downsampleNearest<float,8>), dim3(grid),dim3(block), 0, 0, imgIn,dimIn,imgOut); break; case 16: hipLaunchKernelGGL(( gpu_downsampleNearest<float,16>), dim3(grid),dim3(block), 0, 0, imgIn,dimIn,imgOut); break; default: std::cout << "downsampling factor " << factor << " not supported" << std::endl; break; } } void downsampleNearest(const uchar3 * imgIn, const uint2 dimIn, uchar3 * imgOut, const int factor) { dim3 block(16,8,1); dim3 grid( ceil( (dimIn.x/factor) / (float)block.x), ceil( (dimIn.y/factor) / (float)block.y )); switch (factor) { case 2: hipLaunchKernelGGL(( gpu_downsampleNearest<uchar3,2>), dim3(grid),dim3(block), 0, 0, imgIn,dimIn,imgOut); break; case 4: hipLaunchKernelGGL(( gpu_downsampleNearest<uchar3,4>), dim3(grid),dim3(block), 0, 0, imgIn,dimIn,imgOut); break; case 8: hipLaunchKernelGGL(( gpu_downsampleNearest<uchar3,8>), dim3(grid),dim3(block), 0, 0, imgIn,dimIn,imgOut); break; case 16: hipLaunchKernelGGL(( gpu_downsampleNearest<uchar3,16>), dim3(grid),dim3(block), 0, 0, imgIn,dimIn,imgOut); break; default: std::cout << "downsampling factor " << factor << " not supported" << std::endl; break; } } void downsampleMin(const float * imgIn, const uint2 dimIn, float * imgOut, const int factor, bool ignoreZero) { dim3 block(16,8,1); dim3 grid( ceil( (dimIn.x/factor) / (float)block.x), ceil( (dimIn.y/factor) / (float)block.y )); switch (factor) { case 2: if (ignoreZero) { hipLaunchKernelGGL(( gpu_downsampleMin<2,true>), dim3(grid),dim3(block), 0, 0, imgIn,dimIn,imgOut); } else { hipLaunchKernelGGL(( gpu_downsampleMin<2,false>), dim3(grid),dim3(block), 0, 0, imgIn,dimIn,imgOut); } break; case 4: if (ignoreZero) { hipLaunchKernelGGL(( gpu_downsampleMin<4,true>), dim3(grid),dim3(block), 0, 0, imgIn,dimIn,imgOut); } else { hipLaunchKernelGGL(( gpu_downsampleMin<4,false>), dim3(grid),dim3(block), 0, 0, imgIn,dimIn,imgOut); } break; case 8: if (ignoreZero) { hipLaunchKernelGGL(( gpu_downsampleMin<8,true>), dim3(grid),dim3(block), 0, 0, imgIn,dimIn,imgOut); } 
else { hipLaunchKernelGGL(( gpu_downsampleMin<8,false>), dim3(grid),dim3(block), 0, 0, imgIn,dimIn,imgOut); } break; case 16: if (ignoreZero) { hipLaunchKernelGGL(( gpu_downsampleMin<16,true>), dim3(grid),dim3(block), 0, 0, imgIn,dimIn,imgOut); } else { hipLaunchKernelGGL(( gpu_downsampleMin<16,false>), dim3(grid),dim3(block), 0, 0, imgIn,dimIn,imgOut); } break; default: std::cout << "downsampling factor " << factor << " not supported" << std::endl; break; } } }
e21513db6c9317c7d7e3236880a2e1a7b5f78f3f.cu
#include "resampling.h" #include <iostream> #include <vector_functions.h> #include "helper_math.h" namespace dart { // -=-=-=-=-=-=-=-=-=- kernels -=-=-=-=-=-=-=-=-=- template <typename T, int factor> __global__ void gpu_downsampleNearest(const T * imgIn, const uint2 dimIn, T * imgOut) { const int x = blockIdx.x*blockDim.x + threadIdx.x; const int y = blockIdx.y*blockDim.y + threadIdx.y; if (x >= dimIn.x/factor || y >= dimIn.y/factor) { return; } imgOut[x + y*dimIn.x/factor] = imgIn[x*factor + y*factor*dimIn.x]; } template <int factor> __global__ void gpu_downsampleAreaAverage(const float * imgIn, const uint2 dimIn, float * imgOut) { const int x = blockIdx.x*blockDim.x + threadIdx.x; const int y = blockIdx.y*blockDim.y + threadIdx.y; if (x >= dimIn.x/factor || y >= dimIn.y/factor) { return; } float val = 0; #pragma unroll for (int dy=0; dy < factor; ++dy) { for (int dx=0; dx < factor; ++dx) { const float &d = imgIn[factor*x+dx + (factor*y+dy)*dimIn.x]; val += d; } } imgOut[x + y*dimIn.x/factor] = val/(factor*factor); } template <int factor> __global__ void gpu_downsampleAreaAverage(const uchar3 * imgIn, const uint2 dimIn, uchar3 * imgOut) { const int x = blockIdx.x*blockDim.x + threadIdx.x; const int y = blockIdx.y*blockDim.y + threadIdx.y; if (x >= dimIn.x/factor || y >= dimIn.y/factor) { return; } int3 val = make_int3(0); #pragma unroll for (int dy=0; dy < factor; ++dy) { for (int dx=0; dx < factor; ++dx) { const uchar3 &d = imgIn[factor*x+dx + (factor*y+dy)*dimIn.x]; val.x += d.x; val.y += d.y; val.z += d.z; } } imgOut[x + y*dimIn.x/factor] = make_uchar3(val.x/(factor*factor),val.y/(factor*factor),val.z/(factor*factor)); } template <int factor> __global__ void gpu_downsampleAreaAverage(const uchar4 * imgIn, const uint2 dimIn, uchar4 * imgOut) { const int x = blockIdx.x*blockDim.x + threadIdx.x; const int y = blockIdx.y*blockDim.y + threadIdx.y; if (x >= dimIn.x/factor || y >= dimIn.y/factor) { return; } int3 val = make_int3(0); #pragma unroll for (int dy=0; dy < 
factor; ++dy) { for (int dx=0; dx < factor; ++dx) { const uchar4 &d = imgIn[factor*x+dx + (factor*y+dy)*dimIn.x]; val.x += d.x; val.y += d.y; val.z += d.z; } } imgOut[x + y*dimIn.x/factor] = make_uchar4(val.x/(factor*factor),val.y/(factor*factor),val.z/(factor*factor),255); } template <int factor, bool ignoreZero> __global__ void gpu_downsampleMin(const float * imgIn, const uint2 dimIn, float * imgOut) { const int x = blockIdx.x*blockDim.x + threadIdx.x; const int y = blockIdx.y*blockDim.y + threadIdx.y; if (x >= dimIn.x/factor || y >= dimIn.y/factor) { return; } float minVal = 0; for (int dy=0; dy < factor; ++dy) { for (int dx=0; dx < factor; ++dx) { const float &val = imgIn[factor*x+dx + (factor*y+dy)*dimIn.x]; if (ignoreZero) { if (val != 0 && val < minVal) { minVal = val; } } else if (val < minVal) { minVal = val; } } } imgOut[x + y*dimIn.x/factor] = minVal; } // -=-=-=-=-=-=-=-=-=- interface -=-=-=-=-=-=-=-=-=- void downsampleAreaAverage(const float * imgIn, const uint2 dimIn, float * imgOut, const int factor) { dim3 block(16,8,1); dim3 grid( ceil( (dimIn.x/factor) / (float)block.x), ceil( (dimIn.y/factor) / (float)block.y )); switch (factor) { case 2: gpu_downsampleAreaAverage<2><<<grid,block>>>(imgIn,dimIn,imgOut); break; case 4: gpu_downsampleAreaAverage<4><<<grid,block>>>(imgIn,dimIn,imgOut); break; case 8: gpu_downsampleAreaAverage<8><<<grid,block>>>(imgIn,dimIn,imgOut); break; case 16: gpu_downsampleAreaAverage<16><<<grid,block>>>(imgIn,dimIn,imgOut); break; default: std::cout << "downsampling factor " << factor << " not supported" << std::endl; break; } } void downsampleAreaAverage(const uchar3 * imgIn, const uint2 dimIn, uchar3 * imgOut, const int factor) { dim3 block(16,8,1); dim3 grid( ceil( (dimIn.x/factor) / (float)block.x), ceil( (dimIn.y/factor) / (float)block.y )); switch (factor) { case 2: gpu_downsampleAreaAverage<2><<<grid,block>>>(imgIn,dimIn,imgOut); break; case 4: gpu_downsampleAreaAverage<4><<<grid,block>>>(imgIn,dimIn,imgOut); break; 
case 8: gpu_downsampleAreaAverage<8><<<grid,block>>>(imgIn,dimIn,imgOut); break; case 16: gpu_downsampleAreaAverage<16><<<grid,block>>>(imgIn,dimIn,imgOut); break; default: std::cout << "downsampling factor " << factor << " not supported" << std::endl; break; } } void downsampleAreaAverage(const uchar4 * imgIn, const uint2 dimIn, uchar4 * imgOut, const int factor) { dim3 block(16,8,1); dim3 grid( ceil( (dimIn.x/factor) / (float)block.x), ceil( (dimIn.y/factor) / (float)block.y )); switch (factor) { case 2: gpu_downsampleAreaAverage<2><<<grid,block>>>(imgIn,dimIn,imgOut); break; case 4: gpu_downsampleAreaAverage<4><<<grid,block>>>(imgIn,dimIn,imgOut); break; case 8: gpu_downsampleAreaAverage<8><<<grid,block>>>(imgIn,dimIn,imgOut); break; case 16: gpu_downsampleAreaAverage<16><<<grid,block>>>(imgIn,dimIn,imgOut); break; default: std::cout << "downsampling factor " << factor << " not supported" << std::endl; break; } } void downsampleNearest(const float * imgIn, const uint2 dimIn, float * imgOut, const int factor) { dim3 block(16,8,1); dim3 grid( ceil( (dimIn.x/factor) / (float)block.x), ceil( (dimIn.y/factor) / (float)block.y )); switch (factor) { case 2: gpu_downsampleNearest<float,2><<<grid,block>>>(imgIn,dimIn,imgOut); break; case 4: gpu_downsampleNearest<float,4><<<grid,block>>>(imgIn,dimIn,imgOut); break; case 8: gpu_downsampleNearest<float,8><<<grid,block>>>(imgIn,dimIn,imgOut); break; case 16: gpu_downsampleNearest<float,16><<<grid,block>>>(imgIn,dimIn,imgOut); break; default: std::cout << "downsampling factor " << factor << " not supported" << std::endl; break; } } void downsampleNearest(const uchar3 * imgIn, const uint2 dimIn, uchar3 * imgOut, const int factor) { dim3 block(16,8,1); dim3 grid( ceil( (dimIn.x/factor) / (float)block.x), ceil( (dimIn.y/factor) / (float)block.y )); switch (factor) { case 2: gpu_downsampleNearest<uchar3,2><<<grid,block>>>(imgIn,dimIn,imgOut); break; case 4: gpu_downsampleNearest<uchar3,4><<<grid,block>>>(imgIn,dimIn,imgOut); 
break; case 8: gpu_downsampleNearest<uchar3,8><<<grid,block>>>(imgIn,dimIn,imgOut); break; case 16: gpu_downsampleNearest<uchar3,16><<<grid,block>>>(imgIn,dimIn,imgOut); break; default: std::cout << "downsampling factor " << factor << " not supported" << std::endl; break; } } void downsampleMin(const float * imgIn, const uint2 dimIn, float * imgOut, const int factor, bool ignoreZero) { dim3 block(16,8,1); dim3 grid( ceil( (dimIn.x/factor) / (float)block.x), ceil( (dimIn.y/factor) / (float)block.y )); switch (factor) { case 2: if (ignoreZero) { gpu_downsampleMin<2,true><<<grid,block>>>(imgIn,dimIn,imgOut); } else { gpu_downsampleMin<2,false><<<grid,block>>>(imgIn,dimIn,imgOut); } break; case 4: if (ignoreZero) { gpu_downsampleMin<4,true><<<grid,block>>>(imgIn,dimIn,imgOut); } else { gpu_downsampleMin<4,false><<<grid,block>>>(imgIn,dimIn,imgOut); } break; case 8: if (ignoreZero) { gpu_downsampleMin<8,true><<<grid,block>>>(imgIn,dimIn,imgOut); } else { gpu_downsampleMin<8,false><<<grid,block>>>(imgIn,dimIn,imgOut); } break; case 16: if (ignoreZero) { gpu_downsampleMin<16,true><<<grid,block>>>(imgIn,dimIn,imgOut); } else { gpu_downsampleMin<16,false><<<grid,block>>>(imgIn,dimIn,imgOut); } break; default: std::cout << "downsampling factor " << factor << " not supported" << std::endl; break; } } }
ceb8800a4e0179dcdb24fd56d3ce0a27011e0b30.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <string> #include <iostream> #include <fstream> #include <sstream> #include <cstdlib> #include <cstring> #include <ostream> #include <sys/time.h> using namespace std; double get_walltime() { struct timeval time; if(gettimeofday(&time, NULL)) { return 0; } return (double)time.tv_sec + (double)time.tv_usec * 0.000001; } void readPassFile(char **, char ** ,char *); void readHashFile( char **, char ** ,char *); void writeFile(string *, string *, char *, int); void readPassFile(char * record_pass[], char * record_hash[], char * fileName) { int i = 0; ifstream file; string line; file.open(fileName); //passlist/wordsforsimpletest.txt while(getline(file, line)) { string key,value; istringstream liness(line); getline(liness, key, ','); getline(liness, value, ','); char tKey[key.length() + 1]; char tValue[value.length() + 1]; strcpy(tKey, key.c_str()); strcpy(tValue, value.c_str()); record_pass[i] = (char*) malloc(key.length() + 1); record_hash[i] = (char*) malloc(value.length() + 1); strncpy(record_pass[i], tKey, key.length()+1); strncpy(record_hash[i], tValue, value.length()+1); i++; } file.close(); } void readHashFile(char ** hToCheck_pass, char ** hToCheck_hash, char * fileName ) { ifstream file; string line; int i = 0; file.open(fileName); //passlist/hashFileToTest.txt while(getline(file, line)) { string key = ""; string value = line; char tKey[key.length() + 1]; char tValue[value.length() + 1]; strcpy(tKey, key.c_str()); strcpy(tValue, value.c_str()); hToCheck_pass[i] = (char*) malloc(key.length() + 1); hToCheck_hash[i] = (char*) malloc(value.length() + 1); strncpy(hToCheck_pass[i], tKey, key.length()+1); strncpy(hToCheck_hash[i], tValue, value.length()+1); i++; } file.close(); } void writeFile(char ** result_pass, char ** result_hash, char * fileName, int size) { int i; ofstream fileToWriteTo; fileToWriteTo.open(fileName); //passlist/convertedHash.txt for(i = 0; i < size; 
i++) { fileToWriteTo << result_pass[i]<< ", " << result_hash[i] << endl; } fileToWriteTo.close(); } __device__ int d_strcmp (char * s1, char * s2) { int ret = 0; while (!(ret = *(unsigned char *) s1 - *(unsigned char *) s2) && *s2) ++s1, ++s2; if (ret < 0) ret = -1; else if (ret > 0) ret = 1 ; return ret; } __global__ void gpuComputation(char ** d_record_pass, char ** d_record_hash, char ** d_hToCheck_pass, char ** d_hToCheck_hash, char ** d_result_pass, char ** d_result_hash, int row) { int blockindex = blockIdx.x; int threadindex = threadIdx.x; /* for(i = 0; i < row; i++) { char tempKey[32]; for(j = 0; j < 32; j++) { if(d_record_hash[i][j] == d_hToCheck_hash[threadIdx.x][j]) { tempKey[j] = d_record_pass[i][j]; printf("h"); } } */ int res = d_strcmp(d_record_hash[blockindex], d_hToCheck_hash[threadindex]); printf("%c\n", d_record_hash[blockindex][0]); printf("here\n"); // printf("%d %d %d\n", blockindex, threadindex, row); if(d_strcmp(d_record_hash[blockindex], d_hToCheck_hash[threadindex]) == 0) { for(int j = 0; j < 32; j++) { //d_result_pass[threadIdx.x][j] = d_record_pass[blockindex][j]; //d_result_hash[threadIdx.x][j] = d_record_hash[blockindex][j]; printf("%c", d_record_hash[threadIdx.x][j]); } printf("\n"); } // } } int performMainComputation(char ** record_pass, char ** record_hash, char ** hToCheck_pass, char ** hToCheck_hash, char ** result_pass, char ** result_hash, int nLinesPFile, int nLinesHFile) { int row = nLinesPFile; int col = nLinesHFile; // int indexStruct = 0; int rowSize = row * sizeof(int); int colSize = col * sizeof(int); char * d_record_pass[nLinesPFile], *d_record_hash[nLinesPFile]; char * d_hToCheck_pass[nLinesHFile], *d_hToCheck_hash[nLinesHFile]; char * d_result_pass[nLinesHFile], *d_result_hash[nLinesHFile]; hipMemcpy(d_record_pass, record_pass, rowSize, hipMemcpyHostToDevice); hipMemcpy(d_record_hash, record_hash, rowSize, hipMemcpyHostToDevice); hipMemcpy(d_hToCheck_pass, hToCheck_pass, rowSize, hipMemcpyHostToDevice); 
hipMemcpy(d_hToCheck_hash, hToCheck_hash, rowSize, hipMemcpyHostToDevice); hipMemcpy(d_result_pass, result_pass, rowSize, hipMemcpyHostToDevice); hipMemcpy(d_result_hash, result_hash, rowSize, hipMemcpyHostToDevice); dim3 blocks(row, 1); dim3 grids(col, 1); for(int i = 0; i < 16; i++) cout << record_pass[i] << " " << d_record_pass[i] << endl; hipLaunchKernelGGL(( gpuComputation), dim3(grids), dim3(blocks), 1, 0, d_record_pass, d_record_hash, d_hToCheck_pass, d_hToCheck_hash, d_result_pass, d_result_hash, row); hipDeviceSynchronize(); hipMemcpy(result_pass, d_result_pass, colSize, hipMemcpyDeviceToHost); hipMemcpy(result_hash, d_result_hash, colSize, hipMemcpyDeviceToHost); hipFree(d_record_pass); hipFree(d_hToCheck_pass); hipFree(d_result_pass); hipFree(d_record_hash); hipFree(d_hToCheck_hash); hipFree(d_result_hash); /* for(i = 0; i < row; i++) { for(j = 0; j < col; j++) { if(record[i].hash.compare(hToCheck[j].hash) == 0) { result[indexStruct].pass = record[i].pass; result[indexStruct].hash = record[i].hash; indexStruct++; } } } */ return 16; } void printBenchmark(int nLinesHFile, double readTime, int nPassCracked, double execTime, double writeTime) { cout << endl; cout << "Read time of the file with " << nLinesHFile << " pass hashes = " << readTime << " seconds" << endl << endl; cout << "Total number of passwords cracked = " << nPassCracked << endl; cout << "Total execution time for the main computation = " << execTime << " seconds" << endl; cout << endl; cout << "Write time of the output file = " << writeTime << " seconds" << endl << endl; } int main(int argc, char ** argv) { char * PFile = argv[1]; int nLinesPFile = atoi(argv[2]); char * HFile = argv[3]; int nLinesHFile = atoi(argv[4]); char * outputFile = argv[5]; char * record_pass[nLinesPFile]; char * record_hash[nLinesPFile]; char * hToCheck_pass[nLinesHFile]; char * hToCheck_hash[nLinesHFile]; char * result_pass[nLinesHFile]; char * result_hash[nLinesHFile]; double startReadTime = get_walltime(); 
readPassFile(record_pass, record_hash, PFile); readHashFile(hToCheck_pass, hToCheck_hash, HFile); double readTime = get_walltime() - startReadTime; double startExecTime = get_walltime(); int nPassCracked = performMainComputation(record_pass, record_hash, hToCheck_pass, hToCheck_hash, result_pass, result_hash, nLinesPFile, nLinesHFile); double execTime = get_walltime() - startExecTime; double startWriteTime = get_walltime(); // writeFile(result_pass, result_hash, outputFile, nPassCracked); double writeTime = get_walltime() - startWriteTime; //printBenchmark(nLinesHFile, readTime, nPassCracked, execTime, writeTime); return 0; }
ceb8800a4e0179dcdb24fd56d3ce0a27011e0b30.cu
#include <string> #include <iostream> #include <fstream> #include <sstream> #include <cstdlib> #include <cstring> #include <ostream> #include <sys/time.h> using namespace std; double get_walltime() { struct timeval time; if(gettimeofday(&time, NULL)) { return 0; } return (double)time.tv_sec + (double)time.tv_usec * 0.000001; } void readPassFile(char **, char ** ,char *); void readHashFile( char **, char ** ,char *); void writeFile(string *, string *, char *, int); void readPassFile(char * record_pass[], char * record_hash[], char * fileName) { int i = 0; ifstream file; string line; file.open(fileName); //passlist/wordsforsimpletest.txt while(getline(file, line)) { string key,value; istringstream liness(line); getline(liness, key, ','); getline(liness, value, ','); char tKey[key.length() + 1]; char tValue[value.length() + 1]; strcpy(tKey, key.c_str()); strcpy(tValue, value.c_str()); record_pass[i] = (char*) malloc(key.length() + 1); record_hash[i] = (char*) malloc(value.length() + 1); strncpy(record_pass[i], tKey, key.length()+1); strncpy(record_hash[i], tValue, value.length()+1); i++; } file.close(); } void readHashFile(char ** hToCheck_pass, char ** hToCheck_hash, char * fileName ) { ifstream file; string line; int i = 0; file.open(fileName); //passlist/hashFileToTest.txt while(getline(file, line)) { string key = ""; string value = line; char tKey[key.length() + 1]; char tValue[value.length() + 1]; strcpy(tKey, key.c_str()); strcpy(tValue, value.c_str()); hToCheck_pass[i] = (char*) malloc(key.length() + 1); hToCheck_hash[i] = (char*) malloc(value.length() + 1); strncpy(hToCheck_pass[i], tKey, key.length()+1); strncpy(hToCheck_hash[i], tValue, value.length()+1); i++; } file.close(); } void writeFile(char ** result_pass, char ** result_hash, char * fileName, int size) { int i; ofstream fileToWriteTo; fileToWriteTo.open(fileName); //passlist/convertedHash.txt for(i = 0; i < size; i++) { fileToWriteTo << result_pass[i]<< ", " << result_hash[i] << endl; } 
fileToWriteTo.close(); } __device__ int d_strcmp (char * s1, char * s2) { int ret = 0; while (!(ret = *(unsigned char *) s1 - *(unsigned char *) s2) && *s2) ++s1, ++s2; if (ret < 0) ret = -1; else if (ret > 0) ret = 1 ; return ret; } __global__ void gpuComputation(char ** d_record_pass, char ** d_record_hash, char ** d_hToCheck_pass, char ** d_hToCheck_hash, char ** d_result_pass, char ** d_result_hash, int row) { int blockindex = blockIdx.x; int threadindex = threadIdx.x; /* for(i = 0; i < row; i++) { char tempKey[32]; for(j = 0; j < 32; j++) { if(d_record_hash[i][j] == d_hToCheck_hash[threadIdx.x][j]) { tempKey[j] = d_record_pass[i][j]; printf("h"); } } */ int res = d_strcmp(d_record_hash[blockindex], d_hToCheck_hash[threadindex]); printf("%c\n", d_record_hash[blockindex][0]); printf("here\n"); // printf("%d %d %d\n", blockindex, threadindex, row); if(d_strcmp(d_record_hash[blockindex], d_hToCheck_hash[threadindex]) == 0) { for(int j = 0; j < 32; j++) { //d_result_pass[threadIdx.x][j] = d_record_pass[blockindex][j]; //d_result_hash[threadIdx.x][j] = d_record_hash[blockindex][j]; printf("%c", d_record_hash[threadIdx.x][j]); } printf("\n"); } // } } int performMainComputation(char ** record_pass, char ** record_hash, char ** hToCheck_pass, char ** hToCheck_hash, char ** result_pass, char ** result_hash, int nLinesPFile, int nLinesHFile) { int row = nLinesPFile; int col = nLinesHFile; // int indexStruct = 0; int rowSize = row * sizeof(int); int colSize = col * sizeof(int); char * d_record_pass[nLinesPFile], *d_record_hash[nLinesPFile]; char * d_hToCheck_pass[nLinesHFile], *d_hToCheck_hash[nLinesHFile]; char * d_result_pass[nLinesHFile], *d_result_hash[nLinesHFile]; cudaMemcpy(d_record_pass, record_pass, rowSize, cudaMemcpyHostToDevice); cudaMemcpy(d_record_hash, record_hash, rowSize, cudaMemcpyHostToDevice); cudaMemcpy(d_hToCheck_pass, hToCheck_pass, rowSize, cudaMemcpyHostToDevice); cudaMemcpy(d_hToCheck_hash, hToCheck_hash, rowSize, cudaMemcpyHostToDevice); 
cudaMemcpy(d_result_pass, result_pass, rowSize, cudaMemcpyHostToDevice); cudaMemcpy(d_result_hash, result_hash, rowSize, cudaMemcpyHostToDevice); dim3 blocks(row, 1); dim3 grids(col, 1); for(int i = 0; i < 16; i++) cout << record_pass[i] << " " << d_record_pass[i] << endl; gpuComputation<<<grids, blocks, 1>>>(d_record_pass, d_record_hash, d_hToCheck_pass, d_hToCheck_hash, d_result_pass, d_result_hash, row); cudaDeviceSynchronize(); cudaMemcpy(result_pass, d_result_pass, colSize, cudaMemcpyDeviceToHost); cudaMemcpy(result_hash, d_result_hash, colSize, cudaMemcpyDeviceToHost); cudaFree(d_record_pass); cudaFree(d_hToCheck_pass); cudaFree(d_result_pass); cudaFree(d_record_hash); cudaFree(d_hToCheck_hash); cudaFree(d_result_hash); /* for(i = 0; i < row; i++) { for(j = 0; j < col; j++) { if(record[i].hash.compare(hToCheck[j].hash) == 0) { result[indexStruct].pass = record[i].pass; result[indexStruct].hash = record[i].hash; indexStruct++; } } } */ return 16; } void printBenchmark(int nLinesHFile, double readTime, int nPassCracked, double execTime, double writeTime) { cout << endl; cout << "Read time of the file with " << nLinesHFile << " pass hashes = " << readTime << " seconds" << endl << endl; cout << "Total number of passwords cracked = " << nPassCracked << endl; cout << "Total execution time for the main computation = " << execTime << " seconds" << endl; cout << endl; cout << "Write time of the output file = " << writeTime << " seconds" << endl << endl; } int main(int argc, char ** argv) { char * PFile = argv[1]; int nLinesPFile = atoi(argv[2]); char * HFile = argv[3]; int nLinesHFile = atoi(argv[4]); char * outputFile = argv[5]; char * record_pass[nLinesPFile]; char * record_hash[nLinesPFile]; char * hToCheck_pass[nLinesHFile]; char * hToCheck_hash[nLinesHFile]; char * result_pass[nLinesHFile]; char * result_hash[nLinesHFile]; double startReadTime = get_walltime(); readPassFile(record_pass, record_hash, PFile); readHashFile(hToCheck_pass, hToCheck_hash, HFile); 
double readTime = get_walltime() - startReadTime; double startExecTime = get_walltime(); int nPassCracked = performMainComputation(record_pass, record_hash, hToCheck_pass, hToCheck_hash, result_pass, result_hash, nLinesPFile, nLinesHFile); double execTime = get_walltime() - startExecTime; double startWriteTime = get_walltime(); // writeFile(result_pass, result_hash, outputFile, nPassCracked); double writeTime = get_walltime() - startWriteTime; //printBenchmark(nLinesHFile, readTime, nPassCracked, execTime, writeTime); return 0; }
38ecd40abfabb89b46f65632722b2cc6147ab584.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "THHUNN/generic/LookupTableBag.hip" #else void THNN_(LookupTableBag_updateOutput)( THCState *state, THCIndexTensor *input, THCIndexTensor *offsets, THCTensor *weight, THCTensor *output, THCIndexTensor *offset2bag, int mode, THCIndexTensor *bag_size) { THCUNN_assertSameGPU(state, 5, input, offsets, weight, output, offset2bag); if (!(THCIndexTensor_(isContiguous)(state, input) && THCIndexTensor_(isContiguous)(state, offsets) && THCTensor_(isContiguous)(state, weight))) { THError("Tensors must be contiguous"); } ptrdiff_t numIndices = THCIndexTensor_(size)(state, input, 0); ptrdiff_t numBags = THCIndexTensor_(size)(state, offsets, 0); ptrdiff_t stride = THCTensor_(size)(state, weight, 1); int64_t *bag_size_data = NULL; if (bag_size != NULL) { bag_size_data = THCIndexTensor_(data)(state, bag_size); } hipStream_t stream = THCState_getCurrentStream(state); std::vector<int64_t> outputSize = {numBags, stride}; THCTensor_(resize)(state, output, outputSize, {}); THCTensor_(zero)(state, output); THCIndexTensor_(resize)(state, offset2bag, input->sizes(), {}); dim3 block = dim3(32, 8); int grid = 1024; hipLaunchKernelGGL(( cunn_LookupTableBag_updateOutputKernel<scalar_t, accreal>), dim3(grid), dim3(block), 0, stream, THCIndexTensor_(data)(state, input), THCIndexTensor_(data)(state, offsets), THCTensor_(data)(state, weight), THCTensor_(data)(state, output), THCIndexTensor_(data)(state, offset2bag), numIndices, numBags, stride, mode, bag_size_data ); // THCudaCheck(hipGetLastError()); } void THNN_(LookupTableBag_accGradParameters)( THCState *state, THCIndexTensor *input, THCTensor *gradOutput, THCTensor *gradWeight, THCIndexTensor *offset2bag, THCIndexTensor *count, THCIndexTensor *sortedIndices, THCIndexTensor *origIndices, bool scaleGradByFreq, int mode, THCIndexTensor *bag_size, accreal scale_) { scalar_t scale = ScalarConvert<accreal, 
scalar_t>::to(scale_); THCUNN_assertSameGPU(state, 6, input, gradOutput, gradWeight, offset2bag, sortedIndices, origIndices); gradOutput = THCTensor_(newContiguous)(state, gradOutput); if (!(THCIndexTensor_(isContiguous)(state, input) && THCTensor_(isContiguous)(state, gradWeight) && THCIndexTensor_(isContiguous)(state, offset2bag))) { THError("Tensors must be contiguous"); } int64_t *bag_size_data = NULL; if (bag_size != NULL) { bag_size_data = THCIndexTensor_(data)(state, bag_size); } int nDim = THCIndexTensor_(nDimensionLegacyAll)(state, input); if (THCIndexTensor_(nDimensionLegacyAll)(state, input) != 1 && THCIndexTensor_(nDimensionLegacyAll)(state, input) != 2) { THCDescBuff s1 = THCIndexTensor_(sizeDesc)(state, input); THError("input must be a vector or matrix, but is of shape: %s", s1.str); } ptrdiff_t numel = THCIndexTensor_(nElement)(state, input); int64_t stride = THCTensor_(stride)(state, gradWeight, 0); hipStream_t stream = THCState_getCurrentStream(state); THCIndexTensor_(resize)(state, sortedIndices, input->sizes(), {}); THCIndexTensor_(resize)(state, origIndices, input->sizes(), {}); // Sort the inputs into sorted with the corresponding indices; we // don't need a stable or multidimensional sort, so just use Thrust // directly { THCIndexTensor_(copy)(state, sortedIndices, input); THCThrustAllocator thrustAlloc(state); thrust::device_ptr<THCIndex_t> sortedIndicesIter(THCIndexTensor_(data)(state, sortedIndices)); thrust::device_ptr<THCIndex_t> origIndicesIter(THCIndexTensor_(data)(state, origIndices)); // Fill sortedOrigIndices with sequential indices thrust::counting_iterator<THCIndex_t> countIter(0); thrust::copy( #if TORCH_HIP_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__ thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)), #endif countIter, countIter + numel, origIndicesIter); // Sort; a stable sort is not required thrust::sort_by_key( #if TORCH_HIP_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__ 
thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)), #endif sortedIndicesIter, sortedIndicesIter + numel, origIndicesIter, ThrustLTOp<int64_t>()); } THCIndex_t *sortedIndices_data = THCIndexTensor_(data)(state, sortedIndices); THCIndex_t *origIndices_data = THCIndexTensor_(data)(state, origIndices); THCIndex_t *offset2bag_data = THCIndexTensor_(data)(state, offset2bag); THCIndex_t *count_data = NULL; if (scaleGradByFreq) { THCIndexTensor_(resizeAs)(state, count, input); count_data = THCIndexTensor_(data)(state, count); THCThrustAllocator thrustAlloc(state); thrust::device_ptr<THCIndex_t> sortedIndices_ptr(sortedIndices_data); thrust::device_ptr<THCIndex_t> count_ptr(count_data); // Compute an increasing sequence per unique item in sortedIndices: // sorted: 2 5 5 5 7 7 8 9 9 // count: 1 1 2 3 1 2 1 1 2 thrust::inclusive_scan_by_key( #if TORCH_HIP_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__ thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)), #endif sortedIndices_ptr, sortedIndices_ptr + numel, thrust::make_constant_iterator(1), count_ptr ); // Take the maximum of each count per unique key in reverse: // sorted: 2 5 5 5 7 7 8 9 9 // count: 1 3 3 3 2 2 1 2 2 thrust::inclusive_scan_by_key( #if TORCH_HIP_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__ thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)), #endif thrust::make_reverse_iterator(sortedIndices_ptr + numel), thrust::make_reverse_iterator(sortedIndices_ptr), thrust::make_reverse_iterator(count_ptr + numel), thrust::make_reverse_iterator(count_ptr + numel), thrust::equal_to<int64_t>(), thrust::maximum<int64_t>() ); } dim3 grid(THCCeilDiv(numel, (ptrdiff_t) 4), THCCeilDiv(stride, (int64_t) 128)); dim3 block(32, 4); hipLaunchKernelGGL(( cunn_LookupTableBag_accGradParametersKernel<scalar_t, accreal>), dim3(grid), dim3(block), 0, stream, sortedIndices_data, origIndices_data, THCTensor_(data)(state, gradOutput), THCTensor_(data)(state, gradWeight), offset2bag_data, 
count_data, scale, numel, stride, mode, bag_size_data ); THCTensor_(free)(state, gradOutput); // THCudaCheck(hipGetLastError()); } #endif
38ecd40abfabb89b46f65632722b2cc6147ab584.cu
#ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "THCUNN/generic/LookupTableBag.cu" #else void THNN_(LookupTableBag_updateOutput)( THCState *state, THCIndexTensor *input, THCIndexTensor *offsets, THCTensor *weight, THCTensor *output, THCIndexTensor *offset2bag, int mode, THCIndexTensor *bag_size) { THCUNN_assertSameGPU(state, 5, input, offsets, weight, output, offset2bag); if (!(THCIndexTensor_(isContiguous)(state, input) && THCIndexTensor_(isContiguous)(state, offsets) && THCTensor_(isContiguous)(state, weight))) { THError("Tensors must be contiguous"); } ptrdiff_t numIndices = THCIndexTensor_(size)(state, input, 0); ptrdiff_t numBags = THCIndexTensor_(size)(state, offsets, 0); ptrdiff_t stride = THCTensor_(size)(state, weight, 1); int64_t *bag_size_data = NULL; if (bag_size != NULL) { bag_size_data = THCIndexTensor_(data)(state, bag_size); } cudaStream_t stream = THCState_getCurrentStream(state); std::vector<int64_t> outputSize = {numBags, stride}; THCTensor_(resize)(state, output, outputSize, {}); THCTensor_(zero)(state, output); THCIndexTensor_(resize)(state, offset2bag, input->sizes(), {}); dim3 block = dim3(32, 8); int grid = 1024; cunn_LookupTableBag_updateOutputKernel<scalar_t, accreal><<<grid, block, 0, stream>>>( THCIndexTensor_(data)(state, input), THCIndexTensor_(data)(state, offsets), THCTensor_(data)(state, weight), THCTensor_(data)(state, output), THCIndexTensor_(data)(state, offset2bag), numIndices, numBags, stride, mode, bag_size_data ); // THCudaCheck(cudaGetLastError()); } void THNN_(LookupTableBag_accGradParameters)( THCState *state, THCIndexTensor *input, THCTensor *gradOutput, THCTensor *gradWeight, THCIndexTensor *offset2bag, THCIndexTensor *count, THCIndexTensor *sortedIndices, THCIndexTensor *origIndices, bool scaleGradByFreq, int mode, THCIndexTensor *bag_size, accreal scale_) { scalar_t scale = ScalarConvert<accreal, scalar_t>::to(scale_); THCUNN_assertSameGPU(state, 6, input, gradOutput, gradWeight, offset2bag, sortedIndices, 
origIndices); gradOutput = THCTensor_(newContiguous)(state, gradOutput); if (!(THCIndexTensor_(isContiguous)(state, input) && THCTensor_(isContiguous)(state, gradWeight) && THCIndexTensor_(isContiguous)(state, offset2bag))) { THError("Tensors must be contiguous"); } int64_t *bag_size_data = NULL; if (bag_size != NULL) { bag_size_data = THCIndexTensor_(data)(state, bag_size); } int nDim = THCIndexTensor_(nDimensionLegacyAll)(state, input); if (THCIndexTensor_(nDimensionLegacyAll)(state, input) != 1 && THCIndexTensor_(nDimensionLegacyAll)(state, input) != 2) { THCDescBuff s1 = THCIndexTensor_(sizeDesc)(state, input); THError("input must be a vector or matrix, but is of shape: %s", s1.str); } ptrdiff_t numel = THCIndexTensor_(nElement)(state, input); int64_t stride = THCTensor_(stride)(state, gradWeight, 0); cudaStream_t stream = THCState_getCurrentStream(state); THCIndexTensor_(resize)(state, sortedIndices, input->sizes(), {}); THCIndexTensor_(resize)(state, origIndices, input->sizes(), {}); // Sort the inputs into sorted with the corresponding indices; we // don't need a stable or multidimensional sort, so just use Thrust // directly { THCIndexTensor_(copy)(state, sortedIndices, input); THCThrustAllocator thrustAlloc(state); thrust::device_ptr<THCIndex_t> sortedIndicesIter(THCIndexTensor_(data)(state, sortedIndices)); thrust::device_ptr<THCIndex_t> origIndicesIter(THCIndexTensor_(data)(state, origIndices)); // Fill sortedOrigIndices with sequential indices thrust::counting_iterator<THCIndex_t> countIter(0); thrust::copy( #if CUDA_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__ thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)), #endif countIter, countIter + numel, origIndicesIter); // Sort; a stable sort is not required thrust::sort_by_key( #if CUDA_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__ thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)), #endif sortedIndicesIter, sortedIndicesIter + numel, origIndicesIter, 
ThrustLTOp<int64_t>()); } THCIndex_t *sortedIndices_data = THCIndexTensor_(data)(state, sortedIndices); THCIndex_t *origIndices_data = THCIndexTensor_(data)(state, origIndices); THCIndex_t *offset2bag_data = THCIndexTensor_(data)(state, offset2bag); THCIndex_t *count_data = NULL; if (scaleGradByFreq) { THCIndexTensor_(resizeAs)(state, count, input); count_data = THCIndexTensor_(data)(state, count); THCThrustAllocator thrustAlloc(state); thrust::device_ptr<THCIndex_t> sortedIndices_ptr(sortedIndices_data); thrust::device_ptr<THCIndex_t> count_ptr(count_data); // Compute an increasing sequence per unique item in sortedIndices: // sorted: 2 5 5 5 7 7 8 9 9 // count: 1 1 2 3 1 2 1 1 2 thrust::inclusive_scan_by_key( #if CUDA_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__ thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)), #endif sortedIndices_ptr, sortedIndices_ptr + numel, thrust::make_constant_iterator(1), count_ptr ); // Take the maximum of each count per unique key in reverse: // sorted: 2 5 5 5 7 7 8 9 9 // count: 1 3 3 3 2 2 1 2 2 thrust::inclusive_scan_by_key( #if CUDA_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__ thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)), #endif thrust::make_reverse_iterator(sortedIndices_ptr + numel), thrust::make_reverse_iterator(sortedIndices_ptr), thrust::make_reverse_iterator(count_ptr + numel), thrust::make_reverse_iterator(count_ptr + numel), thrust::equal_to<int64_t>(), thrust::maximum<int64_t>() ); } dim3 grid(THCCeilDiv(numel, (ptrdiff_t) 4), THCCeilDiv(stride, (int64_t) 128)); dim3 block(32, 4); cunn_LookupTableBag_accGradParametersKernel<scalar_t, accreal><<<grid, block, 0, stream>>>( sortedIndices_data, origIndices_data, THCTensor_(data)(state, gradOutput), THCTensor_(data)(state, gradWeight), offset2bag_data, count_data, scale, numel, stride, mode, bag_size_data ); THCTensor_(free)(state, gradOutput); // THCudaCheck(cudaGetLastError()); } #endif
4c69e5204ac309dee6a92416e5d96b36d37d7d73.hip
// !!! This is a file automatically generated by hipify!!! #include"../include/kernel.hpp" #include <cstdint> #include <hip/hip_runtime.h> #include<stdio.h> void __global__ reduce_global(real* d_x, real* d_y) { const int tid = threadIdx.x; real* x = d_x + blockDim.x * blockIdx.x; for (int offset = blockDim.x >> 1; offset > 0; offset >>= 1) { if (tid < offset) { x[tid] += x[tid + offset]; } __syncthreads(); } if (tid == 0) { d_y[blockIdx.x] = x[0]; } } void __global__ reduce_shared(real* d_x, real* d_y) { const int tid = threadIdx.x; const int bid = blockIdx.x; const int n = bid * blockDim.x + tid; __shared__ real s_y[128]; s_y[tid] = (n < N) ? d_x[n] : 0.0; __syncthreads(); for (int offset = blockDim.x >> 1; offset > 0; offset >>= 1) { if (tid < offset) { s_y[tid] += s_y[tid + offset]; } __syncthreads(); } if (tid == 0) { d_y[bid] = s_y[0]; } } void __global__ reduce_dynamic(real* d_x, real* d_y) { const int tid = threadIdx.x; const int bid = blockIdx.x; const int n = bid * blockDim.x + tid; extern __shared__ real s_y[]; s_y[tid] = (n < N) ? 
d_x[n] : 0.0; __syncthreads(); for (int offset = blockDim.x >> 1; offset > 0; offset >>= 1) { if (tid < offset) { s_y[tid] += s_y[tid + offset]; } __syncthreads(); } if (tid == 0) { d_y[bid] = s_y[0]; } } real reduce(real* d_x, const int method) { int grid_size = (N + BLOCK_SIZE - 1) / BLOCK_SIZE; const int ymem = sizeof(real) * grid_size; const int smem = sizeof(real) * BLOCK_SIZE; real* d_y; CHECK(hipMalloc(&d_y, ymem)); real* h_y = (real*)malloc(ymem); switch (method) { case 0: reduce_global << <grid_size, BLOCK_SIZE >> > (d_x, d_y); break; case 1: reduce_shared << <grid_size, BLOCK_SIZE >> > (d_x, d_y); break; case 2: reduce_dynamic << <grid_size, BLOCK_SIZE, smem >> > (d_x, d_y); break; default: printf("Error: wrong method\n"); exit(1); break; } CHECK(hipMemcpy(h_y, d_y, ymem, hipMemcpyDeviceToHost)); real result = 0.0; for (int n = 0; n < grid_size; ++n) { result += h_y[n]; } free(h_y); CHECK(hipFree(d_y)); return result; } void timing(real* h_x, real* d_x, const int method) { real sum = 0; for (int repeat = 0; repeat < NUM_REPEATS; ++repeat) { CHECK(hipMemcpy(d_x, h_x, M, hipMemcpyHostToDevice)); hipEvent_t start, stop; CHECK(hipEventCreate(&start)); CHECK(hipEventCreate(&stop)); CHECK(hipEventRecord(start)); hipEventQuery(start); sum = reduce(d_x, method); CHECK(hipEventRecord(stop)); CHECK(hipEventSynchronize(stop)); float elapsed_time; CHECK(hipEventElapsedTime(&elapsed_time, start, stop)); printf("Time = %g ms.\n", elapsed_time); CHECK(hipEventDestroy(start)); CHECK(hipEventDestroy(stop)); } printf("sum = %f.\n", sum); }
4c69e5204ac309dee6a92416e5d96b36d37d7d73.cu
#include"../include/kernel.hpp" #include <cstdint> #include <cuda.h> #include<stdio.h> void __global__ reduce_global(real* d_x, real* d_y) { const int tid = threadIdx.x; real* x = d_x + blockDim.x * blockIdx.x; for (int offset = blockDim.x >> 1; offset > 0; offset >>= 1) { if (tid < offset) { x[tid] += x[tid + offset]; } __syncthreads(); } if (tid == 0) { d_y[blockIdx.x] = x[0]; } } void __global__ reduce_shared(real* d_x, real* d_y) { const int tid = threadIdx.x; const int bid = blockIdx.x; const int n = bid * blockDim.x + tid; __shared__ real s_y[128]; s_y[tid] = (n < N) ? d_x[n] : 0.0; __syncthreads(); for (int offset = blockDim.x >> 1; offset > 0; offset >>= 1) { if (tid < offset) { s_y[tid] += s_y[tid + offset]; } __syncthreads(); } if (tid == 0) { d_y[bid] = s_y[0]; } } void __global__ reduce_dynamic(real* d_x, real* d_y) { const int tid = threadIdx.x; const int bid = blockIdx.x; const int n = bid * blockDim.x + tid; extern __shared__ real s_y[]; s_y[tid] = (n < N) ? d_x[n] : 0.0; __syncthreads(); for (int offset = blockDim.x >> 1; offset > 0; offset >>= 1) { if (tid < offset) { s_y[tid] += s_y[tid + offset]; } __syncthreads(); } if (tid == 0) { d_y[bid] = s_y[0]; } } real reduce(real* d_x, const int method) { int grid_size = (N + BLOCK_SIZE - 1) / BLOCK_SIZE; const int ymem = sizeof(real) * grid_size; const int smem = sizeof(real) * BLOCK_SIZE; real* d_y; CHECK(cudaMalloc(&d_y, ymem)); real* h_y = (real*)malloc(ymem); switch (method) { case 0: reduce_global << <grid_size, BLOCK_SIZE >> > (d_x, d_y); break; case 1: reduce_shared << <grid_size, BLOCK_SIZE >> > (d_x, d_y); break; case 2: reduce_dynamic << <grid_size, BLOCK_SIZE, smem >> > (d_x, d_y); break; default: printf("Error: wrong method\n"); exit(1); break; } CHECK(cudaMemcpy(h_y, d_y, ymem, cudaMemcpyDeviceToHost)); real result = 0.0; for (int n = 0; n < grid_size; ++n) { result += h_y[n]; } free(h_y); CHECK(cudaFree(d_y)); return result; } void timing(real* h_x, real* d_x, const int method) { real sum 
= 0; for (int repeat = 0; repeat < NUM_REPEATS; ++repeat) { CHECK(cudaMemcpy(d_x, h_x, M, cudaMemcpyHostToDevice)); cudaEvent_t start, stop; CHECK(cudaEventCreate(&start)); CHECK(cudaEventCreate(&stop)); CHECK(cudaEventRecord(start)); cudaEventQuery(start); sum = reduce(d_x, method); CHECK(cudaEventRecord(stop)); CHECK(cudaEventSynchronize(stop)); float elapsed_time; CHECK(cudaEventElapsedTime(&elapsed_time, start, stop)); printf("Time = %g ms.\n", elapsed_time); CHECK(cudaEventDestroy(start)); CHECK(cudaEventDestroy(stop)); } printf("sum = %f.\n", sum); }
2c1cc739e9da796327e66e4beec4e9a6b3975abf.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "kernels.h" #include <iostream> __device__ uint8_t mul_uchar(uint8_t value1, int value2) { uint32_t out = value1 * value2; if (out > 0xff) { return 0xff; } return static_cast<uint8_t>(out); } __device__ uint8_t muladd_uchar(uint8_t value, uint8_t addValue, int mulValue) { uint32_t out = value * mulValue; if (out > 0xff) { return 0xff; } out = out + addValue; if (out > 0xff) { return 0xff; } return static_cast<uint8_t>(out); } __global__ void add(const Npp8u* src1, const Npp8u* src2, Npp8u* dst, int step, int width, int height) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= width || y >= height) { return; } int offset = y * step + x; dst[offset] = src1[offset] + src2[offset]; dst[offset] |= -(dst[offset] < src2[offset]); } __global__ void addc(const Npp8u* src, const Npp8u value, Npp8u* dst, int step, int width, int height) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= width || y >= height) { return; } int offset = y * step + x; dst[offset] = src[offset] + value; dst[offset] |= -(dst[offset] < value); } __global__ void mulc(const Npp8u* src, const Npp8u value, Npp8u* dst, int step, int width, int height) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= width || y >= height) { return; } int offset = y * step + x; uint32_t dstValue = src[offset] * value; if (dstValue > 255) { dst[offset] = 255; } else { dst[offset] = dstValue; } } __global__ void addcmulc(const Npp8u* src, const Npp8u addValue, const Npp8u mulValue, Npp8u* dst, int step, int width, int height) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= width || y >= height) { return; } int offset = y * step + x; int dstValue = src[offset] * mulValue; 
dstValue = dstValue + addValue; if (dstValue > 255) { dstValue = 255; } dst[offset] = dstValue; } __global__ void add_32(const Npp8u* src1, const Npp8u* src2, Npp8u* dst, int step, int width, int height) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= width || y >= height) { return; } x = x << 5; for (auto i = 0; i < 32; i++) { int offset = y * step + x + i; dst[offset] = src1[offset] + src2[offset]; } } // https://docs.nvidia.com/cuda/cuda-math-api/group__CUDA__MATH__INTRINSIC__SIMD.html#group__CUDA__MATH__INTRINSIC__SIMD #define CLAMP_1(x) x < 0 ? 0 : (x > 1 ? 1 : x) #define CLAMP_255(x) x < 0 ? 0 : (x > 255 ? 255 : x) #define CLAMP_int8(x) x < -128 ? -128 : (x > 127 ? 127 : x) __global__ void add_4k_(const Npp32u* src1, const Npp32u* src2, Npp32u* dst, int step, int width, int height) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= width || y >= height) { return; } int offset = y * step + x; dst[offset] = __vaddus4(src1[offset], src2[offset]); } __global__ void add_4k(const uchar4* src1, const uchar4* src2, uchar4* dst, int step, int width, int height) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= width || y >= height) { return; } int offset = y * step + x; dst[offset].x = CLAMP_255(src1[offset].x + src2[offset].x); dst[offset].y = CLAMP_255(src1[offset].y + src2[offset].y); dst[offset].z = CLAMP_255(src1[offset].z + src2[offset].z); dst[offset].w = CLAMP_255(src1[offset].w + src2[offset].w); } __global__ void addc_4k(const Npp32u* src, const Npp32u value, Npp32u* dst, int step, int width, int height) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= width || y >= height) { return; } int offset = y * step + x; dst[offset] = __vaddus4(src[offset], value); } __global__ void mulc_4k(const uchar4* src, const Npp32u value, uchar4* dst, int 
step, int width, int height) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= width || y >= height) { return; } int offset = y * step + x; dst[offset].x = mul_uchar(src[offset].x, value); dst[offset].y = mul_uchar(src[offset].y, value); dst[offset].z = mul_uchar(src[offset].z, value); dst[offset].w = mul_uchar(src[offset].w, value); } __global__ void addcmulc_4k_nottherightway(const uchar4* src, const Npp8u addValue, const Npp8u mulValue, uchar4* dst, int step, int width, int height) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= width || y >= height) { return; } int offset = y * step + x; dst[offset].x = muladd_uchar(src[offset].x, addValue, mulValue); dst[offset].y = muladd_uchar(src[offset].y, addValue, mulValue); dst[offset].z = muladd_uchar(src[offset].z, addValue, mulValue); dst[offset].w = muladd_uchar(src[offset].w, addValue, mulValue); } __global__ void addcmulc_4k(const uchar4* src, const Npp8u addValue, const Npp32f mulValue, uchar4* dst, int step, int width, int height) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= width || y >= height) { return; } int offset = y * step + x; auto& srcValue = src[offset]; auto &dstValue = dst[offset]; uint32_t temp = 0; temp = srcValue.x*mulValue + addValue; dstValue.x = temp > 255 ? 255:temp; temp = srcValue.y*mulValue + addValue; dstValue.y = temp > 255 ? 255 : temp; temp = srcValue.z*mulValue + addValue; dstValue.z = temp > 255 ? 255 : temp; temp = srcValue.w*mulValue + addValue; dstValue.w = temp > 255 ? 255 : temp; } #define ADDCMULC_INT8_4k_OP( srcValue, dstValue, mulValue ) \ do \ { \ int32_t temp = (srcValue - 128)*mulValue; \ temp = temp > 127 ? 127: temp; \ temp = temp < -128 ? 
0: temp+128; \ dstValue = temp; \ } while (0) __global__ void brightnesscontrast_uv_int8_4k(const uchar4* src, const Npp32s addValue, const Npp32f mulValue, uchar4* dst, int step, int width, int height) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= width || y >= height) { return; } int offset = y * step + x; auto& srcValue = src[offset]; auto &dstValue = dst[offset]; ADDCMULC_INT8_4k_OP(srcValue.x, dstValue.x, mulValue); ADDCMULC_INT8_4k_OP(srcValue.y, dstValue.y, mulValue); ADDCMULC_INT8_4k_OP(srcValue.z, dstValue.z, mulValue); ADDCMULC_INT8_4k_OP(srcValue.w, dstValue.w, mulValue); } void launchAddKernel(const Npp8u* src1, const Npp8u* src2, Npp8u* dst, int step, NppiSize size, hipStream_t stream, std::string method) { if (method == BASIC) { dim3 block(32, 32); dim3 grid((size.width + block.x - 1) / block.x, (size.height + block.y - 1) / block.y); add << <grid, block, 0, stream >> > (src1, src2, dst, step, size.width, size.height); } else if (method == M_32) { auto width = size.width >> 5; dim3 block(32, 32); dim3 grid((width + block.x - 1) / block.x, (size.height + block.y - 1) / block.y); add_32 << <grid, block, 0, stream >> > (src1, src2, dst, step, width, size.height); } else if (method == M_4K) { auto width = size.width >> 2; step = step >> 2; dim3 block(32, 32); dim3 grid((width + block.x - 1) / block.x, (size.height + block.y - 1) / block.y); add_4k << <grid, block, 0, stream >> > (reinterpret_cast<const uchar4*>(src1), reinterpret_cast<const uchar4*>(src2), reinterpret_cast<uchar4*>(dst), step, width, size.height); } } void launchAddCKernel(const Npp8u* src1, const Npp32u value, Npp8u* dst, int step, NppiSize size, hipStream_t stream, std::string method) { if (method == BASIC) { dim3 block(32, 32); dim3 grid((size.width + block.x - 1) / block.x, (size.height + block.y - 1) / block.y); addc << <grid, block, 0, stream >> > (src1, static_cast<Npp8u>(value), dst, step, size.width, size.height); } else 
if (method == M_4K) { auto width = size.width >> 2; step = step >> 2; dim3 block(32, 32); dim3 grid((width + block.x - 1) / block.x, (size.height + block.y - 1) / block.y); const Npp32u simdvalue = value | value << 8 | value << 16 | value << 24; addc_4k << <grid, block, 0, stream >> > (reinterpret_cast<const uint32_t*>(src1), simdvalue, reinterpret_cast<uint32_t*>(dst), step, width, size.height); } } void launchMulCKernel(const Npp8u* src1, const Npp32u value, Npp8u* dst, int step, NppiSize size, hipStream_t stream, std::string method) { if (method == BASIC) { dim3 block(32, 32); dim3 grid((size.width + block.x - 1) / block.x, (size.height + block.y - 1) / block.y); mulc << <grid, block, 0, stream >> > (src1, static_cast<Npp8u>(value), dst, step, size.width, size.height); } else if (method == M_4K) { auto width = size.width >> 2; step = step >> 2; dim3 block(32, 32); dim3 grid((width + block.x - 1) / block.x, (size.height + block.y - 1) / block.y); mulc_4k << <grid, block, 0, stream >> > (reinterpret_cast<const uchar4*>(src1), value, reinterpret_cast<uchar4*>(dst), step, width, size.height); } } void launchAddCMulCKernel(const Npp8u* src, const Npp32u addValue, const Npp32f mulValue, Npp8u* dst, int step, NppiSize size, hipStream_t stream, std::string method) { if (method == BASIC) { dim3 block(32, 32); dim3 grid((size.width + block.x - 1) / block.x, (size.height + block.y - 1) / block.y); addcmulc << <grid, block, 0, stream >> > (src, static_cast<Npp8u>(addValue), static_cast<Npp8u>(mulValue), dst, step, size.width, size.height); } else if (method == M_4K) { auto width = size.width >> 2; step = step >> 2; dim3 block(32, 32); dim3 grid((width + block.x - 1) / block.x, (size.height + block.y - 1) / block.y); addcmulc_4k << <grid, block, 0, stream >> > (reinterpret_cast<const uchar4*>(src), addValue, mulValue, reinterpret_cast<uchar4*>(dst), step, width, size.height); } } void launchBrightnessContrast_uv_int8(const Npp8u* src, const Npp32s addValue, const Npp32f 
mulValue, Npp8u* dst, int step, NppiSize size, hipStream_t stream, std::string method) { if (method == BASIC) { } else if (method == M_4K) { auto width = size.width >> 2; step = step >> 2; dim3 block(32, 32); dim3 grid((width + block.x - 1) / block.x, (size.height + block.y - 1) / block.y); brightnesscontrast_uv_int8_4k << <grid, block, 0, stream >> > (reinterpret_cast<const uchar4*>(src), addValue, mulValue, reinterpret_cast<uchar4*>(dst), step, width, size.height); } }
2c1cc739e9da796327e66e4beec4e9a6b3975abf.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include "kernels.h" #include <iostream> __device__ uint8_t mul_uchar(uint8_t value1, int value2) { uint32_t out = value1 * value2; if (out > 0xff) { return 0xff; } return static_cast<uint8_t>(out); } __device__ uint8_t muladd_uchar(uint8_t value, uint8_t addValue, int mulValue) { uint32_t out = value * mulValue; if (out > 0xff) { return 0xff; } out = out + addValue; if (out > 0xff) { return 0xff; } return static_cast<uint8_t>(out); } __global__ void add(const Npp8u* src1, const Npp8u* src2, Npp8u* dst, int step, int width, int height) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= width || y >= height) { return; } int offset = y * step + x; dst[offset] = src1[offset] + src2[offset]; dst[offset] |= -(dst[offset] < src2[offset]); } __global__ void addc(const Npp8u* src, const Npp8u value, Npp8u* dst, int step, int width, int height) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= width || y >= height) { return; } int offset = y * step + x; dst[offset] = src[offset] + value; dst[offset] |= -(dst[offset] < value); } __global__ void mulc(const Npp8u* src, const Npp8u value, Npp8u* dst, int step, int width, int height) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= width || y >= height) { return; } int offset = y * step + x; uint32_t dstValue = src[offset] * value; if (dstValue > 255) { dst[offset] = 255; } else { dst[offset] = dstValue; } } __global__ void addcmulc(const Npp8u* src, const Npp8u addValue, const Npp8u mulValue, Npp8u* dst, int step, int width, int height) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= width || y >= height) { return; } int offset = y * step + x; int dstValue = src[offset] * mulValue; dstValue = dstValue + addValue; if (dstValue > 255) { dstValue = 
255; } dst[offset] = dstValue; } __global__ void add_32(const Npp8u* src1, const Npp8u* src2, Npp8u* dst, int step, int width, int height) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= width || y >= height) { return; } x = x << 5; for (auto i = 0; i < 32; i++) { int offset = y * step + x + i; dst[offset] = src1[offset] + src2[offset]; } } // https://docs.nvidia.com/cuda/cuda-math-api/group__CUDA__MATH__INTRINSIC__SIMD.html#group__CUDA__MATH__INTRINSIC__SIMD #define CLAMP_1(x) x < 0 ? 0 : (x > 1 ? 1 : x) #define CLAMP_255(x) x < 0 ? 0 : (x > 255 ? 255 : x) #define CLAMP_int8(x) x < -128 ? -128 : (x > 127 ? 127 : x) __global__ void add_4k_(const Npp32u* src1, const Npp32u* src2, Npp32u* dst, int step, int width, int height) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= width || y >= height) { return; } int offset = y * step + x; dst[offset] = __vaddus4(src1[offset], src2[offset]); } __global__ void add_4k(const uchar4* src1, const uchar4* src2, uchar4* dst, int step, int width, int height) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= width || y >= height) { return; } int offset = y * step + x; dst[offset].x = CLAMP_255(src1[offset].x + src2[offset].x); dst[offset].y = CLAMP_255(src1[offset].y + src2[offset].y); dst[offset].z = CLAMP_255(src1[offset].z + src2[offset].z); dst[offset].w = CLAMP_255(src1[offset].w + src2[offset].w); } __global__ void addc_4k(const Npp32u* src, const Npp32u value, Npp32u* dst, int step, int width, int height) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= width || y >= height) { return; } int offset = y * step + x; dst[offset] = __vaddus4(src[offset], value); } __global__ void mulc_4k(const uchar4* src, const Npp32u value, uchar4* dst, int step, int width, int height) { int x = blockIdx.x * blockDim.x + 
threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= width || y >= height) { return; } int offset = y * step + x; dst[offset].x = mul_uchar(src[offset].x, value); dst[offset].y = mul_uchar(src[offset].y, value); dst[offset].z = mul_uchar(src[offset].z, value); dst[offset].w = mul_uchar(src[offset].w, value); } __global__ void addcmulc_4k_nottherightway(const uchar4* src, const Npp8u addValue, const Npp8u mulValue, uchar4* dst, int step, int width, int height) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= width || y >= height) { return; } int offset = y * step + x; dst[offset].x = muladd_uchar(src[offset].x, addValue, mulValue); dst[offset].y = muladd_uchar(src[offset].y, addValue, mulValue); dst[offset].z = muladd_uchar(src[offset].z, addValue, mulValue); dst[offset].w = muladd_uchar(src[offset].w, addValue, mulValue); } __global__ void addcmulc_4k(const uchar4* src, const Npp8u addValue, const Npp32f mulValue, uchar4* dst, int step, int width, int height) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= width || y >= height) { return; } int offset = y * step + x; auto& srcValue = src[offset]; auto &dstValue = dst[offset]; uint32_t temp = 0; temp = srcValue.x*mulValue + addValue; dstValue.x = temp > 255 ? 255:temp; temp = srcValue.y*mulValue + addValue; dstValue.y = temp > 255 ? 255 : temp; temp = srcValue.z*mulValue + addValue; dstValue.z = temp > 255 ? 255 : temp; temp = srcValue.w*mulValue + addValue; dstValue.w = temp > 255 ? 255 : temp; } #define ADDCMULC_INT8_4k_OP( srcValue, dstValue, mulValue ) \ do \ { \ int32_t temp = (srcValue - 128)*mulValue; \ temp = temp > 127 ? 127: temp; \ temp = temp < -128 ? 
0: temp+128; \ dstValue = temp; \ } while (0) __global__ void brightnesscontrast_uv_int8_4k(const uchar4* src, const Npp32s addValue, const Npp32f mulValue, uchar4* dst, int step, int width, int height) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= width || y >= height) { return; } int offset = y * step + x; auto& srcValue = src[offset]; auto &dstValue = dst[offset]; ADDCMULC_INT8_4k_OP(srcValue.x, dstValue.x, mulValue); ADDCMULC_INT8_4k_OP(srcValue.y, dstValue.y, mulValue); ADDCMULC_INT8_4k_OP(srcValue.z, dstValue.z, mulValue); ADDCMULC_INT8_4k_OP(srcValue.w, dstValue.w, mulValue); } void launchAddKernel(const Npp8u* src1, const Npp8u* src2, Npp8u* dst, int step, NppiSize size, cudaStream_t stream, std::string method) { if (method == BASIC) { dim3 block(32, 32); dim3 grid((size.width + block.x - 1) / block.x, (size.height + block.y - 1) / block.y); add << <grid, block, 0, stream >> > (src1, src2, dst, step, size.width, size.height); } else if (method == M_32) { auto width = size.width >> 5; dim3 block(32, 32); dim3 grid((width + block.x - 1) / block.x, (size.height + block.y - 1) / block.y); add_32 << <grid, block, 0, stream >> > (src1, src2, dst, step, width, size.height); } else if (method == M_4K) { auto width = size.width >> 2; step = step >> 2; dim3 block(32, 32); dim3 grid((width + block.x - 1) / block.x, (size.height + block.y - 1) / block.y); add_4k << <grid, block, 0, stream >> > (reinterpret_cast<const uchar4*>(src1), reinterpret_cast<const uchar4*>(src2), reinterpret_cast<uchar4*>(dst), step, width, size.height); } } void launchAddCKernel(const Npp8u* src1, const Npp32u value, Npp8u* dst, int step, NppiSize size, cudaStream_t stream, std::string method) { if (method == BASIC) { dim3 block(32, 32); dim3 grid((size.width + block.x - 1) / block.x, (size.height + block.y - 1) / block.y); addc << <grid, block, 0, stream >> > (src1, static_cast<Npp8u>(value), dst, step, size.width, size.height); } 
else if (method == M_4K) { auto width = size.width >> 2; step = step >> 2; dim3 block(32, 32); dim3 grid((width + block.x - 1) / block.x, (size.height + block.y - 1) / block.y); const Npp32u simdvalue = value | value << 8 | value << 16 | value << 24; addc_4k << <grid, block, 0, stream >> > (reinterpret_cast<const uint32_t*>(src1), simdvalue, reinterpret_cast<uint32_t*>(dst), step, width, size.height); } } void launchMulCKernel(const Npp8u* src1, const Npp32u value, Npp8u* dst, int step, NppiSize size, cudaStream_t stream, std::string method) { if (method == BASIC) { dim3 block(32, 32); dim3 grid((size.width + block.x - 1) / block.x, (size.height + block.y - 1) / block.y); mulc << <grid, block, 0, stream >> > (src1, static_cast<Npp8u>(value), dst, step, size.width, size.height); } else if (method == M_4K) { auto width = size.width >> 2; step = step >> 2; dim3 block(32, 32); dim3 grid((width + block.x - 1) / block.x, (size.height + block.y - 1) / block.y); mulc_4k << <grid, block, 0, stream >> > (reinterpret_cast<const uchar4*>(src1), value, reinterpret_cast<uchar4*>(dst), step, width, size.height); } } void launchAddCMulCKernel(const Npp8u* src, const Npp32u addValue, const Npp32f mulValue, Npp8u* dst, int step, NppiSize size, cudaStream_t stream, std::string method) { if (method == BASIC) { dim3 block(32, 32); dim3 grid((size.width + block.x - 1) / block.x, (size.height + block.y - 1) / block.y); addcmulc << <grid, block, 0, stream >> > (src, static_cast<Npp8u>(addValue), static_cast<Npp8u>(mulValue), dst, step, size.width, size.height); } else if (method == M_4K) { auto width = size.width >> 2; step = step >> 2; dim3 block(32, 32); dim3 grid((width + block.x - 1) / block.x, (size.height + block.y - 1) / block.y); addcmulc_4k << <grid, block, 0, stream >> > (reinterpret_cast<const uchar4*>(src), addValue, mulValue, reinterpret_cast<uchar4*>(dst), step, width, size.height); } } void launchBrightnessContrast_uv_int8(const Npp8u* src, const Npp32s addValue, const 
Npp32f mulValue, Npp8u* dst, int step, NppiSize size, cudaStream_t stream, std::string method) { if (method == BASIC) { } else if (method == M_4K) { auto width = size.width >> 2; step = step >> 2; dim3 block(32, 32); dim3 grid((width + block.x - 1) / block.x, (size.height + block.y - 1) / block.y); brightnesscontrast_uv_int8_4k << <grid, block, 0, stream >> > (reinterpret_cast<const uchar4*>(src), addValue, mulValue, reinterpret_cast<uchar4*>(dst), step, width, size.height); } }
f17643d0380780de40b30c48597d17d5e7e1e786.hip
// !!! This is a file automatically generated by hipify!!! /* Multiplica um vetor por uma constante. Exemplo para o uso de memria constante em CUDA */ #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #define TAM 100 #define VLR_ESCALAR 10 #define TPB 256 __device__ __constant__ int escalar_d; __global__ void mult(int *vetA_glb){ int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx < TAM) { vetA_glb[idx] = escalar_d * vetA_glb[idx]; } } int main(int argc,char **argv){ int *vetA_h; int *vetA_d; int blocksPerGrid; int i, escalar_h; //Aloca o vetor no host vetA_h=(int *)malloc(TAM * sizeof(int)); //Aloca o vetor no device hipMalloc((void**)&vetA_d,TAM*(sizeof(int))); //Preenche o vetor no host for(i=0;i<TAM;i++){ vetA_h[i]=i; } //Copia o contedo do vetor para o device hipMemcpy(vetA_d,vetA_h,TAM*(sizeof(int)), hipMemcpyHostToDevice); escalar_h=VLR_ESCALAR; //Copia o contedo de escalar_h, lido do terminal, para a varivel constante escalar_d, no device hipMemcpyToSymbol(escalar_d,&escalar_h,sizeof(int)); //Define a quantidade de blocos por grade blocksPerGrid=(TAM+TPB-1)/TPB; //Invoca o kernel com blocksPerGrid blocos e threadsPerBlock threads hipLaunchKernelGGL(( mult) , dim3(blocksPerGrid),dim3(TPB), 0, 0, vetA_d); //Copia o resultado da soma de volta para o host hipMemcpy(vetA_h,vetA_d,TAM*(sizeof(int)), hipMemcpyDeviceToHost); //Imprime o resultado no host for(i=0;i<TAM;i++){ printf("%d ",vetA_h[i]); } //Desaloca os vetores no host free(vetA_h); //Desaloca os vetores no device hipFree(vetA_d); }
f17643d0380780de40b30c48597d17d5e7e1e786.cu
/* Multiplica um vetor por uma constante. Exemplo para o uso de memória constante em CUDA */ #include <stdio.h> #include <stdlib.h> #include <cuda.h> #define TAM 100 #define VLR_ESCALAR 10 #define TPB 256 __device__ __constant__ int escalar_d; __global__ void mult(int *vetA_glb){ int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx < TAM) { vetA_glb[idx] = escalar_d * vetA_glb[idx]; } } int main(int argc,char **argv){ int *vetA_h; int *vetA_d; int blocksPerGrid; int i, escalar_h; //Aloca o vetor no host vetA_h=(int *)malloc(TAM * sizeof(int)); //Aloca o vetor no device cudaMalloc((void**)&vetA_d,TAM*(sizeof(int))); //Preenche o vetor no host for(i=0;i<TAM;i++){ vetA_h[i]=i; } //Copia o conteúdo do vetor para o device cudaMemcpy(vetA_d,vetA_h,TAM*(sizeof(int)), cudaMemcpyHostToDevice); escalar_h=VLR_ESCALAR; //Copia o conteúdo de escalar_h, lido do terminal, para a variável constante escalar_d, no device cudaMemcpyToSymbol(escalar_d,&escalar_h,sizeof(int)); //Define a quantidade de blocos por grade blocksPerGrid=(TAM+TPB-1)/TPB; //Invoca o kernel com blocksPerGrid blocos e threadsPerBlock threads mult <<<blocksPerGrid,TPB>>> (vetA_d); //Copia o resultado da soma de volta para o host cudaMemcpy(vetA_h,vetA_d,TAM*(sizeof(int)), cudaMemcpyDeviceToHost); //Imprime o resultado no host for(i=0;i<TAM;i++){ printf("%d ",vetA_h[i]); } //Desaloca os vetores no host free(vetA_h); //Desaloca os vetores no device cudaFree(vetA_d); }
84bd9d8c05f5749d27e9f446cbae27911d18e7d7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "head.h" extern double *d_t; extern double *d_dt; extern double *d_it; extern double *d_V; extern double *d_m; extern double *d_m0; extern double *d_h; extern double *d_h0; extern double *d_jj; extern double *d_jj0; extern double *d_d; extern double *d_d0; extern double *d_f; extern double *d_f0; extern double *d_X; extern double *d_X0; extern double *d_cai; extern double *isi; extern double *esi; extern double *ina; extern double *ik; extern double *ik1; extern double *ikp; extern double *ib; __global__ void comp_ina(double *d_V, double *d_m, double *d_h, double *d_jj, double *d_m0, double *d_h0, double *d_jj0, double *d_dt, double *ina) { int k = threadIdx.x + blockIdx.x * blockDim.x; if(k<nx*ny){ int j = (int)(k/nx);//V[i][j]iji //int id = k+(nx+2)+1+2*j;//index(nx+2)*(ny+2)global index,+10,0(nx+2)0 //d_it[k] = 0.0; double gna = 23.0; double ena = ((R*temp) / frdy)*log(nao / nai); double am = 0.32*(d_V[k+nx+2+1+2*j] + 47.13) / (1.0 - exp(-0.1*(d_V[k+nx+2+1+2*j] + 47.13))); double bm = 0.08*exp(-d_V[k+nx+2+1+2*j] / 11.0); double ah, bh, aj ,bj; if (d_V[k+nx+2+1+2*j] < -40.0) { ah = 0.135*exp((80 + d_V[k+nx+2+1+2*j]) / -6.8); bh = 3.56*exp(0.079*d_V[k+nx+2+1+2*j]) + 310000 * exp(0.35*d_V[k+nx+2+1+2*j]); aj = (-127140.0 * exp(0.2444*d_V[k+nx+2+1+2*j]) - 0.00003474*exp(-0.04391*d_V[k+nx+2+1+2*j]))* ((d_V[k+nx+2+1+2*j] + 37.78)/(1.0 + exp(0.311*(d_V[k+nx+2+1+2*j] + 79.23)))); bj = (0.1212*exp(-0.01052*d_V[k+nx+2+1+2*j])) / (1.0 + exp(-0.1378*(d_V[k+nx+2+1+2*j] + 40.14))); } else { ah = 0.0; bh = 1.0 / (0.13*(1 + exp((d_V[k+nx+2+1+2*j] + 10.66) / -11.1))); aj = 0.0; bj = (0.3*exp(-0.0000002535*d_V[k+nx+2+1+2*j])) / (1.0 + exp(-0.1*(d_V[k+nx+2+1+2*j] + 32.0))); } double mtau = 1 / (am + bm); double htau = 1 / (ah + bh); double jtau = 1 / (aj + bj); double mss = am*mtau; double hss = ah*htau; double jss = aj*jtau; d_m0[k] = mss - (mss - d_m[k])*exp(- d_dt[k] / mtau);//d_dt 
d_h0[k] = hss - (hss - d_h[k])*exp(- d_dt[k] / htau); d_jj0[k] = jss - (jss - d_jj[k])*exp(- d_dt[k] / jtau); ina[k] = gna*d_m0[k] * d_m0[k] * d_m0[k] * d_h0[k] * d_jj0[k] * (d_V[k+nx+2+1+2*j] - ena); } } __global__ void comp_ical(double *d_V, double *d_d, double *d_f, double *d_d0, double *d_f0, double *d_cai, double *d_dt, double *isi,double *esi){ int k = threadIdx.x + blockIdx.x * blockDim.x; //int i = threadIdx.x; if(k<nx*ny){ int j = (int)(k/nx); //__shared__ double esi[tpb];// //__shared__ double isi[tpb]; esi[k] = 7.7 - 13.0287*log(d_cai[k]); double ad = 0.095*exp(-0.01*(d_V[k+nx+2+1+2*j] - 5)) / (1.0 + exp(-0.072*(d_V[k+nx+2+1+2*j] - 5))); double bd = 0.07*exp(-0.017*(d_V[k+nx+2+1+2*j] + 44)) / (1.0 + exp(0.05*(d_V[k+nx+2+1+2*j] + 44))); double af = 0.012*exp(-0.008*(d_V[k+nx+2+1+2*j] + 28)) / (1.0 + exp(0.15*(d_V[k+nx+2+1+2*j] + 28))); double bf = 0.0065*exp(-0.02*(d_V[k+nx+2+1+2*j] + 30)) / (1.0 + exp(-0.2*(d_V[k+nx+2+1+2*j] + 30))); double taud = 1.0 / (ad + bd); double tauf = 1.0 / (af + bf); double dss = ad*taud; double fss = af*tauf; d_d0[k] = dss - (dss - d_d[k])*exp(-d_dt[k] / taud); d_f0[k] = fss - (fss - d_f[k])*exp(-d_dt[k] / tauf); isi[k] = 0.09*d_d0[k] * d_f0[k] * (d_V[k+nx+2+1+2*j] - esi[k]); //double dcai = -0.0001*isi[k] + 0.07*(0.0001 - d_cai[k]); //d_cai[k] = d_cai[k] + dcai*d_dt[k]; //d_it[k] = d_it[k] + isi[k]; } } __global__ void update_cai( double *d_cai, double *d_dt, double *isi){ int k = threadIdx.x + blockIdx.x * blockDim.x; //int i = threadIdx.x; if(k<nx*ny){ double dcai = -0.0001*isi[k] + 0.07*(0.0001 - d_cai[k]); d_cai[k] = d_cai[k] + dcai*d_dt[k]; } } void gpu_update_cai(){ int bpg; //tpb = 256; bpg = (nx*ny+tpb-1)/tpb; hipLaunchKernelGGL(( update_cai), dim3(bpg), dim3(tpb), 0, 0, d_cai,d_dt,isi); hipDeviceSynchronize(); } __global__ void comp_ik(double *d_V, double *d_X, double *d_X0, double *d_dt,double *ik){ int k = threadIdx.x + blockIdx.x * blockDim.x; if(k<nx*ny){ int j = (int)(k/nx); double gk = 0.282*sqrt(ko / 5.4); 
double ek = ((R*temp) / frdy)*log(ko / ki); //double prnak = 0.01833; //ek = ((R*temp) / frdy)*log((ko + prnak*nao) / (ki + prnak*nai)); double ax = 0.0005*exp(0.083*(d_V[k+nx+2+1+2*j] + 50)) / (1.0 + exp(0.057*(d_V[k+nx+2+1+2*j] + 50))); double bx = 0.0013*exp(-0.06*(d_V[k+nx+2+1+2*j] + 20)) / (1.0 + exp(-0.04*(d_V[k+nx+2+1+2*j] + 20))); double taux = 1.0 / (ax + bx); double xss = ax*taux; d_X0[k] = xss - (xss - d_X[k])*exp(-d_dt[k] / taux); double Xi; if (d_V[k+nx+2+1+2*j] > -100.0) { Xi = 2.837*(exp(0.04*(d_V[k+nx+2+1+2*j] + 77.0)) - 1.0)/ ((d_V[k+nx+2+1+2*j] + 77.0)*exp(0.04*(d_V[k+nx+2+1+2*j] + 35.0))); } else { Xi = 1.0; } ik[k] = gk*d_X0[k] * Xi*(d_V[k+nx+2+1+2*j] - ek); } } __global__ void comp_ik1(double *d_V, double *ik1) { int k = threadIdx.x + blockIdx.x * blockDim.x; if(k<nx*ny){ int j = (int)(k/nx); double gk1 = 0.6047*(sqrt(ko / 5.4)); double ek1 = ((R*temp) / frdy)*log(ko / ki); double ak1 = 1.02 / (1.0 + exp(0.2385*(d_V[k+nx+2+1+2*j] - ek1 - 59.215))); double bk1 = (0.49124*exp(0.08032*(d_V[k+nx+2+1+2*j] - ek1 + 5.476))+ exp(0.06175*(d_V[k+nx+2+1+2*j] - ek1 - 594.31))) /(1.0 + exp(-0.5143*(d_V[k+nx+2+1+2*j] - ek1 + 4.753))); double K1ss = ak1 / (ak1 + bk1); ik1[k] = gk1*K1ss*(d_V[k+nx+2+1+2*j] - ek1); } } __global__ void comp_ikp(double *d_V, double *ikp) { int k = threadIdx.x + blockIdx.x * blockDim.x; if(k<nx*ny){ int j = (int)(k/nx); double gkp = 0.0183; double ekp = ((R*temp) / frdy)*log(ko / ki); double kp = 1.0 / (1.0 + exp((7.488 - d_V[k+nx+2+1+2*j]) / 5.98)); ikp[k] = gkp*kp*(d_V[k+nx+2+1+2*j] - ekp); } } __global__ void comp_ib(double *d_V, double *d_it,double *ib, double *ik,double *ik1,double *ikp, double *isi,double *ina) { int k = threadIdx.x + blockIdx.x * blockDim.x; if(k<nx*ny){ int j = (int)(k/nx); ib[k] = 0.03921*(d_V[k+nx+2+1+2*j] + 59.87); d_it[k]=ib[k]+ikp[k]+ik1[k]+ik[k]+isi[k]+ina[k]; } } __global__ void new_gate(double *d_m, double *d_h, double *d_jj, double *d_m0, double *d_h0, double *d_jj0, double *d_d, double *d_f, 
double *d_d0, double *d_f0, double *d_X,double *d_X0){ int k = threadIdx.x + blockIdx.x * blockDim.x; if(k<nx*ny){ //int j = (int)(k/nx); d_m[k] = d_m0[k]; d_h[k] = d_h0[k]; d_jj[k] = d_jj0[k]; d_d[k] = d_d0[k]; d_f[k] = d_f0[k]; d_X[k] = d_X0[k]; } } void gpu_new_gate(){ int bpg; //tpb = 256; bpg = (nx*ny+tpb-1)/tpb; hipLaunchKernelGGL(( new_gate), dim3(bpg), dim3(tpb), 0, 0, d_m, d_h, d_jj,d_m0, d_h0, d_jj0, d_d, d_f, d_d0, d_f0, d_X,d_X0); hipDeviceSynchronize(); } void gpu_ion(){ int bpg; //tpb = 256; bpg = (nx*ny+tpb-1)/tpb; hipLaunchKernelGGL(( comp_ina), dim3(bpg), dim3(tpb), 0, 0, d_V, d_m, d_h, d_jj, d_m0, d_h0, d_jj0, d_dt,ina); hipLaunchKernelGGL(( comp_ical), dim3(bpg), dim3(tpb), 0, 0, d_V, d_d, d_f, d_d0, d_f0,d_cai, d_dt, isi,esi); hipLaunchKernelGGL(( comp_ik), dim3(bpg), dim3(tpb), 0, 0, d_V, d_X, d_X0, d_dt,ik); hipLaunchKernelGGL(( comp_ik1), dim3(bpg), dim3(tpb), 0, 0, d_V, ik1); hipLaunchKernelGGL(( comp_ikp), dim3(bpg), dim3(tpb), 0, 0, d_V, ikp); hipLaunchKernelGGL(( comp_ib), dim3(bpg), dim3(tpb), 0, 0, d_V, d_it,ib,ik,ik1,ikp,isi,ina); hipDeviceSynchronize(); }
84bd9d8c05f5749d27e9f446cbae27911d18e7d7.cu
#include "head.h" extern double *d_t; extern double *d_dt; extern double *d_it; extern double *d_V; extern double *d_m; extern double *d_m0; extern double *d_h; extern double *d_h0; extern double *d_jj; extern double *d_jj0; extern double *d_d; extern double *d_d0; extern double *d_f; extern double *d_f0; extern double *d_X; extern double *d_X0; extern double *d_cai; extern double *isi; extern double *esi; extern double *ina; extern double *ik; extern double *ik1; extern double *ikp; extern double *ib; __global__ void comp_ina(double *d_V, double *d_m, double *d_h, double *d_jj, double *d_m0, double *d_h0, double *d_jj0, double *d_dt, double *ina) { int k = threadIdx.x + blockIdx.x * blockDim.x; if(k<nx*ny){ int j = (int)(k/nx);//相当于二维矩阵中V[i][j]的i行,这里的j表示矩阵的i。 //int id = k+(nx+2)+1+2*j;//这是什么index?这是扩充为(nx+2)*(ny+2)后的global index,这里+1是为了不要(0,0)这一点,(nx+2)的意义是不要第0行。 //d_it[k] = 0.0; double gna = 23.0; double ena = ((R*temp) / frdy)*log(nao / nai); double am = 0.32*(d_V[k+nx+2+1+2*j] + 47.13) / (1.0 - exp(-0.1*(d_V[k+nx+2+1+2*j] + 47.13))); double bm = 0.08*exp(-d_V[k+nx+2+1+2*j] / 11.0); double ah, bh, aj ,bj; if (d_V[k+nx+2+1+2*j] < -40.0) { ah = 0.135*exp((80 + d_V[k+nx+2+1+2*j]) / -6.8); bh = 3.56*exp(0.079*d_V[k+nx+2+1+2*j]) + 310000 * exp(0.35*d_V[k+nx+2+1+2*j]); aj = (-127140.0 * exp(0.2444*d_V[k+nx+2+1+2*j]) - 0.00003474*exp(-0.04391*d_V[k+nx+2+1+2*j]))* ((d_V[k+nx+2+1+2*j] + 37.78)/(1.0 + exp(0.311*(d_V[k+nx+2+1+2*j] + 79.23)))); bj = (0.1212*exp(-0.01052*d_V[k+nx+2+1+2*j])) / (1.0 + exp(-0.1378*(d_V[k+nx+2+1+2*j] + 40.14))); } else { ah = 0.0; bh = 1.0 / (0.13*(1 + exp((d_V[k+nx+2+1+2*j] + 10.66) / -11.1))); aj = 0.0; bj = (0.3*exp(-0.0000002535*d_V[k+nx+2+1+2*j])) / (1.0 + exp(-0.1*(d_V[k+nx+2+1+2*j] + 32.0))); } double mtau = 1 / (am + bm); double htau = 1 / (ah + bh); double jtau = 1 / (aj + bj); double mss = am*mtau; double hss = ah*htau; double jss = aj*jtau; d_m0[k] = mss - (mss - d_m[k])*exp(- d_dt[k] / mtau);//可能是d_dt类型出了问题 d_h0[k] = hss - (hss - 
d_h[k])*exp(- d_dt[k] / htau); d_jj0[k] = jss - (jss - d_jj[k])*exp(- d_dt[k] / jtau); ina[k] = gna*d_m0[k] * d_m0[k] * d_m0[k] * d_h0[k] * d_jj0[k] * (d_V[k+nx+2+1+2*j] - ena); } } __global__ void comp_ical(double *d_V, double *d_d, double *d_f, double *d_d0, double *d_f0, double *d_cai, double *d_dt, double *isi,double *esi){ int k = threadIdx.x + blockIdx.x * blockDim.x; //int i = threadIdx.x; if(k<nx*ny){ int j = (int)(k/nx); //__shared__ double esi[tpb];//这两个变量设置共享内存,也可以不设置,不影响 //__shared__ double isi[tpb]; esi[k] = 7.7 - 13.0287*log(d_cai[k]); double ad = 0.095*exp(-0.01*(d_V[k+nx+2+1+2*j] - 5)) / (1.0 + exp(-0.072*(d_V[k+nx+2+1+2*j] - 5))); double bd = 0.07*exp(-0.017*(d_V[k+nx+2+1+2*j] + 44)) / (1.0 + exp(0.05*(d_V[k+nx+2+1+2*j] + 44))); double af = 0.012*exp(-0.008*(d_V[k+nx+2+1+2*j] + 28)) / (1.0 + exp(0.15*(d_V[k+nx+2+1+2*j] + 28))); double bf = 0.0065*exp(-0.02*(d_V[k+nx+2+1+2*j] + 30)) / (1.0 + exp(-0.2*(d_V[k+nx+2+1+2*j] + 30))); double taud = 1.0 / (ad + bd); double tauf = 1.0 / (af + bf); double dss = ad*taud; double fss = af*tauf; d_d0[k] = dss - (dss - d_d[k])*exp(-d_dt[k] / taud); d_f0[k] = fss - (fss - d_f[k])*exp(-d_dt[k] / tauf); isi[k] = 0.09*d_d0[k] * d_f0[k] * (d_V[k+nx+2+1+2*j] - esi[k]); //double dcai = -0.0001*isi[k] + 0.07*(0.0001 - d_cai[k]); //d_cai[k] = d_cai[k] + dcai*d_dt[k]; //d_it[k] = d_it[k] + isi[k]; } } __global__ void update_cai( double *d_cai, double *d_dt, double *isi){ int k = threadIdx.x + blockIdx.x * blockDim.x; //int i = threadIdx.x; if(k<nx*ny){ double dcai = -0.0001*isi[k] + 0.07*(0.0001 - d_cai[k]); d_cai[k] = d_cai[k] + dcai*d_dt[k]; } } void gpu_update_cai(){ int bpg; //tpb = 256; bpg = (nx*ny+tpb-1)/tpb; update_cai<<<bpg, tpb>>>(d_cai,d_dt,isi); cudaDeviceSynchronize(); } __global__ void comp_ik(double *d_V, double *d_X, double *d_X0, double *d_dt,double *ik){ int k = threadIdx.x + blockIdx.x * blockDim.x; if(k<nx*ny){ int j = (int)(k/nx); double gk = 0.282*sqrt(ko / 5.4); double ek = ((R*temp) / frdy)*log(ko / 
ki); //double prnak = 0.01833; //ek = ((R*temp) / frdy)*log((ko + prnak*nao) / (ki + prnak*nai)); double ax = 0.0005*exp(0.083*(d_V[k+nx+2+1+2*j] + 50)) / (1.0 + exp(0.057*(d_V[k+nx+2+1+2*j] + 50))); double bx = 0.0013*exp(-0.06*(d_V[k+nx+2+1+2*j] + 20)) / (1.0 + exp(-0.04*(d_V[k+nx+2+1+2*j] + 20))); double taux = 1.0 / (ax + bx); double xss = ax*taux; d_X0[k] = xss - (xss - d_X[k])*exp(-d_dt[k] / taux); double Xi; if (d_V[k+nx+2+1+2*j] > -100.0) { Xi = 2.837*(exp(0.04*(d_V[k+nx+2+1+2*j] + 77.0)) - 1.0)/ ((d_V[k+nx+2+1+2*j] + 77.0)*exp(0.04*(d_V[k+nx+2+1+2*j] + 35.0))); } else { Xi = 1.0; } ik[k] = gk*d_X0[k] * Xi*(d_V[k+nx+2+1+2*j] - ek); } } __global__ void comp_ik1(double *d_V, double *ik1) { int k = threadIdx.x + blockIdx.x * blockDim.x; if(k<nx*ny){ int j = (int)(k/nx); double gk1 = 0.6047*(sqrt(ko / 5.4)); double ek1 = ((R*temp) / frdy)*log(ko / ki); double ak1 = 1.02 / (1.0 + exp(0.2385*(d_V[k+nx+2+1+2*j] - ek1 - 59.215))); double bk1 = (0.49124*exp(0.08032*(d_V[k+nx+2+1+2*j] - ek1 + 5.476))+ exp(0.06175*(d_V[k+nx+2+1+2*j] - ek1 - 594.31))) /(1.0 + exp(-0.5143*(d_V[k+nx+2+1+2*j] - ek1 + 4.753))); double K1ss = ak1 / (ak1 + bk1); ik1[k] = gk1*K1ss*(d_V[k+nx+2+1+2*j] - ek1); } } __global__ void comp_ikp(double *d_V, double *ikp) { int k = threadIdx.x + blockIdx.x * blockDim.x; if(k<nx*ny){ int j = (int)(k/nx); double gkp = 0.0183; double ekp = ((R*temp) / frdy)*log(ko / ki); double kp = 1.0 / (1.0 + exp((7.488 - d_V[k+nx+2+1+2*j]) / 5.98)); ikp[k] = gkp*kp*(d_V[k+nx+2+1+2*j] - ekp); } } __global__ void comp_ib(double *d_V, double *d_it,double *ib, double *ik,double *ik1,double *ikp, double *isi,double *ina) { int k = threadIdx.x + blockIdx.x * blockDim.x; if(k<nx*ny){ int j = (int)(k/nx); ib[k] = 0.03921*(d_V[k+nx+2+1+2*j] + 59.87); d_it[k]=ib[k]+ikp[k]+ik1[k]+ik[k]+isi[k]+ina[k]; } } __global__ void new_gate(double *d_m, double *d_h, double *d_jj, double *d_m0, double *d_h0, double *d_jj0, double *d_d, double *d_f, double *d_d0, double *d_f0, double 
*d_X,double *d_X0){ int k = threadIdx.x + blockIdx.x * blockDim.x; if(k<nx*ny){ //int j = (int)(k/nx); d_m[k] = d_m0[k]; d_h[k] = d_h0[k]; d_jj[k] = d_jj0[k]; d_d[k] = d_d0[k]; d_f[k] = d_f0[k]; d_X[k] = d_X0[k]; } } void gpu_new_gate(){ int bpg; //tpb = 256; bpg = (nx*ny+tpb-1)/tpb; new_gate<<<bpg, tpb>>>(d_m, d_h, d_jj,d_m0, d_h0, d_jj0, d_d, d_f, d_d0, d_f0, d_X,d_X0); cudaDeviceSynchronize(); } void gpu_ion(){ int bpg; //tpb = 256; bpg = (nx*ny+tpb-1)/tpb; comp_ina<<<bpg, tpb>>>(d_V, d_m, d_h, d_jj, d_m0, d_h0, d_jj0, d_dt,ina); comp_ical<<<bpg, tpb>>>(d_V, d_d, d_f, d_d0, d_f0,d_cai, d_dt, isi,esi); comp_ik<<<bpg, tpb>>>(d_V, d_X, d_X0, d_dt,ik); comp_ik1<<<bpg, tpb>>>(d_V, ik1); comp_ikp<<<bpg, tpb>>>(d_V, ikp); comp_ib<<<bpg, tpb>>>(d_V, d_it,ib,ik,ik1,ikp,isi,ina); cudaDeviceSynchronize(); }
98d307dc65d07a634e1ffeec7289ef236650774e.hip
// !!! This is a file automatically generated by hipify!!! /* Copyright 2022 The Microsoft DeepSpeed Team */ #include <limits> #include "inference_cuda_layers.h" #ifndef __HIP_PLATFORM_HCC__ #include <hip/hip_runtime_api.h> #endif #include <cstdio> #include <cstdlib> #include <ctime> #define ATTN_THREADS 256 #define MAX_REG_SIZE 8 #define minus_infinity -10000.0 void CheckCudaErrorAux(const char* file, unsigned line) { hipError_t err = hipGetLastError(); if (err == hipSuccess) return; std::cerr << hipGetErrorString(err) << "(" << err << ") at " << file << ":" << line << std::endl; throw std::runtime_error("CUDA ERROR!!!\n"); } #define CUDA_CHECK_ERROR() CheckCudaErrorAux(__FILE__, __LINE__) namespace cg = cooperative_groups; __global__ void attn_softmax_v2(__half* vals, __half* mask, __half* alibi, float layer_scale, bool triangular, bool recompute, bool local_attention, int window_size, int total_count, int heads, int sequence_length, int num_seq, int head_offset, int mask_stride, int mp_size, int iterations, int reduceWidth) { #ifdef HALF_PRECISION_AVAILABLE cg::thread_block b = cg::this_thread_block(); cg::thread_block_tile<WARP_SIZE> g = cg::tiled_partition<WARP_SIZE>(b); float2 low_data[MAX_REG_SIZE]; float2 high_data[MAX_REG_SIZE]; const __half zero_h = __float2half(0.f); int wid = threadIdx.x >> 5; int lane = threadIdx.x & 0x1f; int warp_num = blockDim.x >> 5; int reduce_blocks = reduceWidth >> 5; int seq_lane = threadIdx.x % reduceWidth; __shared__ float partialSum[MAX_WARP_NUM]; int iter_offset = blockIdx.x * (warp_num / reduce_blocks) + (wid / reduce_blocks); int batch_idx = iter_offset / (num_seq * heads); int alibi_offset = batch_idx * heads * mp_size + head_offset; int mask_offset = batch_idx * mask_stride + (iter_offset % mask_stride); if (iter_offset < total_count) { vals += (iter_offset * sequence_length); alibi_offset = (alibi_offset + ((iter_offset / num_seq) % heads)) * sequence_length; mask_offset = mask_offset * sequence_length; int seq_id = 
iter_offset % num_seq; int seq_id4 = seq_id >> 2; int real_seq_id = seq_id + (num_seq == sequence_length ? 0 : sequence_length); int window_stride4 = (local_attention && (real_seq_id >> 2) > (window_size >> 2)) ? (real_seq_id >> 2) - (window_size >> 2) : 0; int window_stride = (local_attention && real_seq_id >= window_size) ? real_seq_id - window_size : -1; float max_val = minus_infinity; // if (lane == 0) printf("%d, %d: %d \n", wid, blockIdx.x, mask_offset); for (int i = 0; i < iterations; i++) { int data_id = i * (reduceWidth << 2) + (seq_lane << 2); if ((!triangular || ((data_id >> 2) <= seq_id4)) && (data_id >> 2) >= window_stride4 && data_id < sequence_length) { if ((sequence_length - data_id) >= 4) { low_data[i].x = data_id > window_stride ? __half2float(vals[data_id]) * layer_scale : minus_infinity; low_data[i].y = ((!triangular || ((data_id + 1) <= seq_id)) && (data_id + 1) > window_stride) ? __half2float(vals[data_id + 1]) * layer_scale : minus_infinity; high_data[i].x = ((!triangular || ((data_id + 2) <= seq_id)) && (data_id + 2) > window_stride) ? __half2float(vals[data_id + 2]) * layer_scale : minus_infinity; high_data[i].y = ((!triangular || ((data_id + 3) <= seq_id)) && (data_id + 3) > window_stride) ? __half2float(vals[data_id + 3]) * layer_scale : minus_infinity; if (alibi) { low_data[i].x = low_data[i].x + __half2float(alibi[data_id + alibi_offset]); low_data[i].y = low_data[i].y + __half2float(alibi[data_id + alibi_offset + 1]); high_data[i].x = high_data[i].x + __half2float(alibi[data_id + alibi_offset + 2]); high_data[i].y = high_data[i].y + __half2float(alibi[data_id + alibi_offset + 3]); } if (mask) { low_data[i].x += __half2float(mask[data_id + mask_offset]); low_data[i].y += __half2float(mask[data_id + mask_offset + 1]); high_data[i].x += __half2float(mask[data_id + mask_offset + 2]); high_data[i].y += __half2float(mask[data_id + mask_offset + 3]); } } else { low_data[i].x = data_id > window_stride ? 
__half2float(vals[data_id]) * layer_scale : minus_infinity; low_data[i].y = (((!triangular || (data_id + 1) <= seq_id) && (data_id + 1) > window_stride) && (data_id + 1) < sequence_length) ? __half2float(vals[data_id + 1]) * layer_scale : minus_infinity; high_data[i].x = (((!triangular || (data_id + 2) <= seq_id) && (data_id + 2) > window_stride) && (data_id + 2) < sequence_length) ? __half2float(vals[data_id + 2]) * layer_scale : minus_infinity; if (alibi) { low_data[i].x = low_data[i].x + __half2float(alibi[data_id + alibi_offset]); if ((data_id + 1) < sequence_length) low_data[i].y = low_data[i].y + __half2float(alibi[data_id + alibi_offset + 1]); if ((data_id + 2) < sequence_length) high_data[i].x = high_data[i].x + __half2float(alibi[data_id + alibi_offset + 2]); } high_data[i].y = minus_infinity; if (mask) { low_data[i].x += __half2float(mask[data_id + mask_offset]); if ((data_id + 1) < sequence_length) low_data[i].y += __half2float(mask[data_id + mask_offset + 1]); if ((data_id + 2) < sequence_length) high_data[i].x += __half2float(mask[data_id + mask_offset + 2]); } } // if(lane == 0) printf("%f , %d, %d \n", low_data[i].x, data_id, seq_id); max_val = (low_data[i].x > max_val ? low_data[i].x : max_val); max_val = (low_data[i].y > max_val ? low_data[i].y : max_val); max_val = (high_data[i].x > max_val ? high_data[i].x : max_val); max_val = (high_data[i].y > max_val ? high_data[i].y : max_val); } else { low_data[i].x = minus_infinity; low_data[i].y = minus_infinity; high_data[i].x = minus_infinity; high_data[i].y = minus_infinity; } } for (int i = 1; i < WARP_SIZE; i *= 2) { auto temp = g.shfl_xor(max_val, i); max_val = (temp > max_val ? temp : max_val); } if (reduceWidth > WARP_SIZE) { if (lane == 0) partialSum[wid] = max_val; b.sync(); if (lane < warp_num) max_val = partialSum[lane]; b.sync(); for (int i = 1; i < reduce_blocks; i *= 2) { auto temp = g.shfl_xor(max_val, i); max_val = (temp > max_val ? 
temp : max_val); } max_val = g.shfl(max_val, threadIdx.x / WARP_SIZE); } float sum = 0; for (int i = 0; i < iterations; i++) { low_data[i].x = __expf(low_data[i].x - max_val); low_data[i].y = __expf(low_data[i].y - max_val); high_data[i].x = __expf(high_data[i].x - max_val); high_data[i].y = __expf(high_data[i].y - max_val); sum += (low_data[i].x + low_data[i].y + high_data[i].x + high_data[i].y); } for (int i = 1; i < WARP_SIZE; i *= 2) sum += g.shfl_xor(sum, i); if (reduceWidth > WARP_SIZE) { if (lane == 0) partialSum[wid] = sum; b.sync(); if (lane < warp_num) sum = partialSum[lane]; b.sync(); for (int i = 1; i < reduce_blocks; i *= 2) { sum += g.shfl_xor(sum, i); } sum = g.shfl(sum, threadIdx.x / WARP_SIZE); } sum += 1e-6; for (int i = 0; i < iterations; i++) { int data_id = i * (reduceWidth << 2) + (seq_lane << 2); if (data_id < sequence_length) { if ((sequence_length - data_id) >= 4) { vals[data_id] = __float2half(low_data[i].x / sum); vals[data_id + 1] = __float2half(low_data[i].y / sum); vals[data_id + 2] = __float2half(high_data[i].x / sum); vals[data_id + 3] = __float2half(high_data[i].y / sum); } else { vals[data_id] = __float2half(low_data[i].x / sum); if ((data_id + 1) < sequence_length) vals[data_id + 1] = __float2half(low_data[i].y / sum); if ((data_id + 2) < sequence_length) vals[data_id + 2] = __float2half(high_data[i].x / sum); } } } } #endif } __global__ void attn_softmax_v2(float* vals, float* attn_mask, float* alibi, float layer_scale, bool triangular, bool recompute, bool local_attention, int window_size, int total_count, int heads, int sequence_length, int num_seq, int head_offset, int mask_stride, int mp_size, int iterations, int reduceWidth) { cg::thread_block b = cg::this_thread_block(); cg::thread_block_tile<WARP_SIZE> g = cg::tiled_partition<WARP_SIZE>(b); float4 data[MAX_REG_SIZE]; int wid = threadIdx.x >> 5; int lane = threadIdx.x & 0x1f; int warp_num = blockDim.x >> 5; int reduce_blocks = reduceWidth >> 5; int seq_lane = threadIdx.x % 
reduceWidth; __shared__ float partialSum[MAX_WARP_NUM]; int iter_offset = blockIdx.x * (warp_num / reduce_blocks) + (wid / reduce_blocks); if (iter_offset < total_count) { vals += (iter_offset * sequence_length); int batch_idx = iter_offset / (num_seq * heads); int alibi_offset = batch_idx * heads * mp_size + head_offset; int mask_offset = batch_idx * mask_stride + (iter_offset % mask_stride); mask_offset = mask_offset * sequence_length; int seq_id = iter_offset % num_seq; int seq_id4 = seq_id >> 2; int real_seq_id = seq_id + (num_seq == sequence_length ? 0 : sequence_length); int window_stride4 = (local_attention && (real_seq_id >> 2) > (window_size >> 2)) ? (real_seq_id >> 2) - (window_size >> 2) : 0; int window_stride = (local_attention && real_seq_id >= window_size) ? real_seq_id - window_size : -1; float max_val = minus_infinity; for (int i = 0; i < iterations; i++) { int data_id = i * (reduceWidth << 2) + (seq_lane << 2); if ((!triangular || ((data_id >> 2) <= seq_id4)) && (data_id >> 2) >= window_stride4 && data_id < sequence_length) { if ((sequence_length - data_id) >= 4) { data[i].x = (data_id > window_stride ? vals[data_id] : minus_infinity); data[i].y = ((!triangular || ((data_id + 1) <= seq_id)) && (data_id + 1) > window_stride) ? vals[data_id + 1] : minus_infinity; data[i].z = ((!triangular || ((data_id + 2) <= seq_id)) && (data_id + 2) > window_stride) ? vals[data_id + 2] : minus_infinity; data[i].w = ((!triangular || ((data_id + 3) <= seq_id)) && (data_id + 3) > window_stride) ? vals[data_id + 3] : minus_infinity; if (attn_mask) { data[i].x += attn_mask[data_id + mask_offset]; data[i].y += attn_mask[data_id + mask_offset + 1]; data[i].z += attn_mask[data_id + mask_offset + 2]; data[i].w += attn_mask[data_id + mask_offset + 3]; } } else { data[i].x = data_id > window_stride ? vals[data_id] : minus_infinity; data[i].y = (((!triangular || (data_id + 1) <= seq_id)) && (data_id + 1) > window_stride && (data_id + 1) < sequence_length) ? 
(vals[data_id + 1]) : minus_infinity; data[i].z = (((!triangular || (data_id + 2) <= seq_id)) && (data_id + 2) > window_stride && (data_id + 2) < sequence_length) ? (vals[data_id + 2]) : minus_infinity; data[i].w = minus_infinity; if (attn_mask) { data[i].x += attn_mask[data_id + mask_offset]; if ((data_id + 1) < sequence_length) data[i].y += attn_mask[data_id + mask_offset + 1]; if ((data_id + 2) < sequence_length) data[i].z += attn_mask[data_id + mask_offset + 2]; } } max_val = (data[i].x > max_val ? data[i].x : max_val); max_val = (data[i].y > max_val ? data[i].y : max_val); max_val = (data[i].z > max_val ? data[i].z : max_val); max_val = (data[i].w > max_val ? data[i].w : max_val); } else { data[i].x = minus_infinity; data[i].y = minus_infinity; data[i].z = minus_infinity; data[i].w = minus_infinity; } } for (int i = 1; i < WARP_SIZE; i *= 2) { auto temp = g.shfl_xor(max_val, i); max_val = (temp > max_val ? temp : max_val); } if (reduceWidth > WARP_SIZE) { if (lane == 0) partialSum[wid] = max_val; b.sync(); if (lane < warp_num) max_val = partialSum[lane]; b.sync(); for (int i = 1; i < reduce_blocks; i *= 2) { auto temp = g.shfl_xor(max_val, i); max_val = (temp > max_val ? 
temp : max_val); } max_val = g.shfl(max_val, threadIdx.x / WARP_SIZE); } float sum = 0; for (int i = 0; i < iterations; i++) { data[i].x = __expf(data[i].x - max_val); data[i].y = __expf(data[i].y - max_val); data[i].z = __expf(data[i].z - max_val); data[i].w = __expf(data[i].w - max_val); sum += (data[i].x + data[i].y + data[i].z + data[i].w); } for (int i = 1; i < WARP_SIZE; i *= 2) sum += g.shfl_xor(sum, i); if (reduceWidth > WARP_SIZE) { if (lane == 0) partialSum[wid] = sum; b.sync(); if (lane < warp_num) sum = partialSum[lane]; b.sync(); for (int i = 1; i < reduce_blocks; i *= 2) { sum += g.shfl_xor(sum, i); } sum = g.shfl(sum, threadIdx.x / WARP_SIZE); } sum += 1e-6; for (int i = 0; i < iterations; i++) { int data_id = i * (reduceWidth << 2) + (seq_lane << 2); if (data_id < sequence_length) { if ((sequence_length - data_id) >= 4) { vals[data_id] = data[i].x / sum; vals[data_id + 1] = data[i].y / sum; vals[data_id + 2] = data[i].z / sum; vals[data_id + 3] = data[i].w / sum; } else { vals[data_id] = data[i].x / sum; if ((data_id + 1) < sequence_length) vals[data_id + 1] = data[i].y / sum; if ((data_id + 2) < sequence_length) vals[data_id + 2] = data[i].z / sum; } } } } } template <typename T> void launch_attn_softmax_v2(T* vals, T* mask, T* alibi, float layer_scale, bool triangular, bool recompute, bool local_attention, int window_size, int batch_size, int heads, int num_seq, int sequence_length, int head_offset, int mask_stride, int mp_size, hipStream_t stream) { int total_count = batch_size * heads * num_seq; int warp_num = ATTN_THREADS / WARP_SIZE; dim3 grid_dim((total_count - 1) / (warp_num / ((sequence_length - 1) / ATTN_THREADS + 1)) + 1); dim3 block_dim(ATTN_THREADS); const int reduce_width = ((sequence_length - 1) / ATTN_THREADS + 1) * WARP_SIZE; const int iterations = (sequence_length - 1) / (reduce_width << 2) + 1; if (sequence_length <= 32768) hipLaunchKernelGGL(( attn_softmax_v2), dim3(grid_dim), dim3(block_dim), 0, stream, vals, mask, alibi, 
layer_scale, triangular, recompute, local_attention, window_size, total_count, heads, sequence_length, num_seq, head_offset, mask_stride, mp_size, iterations, reduce_width); else throw std::runtime_error("Unsupport Seq_Length!"); } template void launch_attn_softmax_v2(float* vals, float* mask, float* alibi, float layer_scale, bool triangular, bool recompute, bool local_attention, int window_size, int batch_size, int heads, int num_seq, int sequence_length, int head_offset, int mask_stride, int mp_size, hipStream_t stream); template void launch_attn_softmax_v2(__half* vals, __half* mask, __half* alibi, float layer_scale, bool triangular, bool recompute, bool local_attention, int window_size, int batch_size, int heads, int num_seq, int sequence_length, int head_offset, int mask_stride, int mp_size, hipStream_t stream);
98d307dc65d07a634e1ffeec7289ef236650774e.cu
/* Copyright 2022 The Microsoft DeepSpeed Team */ #include <limits> #include "inference_cuda_layers.h" #ifndef __HIP_PLATFORM_HCC__ #include <cuda_profiler_api.h> #endif #include <cstdio> #include <cstdlib> #include <ctime> #define ATTN_THREADS 256 #define MAX_REG_SIZE 8 #define minus_infinity -10000.0 void CheckCudaErrorAux(const char* file, unsigned line) { cudaError_t err = cudaGetLastError(); if (err == cudaSuccess) return; std::cerr << cudaGetErrorString(err) << "(" << err << ") at " << file << ":" << line << std::endl; throw std::runtime_error("CUDA ERROR!!!\n"); } #define CUDA_CHECK_ERROR() CheckCudaErrorAux(__FILE__, __LINE__) namespace cg = cooperative_groups; __global__ void attn_softmax_v2(__half* vals, __half* mask, __half* alibi, float layer_scale, bool triangular, bool recompute, bool local_attention, int window_size, int total_count, int heads, int sequence_length, int num_seq, int head_offset, int mask_stride, int mp_size, int iterations, int reduceWidth) { #ifdef HALF_PRECISION_AVAILABLE cg::thread_block b = cg::this_thread_block(); cg::thread_block_tile<WARP_SIZE> g = cg::tiled_partition<WARP_SIZE>(b); float2 low_data[MAX_REG_SIZE]; float2 high_data[MAX_REG_SIZE]; const __half zero_h = __float2half(0.f); int wid = threadIdx.x >> 5; int lane = threadIdx.x & 0x1f; int warp_num = blockDim.x >> 5; int reduce_blocks = reduceWidth >> 5; int seq_lane = threadIdx.x % reduceWidth; __shared__ float partialSum[MAX_WARP_NUM]; int iter_offset = blockIdx.x * (warp_num / reduce_blocks) + (wid / reduce_blocks); int batch_idx = iter_offset / (num_seq * heads); int alibi_offset = batch_idx * heads * mp_size + head_offset; int mask_offset = batch_idx * mask_stride + (iter_offset % mask_stride); if (iter_offset < total_count) { vals += (iter_offset * sequence_length); alibi_offset = (alibi_offset + ((iter_offset / num_seq) % heads)) * sequence_length; mask_offset = mask_offset * sequence_length; int seq_id = iter_offset % num_seq; int seq_id4 = seq_id >> 2; int 
real_seq_id = seq_id + (num_seq == sequence_length ? 0 : sequence_length); int window_stride4 = (local_attention && (real_seq_id >> 2) > (window_size >> 2)) ? (real_seq_id >> 2) - (window_size >> 2) : 0; int window_stride = (local_attention && real_seq_id >= window_size) ? real_seq_id - window_size : -1; float max_val = minus_infinity; // if (lane == 0) printf("%d, %d: %d \n", wid, blockIdx.x, mask_offset); for (int i = 0; i < iterations; i++) { int data_id = i * (reduceWidth << 2) + (seq_lane << 2); if ((!triangular || ((data_id >> 2) <= seq_id4)) && (data_id >> 2) >= window_stride4 && data_id < sequence_length) { if ((sequence_length - data_id) >= 4) { low_data[i].x = data_id > window_stride ? __half2float(vals[data_id]) * layer_scale : minus_infinity; low_data[i].y = ((!triangular || ((data_id + 1) <= seq_id)) && (data_id + 1) > window_stride) ? __half2float(vals[data_id + 1]) * layer_scale : minus_infinity; high_data[i].x = ((!triangular || ((data_id + 2) <= seq_id)) && (data_id + 2) > window_stride) ? __half2float(vals[data_id + 2]) * layer_scale : minus_infinity; high_data[i].y = ((!triangular || ((data_id + 3) <= seq_id)) && (data_id + 3) > window_stride) ? __half2float(vals[data_id + 3]) * layer_scale : minus_infinity; if (alibi) { low_data[i].x = low_data[i].x + __half2float(alibi[data_id + alibi_offset]); low_data[i].y = low_data[i].y + __half2float(alibi[data_id + alibi_offset + 1]); high_data[i].x = high_data[i].x + __half2float(alibi[data_id + alibi_offset + 2]); high_data[i].y = high_data[i].y + __half2float(alibi[data_id + alibi_offset + 3]); } if (mask) { low_data[i].x += __half2float(mask[data_id + mask_offset]); low_data[i].y += __half2float(mask[data_id + mask_offset + 1]); high_data[i].x += __half2float(mask[data_id + mask_offset + 2]); high_data[i].y += __half2float(mask[data_id + mask_offset + 3]); } } else { low_data[i].x = data_id > window_stride ? 
__half2float(vals[data_id]) * layer_scale : minus_infinity; low_data[i].y = (((!triangular || (data_id + 1) <= seq_id) && (data_id + 1) > window_stride) && (data_id + 1) < sequence_length) ? __half2float(vals[data_id + 1]) * layer_scale : minus_infinity; high_data[i].x = (((!triangular || (data_id + 2) <= seq_id) && (data_id + 2) > window_stride) && (data_id + 2) < sequence_length) ? __half2float(vals[data_id + 2]) * layer_scale : minus_infinity; if (alibi) { low_data[i].x = low_data[i].x + __half2float(alibi[data_id + alibi_offset]); if ((data_id + 1) < sequence_length) low_data[i].y = low_data[i].y + __half2float(alibi[data_id + alibi_offset + 1]); if ((data_id + 2) < sequence_length) high_data[i].x = high_data[i].x + __half2float(alibi[data_id + alibi_offset + 2]); } high_data[i].y = minus_infinity; if (mask) { low_data[i].x += __half2float(mask[data_id + mask_offset]); if ((data_id + 1) < sequence_length) low_data[i].y += __half2float(mask[data_id + mask_offset + 1]); if ((data_id + 2) < sequence_length) high_data[i].x += __half2float(mask[data_id + mask_offset + 2]); } } // if(lane == 0) printf("%f , %d, %d \n", low_data[i].x, data_id, seq_id); max_val = (low_data[i].x > max_val ? low_data[i].x : max_val); max_val = (low_data[i].y > max_val ? low_data[i].y : max_val); max_val = (high_data[i].x > max_val ? high_data[i].x : max_val); max_val = (high_data[i].y > max_val ? high_data[i].y : max_val); } else { low_data[i].x = minus_infinity; low_data[i].y = minus_infinity; high_data[i].x = minus_infinity; high_data[i].y = minus_infinity; } } for (int i = 1; i < WARP_SIZE; i *= 2) { auto temp = g.shfl_xor(max_val, i); max_val = (temp > max_val ? temp : max_val); } if (reduceWidth > WARP_SIZE) { if (lane == 0) partialSum[wid] = max_val; b.sync(); if (lane < warp_num) max_val = partialSum[lane]; b.sync(); for (int i = 1; i < reduce_blocks; i *= 2) { auto temp = g.shfl_xor(max_val, i); max_val = (temp > max_val ? 
temp : max_val); } max_val = g.shfl(max_val, threadIdx.x / WARP_SIZE); } float sum = 0; for (int i = 0; i < iterations; i++) { low_data[i].x = __expf(low_data[i].x - max_val); low_data[i].y = __expf(low_data[i].y - max_val); high_data[i].x = __expf(high_data[i].x - max_val); high_data[i].y = __expf(high_data[i].y - max_val); sum += (low_data[i].x + low_data[i].y + high_data[i].x + high_data[i].y); } for (int i = 1; i < WARP_SIZE; i *= 2) sum += g.shfl_xor(sum, i); if (reduceWidth > WARP_SIZE) { if (lane == 0) partialSum[wid] = sum; b.sync(); if (lane < warp_num) sum = partialSum[lane]; b.sync(); for (int i = 1; i < reduce_blocks; i *= 2) { sum += g.shfl_xor(sum, i); } sum = g.shfl(sum, threadIdx.x / WARP_SIZE); } sum += 1e-6; for (int i = 0; i < iterations; i++) { int data_id = i * (reduceWidth << 2) + (seq_lane << 2); if (data_id < sequence_length) { if ((sequence_length - data_id) >= 4) { vals[data_id] = __float2half(low_data[i].x / sum); vals[data_id + 1] = __float2half(low_data[i].y / sum); vals[data_id + 2] = __float2half(high_data[i].x / sum); vals[data_id + 3] = __float2half(high_data[i].y / sum); } else { vals[data_id] = __float2half(low_data[i].x / sum); if ((data_id + 1) < sequence_length) vals[data_id + 1] = __float2half(low_data[i].y / sum); if ((data_id + 2) < sequence_length) vals[data_id + 2] = __float2half(high_data[i].x / sum); } } } } #endif } __global__ void attn_softmax_v2(float* vals, float* attn_mask, float* alibi, float layer_scale, bool triangular, bool recompute, bool local_attention, int window_size, int total_count, int heads, int sequence_length, int num_seq, int head_offset, int mask_stride, int mp_size, int iterations, int reduceWidth) { cg::thread_block b = cg::this_thread_block(); cg::thread_block_tile<WARP_SIZE> g = cg::tiled_partition<WARP_SIZE>(b); float4 data[MAX_REG_SIZE]; int wid = threadIdx.x >> 5; int lane = threadIdx.x & 0x1f; int warp_num = blockDim.x >> 5; int reduce_blocks = reduceWidth >> 5; int seq_lane = threadIdx.x % 
reduceWidth; __shared__ float partialSum[MAX_WARP_NUM]; int iter_offset = blockIdx.x * (warp_num / reduce_blocks) + (wid / reduce_blocks); if (iter_offset < total_count) { vals += (iter_offset * sequence_length); int batch_idx = iter_offset / (num_seq * heads); int alibi_offset = batch_idx * heads * mp_size + head_offset; int mask_offset = batch_idx * mask_stride + (iter_offset % mask_stride); mask_offset = mask_offset * sequence_length; int seq_id = iter_offset % num_seq; int seq_id4 = seq_id >> 2; int real_seq_id = seq_id + (num_seq == sequence_length ? 0 : sequence_length); int window_stride4 = (local_attention && (real_seq_id >> 2) > (window_size >> 2)) ? (real_seq_id >> 2) - (window_size >> 2) : 0; int window_stride = (local_attention && real_seq_id >= window_size) ? real_seq_id - window_size : -1; float max_val = minus_infinity; for (int i = 0; i < iterations; i++) { int data_id = i * (reduceWidth << 2) + (seq_lane << 2); if ((!triangular || ((data_id >> 2) <= seq_id4)) && (data_id >> 2) >= window_stride4 && data_id < sequence_length) { if ((sequence_length - data_id) >= 4) { data[i].x = (data_id > window_stride ? vals[data_id] : minus_infinity); data[i].y = ((!triangular || ((data_id + 1) <= seq_id)) && (data_id + 1) > window_stride) ? vals[data_id + 1] : minus_infinity; data[i].z = ((!triangular || ((data_id + 2) <= seq_id)) && (data_id + 2) > window_stride) ? vals[data_id + 2] : minus_infinity; data[i].w = ((!triangular || ((data_id + 3) <= seq_id)) && (data_id + 3) > window_stride) ? vals[data_id + 3] : minus_infinity; if (attn_mask) { data[i].x += attn_mask[data_id + mask_offset]; data[i].y += attn_mask[data_id + mask_offset + 1]; data[i].z += attn_mask[data_id + mask_offset + 2]; data[i].w += attn_mask[data_id + mask_offset + 3]; } } else { data[i].x = data_id > window_stride ? vals[data_id] : minus_infinity; data[i].y = (((!triangular || (data_id + 1) <= seq_id)) && (data_id + 1) > window_stride && (data_id + 1) < sequence_length) ? 
(vals[data_id + 1]) : minus_infinity; data[i].z = (((!triangular || (data_id + 2) <= seq_id)) && (data_id + 2) > window_stride && (data_id + 2) < sequence_length) ? (vals[data_id + 2]) : minus_infinity; data[i].w = minus_infinity; if (attn_mask) { data[i].x += attn_mask[data_id + mask_offset]; if ((data_id + 1) < sequence_length) data[i].y += attn_mask[data_id + mask_offset + 1]; if ((data_id + 2) < sequence_length) data[i].z += attn_mask[data_id + mask_offset + 2]; } } max_val = (data[i].x > max_val ? data[i].x : max_val); max_val = (data[i].y > max_val ? data[i].y : max_val); max_val = (data[i].z > max_val ? data[i].z : max_val); max_val = (data[i].w > max_val ? data[i].w : max_val); } else { data[i].x = minus_infinity; data[i].y = minus_infinity; data[i].z = minus_infinity; data[i].w = minus_infinity; } } for (int i = 1; i < WARP_SIZE; i *= 2) { auto temp = g.shfl_xor(max_val, i); max_val = (temp > max_val ? temp : max_val); } if (reduceWidth > WARP_SIZE) { if (lane == 0) partialSum[wid] = max_val; b.sync(); if (lane < warp_num) max_val = partialSum[lane]; b.sync(); for (int i = 1; i < reduce_blocks; i *= 2) { auto temp = g.shfl_xor(max_val, i); max_val = (temp > max_val ? 
temp : max_val); } max_val = g.shfl(max_val, threadIdx.x / WARP_SIZE); } float sum = 0; for (int i = 0; i < iterations; i++) { data[i].x = __expf(data[i].x - max_val); data[i].y = __expf(data[i].y - max_val); data[i].z = __expf(data[i].z - max_val); data[i].w = __expf(data[i].w - max_val); sum += (data[i].x + data[i].y + data[i].z + data[i].w); } for (int i = 1; i < WARP_SIZE; i *= 2) sum += g.shfl_xor(sum, i); if (reduceWidth > WARP_SIZE) { if (lane == 0) partialSum[wid] = sum; b.sync(); if (lane < warp_num) sum = partialSum[lane]; b.sync(); for (int i = 1; i < reduce_blocks; i *= 2) { sum += g.shfl_xor(sum, i); } sum = g.shfl(sum, threadIdx.x / WARP_SIZE); } sum += 1e-6; for (int i = 0; i < iterations; i++) { int data_id = i * (reduceWidth << 2) + (seq_lane << 2); if (data_id < sequence_length) { if ((sequence_length - data_id) >= 4) { vals[data_id] = data[i].x / sum; vals[data_id + 1] = data[i].y / sum; vals[data_id + 2] = data[i].z / sum; vals[data_id + 3] = data[i].w / sum; } else { vals[data_id] = data[i].x / sum; if ((data_id + 1) < sequence_length) vals[data_id + 1] = data[i].y / sum; if ((data_id + 2) < sequence_length) vals[data_id + 2] = data[i].z / sum; } } } } } template <typename T> void launch_attn_softmax_v2(T* vals, T* mask, T* alibi, float layer_scale, bool triangular, bool recompute, bool local_attention, int window_size, int batch_size, int heads, int num_seq, int sequence_length, int head_offset, int mask_stride, int mp_size, cudaStream_t stream) { int total_count = batch_size * heads * num_seq; int warp_num = ATTN_THREADS / WARP_SIZE; dim3 grid_dim((total_count - 1) / (warp_num / ((sequence_length - 1) / ATTN_THREADS + 1)) + 1); dim3 block_dim(ATTN_THREADS); const int reduce_width = ((sequence_length - 1) / ATTN_THREADS + 1) * WARP_SIZE; const int iterations = (sequence_length - 1) / (reduce_width << 2) + 1; if (sequence_length <= 32768) attn_softmax_v2<<<grid_dim, block_dim, 0, stream>>>(vals, mask, alibi, layer_scale, triangular, recompute, 
local_attention, window_size, total_count, heads, sequence_length, num_seq, head_offset, mask_stride, mp_size, iterations, reduce_width); else throw std::runtime_error("Unsupport Seq_Length!"); } template void launch_attn_softmax_v2(float* vals, float* mask, float* alibi, float layer_scale, bool triangular, bool recompute, bool local_attention, int window_size, int batch_size, int heads, int num_seq, int sequence_length, int head_offset, int mask_stride, int mp_size, cudaStream_t stream); template void launch_attn_softmax_v2(__half* vals, __half* mask, __half* alibi, float layer_scale, bool triangular, bool recompute, bool local_attention, int window_size, int batch_size, int heads, int num_seq, int sequence_length, int head_offset, int mask_stride, int mp_size, cudaStream_t stream);
85c1705ef51c31233a5d11f240c5e3262e8a5c8b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "rotate_gpu_nms.hpp" #include <vector> #include <iostream> #include <cmath> #define CUDA_CHECK(condition) \ /* Code block avoids redefinition of hipError_t error */ \ do { \ hipError_t error = condition; \ if (error != hipSuccess) { \ std::cout << hipGetErrorString(error) << std::endl; \ } \ } while (0) #define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0)) int const threadsPerBlock = sizeof(unsigned long long) * 8; __device__ inline float trangle_area(float * a, float * b, float * c) { return ((a[0] - c[0]) * (b[1] - c[1]) - (a[1] - c[1]) * (b[0] - c[0]))/2.0; } __device__ inline float area(float * int_pts, int num_of_inter) { float area = 0.0; for(int i = 0;i < num_of_inter - 2;i++) { area += fabs(trangle_area(int_pts, int_pts + 2 * i + 2, int_pts + 2 * i + 4)); } return area; } __device__ inline void reorder_pts(float * int_pts, int num_of_inter) { if(num_of_inter > 0) { float center[2]; center[0] = 0.0; center[1] = 0.0; for(int i = 0;i < num_of_inter;i++) { center[0] += int_pts[2 * i]; center[1] += int_pts[2 * i + 1]; } center[0] /= num_of_inter; center[1] /= num_of_inter; float vs[16]; float v[2]; float d; for(int i = 0;i < num_of_inter;i++) { v[0] = int_pts[2 * i]-center[0]; v[1] = int_pts[2 * i + 1]-center[1]; d = sqrt(v[0] * v[0] + v[1] * v[1]); v[0] = v[0] / d; v[1] = v[1] / d; if(v[1] < 0) { v[0]= - 2 - v[0]; } vs[i] = v[0]; } float temp,tx,ty; int j; for(int i=1;i<num_of_inter;++i){ if(vs[i-1]>vs[i]){ temp = vs[i]; tx = int_pts[2*i]; ty = int_pts[2*i+1]; j=i; while(j>0&&vs[j-1]>temp){ vs[j] = vs[j-1]; int_pts[j*2] = int_pts[j*2-2]; int_pts[j*2+1] = int_pts[j*2-1]; j--; } vs[j] = temp; int_pts[j*2] = tx; int_pts[j*2+1] = ty; } } } } __device__ inline bool inter2line(float * pts1, float *pts2, int i, int j, float * temp_pts) { float a[2]; float b[2]; float c[2]; float d[2]; float area_abc, area_abd, area_cda, area_cdb; a[0] = pts1[2 * i]; a[1] = pts1[2 * i + 1]; b[0] 
= pts1[2 * ((i + 1) % 4)]; b[1] = pts1[2 * ((i + 1) % 4) + 1]; c[0] = pts2[2 * j]; c[1] = pts2[2 * j + 1]; d[0] = pts2[2 * ((j + 1) % 4)]; d[1] = pts2[2 * ((j + 1) % 4) + 1]; area_abc = trangle_area(a, b, c); area_abd = trangle_area(a, b, d); if(area_abc * area_abd >= 0) { return false; } area_cda = trangle_area(c, d, a); area_cdb = area_cda + area_abc - area_abd; if (area_cda * area_cdb >= 0) { return false; } float t = area_cda / (area_abd - area_abc); float dx = t * (b[0] - a[0]); float dy = t * (b[1] - a[1]); temp_pts[0] = a[0] + dx; temp_pts[1] = a[1] + dy; return true; } __device__ inline bool in_rect(float pt_x, float pt_y, float * pts) { float ab[2]; float ad[2]; float ap[2]; float abab; float abap; float adad; float adap; ab[0] = pts[2] - pts[0]; ab[1] = pts[3] - pts[1]; ad[0] = pts[6] - pts[0]; ad[1] = pts[7] - pts[1]; ap[0] = pt_x - pts[0]; ap[1] = pt_y - pts[1]; abab = ab[0] * ab[0] + ab[1] * ab[1]; abap = ab[0] * ap[0] + ab[1] * ap[1]; adad = ad[0] * ad[0] + ad[1] * ad[1]; adap = ad[0] * ap[0] + ad[1] * ap[1]; return abab >= abap and abap >= 0 and adad >= adap and adap >= 0; } __device__ inline int inter_pts(float * pts1, float * pts2, float * int_pts) { int num_of_inter = 0; for(int i = 0;i < 4;i++) { if(in_rect(pts1[2 * i], pts1[2 * i + 1], pts2)) { int_pts[num_of_inter * 2] = pts1[2 * i]; int_pts[num_of_inter * 2 + 1] = pts1[2 * i + 1]; num_of_inter++; } if(in_rect(pts2[2 * i], pts2[2 * i + 1], pts1)) { int_pts[num_of_inter * 2] = pts2[2 * i]; int_pts[num_of_inter * 2 + 1] = pts2[2 * i + 1]; num_of_inter++; } } float temp_pts[2]; for(int i = 0;i < 4;i++) { for(int j = 0;j < 4;j++) { bool has_pts = inter2line(pts1, pts2, i, j, temp_pts); if(has_pts) { int_pts[num_of_inter * 2] = temp_pts[0]; int_pts[num_of_inter * 2 + 1] = temp_pts[1]; num_of_inter++; } } } return num_of_inter; } __device__ inline void convert_region(float * pts , float const * const region) { float angle = region[4]; float a_cos = cos(angle/180.0*3.1415926535); float a_sin = 
sin(angle/180.0*3.1415926535); float ctr_x = region[0]; float ctr_y = region[1]; float w = region[2]; float h = region[3]; float pts_x[4]; float pts_y[4]; pts_x[0] = - w / 2; pts_x[1] = w / 2; pts_x[2] = w / 2; pts_x[3] = - w / 2; pts_y[0] = - h / 2; pts_y[1] = - h / 2; pts_y[2] = h / 2; pts_y[3] = h / 2; for(int i = 0;i < 4;i++) { pts[7 - 2 * i - 1] = a_cos * pts_x[i] - a_sin * pts_y[i] + ctr_x; pts[7 - 2 * i] = a_sin * pts_x[i] + a_cos * pts_y[i] + ctr_y; } } __device__ inline float inter(float const * const region1, float const * const region2) { float pts1[8]; float pts2[8]; float int_pts[16]; int num_of_inter; convert_region(pts1, region1); convert_region(pts2, region2); num_of_inter = inter_pts(pts1, pts2, int_pts); reorder_pts(int_pts, num_of_inter); return area(int_pts, num_of_inter); } __device__ inline float devRotateIoU(float const * const region1, float const * const region2) { float area1 = region1[2] * region1[3]; float area2 = region2[2] * region2[3]; float area_inter = inter(region1, region2); return area_inter / (area1 + area2 - area_inter); } __global__ void rotate_nms_kernel(const int n_boxes, const float nms_overlap_thresh, const float *dev_boxes, unsigned long long *dev_mask) { const int row_start = blockIdx.y; const int col_start = blockIdx.x; // if (row_start > col_start) return; const int row_size = min(n_boxes - row_start * threadsPerBlock, threadsPerBlock); const int col_size = min(n_boxes - col_start * threadsPerBlock, threadsPerBlock); __shared__ float block_boxes[threadsPerBlock * 6]; if (threadIdx.x < col_size) { block_boxes[threadIdx.x * 6 + 0] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 6 + 0]; block_boxes[threadIdx.x * 6 + 1] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 6 + 1]; block_boxes[threadIdx.x * 6 + 2] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 6 + 2]; block_boxes[threadIdx.x * 6 + 3] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 6 + 3]; block_boxes[threadIdx.x * 6 + 4] = 
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 6 + 4]; block_boxes[threadIdx.x * 6 + 5] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 6 + 5]; } __syncthreads(); if (threadIdx.x < row_size) { const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x; const float *cur_box = dev_boxes + cur_box_idx * 6; int i = 0; unsigned long long t = 0; int start = 0; if (row_start == col_start) { start = threadIdx.x + 1; } for (i = start; i < col_size; i++) { if (devRotateIoU(cur_box, block_boxes + i * 6) > nms_overlap_thresh) { t |= 1ULL << i; } } const int col_blocks = DIVUP(n_boxes, threadsPerBlock); dev_mask[cur_box_idx * col_blocks + col_start] = t; } } void _set_device(int device_id) { int current_device; CUDA_CHECK(hipGetDevice(&current_device)); if (current_device == device_id) { return; } // The call to hipSetDevice must come before any calls to Get, which // may perform initialization using the GPU. CUDA_CHECK(hipSetDevice(device_id)); } void _rotate_nms(int* keep_out, int* num_out, const float* boxes_host, int boxes_num, int boxes_dim, float nms_overlap_thresh, int device_id) { _set_device(device_id); float* boxes_dev = NULL; unsigned long long* mask_dev = NULL; const int col_blocks = DIVUP(boxes_num, threadsPerBlock); CUDA_CHECK(hipMalloc(&boxes_dev, boxes_num * boxes_dim * sizeof(float))); CUDA_CHECK(hipMemcpy(boxes_dev, boxes_host, boxes_num * boxes_dim * sizeof(float), hipMemcpyHostToDevice)); CUDA_CHECK(hipMalloc(&mask_dev, boxes_num * col_blocks * sizeof(unsigned long long))); dim3 blocks(DIVUP(boxes_num, threadsPerBlock), DIVUP(boxes_num, threadsPerBlock)); dim3 threads(threadsPerBlock); hipLaunchKernelGGL(( rotate_nms_kernel), dim3(blocks), dim3(threads), 0, 0, boxes_num, nms_overlap_thresh, boxes_dev, mask_dev); std::vector<unsigned long long> mask_host(boxes_num * col_blocks); CUDA_CHECK(hipMemcpy(&mask_host[0], mask_dev, sizeof(unsigned long long) * boxes_num * col_blocks, hipMemcpyDeviceToHost)); std::vector<unsigned long long> 
remv(col_blocks); memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks); int num_to_keep = 0; for (int i = 0; i < boxes_num; i++) { int nblock = i / threadsPerBlock; int inblock = i % threadsPerBlock; if (!(remv[nblock] & (1ULL << inblock))) { keep_out[num_to_keep++] = i; unsigned long long *p = &mask_host[0] + i * col_blocks; for (int j = nblock; j < col_blocks; j++) { remv[j] |= p[j]; } } } *num_out = num_to_keep; CUDA_CHECK(hipFree(boxes_dev)); CUDA_CHECK(hipFree(mask_dev)); }
85c1705ef51c31233a5d11f240c5e3262e8a5c8b.cu
#include "rotate_gpu_nms.hpp" #include <vector> #include <iostream> #include <cmath> #define CUDA_CHECK(condition) \ /* Code block avoids redefinition of cudaError_t error */ \ do { \ cudaError_t error = condition; \ if (error != cudaSuccess) { \ std::cout << cudaGetErrorString(error) << std::endl; \ } \ } while (0) #define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0)) int const threadsPerBlock = sizeof(unsigned long long) * 8; __device__ inline float trangle_area(float * a, float * b, float * c) { return ((a[0] - c[0]) * (b[1] - c[1]) - (a[1] - c[1]) * (b[0] - c[0]))/2.0; } __device__ inline float area(float * int_pts, int num_of_inter) { float area = 0.0; for(int i = 0;i < num_of_inter - 2;i++) { area += fabs(trangle_area(int_pts, int_pts + 2 * i + 2, int_pts + 2 * i + 4)); } return area; } __device__ inline void reorder_pts(float * int_pts, int num_of_inter) { if(num_of_inter > 0) { float center[2]; center[0] = 0.0; center[1] = 0.0; for(int i = 0;i < num_of_inter;i++) { center[0] += int_pts[2 * i]; center[1] += int_pts[2 * i + 1]; } center[0] /= num_of_inter; center[1] /= num_of_inter; float vs[16]; float v[2]; float d; for(int i = 0;i < num_of_inter;i++) { v[0] = int_pts[2 * i]-center[0]; v[1] = int_pts[2 * i + 1]-center[1]; d = sqrt(v[0] * v[0] + v[1] * v[1]); v[0] = v[0] / d; v[1] = v[1] / d; if(v[1] < 0) { v[0]= - 2 - v[0]; } vs[i] = v[0]; } float temp,tx,ty; int j; for(int i=1;i<num_of_inter;++i){ if(vs[i-1]>vs[i]){ temp = vs[i]; tx = int_pts[2*i]; ty = int_pts[2*i+1]; j=i; while(j>0&&vs[j-1]>temp){ vs[j] = vs[j-1]; int_pts[j*2] = int_pts[j*2-2]; int_pts[j*2+1] = int_pts[j*2-1]; j--; } vs[j] = temp; int_pts[j*2] = tx; int_pts[j*2+1] = ty; } } } } __device__ inline bool inter2line(float * pts1, float *pts2, int i, int j, float * temp_pts) { float a[2]; float b[2]; float c[2]; float d[2]; float area_abc, area_abd, area_cda, area_cdb; a[0] = pts1[2 * i]; a[1] = pts1[2 * i + 1]; b[0] = pts1[2 * ((i + 1) % 4)]; b[1] = pts1[2 * ((i + 1) % 4) + 1]; c[0] = pts2[2 * j]; 
c[1] = pts2[2 * j + 1]; d[0] = pts2[2 * ((j + 1) % 4)]; d[1] = pts2[2 * ((j + 1) % 4) + 1]; area_abc = trangle_area(a, b, c); area_abd = trangle_area(a, b, d); if(area_abc * area_abd >= 0) { return false; } area_cda = trangle_area(c, d, a); area_cdb = area_cda + area_abc - area_abd; if (area_cda * area_cdb >= 0) { return false; } float t = area_cda / (area_abd - area_abc); float dx = t * (b[0] - a[0]); float dy = t * (b[1] - a[1]); temp_pts[0] = a[0] + dx; temp_pts[1] = a[1] + dy; return true; } __device__ inline bool in_rect(float pt_x, float pt_y, float * pts) { float ab[2]; float ad[2]; float ap[2]; float abab; float abap; float adad; float adap; ab[0] = pts[2] - pts[0]; ab[1] = pts[3] - pts[1]; ad[0] = pts[6] - pts[0]; ad[1] = pts[7] - pts[1]; ap[0] = pt_x - pts[0]; ap[1] = pt_y - pts[1]; abab = ab[0] * ab[0] + ab[1] * ab[1]; abap = ab[0] * ap[0] + ab[1] * ap[1]; adad = ad[0] * ad[0] + ad[1] * ad[1]; adap = ad[0] * ap[0] + ad[1] * ap[1]; return abab >= abap and abap >= 0 and adad >= adap and adap >= 0; } __device__ inline int inter_pts(float * pts1, float * pts2, float * int_pts) { int num_of_inter = 0; for(int i = 0;i < 4;i++) { if(in_rect(pts1[2 * i], pts1[2 * i + 1], pts2)) { int_pts[num_of_inter * 2] = pts1[2 * i]; int_pts[num_of_inter * 2 + 1] = pts1[2 * i + 1]; num_of_inter++; } if(in_rect(pts2[2 * i], pts2[2 * i + 1], pts1)) { int_pts[num_of_inter * 2] = pts2[2 * i]; int_pts[num_of_inter * 2 + 1] = pts2[2 * i + 1]; num_of_inter++; } } float temp_pts[2]; for(int i = 0;i < 4;i++) { for(int j = 0;j < 4;j++) { bool has_pts = inter2line(pts1, pts2, i, j, temp_pts); if(has_pts) { int_pts[num_of_inter * 2] = temp_pts[0]; int_pts[num_of_inter * 2 + 1] = temp_pts[1]; num_of_inter++; } } } return num_of_inter; } __device__ inline void convert_region(float * pts , float const * const region) { float angle = region[4]; float a_cos = cos(angle/180.0*3.1415926535); float a_sin = sin(angle/180.0*3.1415926535); float ctr_x = region[0]; float ctr_y = region[1]; float w = 
region[2]; float h = region[3]; float pts_x[4]; float pts_y[4]; pts_x[0] = - w / 2; pts_x[1] = w / 2; pts_x[2] = w / 2; pts_x[3] = - w / 2; pts_y[0] = - h / 2; pts_y[1] = - h / 2; pts_y[2] = h / 2; pts_y[3] = h / 2; for(int i = 0;i < 4;i++) { pts[7 - 2 * i - 1] = a_cos * pts_x[i] - a_sin * pts_y[i] + ctr_x; pts[7 - 2 * i] = a_sin * pts_x[i] + a_cos * pts_y[i] + ctr_y; } } __device__ inline float inter(float const * const region1, float const * const region2) { float pts1[8]; float pts2[8]; float int_pts[16]; int num_of_inter; convert_region(pts1, region1); convert_region(pts2, region2); num_of_inter = inter_pts(pts1, pts2, int_pts); reorder_pts(int_pts, num_of_inter); return area(int_pts, num_of_inter); } __device__ inline float devRotateIoU(float const * const region1, float const * const region2) { float area1 = region1[2] * region1[3]; float area2 = region2[2] * region2[3]; float area_inter = inter(region1, region2); return area_inter / (area1 + area2 - area_inter); } __global__ void rotate_nms_kernel(const int n_boxes, const float nms_overlap_thresh, const float *dev_boxes, unsigned long long *dev_mask) { const int row_start = blockIdx.y; const int col_start = blockIdx.x; // if (row_start > col_start) return; const int row_size = min(n_boxes - row_start * threadsPerBlock, threadsPerBlock); const int col_size = min(n_boxes - col_start * threadsPerBlock, threadsPerBlock); __shared__ float block_boxes[threadsPerBlock * 6]; if (threadIdx.x < col_size) { block_boxes[threadIdx.x * 6 + 0] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 6 + 0]; block_boxes[threadIdx.x * 6 + 1] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 6 + 1]; block_boxes[threadIdx.x * 6 + 2] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 6 + 2]; block_boxes[threadIdx.x * 6 + 3] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 6 + 3]; block_boxes[threadIdx.x * 6 + 4] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 6 + 4]; block_boxes[threadIdx.x * 
6 + 5] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 6 + 5]; } __syncthreads(); if (threadIdx.x < row_size) { const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x; const float *cur_box = dev_boxes + cur_box_idx * 6; int i = 0; unsigned long long t = 0; int start = 0; if (row_start == col_start) { start = threadIdx.x + 1; } for (i = start; i < col_size; i++) { if (devRotateIoU(cur_box, block_boxes + i * 6) > nms_overlap_thresh) { t |= 1ULL << i; } } const int col_blocks = DIVUP(n_boxes, threadsPerBlock); dev_mask[cur_box_idx * col_blocks + col_start] = t; } } void _set_device(int device_id) { int current_device; CUDA_CHECK(cudaGetDevice(&current_device)); if (current_device == device_id) { return; } // The call to cudaSetDevice must come before any calls to Get, which // may perform initialization using the GPU. CUDA_CHECK(cudaSetDevice(device_id)); } void _rotate_nms(int* keep_out, int* num_out, const float* boxes_host, int boxes_num, int boxes_dim, float nms_overlap_thresh, int device_id) { _set_device(device_id); float* boxes_dev = NULL; unsigned long long* mask_dev = NULL; const int col_blocks = DIVUP(boxes_num, threadsPerBlock); CUDA_CHECK(cudaMalloc(&boxes_dev, boxes_num * boxes_dim * sizeof(float))); CUDA_CHECK(cudaMemcpy(boxes_dev, boxes_host, boxes_num * boxes_dim * sizeof(float), cudaMemcpyHostToDevice)); CUDA_CHECK(cudaMalloc(&mask_dev, boxes_num * col_blocks * sizeof(unsigned long long))); dim3 blocks(DIVUP(boxes_num, threadsPerBlock), DIVUP(boxes_num, threadsPerBlock)); dim3 threads(threadsPerBlock); rotate_nms_kernel<<<blocks, threads>>>(boxes_num, nms_overlap_thresh, boxes_dev, mask_dev); std::vector<unsigned long long> mask_host(boxes_num * col_blocks); CUDA_CHECK(cudaMemcpy(&mask_host[0], mask_dev, sizeof(unsigned long long) * boxes_num * col_blocks, cudaMemcpyDeviceToHost)); std::vector<unsigned long long> remv(col_blocks); memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks); int num_to_keep = 0; for (int i = 0; i 
< boxes_num; i++) { int nblock = i / threadsPerBlock; int inblock = i % threadsPerBlock; if (!(remv[nblock] & (1ULL << inblock))) { keep_out[num_to_keep++] = i; unsigned long long *p = &mask_host[0] + i * col_blocks; for (int j = nblock; j < col_blocks; j++) { remv[j] |= p[j]; } } } *num_out = num_to_keep; CUDA_CHECK(cudaFree(boxes_dev)); CUDA_CHECK(cudaFree(mask_dev)); }
8514c5f2c23ab8200c711454075bd5013be55aa5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // #include "support_kernels.cu0 #include <stdio.h> #include "../profiling/bonsai_timing.h" PROF_MODULE(dev_approximate_gravity); #include "node_specs.h" #ifdef WIN32 #define M_PI 3.14159265358979323846264338328 #endif #define WARP_SIZE2 5 #define WARP_SIZE 32 #if NCRIT > 2*WARP_SIZE #error "NCRIT in include/node_specs.h must be <= WARP_SIZE" #endif #define laneId (threadIdx.x & (WARP_SIZE - 1)) #define warpId (threadIdx.x >> WARP_SIZE2) #define BTEST(x) (-(int)(x)) #if 1 #define _QUADRUPOLE_ #endif /***********************************/ static __device__ __forceinline__ float Wkernel(const float q) { const float sigma = 8.0f/M_PI; const float qm = 1.0f - q; if (q < 0.5f) return sigma * (1.0f + (-6.0f)*q*q*qm); else if (q < 1.0f) return sigma * 2.0f*qm*qm*qm; return 0.0f; } static __device__ __forceinline__ float computePartialDensity( const float h, const float r2, const float mass) { const float r = sqrtf(r2); //Can we combine this with the force sqrt? const float hinv = 1.0f/h; //Can we precompute this and keep in register? 
const float q = r * hinv; const float hinv3 = hinv*hinv*hinv; // printf("ON DEV: Kernel: %f\tq: %f\tr: %f\thinv: %f\n", Wkernel(q), q, r, hinv); return mass * Wkernel(q) * hinv3; } static __device__ __forceinline__ void computeDensityAndNgb( const float r2, const float h, const float mass, float &density, float &nb) { if (h*h > r2) { nb++; density += computePartialDensity(h,r2,mass); } } static __device__ __forceinline__ float adjustH(const float h_old, const float nnb) { const float nbDesired = 42; const float f = 0.5f * (1.0f + cbrtf(nbDesired / nnb)); const float fScale = max(min(f, 1.2), 0.8); return (h_old*fScale); } /************************************/ /********* PREFIX SUM ***********/ /************************************/ __device__ __forceinline__ int inclusive_scan_warp(volatile int* prefix, int value) { prefix[laneId] = value; for (int i = 0; i < WARP_SIZE2; i++) { const int offset = 1 << i; prefix[laneId] += prefix[laneId - offset] & BTEST(laneId >= offset); } return prefix[WARP_SIZE-1]; } /* inclusive prefix sum for an array */ __device__ int inclusive_scan_array(int N, volatile int* prefix_in) { int y = inclusive_scan_warp(prefix_in, prefix_in[laneId]); if (N <= WARP_SIZE) return y; for (int p = WARP_SIZE; p < N; p += WARP_SIZE) { volatile int *prefix = &prefix_in[p]; const int y1 = inclusive_scan_warp(prefix, prefix[laneId]); prefix[laneId] += y; y += y1; } return y; } /**** binary scans ****/ __device__ __forceinline__ int lanemask_lt() { int mask; asm("mov.u32 %0, %lanemask_lt;" : "=r" (mask)); return mask; } __device__ int warp_exclusive_scan(const bool p, int &psum) { const unsigned int b = __ballot(p); psum = __popc(b & lanemask_lt()); return __popc(b); } __device__ int warp_exclusive_scan(const bool p) { const int b = __ballot(p); return __popc(b & lanemask_lt()); } /************************************/ /********* SEGMENTED SCAN ***********/ /************************************/ __device__ __forceinline__ int lanemask_le() { int mask; 
asm("mov.u32 %0, %lanemask_le;" : "=r" (mask)); return mask; } __device__ __forceinline__ int inclusive_segscan_warp( volatile int *shmem, const int packed_value, int &dist_block, int &nseg) { const int flag = packed_value < 0; const int mask = BTEST(flag); const int value = (mask & (-1-packed_value)) + (~mask & 1); const int flags = __ballot(flag); nseg += __popc (flags) ; dist_block = __clz(__brev(flags)); const int distance = min(__clz(flags & lanemask_le()) + laneId - 31, laneId); shmem[laneId] = value; for (int i = 0; i < WARP_SIZE2; i++) { const int offset = 1 << i; shmem[laneId] += shmem[laneId - offset] & BTEST(offset <= distance); } return shmem[WARP_SIZE - 1]; } /* does not work if segment size > WARP_SIZE */ __device__ __forceinline__ int inclusive_segscan_array(volatile int *shmem_in, const int N) { int dist, nseg = 0; int y = inclusive_segscan_warp(shmem_in, shmem_in[laneId], dist, nseg); if (N <= WARP_SIZE) return nseg; for (int p = WARP_SIZE; p < N; p += WARP_SIZE) { volatile int *shmem = shmem_in + p; int y1 = inclusive_segscan_warp(shmem, shmem[laneId], dist, nseg); shmem[laneId] += y & BTEST(laneId < dist); y = y1; } return nseg; } /**************************************/ /*************** Tree walk ************/ /**************************************/ template<int SHIFT> __forceinline__ __device__ int ACCS(const int i) { return (i & ((LMEM_STACK_SIZE << SHIFT) - 1))*blockDim.x + threadIdx.x; } texture<float4, 1, hipReadModeElementType> texNodeSize; texture<float4, 1, hipReadModeElementType> texNodeCenter; texture<float4, 1, hipReadModeElementType> texMultipole; texture<float4, 1, hipReadModeElementType> texBody; //This function is called from the my_cuda_rt file. 
I could not get the // references extern since g++ did not accept the texture objects const void* getTexturePointer(const char* name) { if(strcmp(name, "texNodeSize") == 0) return &texNodeSize; if(strcmp(name, "texNodeCenter") == 0) return &texNodeCenter; if(strcmp(name, "texMultipole") == 0) return &texMultipole; if(strcmp(name, "texBody") == 0) return &texBody; return NULL; } /*********** Forces *************/ __device__ __forceinline__ float4 add_acc( float4 acc, const float4 pos, const float massj, const float3 posj, const float eps2, float2 &density) { #if 1 /* to test performance of a tree-walk */ const float3 dr = make_float3(posj.x - pos.x, posj.y - pos.y, posj.z - pos.z); const float r2 = dr.x*dr.x + dr.y*dr.y + dr.z*dr.z + eps2; const float rinv = rsqrtf(r2); const float rinv2 = rinv*rinv; const float mrinv = massj * rinv; const float mrinv3 = mrinv * rinv2; acc.w -= mrinv; acc.x += mrinv3 * dr.x; acc.y += mrinv3 * dr.y; acc.z += mrinv3 * dr.z; #endif //Density computeDensityAndNgb(r2,pos.w,massj,density.x,density.y); return acc; } __device__ float4 get_D04(float ds2, int selfGrav = 1) { #if 1 float ids = rsqrtf(ds2); //Does not work with zero-softening // if(isnan(ids)) ids = 0; //This does work with zero-softening, few percent performance drop //float ids = (1.0f / sqrtf(ds2)) * selfGrav; Slower in Pre CUDA4.1 ids *= selfGrav; #else const float ids = (ds2 > 0.0f) ? 
rsqrtf(ds2) : 0.0f; #endif const float ids2 = ids*ids; float ids3 = ids *ids2; float ids5 = ids3*ids2; float ids7 = ids5*ids2; return make_float4(ids, -ids3, +3.0f*ids5, -15.0f*ids7); } // 9 flops __device__ __forceinline__ float4 add_acc( float4 acc, const float4 pos, const float mass, const float3 com, const float4 Q0, const float4 Q1, float eps2, float2 &density) { const float3 dr = make_float3(pos.x - com.x, pos.y - com.y, pos.z - com.z); const float r2 = dr.x*dr.x + dr.y*dr.y + dr.z*dr.z + eps2; const float rinv = rsqrtf(r2); const float rinv2 = rinv *rinv; const float mrinv = mass*rinv; const float mrinv3 = rinv2*mrinv; const float mrinv5 = rinv2*mrinv3; const float mrinv7 = rinv2*mrinv5; // 16 //Density computeDensityAndNgb(r2,pos.w,mass,density.x,density.y); #if 0 float D0 = mrinv; float D1 = -mrinv3; float D2 = mrinv5*( 3.0f); float D3 = -mrinv7*(15.0f); float oct_q11 = Q0.x; float oct_q22 = Q0.y; float oct_q33 = Q0.z; float oct_q12 = Q1.x; float oct_q13 = Q1.y; float oct_q23 = Q1.z; float Qii = oct_q11 + oct_q22 + oct_q33; float QijRiRj = (oct_q11*dr.x*dr.x + oct_q22*dr.y*dr.y + oct_q33*dr.z*dr.z) + 2.0f*(oct_q12*dr.y*dr.x + oct_q13*dr.z*dr.x + oct_q23*dr.y*dr.z); acc.w -= D0 + 0.5f*D1*Qii + 0.5f*D2*QijRiRj; float C01a = D1 + 0.5f*D2*Qii + 0.5f*D3*QijRiRj; acc.x += C01a*dr.x + D2*(oct_q11*dr.x + oct_q12*dr.y + oct_q13*dr.z); acc.y += C01a*dr.y + D2*(oct_q12*dr.x + oct_q22*dr.y + oct_q23*dr.z); acc.z += C01a*dr.z + D2*(oct_q13*dr.x + oct_q23*dr.y + oct_q33*dr.z); #else float D0 = mrinv; float D1 = -mrinv3; float D2 = mrinv5*( 3.0f); float D3 = -mrinv7*(15.0f); // 3 const float q11 = Q0.x; const float q22 = Q0.y; const float q33 = Q0.z; const float q12 = Q1.x; const float q13 = Q1.y; const float q23 = Q1.z; const float q = q11 + q22 + q33; const float3 qR = make_float3( q11*dr.x + q12*dr.y + q13*dr.z, q12*dr.x + q22*dr.y + q23*dr.z, q13*dr.x + q23*dr.y + q33*dr.z); const float qRR = qR.x*dr.x + qR.y*dr.y + qR.z*dr.z; // 22 acc.w -= D0 + 0.5f*(D1*q + 
D2*qRR); float C = D1 + 0.5f*(D2*q + D3*qRR); acc.x += C*dr.x + D2*qR.x; acc.y += C*dr.y + D2*qR.y; acc.z += C*dr.z + D2*qR.z; // 23 #endif /* total: 16 + 3 + 22 + 23 = 64 flops */ return acc; } /*******************************/ /****** Opening criterion ******/ /*******************************/ //Improved Barnes Hut criterium __device__ bool split_node_grav_impbh( const float4 nodeCOM, const float4 groupCenter, const float4 groupSize) { //Compute the distance between the group and the cell float3 dr = make_float3( fabsf(groupCenter.x - nodeCOM.x) - (groupSize.x), fabsf(groupCenter.y - nodeCOM.y) - (groupSize.y), fabsf(groupCenter.z - nodeCOM.z) - (groupSize.z) ); dr.x += fabsf(dr.x); dr.x *= 0.5f; dr.y += fabsf(dr.y); dr.y *= 0.5f; dr.z += fabsf(dr.z); dr.z *= 0.5f; //Distance squared, no need to do sqrt since opening criteria has been squared const float ds2 = dr.x*dr.x + dr.y*dr.y + dr.z*dr.z; // return (ds2 <= fabsf(nodeCOM.w)); if (ds2 <= fabsf(nodeCOM.w)) return true; // if (fabs(ds2 - fabs(nodeCOM.w)) < 10e-04) return true; //Limited precision can result in round of errors. 
Use this as extra safe guard return false; } //Minimum distance __device__ bool split_node_grav_md( const float4 nodeCenter, const float4 nodeSize, const float4 groupCenter, const float4 groupSize) { //Compute the distance between the group and the cell float3 dr = {fabs(groupCenter.x - nodeCenter.x) - (groupSize.x + nodeSize.x), fabs(groupCenter.y - nodeCenter.y) - (groupSize.y + nodeSize.y), fabs(groupCenter.z - nodeCenter.z) - (groupSize.z + nodeSize.z)}; dr.x += fabs(dr.x); dr.x *= 0.5f; dr.y += fabs(dr.y); dr.y *= 0.5f; dr.z += fabs(dr.z); dr.z *= 0.5f; //Distance squared, no need to do sqrt since opening criteria has been squared float ds2 = dr.x*dr.x + dr.y*dr.y + dr.z*dr.z; return (ds2 <= fabs(nodeCenter.w)); } #define TEXTURES /*******************************/ /****** Force tree-walk ******/ /*******************************/ template<const int SHIFT, const int BLOCKDIM2, const int NI> __device__ #if 0 /* __noinline__ crashes the kernel when compled with ABI */ __noinline__ #else __forceinline__ #endif void approximate_gravity( float4 pos_i[NI], real4 group_pos, float eps2, uint2 node_begend, real4 *multipole_data, real4 *body_pos, volatile int *shmem, int *lmem, int &ngb, int &apprCount, int &direCount, volatile float4 *boxSizeInfo, float4 groupSize, volatile float4 *boxCenterInfo, float group_eps, real4 acc_i[NI], float2 dens_i[NI]) { /*********** shared memory distribution **********/ // begin, end, size // ----------------------- const int stack_sz = (LMEM_STACK_SIZE << SHIFT) << BLOCKDIM2; /* stack allocated per thread-block */ const int nWarps2 = BLOCKDIM2 - WARP_SIZE2; int *approxL = lmem + stack_sz + (LMEM_EXTRA_SIZE >> nWarps2) * warpId; volatile int *directS = shmem; // 0*DIM, 1*DIM, 1*DIM volatile int *nodesS = directS + WARP_SIZE; // 1*DIM, 10*DIM, 9*DIM volatile int *prefix = nodesS + WARP_SIZE*8; // 9*DIM, 10*DIM, 1*DIM const int NJMAX = WARP_SIZE*3; int *body_list = (int* )&nodesS [WARP_SIZE]; // 2*DIM, 5*DIM, 2*DIM float *sh_mass = (float* 
)&body_list[NJMAX]; // 5*DIM, 6*DIM, 1*DIM float3 *sh_pos = (float3*)&sh_mass [WARP_SIZE]; // 6*DIM, 9*DIM 3*DIM volatile int *approxM = approxL; volatile int *directM = directS; volatile int * nodesM = nodesS; /*********** stack **********/ int *nstack = lmem; /*********** begin tree-walk **********/ int n_approx = 0; int n_direct = 0; #pragma unroll 1 for (int i = 0; i < NI; i++) dens_i[i] = make_float2(0,0); for (int root_node = node_begend.x; root_node < node_begend.y; root_node += WARP_SIZE) { int n_nodes0 = min(node_begend.y - root_node, WARP_SIZE); int n_stack0 = 0; int n_stack_pre = 0; { nstack[ACCS<SHIFT>(n_stack0)] = root_node + laneId; n_stack0++; } /*********** walk each level **********/ while (n_nodes0 > 0) { int n_nodes1 = 0; int n_offset = 0; int n_stack1 = n_stack0; int c_stack0 = n_stack_pre; /*********** walk a level **********/ while(c_stack0 < n_stack0) { /*** **** --> fetch the list of nodes rom LMEM ***/ bool use_node = laneId < n_nodes0; #if 1 { prefix[laneId] = nstack[ACCS<SHIFT>(c_stack0)]; c_stack0++; } const int node = prefix[min(laneId, n_nodes0 - 1)]; #else /* eg: seems to work, but I do not remember if that will *always* work */ int node; { node = nstack[ACCS<SHIFT>(c_stack0)]; c_stack0++; } #endif #if 0 /* if uncommented, give same results, see below */ if (blockIdx.x == 0 && warpId == 0) printf("laneId = %d node= %d \n", laneId, node); #endif #if 0 if(n_nodes0 > 0){ //Work around pre 4.1 compiler bug n_nodes0 -= WARP_SIZE; } #else n_nodes0 -= WARP_SIZE; #endif /*** **** --> process each of the nodes in the list in parallel ***/ #ifndef TEXTURES float4 nodeSize = boxSizeInfo[node]; //Fetch the size of the box. Size.w = child info float4 node_pos = boxCenterInfo[node]; //Fetch the center of the box. 
center.w = opening info #else float4 nodeSize = tex1Dfetch(texNodeSize, node); float4 node_pos = tex1Dfetch(texNodeCenter, node); #endif int node_data = __float_as_int(nodeSize.w); //Check if a cell has to be opened #ifdef IMPBH //Improved barnes-hut method #ifndef TEXTURES float4 nodeCOM = multipole_data[node*3]; #else float4 nodeCOM = tex1Dfetch(texMultipole,node*3); #endif nodeCOM.w = node_pos.w; bool split = split_node_grav_impbh(nodeCOM, group_pos, groupSize); #else bool split = split_node_grav_md(node_pos, nodeSize, group_pos, groupSize); #endif bool leaf = node_pos.w <= 0; //Small AND equal incase of a 1 particle cell //Check if it is a leaf // split = true; //If node_data = 0xF it means the remote process decided split was not required //important for traversal of LET trees!! Since numerical difference between CPU and GPU //can cause different outcomes on the split test if(node_data == 0xFFFFFFFF) split = false; bool flag = (split && !leaf) && use_node; //Flag = use_node + split + not_a_leaf;Use only non_leaf nodes that are to be split uint mask = BTEST(flag); // mask = #FFFFFFFF if use_node+split+not_a_leaf==true, otherwise zero int child = node_data & 0x0FFFFFFF; //Index to the first child of the node int nchild = (((node_data & 0xF0000000) >> 28)) & mask; //The number of children this node has /*** **** --> calculate prefix ***/ int n_total = inclusive_scan_warp(prefix, nchild); // inclusive scan to compute memory offset of each child (return total # of children) int offset = prefix[laneId]; offset += n_offset - nchild; // convert inclusive into exclusive scan for referencing purpose for (int i = n_offset; i < n_offset + n_total; i += WARP_SIZE) //nullify part of the array that will be filled with children nodesM[laneId + i] = 0; //but do not touch those parts which has already been filled #if 0 /* the following gives different result than then one in else */ /* the results become the same if I uncomment printf above */ if (flag) { nodesM[offset] = 
child; if (nodesM[offset + 1] == 0) nodesM[offset + 1] = child + 1; if (nodesM[offset + 2] == 0) nodesM[offset + 2] = child + 2; if (nodesM[offset + 3] == 0) nodesM[offset + 3] = child + 3; if (nodesM[offset + 4] == 0) nodesM[offset + 4] = child + 4; if (nodesM[offset + 5] == 0) nodesM[offset + 5] = child + 5; if (nodesM[offset + 6] == 0) nodesM[offset + 6] = child + 6; if (nodesM[offset + 7] == 0) nodesM[offset + 7] = child + 7; } #elif0 if (flag) nodesM[offset] = child; //Thread with the node that is about to be split //writes the first child in the array of nodes /*** in the following 8 lines, we calculate indexes of all the children that have to be walked from the index of the first child***/ if (flag && nodesM[offset + 1] == 0) nodesM[offset + 1] = child + 1; if (flag && nodesM[offset + 2] == 0) nodesM[offset + 2] = child + 2; if (flag && nodesM[offset + 3] == 0) nodesM[offset + 3] = child + 3; if (flag && nodesM[offset + 4] == 0) nodesM[offset + 4] = child + 4; if (flag && nodesM[offset + 5] == 0) nodesM[offset + 5] = child + 5; if (flag && nodesM[offset + 6] == 0) nodesM[offset + 6] = child + 6; if (flag && nodesM[offset + 7] == 0) nodesM[offset + 7] = child + 7; #else //This code does not require reading of nodesM before writing thereby preventing //possible synchronization , not completed writes , problems if(flag) { for(int i=0; i < nchild; i++) { nodesM[offset + i] = child + i; } } #endif n_offset += n_total; //Increase the offset in the array by the number of newly added nodes /*** **** --> save list of nodes to LMEM ***/ /*** if half of shared memory or more is filled with the the nodes, dump these into slowmem stack ***/ while(n_offset >= WARP_SIZE) { n_offset -= WARP_SIZE; const int offs1 = ACCS<SHIFT>(n_stack1); nstack[offs1] = nodesM[n_offset + laneId]; n_stack1++; n_nodes1 += WARP_SIZE; if((n_stack1 - c_stack0) >= (LMEM_STACK_SIZE << SHIFT)) { //We overwrote our current stack apprCount = -1; return; } } /******************************/ 
/******************************/ /***** EVALUATION *****/ /******************************/ /******************************/ #if 1 /***********************************/ /****** APPROX ******/ /***********************************/ /* binary prefix sum */ flag = !split && use_node; n_total = warp_exclusive_scan(flag, offset); if (flag) approxM[n_approx + offset] = node; n_approx += n_total; while (n_approx >= WARP_SIZE) { n_approx -= WARP_SIZE; const int address = (approxM[n_approx + laneId] << 1) + approxM[n_approx + laneId]; #ifndef TEXTURES const float4 monopole = multipole_data[address ]; #else const float4 monopole = tex1Dfetch(texMultipole, address); #endif sh_mass[laneId] = monopole.w; sh_pos [laneId] = make_float3(monopole.x, monopole.y, monopole.z); #ifndef _QUADRUPOLE_ for (int i = 0; i < WARP_SIZE; i++) for (int k = 0; k < NI; k++) acc_i[k] = add_acc(acc_i[k], pos_i[k], sh_mass[i], sh_pos[i], eps2); #else for (int i = 0; i < WARP_SIZE; i++) { const int address = approxM[n_approx + i] * 3; const float4 Q0 = tex1Dfetch(texMultipole, address + 1); const float4 Q1 = tex1Dfetch(texMultipole, address + 2); for (int k = 0; k < NI; k++) acc_i[k] = add_acc(acc_i[k], pos_i[k], sh_mass[i], sh_pos[i], Q0, Q1, eps2, dens_i[k]); } #endif /* _QUADRUPOLE_ */ apprCount += WARP_SIZE*NI; } #endif #if 1 /***********************************/ /****** DIRECT ******/ /***********************************/ flag = split && leaf && use_node; //flag = split + leaf + use_node const int jbody = node_data & BODYMASK; //the first body in the leaf const int nbody = (((node_data & INVBMASK) >> LEAFBIT)+1) & BTEST(flag); //number of bodies in the leaf masked with the flag body_list[laneId] = directM[laneId]; //copy list of bodies from previous pass to body_list // step 1 /* binary prefix sum */ // step 1 int n_bodies = inclusive_scan_warp(prefix, nbody); // inclusive scan to compute memory offset for each body offset = prefix[laneId]; // step 2 if (flag) prefix[warp_exclusive_scan(flag)] = 
laneId; //with tid whose leaves have to be opened directM[laneId] = offset; //Store a copy of inclusive scan in direct offset -= nbody; //convert inclusive int oexclusive scan offset += 1; //add unity, since later prefix0[tid] == 0 used to check barrier int nl_pre = 0; //Number of leaves that have already been processed while (n_bodies > 0) { int nb = min(n_bodies, NJMAX - n_direct); //Make sure number of bides to be extracted does not exceed //the amount of allocated shared memory // step 0 //nullify part of the body_list that will be filled with bodies for (int i = n_direct; i < n_direct + nb; i += WARP_SIZE) //from the leaves that are being processed body_list[i + laneId] = 0; //step 1: if (flag && (directM[laneId] <= nb) && (offset > 0)) //make sure that the thread indeed carries a leaf body_list[n_direct + offset- 1] = -1-jbody; //whose bodies will be extracted // step 2: const int nl = inclusive_segscan_array(&body_list[n_direct], nb); nb = directM[prefix[nl_pre + nl - 1]]; // number of bodies stored in these leaves /***************************************************************************** * example of what is accomplished in steps 0-2 * * --------------------------- * * step 0: body_list = 000000000000000000000 * * step 1: body_list = n000m000p000000q00r00 n,m,.. = -1-jbody_n,m... * * step 2: body_list = n n+1 n+2 n+3 m m+1 m+2 m+3 p p+1 p+2 p+3 p+4 p+5 ... 
* *****************************************************************************/ n_bodies -= nb; //subtract from n_bodies number of bodies that have been extracted nl_pre += nl; //increase the number of leaves that where processed directM[laneId] -= nb; //subtract the number of extracted bodies in this pass offset = max(offset - nb, 0); n_direct += nb; //increase the number of bodies to be procssed while(n_direct >= WARP_SIZE) { n_direct -= WARP_SIZE; const float4 posj = body_pos[body_list[n_direct + laneId]]; #if 0 const float4 posj = tex1Dfetch(texBody, body_list[n_direct + tid]); #endif sh_mass[laneId] = posj.w; sh_pos [laneId] = make_float3(posj.x, posj.y, posj.z); for (int i = 0; i < WARP_SIZE; i++) for (int k = 0; k < NI; k++) acc_i[k] = add_acc(acc_i[k], pos_i[k], sh_mass[i], sh_pos[i], eps2, dens_i[k]); direCount += WARP_SIZE*NI; } } directM[laneId] = body_list[laneId]; #endif } //end lvl n_nodes1 += n_offset; if (n_offset > 0) { nstack[ACCS<SHIFT>(n_stack1)] = nodesM[laneId]; n_stack1++; if((n_stack1 - c_stack0) >= (LMEM_STACK_SIZE << SHIFT)) { //We overwrote our current stack apprCount = -1; return; } } /*** **** --> copy nodes1 to nodes0: done by reassigning the pointers ***/ n_nodes0 = n_nodes1; n_stack_pre = n_stack0; n_stack0 = n_stack1; }//end while levels }//end for if(n_approx > 0) { if (laneId < n_approx) { const int address = (approxM[laneId] << 1) + approxM[laneId]; #ifndef TEXTURES float4 monopole = multipole_data[address ]; float4 octopole0 = multipole_data[address + 1]; float4 octopole1 = multipole_data[address + 2]; #else float4 monopole = tex1Dfetch(texMultipole, address); float4 octopole0 = tex1Dfetch(texMultipole, address + 1); float4 octopole1 = tex1Dfetch(texMultipole, address + 2); #endif sh_mass[laneId] = monopole.w; sh_pos [laneId] = make_float3(monopole.x, monopole.y, monopole.z); } else { //Set non-active memory locations to zero sh_mass[laneId] = 0.0f; sh_pos [laneId] = make_float3(1.0e10f, 1.0e10f, 1.0e10f); } #ifndef 
_QUADRUPOLE_ for (int i = 0; i < WARP_SIZE; i++) for (int k = 0; k < NI; k++) acc_i[k] = add_acc(acc_i[k], pos_i[k], sh_mass[i], sh_pos[i],eps2, dens_i[k]); #else for (int i = 0; i < WARP_SIZE; i++) { float4 Q0, Q1; Q0 = Q1 = make_float4(0.0f, 0.0f, 0.0f, 0.0f); if (i < n_approx) { const int address = approxM[i] * 3; Q0 = tex1Dfetch(texMultipole, address + 1); Q1 = tex1Dfetch(texMultipole, address + 2); } for (int k = 0; k < NI; k++) acc_i[k] = add_acc(acc_i[k], pos_i[k], sh_mass[i], sh_pos[i], Q0, Q1, eps2, dens_i[k]); } #endif apprCount += WARP_SIZE*NI; } //if n_approx > 0 if(n_direct > 0) { if (laneId < n_direct) { const float4 posj = body_pos[directM[laneId]]; #if 0 const float4 posj = tex1Dfetch(texBody, direct[tid]); #endif sh_mass[laneId] = posj.w; sh_pos [laneId] = make_float3(posj.x, posj.y, posj.z); } else { sh_mass[laneId] = 0.0f; sh_pos [laneId] = make_float3(1.0e10f, 1.0e10f, 1.0e10f); } for (int i = 0; i < WARP_SIZE; i++) for (int k = 0; k < NI; k++) acc_i[k] = add_acc(acc_i[k], pos_i[k], sh_mass[i], sh_pos[i], eps2, dens_i[k]); direCount += WARP_SIZE*NI; } } extern "C" __global__ void #if 0 /* casues 164 bytes spill to lmem with NTHREAD = 128 */ __launch_bounds__(NTHREAD) #endif dev_approximate_gravity( const int n_active_groups, int n_bodies, float eps2, uint2 node_begend, int *active_groups, real4 *body_pos, real4 *multipole_data, float4 *acc_out, real4 *group_body_pos, //This can be different from body_pos int *ngb_out, int *active_inout, int2 *interactions, float4 *boxSizeInfo, float4 *groupSizeInfo, float4 *boxCenterInfo, float4 *groupCenterInfo, real4 *body_vel, int *MEM_BUF, float *body_h, float2 *body_dens_out) { const int blockDim2 = NTHREAD2; const int shMemSize = 10 * (1 << blockDim2); __shared__ int shmem_pool[shMemSize]; const int nWarps2 = blockDim2 - WARP_SIZE2; const int sh_offs = (shMemSize >> nWarps2) * warpId; volatile int *shmem = shmem_pool + sh_offs; /*********** check if this block is linked to a leaf **********/ int *lmem = 
&MEM_BUF[blockIdx.x*(LMEM_STACK_SIZE*blockDim.x + LMEM_EXTRA_SIZE)]; int bid = gridDim.x * blockIdx.y + blockIdx.x; while(true) { if(laneId == 0) { bid = atomicAdd(&active_inout[n_bodies], 1); shmem[0] = bid; } bid = shmem[0]; if (bid >= n_active_groups) return; int grpOffset = 0; /*********** set necessary thread constants **********/ #ifdef DO_BLOCK_TIMESTEP real4 curGroupSize = groupSizeInfo[active_groups[bid + grpOffset]]; #else real4 curGroupSize = groupSizeInfo[bid + grpOffset]; #endif const int groupData = __float_as_int(curGroupSize.w); const uint body_addr = groupData & CRITMASK; const uint nb_i = ((groupData & INVCMASK) >> CRITBIT) + 1; #ifdef DO_BLOCK_TIMESTEP real4 group_pos = groupCenterInfo[active_groups[bid + grpOffset]]; #else real4 group_pos = groupCenterInfo[bid + grpOffset]; #endif uint body_i[2]; int ni = nb_i <= WARP_SIZE ? 1 : 2; body_i[0] = body_addr + laneId%nb_i; body_i[1] = body_addr + WARP_SIZE + laneId%(nb_i - WARP_SIZE); float4 pos_i[2]; float4 acc_i[2]; float2 dens_i[2]; pos_i[0] = group_body_pos[body_i[0]]; pos_i[0].w = body_h[body_i[0]]; if(ni > 1){ //Only read if we actually have ni == 2 pos_i[1] = group_body_pos[body_i[1]]; pos_i[1].w = body_h[body_i[1]]; } acc_i[0] = acc_i[1] = make_float4(0.0f, 0.0f, 0.0f, 0.0f); int ngb_i; const float group_eps = 0; int apprCount = 0; int direCount = 0; if (ni == 1) approximate_gravity<0, blockDim2, 1>( pos_i, group_pos, eps2, node_begend, multipole_data, body_pos, shmem, lmem, ngb_i, apprCount, direCount, boxSizeInfo, curGroupSize, boxCenterInfo, group_eps, acc_i, dens_i); else approximate_gravity<0, blockDim2, 2>( pos_i, group_pos, eps2, node_begend, multipole_data, body_pos, shmem, lmem, ngb_i, apprCount, direCount, boxSizeInfo, curGroupSize, boxCenterInfo, group_eps, acc_i, dens_i); #if 1 /* this increase lmem spill count */ if(apprCount < 0) { //Try to get access to the big stack, only one block per time is allowed if(laneId == 0) { int res = atomicExch(&active_inout[n_bodies+1], 1); //If 
the old value (res) is 0 we can go otherwise sleep int waitCounter = 0; while(res != 0) { //Sleep for(int i=0; i < (1024); i++) { waitCounter += 1; } //Test again shmem[0] = waitCounter; res = atomicExch(&active_inout[n_bodies+1], 1); } } lmem = &MEM_BUF[gridDim.x*(LMEM_STACK_SIZE*blockDim.x + LMEM_EXTRA_SIZE)]; //Use the extra large buffer apprCount = direCount = 0; acc_i[0] = acc_i[1] = make_float4(0.0f, 0.0f, 0.0f, 0.0f); if (ni == 1) approximate_gravity<8, blockDim2, 1>( pos_i, group_pos, eps2, node_begend, multipole_data, body_pos, shmem, lmem, ngb_i, apprCount, direCount, boxSizeInfo, curGroupSize, boxCenterInfo, group_eps, acc_i, dens_i); else approximate_gravity<8, blockDim2, 2>( pos_i, group_pos, eps2, node_begend, multipole_data, body_pos, shmem, lmem, ngb_i, apprCount, direCount, boxSizeInfo, curGroupSize, boxCenterInfo, group_eps, acc_i, dens_i); lmem = &MEM_BUF[blockIdx.x*(LMEM_STACK_SIZE*blockDim.x + LMEM_EXTRA_SIZE)]; if(laneId == 0) { atomicExch(&active_inout[n_bodies+1], 0); //Release the lock } }//end if apprCount < 0 #endif if (laneId < nb_i) { const int addr = body_i[0]; acc_out [addr] = acc_i[0]; // ngb_out [addr] = ngb_i; ngb_out [addr] = addr; //JB Fixed this for demo active_inout[addr] = 1; interactions[addr].x = apprCount / ni; interactions[addr].y = direCount / ni; body_dens_out[addr] = dens_i[0]; body_h[addr] = adjustH(body_h[addr], dens_i[0].y); if (ni == 2) { const int addr = body_i[1]; acc_out [addr] = acc_i[1]; // ngb_out [addr] = ngb_i; ngb_out [addr] = addr; //JB Fixed this for demo active_inout[addr] = 1; interactions[addr].x = apprCount / ni; interactions[addr].y = direCount / ni; body_dens_out[addr] = dens_i[1]; body_h[addr] = adjustH(body_h[addr], dens_i[1].y); } } } //end while } extern "C" __global__ void #if 0 /* casues 164 bytes spill to lmem with NTHREAD = 128 */ __launch_bounds__(NTHREAD) #endif dev_approximate_gravity_let( const int n_active_groups, int n_bodies, float eps2, uint2 node_begend, int *active_groups, real4 
*body_pos, real4 *multipole_data, float4 *acc_out, real4 *group_body_pos, //This can be different from body_pos int *ngb_out, int *active_inout, int2 *interactions, float4 *boxSizeInfo, float4 *groupSizeInfo, float4 *boxCenterInfo, float4 *groupCenterInfo, real4 *body_vel, int *MEM_BUF, float *body_h, float2 *body_dens_out) { const int blockDim2 = NTHREAD2; const int shMemSize = 10 * (1 << blockDim2); __shared__ int shmem_pool[shMemSize]; const int nWarps2 = blockDim2 - WARP_SIZE2; const int sh_offs = (shMemSize >> nWarps2) * warpId; volatile int *shmem = shmem_pool + sh_offs; /*********** check if this block is linked to a leaf **********/ int *lmem = &MEM_BUF[blockIdx.x*(LMEM_STACK_SIZE*blockDim.x + LMEM_EXTRA_SIZE)]; int bid = gridDim.x * blockIdx.y + blockIdx.x; while(true) { if(laneId == 0) { bid = atomicAdd(&active_inout[n_bodies], 1); shmem[0] = bid; } bid = shmem[0]; if (bid >= n_active_groups) return; int grpOffset = 0; /*********** set necessary thread constants **********/ #ifdef DO_BLOCK_TIMESTEP real4 curGroupSize = groupSizeInfo[active_groups[bid + grpOffset]]; #else real4 curGroupSize = groupSizeInfo[bid + grpOffset]; #endif const int groupData = __float_as_int(curGroupSize.w); const uint body_addr = groupData & CRITMASK; const uint nb_i = ((groupData & INVCMASK) >> CRITBIT) + 1; #ifdef DO_BLOCK_TIMESTEP real4 group_pos = groupCenterInfo[active_groups[bid + grpOffset]]; #else real4 group_pos = groupCenterInfo[bid + grpOffset]; #endif uint body_i[2]; int ni = nb_i <= WARP_SIZE ? 
1 : 2; body_i[0] = body_addr + laneId%nb_i; body_i[1] = body_addr + WARP_SIZE + laneId%(nb_i - WARP_SIZE); float4 pos_i[2]; float4 acc_i[2]; float2 dens_i[2]; pos_i[0] = group_body_pos[body_i[0]]; pos_i[0].w = body_h[body_i[0]]; if(ni > 1){ //Only read if we actually have ni == 2 pos_i[1] = group_body_pos[body_i[1]]; pos_i[1].w = body_h[body_i[1]]; } acc_i[0] = acc_i[1] = make_float4(0.0f, 0.0f, 0.0f, 0.0f); int ngb_i; const float group_eps = 0; int apprCount = 0; int direCount = 0; if (ni == 1) approximate_gravity<0, blockDim2, 1>( pos_i, group_pos, eps2, node_begend, multipole_data, body_pos, shmem, lmem, ngb_i, apprCount, direCount, boxSizeInfo, curGroupSize, boxCenterInfo, group_eps, acc_i, dens_i); else approximate_gravity<0, blockDim2, 2>( pos_i, group_pos, eps2, node_begend, multipole_data, body_pos, shmem, lmem, ngb_i, apprCount, direCount, boxSizeInfo, curGroupSize, boxCenterInfo, group_eps, acc_i, dens_i); #if 1 /* this increase lmem spill count */ if(apprCount < 0) { //Try to get access to the big stack, only one block per time is allowed if(laneId == 0) { int res = atomicExch(&active_inout[n_bodies+1], 1); //If the old value (res) is 0 we can go otherwise sleep int waitCounter = 0; while(res != 0) { //Sleep for(int i=0; i < (1024); i++) { waitCounter += 1; } //Test again shmem[0] = waitCounter; res = atomicExch(&active_inout[n_bodies+1], 1); } } lmem = &MEM_BUF[gridDim.x*(LMEM_STACK_SIZE*blockDim.x + LMEM_EXTRA_SIZE)]; //Use the extra large buffer apprCount = direCount = 0; acc_i[0] = acc_i[1] = make_float4(0.0f, 0.0f, 0.0f, 0.0f); if (ni == 1) approximate_gravity<8, blockDim2, 1>( pos_i, group_pos, eps2, node_begend, multipole_data, body_pos, shmem, lmem, ngb_i, apprCount, direCount, boxSizeInfo, curGroupSize, boxCenterInfo, group_eps, acc_i, dens_i); else approximate_gravity<8, blockDim2, 2>( pos_i, group_pos, eps2, node_begend, multipole_data, body_pos, shmem, lmem, ngb_i, apprCount, direCount, boxSizeInfo, curGroupSize, boxCenterInfo, group_eps, 
acc_i, dens_i); lmem = &MEM_BUF[blockIdx.x*(LMEM_STACK_SIZE*blockDim.x + LMEM_EXTRA_SIZE)]; if(laneId == 0) { atomicExch(&active_inout[n_bodies+1], 0); //Release the lock } }//end if apprCount < 0 #endif if (laneId < nb_i) { const int addr = body_i[0]; acc_out [addr].x += acc_i[0].x; acc_out [addr].y += acc_i[0].y; acc_out [addr].z += acc_i[0].z; acc_out [addr].w += acc_i[0].w; // ngb_out [addr] = ngb_i; ngb_out [addr] = addr; //JB Fixed this for demo active_inout[addr] = 1; interactions[addr].x += apprCount / ni; interactions[addr].y += direCount / ni; body_dens_out[addr].x += dens_i[0].x; body_dens_out[addr].y += dens_i[0].y; if (ni == 2) { const int addr = body_i[1]; acc_out [addr].x += acc_i[1].x; acc_out [addr].y += acc_i[1].y; acc_out [addr].z += acc_i[1].z; acc_out [addr].w += acc_i[1].w; // ngb_out [addr] = ngb_i; ngb_out [addr] = addr; //JB Fixed this for demo active_inout[addr] = 1; interactions[addr].x += apprCount / ni; interactions[addr].y += direCount / ni; body_dens_out[addr].x += dens_i[1].x; body_dens_out[addr].y += dens_i[1].y; } } } //end while }
8514c5f2c23ab8200c711454075bd5013be55aa5.cu
// #include "support_kernels.cu0 #include <stdio.h> #include "../profiling/bonsai_timing.h" PROF_MODULE(dev_approximate_gravity); #include "node_specs.h" #ifdef WIN32 #define M_PI 3.14159265358979323846264338328 #endif #define WARP_SIZE2 5 #define WARP_SIZE 32 #if NCRIT > 2*WARP_SIZE #error "NCRIT in include/node_specs.h must be <= WARP_SIZE" #endif #define laneId (threadIdx.x & (WARP_SIZE - 1)) #define warpId (threadIdx.x >> WARP_SIZE2) #define BTEST(x) (-(int)(x)) #if 1 #define _QUADRUPOLE_ #endif /***********************************/ static __device__ __forceinline__ float Wkernel(const float q) { const float sigma = 8.0f/M_PI; const float qm = 1.0f - q; if (q < 0.5f) return sigma * (1.0f + (-6.0f)*q*q*qm); else if (q < 1.0f) return sigma * 2.0f*qm*qm*qm; return 0.0f; } static __device__ __forceinline__ float computePartialDensity( const float h, const float r2, const float mass) { const float r = sqrtf(r2); //Can we combine this with the force sqrt? const float hinv = 1.0f/h; //Can we precompute this and keep in register? 
const float q = r * hinv; const float hinv3 = hinv*hinv*hinv; // printf("ON DEV: Kernel: %f\tq: %f\tr: %f\thinv: %f\n", Wkernel(q), q, r, hinv); return mass * Wkernel(q) * hinv3; } static __device__ __forceinline__ void computeDensityAndNgb( const float r2, const float h, const float mass, float &density, float &nb) { if (h*h > r2) { nb++; density += computePartialDensity(h,r2,mass); } } static __device__ __forceinline__ float adjustH(const float h_old, const float nnb) { const float nbDesired = 42; const float f = 0.5f * (1.0f + cbrtf(nbDesired / nnb)); const float fScale = max(min(f, 1.2), 0.8); return (h_old*fScale); } /************************************/ /********* PREFIX SUM ***********/ /************************************/ __device__ __forceinline__ int inclusive_scan_warp(volatile int* prefix, int value) { prefix[laneId] = value; for (int i = 0; i < WARP_SIZE2; i++) { const int offset = 1 << i; prefix[laneId] += prefix[laneId - offset] & BTEST(laneId >= offset); } return prefix[WARP_SIZE-1]; } /* inclusive prefix sum for an array */ __device__ int inclusive_scan_array(int N, volatile int* prefix_in) { int y = inclusive_scan_warp(prefix_in, prefix_in[laneId]); if (N <= WARP_SIZE) return y; for (int p = WARP_SIZE; p < N; p += WARP_SIZE) { volatile int *prefix = &prefix_in[p]; const int y1 = inclusive_scan_warp(prefix, prefix[laneId]); prefix[laneId] += y; y += y1; } return y; } /**** binary scans ****/ __device__ __forceinline__ int lanemask_lt() { int mask; asm("mov.u32 %0, %lanemask_lt;" : "=r" (mask)); return mask; } __device__ int warp_exclusive_scan(const bool p, int &psum) { const unsigned int b = __ballot(p); psum = __popc(b & lanemask_lt()); return __popc(b); } __device__ int warp_exclusive_scan(const bool p) { const int b = __ballot(p); return __popc(b & lanemask_lt()); } /************************************/ /********* SEGMENTED SCAN ***********/ /************************************/ __device__ __forceinline__ int lanemask_le() { int mask; 
asm("mov.u32 %0, %lanemask_le;" : "=r" (mask)); return mask; } __device__ __forceinline__ int inclusive_segscan_warp( volatile int *shmem, const int packed_value, int &dist_block, int &nseg) { const int flag = packed_value < 0; const int mask = BTEST(flag); const int value = (mask & (-1-packed_value)) + (~mask & 1); const int flags = __ballot(flag); nseg += __popc (flags) ; dist_block = __clz(__brev(flags)); const int distance = min(__clz(flags & lanemask_le()) + laneId - 31, laneId); shmem[laneId] = value; for (int i = 0; i < WARP_SIZE2; i++) { const int offset = 1 << i; shmem[laneId] += shmem[laneId - offset] & BTEST(offset <= distance); } return shmem[WARP_SIZE - 1]; } /* does not work if segment size > WARP_SIZE */ __device__ __forceinline__ int inclusive_segscan_array(volatile int *shmem_in, const int N) { int dist, nseg = 0; int y = inclusive_segscan_warp(shmem_in, shmem_in[laneId], dist, nseg); if (N <= WARP_SIZE) return nseg; for (int p = WARP_SIZE; p < N; p += WARP_SIZE) { volatile int *shmem = shmem_in + p; int y1 = inclusive_segscan_warp(shmem, shmem[laneId], dist, nseg); shmem[laneId] += y & BTEST(laneId < dist); y = y1; } return nseg; } /**************************************/ /*************** Tree walk ************/ /**************************************/ template<int SHIFT> __forceinline__ __device__ int ACCS(const int i) { return (i & ((LMEM_STACK_SIZE << SHIFT) - 1))*blockDim.x + threadIdx.x; } texture<float4, 1, cudaReadModeElementType> texNodeSize; texture<float4, 1, cudaReadModeElementType> texNodeCenter; texture<float4, 1, cudaReadModeElementType> texMultipole; texture<float4, 1, cudaReadModeElementType> texBody; //This function is called from the my_cuda_rt file. 
I could not get the // references extern since g++ did not accept the texture objects const void* getTexturePointer(const char* name) { if(strcmp(name, "texNodeSize") == 0) return &texNodeSize; if(strcmp(name, "texNodeCenter") == 0) return &texNodeCenter; if(strcmp(name, "texMultipole") == 0) return &texMultipole; if(strcmp(name, "texBody") == 0) return &texBody; return NULL; } /*********** Forces *************/ __device__ __forceinline__ float4 add_acc( float4 acc, const float4 pos, const float massj, const float3 posj, const float eps2, float2 &density) { #if 1 /* to test performance of a tree-walk */ const float3 dr = make_float3(posj.x - pos.x, posj.y - pos.y, posj.z - pos.z); const float r2 = dr.x*dr.x + dr.y*dr.y + dr.z*dr.z + eps2; const float rinv = rsqrtf(r2); const float rinv2 = rinv*rinv; const float mrinv = massj * rinv; const float mrinv3 = mrinv * rinv2; acc.w -= mrinv; acc.x += mrinv3 * dr.x; acc.y += mrinv3 * dr.y; acc.z += mrinv3 * dr.z; #endif //Density computeDensityAndNgb(r2,pos.w,massj,density.x,density.y); return acc; } __device__ float4 get_D04(float ds2, int selfGrav = 1) { #if 1 float ids = rsqrtf(ds2); //Does not work with zero-softening // if(isnan(ids)) ids = 0; //This does work with zero-softening, few percent performance drop //float ids = (1.0f / sqrtf(ds2)) * selfGrav; Slower in Pre CUDA4.1 ids *= selfGrav; #else const float ids = (ds2 > 0.0f) ? 
rsqrtf(ds2) : 0.0f; #endif const float ids2 = ids*ids; float ids3 = ids *ids2; float ids5 = ids3*ids2; float ids7 = ids5*ids2; return make_float4(ids, -ids3, +3.0f*ids5, -15.0f*ids7); } // 9 flops __device__ __forceinline__ float4 add_acc( float4 acc, const float4 pos, const float mass, const float3 com, const float4 Q0, const float4 Q1, float eps2, float2 &density) { const float3 dr = make_float3(pos.x - com.x, pos.y - com.y, pos.z - com.z); const float r2 = dr.x*dr.x + dr.y*dr.y + dr.z*dr.z + eps2; const float rinv = rsqrtf(r2); const float rinv2 = rinv *rinv; const float mrinv = mass*rinv; const float mrinv3 = rinv2*mrinv; const float mrinv5 = rinv2*mrinv3; const float mrinv7 = rinv2*mrinv5; // 16 //Density computeDensityAndNgb(r2,pos.w,mass,density.x,density.y); #if 0 float D0 = mrinv; float D1 = -mrinv3; float D2 = mrinv5*( 3.0f); float D3 = -mrinv7*(15.0f); float oct_q11 = Q0.x; float oct_q22 = Q0.y; float oct_q33 = Q0.z; float oct_q12 = Q1.x; float oct_q13 = Q1.y; float oct_q23 = Q1.z; float Qii = oct_q11 + oct_q22 + oct_q33; float QijRiRj = (oct_q11*dr.x*dr.x + oct_q22*dr.y*dr.y + oct_q33*dr.z*dr.z) + 2.0f*(oct_q12*dr.y*dr.x + oct_q13*dr.z*dr.x + oct_q23*dr.y*dr.z); acc.w -= D0 + 0.5f*D1*Qii + 0.5f*D2*QijRiRj; float C01a = D1 + 0.5f*D2*Qii + 0.5f*D3*QijRiRj; acc.x += C01a*dr.x + D2*(oct_q11*dr.x + oct_q12*dr.y + oct_q13*dr.z); acc.y += C01a*dr.y + D2*(oct_q12*dr.x + oct_q22*dr.y + oct_q23*dr.z); acc.z += C01a*dr.z + D2*(oct_q13*dr.x + oct_q23*dr.y + oct_q33*dr.z); #else float D0 = mrinv; float D1 = -mrinv3; float D2 = mrinv5*( 3.0f); float D3 = -mrinv7*(15.0f); // 3 const float q11 = Q0.x; const float q22 = Q0.y; const float q33 = Q0.z; const float q12 = Q1.x; const float q13 = Q1.y; const float q23 = Q1.z; const float q = q11 + q22 + q33; const float3 qR = make_float3( q11*dr.x + q12*dr.y + q13*dr.z, q12*dr.x + q22*dr.y + q23*dr.z, q13*dr.x + q23*dr.y + q33*dr.z); const float qRR = qR.x*dr.x + qR.y*dr.y + qR.z*dr.z; // 22 acc.w -= D0 + 0.5f*(D1*q + 
D2*qRR); float C = D1 + 0.5f*(D2*q + D3*qRR); acc.x += C*dr.x + D2*qR.x; acc.y += C*dr.y + D2*qR.y; acc.z += C*dr.z + D2*qR.z; // 23 #endif /* total: 16 + 3 + 22 + 23 = 64 flops */ return acc; } /*******************************/ /****** Opening criterion ******/ /*******************************/ //Improved Barnes Hut criterium __device__ bool split_node_grav_impbh( const float4 nodeCOM, const float4 groupCenter, const float4 groupSize) { //Compute the distance between the group and the cell float3 dr = make_float3( fabsf(groupCenter.x - nodeCOM.x) - (groupSize.x), fabsf(groupCenter.y - nodeCOM.y) - (groupSize.y), fabsf(groupCenter.z - nodeCOM.z) - (groupSize.z) ); dr.x += fabsf(dr.x); dr.x *= 0.5f; dr.y += fabsf(dr.y); dr.y *= 0.5f; dr.z += fabsf(dr.z); dr.z *= 0.5f; //Distance squared, no need to do sqrt since opening criteria has been squared const float ds2 = dr.x*dr.x + dr.y*dr.y + dr.z*dr.z; // return (ds2 <= fabsf(nodeCOM.w)); if (ds2 <= fabsf(nodeCOM.w)) return true; // if (fabs(ds2 - fabs(nodeCOM.w)) < 10e-04) return true; //Limited precision can result in round of errors. 
Use this as extra safe guard return false; } //Minimum distance __device__ bool split_node_grav_md( const float4 nodeCenter, const float4 nodeSize, const float4 groupCenter, const float4 groupSize) { //Compute the distance between the group and the cell float3 dr = {fabs(groupCenter.x - nodeCenter.x) - (groupSize.x + nodeSize.x), fabs(groupCenter.y - nodeCenter.y) - (groupSize.y + nodeSize.y), fabs(groupCenter.z - nodeCenter.z) - (groupSize.z + nodeSize.z)}; dr.x += fabs(dr.x); dr.x *= 0.5f; dr.y += fabs(dr.y); dr.y *= 0.5f; dr.z += fabs(dr.z); dr.z *= 0.5f; //Distance squared, no need to do sqrt since opening criteria has been squared float ds2 = dr.x*dr.x + dr.y*dr.y + dr.z*dr.z; return (ds2 <= fabs(nodeCenter.w)); } #define TEXTURES /*******************************/ /****** Force tree-walk ******/ /*******************************/ template<const int SHIFT, const int BLOCKDIM2, const int NI> __device__ #if 0 /* __noinline__ crashes the kernel when compled with ABI */ __noinline__ #else __forceinline__ #endif void approximate_gravity( float4 pos_i[NI], real4 group_pos, float eps2, uint2 node_begend, real4 *multipole_data, real4 *body_pos, volatile int *shmem, int *lmem, int &ngb, int &apprCount, int &direCount, volatile float4 *boxSizeInfo, float4 groupSize, volatile float4 *boxCenterInfo, float group_eps, real4 acc_i[NI], float2 dens_i[NI]) { /*********** shared memory distribution **********/ // begin, end, size // ----------------------- const int stack_sz = (LMEM_STACK_SIZE << SHIFT) << BLOCKDIM2; /* stack allocated per thread-block */ const int nWarps2 = BLOCKDIM2 - WARP_SIZE2; int *approxL = lmem + stack_sz + (LMEM_EXTRA_SIZE >> nWarps2) * warpId; volatile int *directS = shmem; // 0*DIM, 1*DIM, 1*DIM volatile int *nodesS = directS + WARP_SIZE; // 1*DIM, 10*DIM, 9*DIM volatile int *prefix = nodesS + WARP_SIZE*8; // 9*DIM, 10*DIM, 1*DIM const int NJMAX = WARP_SIZE*3; int *body_list = (int* )&nodesS [WARP_SIZE]; // 2*DIM, 5*DIM, 2*DIM float *sh_mass = (float* 
)&body_list[NJMAX]; // 5*DIM, 6*DIM, 1*DIM float3 *sh_pos = (float3*)&sh_mass [WARP_SIZE]; // 6*DIM, 9*DIM 3*DIM volatile int *approxM = approxL; volatile int *directM = directS; volatile int * nodesM = nodesS; /*********** stack **********/ int *nstack = lmem; /*********** begin tree-walk **********/ int n_approx = 0; int n_direct = 0; #pragma unroll 1 for (int i = 0; i < NI; i++) dens_i[i] = make_float2(0,0); for (int root_node = node_begend.x; root_node < node_begend.y; root_node += WARP_SIZE) { int n_nodes0 = min(node_begend.y - root_node, WARP_SIZE); int n_stack0 = 0; int n_stack_pre = 0; { nstack[ACCS<SHIFT>(n_stack0)] = root_node + laneId; n_stack0++; } /*********** walk each level **********/ while (n_nodes0 > 0) { int n_nodes1 = 0; int n_offset = 0; int n_stack1 = n_stack0; int c_stack0 = n_stack_pre; /*********** walk a level **********/ while(c_stack0 < n_stack0) { /*** **** --> fetch the list of nodes rom LMEM ***/ bool use_node = laneId < n_nodes0; #if 1 { prefix[laneId] = nstack[ACCS<SHIFT>(c_stack0)]; c_stack0++; } const int node = prefix[min(laneId, n_nodes0 - 1)]; #else /* eg: seems to work, but I do not remember if that will *always* work */ int node; { node = nstack[ACCS<SHIFT>(c_stack0)]; c_stack0++; } #endif #if 0 /* if uncommented, give same results, see below */ if (blockIdx.x == 0 && warpId == 0) printf("laneId = %d node= %d \n", laneId, node); #endif #if 0 if(n_nodes0 > 0){ //Work around pre 4.1 compiler bug n_nodes0 -= WARP_SIZE; } #else n_nodes0 -= WARP_SIZE; #endif /*** **** --> process each of the nodes in the list in parallel ***/ #ifndef TEXTURES float4 nodeSize = boxSizeInfo[node]; //Fetch the size of the box. Size.w = child info float4 node_pos = boxCenterInfo[node]; //Fetch the center of the box. 
center.w = opening info #else float4 nodeSize = tex1Dfetch(texNodeSize, node); float4 node_pos = tex1Dfetch(texNodeCenter, node); #endif int node_data = __float_as_int(nodeSize.w); //Check if a cell has to be opened #ifdef IMPBH //Improved barnes-hut method #ifndef TEXTURES float4 nodeCOM = multipole_data[node*3]; #else float4 nodeCOM = tex1Dfetch(texMultipole,node*3); #endif nodeCOM.w = node_pos.w; bool split = split_node_grav_impbh(nodeCOM, group_pos, groupSize); #else bool split = split_node_grav_md(node_pos, nodeSize, group_pos, groupSize); #endif bool leaf = node_pos.w <= 0; //Small AND equal incase of a 1 particle cell //Check if it is a leaf // split = true; //If node_data = 0xF it means the remote process decided split was not required //important for traversal of LET trees!! Since numerical difference between CPU and GPU //can cause different outcomes on the split test if(node_data == 0xFFFFFFFF) split = false; bool flag = (split && !leaf) && use_node; //Flag = use_node + split + not_a_leaf;Use only non_leaf nodes that are to be split uint mask = BTEST(flag); // mask = #FFFFFFFF if use_node+split+not_a_leaf==true, otherwise zero int child = node_data & 0x0FFFFFFF; //Index to the first child of the node int nchild = (((node_data & 0xF0000000) >> 28)) & mask; //The number of children this node has /*** **** --> calculate prefix ***/ int n_total = inclusive_scan_warp(prefix, nchild); // inclusive scan to compute memory offset of each child (return total # of children) int offset = prefix[laneId]; offset += n_offset - nchild; // convert inclusive into exclusive scan for referencing purpose for (int i = n_offset; i < n_offset + n_total; i += WARP_SIZE) //nullify part of the array that will be filled with children nodesM[laneId + i] = 0; //but do not touch those parts which has already been filled #if 0 /* the following gives different result than then one in else */ /* the results become the same if I uncomment printf above */ if (flag) { nodesM[offset] = 
child; if (nodesM[offset + 1] == 0) nodesM[offset + 1] = child + 1; if (nodesM[offset + 2] == 0) nodesM[offset + 2] = child + 2; if (nodesM[offset + 3] == 0) nodesM[offset + 3] = child + 3; if (nodesM[offset + 4] == 0) nodesM[offset + 4] = child + 4; if (nodesM[offset + 5] == 0) nodesM[offset + 5] = child + 5; if (nodesM[offset + 6] == 0) nodesM[offset + 6] = child + 6; if (nodesM[offset + 7] == 0) nodesM[offset + 7] = child + 7; } #elif0 if (flag) nodesM[offset] = child; //Thread with the node that is about to be split //writes the first child in the array of nodes /*** in the following 8 lines, we calculate indexes of all the children that have to be walked from the index of the first child***/ if (flag && nodesM[offset + 1] == 0) nodesM[offset + 1] = child + 1; if (flag && nodesM[offset + 2] == 0) nodesM[offset + 2] = child + 2; if (flag && nodesM[offset + 3] == 0) nodesM[offset + 3] = child + 3; if (flag && nodesM[offset + 4] == 0) nodesM[offset + 4] = child + 4; if (flag && nodesM[offset + 5] == 0) nodesM[offset + 5] = child + 5; if (flag && nodesM[offset + 6] == 0) nodesM[offset + 6] = child + 6; if (flag && nodesM[offset + 7] == 0) nodesM[offset + 7] = child + 7; #else //This code does not require reading of nodesM before writing thereby preventing //possible synchronization , not completed writes , problems if(flag) { for(int i=0; i < nchild; i++) { nodesM[offset + i] = child + i; } } #endif n_offset += n_total; //Increase the offset in the array by the number of newly added nodes /*** **** --> save list of nodes to LMEM ***/ /*** if half of shared memory or more is filled with the the nodes, dump these into slowmem stack ***/ while(n_offset >= WARP_SIZE) { n_offset -= WARP_SIZE; const int offs1 = ACCS<SHIFT>(n_stack1); nstack[offs1] = nodesM[n_offset + laneId]; n_stack1++; n_nodes1 += WARP_SIZE; if((n_stack1 - c_stack0) >= (LMEM_STACK_SIZE << SHIFT)) { //We overwrote our current stack apprCount = -1; return; } } /******************************/ 
/******************************/ /***** EVALUATION *****/ /******************************/ /******************************/ #if 1 /***********************************/ /****** APPROX ******/ /***********************************/ /* binary prefix sum */ flag = !split && use_node; n_total = warp_exclusive_scan(flag, offset); if (flag) approxM[n_approx + offset] = node; n_approx += n_total; while (n_approx >= WARP_SIZE) { n_approx -= WARP_SIZE; const int address = (approxM[n_approx + laneId] << 1) + approxM[n_approx + laneId]; #ifndef TEXTURES const float4 monopole = multipole_data[address ]; #else const float4 monopole = tex1Dfetch(texMultipole, address); #endif sh_mass[laneId] = monopole.w; sh_pos [laneId] = make_float3(monopole.x, monopole.y, monopole.z); #ifndef _QUADRUPOLE_ for (int i = 0; i < WARP_SIZE; i++) for (int k = 0; k < NI; k++) acc_i[k] = add_acc(acc_i[k], pos_i[k], sh_mass[i], sh_pos[i], eps2); #else for (int i = 0; i < WARP_SIZE; i++) { const int address = approxM[n_approx + i] * 3; const float4 Q0 = tex1Dfetch(texMultipole, address + 1); const float4 Q1 = tex1Dfetch(texMultipole, address + 2); for (int k = 0; k < NI; k++) acc_i[k] = add_acc(acc_i[k], pos_i[k], sh_mass[i], sh_pos[i], Q0, Q1, eps2, dens_i[k]); } #endif /* _QUADRUPOLE_ */ apprCount += WARP_SIZE*NI; } #endif #if 1 /***********************************/ /****** DIRECT ******/ /***********************************/ flag = split && leaf && use_node; //flag = split + leaf + use_node const int jbody = node_data & BODYMASK; //the first body in the leaf const int nbody = (((node_data & INVBMASK) >> LEAFBIT)+1) & BTEST(flag); //number of bodies in the leaf masked with the flag body_list[laneId] = directM[laneId]; //copy list of bodies from previous pass to body_list // step 1 /* binary prefix sum */ // step 1 int n_bodies = inclusive_scan_warp(prefix, nbody); // inclusive scan to compute memory offset for each body offset = prefix[laneId]; // step 2 if (flag) prefix[warp_exclusive_scan(flag)] = 
laneId; //with tid whose leaves have to be opened directM[laneId] = offset; //Store a copy of inclusive scan in direct offset -= nbody; //convert inclusive int oexclusive scan offset += 1; //add unity, since later prefix0[tid] == 0 used to check barrier int nl_pre = 0; //Number of leaves that have already been processed while (n_bodies > 0) { int nb = min(n_bodies, NJMAX - n_direct); //Make sure number of bides to be extracted does not exceed //the amount of allocated shared memory // step 0 //nullify part of the body_list that will be filled with bodies for (int i = n_direct; i < n_direct + nb; i += WARP_SIZE) //from the leaves that are being processed body_list[i + laneId] = 0; //step 1: if (flag && (directM[laneId] <= nb) && (offset > 0)) //make sure that the thread indeed carries a leaf body_list[n_direct + offset- 1] = -1-jbody; //whose bodies will be extracted // step 2: const int nl = inclusive_segscan_array(&body_list[n_direct], nb); nb = directM[prefix[nl_pre + nl - 1]]; // number of bodies stored in these leaves /***************************************************************************** * example of what is accomplished in steps 0-2 * * --------------------------- * * step 0: body_list = 000000000000000000000 * * step 1: body_list = n000m000p000000q00r00 n,m,.. = -1-jbody_n,m... * * step 2: body_list = n n+1 n+2 n+3 m m+1 m+2 m+3 p p+1 p+2 p+3 p+4 p+5 ... 
* *****************************************************************************/ n_bodies -= nb; //subtract from n_bodies number of bodies that have been extracted nl_pre += nl; //increase the number of leaves that where processed directM[laneId] -= nb; //subtract the number of extracted bodies in this pass offset = max(offset - nb, 0); n_direct += nb; //increase the number of bodies to be procssed while(n_direct >= WARP_SIZE) { n_direct -= WARP_SIZE; const float4 posj = body_pos[body_list[n_direct + laneId]]; #if 0 const float4 posj = tex1Dfetch(texBody, body_list[n_direct + tid]); #endif sh_mass[laneId] = posj.w; sh_pos [laneId] = make_float3(posj.x, posj.y, posj.z); for (int i = 0; i < WARP_SIZE; i++) for (int k = 0; k < NI; k++) acc_i[k] = add_acc(acc_i[k], pos_i[k], sh_mass[i], sh_pos[i], eps2, dens_i[k]); direCount += WARP_SIZE*NI; } } directM[laneId] = body_list[laneId]; #endif } //end lvl n_nodes1 += n_offset; if (n_offset > 0) { nstack[ACCS<SHIFT>(n_stack1)] = nodesM[laneId]; n_stack1++; if((n_stack1 - c_stack0) >= (LMEM_STACK_SIZE << SHIFT)) { //We overwrote our current stack apprCount = -1; return; } } /*** **** --> copy nodes1 to nodes0: done by reassigning the pointers ***/ n_nodes0 = n_nodes1; n_stack_pre = n_stack0; n_stack0 = n_stack1; }//end while levels }//end for if(n_approx > 0) { if (laneId < n_approx) { const int address = (approxM[laneId] << 1) + approxM[laneId]; #ifndef TEXTURES float4 monopole = multipole_data[address ]; float4 octopole0 = multipole_data[address + 1]; float4 octopole1 = multipole_data[address + 2]; #else float4 monopole = tex1Dfetch(texMultipole, address); float4 octopole0 = tex1Dfetch(texMultipole, address + 1); float4 octopole1 = tex1Dfetch(texMultipole, address + 2); #endif sh_mass[laneId] = monopole.w; sh_pos [laneId] = make_float3(monopole.x, monopole.y, monopole.z); } else { //Set non-active memory locations to zero sh_mass[laneId] = 0.0f; sh_pos [laneId] = make_float3(1.0e10f, 1.0e10f, 1.0e10f); } #ifndef 
_QUADRUPOLE_ for (int i = 0; i < WARP_SIZE; i++) for (int k = 0; k < NI; k++) acc_i[k] = add_acc(acc_i[k], pos_i[k], sh_mass[i], sh_pos[i],eps2, dens_i[k]); #else for (int i = 0; i < WARP_SIZE; i++) { float4 Q0, Q1; Q0 = Q1 = make_float4(0.0f, 0.0f, 0.0f, 0.0f); if (i < n_approx) { const int address = approxM[i] * 3; Q0 = tex1Dfetch(texMultipole, address + 1); Q1 = tex1Dfetch(texMultipole, address + 2); } for (int k = 0; k < NI; k++) acc_i[k] = add_acc(acc_i[k], pos_i[k], sh_mass[i], sh_pos[i], Q0, Q1, eps2, dens_i[k]); } #endif apprCount += WARP_SIZE*NI; } //if n_approx > 0 if(n_direct > 0) { if (laneId < n_direct) { const float4 posj = body_pos[directM[laneId]]; #if 0 const float4 posj = tex1Dfetch(texBody, direct[tid]); #endif sh_mass[laneId] = posj.w; sh_pos [laneId] = make_float3(posj.x, posj.y, posj.z); } else { sh_mass[laneId] = 0.0f; sh_pos [laneId] = make_float3(1.0e10f, 1.0e10f, 1.0e10f); } for (int i = 0; i < WARP_SIZE; i++) for (int k = 0; k < NI; k++) acc_i[k] = add_acc(acc_i[k], pos_i[k], sh_mass[i], sh_pos[i], eps2, dens_i[k]); direCount += WARP_SIZE*NI; } } extern "C" __global__ void #if 0 /* casues 164 bytes spill to lmem with NTHREAD = 128 */ __launch_bounds__(NTHREAD) #endif dev_approximate_gravity( const int n_active_groups, int n_bodies, float eps2, uint2 node_begend, int *active_groups, real4 *body_pos, real4 *multipole_data, float4 *acc_out, real4 *group_body_pos, //This can be different from body_pos int *ngb_out, int *active_inout, int2 *interactions, float4 *boxSizeInfo, float4 *groupSizeInfo, float4 *boxCenterInfo, float4 *groupCenterInfo, real4 *body_vel, int *MEM_BUF, float *body_h, float2 *body_dens_out) { const int blockDim2 = NTHREAD2; const int shMemSize = 10 * (1 << blockDim2); __shared__ int shmem_pool[shMemSize]; const int nWarps2 = blockDim2 - WARP_SIZE2; const int sh_offs = (shMemSize >> nWarps2) * warpId; volatile int *shmem = shmem_pool + sh_offs; /*********** check if this block is linked to a leaf **********/ int *lmem = 
&MEM_BUF[blockIdx.x*(LMEM_STACK_SIZE*blockDim.x + LMEM_EXTRA_SIZE)]; int bid = gridDim.x * blockIdx.y + blockIdx.x; while(true) { if(laneId == 0) { bid = atomicAdd(&active_inout[n_bodies], 1); shmem[0] = bid; } bid = shmem[0]; if (bid >= n_active_groups) return; int grpOffset = 0; /*********** set necessary thread constants **********/ #ifdef DO_BLOCK_TIMESTEP real4 curGroupSize = groupSizeInfo[active_groups[bid + grpOffset]]; #else real4 curGroupSize = groupSizeInfo[bid + grpOffset]; #endif const int groupData = __float_as_int(curGroupSize.w); const uint body_addr = groupData & CRITMASK; const uint nb_i = ((groupData & INVCMASK) >> CRITBIT) + 1; #ifdef DO_BLOCK_TIMESTEP real4 group_pos = groupCenterInfo[active_groups[bid + grpOffset]]; #else real4 group_pos = groupCenterInfo[bid + grpOffset]; #endif uint body_i[2]; int ni = nb_i <= WARP_SIZE ? 1 : 2; body_i[0] = body_addr + laneId%nb_i; body_i[1] = body_addr + WARP_SIZE + laneId%(nb_i - WARP_SIZE); float4 pos_i[2]; float4 acc_i[2]; float2 dens_i[2]; pos_i[0] = group_body_pos[body_i[0]]; pos_i[0].w = body_h[body_i[0]]; if(ni > 1){ //Only read if we actually have ni == 2 pos_i[1] = group_body_pos[body_i[1]]; pos_i[1].w = body_h[body_i[1]]; } acc_i[0] = acc_i[1] = make_float4(0.0f, 0.0f, 0.0f, 0.0f); int ngb_i; const float group_eps = 0; int apprCount = 0; int direCount = 0; if (ni == 1) approximate_gravity<0, blockDim2, 1>( pos_i, group_pos, eps2, node_begend, multipole_data, body_pos, shmem, lmem, ngb_i, apprCount, direCount, boxSizeInfo, curGroupSize, boxCenterInfo, group_eps, acc_i, dens_i); else approximate_gravity<0, blockDim2, 2>( pos_i, group_pos, eps2, node_begend, multipole_data, body_pos, shmem, lmem, ngb_i, apprCount, direCount, boxSizeInfo, curGroupSize, boxCenterInfo, group_eps, acc_i, dens_i); #if 1 /* this increase lmem spill count */ if(apprCount < 0) { //Try to get access to the big stack, only one block per time is allowed if(laneId == 0) { int res = atomicExch(&active_inout[n_bodies+1], 1); //If 
the old value (res) is 0 we can go otherwise sleep int waitCounter = 0; while(res != 0) { //Sleep for(int i=0; i < (1024); i++) { waitCounter += 1; } //Test again shmem[0] = waitCounter; res = atomicExch(&active_inout[n_bodies+1], 1); } } lmem = &MEM_BUF[gridDim.x*(LMEM_STACK_SIZE*blockDim.x + LMEM_EXTRA_SIZE)]; //Use the extra large buffer apprCount = direCount = 0; acc_i[0] = acc_i[1] = make_float4(0.0f, 0.0f, 0.0f, 0.0f); if (ni == 1) approximate_gravity<8, blockDim2, 1>( pos_i, group_pos, eps2, node_begend, multipole_data, body_pos, shmem, lmem, ngb_i, apprCount, direCount, boxSizeInfo, curGroupSize, boxCenterInfo, group_eps, acc_i, dens_i); else approximate_gravity<8, blockDim2, 2>( pos_i, group_pos, eps2, node_begend, multipole_data, body_pos, shmem, lmem, ngb_i, apprCount, direCount, boxSizeInfo, curGroupSize, boxCenterInfo, group_eps, acc_i, dens_i); lmem = &MEM_BUF[blockIdx.x*(LMEM_STACK_SIZE*blockDim.x + LMEM_EXTRA_SIZE)]; if(laneId == 0) { atomicExch(&active_inout[n_bodies+1], 0); //Release the lock } }//end if apprCount < 0 #endif if (laneId < nb_i) { const int addr = body_i[0]; acc_out [addr] = acc_i[0]; // ngb_out [addr] = ngb_i; ngb_out [addr] = addr; //JB Fixed this for demo active_inout[addr] = 1; interactions[addr].x = apprCount / ni; interactions[addr].y = direCount / ni; body_dens_out[addr] = dens_i[0]; body_h[addr] = adjustH(body_h[addr], dens_i[0].y); if (ni == 2) { const int addr = body_i[1]; acc_out [addr] = acc_i[1]; // ngb_out [addr] = ngb_i; ngb_out [addr] = addr; //JB Fixed this for demo active_inout[addr] = 1; interactions[addr].x = apprCount / ni; interactions[addr].y = direCount / ni; body_dens_out[addr] = dens_i[1]; body_h[addr] = adjustH(body_h[addr], dens_i[1].y); } } } //end while } extern "C" __global__ void #if 0 /* casues 164 bytes spill to lmem with NTHREAD = 128 */ __launch_bounds__(NTHREAD) #endif dev_approximate_gravity_let( const int n_active_groups, int n_bodies, float eps2, uint2 node_begend, int *active_groups, real4 
*body_pos, real4 *multipole_data, float4 *acc_out, real4 *group_body_pos, //This can be different from body_pos int *ngb_out, int *active_inout, int2 *interactions, float4 *boxSizeInfo, float4 *groupSizeInfo, float4 *boxCenterInfo, float4 *groupCenterInfo, real4 *body_vel, int *MEM_BUF, float *body_h, float2 *body_dens_out) { const int blockDim2 = NTHREAD2; const int shMemSize = 10 * (1 << blockDim2); __shared__ int shmem_pool[shMemSize]; const int nWarps2 = blockDim2 - WARP_SIZE2; const int sh_offs = (shMemSize >> nWarps2) * warpId; volatile int *shmem = shmem_pool + sh_offs; /*********** check if this block is linked to a leaf **********/ int *lmem = &MEM_BUF[blockIdx.x*(LMEM_STACK_SIZE*blockDim.x + LMEM_EXTRA_SIZE)]; int bid = gridDim.x * blockIdx.y + blockIdx.x; while(true) { if(laneId == 0) { bid = atomicAdd(&active_inout[n_bodies], 1); shmem[0] = bid; } bid = shmem[0]; if (bid >= n_active_groups) return; int grpOffset = 0; /*********** set necessary thread constants **********/ #ifdef DO_BLOCK_TIMESTEP real4 curGroupSize = groupSizeInfo[active_groups[bid + grpOffset]]; #else real4 curGroupSize = groupSizeInfo[bid + grpOffset]; #endif const int groupData = __float_as_int(curGroupSize.w); const uint body_addr = groupData & CRITMASK; const uint nb_i = ((groupData & INVCMASK) >> CRITBIT) + 1; #ifdef DO_BLOCK_TIMESTEP real4 group_pos = groupCenterInfo[active_groups[bid + grpOffset]]; #else real4 group_pos = groupCenterInfo[bid + grpOffset]; #endif uint body_i[2]; int ni = nb_i <= WARP_SIZE ? 
1 : 2; body_i[0] = body_addr + laneId%nb_i; body_i[1] = body_addr + WARP_SIZE + laneId%(nb_i - WARP_SIZE); float4 pos_i[2]; float4 acc_i[2]; float2 dens_i[2]; pos_i[0] = group_body_pos[body_i[0]]; pos_i[0].w = body_h[body_i[0]]; if(ni > 1){ //Only read if we actually have ni == 2 pos_i[1] = group_body_pos[body_i[1]]; pos_i[1].w = body_h[body_i[1]]; } acc_i[0] = acc_i[1] = make_float4(0.0f, 0.0f, 0.0f, 0.0f); int ngb_i; const float group_eps = 0; int apprCount = 0; int direCount = 0; if (ni == 1) approximate_gravity<0, blockDim2, 1>( pos_i, group_pos, eps2, node_begend, multipole_data, body_pos, shmem, lmem, ngb_i, apprCount, direCount, boxSizeInfo, curGroupSize, boxCenterInfo, group_eps, acc_i, dens_i); else approximate_gravity<0, blockDim2, 2>( pos_i, group_pos, eps2, node_begend, multipole_data, body_pos, shmem, lmem, ngb_i, apprCount, direCount, boxSizeInfo, curGroupSize, boxCenterInfo, group_eps, acc_i, dens_i); #if 1 /* this increase lmem spill count */ if(apprCount < 0) { //Try to get access to the big stack, only one block per time is allowed if(laneId == 0) { int res = atomicExch(&active_inout[n_bodies+1], 1); //If the old value (res) is 0 we can go otherwise sleep int waitCounter = 0; while(res != 0) { //Sleep for(int i=0; i < (1024); i++) { waitCounter += 1; } //Test again shmem[0] = waitCounter; res = atomicExch(&active_inout[n_bodies+1], 1); } } lmem = &MEM_BUF[gridDim.x*(LMEM_STACK_SIZE*blockDim.x + LMEM_EXTRA_SIZE)]; //Use the extra large buffer apprCount = direCount = 0; acc_i[0] = acc_i[1] = make_float4(0.0f, 0.0f, 0.0f, 0.0f); if (ni == 1) approximate_gravity<8, blockDim2, 1>( pos_i, group_pos, eps2, node_begend, multipole_data, body_pos, shmem, lmem, ngb_i, apprCount, direCount, boxSizeInfo, curGroupSize, boxCenterInfo, group_eps, acc_i, dens_i); else approximate_gravity<8, blockDim2, 2>( pos_i, group_pos, eps2, node_begend, multipole_data, body_pos, shmem, lmem, ngb_i, apprCount, direCount, boxSizeInfo, curGroupSize, boxCenterInfo, group_eps, 
acc_i, dens_i); lmem = &MEM_BUF[blockIdx.x*(LMEM_STACK_SIZE*blockDim.x + LMEM_EXTRA_SIZE)]; if(laneId == 0) { atomicExch(&active_inout[n_bodies+1], 0); //Release the lock } }//end if apprCount < 0 #endif if (laneId < nb_i) { const int addr = body_i[0]; acc_out [addr].x += acc_i[0].x; acc_out [addr].y += acc_i[0].y; acc_out [addr].z += acc_i[0].z; acc_out [addr].w += acc_i[0].w; // ngb_out [addr] = ngb_i; ngb_out [addr] = addr; //JB Fixed this for demo active_inout[addr] = 1; interactions[addr].x += apprCount / ni; interactions[addr].y += direCount / ni; body_dens_out[addr].x += dens_i[0].x; body_dens_out[addr].y += dens_i[0].y; if (ni == 2) { const int addr = body_i[1]; acc_out [addr].x += acc_i[1].x; acc_out [addr].y += acc_i[1].y; acc_out [addr].z += acc_i[1].z; acc_out [addr].w += acc_i[1].w; // ngb_out [addr] = ngb_i; ngb_out [addr] = addr; //JB Fixed this for demo active_inout[addr] = 1; interactions[addr].x += apprCount / ni; interactions[addr].y += direCount / ni; body_dens_out[addr].x += dens_i[1].x; body_dens_out[addr].y += dens_i[1].y; } } } //end while }
63641126fc550142f50f1a382ab8825013f893ba.hip
// !!! This is a file automatically generated by hipify!!! #include <cmath> #include <hip/hip_runtime_api.h> #include "../include/MatrixOps.cuh" namespace blas1{ namespace cudaBlas { __global__ void naiveTranspose(float *input, float *output, size_t width, size_t height) { unsigned int row_id = blockIdx.y * blockDim.y + threadIdx.y; unsigned int column_id = blockIdx.x * blockDim.x + threadIdx.x; if ((row_id < height) && (column_id < width)) { output[row_id + height * column_id] = input[row_id * width + column_id]; } } __global__ void naiveCopy(float *input, float *output, size_t width, size_t height) { unsigned int row_id = blockIdx.y * blockDim.y + threadIdx.y; unsigned int column_id = blockIdx.x * blockDim.x + threadIdx.x; if ((row_id < height) && (column_id < width)) { output[row_id * width + column_id] = input[row_id * width + column_id]; } } __global__ void transpose_32(float *input, float *output, size_t width, size_t height) { const unsigned int BLOCK_SIZE = 32; __shared__ float sharedMem[BLOCK_SIZE][BLOCK_SIZE + 1]; // Bank conflict unsigned int column_index = blockIdx.x * blockDim.x + threadIdx.x; unsigned int row_index = blockIdx.y * blockDim.y + threadIdx.y; if ((column_index < width) && (row_index < height)) { unsigned element_index = row_index * width + column_index; sharedMem[threadIdx.y][threadIdx.x] = input[element_index]; } __syncthreads(); unsigned int local_row = blockIdx.y * BLOCK_SIZE + threadIdx.x; // coalesced access unsigned int local_column = blockIdx.x * BLOCK_SIZE + threadIdx.y; // coalesced access if ((local_row < height) && (local_column < width)) { output[local_column * height + local_row] = sharedMem[threadIdx.x][threadIdx.y]; } } __global__ void transpose_16(float *input, float *output, size_t width, size_t height) { const unsigned int BLOCK_SIZE = 16; __shared__ float sharedMem[BLOCK_SIZE][BLOCK_SIZE + 1]; // Bank conflict unsigned int column_index = blockIdx.x * blockDim.x + threadIdx.x; unsigned int row_index = blockIdx.y * 
blockDim.y + threadIdx.y; if ((column_index < width) && (row_index < height)) { unsigned element_index = row_index * width + column_index; sharedMem[threadIdx.y][threadIdx.x] = input[element_index]; } __syncthreads(); unsigned int local_row = blockIdx.y * BLOCK_SIZE + threadIdx.x; // coalesced access unsigned int local_column = blockIdx.x * BLOCK_SIZE + threadIdx.y; // coalesced access if ((local_row < height) && (local_column < width)) { output[local_column * height + local_row] = sharedMem[threadIdx.x][threadIdx.y]; } } __global__ void transposeRowBlock_32(float *input, float *output, size_t width, size_t heights) { const unsigned int TILE_SIZE = 32; const unsigned int BLOCK_ROWS = 8; __shared__ float tile[TILE_SIZE][TILE_SIZE + 1]; unsigned int x = blockIdx.x * TILE_SIZE + threadIdx.x; unsigned int y = blockIdx.y * TILE_SIZE + threadIdx.y; auto w = gridDim.x * TILE_SIZE; for (int j = 0; j < TILE_SIZE; j += BLOCK_ROWS) { tile[threadIdx.y + j][threadIdx.x] = input[(y + j) * w + x]; } __syncthreads(); auto local_row = blockIdx.y * TILE_SIZE + threadIdx.x; auto local_column = blockIdx.x * TILE_SIZE + threadIdx.y; #pragma unroll for (int j = 0; j < TILE_SIZE; j += BLOCK_ROWS) { output[(local_column + j) * w + local_row] = tile[threadIdx.x][threadIdx.y + j]; } } __global__ void transposeRowBlock_16(float *input, float *output, size_t width, size_t heights) { const unsigned int TILE_SIZE = 16; const unsigned int BLOCK_ROWS = 8; __shared__ float tile[TILE_SIZE][TILE_SIZE + 1]; unsigned int x = blockIdx.x * TILE_SIZE + threadIdx.x; unsigned int y = blockIdx.y * TILE_SIZE + threadIdx.y; auto w = gridDim.x * TILE_SIZE; for (int j = 0; j < TILE_SIZE; j += BLOCK_ROWS) { tile[threadIdx.y + j][threadIdx.x] = input[(y + j) * w + x]; } __syncthreads(); auto local_row = blockIdx.y * TILE_SIZE + threadIdx.x; auto local_column = blockIdx.x * TILE_SIZE + threadIdx.y; #pragma unroll for (int j = 0; j < TILE_SIZE; j += BLOCK_ROWS) { output[(local_column + j) * w + local_row] = 
tile[threadIdx.x][threadIdx.y + j]; } } __global__ void MatrixCopy_32(float *input, float *output, size_t width, size_t height) { const unsigned int BLOCK_SIZE = 32; __shared__ float sharedMem[BLOCK_SIZE][BLOCK_SIZE + 1]; // Bank conflict unsigned int column_index = blockIdx.x * blockDim.x + threadIdx.x; unsigned int row_index = blockIdx.y * blockDim.y + threadIdx.y; if ((column_index < width) && (row_index < height)) { unsigned element_index = row_index * width + column_index; sharedMem[threadIdx.y][threadIdx.x] = input[element_index]; } __syncthreads(); unsigned int local_row = blockIdx.y * BLOCK_SIZE + threadIdx.x; // coalesced access unsigned int local_column = blockIdx.x * BLOCK_SIZE + threadIdx.y; // coalesced access if ((local_row < height) && (local_column < width)) { output[local_row * width + local_column] = sharedMem[threadIdx.x][threadIdx.y]; } } __global__ void MatrixCopy_16(float *input, float *output, size_t width, size_t height) { const unsigned int BLOCK_SIZE = 32; __shared__ float sharedMem[BLOCK_SIZE][BLOCK_SIZE + 1]; // Bank conflict solution unsigned int column_index = blockIdx.x * blockDim.x + threadIdx.x; unsigned int row_index = blockIdx.y * blockDim.y + threadIdx.y; if ((column_index < width) && (row_index < height)) { unsigned element_index = row_index * width + column_index; sharedMem[threadIdx.y][threadIdx.x] = input[element_index]; } __syncthreads(); unsigned int local_row = blockIdx.y * BLOCK_SIZE + threadIdx.x; // coalesced access unsigned int local_column = blockIdx.x * BLOCK_SIZE + threadIdx.y; // coalesced access if ((local_row < height) && (local_column < width)) { output[local_row * width + local_column] = sharedMem[threadIdx.x][threadIdx.y]; } } } }
63641126fc550142f50f1a382ab8825013f893ba.cu
#include <cmath> #include <cuda_runtime_api.h> #include "../include/MatrixOps.cuh" namespace blas1{ namespace cudaBlas { __global__ void naiveTranspose(float *input, float *output, size_t width, size_t height) { unsigned int row_id = blockIdx.y * blockDim.y + threadIdx.y; unsigned int column_id = blockIdx.x * blockDim.x + threadIdx.x; if ((row_id < height) && (column_id < width)) { output[row_id + height * column_id] = input[row_id * width + column_id]; } } __global__ void naiveCopy(float *input, float *output, size_t width, size_t height) { unsigned int row_id = blockIdx.y * blockDim.y + threadIdx.y; unsigned int column_id = blockIdx.x * blockDim.x + threadIdx.x; if ((row_id < height) && (column_id < width)) { output[row_id * width + column_id] = input[row_id * width + column_id]; } } __global__ void transpose_32(float *input, float *output, size_t width, size_t height) { const unsigned int BLOCK_SIZE = 32; __shared__ float sharedMem[BLOCK_SIZE][BLOCK_SIZE + 1]; // Bank conflict unsigned int column_index = blockIdx.x * blockDim.x + threadIdx.x; unsigned int row_index = blockIdx.y * blockDim.y + threadIdx.y; if ((column_index < width) && (row_index < height)) { unsigned element_index = row_index * width + column_index; sharedMem[threadIdx.y][threadIdx.x] = input[element_index]; } __syncthreads(); unsigned int local_row = blockIdx.y * BLOCK_SIZE + threadIdx.x; // coalesced access unsigned int local_column = blockIdx.x * BLOCK_SIZE + threadIdx.y; // coalesced access if ((local_row < height) && (local_column < width)) { output[local_column * height + local_row] = sharedMem[threadIdx.x][threadIdx.y]; } } __global__ void transpose_16(float *input, float *output, size_t width, size_t height) { const unsigned int BLOCK_SIZE = 16; __shared__ float sharedMem[BLOCK_SIZE][BLOCK_SIZE + 1]; // Bank conflict unsigned int column_index = blockIdx.x * blockDim.x + threadIdx.x; unsigned int row_index = blockIdx.y * blockDim.y + threadIdx.y; if ((column_index < width) && (row_index < 
height)) { unsigned element_index = row_index * width + column_index; sharedMem[threadIdx.y][threadIdx.x] = input[element_index]; } __syncthreads(); unsigned int local_row = blockIdx.y * BLOCK_SIZE + threadIdx.x; // coalesced access unsigned int local_column = blockIdx.x * BLOCK_SIZE + threadIdx.y; // coalesced access if ((local_row < height) && (local_column < width)) { output[local_column * height + local_row] = sharedMem[threadIdx.x][threadIdx.y]; } } __global__ void transposeRowBlock_32(float *input, float *output, size_t width, size_t heights) { const unsigned int TILE_SIZE = 32; const unsigned int BLOCK_ROWS = 8; __shared__ float tile[TILE_SIZE][TILE_SIZE + 1]; unsigned int x = blockIdx.x * TILE_SIZE + threadIdx.x; unsigned int y = blockIdx.y * TILE_SIZE + threadIdx.y; auto w = gridDim.x * TILE_SIZE; for (int j = 0; j < TILE_SIZE; j += BLOCK_ROWS) { tile[threadIdx.y + j][threadIdx.x] = input[(y + j) * w + x]; } __syncthreads(); auto local_row = blockIdx.y * TILE_SIZE + threadIdx.x; auto local_column = blockIdx.x * TILE_SIZE + threadIdx.y; #pragma unroll for (int j = 0; j < TILE_SIZE; j += BLOCK_ROWS) { output[(local_column + j) * w + local_row] = tile[threadIdx.x][threadIdx.y + j]; } } __global__ void transposeRowBlock_16(float *input, float *output, size_t width, size_t heights) { const unsigned int TILE_SIZE = 16; const unsigned int BLOCK_ROWS = 8; __shared__ float tile[TILE_SIZE][TILE_SIZE + 1]; unsigned int x = blockIdx.x * TILE_SIZE + threadIdx.x; unsigned int y = blockIdx.y * TILE_SIZE + threadIdx.y; auto w = gridDim.x * TILE_SIZE; for (int j = 0; j < TILE_SIZE; j += BLOCK_ROWS) { tile[threadIdx.y + j][threadIdx.x] = input[(y + j) * w + x]; } __syncthreads(); auto local_row = blockIdx.y * TILE_SIZE + threadIdx.x; auto local_column = blockIdx.x * TILE_SIZE + threadIdx.y; #pragma unroll for (int j = 0; j < TILE_SIZE; j += BLOCK_ROWS) { output[(local_column + j) * w + local_row] = tile[threadIdx.x][threadIdx.y + j]; } } __global__ void MatrixCopy_32(float 
*input, float *output, size_t width, size_t height) { const unsigned int BLOCK_SIZE = 32; __shared__ float sharedMem[BLOCK_SIZE][BLOCK_SIZE + 1]; // Bank conflict unsigned int column_index = blockIdx.x * blockDim.x + threadIdx.x; unsigned int row_index = blockIdx.y * blockDim.y + threadIdx.y; if ((column_index < width) && (row_index < height)) { unsigned element_index = row_index * width + column_index; sharedMem[threadIdx.y][threadIdx.x] = input[element_index]; } __syncthreads(); unsigned int local_row = blockIdx.y * BLOCK_SIZE + threadIdx.x; // coalesced access unsigned int local_column = blockIdx.x * BLOCK_SIZE + threadIdx.y; // coalesced access if ((local_row < height) && (local_column < width)) { output[local_row * width + local_column] = sharedMem[threadIdx.x][threadIdx.y]; } } __global__ void MatrixCopy_16(float *input, float *output, size_t width, size_t height) { const unsigned int BLOCK_SIZE = 32; __shared__ float sharedMem[BLOCK_SIZE][BLOCK_SIZE + 1]; // Bank conflict solution unsigned int column_index = blockIdx.x * blockDim.x + threadIdx.x; unsigned int row_index = blockIdx.y * blockDim.y + threadIdx.y; if ((column_index < width) && (row_index < height)) { unsigned element_index = row_index * width + column_index; sharedMem[threadIdx.y][threadIdx.x] = input[element_index]; } __syncthreads(); unsigned int local_row = blockIdx.y * BLOCK_SIZE + threadIdx.x; // coalesced access unsigned int local_column = blockIdx.x * BLOCK_SIZE + threadIdx.y; // coalesced access if ((local_row < height) && (local_column < width)) { output[local_row * width + local_column] = sharedMem[threadIdx.x][threadIdx.y]; } } } }
ee45d2c77ab6ccdc54546f6c6184d2c8180e230a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <cstdlib> #include <time.h> #define __DRIVER_TYPES_H__ #include "helper_cuda.h" const int LOOP = 1; const size_t TILE_DIM = 32; const size_t BLOCK_ROWS = 8; const size_t totalElements = 256 * 1024 * 1024l; const size_t totalMemorySize = sizeof(float) * totalElements; hipEvent_t start, stop; __global__ void copy1(float *odata, const float *idata) { size_t x = blockIdx.x * TILE_DIM + threadIdx.x; // 0..31 size_t y = blockIdx.y * TILE_DIM + threadIdx.y; // 0..7 size_t width = gridDim.x * TILE_DIM; for (size_t j=0; j<TILE_DIM; j+=BLOCK_ROWS) { odata[(y+j)*width + x] = idata[(y+j)*width+x]; } } __global__ void copy2(float *odata, const float *idata) { size_t x = blockIdx.x * TILE_DIM + threadIdx.x; // 0..31 size_t y = blockIdx.y * TILE_DIM + threadIdx.y*4; // 0,4,8,16,20,24,28 size_t width = gridDim.x * TILE_DIM; for (size_t j=0; j<4; ++j) { odata[(y+j)*width + x] = idata[(y+j)*width+x]; } } __global__ void copy3(float *odata, const float *idata) { size_t x = blockIdx.x * TILE_DIM + threadIdx.x; size_t y = blockIdx.y * TILE_DIM + threadIdx.y; size_t width = gridDim.x * TILE_DIM; odata[y*width + x] = idata[y*width+x]; } __global__ void copy3a(float *odata, const float *idata) { size_t tid = blockIdx.x * blockDim.x + threadIdx.x; // adding this if slows this kernel down to less than copy3, // but by a small amount, though makes it work in all memory size cases. // it is a trade-off! If one can guarantee totalElements vs blockDim.x, // then one can get the speed up by removing this check. 
if (tid < totalElements) { odata[tid] = idata[tid]; } } __global__ void copy4(float *odata, const float *idata) { size_t tid = threadIdx.x + blockDim.x * threadIdx.y; tid += blockDim.x * blockDim.y * blockIdx.x; size_t width = blockDim.x * blockDim.y * gridDim.x; for (size_t j=0; j<totalElements-width; j+=width) { odata[tid + j] = idata[tid + j]; } // the final blocks that would overflow size_t j=totalElements-width; if (tid+j < totalElements) { odata[tid + j] = idata[tid + j]; } } float measureCudaMemCpy(float *dev_dst, float *dev_src) { float elapsedTimeTotal = 0; for (size_t i=0; i<LOOP; ++i) { // do a device to device memory copy via kernel, and time it checkCudaErrors(hipEventRecord(start, 0)); checkCudaErrors(hipMemcpy(dev_dst, dev_src, totalMemorySize, hipMemcpyDeviceToDevice)); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipEventRecord(stop, 0)); checkCudaErrors(hipEventSynchronize(stop)); float elapsedTime; checkCudaErrors(hipEventElapsedTime(&elapsedTime, start, stop)); elapsedTimeTotal += elapsedTime; } return elapsedTimeTotal / LOOP; } float measureCopy1(float *dev_dst, float *dev_src, dim3 BLOCKS) { float elapsedTimeTotal = 0; for (size_t i=0; i<LOOP; ++i) { // do a device to device memory copy via kernel, and time it checkCudaErrors(hipEventRecord(start, 0)); dim3 THREADS(TILE_DIM, BLOCK_ROWS, 1); hipLaunchKernelGGL(( copy1) , dim3(BLOCKS), dim3(THREADS) , 0, 0, dev_dst, dev_src); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipEventRecord(stop, 0)); checkCudaErrors(hipEventSynchronize(stop)); float elapsedTime; checkCudaErrors(hipEventElapsedTime(&elapsedTime, start, stop)); elapsedTimeTotal += elapsedTime; } return elapsedTimeTotal / LOOP; } float measureCopy2(float *dev_dst, float *dev_src, dim3 BLOCKS) { float elapsedTimeTotal = 0; for (size_t i=0; i<LOOP; ++i) { // do a device to device memory copy via kernel, and time it checkCudaErrors(hipEventRecord(start, 0)); dim3 THREADS(TILE_DIM, BLOCK_ROWS, 1); hipLaunchKernelGGL(( copy2) , 
dim3(BLOCKS), dim3(THREADS) , 0, 0, dev_dst, dev_src); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipEventRecord(stop, 0)); checkCudaErrors(hipEventSynchronize(stop)); float elapsedTime; checkCudaErrors(hipEventElapsedTime(&elapsedTime, start, stop)); elapsedTimeTotal += elapsedTime; } return elapsedTimeTotal / LOOP; } float measureCopy3(float *dev_dst, float *dev_src, dim3 BLOCKS) { float elapsedTimeTotal = 0; for (size_t i=0; i<LOOP; ++i) { // do a device to device memory copy via kernel, and time it checkCudaErrors(hipEventRecord(start, 0)); dim3 THREADS(TILE_DIM, TILE_DIM, 1); hipLaunchKernelGGL(( copy3) , dim3(BLOCKS), dim3(THREADS) , 0, 0, dev_dst, dev_src); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipEventRecord(stop, 0)); checkCudaErrors(hipEventSynchronize(stop)); float elapsedTime; checkCudaErrors(hipEventElapsedTime(&elapsedTime, start, stop)); elapsedTimeTotal += elapsedTime; } return elapsedTimeTotal / LOOP; } /* changed this to use occupancy API. it calculated the same numbers I had handcoded, but this makes it past/future proof. the copy3a kernel with this code to drive makes the least assumptions about the hardware, and is the fastest. https://devblogs.nvidia.com/parallelforall/cuda-pro-tip-occupancy-api-simplifies-launch-configuration/ was the article that covered the use of the occupancy api, and the code I copied for debug printing. 
*/ float measureCopy3a(float *dev_dst, float *dev_src) { int blockSize=0; // to supress warning, assign to zero int minGridSize; int gridSize; hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, copy3a, 0, 0); gridSize = (totalElements + blockSize - 1) / blockSize; float elapsedTimeTotal = 0; for (int i=0; i<LOOP; ++i) { // do a device to device memory copy via kernel, and time it checkCudaErrors(hipEventRecord(start, 0)); hipLaunchKernelGGL(( copy3a) , dim3(gridSize), dim3(blockSize) , 0, 0, dev_dst, dev_src); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipEventRecord(stop, 0)); checkCudaErrors(hipEventSynchronize(stop)); float elapsedTime; checkCudaErrors(hipEventElapsedTime(&elapsedTime, start, stop)); elapsedTimeTotal += elapsedTime; } int maxActiveBlocks; hipOccupancyMaxActiveBlocksPerMultiprocessor( &maxActiveBlocks, copy3a, blockSize, 0); int device; hipDeviceProp_t props; hipGetDevice(&device); hipGetDeviceProperties(&props, device); float occupancy = (maxActiveBlocks * blockSize / props.warpSize) / (float)(props.maxThreadsPerMultiProcessor / props.warpSize); //printf(" [Debugging Info for copy3a] Launched blocks of size %d. 
Theoretical occupancy: %f\n", blockSize, occupancy); return elapsedTimeTotal / LOOP; } float measureCopy4(float *dev_dst, float *dev_src, dim3 BLOCKS) { float elapsedTimeTotal = 0; for (size_t i=0; i<LOOP; ++i) { // do a device to device memory copy via kernel, and time it checkCudaErrors(hipEventRecord(start, 0)); dim3 THREADS(TILE_DIM, TILE_DIM, 1); hipLaunchKernelGGL(( copy4) , dim3(BLOCKS), dim3(THREADS) , 0, 0, dev_dst, dev_src); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipEventRecord(stop, 0)); checkCudaErrors(hipEventSynchronize(stop)); float elapsedTime; checkCudaErrors(hipEventElapsedTime(&elapsedTime, start, stop)); elapsedTimeTotal += elapsedTime; } return elapsedTimeTotal / LOOP; } int main(int argc, char **argv) { srand(time(NULL)); checkCudaErrors(hipSetDevice(0)); float *host_src, *host_dst; host_src = (float*)malloc(totalMemorySize); host_dst = (float*)malloc(totalMemorySize); float *dev_src, *dev_dst; checkCudaErrors(hipMalloc((void**)&dev_src, totalMemorySize)); checkCudaErrors(hipMalloc((void**)&dev_dst, totalMemorySize)); // fill memory on host side float theRand = rand(); for (size_t i=0; i<totalElements; ++i) { host_src[i] = theRand; } // copy host memory to device checkCudaErrors(hipMemcpy(dev_src, host_src, totalMemorySize, hipMemcpyHostToDevice)); checkCudaErrors(hipEventCreate(&start)); checkCudaErrors(hipEventCreate(&stop)); float timePerLoop; timePerLoop = measureCudaMemCpy(dev_dst, dev_src); printf("mem Elapsed time: %f s\n", timePerLoop / 1000.0); timePerLoop = measureCopy1(dev_dst, dev_src, dim3(totalElements/1024/32/32, 1024, 1)); printf("1 Elapsed time: %f s\n", timePerLoop / 1000.0); timePerLoop = measureCopy1(dev_dst, dev_src, dim3(totalElements/1024/32/32/32, 1024*32, 1)); printf("1 Elapsed time: %f s\n", timePerLoop / 1000.0); timePerLoop = measureCopy2(dev_dst, dev_src, dim3(totalElements/1024, 1024/32/32, 1)); printf("2 Elapsed time: %f s\n", timePerLoop / 1000.0); timePerLoop = measureCopy2(dev_dst, dev_src, 
dim3(totalElements/1024/32/32, 1024, 1)); printf("2 Elapsed time: %f s\n", timePerLoop / 1000.0); timePerLoop = measureCopy2(dev_dst, dev_src, dim3(totalElements/1024/32/32/32, 1024*32, 1)); printf("2 Elapsed time: %f s\n", timePerLoop / 1000.0); // timePerLoop = measureCopy2(dev_dst, dev_src, dim3(totalElements/1024/32/32/64, 1024*64, 1)); // printf("Elapsed time: %f s\n", timePerLoop / 1000.0); timePerLoop = measureCopy3(dev_dst, dev_src, dim3(totalElements/1024/32/32/32, 1024*32, 1)); printf("3 Elapsed time: %f s\n", timePerLoop / 1000.0); timePerLoop = measureCopy3a(dev_dst, dev_src); printf("3a Elapsed time: %f s\n", timePerLoop / 1000.0); timePerLoop = measureCopy4(dev_dst, dev_src, dim3(26, 1, 1)); printf("4 Elapsed time: %f s\n", timePerLoop / 1000.0); timePerLoop = measureCopy4(dev_dst, dev_src, dim3(1024, 1, 1)); printf("4 Elapsed time: %f s\n", timePerLoop / 1000.0); checkCudaErrors(hipEventDestroy(start)); checkCudaErrors(hipEventDestroy(stop)); checkCudaErrors(hipMemcpy(host_dst, dev_dst, totalMemorySize, hipMemcpyDeviceToHost)); for (size_t i=0; i<totalElements; ++i) { if (host_dst[i] != host_src[i]) { printf("*** First mismatch at %ld. Got %f, was expecting %f ***\n", i, host_dst[i], host_src[i]); break; } } checkCudaErrors(hipFree(dev_src)); checkCudaErrors(hipFree(dev_dst)); free(host_src); free(host_dst); return 0; }
ee45d2c77ab6ccdc54546f6c6184d2c8180e230a.cu
#include <iostream> #include <cstdlib> #include <time.h> #define __DRIVER_TYPES_H__ #include "helper_cuda.h" const int LOOP = 1; const size_t TILE_DIM = 32; const size_t BLOCK_ROWS = 8; const size_t totalElements = 256 * 1024 * 1024l; const size_t totalMemorySize = sizeof(float) * totalElements; cudaEvent_t start, stop; __global__ void copy1(float *odata, const float *idata) { size_t x = blockIdx.x * TILE_DIM + threadIdx.x; // 0..31 size_t y = blockIdx.y * TILE_DIM + threadIdx.y; // 0..7 size_t width = gridDim.x * TILE_DIM; for (size_t j=0; j<TILE_DIM; j+=BLOCK_ROWS) { odata[(y+j)*width + x] = idata[(y+j)*width+x]; } } __global__ void copy2(float *odata, const float *idata) { size_t x = blockIdx.x * TILE_DIM + threadIdx.x; // 0..31 size_t y = blockIdx.y * TILE_DIM + threadIdx.y*4; // 0,4,8,16,20,24,28 size_t width = gridDim.x * TILE_DIM; for (size_t j=0; j<4; ++j) { odata[(y+j)*width + x] = idata[(y+j)*width+x]; } } __global__ void copy3(float *odata, const float *idata) { size_t x = blockIdx.x * TILE_DIM + threadIdx.x; size_t y = blockIdx.y * TILE_DIM + threadIdx.y; size_t width = gridDim.x * TILE_DIM; odata[y*width + x] = idata[y*width+x]; } __global__ void copy3a(float *odata, const float *idata) { size_t tid = blockIdx.x * blockDim.x + threadIdx.x; // adding this if slows this kernel down to less than copy3, // but by a small amount, though makes it work in all memory size cases. // it is a trade-off! If one can guarantee totalElements vs blockDim.x, // then one can get the speed up by removing this check. 
if (tid < totalElements) { odata[tid] = idata[tid]; } } __global__ void copy4(float *odata, const float *idata) { size_t tid = threadIdx.x + blockDim.x * threadIdx.y; tid += blockDim.x * blockDim.y * blockIdx.x; size_t width = blockDim.x * blockDim.y * gridDim.x; for (size_t j=0; j<totalElements-width; j+=width) { odata[tid + j] = idata[tid + j]; } // the final blocks that would overflow size_t j=totalElements-width; if (tid+j < totalElements) { odata[tid + j] = idata[tid + j]; } } float measureCudaMemCpy(float *dev_dst, float *dev_src) { float elapsedTimeTotal = 0; for (size_t i=0; i<LOOP; ++i) { // do a device to device memory copy via kernel, and time it checkCudaErrors(cudaEventRecord(start, 0)); checkCudaErrors(cudaMemcpy(dev_dst, dev_src, totalMemorySize, cudaMemcpyDeviceToDevice)); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaEventRecord(stop, 0)); checkCudaErrors(cudaEventSynchronize(stop)); float elapsedTime; checkCudaErrors(cudaEventElapsedTime(&elapsedTime, start, stop)); elapsedTimeTotal += elapsedTime; } return elapsedTimeTotal / LOOP; } float measureCopy1(float *dev_dst, float *dev_src, dim3 BLOCKS) { float elapsedTimeTotal = 0; for (size_t i=0; i<LOOP; ++i) { // do a device to device memory copy via kernel, and time it checkCudaErrors(cudaEventRecord(start, 0)); dim3 THREADS(TILE_DIM, BLOCK_ROWS, 1); copy1 <<< BLOCKS, THREADS >>>(dev_dst, dev_src); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaEventRecord(stop, 0)); checkCudaErrors(cudaEventSynchronize(stop)); float elapsedTime; checkCudaErrors(cudaEventElapsedTime(&elapsedTime, start, stop)); elapsedTimeTotal += elapsedTime; } return elapsedTimeTotal / LOOP; } float measureCopy2(float *dev_dst, float *dev_src, dim3 BLOCKS) { float elapsedTimeTotal = 0; for (size_t i=0; i<LOOP; ++i) { // do a device to device memory copy via kernel, and time it checkCudaErrors(cudaEventRecord(start, 0)); dim3 THREADS(TILE_DIM, BLOCK_ROWS, 1); copy2 <<< BLOCKS, THREADS >>>(dev_dst, dev_src); 
checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaEventRecord(stop, 0)); checkCudaErrors(cudaEventSynchronize(stop)); float elapsedTime; checkCudaErrors(cudaEventElapsedTime(&elapsedTime, start, stop)); elapsedTimeTotal += elapsedTime; } return elapsedTimeTotal / LOOP; } float measureCopy3(float *dev_dst, float *dev_src, dim3 BLOCKS) { float elapsedTimeTotal = 0; for (size_t i=0; i<LOOP; ++i) { // do a device to device memory copy via kernel, and time it checkCudaErrors(cudaEventRecord(start, 0)); dim3 THREADS(TILE_DIM, TILE_DIM, 1); copy3 <<< BLOCKS, THREADS >>>(dev_dst, dev_src); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaEventRecord(stop, 0)); checkCudaErrors(cudaEventSynchronize(stop)); float elapsedTime; checkCudaErrors(cudaEventElapsedTime(&elapsedTime, start, stop)); elapsedTimeTotal += elapsedTime; } return elapsedTimeTotal / LOOP; } /* changed this to use occupancy API. it calculated the same numbers I had handcoded, but this makes it past/future proof. the copy3a kernel with this code to drive makes the least assumptions about the hardware, and is the fastest. https://devblogs.nvidia.com/parallelforall/cuda-pro-tip-occupancy-api-simplifies-launch-configuration/ was the article that covered the use of the occupancy api, and the code I copied for debug printing. 
*/ float measureCopy3a(float *dev_dst, float *dev_src) { int blockSize=0; // to supress warning, assign to zero int minGridSize; int gridSize; cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, copy3a, 0, 0); gridSize = (totalElements + blockSize - 1) / blockSize; float elapsedTimeTotal = 0; for (int i=0; i<LOOP; ++i) { // do a device to device memory copy via kernel, and time it checkCudaErrors(cudaEventRecord(start, 0)); copy3a <<< gridSize, blockSize >>>(dev_dst, dev_src); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaEventRecord(stop, 0)); checkCudaErrors(cudaEventSynchronize(stop)); float elapsedTime; checkCudaErrors(cudaEventElapsedTime(&elapsedTime, start, stop)); elapsedTimeTotal += elapsedTime; } int maxActiveBlocks; cudaOccupancyMaxActiveBlocksPerMultiprocessor( &maxActiveBlocks, copy3a, blockSize, 0); int device; cudaDeviceProp props; cudaGetDevice(&device); cudaGetDeviceProperties(&props, device); float occupancy = (maxActiveBlocks * blockSize / props.warpSize) / (float)(props.maxThreadsPerMultiProcessor / props.warpSize); //printf(" [Debugging Info for copy3a] Launched blocks of size %d. 
Theoretical occupancy: %f\n", blockSize, occupancy); return elapsedTimeTotal / LOOP; } float measureCopy4(float *dev_dst, float *dev_src, dim3 BLOCKS) { float elapsedTimeTotal = 0; for (size_t i=0; i<LOOP; ++i) { // do a device to device memory copy via kernel, and time it checkCudaErrors(cudaEventRecord(start, 0)); dim3 THREADS(TILE_DIM, TILE_DIM, 1); copy4 <<< BLOCKS, THREADS >>>(dev_dst, dev_src); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaEventRecord(stop, 0)); checkCudaErrors(cudaEventSynchronize(stop)); float elapsedTime; checkCudaErrors(cudaEventElapsedTime(&elapsedTime, start, stop)); elapsedTimeTotal += elapsedTime; } return elapsedTimeTotal / LOOP; } int main(int argc, char **argv) { srand(time(NULL)); checkCudaErrors(cudaSetDevice(0)); float *host_src, *host_dst; host_src = (float*)malloc(totalMemorySize); host_dst = (float*)malloc(totalMemorySize); float *dev_src, *dev_dst; checkCudaErrors(cudaMalloc((void**)&dev_src, totalMemorySize)); checkCudaErrors(cudaMalloc((void**)&dev_dst, totalMemorySize)); // fill memory on host side float theRand = rand(); for (size_t i=0; i<totalElements; ++i) { host_src[i] = theRand; } // copy host memory to device checkCudaErrors(cudaMemcpy(dev_src, host_src, totalMemorySize, cudaMemcpyHostToDevice)); checkCudaErrors(cudaEventCreate(&start)); checkCudaErrors(cudaEventCreate(&stop)); float timePerLoop; timePerLoop = measureCudaMemCpy(dev_dst, dev_src); printf("mem Elapsed time: %f s\n", timePerLoop / 1000.0); timePerLoop = measureCopy1(dev_dst, dev_src, dim3(totalElements/1024/32/32, 1024, 1)); printf("1 Elapsed time: %f s\n", timePerLoop / 1000.0); timePerLoop = measureCopy1(dev_dst, dev_src, dim3(totalElements/1024/32/32/32, 1024*32, 1)); printf("1 Elapsed time: %f s\n", timePerLoop / 1000.0); timePerLoop = measureCopy2(dev_dst, dev_src, dim3(totalElements/1024, 1024/32/32, 1)); printf("2 Elapsed time: %f s\n", timePerLoop / 1000.0); timePerLoop = measureCopy2(dev_dst, dev_src, dim3(totalElements/1024/32/32, 
1024, 1)); printf("2 Elapsed time: %f s\n", timePerLoop / 1000.0); timePerLoop = measureCopy2(dev_dst, dev_src, dim3(totalElements/1024/32/32/32, 1024*32, 1)); printf("2 Elapsed time: %f s\n", timePerLoop / 1000.0); // timePerLoop = measureCopy2(dev_dst, dev_src, dim3(totalElements/1024/32/32/64, 1024*64, 1)); // printf("Elapsed time: %f s\n", timePerLoop / 1000.0); timePerLoop = measureCopy3(dev_dst, dev_src, dim3(totalElements/1024/32/32/32, 1024*32, 1)); printf("3 Elapsed time: %f s\n", timePerLoop / 1000.0); timePerLoop = measureCopy3a(dev_dst, dev_src); printf("3a Elapsed time: %f s\n", timePerLoop / 1000.0); timePerLoop = measureCopy4(dev_dst, dev_src, dim3(26, 1, 1)); printf("4 Elapsed time: %f s\n", timePerLoop / 1000.0); timePerLoop = measureCopy4(dev_dst, dev_src, dim3(1024, 1, 1)); printf("4 Elapsed time: %f s\n", timePerLoop / 1000.0); checkCudaErrors(cudaEventDestroy(start)); checkCudaErrors(cudaEventDestroy(stop)); checkCudaErrors(cudaMemcpy(host_dst, dev_dst, totalMemorySize, cudaMemcpyDeviceToHost)); for (size_t i=0; i<totalElements; ++i) { if (host_dst[i] != host_src[i]) { printf("*** First mismatch at %ld. Got %f, was expecting %f ***\n", i, host_dst[i], host_src[i]); break; } } checkCudaErrors(cudaFree(dev_src)); checkCudaErrors(cudaFree(dev_dst)); free(host_src); free(host_dst); return 0; }
b7603c8aaffa09893ff677ba5a6185493c7a56e9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * \file dnn/src/cuda/topk/topk_radix.cu * MegEngine is Licensed under the Apache License, Version 2.0 (the "License") * * Copyright (c) 2014-2021 Megvii Inc. All rights reserved. * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */ #include "./topk_radix.cuh" #include "src/cuda/hipcub/hipcub.hpp" #include "src/cuda/cuda_shfl_compat.cuh" #include "src/cuda/utils.cuh" #include <algorithm> #include <cmath> #if __CUDACC_VER_MAJOR__ < 9 #pragma message "topk is a little slower on cuda earlier than 9.0" // on cuda 9.0 and later, due to thread-divergent branches we should use // __syncwarp; and I am too lazy to implement a correct legacy version, so just // use __syncthreads instead for older cuda #define __syncwarp __syncthreads #endif using namespace megdnn; using namespace cuda; using namespace topk; using namespace internal; namespace cuda_topk_impl { const uint32_t WARP_SIZE = 32; static __device__ __forceinline__ uint32_t u32_from_64_low(uint64_t x) { return x; } static __device__ __forceinline__ uint32_t u32_from_64_high(uint64_t x) { return x >> 32; } template <uint32_t x> struct static_log2 { static const uint32_t val = static_log2<x / 2>::val + 1; }; template <> struct static_log2<1> { static const uint32_t val = 0; }; template <uint32_t SIZE, typename T = uint32_t> struct DeviceScanPackedItem; template <typename T> struct DeviceScanPackedItem<1, T> { __device__ __forceinline__ T load(T* data, uint32_t tid) { return data[tid]; } __device__ __forceinline__ void store(T* data, uint32_t tid, uint32_t s) { data[tid] = s; } }; template <> struct DeviceScanPackedItem<4, uint8_t> { uint8_t d0, d1, d2, d3; __device__ __forceinline__ uint32_t load(uint8_t* data, uint32_t tid) { uint32_t item = 
reinterpret_cast<uint32_t*>(data)[tid]; d3 = item >> 24; d2 = (item >> 16) & 0xFF; d1 = (item >> 8) & 0xFF; d0 = item & 0xFF; return d0 + d1 + d2 + d3; } __device__ __forceinline__ void store(uint8_t* data, uint32_t tid, uint32_t s) { uint8_t o3 = s, o2 = o3 - d3, o1 = o2 - d2, o0 = o1 - d1; reinterpret_cast<uint32_t*>(data)[tid] = (o3 << 24) | (o2 << 16) | (o1 << 8) | o0; } }; //! inclusive scan within a warp using register shuffle template <uint32_t SIZE> __device__ __forceinline__ uint32_t device_scan_shfl_core(uint32_t s, uint32_t tid) { static const uint32_t SIZE_LOG2 = static_log2<SIZE>::val; uint32_t self_lane = tid % SIZE; #pragma unroll for (uint32_t step_log2 = 1; step_log2 <= SIZE_LOG2; ++step_log2) { uint32_t from_lane = (self_lane & ~((1u << step_log2) - 1)) + ((1 << (step_log2 - 1)) - 1); uint32_t valid_mask = (from_lane >= self_lane) - 1; uint32_t s_below = __shfl_up(s, self_lane - from_lane, SIZE); s += s_below & valid_mask; } return s; } /*! * \brief compute inplace inclusive prefix sum of \p data * * Note: no synchronization at the end */ template <uint32_t SIZE, uint32_t NR_SHARD> __device__ __forceinline__ void device_scan(uint32_t* data, uint32_t tid, uint32_t shard) { const uint32_t NR_WARP = SIZE / NR_SHARD / WARP_SIZE; #if __cplusplus > 199711L static_assert(NR_WARP <= WARP_SIZE || (NR_WARP & (NR_WARP - 1)), "bad params"); #endif __syncthreads(); DeviceScanPackedItem<NR_SHARD> packed_item; uint32_t s = packed_item.load(data, tid); s = device_scan_shfl_core<WARP_SIZE>(s, tid); // sync between warps __shared__ uint32_t warp_sums_storage[NR_SHARD][NR_WARP]; uint32_t warp_id = tid / WARP_SIZE; uint32_t* warp_sums = warp_sums_storage[shard]; if ((tid & (WARP_SIZE - 1)) == WARP_SIZE - 1) { warp_sums[warp_id] = s; } __syncthreads(); for (uint32_t i = 0; i < warp_id; ++i) { s += warp_sums[i]; } packed_item.store(data, tid, s); } template <uint32_t PACK_SIZE, typename T> __device__ __forceinline__ void device_scan_packed_accu32(T* data, uint32_t tid) 
{ DeviceScanPackedItem<PACK_SIZE, T> scan_pack; __syncwarp(); uint32_t sum = scan_pack.load(data, tid); sum = device_scan_shfl_core<WARP_SIZE>(sum, tid); scan_pack.store(data, tid, sum); __syncwarp(); } namespace kth { const uint32_t BUCKET_BITS = 8, NR_BUCKET = 1 << BUCKET_BITS, LOCAL_CNT_SHARD = 16, BLOCK_DIM = NR_BUCKET * 4; template <uint32_t v> struct enforce_const_u32 { static const uint32_t val = v; }; /*! * \brief compute scattered histogram for the whole input * * launch config: grid(X, batch), thread(BLOCK_DIM) * * Keys not starting with given prefix would be treated as max * * \param[in] input [batch, length] * \param[out] buckets [batch, X, NR_BUCKET] */ template <typename ctype, bool prefix_valid, uint32_t shift> static __global__ void compute_histogram(const ctype* input, uint32_t* bucket_cnt, uint32_t length, int32_t lda, uint32_t* prefix_ptr) { // note that this layout eliminates bank conflict __shared__ uint32_t local_cnt[NR_BUCKET][LOCAL_CNT_SHARD]; int32_t batch = blockIdx.y; input += batch * lda; bucket_cnt += (batch * gridDim.x + blockIdx.x) * NR_BUCKET; uint32_t prefix; if (prefix_valid) { prefix = prefix_ptr[batch]; } { // init local_cnt uint32_t* p = &local_cnt[0][0]; for (uint32_t i = threadIdx.x; i < LOCAL_CNT_SHARD * NR_BUCKET; i += BLOCK_DIM) { p[i] = 0; } __syncthreads(); } { // accumulate uint32_t i = blockIdx.x * BLOCK_DIM + threadIdx.x, stride = BLOCK_DIM * gridDim.x; uint32_t* dst = &local_cnt[0][threadIdx.x % LOCAL_CNT_SHARD]; while (i < length) { uint32_t key = RadixConverter<ctype>::to_radix(input[i]); if (prefix_valid) { const uint32_t mask = ((~0u) << ((prefix_valid ? 
shift : 0) + BUCKET_BITS)); key |= ((key & enforce_const_u32<mask>::val) == prefix) - 1; } uint32_t idx = (key >> shift) & ((1 << BUCKET_BITS) - 1); atomicAdd(dst + idx * LOCAL_CNT_SHARD, 1); i += stride; } } __syncthreads(); if (threadIdx.x < NR_BUCKET) { uint32_t s = 0; #pragma unroll for (int i = 0; i < LOCAL_CNT_SHARD; ++i) { s += local_cnt[threadIdx.x][(i + threadIdx.x) % LOCAL_CNT_SHARD]; } bucket_cnt[threadIdx.x] = s; } } /*! * \brief update the values in \p prefix to k'th value in according to bucket * count, and update \p k * * launch config: grid(batch), thread(NR_BUCKET) */ template <bool first, bool last, uint32_t shift, typename ctype> static __global__ void update_prefix_and_k(const uint32_t* bucket_cnt, uint32_t* prefix, uint32_t* k, uint32_t k_init, uint32_t bucket_sharding_size, ctype* result) { __shared__ uint32_t cumsum_bucket_cnt[NR_BUCKET + 1]; uint32_t batch = blockIdx.x; bucket_cnt += batch * bucket_sharding_size * NR_BUCKET; uint32_t sum = 0; for (uint32_t i = 0; i < bucket_sharding_size; ++i) { sum += bucket_cnt[i * NR_BUCKET + threadIdx.x]; } if (!threadIdx.x) { cumsum_bucket_cnt[0] = 0; } const uint32_t i = threadIdx.x + 1; cumsum_bucket_cnt[i] = sum; device_scan<NR_BUCKET, 1>(cumsum_bucket_cnt + 1, threadIdx.x, 0); __syncthreads(); uint32_t kv = first ? k_init : k[batch]; if ((cumsum_bucket_cnt[i] >= kv) & (cumsum_bucket_cnt[i - 1] < kv)) { uint32_t b = (i - 1) << shift; if (first) { prefix[batch] = b; } else if (last) { result[batch] = RadixConverter<ctype>::from_radix(prefix[batch] | b); } else { prefix[batch] |= b; } if (!last) { k[batch] = kv - cumsum_bucket_cnt[i - 1]; } } if ((cumsum_bucket_cnt[NR_BUCKET] < kv) | (cumsum_bucket_cnt[i] != cumsum_bucket_cnt[i - 1] + sum)) { // impossible int* bad = 0x0; *bad = 23; } } static uint32_t get_grid_dim_x(uint32_t length) { return std::max<uint32_t>(length / (128 * BLOCK_DIM), 1); } } // namespace kth /*! 
* \brief select values smaller or larger than given threshold * * Note: we use register shuffle extensively to perform both reduce and scan. */ namespace select { struct LessPred { template <typename ctype> __device__ __forceinline__ static bool cmp(ctype x, ctype y) { return x < y; } }; struct GreaterPred { template <typename ctype> __device__ __forceinline__ static bool cmp(ctype x, ctype y) { return x > y; } }; const uint32_t REDUCE_WARP_SIZE = 16, REDUCE_SIZE = WARP_SIZE * 4, REDUCE_SHARD = 64; /*! * \brief reduce number of elements satisfying Pred in (N, M) mat to * (N, ceil(M / REDUCE_SIZE)) * * launch config: grid(X, batch), * thread(REDUCE_WARP_SIZE, REDUCE_SHARD) * * Each block computes REDUCE_SHARD outputs */ template <typename ctype, class Pred> static __global__ void kern_reduce_block_cnt(const ctype* input_data, const ctype* input_thresh, uint32_t length, int32_t lda, uint64_t* output, uint32_t output_width) { static const uint32_t BLOCK_DIM_X = REDUCE_WARP_SIZE, BLOCK_DIM_Y = REDUCE_SHARD; uint32_t batch = blockIdx.y, out_col = blockIdx.x * BLOCK_DIM_Y + threadIdx.y, col_begin = out_col * REDUCE_SIZE, col_end = min(col_begin + REDUCE_SIZE, length), tid_local = threadIdx.x; if (out_col >= output_width) { return; } uint32_t thresh = RadixConverter<ctype>::to_radix(input_thresh[batch]); input_data += static_cast<int32_t>(batch) * lda; uint32_t sum_eq = 0, sum_lt = 0; for (uint32_t i = col_begin + tid_local; i < col_end; i += BLOCK_DIM_X) { uint32_t iv = RadixConverter<ctype>::to_radix(input_data[i]); sum_eq += iv == thresh; sum_lt += Pred::cmp(iv, thresh); } #pragma unroll for (uint32_t step = REDUCE_WARP_SIZE / 2; step >= 1; step >>= 1) { sum_eq += __shfl_down(sum_eq, step, REDUCE_WARP_SIZE); sum_lt += __shfl_down(sum_lt, step, REDUCE_WARP_SIZE); } // reduce warp results to a single scalar if (!tid_local) { output[batch * output_width + out_col] = (static_cast<uint64_t>(sum_eq) << 32) | sum_lt; } } static MEGDNN_NOINLINE hipError_t invoke_cub_scan(const 
uint64_t* input, uint64_t* output, void* workspace, size_t& workspace_size, uint32_t size, hipStream_t stream) { return hipcub::DeviceScan::InclusiveSum(workspace, workspace_size, input, output, size, stream); } static __global__ void kern_init_zero(uint64_t* dst) { dst[0] = 0; } /*! * \brief copy top-k values of each row from input to output * * launch config: grid(X, batch), * thread(WARP_SIZE, COPY_SHARD) */ template <typename ctype, class Pred, int COPY_SHARD> static __global__ void kern_copy(const ctype* input_data, const ctype* input_thresh, const uint64_t* scan, uint32_t scan_width, ctype* output_value, int32_t* output_idx, uint32_t length, uint32_t k, int32_t lda) { #if __cplusplus > 199711L static_assert(REDUCE_SIZE < 256, "local_sum_storage can not be uint8_t"); #endif static const uint32_t BLOCK_DIM_X = WARP_SIZE, BLOCK_DIM_Y = COPY_SHARD; uint32_t scan_col = blockIdx.x * BLOCK_DIM_Y + threadIdx.y; if (scan_col >= scan_width) { return; } uint32_t batch = blockIdx.y, inp_col_begin = min(scan_col * REDUCE_SIZE, length), inp_col_length = min(inp_col_begin + REDUCE_SIZE, length) - inp_col_begin, tid_local = threadIdx.x; uint32_t thresh = RadixConverter<ctype>::to_radix(input_thresh[batch]); input_data += static_cast<int32_t>(batch) * lda + static_cast<int>(inp_col_begin); __shared__ uint8_t local_sum_storage[BLOCK_DIM_Y][2][REDUCE_SIZE + 4]; uint8_t *local_sum_eq = local_sum_storage[threadIdx.y][0], *local_sum_lt = local_sum_storage[threadIdx.y][1]; if (!tid_local) { local_sum_eq[3] = 0; local_sum_lt[3] = 0; } local_sum_eq += 4; local_sum_lt += 4; const uint32_t WORKLOAD = REDUCE_SIZE / WARP_SIZE; #pragma unroll for (uint32_t j = 0; j < WORKLOAD; ++j) { uint32_t i = j * BLOCK_DIM_X + tid_local; if (i < inp_col_length) { uint32_t iv = RadixConverter<ctype>::to_radix(input_data[i]); local_sum_eq[i] = iv == thresh; local_sum_lt[i] = Pred::cmp(iv, thresh); } else { local_sum_eq[i] = 0; local_sum_lt[i] = 0; } } device_scan_packed_accu32<WORKLOAD, 
uint8_t>(local_sum_eq, tid_local); device_scan_packed_accu32<WORKLOAD, uint8_t>(local_sum_lt, tid_local); scan += batch * scan_width; uint64_t scan_prev_pack = scan[static_cast<int>(scan_col) - 1], k_offset_pack = scan_prev_pack - scan[-1], scan_self_pack = scan[scan_col] - scan_prev_pack; #define unpack(name) \ uint32_t name##_eq = u32_from_64_high(name##_pack), \ name##_lt = u32_from_64_low(name##_pack) unpack(k_offset); unpack(scan_self); #undef unpack uint32_t allowed_eq = k - min(k, (u32_from_64_low(scan[scan_width - 1]) - u32_from_64_low(scan[-1]))), ls_lt_max = k - min(k_offset_lt, k), ls_eq_max = allowed_eq - min(allowed_eq, k_offset_eq); if ((scan_self_lt && ls_lt_max) || (scan_self_eq && ls_eq_max)) { #pragma unroll for (uint32_t j = 0; j < WORKLOAD; ++j) { int32_t i = j * BLOCK_DIM_X + tid_local; uint32_t cur_lt = local_sum_lt[i], cur_eq = local_sum_eq[i]; bool is_lt = cur_lt <= ls_lt_max && cur_lt != local_sum_lt[i - 1]; bool is_eq = cur_eq <= ls_eq_max && cur_eq != local_sum_eq[i - 1]; // exactly one should be true if (is_lt || is_eq) { uint32_t off_lt = cur_lt + k_offset_lt - 1; uint32_t off_eq = cur_eq + k_offset_eq - 1 + (k - allowed_eq); uint32_t ocol = is_lt ? off_lt : off_eq; output_value[batch * k + ocol] = input_data[i]; output_idx[batch * k + ocol] = i + inp_col_begin; } } } } //! get workspace for scan, aligned to uint64_t static size_t get_scan_workspace(uint32_t size) { size_t wk = 0; hipError_t err = invoke_cub_scan(NULL, NULL, NULL, wk, size, NULL); if (err != hipSuccess) { fprintf(stderr, "topk: cub scan failed: %s (%d)\n", hipGetErrorString(err), static_cast<int>(err)); megdnn_trap(); } return ((wk - 1) / sizeof(uint64_t) + 1) * sizeof(uint64_t); } } // namespace select } // namespace cuda_topk_impl uint32_t topk::find_kth_radix_workspace(uint32_t batch, uint32_t length, uint32_t grid_dim_y_limit) { using namespace cuda_topk_impl::kth; uint32_t limit = batch > grid_dim_y_limit ? 
grid_dim_y_limit : batch; return (limit * get_grid_dim_x(length) * NR_BUCKET + limit * 2) * sizeof(uint32_t); } template <typename ctype> hipError_t topk::find_kth_radix(const ctype* input, ctype* output, void* workspace, uint32_t batch, uint32_t length, int32_t lda, int32_t k, uint32_t grid_dim_y_limit, hipStream_t stream) { using namespace cuda_topk_impl::kth; if (!k) { return hipErrorUnknown; } if (k < 0) { k = length + k + 1; } if (!(BUCKET_BITS == 8 && sizeof(ctype) == 4)) { // no c++11 in megdnn cuda; so we just trap instead of using static // assert megdnn_trap(); } uint32_t batch_idx = 0; uint32_t grid_dim_x = get_grid_dim_x(length); uint32_t grid_dim_y = 1; while (batch_idx < batch) { if (batch - batch_idx >= grid_dim_y_limit) { grid_dim_y = grid_dim_y_limit; } else { grid_dim_y = batch - batch_idx; } dim3 grid_dim(grid_dim_x, grid_dim_y); uint32_t* dev_k = static_cast<uint32_t*>(workspace); uint32_t* dev_prefix = dev_k + grid_dim_y; uint32_t* bucket_cnt = dev_prefix + grid_dim_y; hipLaunchKernelGGL(( compute_histogram<ctype, false, 24>), dim3(grid_dim), dim3(BLOCK_DIM), 0, stream, input + batch_idx * lda, bucket_cnt, length, lda, nullptr); // use float to make compiler happy; it is not used since last == false hipLaunchKernelGGL(( update_prefix_and_k<true, false, 24, float>) , dim3(grid_dim_y), dim3(NR_BUCKET), 0, stream, bucket_cnt, dev_prefix, dev_k, k, grid_dim_x, nullptr); hipLaunchKernelGGL(( compute_histogram<ctype, true, 16>), dim3(grid_dim), dim3(BLOCK_DIM), 0, stream, input + batch_idx * lda, bucket_cnt, length, lda, dev_prefix); hipLaunchKernelGGL(( update_prefix_and_k<false, false, 16, float>) , dim3(grid_dim_y), dim3(NR_BUCKET), 0, stream, bucket_cnt, dev_prefix, dev_k, k, grid_dim_x, nullptr); hipLaunchKernelGGL(( compute_histogram<ctype, true, 8>), dim3(grid_dim), dim3(BLOCK_DIM), 0, stream, input + batch_idx * lda, bucket_cnt, length, lda, dev_prefix); hipLaunchKernelGGL(( update_prefix_and_k<false, false, 8, float>) , dim3(grid_dim_y), 
dim3(NR_BUCKET), 0, stream, bucket_cnt, dev_prefix, dev_k, k, grid_dim_x, nullptr); hipLaunchKernelGGL(( compute_histogram<ctype, true, 0>), dim3(grid_dim), dim3(BLOCK_DIM), 0, stream, input + batch_idx * lda, bucket_cnt, length, lda, dev_prefix); hipLaunchKernelGGL(( update_prefix_and_k<false, true, 0, ctype>) , dim3(grid_dim_y), dim3(NR_BUCKET), 0, stream, bucket_cnt, dev_prefix, dev_k, k, grid_dim_x, output + batch_idx); batch_idx += grid_dim_y; } return hipGetLastError(); } template <typename ctype> hipError_t topk::topk_select(const ctype* input, const ctype* thresh, ctype* output_value, int32_t* output_idx, void* workspace, uint32_t batch, uint32_t length, int32_t lda, int32_t k, uint32_t batch_upper_limit, hipStream_t stream) { using namespace cuda_topk_impl; using namespace cuda_topk_impl::select; uint32_t length_split = DIVUP(length, REDUCE_SIZE); void (*kptr_reduce_block_cnt)(const ctype*, const ctype*, uint32_t, int32_t, uint64_t*, uint32_t); void (*kptr_copy)(const ctype*, const ctype*, const uint64_t*, uint32_t, ctype*, int32_t*, uint32_t, uint32_t, int32_t); int kern_copy_shard; { int grid, block; hipError_t err = hipOccupancyMaxPotentialBlockSize( &grid, &block, kern_copy<ctype, GreaterPred, 32>); if (err) { return err; } kern_copy_shard = block / (WARP_SIZE * 8) * 8; if (!kern_copy_shard) { fprintf(stderr, "topk: failed to launch: block=%d\n", block); return hipErrorLaunchOutOfResources; } } #define CASE_SHARD_ON(pred, n) \ case n: \ kptr_copy = kern_copy<ctype, pred, n>; \ break #define CASE_SHARD(pred) \ switch (kern_copy_shard) { \ CASE_SHARD_ON(pred, 8); \ CASE_SHARD_ON(pred, 16); \ CASE_SHARD_ON(pred, 24); \ CASE_SHARD_ON(pred, 32); \ default: \ fprintf(stderr, "topk: failed to launch: shard=%d\n", \ kern_copy_shard); \ return hipErrorLaunchOutOfResources; \ } if (k < 0) { k = -k; kptr_reduce_block_cnt = kern_reduce_block_cnt<ctype, GreaterPred>; CASE_SHARD(GreaterPred); } else { kptr_reduce_block_cnt = kern_reduce_block_cnt<ctype, LessPred>; 
CASE_SHARD(LessPred); } #undef CASE_SHARD #undef CASE_SHARD_ON uint32_t batch_idx = 0; uint32_t batch_real = 1; while (batch_idx < batch) { if (batch - batch_idx >= batch_upper_limit) { batch_real = batch_upper_limit; } else { batch_real = batch - batch_idx; } size_t scan_size = batch_real * length_split; size_t scan_wk = get_scan_workspace(scan_size); uint64_t *scan_inp = static_cast<uint64_t*>(workspace) + scan_wk / sizeof(uint64_t), *scan_out = scan_inp + scan_size; // reduce to scan_inp hipLaunchKernelGGL(( kptr_reduce_block_cnt), dim3(dim3(DIVUP(length_split, REDUCE_SHARD), batch_real)), dim3(dim3(REDUCE_WARP_SIZE, REDUCE_SHARD)), 0, stream, input + batch_idx * lda, thresh + batch_idx, length, lda, scan_inp, length_split); // scan to scan_out scan_out += 1; // set scan[-1] to 0 hipError_t err = invoke_cub_scan(scan_inp, scan_out, workspace, scan_wk, scan_size, stream); if (err != hipSuccess) { return err; } hipLaunchKernelGGL(( kern_init_zero), dim3(1), dim3(1), 0, stream, scan_out - 1); // copy result hipLaunchKernelGGL(( kptr_copy), dim3(dim3(DIVUP(length_split, kern_copy_shard), batch_real)), dim3(dim3(WARP_SIZE, kern_copy_shard)), 0, stream, input + batch_idx * lda, thresh + batch_idx, scan_out, length_split, output_value + std::abs(k) * batch_idx, output_idx + std::abs(k) * batch_idx, length, k, lda); batch_idx += batch_real; } return hipGetLastError(); } uint32_t topk::topk_select_workspace(uint32_t batch, uint32_t length) { using namespace cuda_topk_impl::select; size_t scan_size = batch * DIVUP(length, REDUCE_SIZE); return get_scan_workspace(scan_size) + sizeof(uint64_t) * (scan_size * 2 + 1); } namespace megdnn { namespace cuda { namespace topk { #define INST(t) \ template hipError_t find_kth_radix<t>(const t*, t*, void*, uint32_t, \ uint32_t, int32_t, int32_t, \ uint32_t, hipStream_t); \ template hipError_t topk_select<t>(const t*, const t*, t*, int32_t*, \ void*, uint32_t, uint32_t, int32_t, \ int32_t, uint32_t, hipStream_t) INST(float); 
INST(int32_t); #undef INST } // namespace topk } // namespace cuda } // namespace megdnn // vim: ft=cuda syntax=cuda.doxygen
b7603c8aaffa09893ff677ba5a6185493c7a56e9.cu
/** * \file dnn/src/cuda/topk/topk_radix.cu * MegEngine is Licensed under the Apache License, Version 2.0 (the "License") * * Copyright (c) 2014-2021 Megvii Inc. All rights reserved. * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */ #include "./topk_radix.cuh" #include "src/cuda/cub/device/device_scan.cuh" #include "src/cuda/cuda_shfl_compat.cuh" #include "src/cuda/utils.cuh" #include <algorithm> #include <cmath> #if __CUDACC_VER_MAJOR__ < 9 #pragma message "topk is a little slower on cuda earlier than 9.0" // on cuda 9.0 and later, due to thread-divergent branches we should use // __syncwarp; and I am too lazy to implement a correct legacy version, so just // use __syncthreads instead for older cuda #define __syncwarp __syncthreads #endif using namespace megdnn; using namespace cuda; using namespace topk; using namespace internal; namespace cuda_topk_impl { const uint32_t WARP_SIZE = 32; static __device__ __forceinline__ uint32_t u32_from_64_low(uint64_t x) { return x; } static __device__ __forceinline__ uint32_t u32_from_64_high(uint64_t x) { return x >> 32; } template <uint32_t x> struct static_log2 { static const uint32_t val = static_log2<x / 2>::val + 1; }; template <> struct static_log2<1> { static const uint32_t val = 0; }; template <uint32_t SIZE, typename T = uint32_t> struct DeviceScanPackedItem; template <typename T> struct DeviceScanPackedItem<1, T> { __device__ __forceinline__ T load(T* data, uint32_t tid) { return data[tid]; } __device__ __forceinline__ void store(T* data, uint32_t tid, uint32_t s) { data[tid] = s; } }; template <> struct DeviceScanPackedItem<4, uint8_t> { uint8_t d0, d1, d2, d3; __device__ __forceinline__ uint32_t load(uint8_t* data, uint32_t tid) { uint32_t item = reinterpret_cast<uint32_t*>(data)[tid]; d3 = item >> 24; d2 = (item >> 16) & 0xFF; d1 = (item >> 8) & 
0xFF; d0 = item & 0xFF; return d0 + d1 + d2 + d3; } __device__ __forceinline__ void store(uint8_t* data, uint32_t tid, uint32_t s) { uint8_t o3 = s, o2 = o3 - d3, o1 = o2 - d2, o0 = o1 - d1; reinterpret_cast<uint32_t*>(data)[tid] = (o3 << 24) | (o2 << 16) | (o1 << 8) | o0; } }; //! inclusive scan within a warp using register shuffle template <uint32_t SIZE> __device__ __forceinline__ uint32_t device_scan_shfl_core(uint32_t s, uint32_t tid) { static const uint32_t SIZE_LOG2 = static_log2<SIZE>::val; uint32_t self_lane = tid % SIZE; #pragma unroll for (uint32_t step_log2 = 1; step_log2 <= SIZE_LOG2; ++step_log2) { uint32_t from_lane = (self_lane & ~((1u << step_log2) - 1)) + ((1 << (step_log2 - 1)) - 1); uint32_t valid_mask = (from_lane >= self_lane) - 1; uint32_t s_below = __shfl_up(s, self_lane - from_lane, SIZE); s += s_below & valid_mask; } return s; } /*! * \brief compute inplace inclusive prefix sum of \p data * * Note: no synchronization at the end */ template <uint32_t SIZE, uint32_t NR_SHARD> __device__ __forceinline__ void device_scan(uint32_t* data, uint32_t tid, uint32_t shard) { const uint32_t NR_WARP = SIZE / NR_SHARD / WARP_SIZE; #if __cplusplus > 199711L static_assert(NR_WARP <= WARP_SIZE || (NR_WARP & (NR_WARP - 1)), "bad params"); #endif __syncthreads(); DeviceScanPackedItem<NR_SHARD> packed_item; uint32_t s = packed_item.load(data, tid); s = device_scan_shfl_core<WARP_SIZE>(s, tid); // sync between warps __shared__ uint32_t warp_sums_storage[NR_SHARD][NR_WARP]; uint32_t warp_id = tid / WARP_SIZE; uint32_t* warp_sums = warp_sums_storage[shard]; if ((tid & (WARP_SIZE - 1)) == WARP_SIZE - 1) { warp_sums[warp_id] = s; } __syncthreads(); for (uint32_t i = 0; i < warp_id; ++i) { s += warp_sums[i]; } packed_item.store(data, tid, s); } template <uint32_t PACK_SIZE, typename T> __device__ __forceinline__ void device_scan_packed_accu32(T* data, uint32_t tid) { DeviceScanPackedItem<PACK_SIZE, T> scan_pack; __syncwarp(); uint32_t sum = scan_pack.load(data, 
tid); sum = device_scan_shfl_core<WARP_SIZE>(sum, tid); scan_pack.store(data, tid, sum); __syncwarp(); } namespace kth { const uint32_t BUCKET_BITS = 8, NR_BUCKET = 1 << BUCKET_BITS, LOCAL_CNT_SHARD = 16, BLOCK_DIM = NR_BUCKET * 4; template <uint32_t v> struct enforce_const_u32 { static const uint32_t val = v; }; /*! * \brief compute scattered histogram for the whole input * * launch config: grid(X, batch), thread(BLOCK_DIM) * * Keys not starting with given prefix would be treated as max * * \param[in] input [batch, length] * \param[out] buckets [batch, X, NR_BUCKET] */ template <typename ctype, bool prefix_valid, uint32_t shift> static __global__ void compute_histogram(const ctype* input, uint32_t* bucket_cnt, uint32_t length, int32_t lda, uint32_t* prefix_ptr) { // note that this layout eliminates bank conflict __shared__ uint32_t local_cnt[NR_BUCKET][LOCAL_CNT_SHARD]; int32_t batch = blockIdx.y; input += batch * lda; bucket_cnt += (batch * gridDim.x + blockIdx.x) * NR_BUCKET; uint32_t prefix; if (prefix_valid) { prefix = prefix_ptr[batch]; } { // init local_cnt uint32_t* p = &local_cnt[0][0]; for (uint32_t i = threadIdx.x; i < LOCAL_CNT_SHARD * NR_BUCKET; i += BLOCK_DIM) { p[i] = 0; } __syncthreads(); } { // accumulate uint32_t i = blockIdx.x * BLOCK_DIM + threadIdx.x, stride = BLOCK_DIM * gridDim.x; uint32_t* dst = &local_cnt[0][threadIdx.x % LOCAL_CNT_SHARD]; while (i < length) { uint32_t key = RadixConverter<ctype>::to_radix(input[i]); if (prefix_valid) { const uint32_t mask = ((~0u) << ((prefix_valid ? shift : 0) + BUCKET_BITS)); key |= ((key & enforce_const_u32<mask>::val) == prefix) - 1; } uint32_t idx = (key >> shift) & ((1 << BUCKET_BITS) - 1); atomicAdd(dst + idx * LOCAL_CNT_SHARD, 1); i += stride; } } __syncthreads(); if (threadIdx.x < NR_BUCKET) { uint32_t s = 0; #pragma unroll for (int i = 0; i < LOCAL_CNT_SHARD; ++i) { s += local_cnt[threadIdx.x][(i + threadIdx.x) % LOCAL_CNT_SHARD]; } bucket_cnt[threadIdx.x] = s; } } /*! 
* \brief update the values in \p prefix to k'th value in according to bucket * count, and update \p k * * launch config: grid(batch), thread(NR_BUCKET) */ template <bool first, bool last, uint32_t shift, typename ctype> static __global__ void update_prefix_and_k(const uint32_t* bucket_cnt, uint32_t* prefix, uint32_t* k, uint32_t k_init, uint32_t bucket_sharding_size, ctype* result) { __shared__ uint32_t cumsum_bucket_cnt[NR_BUCKET + 1]; uint32_t batch = blockIdx.x; bucket_cnt += batch * bucket_sharding_size * NR_BUCKET; uint32_t sum = 0; for (uint32_t i = 0; i < bucket_sharding_size; ++i) { sum += bucket_cnt[i * NR_BUCKET + threadIdx.x]; } if (!threadIdx.x) { cumsum_bucket_cnt[0] = 0; } const uint32_t i = threadIdx.x + 1; cumsum_bucket_cnt[i] = sum; device_scan<NR_BUCKET, 1>(cumsum_bucket_cnt + 1, threadIdx.x, 0); __syncthreads(); uint32_t kv = first ? k_init : k[batch]; if ((cumsum_bucket_cnt[i] >= kv) & (cumsum_bucket_cnt[i - 1] < kv)) { uint32_t b = (i - 1) << shift; if (first) { prefix[batch] = b; } else if (last) { result[batch] = RadixConverter<ctype>::from_radix(prefix[batch] | b); } else { prefix[batch] |= b; } if (!last) { k[batch] = kv - cumsum_bucket_cnt[i - 1]; } } if ((cumsum_bucket_cnt[NR_BUCKET] < kv) | (cumsum_bucket_cnt[i] != cumsum_bucket_cnt[i - 1] + sum)) { // impossible int* bad = 0x0; *bad = 23; } } static uint32_t get_grid_dim_x(uint32_t length) { return std::max<uint32_t>(length / (128 * BLOCK_DIM), 1); } } // namespace kth /*! * \brief select values smaller or larger than given threshold * * Note: we use register shuffle extensively to perform both reduce and scan. */ namespace select { struct LessPred { template <typename ctype> __device__ __forceinline__ static bool cmp(ctype x, ctype y) { return x < y; } }; struct GreaterPred { template <typename ctype> __device__ __forceinline__ static bool cmp(ctype x, ctype y) { return x > y; } }; const uint32_t REDUCE_WARP_SIZE = 16, REDUCE_SIZE = WARP_SIZE * 4, REDUCE_SHARD = 64; /*! 
* \brief reduce number of elements satisfying Pred in (N, M) mat to * (N, ceil(M / REDUCE_SIZE)) * * launch config: grid(X, batch), * thread(REDUCE_WARP_SIZE, REDUCE_SHARD) * * Each block computes REDUCE_SHARD outputs */ template <typename ctype, class Pred> static __global__ void kern_reduce_block_cnt(const ctype* input_data, const ctype* input_thresh, uint32_t length, int32_t lda, uint64_t* output, uint32_t output_width) { static const uint32_t BLOCK_DIM_X = REDUCE_WARP_SIZE, BLOCK_DIM_Y = REDUCE_SHARD; uint32_t batch = blockIdx.y, out_col = blockIdx.x * BLOCK_DIM_Y + threadIdx.y, col_begin = out_col * REDUCE_SIZE, col_end = min(col_begin + REDUCE_SIZE, length), tid_local = threadIdx.x; if (out_col >= output_width) { return; } uint32_t thresh = RadixConverter<ctype>::to_radix(input_thresh[batch]); input_data += static_cast<int32_t>(batch) * lda; uint32_t sum_eq = 0, sum_lt = 0; for (uint32_t i = col_begin + tid_local; i < col_end; i += BLOCK_DIM_X) { uint32_t iv = RadixConverter<ctype>::to_radix(input_data[i]); sum_eq += iv == thresh; sum_lt += Pred::cmp(iv, thresh); } #pragma unroll for (uint32_t step = REDUCE_WARP_SIZE / 2; step >= 1; step >>= 1) { sum_eq += __shfl_down(sum_eq, step, REDUCE_WARP_SIZE); sum_lt += __shfl_down(sum_lt, step, REDUCE_WARP_SIZE); } // reduce warp results to a single scalar if (!tid_local) { output[batch * output_width + out_col] = (static_cast<uint64_t>(sum_eq) << 32) | sum_lt; } } static MEGDNN_NOINLINE cudaError_t invoke_cub_scan(const uint64_t* input, uint64_t* output, void* workspace, size_t& workspace_size, uint32_t size, cudaStream_t stream) { return cub::DeviceScan::InclusiveSum(workspace, workspace_size, input, output, size, stream); } static __global__ void kern_init_zero(uint64_t* dst) { dst[0] = 0; } /*! 
* \brief copy top-k values of each row from input to output * * launch config: grid(X, batch), * thread(WARP_SIZE, COPY_SHARD) */ template <typename ctype, class Pred, int COPY_SHARD> static __global__ void kern_copy(const ctype* input_data, const ctype* input_thresh, const uint64_t* scan, uint32_t scan_width, ctype* output_value, int32_t* output_idx, uint32_t length, uint32_t k, int32_t lda) { #if __cplusplus > 199711L static_assert(REDUCE_SIZE < 256, "local_sum_storage can not be uint8_t"); #endif static const uint32_t BLOCK_DIM_X = WARP_SIZE, BLOCK_DIM_Y = COPY_SHARD; uint32_t scan_col = blockIdx.x * BLOCK_DIM_Y + threadIdx.y; if (scan_col >= scan_width) { return; } uint32_t batch = blockIdx.y, inp_col_begin = min(scan_col * REDUCE_SIZE, length), inp_col_length = min(inp_col_begin + REDUCE_SIZE, length) - inp_col_begin, tid_local = threadIdx.x; uint32_t thresh = RadixConverter<ctype>::to_radix(input_thresh[batch]); input_data += static_cast<int32_t>(batch) * lda + static_cast<int>(inp_col_begin); __shared__ uint8_t local_sum_storage[BLOCK_DIM_Y][2][REDUCE_SIZE + 4]; uint8_t *local_sum_eq = local_sum_storage[threadIdx.y][0], *local_sum_lt = local_sum_storage[threadIdx.y][1]; if (!tid_local) { local_sum_eq[3] = 0; local_sum_lt[3] = 0; } local_sum_eq += 4; local_sum_lt += 4; const uint32_t WORKLOAD = REDUCE_SIZE / WARP_SIZE; #pragma unroll for (uint32_t j = 0; j < WORKLOAD; ++j) { uint32_t i = j * BLOCK_DIM_X + tid_local; if (i < inp_col_length) { uint32_t iv = RadixConverter<ctype>::to_radix(input_data[i]); local_sum_eq[i] = iv == thresh; local_sum_lt[i] = Pred::cmp(iv, thresh); } else { local_sum_eq[i] = 0; local_sum_lt[i] = 0; } } device_scan_packed_accu32<WORKLOAD, uint8_t>(local_sum_eq, tid_local); device_scan_packed_accu32<WORKLOAD, uint8_t>(local_sum_lt, tid_local); scan += batch * scan_width; uint64_t scan_prev_pack = scan[static_cast<int>(scan_col) - 1], k_offset_pack = scan_prev_pack - scan[-1], scan_self_pack = scan[scan_col] - scan_prev_pack; #define 
unpack(name) \ uint32_t name##_eq = u32_from_64_high(name##_pack), \ name##_lt = u32_from_64_low(name##_pack) unpack(k_offset); unpack(scan_self); #undef unpack uint32_t allowed_eq = k - min(k, (u32_from_64_low(scan[scan_width - 1]) - u32_from_64_low(scan[-1]))), ls_lt_max = k - min(k_offset_lt, k), ls_eq_max = allowed_eq - min(allowed_eq, k_offset_eq); if ((scan_self_lt && ls_lt_max) || (scan_self_eq && ls_eq_max)) { #pragma unroll for (uint32_t j = 0; j < WORKLOAD; ++j) { int32_t i = j * BLOCK_DIM_X + tid_local; uint32_t cur_lt = local_sum_lt[i], cur_eq = local_sum_eq[i]; bool is_lt = cur_lt <= ls_lt_max && cur_lt != local_sum_lt[i - 1]; bool is_eq = cur_eq <= ls_eq_max && cur_eq != local_sum_eq[i - 1]; // exactly one should be true if (is_lt || is_eq) { uint32_t off_lt = cur_lt + k_offset_lt - 1; uint32_t off_eq = cur_eq + k_offset_eq - 1 + (k - allowed_eq); uint32_t ocol = is_lt ? off_lt : off_eq; output_value[batch * k + ocol] = input_data[i]; output_idx[batch * k + ocol] = i + inp_col_begin; } } } } //! get workspace for scan, aligned to uint64_t static size_t get_scan_workspace(uint32_t size) { size_t wk = 0; cudaError_t err = invoke_cub_scan(NULL, NULL, NULL, wk, size, NULL); if (err != cudaSuccess) { fprintf(stderr, "topk: cub scan failed: %s (%d)\n", cudaGetErrorString(err), static_cast<int>(err)); megdnn_trap(); } return ((wk - 1) / sizeof(uint64_t) + 1) * sizeof(uint64_t); } } // namespace select } // namespace cuda_topk_impl uint32_t topk::find_kth_radix_workspace(uint32_t batch, uint32_t length, uint32_t grid_dim_y_limit) { using namespace cuda_topk_impl::kth; uint32_t limit = batch > grid_dim_y_limit ? 
grid_dim_y_limit : batch; return (limit * get_grid_dim_x(length) * NR_BUCKET + limit * 2) * sizeof(uint32_t); } template <typename ctype> cudaError_t topk::find_kth_radix(const ctype* input, ctype* output, void* workspace, uint32_t batch, uint32_t length, int32_t lda, int32_t k, uint32_t grid_dim_y_limit, cudaStream_t stream) { using namespace cuda_topk_impl::kth; if (!k) { return cudaErrorUnknown; } if (k < 0) { k = length + k + 1; } if (!(BUCKET_BITS == 8 && sizeof(ctype) == 4)) { // no c++11 in megdnn cuda; so we just trap instead of using static // assert megdnn_trap(); } uint32_t batch_idx = 0; uint32_t grid_dim_x = get_grid_dim_x(length); uint32_t grid_dim_y = 1; while (batch_idx < batch) { if (batch - batch_idx >= grid_dim_y_limit) { grid_dim_y = grid_dim_y_limit; } else { grid_dim_y = batch - batch_idx; } dim3 grid_dim(grid_dim_x, grid_dim_y); uint32_t* dev_k = static_cast<uint32_t*>(workspace); uint32_t* dev_prefix = dev_k + grid_dim_y; uint32_t* bucket_cnt = dev_prefix + grid_dim_y; compute_histogram<ctype, false, 24><<<grid_dim, BLOCK_DIM, 0, stream>>>( input + batch_idx * lda, bucket_cnt, length, lda, nullptr); // use float to make compiler happy; it is not used since last == false update_prefix_and_k<true, false, 24, float> <<<grid_dim_y, NR_BUCKET, 0, stream>>>( bucket_cnt, dev_prefix, dev_k, k, grid_dim_x, nullptr); compute_histogram<ctype, true, 16><<<grid_dim, BLOCK_DIM, 0, stream>>>( input + batch_idx * lda, bucket_cnt, length, lda, dev_prefix); update_prefix_and_k<false, false, 16, float> <<<grid_dim_y, NR_BUCKET, 0, stream>>>( bucket_cnt, dev_prefix, dev_k, k, grid_dim_x, nullptr); compute_histogram<ctype, true, 8><<<grid_dim, BLOCK_DIM, 0, stream>>>( input + batch_idx * lda, bucket_cnt, length, lda, dev_prefix); update_prefix_and_k<false, false, 8, float> <<<grid_dim_y, NR_BUCKET, 0, stream>>>( bucket_cnt, dev_prefix, dev_k, k, grid_dim_x, nullptr); compute_histogram<ctype, true, 0><<<grid_dim, BLOCK_DIM, 0, stream>>>( input + batch_idx * lda, 
bucket_cnt, length, lda, dev_prefix); update_prefix_and_k<false, true, 0, ctype> <<<grid_dim_y, NR_BUCKET, 0, stream>>>(bucket_cnt, dev_prefix, dev_k, k, grid_dim_x, output + batch_idx); batch_idx += grid_dim_y; } return cudaGetLastError(); } template <typename ctype> cudaError_t topk::topk_select(const ctype* input, const ctype* thresh, ctype* output_value, int32_t* output_idx, void* workspace, uint32_t batch, uint32_t length, int32_t lda, int32_t k, uint32_t batch_upper_limit, cudaStream_t stream) { using namespace cuda_topk_impl; using namespace cuda_topk_impl::select; uint32_t length_split = DIVUP(length, REDUCE_SIZE); void (*kptr_reduce_block_cnt)(const ctype*, const ctype*, uint32_t, int32_t, uint64_t*, uint32_t); void (*kptr_copy)(const ctype*, const ctype*, const uint64_t*, uint32_t, ctype*, int32_t*, uint32_t, uint32_t, int32_t); int kern_copy_shard; { int grid, block; cudaError_t err = cudaOccupancyMaxPotentialBlockSize( &grid, &block, kern_copy<ctype, GreaterPred, 32>); if (err) { return err; } kern_copy_shard = block / (WARP_SIZE * 8) * 8; if (!kern_copy_shard) { fprintf(stderr, "topk: failed to launch: block=%d\n", block); return cudaErrorLaunchOutOfResources; } } #define CASE_SHARD_ON(pred, n) \ case n: \ kptr_copy = kern_copy<ctype, pred, n>; \ break #define CASE_SHARD(pred) \ switch (kern_copy_shard) { \ CASE_SHARD_ON(pred, 8); \ CASE_SHARD_ON(pred, 16); \ CASE_SHARD_ON(pred, 24); \ CASE_SHARD_ON(pred, 32); \ default: \ fprintf(stderr, "topk: failed to launch: shard=%d\n", \ kern_copy_shard); \ return cudaErrorLaunchOutOfResources; \ } if (k < 0) { k = -k; kptr_reduce_block_cnt = kern_reduce_block_cnt<ctype, GreaterPred>; CASE_SHARD(GreaterPred); } else { kptr_reduce_block_cnt = kern_reduce_block_cnt<ctype, LessPred>; CASE_SHARD(LessPred); } #undef CASE_SHARD #undef CASE_SHARD_ON uint32_t batch_idx = 0; uint32_t batch_real = 1; while (batch_idx < batch) { if (batch - batch_idx >= batch_upper_limit) { batch_real = batch_upper_limit; } else { 
batch_real = batch - batch_idx; } size_t scan_size = batch_real * length_split; size_t scan_wk = get_scan_workspace(scan_size); uint64_t *scan_inp = static_cast<uint64_t*>(workspace) + scan_wk / sizeof(uint64_t), *scan_out = scan_inp + scan_size; // reduce to scan_inp kptr_reduce_block_cnt<<< dim3(DIVUP(length_split, REDUCE_SHARD), batch_real), dim3(REDUCE_WARP_SIZE, REDUCE_SHARD), 0, stream>>>( input + batch_idx * lda, thresh + batch_idx, length, lda, scan_inp, length_split); // scan to scan_out scan_out += 1; // set scan[-1] to 0 cudaError_t err = invoke_cub_scan(scan_inp, scan_out, workspace, scan_wk, scan_size, stream); if (err != cudaSuccess) { return err; } kern_init_zero<<<1, 1, 0, stream>>>(scan_out - 1); // copy result kptr_copy<<<dim3(DIVUP(length_split, kern_copy_shard), batch_real), dim3(WARP_SIZE, kern_copy_shard), 0, stream>>>( input + batch_idx * lda, thresh + batch_idx, scan_out, length_split, output_value + std::abs(k) * batch_idx, output_idx + std::abs(k) * batch_idx, length, k, lda); batch_idx += batch_real; } return cudaGetLastError(); } uint32_t topk::topk_select_workspace(uint32_t batch, uint32_t length) { using namespace cuda_topk_impl::select; size_t scan_size = batch * DIVUP(length, REDUCE_SIZE); return get_scan_workspace(scan_size) + sizeof(uint64_t) * (scan_size * 2 + 1); } namespace megdnn { namespace cuda { namespace topk { #define INST(t) \ template cudaError_t find_kth_radix<t>(const t*, t*, void*, uint32_t, \ uint32_t, int32_t, int32_t, \ uint32_t, cudaStream_t); \ template cudaError_t topk_select<t>(const t*, const t*, t*, int32_t*, \ void*, uint32_t, uint32_t, int32_t, \ int32_t, uint32_t, cudaStream_t) INST(float); INST(int32_t); #undef INST } // namespace topk } // namespace cuda } // namespace megdnn // vim: ft=cuda syntax=cuda.doxygen
9c39e665cb071eccd87b364169e9f7d22ae3c184.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "KerDivision.h" #include "KerCommon.h" template< typename T > __forceinline__ __device__ T min(T a, T b) { if (a < b) return a; else return b; } __global__ void kerSplitTri ( KerIntArray splitTriArr, Tri* triArr, TriOpp* oppArr, char* triInfoArr, int* insTriMap, int* triToVert, int triNum, int insTriNum ) { // Iterate current triangles for (int idx = getCurThreadIdx(); idx < splitTriArr._num; idx += getThreadNum()) { const int triIdx = splitTriArr._arr[idx]; const int newBeg = (triNum >= 0) ? (triNum + 2 * insTriMap[triIdx]) : (triIdx + 1); const int newTriIdx[DEG] = { triIdx, newBeg, newBeg + 1 }; TriOpp newOpp[3] = { { -1, -1, -1 }, { -1, -1, -1 }, { -1, -1, -1 } }; // Set adjacency of 3 internal faces of 3 new triangles newOpp[0].setOpp(0, newTriIdx[1], 1); newOpp[0].setOpp(1, newTriIdx[2], 0); newOpp[1].setOpp(0, newTriIdx[2], 1); newOpp[1].setOpp(1, newTriIdx[0], 0); newOpp[2].setOpp(0, newTriIdx[0], 1); newOpp[2].setOpp(1, newTriIdx[1], 0); // Set adjacency of 4 external faces const TriOpp oldOpp = oppArr[triIdx]; // Iterate faces of old triangle for (int ni = 0; ni < DEG; ++ni) { if (-1 == oldOpp._t[ni]) continue; // No neighbour at this face int neiTriIdx = oldOpp.getOppTri(ni); int neiTriVi = oldOpp.getOppVi(ni); // Check if neighbour has split const int neiNewBeg = insTriMap[neiTriIdx]; if (-1 == neiNewBeg) // Neighbour is un-split { oppArr[neiTriIdx].setOpp(neiTriVi, newTriIdx[ni], 2); // Point un-split neighbour back to this new triangle } else // Neighbour has split { // Get neighbour's new split triangle that has this face if (triNum >= 0) neiTriIdx = ((0 == neiTriVi) ? 
neiTriIdx : (triNum + 2 * neiNewBeg + neiTriVi - 1)); else neiTriIdx += neiTriVi; neiTriVi = 2; } newOpp[ni].setOpp(2, neiTriIdx, neiTriVi); // Point this triangle to neighbour } // Write split triangle and opp const Tri tri = triArr[triIdx]; // Note: This slot will be overwritten below const int splitVertex = triToVert[triIdx]; for (int ti = 0; ti < DEG; ++ti) { const Tri newTri = { tri._v[(ti + 1) % DEG], tri._v[(ti + 2) % DEG], splitVertex }; const int toTriIdx = newTriIdx[ti]; triArr[toTriIdx] = newTri; oppArr[toTriIdx] = newOpp[ti]; setTriAliveState(triInfoArr[toTriIdx], true); setTriCheckState(triInfoArr[toTriIdx], Changed); } } return; } // Note: triVoteArr should *not* be modified here __global__ void kerMarkRejectedFlips ( int* actTriArr, TriOpp* oppArr, int* triVoteArr, char* triInfoArr, int* flipToTri, int actTriNum, int* dbgRejFlipArr ) { for (int idx = getCurThreadIdx(); idx < actTriNum; idx += getThreadNum()) { int output = -1; const int triIdx = actTriArr[idx]; const int voteVal = triVoteArr[triIdx]; if (INT_MAX == voteVal) { setTriCheckState(triInfoArr[triIdx], Checked); actTriArr[idx] = -1; } else { int bossTriIdx, botVi; decode(voteVal, &bossTriIdx, &botVi); if (bossTriIdx == triIdx) // Boss of myself { const TriOpp& opp = oppArr[triIdx]; const int topTriIdx = opp.getOppTri(botVi); const int topVoteVal = triVoteArr[topTriIdx]; if (topVoteVal == voteVal) output = voteVal; } if (NULL != dbgRejFlipArr && output == -1) dbgRejFlipArr[triIdx] = 1; } flipToTri[idx] = output; } return; } __global__ void kerFlip ( KerIntArray flipToTri, Tri* triArr, TriOpp* oppArr, char* triInfoArr, int2* triMsgArr, int* actTriArr, FlipItem* flipArr, int* triConsArr, int* vertTriArr, int orgFlipNum, int actTriNum ) { // Iterate flips for (int flipIdx = getCurThreadIdx(); flipIdx < flipToTri._num; flipIdx += getThreadNum()) { int botIdx, botVi; const int voteVal = flipToTri._arr[flipIdx]; decode(voteVal, &botIdx, &botVi); // Bottom triangle Tri botTri = triArr[botIdx]; 
const TriOpp& botOpp = oppArr[botIdx]; // Top triangle const int topIdx = botOpp.getOppTri(botVi); const int topVi = botOpp.getOppVi(botVi); Tri topTri = triArr[topIdx]; const int globFlipIdx = orgFlipNum + flipIdx; const int botAVi = (botVi + 1) % 3; const int botBVi = (botVi + 2) % 3; const int topAVi = (topVi + 2) % 3; const int topBVi = (topVi + 1) % 3; // Create new triangle const int topVert = topTri._v[topVi]; const int botVert = botTri._v[botVi]; const int botA = botTri._v[botAVi]; const int botB = botTri._v[botBVi]; // Update the bottom and top triangle botTri = makeTri(botVert, botA, topVert); topTri = makeTri(topVert, botB, botVert); triArr[botIdx] = botTri; triArr[topIdx] = topTri; int newBotNei = 0xffff; int newTopNei = 0xffff; setTriIdxVi(newBotNei, botAVi, 1, 0); setTriIdxVi(newBotNei, botBVi, 3, 2); setTriIdxVi(newTopNei, topAVi, 3, 2); setTriIdxVi(newTopNei, topBVi, 0, 0); // Write down the new triangle idx triMsgArr[botIdx] = make_int2(newBotNei, globFlipIdx); triMsgArr[topIdx] = make_int2(newTopNei, globFlipIdx); // Record the flip FlipItem flipItem = { botVert, topVert, botIdx, topIdx }; storeFlip(flipArr, globFlipIdx, flipItem); // Prepare for the next round if (actTriArr != NULL) actTriArr[actTriNum + flipIdx] = (Checked == getTriCheckState(triInfoArr[topIdx])) ? topIdx : -1; if (triConsArr == NULL) // Standard flipping triInfoArr[topIdx] = 3; // Alive + Changed else { vertTriArr[botA] = botIdx; vertTriArr[botB] = topIdx; // Update constraint intersection info int botLabel = triConsArr[botIdx]; int topLabel = triConsArr[topIdx]; const int consIdx = decode_cIdx(botLabel); const int botSide = decode_cSide(botLabel); int topSide = decode_cSide(topLabel); if (topSide < 2) // Not the last triangle topSide = (decode_cVi(topLabel) == topAVi ? 
0 : 1); switch (botSide) // Cannto be 3 { case 0: switch (topSide) { case 0: botLabel = -1; topLabel = encode_constraint(consIdx, 2, 0); break; case 1: botLabel = encode_constraint(consIdx, 0, 0); topLabel = encode_constraint(consIdx, 1, 1); break; case 3: botLabel = -1; topLabel = encode_constraint(consIdx, 0, 3); break; } break; case 1: switch (topSide) { case 0: botLabel = encode_constraint(consIdx, 1, 0); topLabel = encode_constraint(consIdx, 2, 1); break; case 1: botLabel = encode_constraint(consIdx, 0, 1); topLabel = -1; break; case 3: botLabel = encode_constraint(consIdx, 2, 3); topLabel = -1; break; } break; case 2: botLabel = (topSide == 1 ? encode_constraint(consIdx, 0, 2) : -1); topLabel = (topSide == 0 ? encode_constraint(consIdx, 2, 2) : -1); break; } triConsArr[botIdx] = botLabel; triConsArr[topIdx] = topLabel; } } return; } __global__ void kerUpdateOpp ( FlipItem* flipVec, TriOpp* oppArr, int2* triMsgArr, int* flipToTri, int orgFlipNum, int flipNum ) { // Iterate flips for (int flipIdx = getCurThreadIdx(); flipIdx < flipNum; flipIdx += getThreadNum()) { int botIdx, botVi; int voteVal = flipToTri[flipIdx]; decode(voteVal, &botIdx, &botVi); int extOpp[4]; TriOpp opp; opp = oppArr[botIdx]; extOpp[0] = opp.getOppTriVi((botVi + 1) % 3); extOpp[1] = opp.getOppTriVi((botVi + 2) % 3); int topIdx = opp.getOppTri(botVi); const int topVi = opp.getOppVi(botVi); opp = oppArr[topIdx]; extOpp[2] = opp.getOppTriVi((topVi + 2) % 3); extOpp[3] = opp.getOppTriVi((topVi + 1) % 3); // Ok, update with neighbors for (int i = 0; i < 4; ++i) { int newTriIdx, vi; int triOpp = extOpp[i]; bool isCons = isOppValConstraint(triOpp); // No neighbor if (-1 == triOpp) continue; int oppIdx = getOppValTri(triOpp); int oppVi = getOppValVi(triOpp); const int2 msg = triMsgArr[oppIdx]; if (msg.y < orgFlipNum) // Neighbor not flipped { // Set my neighbor's opp newTriIdx = ((i & 1) == 0 ? topIdx : botIdx); vi = (i == 0 || i == 3) ? 
0 : 2; oppArr[oppIdx].setOpp(oppVi, newTriIdx, vi, isCons); } else { const int oppFlipIdx = msg.y - orgFlipNum; // Update my own opp const int newLocOppIdx = getTriIdx(msg.x, oppVi); if (newLocOppIdx != 3) oppIdx = flipVec[oppFlipIdx]._t[newLocOppIdx]; oppVi = getTriVi(msg.x, oppVi); setOppValTriVi(extOpp[i], oppIdx, oppVi); } } // Now output opp._t[0] = extOpp[3]; opp.setOpp(1, topIdx, 1); opp._t[2] = extOpp[1]; oppArr[botIdx] = opp; opp._t[0] = extOpp[0]; opp.setOpp(1, botIdx, 1); opp._t[2] = extOpp[2]; oppArr[topIdx] = opp; } return; } __global__ void kerUpdateFlipTrace ( FlipItem* flipArr, int* triToFlip, int orgFlipNum, int flipNum ) { for (int idx = getCurThreadIdx(); idx < flipNum; idx += getThreadNum()) { const int flipIdx = orgFlipNum + idx; FlipItem flipItem = loadFlip(flipArr, flipIdx); int triIdx, nextFlip; triIdx = flipItem._t[0]; nextFlip = triToFlip[triIdx]; flipItem._t[0] = (nextFlip == -1) ? (triIdx << 1) | 0 : nextFlip; triToFlip[triIdx] = (flipIdx << 1) | 1; triIdx = flipItem._t[1]; nextFlip = triToFlip[triIdx]; flipItem._t[1] = (nextFlip == -1) ? 
(triIdx << 1) | 0 : nextFlip; triToFlip[triIdx] = (flipIdx << 1) | 1; storeFlip(flipArr, flipIdx, flipItem); } } __global__ void kerUpdateVertIdx ( KerTriArray triVec, char* triInfoArr, int* orgPointIdx ) { for (int idx = getCurThreadIdx(); idx < triVec._num; idx += getThreadNum()) { if (!isTriAlive(triInfoArr[idx])) continue; Tri tri = triVec._arr[idx]; for (int i = 0; i < DEG; ++i) tri._v[i] = orgPointIdx[tri._v[i]]; triVec._arr[idx] = tri; } } __global__ void kerShiftTriIdx ( KerIntArray idxVec, int* shiftArr ) { for (int idx = getCurThreadIdx(); idx < idxVec._num; idx += getThreadNum()) { const int oldIdx = idxVec._arr[idx]; if (oldIdx != -1) idxVec._arr[idx] = oldIdx + shiftArr[oldIdx]; } } __global__ void kerMarkSpecialTris ( KerCharArray triInfoVec, TriOpp* oppArr ) { for (int idx = getCurThreadIdx(); idx < triInfoVec._num; idx += getThreadNum()) { if (!isTriAlive(triInfoVec._arr[idx])) continue; TriOpp opp = oppArr[idx]; bool changed = false; for (int vi = 0; vi < DEG; ++vi) { if (-1 == opp._t[vi]) continue; if (opp.isOppSpecial(vi)) changed = true; } if (changed) setTriCheckState(triInfoVec._arr[idx], Changed); } } __forceinline__ __device__ float hash(int k) { k *= 357913941; k ^= k << 24; k += ~357913941; k ^= k >> 31; k ^= k << 31; return int_as_float(k); } __global__ void kerPickWinnerPoint ( KerIntArray vertexTriVec, int* vertCircleArr, int* triCircleArr, int* triVertArr, int noSample ) { const float rate = float(vertexTriVec._num) / noSample; // Iterate uninserted points for (int idx = getCurThreadIdx(); idx < noSample; idx += getThreadNum()) { const int vert = int(idx * rate); const int triIdx = vertexTriVec._arr[vert]; if (triIdx == -1) continue; const int vertSVal = vertCircleArr[idx]; const int winSVal = triCircleArr[triIdx]; // Check if vertex is winner if (winSVal == vertSVal) atomicMin(&triVertArr[triIdx], vert); } return; } __global__ void kerMakeFirstTri ( Tri* triArr, TriOpp* oppArr, char* triInfoArr, Tri tri, int infIdx ) { const Tri 
tris[] = { { tri._v[0], tri._v[1], tri._v[2] }, { tri._v[2], tri._v[1], infIdx }, { tri._v[0], tri._v[2], infIdx }, { tri._v[1], tri._v[0], infIdx } }; const int oppTri[][3] = { { 1, 2, 3 }, { 3, 2, 0 }, { 1, 3, 0 }, { 2, 1, 0 } }; const int oppVi[][4] = { { 2, 2, 2 }, { 1, 0, 0 }, { 1, 0, 1 }, { 1, 0, 2 } }; for (int i = 0; i < 4; ++i) { triArr[i] = tris[i]; triInfoArr[i] = 1; TriOpp opp = { -1, -1, -1 }; for (int j = 0; j < 3; ++j) opp.setOpp(j, oppTri[i][j], oppVi[i][j]); oppArr[i] = opp; } } __global__ void kerShiftOpp ( KerIntArray shiftVec, TriOpp* src, TriOpp* dest, int destSize ) { for (int idx = getCurThreadIdx(); idx < shiftVec._num; idx += getThreadNum()) { const int shift = shiftVec._arr[idx]; TriOpp opp = src[idx]; for (int vi = 0; vi < 3; ++vi) { const int oppTri = opp.getOppTri(vi); CudaAssert(oppTri >= 0 && oppTri < shiftVec._num); CudaAssert(oppTri + shiftVec._arr[oppTri] < destSize); opp.setOppTri(vi, oppTri + shiftVec._arr[oppTri]); } CudaAssert(idx + shift < destSize); dest[idx + shift] = opp; } } __global__ void kerMarkInfinityTri ( KerTriArray triVec, char* triInfoArr, TriOpp* oppArr, int infIdx ) { for (int idx = getCurThreadIdx(); idx < triVec._num; idx += getThreadNum()) { if (!triVec._arr[idx].has(infIdx)) continue; // Mark as deleted setTriAliveState(triInfoArr[idx], false); TriOpp opp = oppArr[idx]; for (int vi = 0; vi < DEG; ++vi) { if (opp._t[vi] < 0) continue; const int oppIdx = opp.getOppTri(vi); const int oppVi = opp.getOppVi(vi); oppArr[oppIdx]._t[oppVi] = -1; } } } __global__ void kerCollectFreeSlots ( char* triInfoArr, int* prefixArr, int* freeArr, int newTriNum ) { for (int idx = getCurThreadIdx(); idx < newTriNum; idx += getThreadNum()) { if (isTriAlive(triInfoArr[idx])) continue; int freeIdx = idx - prefixArr[idx]; freeArr[freeIdx] = idx; } } __global__ void kerMakeCompactMap ( KerCharArray triInfoVec, int* prefixArr, int* freeArr, int newTriNum ) { for (int idx = newTriNum + getCurThreadIdx(); idx < triInfoVec._num; idx += 
getThreadNum()) { if (!isTriAlive(triInfoVec._arr[idx])) { prefixArr[idx] = -1; continue; } int freeIdx = newTriNum - prefixArr[idx]; int newTriIdx = freeArr[freeIdx]; prefixArr[idx] = newTriIdx; } } __global__ void kerCompactTris ( KerCharArray triInfoVec, int* prefixArr, Tri* triArr, TriOpp* oppArr, int newTriNum ) { for (int idx = newTriNum + getCurThreadIdx(); idx < triInfoVec._num; idx += getThreadNum()) { int newTriIdx = prefixArr[idx]; if (newTriIdx == -1) continue; triArr[newTriIdx] = triArr[idx]; triInfoVec._arr[newTriIdx] = triInfoVec._arr[idx]; TriOpp opp = oppArr[idx]; for (int vi = 0; vi < DEG; ++vi) { if (opp._t[vi] < 0) continue; const int oppIdx = opp.getOppTri(vi); if (oppIdx >= newTriNum) { const int oppNewIdx = prefixArr[oppIdx]; opp.setOppTri(vi, oppNewIdx); } else { const int oppVi = opp.getOppVi(vi); oppArr[oppIdx].setOppTri(oppVi, newTriIdx); } } oppArr[newTriIdx] = opp; } } __global__ void kerMapTriToVert ( KerTriArray triVec, int* vertTriArr ) { for (int idx = getCurThreadIdx(); idx < triVec._num; idx += getThreadNum()) { Tri tri = triVec._arr[idx]; for (int vi = 0; vi < DEG; ++vi) vertTriArr[tri._v[vi]] = idx; } } __global__ void kerMarkRejectedConsFlips ( KerIntArray actTriVec, int* triConsArr, int* triVoteArr, char* triInfoArr, TriOpp* oppArr, int* flipToTri, int* dbgRejFlipArr ) { for (int idx = getCurThreadIdx(); idx < actTriVec._num; idx += getThreadNum()) { int output = -1; const int midIdx = actTriVec._arr[idx]; const int voteVal = triVoteArr[midIdx]; if (INT_MAX != voteVal) { const int bossTriIdx = getConsFlipVoteIdx(voteVal); const int priority = getConsFlipVotePriority(voteVal); if (bossTriIdx == midIdx) // Boss of myself { const int midLabel = triConsArr[midIdx]; const int midVi = decode_cVi(midLabel); const int midSide = decode_cSide(midLabel); const int rightIdx = oppArr[midIdx].getOppTri(midVi); const int leftIdx = oppArr[midIdx].getOppTri((midVi + midSide + 1) % 3); if (triVoteArr[rightIdx] == voteVal) { if (priority == 
PriorityCase1) output = encode(midIdx, midVi); else if (triVoteArr[leftIdx] == voteVal) output = encode(midIdx, midVi); if (NULL != dbgRejFlipArr && output == -1) dbgRejFlipArr[midIdx] = 1; if (output != -1) { // Mark all triangles as changed setTriCheckState(triInfoArr[leftIdx], Changed); setTriCheckState(triInfoArr[midIdx], Changed); setTriCheckState(triInfoArr[rightIdx], Changed); const int rightLabel = triConsArr[rightIdx]; if (decode_cSide(rightLabel) != 3) // Not the last one { const int nextIdx = oppArr[rightIdx].getOppTri(decode_cVi(rightLabel)); setTriCheckState(triInfoArr[nextIdx], Changed); } // NOTE: Only marking the left and the right of the flip is // not enough, since when flipping we only check the front pair! // This, however, does not affect the correctness since // the remaining pairs will be processed in the next outer loop. } } } } flipToTri[idx] = output; } return; }
9c39e665cb071eccd87b364169e9f7d22ae3c184.cu
#include "KerDivision.h" #include "KerCommon.h" template< typename T > __forceinline__ __device__ T min(T a, T b) { if (a < b) return a; else return b; } __global__ void kerSplitTri ( KerIntArray splitTriArr, Tri* triArr, TriOpp* oppArr, char* triInfoArr, int* insTriMap, int* triToVert, int triNum, int insTriNum ) { // Iterate current triangles for (int idx = getCurThreadIdx(); idx < splitTriArr._num; idx += getThreadNum()) { const int triIdx = splitTriArr._arr[idx]; const int newBeg = (triNum >= 0) ? (triNum + 2 * insTriMap[triIdx]) : (triIdx + 1); const int newTriIdx[DEG] = { triIdx, newBeg, newBeg + 1 }; TriOpp newOpp[3] = { { -1, -1, -1 }, { -1, -1, -1 }, { -1, -1, -1 } }; // Set adjacency of 3 internal faces of 3 new triangles newOpp[0].setOpp(0, newTriIdx[1], 1); newOpp[0].setOpp(1, newTriIdx[2], 0); newOpp[1].setOpp(0, newTriIdx[2], 1); newOpp[1].setOpp(1, newTriIdx[0], 0); newOpp[2].setOpp(0, newTriIdx[0], 1); newOpp[2].setOpp(1, newTriIdx[1], 0); // Set adjacency of 4 external faces const TriOpp oldOpp = oppArr[triIdx]; // Iterate faces of old triangle for (int ni = 0; ni < DEG; ++ni) { if (-1 == oldOpp._t[ni]) continue; // No neighbour at this face int neiTriIdx = oldOpp.getOppTri(ni); int neiTriVi = oldOpp.getOppVi(ni); // Check if neighbour has split const int neiNewBeg = insTriMap[neiTriIdx]; if (-1 == neiNewBeg) // Neighbour is un-split { oppArr[neiTriIdx].setOpp(neiTriVi, newTriIdx[ni], 2); // Point un-split neighbour back to this new triangle } else // Neighbour has split { // Get neighbour's new split triangle that has this face if (triNum >= 0) neiTriIdx = ((0 == neiTriVi) ? 
neiTriIdx : (triNum + 2 * neiNewBeg + neiTriVi - 1)); else neiTriIdx += neiTriVi; neiTriVi = 2; } newOpp[ni].setOpp(2, neiTriIdx, neiTriVi); // Point this triangle to neighbour } // Write split triangle and opp const Tri tri = triArr[triIdx]; // Note: This slot will be overwritten below const int splitVertex = triToVert[triIdx]; for (int ti = 0; ti < DEG; ++ti) { const Tri newTri = { tri._v[(ti + 1) % DEG], tri._v[(ti + 2) % DEG], splitVertex }; const int toTriIdx = newTriIdx[ti]; triArr[toTriIdx] = newTri; oppArr[toTriIdx] = newOpp[ti]; setTriAliveState(triInfoArr[toTriIdx], true); setTriCheckState(triInfoArr[toTriIdx], Changed); } } return; } // Note: triVoteArr should *not* be modified here __global__ void kerMarkRejectedFlips ( int* actTriArr, TriOpp* oppArr, int* triVoteArr, char* triInfoArr, int* flipToTri, int actTriNum, int* dbgRejFlipArr ) { for (int idx = getCurThreadIdx(); idx < actTriNum; idx += getThreadNum()) { int output = -1; const int triIdx = actTriArr[idx]; const int voteVal = triVoteArr[triIdx]; if (INT_MAX == voteVal) { setTriCheckState(triInfoArr[triIdx], Checked); actTriArr[idx] = -1; } else { int bossTriIdx, botVi; decode(voteVal, &bossTriIdx, &botVi); if (bossTriIdx == triIdx) // Boss of myself { const TriOpp& opp = oppArr[triIdx]; const int topTriIdx = opp.getOppTri(botVi); const int topVoteVal = triVoteArr[topTriIdx]; if (topVoteVal == voteVal) output = voteVal; } if (NULL != dbgRejFlipArr && output == -1) dbgRejFlipArr[triIdx] = 1; } flipToTri[idx] = output; } return; } __global__ void kerFlip ( KerIntArray flipToTri, Tri* triArr, TriOpp* oppArr, char* triInfoArr, int2* triMsgArr, int* actTriArr, FlipItem* flipArr, int* triConsArr, int* vertTriArr, int orgFlipNum, int actTriNum ) { // Iterate flips for (int flipIdx = getCurThreadIdx(); flipIdx < flipToTri._num; flipIdx += getThreadNum()) { int botIdx, botVi; const int voteVal = flipToTri._arr[flipIdx]; decode(voteVal, &botIdx, &botVi); // Bottom triangle Tri botTri = triArr[botIdx]; 
const TriOpp& botOpp = oppArr[botIdx]; // Top triangle const int topIdx = botOpp.getOppTri(botVi); const int topVi = botOpp.getOppVi(botVi); Tri topTri = triArr[topIdx]; const int globFlipIdx = orgFlipNum + flipIdx; const int botAVi = (botVi + 1) % 3; const int botBVi = (botVi + 2) % 3; const int topAVi = (topVi + 2) % 3; const int topBVi = (topVi + 1) % 3; // Create new triangle const int topVert = topTri._v[topVi]; const int botVert = botTri._v[botVi]; const int botA = botTri._v[botAVi]; const int botB = botTri._v[botBVi]; // Update the bottom and top triangle botTri = makeTri(botVert, botA, topVert); topTri = makeTri(topVert, botB, botVert); triArr[botIdx] = botTri; triArr[topIdx] = topTri; int newBotNei = 0xffff; int newTopNei = 0xffff; setTriIdxVi(newBotNei, botAVi, 1, 0); setTriIdxVi(newBotNei, botBVi, 3, 2); setTriIdxVi(newTopNei, topAVi, 3, 2); setTriIdxVi(newTopNei, topBVi, 0, 0); // Write down the new triangle idx triMsgArr[botIdx] = make_int2(newBotNei, globFlipIdx); triMsgArr[topIdx] = make_int2(newTopNei, globFlipIdx); // Record the flip FlipItem flipItem = { botVert, topVert, botIdx, topIdx }; storeFlip(flipArr, globFlipIdx, flipItem); // Prepare for the next round if (actTriArr != NULL) actTriArr[actTriNum + flipIdx] = (Checked == getTriCheckState(triInfoArr[topIdx])) ? topIdx : -1; if (triConsArr == NULL) // Standard flipping triInfoArr[topIdx] = 3; // Alive + Changed else { vertTriArr[botA] = botIdx; vertTriArr[botB] = topIdx; // Update constraint intersection info int botLabel = triConsArr[botIdx]; int topLabel = triConsArr[topIdx]; const int consIdx = decode_cIdx(botLabel); const int botSide = decode_cSide(botLabel); int topSide = decode_cSide(topLabel); if (topSide < 2) // Not the last triangle topSide = (decode_cVi(topLabel) == topAVi ? 
0 : 1); switch (botSide) // Cannto be 3 { case 0: switch (topSide) { case 0: botLabel = -1; topLabel = encode_constraint(consIdx, 2, 0); break; case 1: botLabel = encode_constraint(consIdx, 0, 0); topLabel = encode_constraint(consIdx, 1, 1); break; case 3: botLabel = -1; topLabel = encode_constraint(consIdx, 0, 3); break; } break; case 1: switch (topSide) { case 0: botLabel = encode_constraint(consIdx, 1, 0); topLabel = encode_constraint(consIdx, 2, 1); break; case 1: botLabel = encode_constraint(consIdx, 0, 1); topLabel = -1; break; case 3: botLabel = encode_constraint(consIdx, 2, 3); topLabel = -1; break; } break; case 2: botLabel = (topSide == 1 ? encode_constraint(consIdx, 0, 2) : -1); topLabel = (topSide == 0 ? encode_constraint(consIdx, 2, 2) : -1); break; } triConsArr[botIdx] = botLabel; triConsArr[topIdx] = topLabel; } } return; } __global__ void kerUpdateOpp ( FlipItem* flipVec, TriOpp* oppArr, int2* triMsgArr, int* flipToTri, int orgFlipNum, int flipNum ) { // Iterate flips for (int flipIdx = getCurThreadIdx(); flipIdx < flipNum; flipIdx += getThreadNum()) { int botIdx, botVi; int voteVal = flipToTri[flipIdx]; decode(voteVal, &botIdx, &botVi); int extOpp[4]; TriOpp opp; opp = oppArr[botIdx]; extOpp[0] = opp.getOppTriVi((botVi + 1) % 3); extOpp[1] = opp.getOppTriVi((botVi + 2) % 3); int topIdx = opp.getOppTri(botVi); const int topVi = opp.getOppVi(botVi); opp = oppArr[topIdx]; extOpp[2] = opp.getOppTriVi((topVi + 2) % 3); extOpp[3] = opp.getOppTriVi((topVi + 1) % 3); // Ok, update with neighbors for (int i = 0; i < 4; ++i) { int newTriIdx, vi; int triOpp = extOpp[i]; bool isCons = isOppValConstraint(triOpp); // No neighbor if (-1 == triOpp) continue; int oppIdx = getOppValTri(triOpp); int oppVi = getOppValVi(triOpp); const int2 msg = triMsgArr[oppIdx]; if (msg.y < orgFlipNum) // Neighbor not flipped { // Set my neighbor's opp newTriIdx = ((i & 1) == 0 ? topIdx : botIdx); vi = (i == 0 || i == 3) ? 
0 : 2; oppArr[oppIdx].setOpp(oppVi, newTriIdx, vi, isCons); } else { const int oppFlipIdx = msg.y - orgFlipNum; // Update my own opp const int newLocOppIdx = getTriIdx(msg.x, oppVi); if (newLocOppIdx != 3) oppIdx = flipVec[oppFlipIdx]._t[newLocOppIdx]; oppVi = getTriVi(msg.x, oppVi); setOppValTriVi(extOpp[i], oppIdx, oppVi); } } // Now output opp._t[0] = extOpp[3]; opp.setOpp(1, topIdx, 1); opp._t[2] = extOpp[1]; oppArr[botIdx] = opp; opp._t[0] = extOpp[0]; opp.setOpp(1, botIdx, 1); opp._t[2] = extOpp[2]; oppArr[topIdx] = opp; } return; } __global__ void kerUpdateFlipTrace ( FlipItem* flipArr, int* triToFlip, int orgFlipNum, int flipNum ) { for (int idx = getCurThreadIdx(); idx < flipNum; idx += getThreadNum()) { const int flipIdx = orgFlipNum + idx; FlipItem flipItem = loadFlip(flipArr, flipIdx); int triIdx, nextFlip; triIdx = flipItem._t[0]; nextFlip = triToFlip[triIdx]; flipItem._t[0] = (nextFlip == -1) ? (triIdx << 1) | 0 : nextFlip; triToFlip[triIdx] = (flipIdx << 1) | 1; triIdx = flipItem._t[1]; nextFlip = triToFlip[triIdx]; flipItem._t[1] = (nextFlip == -1) ? 
(triIdx << 1) | 0 : nextFlip; triToFlip[triIdx] = (flipIdx << 1) | 1; storeFlip(flipArr, flipIdx, flipItem); } } __global__ void kerUpdateVertIdx ( KerTriArray triVec, char* triInfoArr, int* orgPointIdx ) { for (int idx = getCurThreadIdx(); idx < triVec._num; idx += getThreadNum()) { if (!isTriAlive(triInfoArr[idx])) continue; Tri tri = triVec._arr[idx]; for (int i = 0; i < DEG; ++i) tri._v[i] = orgPointIdx[tri._v[i]]; triVec._arr[idx] = tri; } } __global__ void kerShiftTriIdx ( KerIntArray idxVec, int* shiftArr ) { for (int idx = getCurThreadIdx(); idx < idxVec._num; idx += getThreadNum()) { const int oldIdx = idxVec._arr[idx]; if (oldIdx != -1) idxVec._arr[idx] = oldIdx + shiftArr[oldIdx]; } } __global__ void kerMarkSpecialTris ( KerCharArray triInfoVec, TriOpp* oppArr ) { for (int idx = getCurThreadIdx(); idx < triInfoVec._num; idx += getThreadNum()) { if (!isTriAlive(triInfoVec._arr[idx])) continue; TriOpp opp = oppArr[idx]; bool changed = false; for (int vi = 0; vi < DEG; ++vi) { if (-1 == opp._t[vi]) continue; if (opp.isOppSpecial(vi)) changed = true; } if (changed) setTriCheckState(triInfoVec._arr[idx], Changed); } } __forceinline__ __device__ float hash(int k) { k *= 357913941; k ^= k << 24; k += ~357913941; k ^= k >> 31; k ^= k << 31; return int_as_float(k); } __global__ void kerPickWinnerPoint ( KerIntArray vertexTriVec, int* vertCircleArr, int* triCircleArr, int* triVertArr, int noSample ) { const float rate = float(vertexTriVec._num) / noSample; // Iterate uninserted points for (int idx = getCurThreadIdx(); idx < noSample; idx += getThreadNum()) { const int vert = int(idx * rate); const int triIdx = vertexTriVec._arr[vert]; if (triIdx == -1) continue; const int vertSVal = vertCircleArr[idx]; const int winSVal = triCircleArr[triIdx]; // Check if vertex is winner if (winSVal == vertSVal) atomicMin(&triVertArr[triIdx], vert); } return; } __global__ void kerMakeFirstTri ( Tri* triArr, TriOpp* oppArr, char* triInfoArr, Tri tri, int infIdx ) { const Tri 
tris[] = { { tri._v[0], tri._v[1], tri._v[2] }, { tri._v[2], tri._v[1], infIdx }, { tri._v[0], tri._v[2], infIdx }, { tri._v[1], tri._v[0], infIdx } }; const int oppTri[][3] = { { 1, 2, 3 }, { 3, 2, 0 }, { 1, 3, 0 }, { 2, 1, 0 } }; const int oppVi[][4] = { { 2, 2, 2 }, { 1, 0, 0 }, { 1, 0, 1 }, { 1, 0, 2 } }; for (int i = 0; i < 4; ++i) { triArr[i] = tris[i]; triInfoArr[i] = 1; TriOpp opp = { -1, -1, -1 }; for (int j = 0; j < 3; ++j) opp.setOpp(j, oppTri[i][j], oppVi[i][j]); oppArr[i] = opp; } } __global__ void kerShiftOpp ( KerIntArray shiftVec, TriOpp* src, TriOpp* dest, int destSize ) { for (int idx = getCurThreadIdx(); idx < shiftVec._num; idx += getThreadNum()) { const int shift = shiftVec._arr[idx]; TriOpp opp = src[idx]; for (int vi = 0; vi < 3; ++vi) { const int oppTri = opp.getOppTri(vi); CudaAssert(oppTri >= 0 && oppTri < shiftVec._num); CudaAssert(oppTri + shiftVec._arr[oppTri] < destSize); opp.setOppTri(vi, oppTri + shiftVec._arr[oppTri]); } CudaAssert(idx + shift < destSize); dest[idx + shift] = opp; } } __global__ void kerMarkInfinityTri ( KerTriArray triVec, char* triInfoArr, TriOpp* oppArr, int infIdx ) { for (int idx = getCurThreadIdx(); idx < triVec._num; idx += getThreadNum()) { if (!triVec._arr[idx].has(infIdx)) continue; // Mark as deleted setTriAliveState(triInfoArr[idx], false); TriOpp opp = oppArr[idx]; for (int vi = 0; vi < DEG; ++vi) { if (opp._t[vi] < 0) continue; const int oppIdx = opp.getOppTri(vi); const int oppVi = opp.getOppVi(vi); oppArr[oppIdx]._t[oppVi] = -1; } } } __global__ void kerCollectFreeSlots ( char* triInfoArr, int* prefixArr, int* freeArr, int newTriNum ) { for (int idx = getCurThreadIdx(); idx < newTriNum; idx += getThreadNum()) { if (isTriAlive(triInfoArr[idx])) continue; int freeIdx = idx - prefixArr[idx]; freeArr[freeIdx] = idx; } } __global__ void kerMakeCompactMap ( KerCharArray triInfoVec, int* prefixArr, int* freeArr, int newTriNum ) { for (int idx = newTriNum + getCurThreadIdx(); idx < triInfoVec._num; idx += 
getThreadNum()) { if (!isTriAlive(triInfoVec._arr[idx])) { prefixArr[idx] = -1; continue; } int freeIdx = newTriNum - prefixArr[idx]; int newTriIdx = freeArr[freeIdx]; prefixArr[idx] = newTriIdx; } } __global__ void kerCompactTris ( KerCharArray triInfoVec, int* prefixArr, Tri* triArr, TriOpp* oppArr, int newTriNum ) { for (int idx = newTriNum + getCurThreadIdx(); idx < triInfoVec._num; idx += getThreadNum()) { int newTriIdx = prefixArr[idx]; if (newTriIdx == -1) continue; triArr[newTriIdx] = triArr[idx]; triInfoVec._arr[newTriIdx] = triInfoVec._arr[idx]; TriOpp opp = oppArr[idx]; for (int vi = 0; vi < DEG; ++vi) { if (opp._t[vi] < 0) continue; const int oppIdx = opp.getOppTri(vi); if (oppIdx >= newTriNum) { const int oppNewIdx = prefixArr[oppIdx]; opp.setOppTri(vi, oppNewIdx); } else { const int oppVi = opp.getOppVi(vi); oppArr[oppIdx].setOppTri(oppVi, newTriIdx); } } oppArr[newTriIdx] = opp; } } __global__ void kerMapTriToVert ( KerTriArray triVec, int* vertTriArr ) { for (int idx = getCurThreadIdx(); idx < triVec._num; idx += getThreadNum()) { Tri tri = triVec._arr[idx]; for (int vi = 0; vi < DEG; ++vi) vertTriArr[tri._v[vi]] = idx; } } __global__ void kerMarkRejectedConsFlips ( KerIntArray actTriVec, int* triConsArr, int* triVoteArr, char* triInfoArr, TriOpp* oppArr, int* flipToTri, int* dbgRejFlipArr ) { for (int idx = getCurThreadIdx(); idx < actTriVec._num; idx += getThreadNum()) { int output = -1; const int midIdx = actTriVec._arr[idx]; const int voteVal = triVoteArr[midIdx]; if (INT_MAX != voteVal) { const int bossTriIdx = getConsFlipVoteIdx(voteVal); const int priority = getConsFlipVotePriority(voteVal); if (bossTriIdx == midIdx) // Boss of myself { const int midLabel = triConsArr[midIdx]; const int midVi = decode_cVi(midLabel); const int midSide = decode_cSide(midLabel); const int rightIdx = oppArr[midIdx].getOppTri(midVi); const int leftIdx = oppArr[midIdx].getOppTri((midVi + midSide + 1) % 3); if (triVoteArr[rightIdx] == voteVal) { if (priority == 
PriorityCase1) output = encode(midIdx, midVi); else if (triVoteArr[leftIdx] == voteVal) output = encode(midIdx, midVi); if (NULL != dbgRejFlipArr && output == -1) dbgRejFlipArr[midIdx] = 1; if (output != -1) { // Mark all triangles as changed setTriCheckState(triInfoArr[leftIdx], Changed); setTriCheckState(triInfoArr[midIdx], Changed); setTriCheckState(triInfoArr[rightIdx], Changed); const int rightLabel = triConsArr[rightIdx]; if (decode_cSide(rightLabel) != 3) // Not the last one { const int nextIdx = oppArr[rightIdx].getOppTri(decode_cVi(rightLabel)); setTriCheckState(triInfoArr[nextIdx], Changed); } // NOTE: Only marking the left and the right of the flip is // not enough, since when flipping we only check the front pair! // This, however, does not affect the correctness since // the remaining pairs will be processed in the next outer loop. } } } } flipToTri[idx] = output; } return; }
4eac718db4f350eba027e5a6d0f53d2ca139b88d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <vector> #include "cuda/dcn_v2_im2col_cuda.h" #include <ATen/ATen.h> #include <ATen/hip/HIPContext.h> #include <THH/THH.h> #include <THH/THHAtomics.cuh> #include <THH/THHDeviceUtils.cuh> THCState *state = at::globalContext().lazyInitCUDA(); // author: Charles Shang // https://github.com/torch/cunn/blob/master/lib/THCUNN/generic/SpatialConvolutionMM.cu // [batch gemm] // https://github.com/pytorch/pytorch/blob/master/aten/src/THC/generic/THCTensorMathBlas.cu __global__ void createBatchGemmBuffer(const float **input_b, float **output_b, float **columns_b, const float **ones_b, const float **weight_b, const float **bias_b, float *input, float *output, float *columns, float *ones, float *weight, float *bias, const int input_stride, const int output_stride, const int columns_stride, const int ones_stride, const int num_batches) { const int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < num_batches) { input_b[idx] = input + idx * input_stride; output_b[idx] = output + idx * output_stride; columns_b[idx] = columns + idx * columns_stride; ones_b[idx] = ones + idx * ones_stride; // share weights and bias within a Mini-Batch weight_b[idx] = weight; bias_b[idx] = bias; } } at::Tensor dcn_v2_cuda_forward(const at::Tensor &input, const at::Tensor &weight, const at::Tensor &bias, const at::Tensor &offset, const at::Tensor &mask, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int dilation_h, const int dilation_w, const int deformable_group) { using scalar_t = float; // THCAssertSameGPU(THCudaTensor_checkGPU(state, 5, input, weight, bias, offset, mask)); AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor"); AT_ASSERTM(weight.type().is_cuda(), "weight must be a CUDA tensor"); AT_ASSERTM(bias.type().is_cuda(), "bias must be a CUDA tensor"); AT_ASSERTM(offset.type().is_cuda(), "offset must be a 
CUDA tensor"); AT_ASSERTM(mask.type().is_cuda(), "mask must be a CUDA tensor"); const int batch = input.size(0); const int channels = input.size(1); const int height = input.size(2); const int width = input.size(3); const int channels_out = weight.size(0); const int channels_kernel = weight.size(1); const int kernel_h_ = weight.size(2); const int kernel_w_ = weight.size(3); // printf("Kernels: %d %d %d %d\n", kernel_h_, kernel_w_, kernel_w, kernel_h); // printf("Channels: %d %d\n", channels, channels_kernel); // printf("Channels: %d %d\n", channels_out, channels_kernel); AT_ASSERTM(kernel_h_ == kernel_h && kernel_w_ == kernel_w, "Input shape and kernel shape wont match: (%d x %d vs %d x %d).", kernel_h_, kernel_w, kernel_h_, kernel_w_); AT_ASSERTM(channels == channels_kernel, "Input shape and kernel channels wont match: (%d vs %d).", channels, channels_kernel); const int height_out = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1; const int width_out = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1; auto ones = at::ones({batch, height_out, width_out}, input.options()); auto columns = at::empty({batch, channels * kernel_h * kernel_w, 1 * height_out * width_out}, input.options()); auto output = at::empty({batch, channels_out, height_out, width_out}, input.options()); // prepare for batch-wise computing, which is significantly faster than instance-wise computing // when batch size is large. 
// launch batch threads int matrices_size = batch * sizeof(float *); auto input_b = static_cast<const float **>(THCudaMalloc(state, matrices_size)); auto output_b = static_cast<float **>(THCudaMalloc(state, matrices_size)); auto columns_b = static_cast<float **>(THCudaMalloc(state, matrices_size)); auto ones_b = static_cast<const float **>(THCudaMalloc(state, matrices_size)); auto weight_b = static_cast<const float **>(THCudaMalloc(state, matrices_size)); auto bias_b = static_cast<const float **>(THCudaMalloc(state, matrices_size)); const int block = 128; const int grid = (batch + block - 1) / block; hipLaunchKernelGGL(( createBatchGemmBuffer), dim3(grid), dim3(block), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), input_b, output_b, columns_b, ones_b, weight_b, bias_b, input.data<scalar_t>(), output.data<scalar_t>(), columns.data<scalar_t>(), ones.data<scalar_t>(), weight.data<scalar_t>(), bias.data<scalar_t>(), channels * width * height, channels_out * width_out * height_out, channels * kernel_h * kernel_w * height_out * width_out, height_out * width_out, batch); long m_ = channels_out; long n_ = height_out * width_out; long k_ = 1; THCudaBlas_SgemmBatched(state, 't', 'n', n_, m_, k_, 1.0f, ones_b, k_, bias_b, k_, 0.0f, output_b, n_, batch); modulated_deformable_im2col_cuda(c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), input.data<scalar_t>(), offset.data<scalar_t>(), mask.data<scalar_t>(), batch, channels, height, width, height_out, width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, deformable_group, columns.data<scalar_t>()); long m = channels_out; long n = height_out * width_out; long k = channels * kernel_h * kernel_w; THCudaBlas_SgemmBatched(state, 'n', 'n', n, m, k, 1.0f, (const float **)columns_b, n, weight_b, k, 1.0f, output_b, n, batch); THCudaFree(state, input_b); THCudaFree(state, output_b); THCudaFree(state, columns_b); THCudaFree(state, ones_b); THCudaFree(state, weight_b); THCudaFree(state, bias_b); 
return output; } __global__ void createBatchGemmBufferBackward( float **grad_output_b, float **columns_b, float **ones_b, float **weight_b, float **grad_weight_b, float **grad_bias_b, float *grad_output, float *columns, float *ones, float *weight, float *grad_weight, float *grad_bias, const int grad_output_stride, const int columns_stride, const int ones_stride, const int num_batches) { const int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < num_batches) { grad_output_b[idx] = grad_output + idx * grad_output_stride; columns_b[idx] = columns + idx * columns_stride; ones_b[idx] = ones + idx * ones_stride; // share weights and bias within a Mini-Batch weight_b[idx] = weight; grad_weight_b[idx] = grad_weight; grad_bias_b[idx] = grad_bias; } } std::vector<at::Tensor> dcn_v2_cuda_backward(const at::Tensor &input, const at::Tensor &weight, const at::Tensor &bias, const at::Tensor &offset, const at::Tensor &mask, const at::Tensor &grad_output, int kernel_h, int kernel_w, int stride_h, int stride_w, int pad_h, int pad_w, int dilation_h, int dilation_w, int deformable_group) { THArgCheck(input.is_contiguous(), 1, "input tensor has to be contiguous"); THArgCheck(weight.is_contiguous(), 2, "weight tensor has to be contiguous"); AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor"); AT_ASSERTM(weight.type().is_cuda(), "weight must be a CUDA tensor"); AT_ASSERTM(bias.type().is_cuda(), "bias must be a CUDA tensor"); AT_ASSERTM(offset.type().is_cuda(), "offset must be a CUDA tensor"); AT_ASSERTM(mask.type().is_cuda(), "mask must be a CUDA tensor"); const int batch = input.size(0); const int channels = input.size(1); const int height = input.size(2); const int width = input.size(3); const int channels_out = weight.size(0); const int channels_kernel = weight.size(1); const int kernel_h_ = weight.size(2); const int kernel_w_ = weight.size(3); AT_ASSERTM(kernel_h_ == kernel_h && kernel_w_ == kernel_w, "Input shape and kernel shape wont match: (%d x %d vs %d x 
%d).", kernel_h_, kernel_w, kernel_h_, kernel_w_); AT_ASSERTM(channels == channels_kernel, "Input shape and kernel channels wont match: (%d vs %d).", channels, channels_kernel); const int height_out = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1; const int width_out = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1; auto ones = at::ones({height_out, width_out}, input.options()); auto columns = at::empty({channels * kernel_h * kernel_w, 1 * height_out * width_out}, input.options()); auto output = at::empty({batch, channels_out, height_out, width_out}, input.options()); auto grad_input = at::zeros_like(input); auto grad_weight = at::zeros_like(weight); auto grad_bias = at::zeros_like(bias); auto grad_offset = at::zeros_like(offset); auto grad_mask = at::zeros_like(mask); using scalar_t = float; for (int b = 0; b < batch; b++) { auto input_n = input.select(0, b); auto offset_n = offset.select(0, b); auto mask_n = mask.select(0, b); auto grad_output_n = grad_output.select(0, b); auto grad_input_n = grad_input.select(0, b); auto grad_offset_n = grad_offset.select(0, b); auto grad_mask_n = grad_mask.select(0, b); long m = channels * kernel_h * kernel_w; long n = height_out * width_out; long k = channels_out; THCudaBlas_Sgemm(state, 'n', 't', n, m, k, 1.0f, grad_output_n.data<scalar_t>(), n, weight.data<scalar_t>(), m, 0.0f, columns.data<scalar_t>(), n); // gradient w.r.t. input coordinate data modulated_deformable_col2im_coord_cuda(c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), columns.data<scalar_t>(), input_n.data<scalar_t>(), offset_n.data<scalar_t>(), mask_n.data<scalar_t>(), 1, channels, height, width, height_out, width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, deformable_group, grad_offset_n.data<scalar_t>(), grad_mask_n.data<scalar_t>()); // gradient w.r.t. 
input data modulated_deformable_col2im_cuda(c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), columns.data<scalar_t>(), offset_n.data<scalar_t>(), mask_n.data<scalar_t>(), 1, channels, height, width, height_out, width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, deformable_group, grad_input_n.data<scalar_t>()); // gradient w.r.t. weight, dWeight should accumulate across the batch and group modulated_deformable_im2col_cuda(c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), input_n.data<scalar_t>(), offset_n.data<scalar_t>(), mask_n.data<scalar_t>(), 1, channels, height, width, height_out, width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, deformable_group, columns.data<scalar_t>()); long m_ = channels_out; long n_ = channels * kernel_h * kernel_w; long k_ = height_out * width_out; THCudaBlas_Sgemm(state, 't', 'n', n_, m_, k_, 1.0f, columns.data<scalar_t>(), k_, grad_output_n.data<scalar_t>(), k_, 1.0f, grad_weight.data<scalar_t>(), n_); // gradient w.r.t. bias // long m_ = channels_out; // long k__ = height_out * width_out; THCudaBlas_Sgemm(state, 'N', 'N', 1, m_, k_, 1.0f, ones.data<scalar_t>(), 1, grad_output_n.data<scalar_t>(), k_, 1.0f, grad_bias.data<scalar_t>(), 1); } return { grad_input, grad_offset, grad_mask, grad_weight, grad_bias }; }
4eac718db4f350eba027e5a6d0f53d2ca139b88d.cu
#include <vector> #include "cuda/dcn_v2_im2col_cuda.h" #include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include <THC/THC.h> #include <THC/THCAtomics.cuh> #include <THC/THCDeviceUtils.cuh> THCState *state = at::globalContext().lazyInitCUDA(); // author: Charles Shang // https://github.com/torch/cunn/blob/master/lib/THCUNN/generic/SpatialConvolutionMM.cu // [batch gemm] // https://github.com/pytorch/pytorch/blob/master/aten/src/THC/generic/THCTensorMathBlas.cu __global__ void createBatchGemmBuffer(const float **input_b, float **output_b, float **columns_b, const float **ones_b, const float **weight_b, const float **bias_b, float *input, float *output, float *columns, float *ones, float *weight, float *bias, const int input_stride, const int output_stride, const int columns_stride, const int ones_stride, const int num_batches) { const int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < num_batches) { input_b[idx] = input + idx * input_stride; output_b[idx] = output + idx * output_stride; columns_b[idx] = columns + idx * columns_stride; ones_b[idx] = ones + idx * ones_stride; // share weights and bias within a Mini-Batch weight_b[idx] = weight; bias_b[idx] = bias; } } at::Tensor dcn_v2_cuda_forward(const at::Tensor &input, const at::Tensor &weight, const at::Tensor &bias, const at::Tensor &offset, const at::Tensor &mask, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int dilation_h, const int dilation_w, const int deformable_group) { using scalar_t = float; // THCAssertSameGPU(THCudaTensor_checkGPU(state, 5, input, weight, bias, offset, mask)); AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor"); AT_ASSERTM(weight.type().is_cuda(), "weight must be a CUDA tensor"); AT_ASSERTM(bias.type().is_cuda(), "bias must be a CUDA tensor"); AT_ASSERTM(offset.type().is_cuda(), "offset must be a CUDA tensor"); AT_ASSERTM(mask.type().is_cuda(), "mask must be a CUDA tensor"); const 
int batch = input.size(0); const int channels = input.size(1); const int height = input.size(2); const int width = input.size(3); const int channels_out = weight.size(0); const int channels_kernel = weight.size(1); const int kernel_h_ = weight.size(2); const int kernel_w_ = weight.size(3); // printf("Kernels: %d %d %d %d\n", kernel_h_, kernel_w_, kernel_w, kernel_h); // printf("Channels: %d %d\n", channels, channels_kernel); // printf("Channels: %d %d\n", channels_out, channels_kernel); AT_ASSERTM(kernel_h_ == kernel_h && kernel_w_ == kernel_w, "Input shape and kernel shape wont match: (%d x %d vs %d x %d).", kernel_h_, kernel_w, kernel_h_, kernel_w_); AT_ASSERTM(channels == channels_kernel, "Input shape and kernel channels wont match: (%d vs %d).", channels, channels_kernel); const int height_out = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1; const int width_out = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1; auto ones = at::ones({batch, height_out, width_out}, input.options()); auto columns = at::empty({batch, channels * kernel_h * kernel_w, 1 * height_out * width_out}, input.options()); auto output = at::empty({batch, channels_out, height_out, width_out}, input.options()); // prepare for batch-wise computing, which is significantly faster than instance-wise computing // when batch size is large. 
// launch batch threads int matrices_size = batch * sizeof(float *); auto input_b = static_cast<const float **>(THCudaMalloc(state, matrices_size)); auto output_b = static_cast<float **>(THCudaMalloc(state, matrices_size)); auto columns_b = static_cast<float **>(THCudaMalloc(state, matrices_size)); auto ones_b = static_cast<const float **>(THCudaMalloc(state, matrices_size)); auto weight_b = static_cast<const float **>(THCudaMalloc(state, matrices_size)); auto bias_b = static_cast<const float **>(THCudaMalloc(state, matrices_size)); const int block = 128; const int grid = (batch + block - 1) / block; createBatchGemmBuffer<<<grid, block, 0, c10::cuda::getCurrentCUDAStream()>>>( input_b, output_b, columns_b, ones_b, weight_b, bias_b, input.data<scalar_t>(), output.data<scalar_t>(), columns.data<scalar_t>(), ones.data<scalar_t>(), weight.data<scalar_t>(), bias.data<scalar_t>(), channels * width * height, channels_out * width_out * height_out, channels * kernel_h * kernel_w * height_out * width_out, height_out * width_out, batch); long m_ = channels_out; long n_ = height_out * width_out; long k_ = 1; THCudaBlas_SgemmBatched(state, 't', 'n', n_, m_, k_, 1.0f, ones_b, k_, bias_b, k_, 0.0f, output_b, n_, batch); modulated_deformable_im2col_cuda(c10::cuda::getCurrentCUDAStream(), input.data<scalar_t>(), offset.data<scalar_t>(), mask.data<scalar_t>(), batch, channels, height, width, height_out, width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, deformable_group, columns.data<scalar_t>()); long m = channels_out; long n = height_out * width_out; long k = channels * kernel_h * kernel_w; THCudaBlas_SgemmBatched(state, 'n', 'n', n, m, k, 1.0f, (const float **)columns_b, n, weight_b, k, 1.0f, output_b, n, batch); THCudaFree(state, input_b); THCudaFree(state, output_b); THCudaFree(state, columns_b); THCudaFree(state, ones_b); THCudaFree(state, weight_b); THCudaFree(state, bias_b); return output; } __global__ void createBatchGemmBufferBackward( 
float **grad_output_b, float **columns_b, float **ones_b, float **weight_b, float **grad_weight_b, float **grad_bias_b, float *grad_output, float *columns, float *ones, float *weight, float *grad_weight, float *grad_bias, const int grad_output_stride, const int columns_stride, const int ones_stride, const int num_batches) { const int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < num_batches) { grad_output_b[idx] = grad_output + idx * grad_output_stride; columns_b[idx] = columns + idx * columns_stride; ones_b[idx] = ones + idx * ones_stride; // share weights and bias within a Mini-Batch weight_b[idx] = weight; grad_weight_b[idx] = grad_weight; grad_bias_b[idx] = grad_bias; } } std::vector<at::Tensor> dcn_v2_cuda_backward(const at::Tensor &input, const at::Tensor &weight, const at::Tensor &bias, const at::Tensor &offset, const at::Tensor &mask, const at::Tensor &grad_output, int kernel_h, int kernel_w, int stride_h, int stride_w, int pad_h, int pad_w, int dilation_h, int dilation_w, int deformable_group) { THArgCheck(input.is_contiguous(), 1, "input tensor has to be contiguous"); THArgCheck(weight.is_contiguous(), 2, "weight tensor has to be contiguous"); AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor"); AT_ASSERTM(weight.type().is_cuda(), "weight must be a CUDA tensor"); AT_ASSERTM(bias.type().is_cuda(), "bias must be a CUDA tensor"); AT_ASSERTM(offset.type().is_cuda(), "offset must be a CUDA tensor"); AT_ASSERTM(mask.type().is_cuda(), "mask must be a CUDA tensor"); const int batch = input.size(0); const int channels = input.size(1); const int height = input.size(2); const int width = input.size(3); const int channels_out = weight.size(0); const int channels_kernel = weight.size(1); const int kernel_h_ = weight.size(2); const int kernel_w_ = weight.size(3); AT_ASSERTM(kernel_h_ == kernel_h && kernel_w_ == kernel_w, "Input shape and kernel shape wont match: (%d x %d vs %d x %d).", kernel_h_, kernel_w, kernel_h_, kernel_w_); 
AT_ASSERTM(channels == channels_kernel, "Input shape and kernel channels wont match: (%d vs %d).", channels, channels_kernel); const int height_out = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1; const int width_out = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1; auto ones = at::ones({height_out, width_out}, input.options()); auto columns = at::empty({channels * kernel_h * kernel_w, 1 * height_out * width_out}, input.options()); auto output = at::empty({batch, channels_out, height_out, width_out}, input.options()); auto grad_input = at::zeros_like(input); auto grad_weight = at::zeros_like(weight); auto grad_bias = at::zeros_like(bias); auto grad_offset = at::zeros_like(offset); auto grad_mask = at::zeros_like(mask); using scalar_t = float; for (int b = 0; b < batch; b++) { auto input_n = input.select(0, b); auto offset_n = offset.select(0, b); auto mask_n = mask.select(0, b); auto grad_output_n = grad_output.select(0, b); auto grad_input_n = grad_input.select(0, b); auto grad_offset_n = grad_offset.select(0, b); auto grad_mask_n = grad_mask.select(0, b); long m = channels * kernel_h * kernel_w; long n = height_out * width_out; long k = channels_out; THCudaBlas_Sgemm(state, 'n', 't', n, m, k, 1.0f, grad_output_n.data<scalar_t>(), n, weight.data<scalar_t>(), m, 0.0f, columns.data<scalar_t>(), n); // gradient w.r.t. input coordinate data modulated_deformable_col2im_coord_cuda(c10::cuda::getCurrentCUDAStream(), columns.data<scalar_t>(), input_n.data<scalar_t>(), offset_n.data<scalar_t>(), mask_n.data<scalar_t>(), 1, channels, height, width, height_out, width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, deformable_group, grad_offset_n.data<scalar_t>(), grad_mask_n.data<scalar_t>()); // gradient w.r.t. 
input data modulated_deformable_col2im_cuda(c10::cuda::getCurrentCUDAStream(), columns.data<scalar_t>(), offset_n.data<scalar_t>(), mask_n.data<scalar_t>(), 1, channels, height, width, height_out, width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, deformable_group, grad_input_n.data<scalar_t>()); // gradient w.r.t. weight, dWeight should accumulate across the batch and group modulated_deformable_im2col_cuda(c10::cuda::getCurrentCUDAStream(), input_n.data<scalar_t>(), offset_n.data<scalar_t>(), mask_n.data<scalar_t>(), 1, channels, height, width, height_out, width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, deformable_group, columns.data<scalar_t>()); long m_ = channels_out; long n_ = channels * kernel_h * kernel_w; long k_ = height_out * width_out; THCudaBlas_Sgemm(state, 't', 'n', n_, m_, k_, 1.0f, columns.data<scalar_t>(), k_, grad_output_n.data<scalar_t>(), k_, 1.0f, grad_weight.data<scalar_t>(), n_); // gradient w.r.t. bias // long m_ = channels_out; // long k__ = height_out * width_out; THCudaBlas_Sgemm(state, 'N', 'N', 1, m_, k_, 1.0f, ones.data<scalar_t>(), 1, grad_output_n.data<scalar_t>(), k_, 1.0f, grad_bias.data<scalar_t>(), 1); } return { grad_input, grad_offset, grad_mask, grad_weight, grad_bias }; }
08003edb95c0c8ab45e988c2077c43db714cc0d4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * @file grating.cu * * @author Jayanth Chennamangalam * @date 2011.07.08 */ #include "grating.h" #define PL 1 int g_iIsDataReadDone = FALSE; int g_iIsProcDone = FALSE; int g_iMaxThreadsPerBlock = 0; char4* g_pc4InBuf = NULL; char4* g_pc4InBufRead = NULL; int g_iSizeFile = 0; int g_iReadCount = 0; char4* g_pc4Data_d = NULL; /* raw data starting address */ char4* g_pc4DataRead_d = NULL; /* raw data read pointer */ int g_iNFFT = DEF_LEN_SPEC; int g_iMaxPhysThreads; dim3 g_dimBPFB(1, 1, 1); dim3 g_dimGPFB(1, 1); dim3 g_dimBCopy(1, 1, 1); dim3 g_dimGCopy(1, 1); dim3 g_dimBAccum(1, 1, 1); dim3 g_dimGAccum(1, 1); float4* g_pf4FFTIn_d = NULL; float4* g_pf4FFTOut_d = NULL; hipfftHandle g_stPlan = {0}; float4* g_pf4SumStokes = NULL; float4* g_pf4SumStokes_d = NULL; int g_iIsPFBOn = DEF_PFB_ON; int g_iNTaps = 1; /* 1 if no PFB, NUM_TAPS if PFB */ char g_acFileData[256] = {0}; /* BUG: crash if file size is less than 32MB */ int g_iSizeRead = DEF_SIZE_READ; //int g_iSizeRead = DEF_SIZE_READ * 2; int g_iNumSubBands = DEF_NUM_SUBBANDS; int g_iFileCoeff = 0; char g_acFileCoeff[256] = {0}; float *g_pfPFBCoeff = NULL; float *g_pfPFBCoeff_d = NULL; #if PLOT float* g_pfSumPowX = NULL; float* g_pfSumPowY = NULL; float* g_pfSumStokesRe = NULL; float* g_pfSumStokesIm = NULL; float* g_pfFreq = NULL; float g_fFSamp = 1.0; /* 1 [frequency] */ #endif #if BENCHMARKING float g_fTimeCpIn = 0.0; float g_fTotCpIn = 0.0; int g_iCountCpIn = 0; hipEvent_t g_cuStart; hipEvent_t g_cuStop; #endif int main(int argc, char *argv[]) { int iRet = EXIT_SUCCESS; int iSpecCount = 0; int iNumAcc = DEF_ACC; int iCUDADevice = DEF_CUDA_DEVICE; int iProcData = 0; long int lProcDataAll = 0; hipError_t iCUDARet = hipSuccess; #if BENCHMARKING float fTimePFB = 0.0; float fTotPFB = 0.0; int iCountPFB = 0; float fTimeCpInFFT = 0.0; float fTotCpInFFT = 0.0; int iCountCpInFFT = 0; float fTimeFFT = 0.0; float fTotFFT = 0.0; int 
iCountFFT = 0; float fTimeCpOut = 0.0; float fTotCpOut = 0.0; int iCountCpOut = 0; float fTimeAccum = 0.0; float fTotAccum = 0.0; int iCountAccum = 0; #else struct timeval stStart = {0}; struct timeval stStop = {0}; float fTimeTaken = 0.0; float fTotThroughput = 0.0; #endif #if OUTFILE int iFileSpec = 0; #endif const char *pcProgName = NULL; int iNextOpt = 0; /* valid short options */ #if PLOT const char* const pcOptsShort = "hb:n:pa:d:s:"; #else const char* const pcOptsShort = "hb:n:pa:d:"; #endif /* valid long options */ const struct option stOptsLong[] = { { "help", 0, NULL, 'h' }, { "nsub", 1, NULL, 'b' }, { "nfft", 1, NULL, 'n' }, { "pfb", 0, NULL, 'p' }, { "nacc", 1, NULL, 'a' }, { "device", 1, NULL, 'd' }, #if PLOT { "fsamp", 1, NULL, 's' }, #endif { NULL, 0, NULL, 0 } }; /* get the filename of the program from the argument list */ pcProgName = argv[0]; /* parse the input */ do { iNextOpt = getopt_long(argc, argv, pcOptsShort, stOptsLong, NULL); switch (iNextOpt) { case 'h': /* -h or --help */ /* print usage info and terminate */ PrintUsage(pcProgName); return EXIT_SUCCESS; case 'b': /* -b or --nsub */ /* set option */ g_iNumSubBands = (int) atoi(optarg); break; case 'n': /* -n or --nfft */ /* set option */ g_iNFFT = (int) atoi(optarg); break; case 'p': /* -p or --pfb */ /* set option */ g_iIsPFBOn = TRUE; break; case 'a': /* -a or --nacc */ /* set option */ iNumAcc = (int) atoi(optarg); break; case 'd': /* -d or --device */ /* set option */ iCUDADevice = (int) atoi(optarg); break; #if PLOT case 's': /* -s or --fsamp */ /* set option */ g_fFSamp = (float) atof(optarg); break; #endif case '?': /* user specified an invalid option */ /* print usage info and terminate with error */ (void) fprintf(stderr, "ERROR: Invalid option!\n"); PrintUsage(pcProgName); return EXIT_FAILURE; case -1: /* done with options */ break; default: /* unexpected */ assert(0); } } while (iNextOpt != -1); /* no arguments */ if (argc <= optind) { (void) fprintf(stderr, "ERROR: Data file 
not specified!\n"); PrintUsage(pcProgName); return EXIT_FAILURE; } (void) strncpy(g_acFileData, argv[optind], 256); g_acFileData[255] = '\0'; #if BENCHMARKING (void) printf("* Benchmarking run commencing...\n"); #endif /* initialise */ iRet = Init(iCUDADevice); if (iRet != EXIT_SUCCESS) { (void) fprintf(stderr, "ERROR! Init failed!\n"); CleanUp(); return EXIT_FAILURE; } #if OUTFILE iFileSpec = open("spec.dat", O_CREAT | O_TRUNC | O_WRONLY, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); if (iFileSpec < EXIT_SUCCESS) { (void) fprintf(stderr, "ERROR! Opening spectrum file failed!\n"); CleanUp(); return EXIT_FAILURE; } #endif #if (!BENCHMARKING) (void) gettimeofday(&stStart, NULL); #endif while (!g_iIsProcDone) { if (g_iIsPFBOn) { /* do pfb */ #if BENCHMARKING CUDASafeCallWithCleanUp(hipEventRecord(g_cuStart, 0)); CUDASafeCallWithCleanUp(hipEventSynchronize(g_cuStart)); #endif hipLaunchKernelGGL(( DoPFB), dim3(g_dimGPFB), dim3(g_dimBPFB), 0, 0, g_pc4DataRead_d, g_pf4FFTIn_d, g_pfPFBCoeff_d); CUDASafeCallWithCleanUp(hipDeviceSynchronize()); iCUDARet = hipGetLastError(); if (iCUDARet != hipSuccess) { (void) fprintf(stderr, "ERROR: File <%s>, Line %d: %s\n", __FILE__, __LINE__, hipGetErrorString(iCUDARet)); /* free resources */ CleanUp(); return EXIT_FAILURE; } #if BENCHMARKING CUDASafeCallWithCleanUp(hipEventRecord(g_cuStop, 0)); CUDASafeCallWithCleanUp(hipEventSynchronize(g_cuStop)); CUDASafeCallWithCleanUp(hipEventElapsedTime(&fTimePFB, g_cuStart, g_cuStop)); fTotPFB += fTimePFB; ++iCountPFB; #endif /* update the data read pointer */ g_pc4DataRead_d += (g_iNumSubBands * g_iNFFT); } else { #if BENCHMARKING CUDASafeCallWithCleanUp(hipEventRecord(g_cuStart, 0)); CUDASafeCallWithCleanUp(hipEventSynchronize(g_cuStart)); #endif hipLaunchKernelGGL(( CopyDataForFFT), dim3(g_dimGCopy), dim3(g_dimBCopy), 0, 0, g_pc4DataRead_d, g_pf4FFTIn_d); CUDASafeCallWithCleanUp(hipDeviceSynchronize()); iCUDARet = hipGetLastError(); if (iCUDARet != hipSuccess) { (void) fprintf(stderr, "ERROR: File 
<%s>, Line %d: %s\n", __FILE__, __LINE__, hipGetErrorString(iCUDARet)); /* free resources */ CleanUp(); return EXIT_FAILURE; } #if BENCHMARKING CUDASafeCallWithCleanUp(hipEventRecord(g_cuStop, 0)); CUDASafeCallWithCleanUp(hipEventSynchronize(g_cuStop)); CUDASafeCallWithCleanUp(hipEventElapsedTime(&fTimeCpInFFT, g_cuStart, g_cuStop)); fTotCpInFFT += fTimeCpInFFT; ++iCountCpInFFT; #endif /* update the data read pointer */ g_pc4DataRead_d += (g_iNumSubBands * g_iNFFT); } /* do fft */ #if BENCHMARKING CUDASafeCallWithCleanUp(hipEventRecord(g_cuStart, 0)); CUDASafeCallWithCleanUp(hipEventSynchronize(g_cuStart)); #endif iRet = DoFFT(); if (iRet != EXIT_SUCCESS) { (void) fprintf(stderr, "ERROR! FFT failed!\n"); #if OUTFILE (void) close(iFileSpec); #endif CleanUp(); return EXIT_FAILURE; } #if BENCHMARKING CUDASafeCallWithCleanUp(hipEventRecord(g_cuStop, 0)); CUDASafeCallWithCleanUp(hipEventSynchronize(g_cuStop)); CUDASafeCallWithCleanUp(hipEventElapsedTime(&fTimeFFT, g_cuStart, g_cuStop)); fTotFFT += fTimeFFT; ++iCountFFT; #endif /* accumulate power x, power y, stokes, if the blanking bit is not set */ #if BENCHMARKING CUDASafeCallWithCleanUp(hipEventRecord(g_cuStart, 0)); CUDASafeCallWithCleanUp(hipEventSynchronize(g_cuStart)); #endif hipLaunchKernelGGL(( Accumulate), dim3(g_dimGAccum), dim3(g_dimBAccum), 0, 0, g_pf4FFTOut_d, g_pf4SumStokes_d); CUDASafeCallWithCleanUp(hipDeviceSynchronize()); iCUDARet = hipGetLastError(); if (iCUDARet != hipSuccess) { (void) fprintf(stderr, "ERROR: File <%s>, Line %d: %s\n", __FILE__, __LINE__, hipGetErrorString(iCUDARet)); /* free resources */ CleanUp(); return EXIT_FAILURE; } #if BENCHMARKING CUDASafeCallWithCleanUp(hipEventRecord(g_cuStop, 0)); CUDASafeCallWithCleanUp(hipEventSynchronize(g_cuStop)); CUDASafeCallWithCleanUp(hipEventElapsedTime(&fTimeAccum, g_cuStart, g_cuStop)); fTotAccum += fTimeAccum; ++iCountAccum; #endif ++iSpecCount; if (iSpecCount == iNumAcc) { /* dump to buffer */ #if BENCHMARKING 
CUDASafeCallWithCleanUp(hipEventRecord(g_cuStart, 0)); CUDASafeCallWithCleanUp(hipEventSynchronize(g_cuStart)); #endif CUDASafeCallWithCleanUp(hipMemcpy(g_pf4SumStokes, g_pf4SumStokes_d, (g_iNumSubBands * g_iNFFT * sizeof(float4)), hipMemcpyDeviceToHost)); #if BENCHMARKING CUDASafeCallWithCleanUp(hipEventRecord(g_cuStop, 0)); CUDASafeCallWithCleanUp(hipEventSynchronize(g_cuStop)); CUDASafeCallWithCleanUp(hipEventElapsedTime(&fTimeCpOut, g_cuStart, g_cuStop)); fTotCpOut += fTimeCpOut; ++iCountCpOut; #endif #if OUTFILE (void) write(iFileSpec, g_pf4SumStokes, g_iNumSubBands * g_iNFFT * sizeof(float4)); #endif #if PLOT /* NOTE: Plot() will modify data! */ Plot(); (void) usleep(500000); #endif /* reset time */ iSpecCount = 0; /* zero accumulators */ CUDASafeCallWithCleanUp(hipMemset(g_pf4SumStokes_d, '\0', (g_iNumSubBands * g_iNFFT * sizeof(float4)))); } /* if time to read from input buffer */ iProcData += (g_iNumSubBands * g_iNFFT * sizeof(char4)); lProcDataAll += (g_iNumSubBands * g_iNFFT * sizeof(char4)); if ((g_iSizeRead - ((g_iNTaps - 1) * g_iNumSubBands * g_iNFFT * sizeof(char4))) == iProcData) { if (!(g_iIsDataReadDone)) { /* read data from input buffer */ iRet = ReadData(); if (iRet != EXIT_SUCCESS) { (void) fprintf(stderr, "ERROR: Data reading failed!\n"); break; } iProcData = 0; } else /* no more data to be read */ { g_iIsProcDone = TRUE; } } } #if (!BENCHMARKING) (void) gettimeofday(&stStop, NULL); fTimeTaken = (stStop.tv_sec + (stStop.tv_usec * USEC2SEC)) - (stStart.tv_sec + (stStart.tv_usec * USEC2SEC)); fTotThroughput = (float) lProcDataAll / (fTimeTaken * NUM_BYTES_PER_SAMP); (void) printf("Time taken (barring Init()): %gs; " "Total throughput: %5.3f Msps = %5.3f MBps\n", fTimeTaken, fTotThroughput / (1024 * 1024), NUM_BYTES_PER_SAMP * fTotThroughput / (1024 * 1024)); #endif #if OUTFILE (void) close(iFileSpec); #endif CleanUp(); #if BENCHMARKING PrintBenchmarks(fTotPFB, iCountPFB, fTotCpInFFT, iCountCpInFFT, fTotFFT, iCountFFT, fTotAccum, iCountAccum, 
fTotCpOut, iCountCpOut, lProcDataAll); CUDASafeCallWithCleanUp(hipEventDestroy(g_cuStart)); CUDASafeCallWithCleanUp(hipEventDestroy(g_cuStop)); (void) printf("* Events destroyed.\n"); (void) printf("* Benchmarking run completed.\n"); #endif return EXIT_SUCCESS; } /* function that creates the FFT plan, allocates memory, initialises counters, etc. */ int Init(int iCUDADevice) { int iDevCount = 0; hipDeviceProp_t stDevProp = {0}; int iRet = EXIT_SUCCESS; hipfftResult iCUFFTRet = HIPFFT_SUCCESS; size_t lTotCUDAMalloc = 0; int i = 0; iRet = RegisterSignalHandlers(); if (iRet != EXIT_SUCCESS) { (void) fprintf(stderr, "ERROR: Signal-handler registration failed!\n"); return EXIT_FAILURE; } /* since CUDASafeCallWithCleanUp() calls hipGetErrorString(), it should not be used here - will cause crash if no CUDA device is found */ (void) hipGetDeviceCount(&iDevCount); if (0 == iDevCount) { (void) fprintf(stderr, "ERROR: No CUDA-capable device found!\n"); return EXIT_FAILURE; } if (iCUDADevice >= iDevCount) { (void) fprintf(stderr, "ERROR: requested device %d not among present %d devices!\n", iCUDADevice, iDevCount); return EXIT_FAILURE; } for (i = 0; i < iDevCount; ++i) { CUDASafeCallWithCleanUp(hipGetDeviceProperties(&stDevProp, i)); printf("CUDA Device %d: %s, Compute Capability %d.%d, %d physical threads %s\n", i, stDevProp.name, stDevProp.major, stDevProp.minor, stDevProp.multiProcessorCount * stDevProp.maxThreadsPerMultiProcessor, (iCUDADevice == i) ? 
"(selected)" : ""); } CUDASafeCallWithCleanUp(hipSetDevice(iCUDADevice)); #if BENCHMARKING CUDASafeCallWithCleanUp(hipEventCreate(&g_cuStart)); CUDASafeCallWithCleanUp(hipEventCreate(&g_cuStop)); (void) printf("* Events created.\n"); #endif CUDASafeCallWithCleanUp(hipGetDeviceProperties(&stDevProp, 0)); g_iMaxThreadsPerBlock = stDevProp.maxThreadsPerBlock; g_iMaxPhysThreads = stDevProp.multiProcessorCount * stDevProp.maxThreadsPerMultiProcessor; /* check if the data buffer is large enough to contain at least one [PFB +] transform */ if (g_iSizeRead < (g_iNumSubBands * g_iNTaps * g_iNFFT * NUM_BYTES_PER_SAMP)) { int iMaxNFFT = g_iSizeRead / (g_iNumSubBands * g_iNTaps * NUM_BYTES_PER_SAMP); (void) fprintf(stderr, "ERROR: Cannot perform specified operation with given " "input buffer size! Input buffer size is %d MB, " "maximum length of FFT is %d.\n", (int) ((float) g_iSizeRead) / (1024 * 1024), iMaxNFFT); return EXIT_FAILURE; } if (g_iIsPFBOn) { lTotCUDAMalloc += (g_iNumSubBands * g_iNTaps * g_iNFFT * sizeof(float)); } lTotCUDAMalloc += g_iSizeRead; lTotCUDAMalloc += (g_iNumSubBands * g_iNFFT * sizeof(float4)); lTotCUDAMalloc += (g_iNumSubBands * g_iNFFT * sizeof(float4)); lTotCUDAMalloc += (g_iNumSubBands * g_iNFFT * sizeof(float4)); if (lTotCUDAMalloc > stDevProp.totalGlobalMem) { (void) fprintf(stderr, "ERROR: Total memory requested on GPU is %g of a " "possible %g MB. 
Memory request break-up:\n" " Input data buffer: %g MB\n" " FFT in array: %g MB\n" " FFT out array: %g MB\n" " Stokes output array: %g MB\n", ((float) lTotCUDAMalloc) / (1024 * 1024), ((float) stDevProp.totalGlobalMem) / (1024 * 1024), ((float) g_iSizeRead) / (1024 * 1024), ((float) g_iNumSubBands * g_iNFFT * sizeof(float4)) / (1024 * 1024), ((float) g_iNumSubBands * g_iNFFT * sizeof(float4)) / (1024 * 1024), ((float) g_iNumSubBands * g_iNFFT * sizeof(float4)) / (1024 * 1024)); if (g_iIsPFBOn) { (void) fprintf(stderr, " PFB coefficients: %g MB\n", ((float) g_iNumSubBands * g_iNFFT * sizeof(float4)) / (1024 * 1024)); } return EXIT_FAILURE; } #ifdef DEBUG else { (void) printf("INFO: Total memory requested on GPU is %g of a " "possible %g MB. Memory request break-up:\n" " Input data buffer: %g MB\n" " FFT in array: %g MB\n" " FFT out array: %g MB\n" " Stokes output array: %g MB\n", ((float) lTotCUDAMalloc) / (1024 * 1024), ((float) stDevProp.totalGlobalMem) / (1024 * 1024), ((float) g_iSizeRead) / (1024 * 1024), ((float) g_iNumSubBands * g_iNFFT * sizeof(float4)) / (1024 * 1024), ((float) g_iNumSubBands * g_iNFFT * sizeof(float4)) / (1024 * 1024), ((float) g_iNumSubBands * g_iNFFT * sizeof(float4)) / (1024 * 1024)); if (g_iIsPFBOn) { (void) printf(" PFB coefficients: %d MB\n", ((float) g_iNumSubBands * g_iNFFT * sizeof(float4)) / (1024 * 1024)); } } #endif if (g_iIsPFBOn) { /* set number of taps to NUM_TAPS if PFB is on, else number of taps = 1 */ g_iNTaps = NUM_TAPS; g_pfPFBCoeff = (float *) malloc(g_iNumSubBands * g_iNTaps * g_iNFFT * sizeof(float)); if (NULL == g_pfPFBCoeff) { (void) fprintf(stderr, "ERROR: Memory allocation failed! 
%s.\n", strerror(errno)); return EXIT_FAILURE; } /* allocate memory for the filter coefficient array on the device */ CUDASafeCallWithCleanUp(hipMalloc((void **) &g_pfPFBCoeff_d, g_iNumSubBands * g_iNTaps * g_iNFFT * sizeof(float))); /* read filter coefficients */ /* build file name */ (void) sprintf(g_acFileCoeff, "%s_%s_%d_%d_%d%s", FILE_COEFF_PREFIX, FILE_COEFF_DATATYPE, g_iNTaps, g_iNFFT, g_iNumSubBands, FILE_COEFF_SUFFIX); g_iFileCoeff = open(g_acFileCoeff, O_RDONLY); if (g_iFileCoeff < EXIT_SUCCESS) { (void) fprintf(stderr, "ERROR: Opening filter coefficients file %s " "failed! %s.\n", g_acFileCoeff, strerror(errno)); return EXIT_FAILURE; } iRet = read(g_iFileCoeff, g_pfPFBCoeff, g_iNumSubBands * g_iNTaps * g_iNFFT * sizeof(float)); if (iRet != (g_iNumSubBands * g_iNTaps * g_iNFFT * sizeof(float))) { (void) fprintf(stderr, "ERROR: Reading filter coefficients failed! %s.\n", strerror(errno)); return EXIT_FAILURE; } (void) close(g_iFileCoeff); /* copy filter coefficients to the device */ CUDASafeCallWithCleanUp(hipMemcpy(g_pfPFBCoeff_d, g_pfPFBCoeff, g_iNumSubBands * g_iNTaps * g_iNFFT * sizeof(float), hipMemcpyHostToDevice)); } /* allocate memory for data array - 32MB is the block size for the VEGAS input buffer */ CUDASafeCallWithCleanUp(hipMalloc((void **) &g_pc4Data_d, g_iSizeRead)); g_pc4DataRead_d = g_pc4Data_d; /* load data into memory */ iRet = LoadDataToMem(); if (iRet != EXIT_SUCCESS) { (void) fprintf(stderr, "ERROR! 
Loading to memory failed!\n"); return EXIT_FAILURE; } /* calculate kernel parameters */ if (g_iNFFT < g_iMaxThreadsPerBlock) { g_dimBPFB.x = g_iNFFT; g_dimBCopy.x = g_iNFFT; g_dimBAccum.x = g_iNFFT; } else { g_dimBPFB.x = g_iMaxThreadsPerBlock; g_dimBCopy.x = g_iMaxThreadsPerBlock; g_dimBAccum.x = g_iMaxThreadsPerBlock; } g_dimGPFB.x = (g_iNumSubBands * g_iNFFT) / g_dimBPFB.x; g_dimGCopy.x = (g_iNumSubBands * g_iNFFT) / g_dimBCopy.x; g_dimGAccum.x = (g_iNumSubBands * g_iNFFT) / g_dimBAccum.x; iRet = ReadData(); if (iRet != EXIT_SUCCESS) { (void) fprintf(stderr, "ERROR: Reading data failed!\n"); return EXIT_FAILURE; } CUDASafeCallWithCleanUp(hipMalloc((void **) &g_pf4FFTIn_d, g_iNumSubBands * g_iNFFT * sizeof(float4))); CUDASafeCallWithCleanUp(hipMalloc((void **) &g_pf4FFTOut_d, g_iNumSubBands * g_iNFFT * sizeof(float4))); g_pf4SumStokes = (float4 *) malloc(g_iNumSubBands * g_iNFFT * sizeof(float4)); if (NULL == g_pf4SumStokes) { (void) fprintf(stderr, "ERROR: Memory allocation failed! %s.\n", strerror(errno)); return EXIT_FAILURE; } CUDASafeCallWithCleanUp(hipMalloc((void **) &g_pf4SumStokes_d, g_iNumSubBands * g_iNFFT * sizeof(float4))); CUDASafeCallWithCleanUp(hipMemset(g_pf4SumStokes_d, '\0', g_iNumSubBands * g_iNFFT * sizeof(float4))); /* create plan */ iCUFFTRet = hipfftPlanMany(&g_stPlan, FFTPLAN_RANK, &g_iNFFT, &g_iNFFT, FFTPLAN_ISTRIDE, FFTPLAN_IDIST, &g_iNFFT, FFTPLAN_OSTRIDE, FFTPLAN_ODIST, HIPFFT_C2C, FFTPLAN_BATCH); if (iCUFFTRet != HIPFFT_SUCCESS) { (void) fprintf(stderr, "ERROR: Plan creation failed!\n"); return EXIT_FAILURE; } #if PLOT iRet = InitPlot(); if (iRet != EXIT_SUCCESS) { (void) fprintf(stderr, "ERROR: Plotting initialisation failed!\n"); return EXIT_FAILURE; } #endif return EXIT_SUCCESS; } /* function that reads data from the data file and loads it into memory during initialisation */ int LoadDataToMem() { struct stat stFileStats = {0}; int iRet = EXIT_SUCCESS; int iFileData = 0; iRet = stat(g_acFileData, &stFileStats); if (iRet != 
EXIT_SUCCESS) { (void) fprintf(stderr, "ERROR: Failed to stat %s: %s!\n", g_acFileData, strerror(errno)); return EXIT_FAILURE; } g_iSizeFile = stFileStats.st_size; #if PL CUDASafeCallWithCleanUp(hipHostMalloc(&g_pc4InBuf, g_iSizeFile)); #else g_pc4InBuf = (char4*) malloc(g_iSizeFile); if (NULL == g_pc4InBuf) { (void) fprintf(stderr, "ERROR: Memory allocation failed! %s.\n", strerror(errno)); return EXIT_FAILURE; } #endif iFileData = open(g_acFileData, O_RDONLY); if (iFileData < EXIT_SUCCESS) { (void) fprintf(stderr, "ERROR! Opening data file %s failed! %s.\n", g_acFileData, strerror(errno)); return EXIT_FAILURE; } iRet = read(iFileData, g_pc4InBuf, g_iSizeFile); if (iRet < EXIT_SUCCESS) { (void) fprintf(stderr, "ERROR: Data reading failed! %s.\n", strerror(errno)); (void) close(iFileData); return EXIT_FAILURE; } else if (iRet != stFileStats.st_size) { (void) printf("File read done!\n"); } (void) close(iFileData); /* set the read pointer to the beginning of the data array */ g_pc4InBufRead = g_pc4InBuf; return EXIT_SUCCESS; } /* function that reads data from input buffer */ int ReadData() { /* write new data to the write buffer */ #if BENCHMARKING CUDASafeCallWithCleanUp(hipEventRecord(g_cuStart, 0)); CUDASafeCallWithCleanUp(hipEventSynchronize(g_cuStart)); #endif CUDASafeCallWithCleanUp(hipMemcpy(g_pc4Data_d, g_pc4InBufRead, g_iSizeRead, hipMemcpyHostToDevice)); #if BENCHMARKING CUDASafeCallWithCleanUp(hipEventRecord(g_cuStop, 0)); CUDASafeCallWithCleanUp(hipEventSynchronize(g_cuStop)); CUDASafeCallWithCleanUp(hipEventElapsedTime(&g_fTimeCpIn, g_cuStart, g_cuStop)); g_fTotCpIn += g_fTimeCpIn; ++g_iCountCpIn; #endif /* update the read pointer to where data needs to be read in from, in the next read */ g_pc4InBufRead += ((g_iSizeRead - ((g_iNTaps - 1) * g_iNumSubBands * g_iNFFT * sizeof(char4))) / sizeof(char4)); /* whenever there is a read, reset the read pointer to the beginning */ g_pc4DataRead_d = g_pc4Data_d; ++g_iReadCount; /* BUG: won't read last block */ if 
((((char *) g_pc4InBuf) + g_iSizeFile) - ((char *) g_pc4InBufRead) <= g_iSizeRead) { (void) printf("Data read done! Read count = %d\n", g_iReadCount); g_iIsDataReadDone = TRUE; } return EXIT_SUCCESS; } /* function that performs the PFB */ __global__ void DoPFB(char4 *pc4Data, float4 *pf4FFTIn, float *pfPFBCoeff) { int i = (blockIdx.x * blockDim.x) + threadIdx.x; int iNFFT = (gridDim.x * blockDim.x); int j = 0; int iAbsIdx = 0; float4 f4PFBOut = make_float4(0.0, 0.0, 0.0, 0.0); char4 c4Data = make_char4(0, 0, 0, 0); for (j = 0; j < NUM_TAPS; ++j) { /* calculate the absolute index */ iAbsIdx = (j * iNFFT) + i; /* get the address of the block */ c4Data = pc4Data[iAbsIdx]; f4PFBOut.x += (float) c4Data.x * pfPFBCoeff[iAbsIdx]; f4PFBOut.y += (float) c4Data.y * pfPFBCoeff[iAbsIdx]; f4PFBOut.z += (float) c4Data.z * pfPFBCoeff[iAbsIdx]; f4PFBOut.w += (float) c4Data.w * pfPFBCoeff[iAbsIdx]; } pf4FFTIn[i] = f4PFBOut; return; } __global__ void CopyDataForFFT(char4 *pc4Data, float4 *pf4FFTIn) { int i = (blockIdx.x * blockDim.x) + threadIdx.x; char4 in = pc4Data[i]; float4 out = { (float)in.x, (float)in.y, (float)in.z, (float)in.w }; pf4FFTIn[i] = out; return; } /* function that performs the FFT */ int DoFFT() { hipfftResult iCUFFTRet = HIPFFT_SUCCESS; /* execute plan */ iCUFFTRet = hipfftExecC2C(g_stPlan, (hipfftComplex*) g_pf4FFTIn_d, (hipfftComplex*) g_pf4FFTOut_d, HIPFFT_FORWARD); if (iCUFFTRet != HIPFFT_SUCCESS) { (void) fprintf(stderr, "ERROR! 
FFT failed!\n"); return EXIT_FAILURE; } return EXIT_SUCCESS; } __global__ void Accumulate(float4 *pf4FFTOut, float4 *pf4SumStokes) { int i = (blockIdx.x * blockDim.x) + threadIdx.x; float4 f4FFTOut = pf4FFTOut[i]; float4 f4SumStokes = pf4SumStokes[i]; /* Re(X)^2 + Im(X)^2 */ f4SumStokes.x += (f4FFTOut.x * f4FFTOut.x) + (f4FFTOut.y * f4FFTOut.y); /* Re(Y)^2 + Im(Y)^2 */ f4SumStokes.y += (f4FFTOut.z * f4FFTOut.z) + (f4FFTOut.w * f4FFTOut.w); /* Re(XY*) */ f4SumStokes.z += (f4FFTOut.x * f4FFTOut.z) + (f4FFTOut.y * f4FFTOut.w); /* Im(XY*) */ f4SumStokes.w += (f4FFTOut.y * f4FFTOut.z) - (f4FFTOut.x * f4FFTOut.w); pf4SumStokes[i] = f4SumStokes; return; } /* function that frees resources */ void CleanUp() { /* free resources */ if (g_pc4InBuf != NULL) { #if PL hipHostFree(&g_pc4InBuf); #else free(g_pc4InBuf); #endif g_pc4InBuf = NULL; } if (g_pc4Data_d != NULL) { (void) hipFree(g_pc4Data_d); g_pc4Data_d = NULL; } if (g_pf4FFTIn_d != NULL) { (void) hipFree(g_pf4FFTIn_d); g_pf4FFTIn_d = NULL; } if (g_pf4FFTOut_d != NULL) { (void) hipFree(g_pf4FFTOut_d); g_pf4FFTOut_d = NULL; } if (g_pf4SumStokes != NULL) { free(g_pf4SumStokes); g_pf4SumStokes = NULL; } if (g_pf4SumStokes_d != NULL) { (void) hipFree(g_pf4SumStokes_d); g_pf4SumStokes_d = NULL; } free(g_pfPFBCoeff); (void) hipFree(g_pfPFBCoeff_d); /* destroy plan */ /* TODO: check for plan */ (void) hipfftDestroy(g_stPlan); #if PLOT if (g_pfSumPowX != NULL) { free(g_pfSumPowX); g_pfSumPowX = NULL; } if (g_pfSumPowY != NULL) { free(g_pfSumPowY); g_pfSumPowY = NULL; } if (g_pfSumStokesRe != NULL) { free(g_pfSumStokesRe); g_pfSumStokesRe = NULL; } if (g_pfSumStokesIm != NULL) { free(g_pfSumStokesIm); g_pfSumStokesIm = NULL; } if (g_pfFreq != NULL) { free(g_pfFreq); g_pfFreq = NULL; } cpgclos(); #endif return; } #if BENCHMARKING /* function to print benchmarking statistics */ void PrintBenchmarks(float fTotPFB, int iCountPFB, float fTotCpInFFT, int iCountCpInFFT, float fTotFFT, int iCountFFT, float fTotAccum, int iCountAccum, 
float fTotCpOut, int iCountCpOut, long int lProcDataAll) { /* total time taken */ float fTotal = 0.0; float fTotThroughput = 0.0; fTotal = g_fTotCpIn + fTotPFB + fTotCpInFFT + fTotFFT + fTotAccum + fTotCpOut; fTotThroughput = (float) lProcDataAll / (fTotal * 1e-3 * NUM_BYTES_PER_SAMP); (void) printf(" Total elapsed time for\n"); (void) printf(" %6d calls to hipMemcpy(Host2Device) : " "%8.3fms, %2d%%; Average = %5.3fms\n", g_iCountCpIn, g_fTotCpIn, (int) ((g_fTotCpIn / fTotal) * 100), g_fTotCpIn / g_iCountCpIn); if (g_iIsPFBOn) { (void) printf(" %6d calls to DoPFB() : " "%8.3fms, %2d%%; Average = %5.3fms\n", iCountPFB, fTotPFB, (int) ((fTotPFB / fTotal) * 100), fTotPFB / iCountPFB); } else { (void) printf(" %6d calls to CopyDataForFFT() : " "%8.3fms, %2d%%; Average = %5.3fms\n", iCountCpInFFT, fTotCpInFFT, (int) ((fTotCpInFFT / fTotal) * 100), fTotCpInFFT / iCountCpInFFT); } (void) printf(" %6d calls to DoFFT() : " "%8.3fms, %2d%%; Average = %5.3fms\n", iCountFFT, fTotFFT, (int) ((fTotFFT / fTotal) * 100), fTotFFT / iCountFFT); (void) printf(" %6d calls to Accumulate() : " "%8.3fms, %2d%%; Average = %5.3fms\n", iCountAccum, fTotAccum, (int) ((fTotAccum / fTotal) * 100), fTotAccum / iCountAccum); (void) printf(" %6d calls to hipMemcpy(Device2Host) : " "%8.3fms, %2d%%; Average = %5.3fms\n", iCountCpOut, fTotCpOut, (int) ((fTotCpOut / fTotal) * 100), fTotCpOut / iCountCpOut); (void) printf(" Average throughput: %5.3f Msps = %5.3f MBps\n", fTotThroughput / (1024 * 1024), NUM_BYTES_PER_SAMP * fTotThroughput / (1024 * 1024)); return; } #endif #if PLOT int InitPlot() { int iRet = EXIT_SUCCESS; int i = 0; iRet = cpgopen(PG_DEV); if (iRet <= 0) { (void) fprintf(stderr, "ERROR: Opening graphics device %s failed!\n", PG_DEV); return EXIT_FAILURE; } cpgsch(3); cpgsubp(g_iNumSubBands, 4); g_pfSumPowX = (float*) malloc(g_iNFFT * sizeof(float)); if (NULL == g_pfSumPowX) { (void) fprintf(stderr, "ERROR: Memory allocation failed! 
%s.\n", strerror(errno)); return EXIT_FAILURE; } g_pfSumPowY = (float*) malloc(g_iNFFT * sizeof(float)); if (NULL == g_pfSumPowY) { (void) fprintf(stderr, "ERROR: Memory allocation failed! %s.\n", strerror(errno)); return EXIT_FAILURE; } g_pfSumStokesRe = (float*) malloc(g_iNFFT * sizeof(float)); if (NULL == g_pfSumStokesRe) { (void) fprintf(stderr, "ERROR: Memory allocation failed! %s.\n", strerror(errno)); return EXIT_FAILURE; } g_pfSumStokesIm = (float*) malloc(g_iNFFT * sizeof(float)); if (NULL == g_pfSumStokesIm) { (void) fprintf(stderr, "ERROR: Memory allocation failed! %s.\n", strerror(errno)); return EXIT_FAILURE; } g_pfFreq = (float*) malloc(g_iNFFT * sizeof(float)); if (NULL == g_pfFreq) { (void) fprintf(stderr, "ERROR: Memory allocation failed! %s.\n", strerror(errno)); return EXIT_FAILURE; } /* load the frequency axis */ for (i = 0; i < g_iNFFT; ++i) { g_pfFreq[i] = ((float) i * g_fFSamp) / g_iNFFT; } return EXIT_SUCCESS; } void Plot() { float fMinFreq = g_pfFreq[0]; float fMaxFreq = g_pfFreq[g_iNFFT-1]; float fMinY = FLT_MAX; float fMaxY = -(FLT_MAX); int i = 0; int j = 0; int k = 0; for (k = 0; k < g_iNumSubBands; ++k) { for (i = k, j = 0; i < (g_iNumSubBands * g_iNFFT); i += g_iNumSubBands, ++j) { if (0.0 == g_pf4SumStokes[i].x) { g_pfSumPowX[j] = 0.0; } else { g_pfSumPowX[j] = 10 * log10f(g_pf4SumStokes[i].x); } if (0.0 == g_pf4SumStokes[i].y) { g_pfSumPowY[j] = 0.0; } else { g_pfSumPowY[j] = 10 * log10f(g_pf4SumStokes[i].y); } g_pfSumStokesRe[j] = g_pf4SumStokes[i].z; g_pfSumStokesIm[j] = g_pf4SumStokes[i].w; } /* plot accumulated X-pol. 
power */ fMinY = FLT_MAX; fMaxY = -(FLT_MAX); for (i = 0; i < g_iNFFT; ++i) { if (g_pfSumPowX[i] > fMaxY) { fMaxY = g_pfSumPowX[i]; } if (g_pfSumPowX[i] < fMinY) { fMinY = g_pfSumPowX[i]; } } /* to avoid min == max */ fMaxY += 1.0; fMinY -= 1.0; for (i = 0; i < g_iNFFT; ++i) { g_pfSumPowX[i] -= fMaxY; } fMinY -= fMaxY; fMaxY = 0; cpgpanl(k + 1, 1); cpgeras(); cpgsvp(PG_VP_ML, PG_VP_MR, PG_VP_MB, PG_VP_MT); cpgswin(fMinFreq, fMaxFreq, fMinY, fMaxY); //cpglab("Bin Number", "", "SumPowX"); cpgbox("BCNST", 0.0, 0, "BCNST", 0.0, 0); cpgsci(PG_CI_PLOT); cpgline(g_iNFFT, g_pfFreq, g_pfSumPowX); cpgsci(PG_CI_DEF); /* plot accumulated Y-pol. power */ fMinY = FLT_MAX; fMaxY = -(FLT_MAX); for (i = 0; i < g_iNFFT; ++i) { if (g_pfSumPowY[i] > fMaxY) { fMaxY = g_pfSumPowY[i]; } if (g_pfSumPowY[i] < fMinY) { fMinY = g_pfSumPowY[i]; } } /* to avoid min == max */ fMaxY += 1.0; fMinY -= 1.0; for (i = 0; i < g_iNFFT; ++i) { g_pfSumPowY[i] -= fMaxY; } fMinY -= fMaxY; fMaxY = 0; cpgpanl(k + 1, 2); cpgeras(); cpgsvp(PG_VP_ML, PG_VP_MR, PG_VP_MB, PG_VP_MT); cpgswin(fMinFreq, fMaxFreq, fMinY, fMaxY); //cpglab("Bin Number", "", "SumPowY"); cpgbox("BCNST", 0.0, 0, "BCNST", 0.0, 0); cpgsci(PG_CI_PLOT); cpgline(g_iNFFT, g_pfFreq, g_pfSumPowY); cpgsci(PG_CI_DEF); /* plot accumulated real(XY*) */ fMinY = FLT_MAX; fMaxY = -(FLT_MAX); for (i = 0; i < g_iNFFT; ++i) { if (g_pfSumStokesRe[i] > fMaxY) { fMaxY = g_pfSumStokesRe[i]; } if (g_pfSumStokesRe[i] < fMinY) { fMinY = g_pfSumStokesRe[i]; } } /* to avoid min == max */ fMaxY += 1.0; fMinY -= 1.0; cpgpanl(k + 1, 3); cpgeras(); cpgsvp(PG_VP_ML, PG_VP_MR, PG_VP_MB, PG_VP_MT); cpgswin(fMinFreq, fMaxFreq, fMinY, fMaxY); //cpglab("Bin Number", "", "SumStokesRe"); cpgbox("BCNST", 0.0, 0, "BCNST", 0.0, 0); cpgsci(PG_CI_PLOT); cpgline(g_iNFFT, g_pfFreq, g_pfSumStokesRe); cpgsci(PG_CI_DEF); /* plot accumulated imag(XY*) */ fMinY = FLT_MAX; fMaxY = -(FLT_MAX); for (i = 0; i < g_iNFFT; ++i) { if (g_pfSumStokesIm[i] > fMaxY) { fMaxY = g_pfSumStokesIm[i]; } if 
(g_pfSumStokesIm[i] < fMinY) { fMinY = g_pfSumStokesIm[i]; } } /* to avoid min == max */ fMaxY += 1.0; fMinY -= 1.0; cpgpanl(k + 1, 4); cpgeras(); cpgsvp(PG_VP_ML, PG_VP_MR, PG_VP_MB, PG_VP_MT); cpgswin(fMinFreq, fMaxFreq, fMinY, fMaxY); //cpglab("Bin Number", "", "SumStokesIm"); cpgbox("BCNST", 0.0, 0, "BCNST", 0.0, 0); cpgsci(PG_CI_PLOT); cpgline(g_iNFFT, g_pfFreq, g_pfSumStokesIm); cpgsci(PG_CI_DEF); } return; } #endif /* * Registers handlers for SIGTERM and CTRL+C */ int RegisterSignalHandlers() { struct sigaction stSigHandler = {{0}}; int iRet = EXIT_SUCCESS; /* register the CTRL+C-handling function */ stSigHandler.sa_handler = HandleStopSignals; iRet = sigaction(SIGINT, &stSigHandler, NULL); if (iRet != EXIT_SUCCESS) { (void) fprintf(stderr, "ERROR: Handler registration failed for signal %d!\n", SIGINT); return EXIT_FAILURE; } /* register the SIGTERM-handling function */ stSigHandler.sa_handler = HandleStopSignals; iRet = sigaction(SIGTERM, &stSigHandler, NULL); if (iRet != EXIT_SUCCESS) { (void) fprintf(stderr, "ERROR: Handler registration failed for signal %d!\n", SIGTERM); return EXIT_FAILURE; } return EXIT_SUCCESS; } /* * Catches SIGTERM and CTRL+C and cleans up before exiting */ void HandleStopSignals(int iSigNo) { /* clean up */ CleanUp(); /* exit */ exit(EXIT_SUCCESS); /* never reached */ return; } void __CUDASafeCallWithCleanUp(hipError_t iRet, const char* pcFile, const int iLine, void (*pCleanUp)(void)) { if (iRet != hipSuccess) { (void) fprintf(stderr, "ERROR: File <%s>, Line %d: %s\n", pcFile, iLine, hipGetErrorString(iRet)); /* free resources */ (*pCleanUp)(); exit(EXIT_FAILURE); } return; } /* * Prints usage information */ void PrintUsage(const char *pcProgName) { (void) printf("Usage: %s [options] <data-file>\n", pcProgName); (void) printf(" -h --help "); (void) printf("Display this usage information\n"); (void) printf(" -b --nsub "); (void) printf("Number of sub-bands in the data\n"); (void) printf(" -n --nfft <value> "); (void) printf("Number 
of points in FFT\n"); (void) printf(" -p --pfb "); (void) printf("Enable PFB\n"); (void) printf(" -a --nacc <value> "); (void) printf("Number of spectra to add\n"); (void) printf(" -d --device <value> "); (void) printf("CUDA device to use\n"); #if PLOT (void) printf(" -s --fsamp <value> "); (void) printf("Sampling frequency\n"); #endif return; }
08003edb95c0c8ab45e988c2077c43db714cc0d4.cu
/** * @file grating.cu * * @author Jayanth Chennamangalam * @date 2011.07.08 */ #include "grating.h" #define PL 1 int g_iIsDataReadDone = FALSE; int g_iIsProcDone = FALSE; int g_iMaxThreadsPerBlock = 0; char4* g_pc4InBuf = NULL; char4* g_pc4InBufRead = NULL; int g_iSizeFile = 0; int g_iReadCount = 0; char4* g_pc4Data_d = NULL; /* raw data starting address */ char4* g_pc4DataRead_d = NULL; /* raw data read pointer */ int g_iNFFT = DEF_LEN_SPEC; int g_iMaxPhysThreads; dim3 g_dimBPFB(1, 1, 1); dim3 g_dimGPFB(1, 1); dim3 g_dimBCopy(1, 1, 1); dim3 g_dimGCopy(1, 1); dim3 g_dimBAccum(1, 1, 1); dim3 g_dimGAccum(1, 1); float4* g_pf4FFTIn_d = NULL; float4* g_pf4FFTOut_d = NULL; cufftHandle g_stPlan = {0}; float4* g_pf4SumStokes = NULL; float4* g_pf4SumStokes_d = NULL; int g_iIsPFBOn = DEF_PFB_ON; int g_iNTaps = 1; /* 1 if no PFB, NUM_TAPS if PFB */ char g_acFileData[256] = {0}; /* BUG: crash if file size is less than 32MB */ int g_iSizeRead = DEF_SIZE_READ; //int g_iSizeRead = DEF_SIZE_READ * 2; int g_iNumSubBands = DEF_NUM_SUBBANDS; int g_iFileCoeff = 0; char g_acFileCoeff[256] = {0}; float *g_pfPFBCoeff = NULL; float *g_pfPFBCoeff_d = NULL; #if PLOT float* g_pfSumPowX = NULL; float* g_pfSumPowY = NULL; float* g_pfSumStokesRe = NULL; float* g_pfSumStokesIm = NULL; float* g_pfFreq = NULL; float g_fFSamp = 1.0; /* 1 [frequency] */ #endif #if BENCHMARKING float g_fTimeCpIn = 0.0; float g_fTotCpIn = 0.0; int g_iCountCpIn = 0; cudaEvent_t g_cuStart; cudaEvent_t g_cuStop; #endif int main(int argc, char *argv[]) { int iRet = EXIT_SUCCESS; int iSpecCount = 0; int iNumAcc = DEF_ACC; int iCUDADevice = DEF_CUDA_DEVICE; int iProcData = 0; long int lProcDataAll = 0; cudaError_t iCUDARet = cudaSuccess; #if BENCHMARKING float fTimePFB = 0.0; float fTotPFB = 0.0; int iCountPFB = 0; float fTimeCpInFFT = 0.0; float fTotCpInFFT = 0.0; int iCountCpInFFT = 0; float fTimeFFT = 0.0; float fTotFFT = 0.0; int iCountFFT = 0; float fTimeCpOut = 0.0; float fTotCpOut = 0.0; int iCountCpOut = 0; float 
fTimeAccum = 0.0; float fTotAccum = 0.0; int iCountAccum = 0; #else struct timeval stStart = {0}; struct timeval stStop = {0}; float fTimeTaken = 0.0; float fTotThroughput = 0.0; #endif #if OUTFILE int iFileSpec = 0; #endif const char *pcProgName = NULL; int iNextOpt = 0; /* valid short options */ #if PLOT const char* const pcOptsShort = "hb:n:pa:d:s:"; #else const char* const pcOptsShort = "hb:n:pa:d:"; #endif /* valid long options */ const struct option stOptsLong[] = { { "help", 0, NULL, 'h' }, { "nsub", 1, NULL, 'b' }, { "nfft", 1, NULL, 'n' }, { "pfb", 0, NULL, 'p' }, { "nacc", 1, NULL, 'a' }, { "device", 1, NULL, 'd' }, #if PLOT { "fsamp", 1, NULL, 's' }, #endif { NULL, 0, NULL, 0 } }; /* get the filename of the program from the argument list */ pcProgName = argv[0]; /* parse the input */ do { iNextOpt = getopt_long(argc, argv, pcOptsShort, stOptsLong, NULL); switch (iNextOpt) { case 'h': /* -h or --help */ /* print usage info and terminate */ PrintUsage(pcProgName); return EXIT_SUCCESS; case 'b': /* -b or --nsub */ /* set option */ g_iNumSubBands = (int) atoi(optarg); break; case 'n': /* -n or --nfft */ /* set option */ g_iNFFT = (int) atoi(optarg); break; case 'p': /* -p or --pfb */ /* set option */ g_iIsPFBOn = TRUE; break; case 'a': /* -a or --nacc */ /* set option */ iNumAcc = (int) atoi(optarg); break; case 'd': /* -d or --device */ /* set option */ iCUDADevice = (int) atoi(optarg); break; #if PLOT case 's': /* -s or --fsamp */ /* set option */ g_fFSamp = (float) atof(optarg); break; #endif case '?': /* user specified an invalid option */ /* print usage info and terminate with error */ (void) fprintf(stderr, "ERROR: Invalid option!\n"); PrintUsage(pcProgName); return EXIT_FAILURE; case -1: /* done with options */ break; default: /* unexpected */ assert(0); } } while (iNextOpt != -1); /* no arguments */ if (argc <= optind) { (void) fprintf(stderr, "ERROR: Data file not specified!\n"); PrintUsage(pcProgName); return EXIT_FAILURE; } (void) 
strncpy(g_acFileData, argv[optind], 256); g_acFileData[255] = '\0'; #if BENCHMARKING (void) printf("* Benchmarking run commencing...\n"); #endif /* initialise */ iRet = Init(iCUDADevice); if (iRet != EXIT_SUCCESS) { (void) fprintf(stderr, "ERROR! Init failed!\n"); CleanUp(); return EXIT_FAILURE; } #if OUTFILE iFileSpec = open("spec.dat", O_CREAT | O_TRUNC | O_WRONLY, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); if (iFileSpec < EXIT_SUCCESS) { (void) fprintf(stderr, "ERROR! Opening spectrum file failed!\n"); CleanUp(); return EXIT_FAILURE; } #endif #if (!BENCHMARKING) (void) gettimeofday(&stStart, NULL); #endif while (!g_iIsProcDone) { if (g_iIsPFBOn) { /* do pfb */ #if BENCHMARKING CUDASafeCallWithCleanUp(cudaEventRecord(g_cuStart, 0)); CUDASafeCallWithCleanUp(cudaEventSynchronize(g_cuStart)); #endif DoPFB<<<g_dimGPFB, g_dimBPFB>>>(g_pc4DataRead_d, g_pf4FFTIn_d, g_pfPFBCoeff_d); CUDASafeCallWithCleanUp(cudaThreadSynchronize()); iCUDARet = cudaGetLastError(); if (iCUDARet != cudaSuccess) { (void) fprintf(stderr, "ERROR: File <%s>, Line %d: %s\n", __FILE__, __LINE__, cudaGetErrorString(iCUDARet)); /* free resources */ CleanUp(); return EXIT_FAILURE; } #if BENCHMARKING CUDASafeCallWithCleanUp(cudaEventRecord(g_cuStop, 0)); CUDASafeCallWithCleanUp(cudaEventSynchronize(g_cuStop)); CUDASafeCallWithCleanUp(cudaEventElapsedTime(&fTimePFB, g_cuStart, g_cuStop)); fTotPFB += fTimePFB; ++iCountPFB; #endif /* update the data read pointer */ g_pc4DataRead_d += (g_iNumSubBands * g_iNFFT); } else { #if BENCHMARKING CUDASafeCallWithCleanUp(cudaEventRecord(g_cuStart, 0)); CUDASafeCallWithCleanUp(cudaEventSynchronize(g_cuStart)); #endif CopyDataForFFT<<<g_dimGCopy, g_dimBCopy>>>(g_pc4DataRead_d, g_pf4FFTIn_d); CUDASafeCallWithCleanUp(cudaThreadSynchronize()); iCUDARet = cudaGetLastError(); if (iCUDARet != cudaSuccess) { (void) fprintf(stderr, "ERROR: File <%s>, Line %d: %s\n", __FILE__, __LINE__, cudaGetErrorString(iCUDARet)); /* free resources */ CleanUp(); return EXIT_FAILURE; } #if 
BENCHMARKING CUDASafeCallWithCleanUp(cudaEventRecord(g_cuStop, 0)); CUDASafeCallWithCleanUp(cudaEventSynchronize(g_cuStop)); CUDASafeCallWithCleanUp(cudaEventElapsedTime(&fTimeCpInFFT, g_cuStart, g_cuStop)); fTotCpInFFT += fTimeCpInFFT; ++iCountCpInFFT; #endif /* update the data read pointer */ g_pc4DataRead_d += (g_iNumSubBands * g_iNFFT); } /* do fft */ #if BENCHMARKING CUDASafeCallWithCleanUp(cudaEventRecord(g_cuStart, 0)); CUDASafeCallWithCleanUp(cudaEventSynchronize(g_cuStart)); #endif iRet = DoFFT(); if (iRet != EXIT_SUCCESS) { (void) fprintf(stderr, "ERROR! FFT failed!\n"); #if OUTFILE (void) close(iFileSpec); #endif CleanUp(); return EXIT_FAILURE; } #if BENCHMARKING CUDASafeCallWithCleanUp(cudaEventRecord(g_cuStop, 0)); CUDASafeCallWithCleanUp(cudaEventSynchronize(g_cuStop)); CUDASafeCallWithCleanUp(cudaEventElapsedTime(&fTimeFFT, g_cuStart, g_cuStop)); fTotFFT += fTimeFFT; ++iCountFFT; #endif /* accumulate power x, power y, stokes, if the blanking bit is not set */ #if BENCHMARKING CUDASafeCallWithCleanUp(cudaEventRecord(g_cuStart, 0)); CUDASafeCallWithCleanUp(cudaEventSynchronize(g_cuStart)); #endif Accumulate<<<g_dimGAccum, g_dimBAccum>>>(g_pf4FFTOut_d, g_pf4SumStokes_d); CUDASafeCallWithCleanUp(cudaThreadSynchronize()); iCUDARet = cudaGetLastError(); if (iCUDARet != cudaSuccess) { (void) fprintf(stderr, "ERROR: File <%s>, Line %d: %s\n", __FILE__, __LINE__, cudaGetErrorString(iCUDARet)); /* free resources */ CleanUp(); return EXIT_FAILURE; } #if BENCHMARKING CUDASafeCallWithCleanUp(cudaEventRecord(g_cuStop, 0)); CUDASafeCallWithCleanUp(cudaEventSynchronize(g_cuStop)); CUDASafeCallWithCleanUp(cudaEventElapsedTime(&fTimeAccum, g_cuStart, g_cuStop)); fTotAccum += fTimeAccum; ++iCountAccum; #endif ++iSpecCount; if (iSpecCount == iNumAcc) { /* dump to buffer */ #if BENCHMARKING CUDASafeCallWithCleanUp(cudaEventRecord(g_cuStart, 0)); CUDASafeCallWithCleanUp(cudaEventSynchronize(g_cuStart)); #endif CUDASafeCallWithCleanUp(cudaMemcpy(g_pf4SumStokes, 
g_pf4SumStokes_d, (g_iNumSubBands * g_iNFFT * sizeof(float4)), cudaMemcpyDeviceToHost)); #if BENCHMARKING CUDASafeCallWithCleanUp(cudaEventRecord(g_cuStop, 0)); CUDASafeCallWithCleanUp(cudaEventSynchronize(g_cuStop)); CUDASafeCallWithCleanUp(cudaEventElapsedTime(&fTimeCpOut, g_cuStart, g_cuStop)); fTotCpOut += fTimeCpOut; ++iCountCpOut; #endif #if OUTFILE (void) write(iFileSpec, g_pf4SumStokes, g_iNumSubBands * g_iNFFT * sizeof(float4)); #endif #if PLOT /* NOTE: Plot() will modify data! */ Plot(); (void) usleep(500000); #endif /* reset time */ iSpecCount = 0; /* zero accumulators */ CUDASafeCallWithCleanUp(cudaMemset(g_pf4SumStokes_d, '\0', (g_iNumSubBands * g_iNFFT * sizeof(float4)))); } /* if time to read from input buffer */ iProcData += (g_iNumSubBands * g_iNFFT * sizeof(char4)); lProcDataAll += (g_iNumSubBands * g_iNFFT * sizeof(char4)); if ((g_iSizeRead - ((g_iNTaps - 1) * g_iNumSubBands * g_iNFFT * sizeof(char4))) == iProcData) { if (!(g_iIsDataReadDone)) { /* read data from input buffer */ iRet = ReadData(); if (iRet != EXIT_SUCCESS) { (void) fprintf(stderr, "ERROR: Data reading failed!\n"); break; } iProcData = 0; } else /* no more data to be read */ { g_iIsProcDone = TRUE; } } } #if (!BENCHMARKING) (void) gettimeofday(&stStop, NULL); fTimeTaken = (stStop.tv_sec + (stStop.tv_usec * USEC2SEC)) - (stStart.tv_sec + (stStart.tv_usec * USEC2SEC)); fTotThroughput = (float) lProcDataAll / (fTimeTaken * NUM_BYTES_PER_SAMP); (void) printf("Time taken (barring Init()): %gs; " "Total throughput: %5.3f Msps = %5.3f MBps\n", fTimeTaken, fTotThroughput / (1024 * 1024), NUM_BYTES_PER_SAMP * fTotThroughput / (1024 * 1024)); #endif #if OUTFILE (void) close(iFileSpec); #endif CleanUp(); #if BENCHMARKING PrintBenchmarks(fTotPFB, iCountPFB, fTotCpInFFT, iCountCpInFFT, fTotFFT, iCountFFT, fTotAccum, iCountAccum, fTotCpOut, iCountCpOut, lProcDataAll); CUDASafeCallWithCleanUp(cudaEventDestroy(g_cuStart)); CUDASafeCallWithCleanUp(cudaEventDestroy(g_cuStop)); (void) printf("* 
Events destroyed.\n"); (void) printf("* Benchmarking run completed.\n"); #endif return EXIT_SUCCESS; } /* function that creates the FFT plan, allocates memory, initialises counters, etc. */ int Init(int iCUDADevice) { int iDevCount = 0; cudaDeviceProp stDevProp = {0}; int iRet = EXIT_SUCCESS; cufftResult iCUFFTRet = CUFFT_SUCCESS; size_t lTotCUDAMalloc = 0; int i = 0; iRet = RegisterSignalHandlers(); if (iRet != EXIT_SUCCESS) { (void) fprintf(stderr, "ERROR: Signal-handler registration failed!\n"); return EXIT_FAILURE; } /* since CUDASafeCallWithCleanUp() calls cudaGetErrorString(), it should not be used here - will cause crash if no CUDA device is found */ (void) cudaGetDeviceCount(&iDevCount); if (0 == iDevCount) { (void) fprintf(stderr, "ERROR: No CUDA-capable device found!\n"); return EXIT_FAILURE; } if (iCUDADevice >= iDevCount) { (void) fprintf(stderr, "ERROR: requested device %d not among present %d devices!\n", iCUDADevice, iDevCount); return EXIT_FAILURE; } for (i = 0; i < iDevCount; ++i) { CUDASafeCallWithCleanUp(cudaGetDeviceProperties(&stDevProp, i)); printf("CUDA Device %d: %s, Compute Capability %d.%d, %d physical threads %s\n", i, stDevProp.name, stDevProp.major, stDevProp.minor, stDevProp.multiProcessorCount * stDevProp.maxThreadsPerMultiProcessor, (iCUDADevice == i) ? 
"(selected)" : ""); } CUDASafeCallWithCleanUp(cudaSetDevice(iCUDADevice)); #if BENCHMARKING CUDASafeCallWithCleanUp(cudaEventCreate(&g_cuStart)); CUDASafeCallWithCleanUp(cudaEventCreate(&g_cuStop)); (void) printf("* Events created.\n"); #endif CUDASafeCallWithCleanUp(cudaGetDeviceProperties(&stDevProp, 0)); g_iMaxThreadsPerBlock = stDevProp.maxThreadsPerBlock; g_iMaxPhysThreads = stDevProp.multiProcessorCount * stDevProp.maxThreadsPerMultiProcessor; /* check if the data buffer is large enough to contain at least one [PFB +] transform */ if (g_iSizeRead < (g_iNumSubBands * g_iNTaps * g_iNFFT * NUM_BYTES_PER_SAMP)) { int iMaxNFFT = g_iSizeRead / (g_iNumSubBands * g_iNTaps * NUM_BYTES_PER_SAMP); (void) fprintf(stderr, "ERROR: Cannot perform specified operation with given " "input buffer size! Input buffer size is %d MB, " "maximum length of FFT is %d.\n", (int) ((float) g_iSizeRead) / (1024 * 1024), iMaxNFFT); return EXIT_FAILURE; } if (g_iIsPFBOn) { lTotCUDAMalloc += (g_iNumSubBands * g_iNTaps * g_iNFFT * sizeof(float)); } lTotCUDAMalloc += g_iSizeRead; lTotCUDAMalloc += (g_iNumSubBands * g_iNFFT * sizeof(float4)); lTotCUDAMalloc += (g_iNumSubBands * g_iNFFT * sizeof(float4)); lTotCUDAMalloc += (g_iNumSubBands * g_iNFFT * sizeof(float4)); if (lTotCUDAMalloc > stDevProp.totalGlobalMem) { (void) fprintf(stderr, "ERROR: Total memory requested on GPU is %g of a " "possible %g MB. 
Memory request break-up:\n" " Input data buffer: %g MB\n" " FFT in array: %g MB\n" " FFT out array: %g MB\n" " Stokes output array: %g MB\n", ((float) lTotCUDAMalloc) / (1024 * 1024), ((float) stDevProp.totalGlobalMem) / (1024 * 1024), ((float) g_iSizeRead) / (1024 * 1024), ((float) g_iNumSubBands * g_iNFFT * sizeof(float4)) / (1024 * 1024), ((float) g_iNumSubBands * g_iNFFT * sizeof(float4)) / (1024 * 1024), ((float) g_iNumSubBands * g_iNFFT * sizeof(float4)) / (1024 * 1024)); if (g_iIsPFBOn) { (void) fprintf(stderr, " PFB coefficients: %g MB\n", ((float) g_iNumSubBands * g_iNFFT * sizeof(float4)) / (1024 * 1024)); } return EXIT_FAILURE; } #ifdef DEBUG else { (void) printf("INFO: Total memory requested on GPU is %g of a " "possible %g MB. Memory request break-up:\n" " Input data buffer: %g MB\n" " FFT in array: %g MB\n" " FFT out array: %g MB\n" " Stokes output array: %g MB\n", ((float) lTotCUDAMalloc) / (1024 * 1024), ((float) stDevProp.totalGlobalMem) / (1024 * 1024), ((float) g_iSizeRead) / (1024 * 1024), ((float) g_iNumSubBands * g_iNFFT * sizeof(float4)) / (1024 * 1024), ((float) g_iNumSubBands * g_iNFFT * sizeof(float4)) / (1024 * 1024), ((float) g_iNumSubBands * g_iNFFT * sizeof(float4)) / (1024 * 1024)); if (g_iIsPFBOn) { (void) printf(" PFB coefficients: %d MB\n", ((float) g_iNumSubBands * g_iNFFT * sizeof(float4)) / (1024 * 1024)); } } #endif if (g_iIsPFBOn) { /* set number of taps to NUM_TAPS if PFB is on, else number of taps = 1 */ g_iNTaps = NUM_TAPS; g_pfPFBCoeff = (float *) malloc(g_iNumSubBands * g_iNTaps * g_iNFFT * sizeof(float)); if (NULL == g_pfPFBCoeff) { (void) fprintf(stderr, "ERROR: Memory allocation failed! 
%s.\n", strerror(errno)); return EXIT_FAILURE; } /* allocate memory for the filter coefficient array on the device */ CUDASafeCallWithCleanUp(cudaMalloc((void **) &g_pfPFBCoeff_d, g_iNumSubBands * g_iNTaps * g_iNFFT * sizeof(float))); /* read filter coefficients */ /* build file name */ (void) sprintf(g_acFileCoeff, "%s_%s_%d_%d_%d%s", FILE_COEFF_PREFIX, FILE_COEFF_DATATYPE, g_iNTaps, g_iNFFT, g_iNumSubBands, FILE_COEFF_SUFFIX); g_iFileCoeff = open(g_acFileCoeff, O_RDONLY); if (g_iFileCoeff < EXIT_SUCCESS) { (void) fprintf(stderr, "ERROR: Opening filter coefficients file %s " "failed! %s.\n", g_acFileCoeff, strerror(errno)); return EXIT_FAILURE; } iRet = read(g_iFileCoeff, g_pfPFBCoeff, g_iNumSubBands * g_iNTaps * g_iNFFT * sizeof(float)); if (iRet != (g_iNumSubBands * g_iNTaps * g_iNFFT * sizeof(float))) { (void) fprintf(stderr, "ERROR: Reading filter coefficients failed! %s.\n", strerror(errno)); return EXIT_FAILURE; } (void) close(g_iFileCoeff); /* copy filter coefficients to the device */ CUDASafeCallWithCleanUp(cudaMemcpy(g_pfPFBCoeff_d, g_pfPFBCoeff, g_iNumSubBands * g_iNTaps * g_iNFFT * sizeof(float), cudaMemcpyHostToDevice)); } /* allocate memory for data array - 32MB is the block size for the VEGAS input buffer */ CUDASafeCallWithCleanUp(cudaMalloc((void **) &g_pc4Data_d, g_iSizeRead)); g_pc4DataRead_d = g_pc4Data_d; /* load data into memory */ iRet = LoadDataToMem(); if (iRet != EXIT_SUCCESS) { (void) fprintf(stderr, "ERROR! 
Loading to memory failed!\n"); return EXIT_FAILURE; } /* calculate kernel parameters */ if (g_iNFFT < g_iMaxThreadsPerBlock) { g_dimBPFB.x = g_iNFFT; g_dimBCopy.x = g_iNFFT; g_dimBAccum.x = g_iNFFT; } else { g_dimBPFB.x = g_iMaxThreadsPerBlock; g_dimBCopy.x = g_iMaxThreadsPerBlock; g_dimBAccum.x = g_iMaxThreadsPerBlock; } g_dimGPFB.x = (g_iNumSubBands * g_iNFFT) / g_dimBPFB.x; g_dimGCopy.x = (g_iNumSubBands * g_iNFFT) / g_dimBCopy.x; g_dimGAccum.x = (g_iNumSubBands * g_iNFFT) / g_dimBAccum.x; iRet = ReadData(); if (iRet != EXIT_SUCCESS) { (void) fprintf(stderr, "ERROR: Reading data failed!\n"); return EXIT_FAILURE; } CUDASafeCallWithCleanUp(cudaMalloc((void **) &g_pf4FFTIn_d, g_iNumSubBands * g_iNFFT * sizeof(float4))); CUDASafeCallWithCleanUp(cudaMalloc((void **) &g_pf4FFTOut_d, g_iNumSubBands * g_iNFFT * sizeof(float4))); g_pf4SumStokes = (float4 *) malloc(g_iNumSubBands * g_iNFFT * sizeof(float4)); if (NULL == g_pf4SumStokes) { (void) fprintf(stderr, "ERROR: Memory allocation failed! 
%s.\n", strerror(errno)); return EXIT_FAILURE; } CUDASafeCallWithCleanUp(cudaMalloc((void **) &g_pf4SumStokes_d, g_iNumSubBands * g_iNFFT * sizeof(float4))); CUDASafeCallWithCleanUp(cudaMemset(g_pf4SumStokes_d, '\0', g_iNumSubBands * g_iNFFT * sizeof(float4))); /* create plan */ iCUFFTRet = cufftPlanMany(&g_stPlan, FFTPLAN_RANK, &g_iNFFT, &g_iNFFT, FFTPLAN_ISTRIDE, FFTPLAN_IDIST, &g_iNFFT, FFTPLAN_OSTRIDE, FFTPLAN_ODIST, CUFFT_C2C, FFTPLAN_BATCH); if (iCUFFTRet != CUFFT_SUCCESS) { (void) fprintf(stderr, "ERROR: Plan creation failed!\n"); return EXIT_FAILURE; } #if PLOT iRet = InitPlot(); if (iRet != EXIT_SUCCESS) { (void) fprintf(stderr, "ERROR: Plotting initialisation failed!\n"); return EXIT_FAILURE; } #endif return EXIT_SUCCESS; } /* function that reads data from the data file and loads it into memory during initialisation */ int LoadDataToMem() { struct stat stFileStats = {0}; int iRet = EXIT_SUCCESS; int iFileData = 0; iRet = stat(g_acFileData, &stFileStats); if (iRet != EXIT_SUCCESS) { (void) fprintf(stderr, "ERROR: Failed to stat %s: %s!\n", g_acFileData, strerror(errno)); return EXIT_FAILURE; } g_iSizeFile = stFileStats.st_size; #if PL CUDASafeCallWithCleanUp(cudaMallocHost(&g_pc4InBuf, g_iSizeFile)); #else g_pc4InBuf = (char4*) malloc(g_iSizeFile); if (NULL == g_pc4InBuf) { (void) fprintf(stderr, "ERROR: Memory allocation failed! %s.\n", strerror(errno)); return EXIT_FAILURE; } #endif iFileData = open(g_acFileData, O_RDONLY); if (iFileData < EXIT_SUCCESS) { (void) fprintf(stderr, "ERROR! Opening data file %s failed! %s.\n", g_acFileData, strerror(errno)); return EXIT_FAILURE; } iRet = read(iFileData, g_pc4InBuf, g_iSizeFile); if (iRet < EXIT_SUCCESS) { (void) fprintf(stderr, "ERROR: Data reading failed! 
%s.\n", strerror(errno)); (void) close(iFileData); return EXIT_FAILURE; } else if (iRet != stFileStats.st_size) { (void) printf("File read done!\n"); } (void) close(iFileData); /* set the read pointer to the beginning of the data array */ g_pc4InBufRead = g_pc4InBuf; return EXIT_SUCCESS; } /* function that reads data from input buffer */ int ReadData() { /* write new data to the write buffer */ #if BENCHMARKING CUDASafeCallWithCleanUp(cudaEventRecord(g_cuStart, 0)); CUDASafeCallWithCleanUp(cudaEventSynchronize(g_cuStart)); #endif CUDASafeCallWithCleanUp(cudaMemcpy(g_pc4Data_d, g_pc4InBufRead, g_iSizeRead, cudaMemcpyHostToDevice)); #if BENCHMARKING CUDASafeCallWithCleanUp(cudaEventRecord(g_cuStop, 0)); CUDASafeCallWithCleanUp(cudaEventSynchronize(g_cuStop)); CUDASafeCallWithCleanUp(cudaEventElapsedTime(&g_fTimeCpIn, g_cuStart, g_cuStop)); g_fTotCpIn += g_fTimeCpIn; ++g_iCountCpIn; #endif /* update the read pointer to where data needs to be read in from, in the next read */ g_pc4InBufRead += ((g_iSizeRead - ((g_iNTaps - 1) * g_iNumSubBands * g_iNFFT * sizeof(char4))) / sizeof(char4)); /* whenever there is a read, reset the read pointer to the beginning */ g_pc4DataRead_d = g_pc4Data_d; ++g_iReadCount; /* BUG: won't read last block */ if ((((char *) g_pc4InBuf) + g_iSizeFile) - ((char *) g_pc4InBufRead) <= g_iSizeRead) { (void) printf("Data read done! 
Read count = %d\n", g_iReadCount); g_iIsDataReadDone = TRUE; } return EXIT_SUCCESS; } /* function that performs the PFB */ __global__ void DoPFB(char4 *pc4Data, float4 *pf4FFTIn, float *pfPFBCoeff) { int i = (blockIdx.x * blockDim.x) + threadIdx.x; int iNFFT = (gridDim.x * blockDim.x); int j = 0; int iAbsIdx = 0; float4 f4PFBOut = make_float4(0.0, 0.0, 0.0, 0.0); char4 c4Data = make_char4(0, 0, 0, 0); for (j = 0; j < NUM_TAPS; ++j) { /* calculate the absolute index */ iAbsIdx = (j * iNFFT) + i; /* get the address of the block */ c4Data = pc4Data[iAbsIdx]; f4PFBOut.x += (float) c4Data.x * pfPFBCoeff[iAbsIdx]; f4PFBOut.y += (float) c4Data.y * pfPFBCoeff[iAbsIdx]; f4PFBOut.z += (float) c4Data.z * pfPFBCoeff[iAbsIdx]; f4PFBOut.w += (float) c4Data.w * pfPFBCoeff[iAbsIdx]; } pf4FFTIn[i] = f4PFBOut; return; } __global__ void CopyDataForFFT(char4 *pc4Data, float4 *pf4FFTIn) { int i = (blockIdx.x * blockDim.x) + threadIdx.x; char4 in = pc4Data[i]; float4 out = { (float)in.x, (float)in.y, (float)in.z, (float)in.w }; pf4FFTIn[i] = out; return; } /* function that performs the FFT */ int DoFFT() { cufftResult iCUFFTRet = CUFFT_SUCCESS; /* execute plan */ iCUFFTRet = cufftExecC2C(g_stPlan, (cufftComplex*) g_pf4FFTIn_d, (cufftComplex*) g_pf4FFTOut_d, CUFFT_FORWARD); if (iCUFFTRet != CUFFT_SUCCESS) { (void) fprintf(stderr, "ERROR! 
FFT failed!\n"); return EXIT_FAILURE; } return EXIT_SUCCESS; } __global__ void Accumulate(float4 *pf4FFTOut, float4 *pf4SumStokes) { int i = (blockIdx.x * blockDim.x) + threadIdx.x; float4 f4FFTOut = pf4FFTOut[i]; float4 f4SumStokes = pf4SumStokes[i]; /* Re(X)^2 + Im(X)^2 */ f4SumStokes.x += (f4FFTOut.x * f4FFTOut.x) + (f4FFTOut.y * f4FFTOut.y); /* Re(Y)^2 + Im(Y)^2 */ f4SumStokes.y += (f4FFTOut.z * f4FFTOut.z) + (f4FFTOut.w * f4FFTOut.w); /* Re(XY*) */ f4SumStokes.z += (f4FFTOut.x * f4FFTOut.z) + (f4FFTOut.y * f4FFTOut.w); /* Im(XY*) */ f4SumStokes.w += (f4FFTOut.y * f4FFTOut.z) - (f4FFTOut.x * f4FFTOut.w); pf4SumStokes[i] = f4SumStokes; return; } /* function that frees resources */ void CleanUp() { /* free resources */ if (g_pc4InBuf != NULL) { #if PL cudaFreeHost(&g_pc4InBuf); #else free(g_pc4InBuf); #endif g_pc4InBuf = NULL; } if (g_pc4Data_d != NULL) { (void) cudaFree(g_pc4Data_d); g_pc4Data_d = NULL; } if (g_pf4FFTIn_d != NULL) { (void) cudaFree(g_pf4FFTIn_d); g_pf4FFTIn_d = NULL; } if (g_pf4FFTOut_d != NULL) { (void) cudaFree(g_pf4FFTOut_d); g_pf4FFTOut_d = NULL; } if (g_pf4SumStokes != NULL) { free(g_pf4SumStokes); g_pf4SumStokes = NULL; } if (g_pf4SumStokes_d != NULL) { (void) cudaFree(g_pf4SumStokes_d); g_pf4SumStokes_d = NULL; } free(g_pfPFBCoeff); (void) cudaFree(g_pfPFBCoeff_d); /* destroy plan */ /* TODO: check for plan */ (void) cufftDestroy(g_stPlan); #if PLOT if (g_pfSumPowX != NULL) { free(g_pfSumPowX); g_pfSumPowX = NULL; } if (g_pfSumPowY != NULL) { free(g_pfSumPowY); g_pfSumPowY = NULL; } if (g_pfSumStokesRe != NULL) { free(g_pfSumStokesRe); g_pfSumStokesRe = NULL; } if (g_pfSumStokesIm != NULL) { free(g_pfSumStokesIm); g_pfSumStokesIm = NULL; } if (g_pfFreq != NULL) { free(g_pfFreq); g_pfFreq = NULL; } cpgclos(); #endif return; } #if BENCHMARKING /* function to print benchmarking statistics */ void PrintBenchmarks(float fTotPFB, int iCountPFB, float fTotCpInFFT, int iCountCpInFFT, float fTotFFT, int iCountFFT, float fTotAccum, int iCountAccum, 
float fTotCpOut, int iCountCpOut, long int lProcDataAll) { /* total time taken */ float fTotal = 0.0; float fTotThroughput = 0.0; fTotal = g_fTotCpIn + fTotPFB + fTotCpInFFT + fTotFFT + fTotAccum + fTotCpOut; fTotThroughput = (float) lProcDataAll / (fTotal * 1e-3 * NUM_BYTES_PER_SAMP); (void) printf(" Total elapsed time for\n"); (void) printf(" %6d calls to cudaMemcpy(Host2Device) : " "%8.3fms, %2d%%; Average = %5.3fms\n", g_iCountCpIn, g_fTotCpIn, (int) ((g_fTotCpIn / fTotal) * 100), g_fTotCpIn / g_iCountCpIn); if (g_iIsPFBOn) { (void) printf(" %6d calls to DoPFB() : " "%8.3fms, %2d%%; Average = %5.3fms\n", iCountPFB, fTotPFB, (int) ((fTotPFB / fTotal) * 100), fTotPFB / iCountPFB); } else { (void) printf(" %6d calls to CopyDataForFFT() : " "%8.3fms, %2d%%; Average = %5.3fms\n", iCountCpInFFT, fTotCpInFFT, (int) ((fTotCpInFFT / fTotal) * 100), fTotCpInFFT / iCountCpInFFT); } (void) printf(" %6d calls to DoFFT() : " "%8.3fms, %2d%%; Average = %5.3fms\n", iCountFFT, fTotFFT, (int) ((fTotFFT / fTotal) * 100), fTotFFT / iCountFFT); (void) printf(" %6d calls to Accumulate() : " "%8.3fms, %2d%%; Average = %5.3fms\n", iCountAccum, fTotAccum, (int) ((fTotAccum / fTotal) * 100), fTotAccum / iCountAccum); (void) printf(" %6d calls to cudaMemcpy(Device2Host) : " "%8.3fms, %2d%%; Average = %5.3fms\n", iCountCpOut, fTotCpOut, (int) ((fTotCpOut / fTotal) * 100), fTotCpOut / iCountCpOut); (void) printf(" Average throughput: %5.3f Msps = %5.3f MBps\n", fTotThroughput / (1024 * 1024), NUM_BYTES_PER_SAMP * fTotThroughput / (1024 * 1024)); return; } #endif #if PLOT int InitPlot() { int iRet = EXIT_SUCCESS; int i = 0; iRet = cpgopen(PG_DEV); if (iRet <= 0) { (void) fprintf(stderr, "ERROR: Opening graphics device %s failed!\n", PG_DEV); return EXIT_FAILURE; } cpgsch(3); cpgsubp(g_iNumSubBands, 4); g_pfSumPowX = (float*) malloc(g_iNFFT * sizeof(float)); if (NULL == g_pfSumPowX) { (void) fprintf(stderr, "ERROR: Memory allocation failed! 
%s.\n", strerror(errno)); return EXIT_FAILURE; } g_pfSumPowY = (float*) malloc(g_iNFFT * sizeof(float)); if (NULL == g_pfSumPowY) { (void) fprintf(stderr, "ERROR: Memory allocation failed! %s.\n", strerror(errno)); return EXIT_FAILURE; } g_pfSumStokesRe = (float*) malloc(g_iNFFT * sizeof(float)); if (NULL == g_pfSumStokesRe) { (void) fprintf(stderr, "ERROR: Memory allocation failed! %s.\n", strerror(errno)); return EXIT_FAILURE; } g_pfSumStokesIm = (float*) malloc(g_iNFFT * sizeof(float)); if (NULL == g_pfSumStokesIm) { (void) fprintf(stderr, "ERROR: Memory allocation failed! %s.\n", strerror(errno)); return EXIT_FAILURE; } g_pfFreq = (float*) malloc(g_iNFFT * sizeof(float)); if (NULL == g_pfFreq) { (void) fprintf(stderr, "ERROR: Memory allocation failed! %s.\n", strerror(errno)); return EXIT_FAILURE; } /* load the frequency axis */ for (i = 0; i < g_iNFFT; ++i) { g_pfFreq[i] = ((float) i * g_fFSamp) / g_iNFFT; } return EXIT_SUCCESS; } void Plot() { float fMinFreq = g_pfFreq[0]; float fMaxFreq = g_pfFreq[g_iNFFT-1]; float fMinY = FLT_MAX; float fMaxY = -(FLT_MAX); int i = 0; int j = 0; int k = 0; for (k = 0; k < g_iNumSubBands; ++k) { for (i = k, j = 0; i < (g_iNumSubBands * g_iNFFT); i += g_iNumSubBands, ++j) { if (0.0 == g_pf4SumStokes[i].x) { g_pfSumPowX[j] = 0.0; } else { g_pfSumPowX[j] = 10 * log10f(g_pf4SumStokes[i].x); } if (0.0 == g_pf4SumStokes[i].y) { g_pfSumPowY[j] = 0.0; } else { g_pfSumPowY[j] = 10 * log10f(g_pf4SumStokes[i].y); } g_pfSumStokesRe[j] = g_pf4SumStokes[i].z; g_pfSumStokesIm[j] = g_pf4SumStokes[i].w; } /* plot accumulated X-pol. 
power */ fMinY = FLT_MAX; fMaxY = -(FLT_MAX); for (i = 0; i < g_iNFFT; ++i) { if (g_pfSumPowX[i] > fMaxY) { fMaxY = g_pfSumPowX[i]; } if (g_pfSumPowX[i] < fMinY) { fMinY = g_pfSumPowX[i]; } } /* to avoid min == max */ fMaxY += 1.0; fMinY -= 1.0; for (i = 0; i < g_iNFFT; ++i) { g_pfSumPowX[i] -= fMaxY; } fMinY -= fMaxY; fMaxY = 0; cpgpanl(k + 1, 1); cpgeras(); cpgsvp(PG_VP_ML, PG_VP_MR, PG_VP_MB, PG_VP_MT); cpgswin(fMinFreq, fMaxFreq, fMinY, fMaxY); //cpglab("Bin Number", "", "SumPowX"); cpgbox("BCNST", 0.0, 0, "BCNST", 0.0, 0); cpgsci(PG_CI_PLOT); cpgline(g_iNFFT, g_pfFreq, g_pfSumPowX); cpgsci(PG_CI_DEF); /* plot accumulated Y-pol. power */ fMinY = FLT_MAX; fMaxY = -(FLT_MAX); for (i = 0; i < g_iNFFT; ++i) { if (g_pfSumPowY[i] > fMaxY) { fMaxY = g_pfSumPowY[i]; } if (g_pfSumPowY[i] < fMinY) { fMinY = g_pfSumPowY[i]; } } /* to avoid min == max */ fMaxY += 1.0; fMinY -= 1.0; for (i = 0; i < g_iNFFT; ++i) { g_pfSumPowY[i] -= fMaxY; } fMinY -= fMaxY; fMaxY = 0; cpgpanl(k + 1, 2); cpgeras(); cpgsvp(PG_VP_ML, PG_VP_MR, PG_VP_MB, PG_VP_MT); cpgswin(fMinFreq, fMaxFreq, fMinY, fMaxY); //cpglab("Bin Number", "", "SumPowY"); cpgbox("BCNST", 0.0, 0, "BCNST", 0.0, 0); cpgsci(PG_CI_PLOT); cpgline(g_iNFFT, g_pfFreq, g_pfSumPowY); cpgsci(PG_CI_DEF); /* plot accumulated real(XY*) */ fMinY = FLT_MAX; fMaxY = -(FLT_MAX); for (i = 0; i < g_iNFFT; ++i) { if (g_pfSumStokesRe[i] > fMaxY) { fMaxY = g_pfSumStokesRe[i]; } if (g_pfSumStokesRe[i] < fMinY) { fMinY = g_pfSumStokesRe[i]; } } /* to avoid min == max */ fMaxY += 1.0; fMinY -= 1.0; cpgpanl(k + 1, 3); cpgeras(); cpgsvp(PG_VP_ML, PG_VP_MR, PG_VP_MB, PG_VP_MT); cpgswin(fMinFreq, fMaxFreq, fMinY, fMaxY); //cpglab("Bin Number", "", "SumStokesRe"); cpgbox("BCNST", 0.0, 0, "BCNST", 0.0, 0); cpgsci(PG_CI_PLOT); cpgline(g_iNFFT, g_pfFreq, g_pfSumStokesRe); cpgsci(PG_CI_DEF); /* plot accumulated imag(XY*) */ fMinY = FLT_MAX; fMaxY = -(FLT_MAX); for (i = 0; i < g_iNFFT; ++i) { if (g_pfSumStokesIm[i] > fMaxY) { fMaxY = g_pfSumStokesIm[i]; } if 
(g_pfSumStokesIm[i] < fMinY) { fMinY = g_pfSumStokesIm[i]; } } /* to avoid min == max */ fMaxY += 1.0; fMinY -= 1.0; cpgpanl(k + 1, 4); cpgeras(); cpgsvp(PG_VP_ML, PG_VP_MR, PG_VP_MB, PG_VP_MT); cpgswin(fMinFreq, fMaxFreq, fMinY, fMaxY); //cpglab("Bin Number", "", "SumStokesIm"); cpgbox("BCNST", 0.0, 0, "BCNST", 0.0, 0); cpgsci(PG_CI_PLOT); cpgline(g_iNFFT, g_pfFreq, g_pfSumStokesIm); cpgsci(PG_CI_DEF); } return; } #endif /* * Registers handlers for SIGTERM and CTRL+C */ int RegisterSignalHandlers() { struct sigaction stSigHandler = {{0}}; int iRet = EXIT_SUCCESS; /* register the CTRL+C-handling function */ stSigHandler.sa_handler = HandleStopSignals; iRet = sigaction(SIGINT, &stSigHandler, NULL); if (iRet != EXIT_SUCCESS) { (void) fprintf(stderr, "ERROR: Handler registration failed for signal %d!\n", SIGINT); return EXIT_FAILURE; } /* register the SIGTERM-handling function */ stSigHandler.sa_handler = HandleStopSignals; iRet = sigaction(SIGTERM, &stSigHandler, NULL); if (iRet != EXIT_SUCCESS) { (void) fprintf(stderr, "ERROR: Handler registration failed for signal %d!\n", SIGTERM); return EXIT_FAILURE; } return EXIT_SUCCESS; } /* * Catches SIGTERM and CTRL+C and cleans up before exiting */ void HandleStopSignals(int iSigNo) { /* clean up */ CleanUp(); /* exit */ exit(EXIT_SUCCESS); /* never reached */ return; } void __CUDASafeCallWithCleanUp(cudaError_t iRet, const char* pcFile, const int iLine, void (*pCleanUp)(void)) { if (iRet != cudaSuccess) { (void) fprintf(stderr, "ERROR: File <%s>, Line %d: %s\n", pcFile, iLine, cudaGetErrorString(iRet)); /* free resources */ (*pCleanUp)(); exit(EXIT_FAILURE); } return; } /* * Prints usage information */ void PrintUsage(const char *pcProgName) { (void) printf("Usage: %s [options] <data-file>\n", pcProgName); (void) printf(" -h --help "); (void) printf("Display this usage information\n"); (void) printf(" -b --nsub "); (void) printf("Number of sub-bands in the data\n"); (void) printf(" -n --nfft <value> "); (void) 
printf("Number of points in FFT\n"); (void) printf(" -p --pfb "); (void) printf("Enable PFB\n"); (void) printf(" -a --nacc <value> "); (void) printf("Number of spectra to add\n"); (void) printf(" -d --device <value> "); (void) printf("CUDA device to use\n"); #if PLOT (void) printf(" -s --fsamp <value> "); (void) printf("Sampling frequency\n"); #endif return; }
2ba25f38b9d00aa75db71849c151b05fc36faa85.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include <hip/hip_runtime.h> long long getCurrentTime() { struct timeval te; gettimeofday(&te, NULL); // get current time long long microseconds = te.tv_sec*1000000LL + te.tv_usec; return microseconds; } #define CUDA_ERROR_CHECK #define CudaSafeCall( err ) __cudaSafeCall( err, __FILE__, __LINE__ ) inline void __cudaSafeCall( hipError_t err, const char *file, const int line ) { #ifdef CUDA_ERROR_CHECK if ( hipSuccess != err ) { fprintf( stderr, "cudaSafeCall() failed at %s:%i : %s\n", file, line, hipGetErrorString( err ) ); exit( -1 ); } #endif return; } __inline__ __device__ int warpReduceSum(int val) { for (int offset = warpSize/2; offset > 0; offset /= 2) { #if TORCH_HIP_VERSION >= 9000 val += __shfl_down_sync(0xffffffff, val, offset); #else val += __shfl_down(val, offset); #endif } return val; } __inline__ __device__ int blockReduceSum(int val) { // Shared memory for 32 partial sums static __shared__ int shared[32]; int lane = threadIdx.x % warpSize; int wid = threadIdx.x / warpSize; // Reduction within a warp val = warpReduceSum(val); // Write reduced value to shared memory if (lane == 0) shared[wid] = val; // Wait for all partial reductions __syncthreads(); // Read from shared memory only if that warp existed val = (threadIdx.x < blockDim.x / warpSize) ? 
shared[lane] : 0; // Final reduce within first warp if (wid==0) val = warpReduceSum(val); return val; } __global__ void reduce(int *A, int *sum, int N) { int val = 0; for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) { val += A[i]; } int valPerBlock = blockReduceSum(val); if (threadIdx.x == 0) { atomicAdd(sum, valPerBlock); } } int ReduceCPU(int *A, int N, double *cpuTime) { long long startTime = getCurrentTime(); int sum = 0; for (int i = 0; i < N; i++) { sum += A[i]; } *cpuTime = (double)(getCurrentTime() - startTime) / 1000000; return sum; } int ReduceGPU(int *A, int N, double *gpuOverallTime, double *gpuKernelTime) { long long startTime = getCurrentTime(); int threads = 512; int blocks = min((N + threads - 1) / threads, 1024); int *S = (int*)malloc(sizeof(int) * 1); int *dA; int *dSum; // Allocate memory on the device CudaSafeCall(hipMalloc(&dA, sizeof(int) * N)); CudaSafeCall(hipMalloc(&dSum, sizeof(int) * 1)); // Copy the data from the host to the device CudaSafeCall(hipMemcpy(dA, A, N * sizeof (int), hipMemcpyHostToDevice)); CudaSafeCall(hipMemset(dSum, 0, sizeof (int))); hipEvent_t start, stop; CudaSafeCall(hipEventCreate(&start)); CudaSafeCall(hipEventCreate(&stop)); // Launch the kernel CudaSafeCall(hipEventRecord(start)); hipLaunchKernelGGL(( reduce), dim3(blocks), dim3(threads), 0, 0, dA, dSum, N); CudaSafeCall(hipEventRecord(stop)); CudaSafeCall(hipEventSynchronize(stop)); CudaSafeCall(hipDeviceSynchronize()); // Copy back the data from the host CudaSafeCall(hipMemcpy(S, dSum, 1 * sizeof (int), hipMemcpyDeviceToHost)); // Compute the performance numbers *gpuOverallTime = (double)(getCurrentTime() - startTime) / 1000000; float msec = 0; CudaSafeCall(hipEventElapsedTime(&msec, start, stop)); *gpuKernelTime = msec / 1000; // Cleanup CudaSafeCall(hipFree(dA)); CudaSafeCall(hipFree(dSum)); return *S; } int main(int argc, char **argv) { if (argc != 2) { printf("Usage: ./reduce repeat\n"); exit(0); } int REPEATS = 
atoi(argv[1]); for (int repeat = 0; repeat < REPEATS; repeat++) { printf("[Iteration %d]\n", repeat); for (int N = 1024; N < 256 * 1024 * 1024; N = N * 2) { int* A = NULL; double cpuTime = 0.0; double gpuOverallTime = 0.0; double gpuKernelTime = 0.0; A = (int*)malloc(sizeof(int) * N); for (int i = 0; i < N; i++) { A[i] = i; } // CPU version int expected = ReduceCPU(A, N, &cpuTime); // GPU version int computed = ReduceGPU(A, N, &gpuOverallTime, &gpuKernelTime); if (computed == expected) { float GB = (float)(N * 4) / (1024 * 1024 * 1024); printf ("\tVERIFIED, %d, CPU (%lf sec) %lf GB/s, GPU (Overall: %lf sec) %lf GB/s, GPU (Kernel: %lf sec) %lf GB/s\n", 4*N, cpuTime, GB / cpuTime, gpuOverallTime, GB / gpuOverallTime, gpuKernelTime, GB / gpuKernelTime); } else { printf ("\tFAILED, %d, computed: %d, excepted %u\n", 4*N, computed, expected); } free(A); } } }
2ba25f38b9d00aa75db71849c151b05fc36faa85.cu
#include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include <cuda.h> long long getCurrentTime() { struct timeval te; gettimeofday(&te, NULL); // get current time long long microseconds = te.tv_sec*1000000LL + te.tv_usec; return microseconds; } #define CUDA_ERROR_CHECK #define CudaSafeCall( err ) __cudaSafeCall( err, __FILE__, __LINE__ ) inline void __cudaSafeCall( cudaError err, const char *file, const int line ) { #ifdef CUDA_ERROR_CHECK if ( cudaSuccess != err ) { fprintf( stderr, "cudaSafeCall() failed at %s:%i : %s\n", file, line, cudaGetErrorString( err ) ); exit( -1 ); } #endif return; } __inline__ __device__ int warpReduceSum(int val) { for (int offset = warpSize/2; offset > 0; offset /= 2) { #if CUDA_VERSION >= 9000 val += __shfl_down_sync(0xffffffff, val, offset); #else val += __shfl_down(val, offset); #endif } return val; } __inline__ __device__ int blockReduceSum(int val) { // Shared memory for 32 partial sums static __shared__ int shared[32]; int lane = threadIdx.x % warpSize; int wid = threadIdx.x / warpSize; // Reduction within a warp val = warpReduceSum(val); // Write reduced value to shared memory if (lane == 0) shared[wid] = val; // Wait for all partial reductions __syncthreads(); // Read from shared memory only if that warp existed val = (threadIdx.x < blockDim.x / warpSize) ? 
shared[lane] : 0; // Final reduce within first warp if (wid==0) val = warpReduceSum(val); return val; } __global__ void reduce(int *A, int *sum, int N) { int val = 0; for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) { val += A[i]; } int valPerBlock = blockReduceSum(val); if (threadIdx.x == 0) { atomicAdd(sum, valPerBlock); } } int ReduceCPU(int *A, int N, double *cpuTime) { long long startTime = getCurrentTime(); int sum = 0; for (int i = 0; i < N; i++) { sum += A[i]; } *cpuTime = (double)(getCurrentTime() - startTime) / 1000000; return sum; } int ReduceGPU(int *A, int N, double *gpuOverallTime, double *gpuKernelTime) { long long startTime = getCurrentTime(); int threads = 512; int blocks = min((N + threads - 1) / threads, 1024); int *S = (int*)malloc(sizeof(int) * 1); int *dA; int *dSum; // Allocate memory on the device CudaSafeCall(cudaMalloc(&dA, sizeof(int) * N)); CudaSafeCall(cudaMalloc(&dSum, sizeof(int) * 1)); // Copy the data from the host to the device CudaSafeCall(cudaMemcpy(dA, A, N * sizeof (int), cudaMemcpyHostToDevice)); CudaSafeCall(cudaMemset(dSum, 0, sizeof (int))); cudaEvent_t start, stop; CudaSafeCall(cudaEventCreate(&start)); CudaSafeCall(cudaEventCreate(&stop)); // Launch the kernel CudaSafeCall(cudaEventRecord(start)); reduce<<<blocks, threads>>>(dA, dSum, N); CudaSafeCall(cudaEventRecord(stop)); CudaSafeCall(cudaEventSynchronize(stop)); CudaSafeCall(cudaDeviceSynchronize()); // Copy back the data from the host CudaSafeCall(cudaMemcpy(S, dSum, 1 * sizeof (int), cudaMemcpyDeviceToHost)); // Compute the performance numbers *gpuOverallTime = (double)(getCurrentTime() - startTime) / 1000000; float msec = 0; CudaSafeCall(cudaEventElapsedTime(&msec, start, stop)); *gpuKernelTime = msec / 1000; // Cleanup CudaSafeCall(cudaFree(dA)); CudaSafeCall(cudaFree(dSum)); return *S; } int main(int argc, char **argv) { if (argc != 2) { printf("Usage: ./reduce repeat\n"); exit(0); } int REPEATS = atoi(argv[1]); for (int 
repeat = 0; repeat < REPEATS; repeat++) { printf("[Iteration %d]\n", repeat); for (int N = 1024; N < 256 * 1024 * 1024; N = N * 2) { int* A = NULL; double cpuTime = 0.0; double gpuOverallTime = 0.0; double gpuKernelTime = 0.0; A = (int*)malloc(sizeof(int) * N); for (int i = 0; i < N; i++) { A[i] = i; } // CPU version int expected = ReduceCPU(A, N, &cpuTime); // GPU version int computed = ReduceGPU(A, N, &gpuOverallTime, &gpuKernelTime); if (computed == expected) { float GB = (float)(N * 4) / (1024 * 1024 * 1024); printf ("\tVERIFIED, %d, CPU (%lf sec) %lf GB/s, GPU (Overall: %lf sec) %lf GB/s, GPU (Kernel: %lf sec) %lf GB/s\n", 4*N, cpuTime, GB / cpuTime, gpuOverallTime, GB / gpuOverallTime, gpuKernelTime, GB / gpuKernelTime); } else { printf ("\tFAILED, %d, computed: %d, excepted %u\n", 4*N, computed, expected); } free(A); } } }
d11be4ab298928bbe5341a884f42a048132ad1ae.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Modified from // https://github.com/facebookresearch/detectron2/tree/master/detectron2/layers/csrc/ROIAlign // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved #include <ATen/ATen.h> #include <ATen/hip/HIPContext.h> #include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h> #include <ATen/hip/HIPApplyUtils.cuh> // TODO make it in a common file #define CUDA_1D_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ i += blockDim.x * gridDim.x) template <typename T> __device__ T bilinear_interpolate(const T* bottom_data, const int height, const int width, T y, T x, const int index /* index for debug only*/) { // deal with cases that inverse elements are out of feature map boundary if (y < -1.0 || y > height || x < -1.0 || x > width) { // empty return 0; } if (y <= 0) y = 0; if (x <= 0) x = 0; int y_low = (int)y; int x_low = (int)x; int y_high; int x_high; if (y_low >= height - 1) { y_high = y_low = height - 1; y = (T)y_low; } else { y_high = y_low + 1; } if (x_low >= width - 1) { x_high = x_low = width - 1; x = (T)x_low; } else { x_high = x_low + 1; } T ly = y - y_low; T lx = x - x_low; T hy = 1. - ly, hx = 1. 
- lx; // do bilinear interpolation T v1 = bottom_data[y_low * width + x_low]; T v2 = bottom_data[y_low * width + x_high]; T v3 = bottom_data[y_high * width + x_low]; T v4 = bottom_data[y_high * width + x_high]; T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); return val; } template <typename T> __global__ void RoIAlignForwardV2( const int nthreads, const T* bottom_data, const T spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int sampling_ratio, const T* bottom_rois, T* top_data, bool aligned) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; const T* offset_bottom_rois = bottom_rois + n * 5; int roi_batch_ind = offset_bottom_rois[0]; // Do not use rounding; this implementation detail is critical T offset = aligned ? (T)0.5 : (T)0.0; T roi_start_w = offset_bottom_rois[1] * spatial_scale - offset; T roi_start_h = offset_bottom_rois[2] * spatial_scale - offset; T roi_end_w = offset_bottom_rois[3] * spatial_scale - offset; T roi_end_h = offset_bottom_rois[4] * spatial_scale - offset; T roi_width = roi_end_w - roi_start_w; T roi_height = roi_end_h - roi_start_h; if (!aligned) { // for backward-compatibility only roi_width = max(roi_width, (T)1.); roi_height = max(roi_height, (T)1.); } T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height); T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width); const T* offset_bottom_data = bottom_data + (roi_batch_ind * channels + c) * height * width; // We use roi_bin_grid to sample the grid and mimic integral int roi_bin_grid_h = (sampling_ratio > 0) ? 
sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2 int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); // We do average (integral) pooling inside a bin // When the grid is empty, output zeros. const T count = max(roi_bin_grid_h * roi_bin_grid_w, 1); // e.g. = 4 T output_val = 0.; for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1 { const T y = roi_start_h + ph * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5 for (int ix = 0; ix < roi_bin_grid_w; ix++) { const T x = roi_start_w + pw * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w); T val = bilinear_interpolate(offset_bottom_data, height, width, y, x, index); output_val += val; } } output_val /= count; top_data[index] = output_val; } }
d11be4ab298928bbe5341a884f42a048132ad1ae.cu
// Modified from // https://github.com/facebookresearch/detectron2/tree/master/detectron2/layers/csrc/ROIAlign // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved #include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include <c10/cuda/CUDAGuard.h> #include <ATen/cuda/CUDAApplyUtils.cuh> // TODO make it in a common file #define CUDA_1D_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ i += blockDim.x * gridDim.x) template <typename T> __device__ T bilinear_interpolate(const T* bottom_data, const int height, const int width, T y, T x, const int index /* index for debug only*/) { // deal with cases that inverse elements are out of feature map boundary if (y < -1.0 || y > height || x < -1.0 || x > width) { // empty return 0; } if (y <= 0) y = 0; if (x <= 0) x = 0; int y_low = (int)y; int x_low = (int)x; int y_high; int x_high; if (y_low >= height - 1) { y_high = y_low = height - 1; y = (T)y_low; } else { y_high = y_low + 1; } if (x_low >= width - 1) { x_high = x_low = width - 1; x = (T)x_low; } else { x_high = x_low + 1; } T ly = y - y_low; T lx = x - x_low; T hy = 1. - ly, hx = 1. 
- lx; // do bilinear interpolation T v1 = bottom_data[y_low * width + x_low]; T v2 = bottom_data[y_low * width + x_high]; T v3 = bottom_data[y_high * width + x_low]; T v4 = bottom_data[y_high * width + x_high]; T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); return val; } template <typename T> __global__ void RoIAlignForwardV2( const int nthreads, const T* bottom_data, const T spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int sampling_ratio, const T* bottom_rois, T* top_data, bool aligned) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; const T* offset_bottom_rois = bottom_rois + n * 5; int roi_batch_ind = offset_bottom_rois[0]; // Do not use rounding; this implementation detail is critical T offset = aligned ? (T)0.5 : (T)0.0; T roi_start_w = offset_bottom_rois[1] * spatial_scale - offset; T roi_start_h = offset_bottom_rois[2] * spatial_scale - offset; T roi_end_w = offset_bottom_rois[3] * spatial_scale - offset; T roi_end_h = offset_bottom_rois[4] * spatial_scale - offset; T roi_width = roi_end_w - roi_start_w; T roi_height = roi_end_h - roi_start_h; if (!aligned) { // for backward-compatibility only roi_width = max(roi_width, (T)1.); roi_height = max(roi_height, (T)1.); } T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height); T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width); const T* offset_bottom_data = bottom_data + (roi_batch_ind * channels + c) * height * width; // We use roi_bin_grid to sample the grid and mimic integral int roi_bin_grid_h = (sampling_ratio > 0) ? 
sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2 int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); // We do average (integral) pooling inside a bin // When the grid is empty, output zeros. const T count = max(roi_bin_grid_h * roi_bin_grid_w, 1); // e.g. = 4 T output_val = 0.; for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1 { const T y = roi_start_h + ph * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5 for (int ix = 0; ix < roi_bin_grid_w; ix++) { const T x = roi_start_w + pw * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w); T val = bilinear_interpolate(offset_bottom_data, height, width, y, x, index); output_val += val; } } output_val /= count; top_data[index] = output_val; } }
2da4bb684a9eeebef4a80d8df17d5bcef5526d96.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /* * Paulius Micikevicius (pauliusm@nvidia.com) * Max Grossman (jmaxg3@gmail.com) */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include <sys/time.h> #include <unistd.h> #include "common.h" #include "common2d.h" #define BDIMX 32 #define BDIMY 16 #define SHAREDX(radius) (BDIMX + 2 * (radius)) #define SHAREDY(radius) (BDIMY + 2 * (radius)) #define CACHE_INDEX(y, x, radius) ((y) * SHAREDX(radius) + (x)) __global__ void fwd_kernel(TYPE *next, TYPE *curr, TYPE *vsq, TYPE *c_coeff, int nx, int ny, int dimx, int radius) { extern __shared__ TYPE cache[]; int y = blockIdx.y * blockDim.y + threadIdx.y; int x = blockIdx.x * blockDim.x + threadIdx.x; int this_offset = POINT_OFFSET(x, y, dimx, radius); const int this_y = radius + threadIdx.y; const int this_x = radius + threadIdx.x; cache[CACHE_INDEX(this_y, this_x, radius)] = curr[POINT_OFFSET(x, y, dimx, radius)]; if (threadIdx.y < radius) { cache[CACHE_INDEX(threadIdx.y, this_x, radius)] = curr[POINT_OFFSET(x, y - radius, dimx, radius)]; } if (threadIdx.y >= radius && threadIdx.y < 2 * radius) { cache[CACHE_INDEX(threadIdx.y + blockDim.y, this_x, radius)] = curr[POINT_OFFSET(x, y - radius + blockDim.y, dimx, radius)]; } if (threadIdx.x < radius) { cache[CACHE_INDEX(this_y, threadIdx.x, radius)] = curr[POINT_OFFSET(x - radius, y, dimx, radius)]; } if (threadIdx.x >= radius && threadIdx.x < 2 * radius) { cache[CACHE_INDEX(this_y, threadIdx.x + blockDim.x, radius)] = curr[POINT_OFFSET(x - radius + blockDim.x, y, dimx, radius)]; } __syncthreads(); TYPE temp = 2.0f * cache[CACHE_INDEX(this_y, this_x, radius)] - next[this_offset]; TYPE div = c_coeff[0] * cache[CACHE_INDEX(this_y, this_x, radius)]; for (int d = radius; d >= 1; d--) { div += c_coeff[d] * (cache[CACHE_INDEX(this_y + d, this_x, radius)] + cache[CACHE_INDEX(this_y - d, this_x, radius)] + cache[CACHE_INDEX(this_y, this_x + d, radius)] + cache[CACHE_INDEX(this_y, this_x - d, radius)]); } next[this_offset] = temp + div * 
vsq[this_offset]; } int main(int argc, char *argv[]) { config conf; setup_config(&conf, argc, argv); init_progress(conf.progress_width, conf.nsteps, conf.progress_disabled); #ifndef PADDING fprintf(stderr, "Must be compiled with -DPADDING\n"); return 1; #endif if (conf.nx % BDIMX != 0) { fprintf(stderr, "Invalid nx configuration, must be an even multiple of " "%d\n", BDIMX); return 1; } if (conf.ny % BDIMY != 0) { fprintf(stderr, "Invalid ny configuration, must be an even multiple of " "%d\n", BDIMY); return 1; } if (conf.radius > TRANSACTION_LEN) { fprintf(stderr, "Radius must be less than TRANSACTION_LEN to include " "it in dimx padding\n"); return 1; } TYPE dx = 20.f; TYPE dt = 0.002f; // compute the pitch for perfect coalescing size_t dimx = TRANSACTION_LEN + conf.nx + conf.radius; dimx += (TRANSACTION_LEN - (dimx % TRANSACTION_LEN)); size_t dimy = conf.ny + 2*conf.radius; size_t nbytes = dimx * dimy * sizeof(TYPE); if (conf.verbose) { printf("x = %zu, y = %zu\n", dimx, dimy); printf("nsteps = %d\n", conf.nsteps); printf("radius = %d\n", conf.radius); } TYPE c_coeff[NUM_COEFF]; TYPE *curr, *next, *vsq; CHECK(hipHostMalloc((void **)&curr, nbytes)); CHECK(hipHostMalloc((void **)&next, nbytes)); CHECK(hipHostMalloc((void **)&vsq, nbytes)); config_sources(&conf.srcs, &conf.nsrcs, conf.nx, conf.ny, conf.nsteps); TYPE **srcs = sample_sources(conf.srcs, conf.nsrcs, conf.nsteps, dt); init_data(curr, next, vsq, c_coeff, dimx, dimy, dx, dt); TYPE *d_curr, *d_next, *d_vsq, *d_c_coeff; CHECK(hipMalloc((void **)&d_curr, nbytes)); CHECK(hipMalloc((void **)&d_next, nbytes)); CHECK(hipMalloc((void **)&d_vsq, nbytes)); CHECK(hipMalloc((void **)&d_c_coeff, NUM_COEFF * sizeof(TYPE))); dim3 block(BDIMX, BDIMY); dim3 grid(conf.nx / block.x, conf.ny / block.y); double mem_start = seconds(); CHECK(hipMemcpy(d_curr, curr, nbytes, hipMemcpyHostToDevice)); CHECK(hipMemcpy(d_next, next, nbytes, hipMemcpyHostToDevice)); CHECK(hipMemcpy(d_vsq, vsq, nbytes, hipMemcpyHostToDevice)); 
CHECK(hipMemcpy(d_c_coeff, c_coeff, NUM_COEFF * sizeof(TYPE), hipMemcpyHostToDevice)); double start = seconds(); for (int step = 0; step < conf.nsteps; step++) { for (int src = 0; src < conf.nsrcs; src++) { if (conf.srcs[src].t > step) continue; int src_offset = POINT_OFFSET(conf.srcs[src].x, conf.srcs[src].y, dimx, conf.radius); CHECK(hipMemcpy(d_curr + src_offset, srcs[src] + step, sizeof(TYPE), hipMemcpyHostToDevice)); } hipLaunchKernelGGL(( fwd_kernel), dim3(grid), dim3(block), SHAREDY(conf.radius) * SHAREDX(conf.radius) * sizeof(TYPE), 0, d_next, d_curr, d_vsq, d_c_coeff, conf.nx, conf.ny, dimx, conf.radius); TYPE *tmp = d_next; d_next = d_curr; d_curr = tmp; update_progress(step + 1); } CHECK(hipDeviceSynchronize()); double compute_s = seconds() - start; CHECK(hipMemcpy(curr, d_curr, nbytes, hipMemcpyDeviceToHost)); double total_s = seconds() - mem_start; finish_progress(); float point_rate = (float)conf.nx * conf.ny / (compute_s / conf.nsteps); fprintf(stderr, "iso_r4_2x: %8.10f s total, %8.10f s/step, %8.2f Mcells/s/step\n", total_s, compute_s / conf.nsteps, point_rate / 1000000.f); if (conf.save_text) { save_text(curr, dimx, dimy, conf.ny, conf.nx, "snap.text", conf.radius); } CHECK(hipHostFree(curr)); CHECK(hipHostFree(next)); CHECK(hipHostFree(vsq)); for (int i = 0; i < conf.nsrcs; i++) { free(srcs[i]); } free(srcs); CHECK(hipFree(d_curr)); CHECK(hipFree(d_next)); CHECK(hipFree(d_vsq)); CHECK(hipFree(d_c_coeff)); return 0; }
2da4bb684a9eeebef4a80d8df17d5bcef5526d96.cu
/* * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /* * Paulius Micikevicius (pauliusm@nvidia.com) * Max Grossman (jmaxg3@gmail.com) */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include <sys/time.h> #include <unistd.h> #include "common.h" #include "common2d.h" #define BDIMX 32 #define BDIMY 16 #define SHAREDX(radius) (BDIMX + 2 * (radius)) #define SHAREDY(radius) (BDIMY + 2 * (radius)) #define CACHE_INDEX(y, x, radius) ((y) * SHAREDX(radius) + (x)) __global__ void fwd_kernel(TYPE *next, TYPE *curr, TYPE *vsq, TYPE *c_coeff, int nx, int ny, int dimx, int radius) { extern __shared__ TYPE cache[]; int y = blockIdx.y * blockDim.y + threadIdx.y; int x = blockIdx.x * blockDim.x + threadIdx.x; int this_offset = POINT_OFFSET(x, y, dimx, radius); const int this_y = radius + threadIdx.y; const int this_x = radius + threadIdx.x; cache[CACHE_INDEX(this_y, this_x, radius)] = curr[POINT_OFFSET(x, y, dimx, radius)]; if (threadIdx.y < radius) { cache[CACHE_INDEX(threadIdx.y, this_x, radius)] = curr[POINT_OFFSET(x, y - radius, dimx, radius)]; } if (threadIdx.y >= radius && threadIdx.y < 2 * radius) { cache[CACHE_INDEX(threadIdx.y + blockDim.y, this_x, radius)] = curr[POINT_OFFSET(x, y - radius + blockDim.y, dimx, radius)]; } if (threadIdx.x < radius) { cache[CACHE_INDEX(this_y, threadIdx.x, radius)] = curr[POINT_OFFSET(x - radius, y, dimx, radius)]; } if (threadIdx.x >= radius && threadIdx.x < 2 * radius) { cache[CACHE_INDEX(this_y, threadIdx.x + blockDim.x, radius)] = curr[POINT_OFFSET(x - radius + blockDim.x, y, dimx, radius)]; } __syncthreads(); TYPE temp = 2.0f * cache[CACHE_INDEX(this_y, this_x, radius)] - next[this_offset]; TYPE div = c_coeff[0] * cache[CACHE_INDEX(this_y, this_x, radius)]; for (int d = radius; d >= 1; d--) { div += c_coeff[d] * (cache[CACHE_INDEX(this_y + d, this_x, radius)] + cache[CACHE_INDEX(this_y - d, this_x, radius)] + cache[CACHE_INDEX(this_y, this_x + d, radius)] + cache[CACHE_INDEX(this_y, this_x - d, radius)]); } next[this_offset] = temp + div * 
vsq[this_offset]; } int main(int argc, char *argv[]) { config conf; setup_config(&conf, argc, argv); init_progress(conf.progress_width, conf.nsteps, conf.progress_disabled); #ifndef PADDING fprintf(stderr, "Must be compiled with -DPADDING\n"); return 1; #endif if (conf.nx % BDIMX != 0) { fprintf(stderr, "Invalid nx configuration, must be an even multiple of " "%d\n", BDIMX); return 1; } if (conf.ny % BDIMY != 0) { fprintf(stderr, "Invalid ny configuration, must be an even multiple of " "%d\n", BDIMY); return 1; } if (conf.radius > TRANSACTION_LEN) { fprintf(stderr, "Radius must be less than TRANSACTION_LEN to include " "it in dimx padding\n"); return 1; } TYPE dx = 20.f; TYPE dt = 0.002f; // compute the pitch for perfect coalescing size_t dimx = TRANSACTION_LEN + conf.nx + conf.radius; dimx += (TRANSACTION_LEN - (dimx % TRANSACTION_LEN)); size_t dimy = conf.ny + 2*conf.radius; size_t nbytes = dimx * dimy * sizeof(TYPE); if (conf.verbose) { printf("x = %zu, y = %zu\n", dimx, dimy); printf("nsteps = %d\n", conf.nsteps); printf("radius = %d\n", conf.radius); } TYPE c_coeff[NUM_COEFF]; TYPE *curr, *next, *vsq; CHECK(cudaMallocHost((void **)&curr, nbytes)); CHECK(cudaMallocHost((void **)&next, nbytes)); CHECK(cudaMallocHost((void **)&vsq, nbytes)); config_sources(&conf.srcs, &conf.nsrcs, conf.nx, conf.ny, conf.nsteps); TYPE **srcs = sample_sources(conf.srcs, conf.nsrcs, conf.nsteps, dt); init_data(curr, next, vsq, c_coeff, dimx, dimy, dx, dt); TYPE *d_curr, *d_next, *d_vsq, *d_c_coeff; CHECK(cudaMalloc((void **)&d_curr, nbytes)); CHECK(cudaMalloc((void **)&d_next, nbytes)); CHECK(cudaMalloc((void **)&d_vsq, nbytes)); CHECK(cudaMalloc((void **)&d_c_coeff, NUM_COEFF * sizeof(TYPE))); dim3 block(BDIMX, BDIMY); dim3 grid(conf.nx / block.x, conf.ny / block.y); double mem_start = seconds(); CHECK(cudaMemcpy(d_curr, curr, nbytes, cudaMemcpyHostToDevice)); CHECK(cudaMemcpy(d_next, next, nbytes, cudaMemcpyHostToDevice)); CHECK(cudaMemcpy(d_vsq, vsq, nbytes, 
cudaMemcpyHostToDevice)); CHECK(cudaMemcpy(d_c_coeff, c_coeff, NUM_COEFF * sizeof(TYPE), cudaMemcpyHostToDevice)); double start = seconds(); for (int step = 0; step < conf.nsteps; step++) { for (int src = 0; src < conf.nsrcs; src++) { if (conf.srcs[src].t > step) continue; int src_offset = POINT_OFFSET(conf.srcs[src].x, conf.srcs[src].y, dimx, conf.radius); CHECK(cudaMemcpy(d_curr + src_offset, srcs[src] + step, sizeof(TYPE), cudaMemcpyHostToDevice)); } fwd_kernel<<<grid, block, SHAREDY(conf.radius) * SHAREDX(conf.radius) * sizeof(TYPE)>>>(d_next, d_curr, d_vsq, d_c_coeff, conf.nx, conf.ny, dimx, conf.radius); TYPE *tmp = d_next; d_next = d_curr; d_curr = tmp; update_progress(step + 1); } CHECK(cudaDeviceSynchronize()); double compute_s = seconds() - start; CHECK(cudaMemcpy(curr, d_curr, nbytes, cudaMemcpyDeviceToHost)); double total_s = seconds() - mem_start; finish_progress(); float point_rate = (float)conf.nx * conf.ny / (compute_s / conf.nsteps); fprintf(stderr, "iso_r4_2x: %8.10f s total, %8.10f s/step, %8.2f Mcells/s/step\n", total_s, compute_s / conf.nsteps, point_rate / 1000000.f); if (conf.save_text) { save_text(curr, dimx, dimy, conf.ny, conf.nx, "snap.text", conf.radius); } CHECK(cudaFreeHost(curr)); CHECK(cudaFreeHost(next)); CHECK(cudaFreeHost(vsq)); for (int i = 0; i < conf.nsrcs; i++) { free(srcs[i]); } free(srcs); CHECK(cudaFree(d_curr)); CHECK(cudaFree(d_next)); CHECK(cudaFree(d_vsq)); CHECK(cudaFree(d_c_coeff)); return 0; }
cc75f20a05dd5707322762da6b1cb51e56c07759.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <fstream> #include <iostream> #include <float.h> using namespace std; #define TILE_WIDTH 32 //#define THREADS_PER_BLOCK 32; void MatrixMulOnHost(float* M, float* N, float* P, int Width) { for (int i = 0; i < Width; ++i) for (int j = 0; j < Width; ++j) { float sum = 0; for (int k = 0; k < Width; ++k) { float a = M[i * Width + k]; float b = N[k * Width + j]; sum += a * b; } P[i * Width + j] = sum; } } void llenar(int* a, int n) { int i; for (i = 0; i < n*n; ++i) a[i] = rand()%5+1; } __global__ void matrixMulti(int *c, int *a, int *b,int n) { int row = blockIdx.y * blockDim.y + threadIdx.y ; int col = blockIdx.x * blockDim.x + threadIdx.x ; if ((row <n) && (col<n)) { int suma=0; for(int i=0;i<n;++i) { suma+=a[row*n+i]*b[i*n+col]; } c[row*n+col] = suma; } } __global__ void MatrixMulTiled(int * d_P, int * d_M, int* d_N,int Width) { __shared__ int Mds[TILE_WIDTH][TILE_WIDTH]; __shared__ int Nds[TILE_WIDTH][TILE_WIDTH]; int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; // Identify the row and column of the d_P element to work on int Row = by * TILE_WIDTH + ty; int Col = bx * TILE_WIDTH + tx; int Pvalue = 0; // Loop over the d_M and d_N tiles required to compute d_P element for (int ph = 0; ph < Width/TILE_WIDTH; ++ph) { // Collaborative loading of d_M and d_N tiles into shared memory if ((Row< Width) && (ph*TILE_WIDTH+tx)< Width) Mds[ty][tx] = d_M[Row*Width + ph*TILE_WIDTH + tx]; if ((ph*TILE_WIDTH+ty)<Width && Col<Width) Nds[ty][tx] = d_N[(ph*TILE_WIDTH + ty)*Width + Col]; __syncthreads(); for (int k = 0; k < TILE_WIDTH; ++k) { Pvalue += Mds[ty][k] * Nds[k][tx]; } __syncthreads(); } d_P[Row*Width + Col] = Pvalue; } __global__ void MatrixMulTiledMod(int * d_P, int * d_M, int* d_N,int Width) { __shared__ int Mds[TILE_WIDTH][TILE_WIDTH]; __shared__ int Nds[TILE_WIDTH][TILE_WIDTH]; __shared__ int 
Nds2[TILE_WIDTH][TILE_WIDTH]; int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; // Identify the row and column of the d_P element to work on int Row = by * TILE_WIDTH + ty; int Col = bx * TILE_WIDTH*2 + tx; int Pvalue =0 , Pvalue2=0; Mds[ty][tx]=0; Nds[ty][tx]=0; Nds2[ty][tx]=0; __syncthreads(); // Loop over the d_M and d_N tiles required to compute d_P element if((Row < Width) && (Col < Width)){ for (int ph = 0; ph <Width/TILE_WIDTH; ph++) { // Collaborative loading of d_M and d_N tiles into shared memory //printf("%i - %i -%i \n",ph, Row, Col ); if ((Row< Width) && (ph*TILE_WIDTH+tx)< Width) Mds[ty][tx] = d_M[Row*Width + ph*TILE_WIDTH + tx]; if ((ph*TILE_WIDTH+ty)<Width && Col<Width) Nds[ty][tx] = d_N[(ph*TILE_WIDTH + ty)*Width + Col]; //printf("%i %i\n",(ph*TILE_WIDTH+ty),Col+TILE_WIDTH); if (((ph*TILE_WIDTH + ty)*Width + Col+TILE_WIDTH)<(Width*Width)) { Nds2[ty][tx] = d_N[(ph*TILE_WIDTH + ty)*Width + Col+TILE_WIDTH]; } __syncthreads(); for (int k = 0; k < TILE_WIDTH; k++) { Pvalue += Mds[ty][k] * Nds[k][tx]; Pvalue2 += Mds[ty][k] * Nds2[k][tx]; } __syncthreads(); } d_P[Row*Width + Col] = Pvalue; d_P[Row*Width + Col +TILE_WIDTH] = Pvalue2; } } void printMatrix( int *a , int tam){ for(int i=0;i<tam;i++) { for(int j=0;j<tam;j++) { cout<<a[i*tam+j]<<" "; } cout<<endl; } } int main(int argc, char *argv[]) { srand (time(NULL)); int N= strtol(argv[1], NULL, 10); int THREADS_PER_BLOCK=TILE_WIDTH; //cout<<N<<endl; return 1; //printf("Storage size for float : %d \n", sizeof(float)); //printf("Storage size for int : %d \n", sizeof(int)); int *a, *b, *c; // host copies of a, b, c int *d_a, *d_b, *d_c; //device copies of a,b,c //int size = N*N*sizeof(int); int size=N*N*sizeof(int); hipMalloc((void **)&d_a, size); hipMalloc((void **)&d_b, size); hipMalloc((void **)&d_c, size); a = (int *)malloc(size); llenar(a, N); b = (int *)malloc(size); llenar(b, N); c = (int *)malloc(size); hipMemcpy(d_a, a, size, hipMemcpyHostToDevice); hipMemcpy(d_b, 
b, size, hipMemcpyHostToDevice); int blocks= (N + THREADS_PER_BLOCK -1)/THREADS_PER_BLOCK; dim3 dimGrid((blocks+THREADS_PER_BLOCK -1)/2, blocks, 1); dim3 dimBlock(THREADS_PER_BLOCK,THREADS_PER_BLOCK, 1); cout<<"N: "<<N<<"\tBloques : "<<blocks<<"\t Hebras/Bloque: "<<THREADS_PER_BLOCK<<endl; hipEvent_t start, stop; float elapsedTime; hipEventCreate(&start); hipEventRecord(start,0); //matrixMulti<<<dimGrid,dimBlock>>>(d_c, d_a, d_b, N); hipLaunchKernelGGL(( MatrixMulTiled), dim3(dimGrid),dim3(dimBlock), 0, 0, d_c, d_a, d_b, N); //MatrixMulTiledMod<<<dimGrid,dimBlock>>>(d_c, d_a, d_b, N); //matrixMulti<<<dimGrid,dimBlock>>>(d_c, d_a, d_b, N); //MatrixMulTiled<<<dimGrid,dimBlock>>>(d_c, d_a, d_b, N); hipEventElapsedTime() hipEventCreate(&stop); hipEventRecord(stop,0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsedTime, start,stop); hipEventDestroy(start); hipEventDestroy(stop); printf("Tiempo : %f ms\n" ,elapsedTime); hipMemcpy(c, d_c, size, hipMemcpyDeviceToHost); //cout<<"------A------------"<<endl; //printMatrix(a,N); //cout<<"------B------------"<<endl; //printMatrix(b,N); //cout<<"------C------------"<<endl; //printMatrix(c,N); free(a); free(b); free(c); hipFree(d_a); hipFree(d_b); hipFree(d_c); return 0; }
cc75f20a05dd5707322762da6b1cb51e56c07759.cu
#include <stdio.h> #include <stdlib.h> #include <fstream> #include <iostream> #include <float.h> using namespace std; #define TILE_WIDTH 32 //#define THREADS_PER_BLOCK 32; void MatrixMulOnHost(float* M, float* N, float* P, int Width) { for (int i = 0; i < Width; ++i) for (int j = 0; j < Width; ++j) { float sum = 0; for (int k = 0; k < Width; ++k) { float a = M[i * Width + k]; float b = N[k * Width + j]; sum += a * b; } P[i * Width + j] = sum; } } void llenar(int* a, int n) { int i; for (i = 0; i < n*n; ++i) a[i] = rand()%5+1; } __global__ void matrixMulti(int *c, int *a, int *b,int n) { int row = blockIdx.y * blockDim.y + threadIdx.y ; int col = blockIdx.x * blockDim.x + threadIdx.x ; if ((row <n) && (col<n)) { int suma=0; for(int i=0;i<n;++i) { suma+=a[row*n+i]*b[i*n+col]; } c[row*n+col] = suma; } } __global__ void MatrixMulTiled(int * d_P, int * d_M, int* d_N,int Width) { __shared__ int Mds[TILE_WIDTH][TILE_WIDTH]; __shared__ int Nds[TILE_WIDTH][TILE_WIDTH]; int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; // Identify the row and column of the d_P element to work on int Row = by * TILE_WIDTH + ty; int Col = bx * TILE_WIDTH + tx; int Pvalue = 0; // Loop over the d_M and d_N tiles required to compute d_P element for (int ph = 0; ph < Width/TILE_WIDTH; ++ph) { // Collaborative loading of d_M and d_N tiles into shared memory if ((Row< Width) && (ph*TILE_WIDTH+tx)< Width) Mds[ty][tx] = d_M[Row*Width + ph*TILE_WIDTH + tx]; if ((ph*TILE_WIDTH+ty)<Width && Col<Width) Nds[ty][tx] = d_N[(ph*TILE_WIDTH + ty)*Width + Col]; __syncthreads(); for (int k = 0; k < TILE_WIDTH; ++k) { Pvalue += Mds[ty][k] * Nds[k][tx]; } __syncthreads(); } d_P[Row*Width + Col] = Pvalue; } __global__ void MatrixMulTiledMod(int * d_P, int * d_M, int* d_N,int Width) { __shared__ int Mds[TILE_WIDTH][TILE_WIDTH]; __shared__ int Nds[TILE_WIDTH][TILE_WIDTH]; __shared__ int Nds2[TILE_WIDTH][TILE_WIDTH]; int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int 
ty = threadIdx.y; // Identify the row and column of the d_P element to work on int Row = by * TILE_WIDTH + ty; int Col = bx * TILE_WIDTH*2 + tx; int Pvalue =0 , Pvalue2=0; Mds[ty][tx]=0; Nds[ty][tx]=0; Nds2[ty][tx]=0; __syncthreads(); // Loop over the d_M and d_N tiles required to compute d_P element if((Row < Width) && (Col < Width)){ for (int ph = 0; ph <Width/TILE_WIDTH; ph++) { // Collaborative loading of d_M and d_N tiles into shared memory //printf("%i - %i -%i \n",ph, Row, Col ); if ((Row< Width) && (ph*TILE_WIDTH+tx)< Width) Mds[ty][tx] = d_M[Row*Width + ph*TILE_WIDTH + tx]; if ((ph*TILE_WIDTH+ty)<Width && Col<Width) Nds[ty][tx] = d_N[(ph*TILE_WIDTH + ty)*Width + Col]; //printf("%i %i\n",(ph*TILE_WIDTH+ty),Col+TILE_WIDTH); if (((ph*TILE_WIDTH + ty)*Width + Col+TILE_WIDTH)<(Width*Width)) { Nds2[ty][tx] = d_N[(ph*TILE_WIDTH + ty)*Width + Col+TILE_WIDTH]; } __syncthreads(); for (int k = 0; k < TILE_WIDTH; k++) { Pvalue += Mds[ty][k] * Nds[k][tx]; Pvalue2 += Mds[ty][k] * Nds2[k][tx]; } __syncthreads(); } d_P[Row*Width + Col] = Pvalue; d_P[Row*Width + Col +TILE_WIDTH] = Pvalue2; } } void printMatrix( int *a , int tam){ for(int i=0;i<tam;i++) { for(int j=0;j<tam;j++) { cout<<a[i*tam+j]<<" "; } cout<<endl; } } int main(int argc, char *argv[]) { srand (time(NULL)); int N= strtol(argv[1], NULL, 10); int THREADS_PER_BLOCK=TILE_WIDTH; //cout<<N<<endl; return 1; //printf("Storage size for float : %d \n", sizeof(float)); //printf("Storage size for int : %d \n", sizeof(int)); int *a, *b, *c; // host copies of a, b, c int *d_a, *d_b, *d_c; //device copies of a,b,c //int size = N*N*sizeof(int); int size=N*N*sizeof(int); cudaMalloc((void **)&d_a, size); cudaMalloc((void **)&d_b, size); cudaMalloc((void **)&d_c, size); a = (int *)malloc(size); llenar(a, N); b = (int *)malloc(size); llenar(b, N); c = (int *)malloc(size); cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice); cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice); int blocks= (N + THREADS_PER_BLOCK -1)/THREADS_PER_BLOCK; 
dim3 dimGrid((blocks+THREADS_PER_BLOCK -1)/2, blocks, 1); dim3 dimBlock(THREADS_PER_BLOCK,THREADS_PER_BLOCK, 1); cout<<"N: "<<N<<"\tBloques : "<<blocks<<"\t Hebras/Bloque: "<<THREADS_PER_BLOCK<<endl; cudaEvent_t start, stop; float elapsedTime; cudaEventCreate(&start); cudaEventRecord(start,0); //matrixMulti<<<dimGrid,dimBlock>>>(d_c, d_a, d_b, N); MatrixMulTiled<<<dimGrid,dimBlock>>>(d_c, d_a, d_b, N); //MatrixMulTiledMod<<<dimGrid,dimBlock>>>(d_c, d_a, d_b, N); //matrixMulti<<<dimGrid,dimBlock>>>(d_c, d_a, d_b, N); //MatrixMulTiled<<<dimGrid,dimBlock>>>(d_c, d_a, d_b, N); cudaEventElapsedTime() cudaEventCreate(&stop); cudaEventRecord(stop,0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsedTime, start,stop); cudaEventDestroy(start); cudaEventDestroy(stop); printf("Tiempo : %f ms\n" ,elapsedTime); cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost); //cout<<"------A------------"<<endl; //printMatrix(a,N); //cout<<"------B------------"<<endl; //printMatrix(b,N); //cout<<"------C------------"<<endl; //printMatrix(c,N); free(a); free(b); free(c); cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); return 0; }
0683132f71e44d89d0637b0e3032e2fd41f9d5b9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <fstream> #include <iostream> #include <stdlib.h> #include <vector> #include <string> #include <math.h> #include <time.h> #include <stddef.h> #include "OpenMM.h" #include <openbabel/obconversion.h> #include <openbabel/mol.h> #include <hipfft.h> #include "ReadCrd.h" #include "ReadGrids.h" #include "ReadQuaternions.h" #include "Rotate.h" #include "GetNonbondedParameters.h" #include "GetMinCoors.h" #include "GetMaxCoors.h" #include "GetIdxOfAtomsForVdwRadius.h" #include "FillLigandGrid.h" #include "GeneDiverseConformations.h" #include "GeneRandomConformations.h" #include "kernel.h" #include "QuaternionUniformSampling.h" #include "AddGridForcesToOpenMMSystem.h" #include "FilterQuaternions.h" #define CUDA_CALL(F) if( (F) != hipSuccess ) \ {printf("Error %s at %s:%d\n", hipGetErrorString(hipGetLastError()), \ __FILE__,__LINE__); exit(-1);} #define CUDA_CHECK() if( (hipPeekAtLastError()) != hipSuccess ) \ {printf("Error %s at %s:%d\n", hipGetErrorString(hipGetLastError()), \ __FILE__,__LINE__-1); exit(-1);} //// main function //// // Usage: // TranRotaConfSearch ligand.mol2 ligand.xml grid.txt maxNumOfConf numOfRotaPerConf maxNumOfRotaPerConf numOfRotaSample nLowest mode // Arguments: // - ligand.mol2 // - ligand.xml: serialized xml file for ligand // - grid.txt: txt file for grid potential generated from protein // - maxNumOfConf: maximum num of random conformations // - numOfRotatPerConf: num of directions for each conformation // - maxNumOfRotaPerConf: maximum num of direactions for each conformations // - numOfRotaSample: num of directions random sampled, from which the valid directions are selected. // - mode: // - 0: only search translation // - 1: only search translation and rotation. The conforamtion is given in mol2 file // - 2: search translation, rotation, and conformation. The final minimization step is done using grid. 
// - 3: search translation, rotation, and conformation. No final minimization. int main(int argc, char** argv) { OpenMM::Platform::loadPluginsFromDirectory( "/home/xqding/apps/openmmDev/lib/plugins"); // parse the command line parameters std::string mol2FileName(argv[1]); std::string ligandSysFileName(argv[2]); std::string gridFileName(argv[3]); int maxNumOfConformations = atoi(argv[4]); int numOfRotaPerConformation = atoi(argv[5]); int maxNumOfRotaPerConf = atoi(argv[6]); int numOfRotaSample = atoi(argv[7]); int nLowest = atoi(argv[8]); int mode = atoi(argv[9]); // read ligand molecule OpenBabel::OBMol ligandOBMol; OpenBabel::OBConversion conv(&std::cin, &std::cout); conv.SetInFormat("mol2"); conv.SetOutFormat("pdb"); conv.ReadFile(&ligandOBMol, mol2FileName); int nAtom = ligandOBMol.NumAtoms(); // read ligand openmm system std::ifstream ligandSysFile; ligandSysFile.open(ligandSysFileName, std::ifstream::in); if (ligandSysFile.fail()) { std::cout << "Open system file failed: " << ligandSysFileName << std::endl; return 1; } OpenMM::System *ligandOmmSys = new OpenMM::System(); ligandOmmSys = OpenMM::XmlSerializer::deserialize<OpenMM::System>(ligandSysFile); // read grid potential int numOfGrids, xdim, ydim, zdim; float midx, midy, midz; float xlen, ylen, zlen; float spacing, restraint_k; float *gridRadii, *gridValues; ReadGrids(numOfGrids, xdim, ydim, zdim, midx, midy, midz, xlen, ylen, zlen, spacing, restraint_k, gridRadii, gridValues, gridFileName); int numOfVdwGrids = numOfGrids - 1; // generate conformations double *coorsConformations; int numOfConformations; if (mode == 0 || mode == 1) // use one conformatoin in mol file { coorsConformations = new double [nAtom * 3]; memcpy(coorsConformations, ligandOBMol.GetCoordinates(), sizeof(double) * nAtom * 3); numOfConformations = 1; } if (mode == 2 || mode == 3) { numOfConformations = GeneRandomConformations(ligandOBMol, ligandOmmSys, maxNumOfConformations, coorsConformations); } std::cout << "num of conformations: " 
<< numOfConformations << std::endl; // get nonbonded parameters float atomCharges[nAtom]; float atomEpsilons[nAtom]; float atomRadii[nAtom]; GetNonbondedParameters(ligandOmmSys, atomCharges, atomEpsilons, atomRadii); // get index of atoms for each vdw radius int numOfVdwGridsUsed; std::vector<int> idxOfVdwUsed; std::vector< std::vector<int> > idxOfAtomVdwRadius(numOfVdwGrids); GetIdxOfAtomsForVdwRadius(nAtom, atomRadii, numOfVdwGrids, gridRadii, numOfVdwGridsUsed, idxOfVdwUsed, idxOfAtomVdwRadius); int numOfGridsUsed = numOfVdwGridsUsed + 1; // copy out the potential grids which are used float *usedGridValues; usedGridValues = new float[numOfGridsUsed*xdim*ydim*zdim]; for(int i = 0; i < numOfVdwGridsUsed; i++) { memcpy(&usedGridValues[i*xdim*ydim*zdim], &gridValues[idxOfVdwUsed[i]*xdim*ydim*zdim], sizeof(float)*xdim*ydim*zdim); } memcpy(&usedGridValues[numOfVdwGridsUsed*xdim*ydim*zdim], &gridValues[numOfVdwGrids*xdim*ydim*zdim], sizeof(float)*xdim*ydim*zdim); // set up ligand grid context or ligand protein context double gridMinX = midx - xlen / 2; double gridMinY = midy - ylen / 2; double gridMinZ = midz - zlen / 2; double gridMaxX = gridMinX + (xdim - 1) * spacing; double gridMaxY = gridMinY + (ydim - 1) * spacing; double gridMaxZ = gridMinZ + (zdim - 1) * spacing; ////////////////// ligand grid /////////////////// // add grid forces to ligand OpenMM System via cumstomized forces AddGridForcesToOpenMMSystem(xdim, ydim, zdim, gridMinX, gridMinY, gridMinZ, gridMaxX, gridMaxY, gridMaxZ, numOfVdwGridsUsed, usedGridValues, idxOfVdwUsed, idxOfAtomVdwRadius, &usedGridValues[numOfVdwGridsUsed*xdim*ydim*zdim], ligandOmmSys ); // build OpenMM ligand grid context OpenMM::VerletIntegrator ligandGridIntegrator(0.001); OpenMM::LocalEnergyMinimizer ligandGridMinimizer; OpenMM::Context ligandGridContext(*ligandOmmSys, ligandGridIntegrator); printf( "REMARK Build ligandGridContext Using OpenMM platform %s\n", ligandGridContext.getPlatform().getName().c_str() ); OpenMM::State 
ligandGridState; std::vector<OpenMM::Vec3> ligandGridPosition(ligandOmmSys->getNumParticles()); //// cufft transform for grid potential //// // batch cudaFFT for potential grids int n[3]; n[0] = xdim; n[1] = ydim; n[2] = zdim; int inembed[3]; inembed[0] = xdim; inembed[1] = ydim; inembed[2] = zdim; int idist = inembed[0] * inembed[1] * inembed[2]; int istride = 1; int onembed[3]; onembed[0] = xdim; onembed[1] = ydim; onembed[2] = zdim/2 + 1; int odist = onembed[0] * onembed[1] * onembed[2]; int ostride = 1; int nBatchPotential = numOfGridsUsed; hipfftReal* d_potential_f; hipMalloc((void **)&d_potential_f, sizeof(hipfftReal)*nBatchPotential*idist); hipMemcpy(d_potential_f, usedGridValues, sizeof(hipfftReal)*nBatchPotential*idist, hipMemcpyHostToDevice); hipfftComplex *d_potential_F; hipMalloc((void **)&d_potential_F, sizeof(hipfftComplex)*nBatchPotential*odist); hipfftHandle potentialPlan; hipfftResult potentialRes = hipfftPlanMany(&potentialPlan, 3, n, inembed, istride, idist, onembed, ostride, odist, HIPFFT_R2C, nBatchPotential); if (potentialRes != HIPFFT_SUCCESS) { std::cout << "grid potential plan creat failed!"; return 1; } potentialRes = hipfftExecR2C(potentialPlan, d_potential_f, d_potential_F); if (potentialRes != HIPFFT_SUCCESS) { std::cout << "grid potential cufft transform failed!"; return 1; } //// // generate uniform quaternions and setup data structure for all quaternions float* quaternions = 0; std::random_device rd; std::mt19937_64 gen(rd()); //// set up one one batch of cufft transform for ligand grid //// // for one batch of quaternions int numOfQuaternionsOneBatch = 50; int numOfBatches = 0; // ligand grid for one batch float *ligandGridValues; // grid for ligand ligandGridValues = new float[numOfQuaternionsOneBatch*numOfGridsUsed*xdim*ydim*zdim]; // cudaFFT for ligand grid int nBatchLigand = numOfQuaternionsOneBatch*numOfGridsUsed; hipfftReal* d_ligand_f; hipMalloc((void **)&d_ligand_f, sizeof(hipfftReal)*nBatchLigand*idist); hipfftComplex * 
d_ligand_F; hipMalloc((void **)&d_ligand_F, sizeof(hipfftComplex)*nBatchLigand*odist); hipfftHandle ligandPlan; hipfftResult ligandRes = hipfftPlanMany(&ligandPlan, 3, n, inembed, istride, idist, onembed, ostride, odist, HIPFFT_R2C, nBatchLigand); if (ligandRes != HIPFFT_SUCCESS) { std::cout << "ligand plan creat failed!"; return 1; } dim3 threads_ConjMult(1024, 1, 1); dim3 blocks_ConjMult((numOfQuaternionsOneBatch*numOfGridsUsed*odist)/(1024*1024) + 1,1024,1); hipfftComplex * d_ligand_sum_F; hipMalloc((void **)&d_ligand_sum_F, sizeof(hipfftComplex)*numOfQuaternionsOneBatch*odist); dim3 threads_SumGrids(1024, 1, 1); dim3 blocks_SumGrids((numOfQuaternionsOneBatch*odist)/(1024*1024) + 1,1024,1); hipfftReal *d_ligand_sum_f; hipMalloc((void **)&d_ligand_sum_f, sizeof(hipfftReal)*numOfQuaternionsOneBatch*idist); hipfftHandle ligandRPlan; hipfftResult ligandRRes = hipfftPlanMany(&ligandRPlan, 3, n, onembed, ostride, odist, inembed, istride, idist, HIPFFT_C2R, numOfQuaternionsOneBatch); if (ligandRRes != HIPFFT_SUCCESS) { std::cout << "ligand reverse plan creat failed!"; return 1; } //// // host energy float* energy; energy = new float[numOfQuaternionsOneBatch*idist]; // coordinates for one conformation float* conformerCoor; conformerCoor = new float[nAtom*3]; // ignore quaterions, whose end structures' dimenstion is larger than the grids size_t maxNQuaternionsUsed = maxNumOfConformations * numOfRotaPerConformation / numOfConformations + 1; if (maxNQuaternionsUsed > maxNumOfRotaPerConf) { maxNQuaternionsUsed = maxNumOfRotaPerConf; } size_t numOfQuaternionsUsed; float* quaternionsUsed = 0; int *minEnergyIdxX = 0; int *minEnergyIdxY = 0; int *minEnergyIdxZ = 0; float *coorsUsed = 0; float *mincoorsUsed = 0; float *maxcoorsUsed = 0; float *ligandLengthUsed = 0; // mol for saving lowest energy pose OpenBabel::OBMol finalPoses[numOfConformations * nLowest]; double energyOfFinalPoses[numOfConformations * nLowest]; for(int i = 0; i < numOfConformations * nLowest; i++) { 
finalPoses[i] = ligandOBMol; } // file for saving energy values of end poses std::ofstream energyFile("energy.txt", std::ofstream::out); //////////////////////// //// start searching /// //////////////////////// for (int idxOfConformer = 0; idxOfConformer < numOfConformations; idxOfConformer++) { std::cout << "idxOfConformer: " << idxOfConformer << std::endl; // get coordinates for one conformer for(int i = 0; i < nAtom; i++) { conformerCoor[i*3 + 0] = (float) coorsConformations[(idxOfConformer*nAtom + i)*3 + 0]; conformerCoor[i*3 + 1] = (float) coorsConformations[(idxOfConformer*nAtom + i)*3 + 1]; conformerCoor[i*3 + 2] = (float) coorsConformations[(idxOfConformer*nAtom + i)*3 + 2]; } // generate all quaternions and ignore some quaternions, which will rotate the ligand // to have larger dimension than the grid if (mode == 0) // one search traslation, so make all quaterions unity. { quaternions = new float[numOfRotaSample * 4]; for(int i = 0; i < numOfRotaSample; i++) { quaternions[i*4 + 0] = 1; quaternions[i*4 + 1] = 0; quaternions[i*4 + 2] = 0; quaternions[i*4 + 3] = 0; } } if (mode == 1 || mode == 2) { QuaternionUniformSampling(gen, quaternions, numOfRotaSample); } numOfQuaternionsUsed = FilterQuaternions(conformerCoor, nAtom, numOfRotaSample, quaternions, xlen, ylen, zlen, maxNQuaternionsUsed, quaternionsUsed); delete[] minEnergyIdxX; delete[] minEnergyIdxY; delete[] minEnergyIdxZ; minEnergyIdxX = new int[numOfQuaternionsUsed]; minEnergyIdxY = new int[numOfQuaternionsUsed]; minEnergyIdxZ = new int[numOfQuaternionsUsed]; std::vector <float> minEnergyQuaternionsUsed(numOfQuaternionsUsed); for(int i = 0; i < numOfQuaternionsUsed; i++) { minEnergyQuaternionsUsed[i] = INFINITY; } delete[] coorsUsed; delete[] mincoorsUsed; delete[] maxcoorsUsed; delete[] ligandLengthUsed; coorsUsed = new float[numOfQuaternionsUsed*nAtom*3]; mincoorsUsed = new float[numOfQuaternionsUsed*3]; maxcoorsUsed = new float[numOfQuaternionsUsed*3]; ligandLengthUsed = new 
float[numOfQuaternionsUsed*3]; for(int i = 0; i < numOfQuaternionsUsed; i++) { for(int j = 0; j < nAtom; j++) { Rotate(&quaternionsUsed[i*4], &conformerCoor[j*3], &coorsUsed[i*nAtom*3+j*3]); } } // calculate minimum coor for each quaternions GetMinCoors(numOfQuaternionsUsed, nAtom, coorsUsed, mincoorsUsed); // calculate maximum coor for each quaternions GetMaxCoors(numOfQuaternionsUsed, nAtom, coorsUsed, maxcoorsUsed); // calculate the length for each quaternion for(int i = 0; i < numOfQuaternionsUsed; i++) { ligandLengthUsed[i*3 + 0] = maxcoorsUsed[i*3 + 0] - mincoorsUsed[i*3 + 0]; ligandLengthUsed[i*3 + 1] = maxcoorsUsed[i*3 + 1] - mincoorsUsed[i*3 + 1]; ligandLengthUsed[i*3 + 2] = maxcoorsUsed[i*3 + 2] - mincoorsUsed[i*3 + 2]; } // loop over batches of quaternions // num of batches if (numOfQuaternionsUsed % numOfQuaternionsOneBatch == 0) { numOfBatches = numOfQuaternionsUsed / numOfQuaternionsOneBatch; } else { numOfBatches = numOfQuaternionsUsed / numOfQuaternionsOneBatch + 1; } for(int idxOfBatch = 0; idxOfBatch < numOfBatches; idxOfBatch++) { std::cout << "idxOfBatch: " << idxOfBatch << std::endl; // fill ligand grid memset(ligandGridValues, 0, sizeof(float)*numOfQuaternionsOneBatch*numOfGridsUsed*xdim*ydim*zdim); if ((idxOfBatch + 1) * numOfQuaternionsOneBatch > numOfQuaternionsUsed) { FillLigandGrid(numOfQuaternionsUsed - idxOfBatch * numOfQuaternionsOneBatch, nAtom, &coorsUsed[idxOfBatch*numOfQuaternionsOneBatch*nAtom*3], &mincoorsUsed[idxOfBatch*numOfQuaternionsOneBatch*3], atomCharges, atomEpsilons, numOfVdwGridsUsed, idxOfVdwUsed, idxOfAtomVdwRadius, xdim, ydim, zdim, spacing, ligandGridValues); } else { FillLigandGrid(numOfQuaternionsOneBatch, nAtom, &coorsUsed[idxOfBatch*numOfQuaternionsOneBatch*nAtom*3], &mincoorsUsed[idxOfBatch*numOfQuaternionsOneBatch*3], atomCharges, atomEpsilons, numOfVdwGridsUsed, idxOfVdwUsed, idxOfAtomVdwRadius, xdim, ydim, zdim, spacing, ligandGridValues); } // batch cudaFFT for ligand grid hipMemcpy(d_ligand_f, 
ligandGridValues, sizeof(hipfftReal)*nBatchLigand*idist, hipMemcpyHostToDevice); ligandRes = hipfftExecR2C(ligandPlan, d_ligand_f, d_ligand_F); if (ligandRes != HIPFFT_SUCCESS) { std::cout << "ligand grid transform failed!"; return 1; } // calcualte energy using reverse FFT hipLaunchKernelGGL(( ConjMult) , dim3(blocks_ConjMult), dim3(threads_ConjMult), 0, 0, d_potential_F, d_ligand_F, odist, numOfGridsUsed); CUDA_CHECK(); hipLaunchKernelGGL(( SumGrids) , dim3(blocks_SumGrids), dim3(threads_SumGrids), 0, 0, d_ligand_F, d_ligand_sum_F, numOfGridsUsed, odist, idist); CUDA_CHECK(); ligandRRes = hipfftExecC2R(ligandRPlan, d_ligand_sum_F, d_ligand_sum_f); if (ligandRRes != HIPFFT_SUCCESS) { std::cout << "ligand grid reverse transform failed!"; return 1; } // copy energy back hipMemcpy(energy, d_ligand_sum_f, sizeof(float)*numOfQuaternionsOneBatch*idist, hipMemcpyDeviceToHost); // record the minimum energy pose in terms of quaternions, x, y and z for(int q = 0; q < numOfQuaternionsOneBatch; q++) { int idxOfQuaternions = idxOfBatch * numOfQuaternionsOneBatch + q; if(idxOfQuaternions < numOfQuaternionsUsed) { for(int i = 0; i < (xdim-int(ligandLengthUsed[idxOfQuaternions*3+0]/spacing)-2); i++) { for(int j = 0; j < (ydim-int(ligandLengthUsed[idxOfQuaternions*3+1]/spacing)-2); j++) { for(int k = 0; k < (ydim-int(ligandLengthUsed[idxOfQuaternions*3+2]/spacing)-2); k++) { int tmp = q*idist + (i*ydim + j)*zdim + k; if(energy[tmp] / sqrt(idist) < minEnergyQuaternionsUsed[idxOfQuaternions]) { minEnergyQuaternionsUsed[idxOfQuaternions] = energy[tmp] / sqrt(idist); minEnergyIdxX[idxOfQuaternions] = i; minEnergyIdxY[idxOfQuaternions] = j; minEnergyIdxZ[idxOfQuaternions] = k; } } } } } } } // finish all batches of quaternions for one conformer // calculate the coordinates corresponding to lowest nLowest energy orientation std::vector<size_t> idxOfSortedQuater; idxOfSortedQuater = sort_index<float>(minEnergyQuaternionsUsed); for(int iLowest = 0; iLowest < nLowest && iLowest < 
numOfQuaternionsUsed; iLowest++) { int idxQ = idxOfSortedQuater[iLowest]; double minEnergyCoorDouble[nAtom*3]; // coordinate corresponding to the lowest energy pose in term of orientations for(int i = 0; i < nAtom; i++) { minEnergyCoorDouble[i*3 + 0] = (double) coorsUsed[idxQ*nAtom*3 + i*3 + 0]; minEnergyCoorDouble[i*3 + 1] = (double) coorsUsed[idxQ*nAtom*3 + i*3 + 1]; minEnergyCoorDouble[i*3 + 2] = (double) coorsUsed[idxQ*nAtom*3 + i*3 + 2]; } for(int i = 0; i < nAtom; i++) { minEnergyCoorDouble[i*3 + 0] += (gridMinX - mincoorsUsed[idxQ*3 + 0] + minEnergyIdxX[idxQ] * spacing); minEnergyCoorDouble[i*3 + 1] += (gridMinY - mincoorsUsed[idxQ*3 + 1] + minEnergyIdxY[idxQ] * spacing); minEnergyCoorDouble[i*3 + 2] += (gridMinZ - mincoorsUsed[idxQ*3 + 2] + minEnergyIdxZ[idxQ] * spacing); } // final minimize // minimize with presence of only grid if (mode == 2) { for(int i = 0; i < ligandOmmSys->getNumParticles(); i++) { ligandGridPosition[i] = OpenMM::Vec3(minEnergyCoorDouble[i*3+0]*OpenMM::NmPerAngstrom, minEnergyCoorDouble[i*3+1]*OpenMM::NmPerAngstrom, minEnergyCoorDouble[i*3+2]*OpenMM::NmPerAngstrom); } ligandGridContext.setPositions(ligandGridPosition); if (mode == 2) { ligandGridMinimizer.minimize(ligandGridContext, 0.001, 1000); } ligandGridState = ligandGridContext.getState(OpenMM::State::Energy); for(int i = 0; i < ligandOmmSys->getNumParticles(); i++) { minEnergyCoorDouble[i*3 + 0] = ligandGridPosition[i][0] * OpenMM::AngstromsPerNm; minEnergyCoorDouble[i*3 + 1] = ligandGridPosition[i][1] * OpenMM::AngstromsPerNm; minEnergyCoorDouble[i*3 + 2] = ligandGridPosition[i][2] * OpenMM::AngstromsPerNm; } energyOfFinalPoses[idxOfConformer * nLowest + iLowest] = ligandGridState.getPotentialEnergy() * OpenMM::KcalPerKJ; } // write nlowest energy pose out finalPoses[idxOfConformer * nLowest + iLowest].SetCoordinates(minEnergyCoorDouble); std::string fileName; fileName = "conformer_"; fileName += std::to_string(idxOfConformer); fileName += "_"; fileName += 
std::to_string(iLowest); fileName += ".pdb"; conv.WriteFile(&finalPoses[idxOfConformer*nLowest+iLowest], fileName); energyFile << fileName << "," << idxOfConformer << "," << iLowest << "," << energyOfFinalPoses[idxOfConformer * nLowest + iLowest] << std::endl; std::cout << "Conformer: " << idxOfConformer << ", IdxQ: " << idxQ << ", IdxX: " << minEnergyIdxX[idxQ] << ", IdxY: " << minEnergyIdxY[idxQ] << ", IdxZ: " << minEnergyIdxZ[idxQ] << ", MinEnergyTranRota:" << minEnergyQuaternionsUsed[idxQ] << ", Potential Energy: " << energyOfFinalPoses[idxOfConformer * nLowest + iLowest] << std::endl; } } energyFile.close(); return 0; }
0683132f71e44d89d0637b0e3032e2fd41f9d5b9.cu
#include <fstream> #include <iostream> #include <stdlib.h> #include <vector> #include <string> #include <math.h> #include <time.h> #include <stddef.h> #include "OpenMM.h" #include <openbabel/obconversion.h> #include <openbabel/mol.h> #include <cufft.h> #include "ReadCrd.h" #include "ReadGrids.h" #include "ReadQuaternions.h" #include "Rotate.h" #include "GetNonbondedParameters.h" #include "GetMinCoors.h" #include "GetMaxCoors.h" #include "GetIdxOfAtomsForVdwRadius.h" #include "FillLigandGrid.h" #include "GeneDiverseConformations.h" #include "GeneRandomConformations.h" #include "kernel.h" #include "QuaternionUniformSampling.h" #include "AddGridForcesToOpenMMSystem.h" #include "FilterQuaternions.h" #define CUDA_CALL(F) if( (F) != cudaSuccess ) \ {printf("Error %s at %s:%d\n", cudaGetErrorString(cudaGetLastError()), \ __FILE__,__LINE__); exit(-1);} #define CUDA_CHECK() if( (cudaPeekAtLastError()) != cudaSuccess ) \ {printf("Error %s at %s:%d\n", cudaGetErrorString(cudaGetLastError()), \ __FILE__,__LINE__-1); exit(-1);} //// main function //// // Usage: // TranRotaConfSearch ligand.mol2 ligand.xml grid.txt maxNumOfConf numOfRotaPerConf maxNumOfRotaPerConf numOfRotaSample nLowest mode // Arguments: // - ligand.mol2 // - ligand.xml: serialized xml file for ligand // - grid.txt: txt file for grid potential generated from protein // - maxNumOfConf: maximum num of random conformations // - numOfRotatPerConf: num of directions for each conformation // - maxNumOfRotaPerConf: maximum num of direactions for each conformations // - numOfRotaSample: num of directions random sampled, from which the valid directions are selected. // - mode: // - 0: only search translation // - 1: only search translation and rotation. The conforamtion is given in mol2 file // - 2: search translation, rotation, and conformation. The final minimization step is done using grid. // - 3: search translation, rotation, and conformation. No final minimization. 
int main(int argc, char** argv) { OpenMM::Platform::loadPluginsFromDirectory( "/home/xqding/apps/openmmDev/lib/plugins"); // parse the command line parameters std::string mol2FileName(argv[1]); std::string ligandSysFileName(argv[2]); std::string gridFileName(argv[3]); int maxNumOfConformations = atoi(argv[4]); int numOfRotaPerConformation = atoi(argv[5]); int maxNumOfRotaPerConf = atoi(argv[6]); int numOfRotaSample = atoi(argv[7]); int nLowest = atoi(argv[8]); int mode = atoi(argv[9]); // read ligand molecule OpenBabel::OBMol ligandOBMol; OpenBabel::OBConversion conv(&std::cin, &std::cout); conv.SetInFormat("mol2"); conv.SetOutFormat("pdb"); conv.ReadFile(&ligandOBMol, mol2FileName); int nAtom = ligandOBMol.NumAtoms(); // read ligand openmm system std::ifstream ligandSysFile; ligandSysFile.open(ligandSysFileName, std::ifstream::in); if (ligandSysFile.fail()) { std::cout << "Open system file failed: " << ligandSysFileName << std::endl; return 1; } OpenMM::System *ligandOmmSys = new OpenMM::System(); ligandOmmSys = OpenMM::XmlSerializer::deserialize<OpenMM::System>(ligandSysFile); // read grid potential int numOfGrids, xdim, ydim, zdim; float midx, midy, midz; float xlen, ylen, zlen; float spacing, restraint_k; float *gridRadii, *gridValues; ReadGrids(numOfGrids, xdim, ydim, zdim, midx, midy, midz, xlen, ylen, zlen, spacing, restraint_k, gridRadii, gridValues, gridFileName); int numOfVdwGrids = numOfGrids - 1; // generate conformations double *coorsConformations; int numOfConformations; if (mode == 0 || mode == 1) // use one conformatoin in mol file { coorsConformations = new double [nAtom * 3]; memcpy(coorsConformations, ligandOBMol.GetCoordinates(), sizeof(double) * nAtom * 3); numOfConformations = 1; } if (mode == 2 || mode == 3) { numOfConformations = GeneRandomConformations(ligandOBMol, ligandOmmSys, maxNumOfConformations, coorsConformations); } std::cout << "num of conformations: " << numOfConformations << std::endl; // get nonbonded parameters float 
atomCharges[nAtom]; float atomEpsilons[nAtom]; float atomRadii[nAtom]; GetNonbondedParameters(ligandOmmSys, atomCharges, atomEpsilons, atomRadii); // get index of atoms for each vdw radius int numOfVdwGridsUsed; std::vector<int> idxOfVdwUsed; std::vector< std::vector<int> > idxOfAtomVdwRadius(numOfVdwGrids); GetIdxOfAtomsForVdwRadius(nAtom, atomRadii, numOfVdwGrids, gridRadii, numOfVdwGridsUsed, idxOfVdwUsed, idxOfAtomVdwRadius); int numOfGridsUsed = numOfVdwGridsUsed + 1; // copy out the potential grids which are used float *usedGridValues; usedGridValues = new float[numOfGridsUsed*xdim*ydim*zdim]; for(int i = 0; i < numOfVdwGridsUsed; i++) { memcpy(&usedGridValues[i*xdim*ydim*zdim], &gridValues[idxOfVdwUsed[i]*xdim*ydim*zdim], sizeof(float)*xdim*ydim*zdim); } memcpy(&usedGridValues[numOfVdwGridsUsed*xdim*ydim*zdim], &gridValues[numOfVdwGrids*xdim*ydim*zdim], sizeof(float)*xdim*ydim*zdim); // set up ligand grid context or ligand protein context double gridMinX = midx - xlen / 2; double gridMinY = midy - ylen / 2; double gridMinZ = midz - zlen / 2; double gridMaxX = gridMinX + (xdim - 1) * spacing; double gridMaxY = gridMinY + (ydim - 1) * spacing; double gridMaxZ = gridMinZ + (zdim - 1) * spacing; ////////////////// ligand grid /////////////////// // add grid forces to ligand OpenMM System via cumstomized forces AddGridForcesToOpenMMSystem(xdim, ydim, zdim, gridMinX, gridMinY, gridMinZ, gridMaxX, gridMaxY, gridMaxZ, numOfVdwGridsUsed, usedGridValues, idxOfVdwUsed, idxOfAtomVdwRadius, &usedGridValues[numOfVdwGridsUsed*xdim*ydim*zdim], ligandOmmSys ); // build OpenMM ligand grid context OpenMM::VerletIntegrator ligandGridIntegrator(0.001); OpenMM::LocalEnergyMinimizer ligandGridMinimizer; OpenMM::Context ligandGridContext(*ligandOmmSys, ligandGridIntegrator); printf( "REMARK Build ligandGridContext Using OpenMM platform %s\n", ligandGridContext.getPlatform().getName().c_str() ); OpenMM::State ligandGridState; std::vector<OpenMM::Vec3> 
ligandGridPosition(ligandOmmSys->getNumParticles()); //// cufft transform for grid potential //// // batch cudaFFT for potential grids int n[3]; n[0] = xdim; n[1] = ydim; n[2] = zdim; int inembed[3]; inembed[0] = xdim; inembed[1] = ydim; inembed[2] = zdim; int idist = inembed[0] * inembed[1] * inembed[2]; int istride = 1; int onembed[3]; onembed[0] = xdim; onembed[1] = ydim; onembed[2] = zdim/2 + 1; int odist = onembed[0] * onembed[1] * onembed[2]; int ostride = 1; int nBatchPotential = numOfGridsUsed; cufftReal* d_potential_f; cudaMalloc((void **)&d_potential_f, sizeof(cufftReal)*nBatchPotential*idist); cudaMemcpy(d_potential_f, usedGridValues, sizeof(cufftReal)*nBatchPotential*idist, cudaMemcpyHostToDevice); cufftComplex *d_potential_F; cudaMalloc((void **)&d_potential_F, sizeof(cufftComplex)*nBatchPotential*odist); cufftHandle potentialPlan; cufftResult potentialRes = cufftPlanMany(&potentialPlan, 3, n, inembed, istride, idist, onembed, ostride, odist, CUFFT_R2C, nBatchPotential); if (potentialRes != CUFFT_SUCCESS) { std::cout << "grid potential plan creat failed!"; return 1; } potentialRes = cufftExecR2C(potentialPlan, d_potential_f, d_potential_F); if (potentialRes != CUFFT_SUCCESS) { std::cout << "grid potential cufft transform failed!"; return 1; } //// // generate uniform quaternions and setup data structure for all quaternions float* quaternions = 0; std::random_device rd; std::mt19937_64 gen(rd()); //// set up one one batch of cufft transform for ligand grid //// // for one batch of quaternions int numOfQuaternionsOneBatch = 50; int numOfBatches = 0; // ligand grid for one batch float *ligandGridValues; // grid for ligand ligandGridValues = new float[numOfQuaternionsOneBatch*numOfGridsUsed*xdim*ydim*zdim]; // cudaFFT for ligand grid int nBatchLigand = numOfQuaternionsOneBatch*numOfGridsUsed; cufftReal* d_ligand_f; cudaMalloc((void **)&d_ligand_f, sizeof(cufftReal)*nBatchLigand*idist); cufftComplex * d_ligand_F; cudaMalloc((void **)&d_ligand_F, 
sizeof(cufftComplex)*nBatchLigand*odist); cufftHandle ligandPlan; cufftResult ligandRes = cufftPlanMany(&ligandPlan, 3, n, inembed, istride, idist, onembed, ostride, odist, CUFFT_R2C, nBatchLigand); if (ligandRes != CUFFT_SUCCESS) { std::cout << "ligand plan creat failed!"; return 1; } dim3 threads_ConjMult(1024, 1, 1); dim3 blocks_ConjMult((numOfQuaternionsOneBatch*numOfGridsUsed*odist)/(1024*1024) + 1,1024,1); cufftComplex * d_ligand_sum_F; cudaMalloc((void **)&d_ligand_sum_F, sizeof(cufftComplex)*numOfQuaternionsOneBatch*odist); dim3 threads_SumGrids(1024, 1, 1); dim3 blocks_SumGrids((numOfQuaternionsOneBatch*odist)/(1024*1024) + 1,1024,1); cufftReal *d_ligand_sum_f; cudaMalloc((void **)&d_ligand_sum_f, sizeof(cufftReal)*numOfQuaternionsOneBatch*idist); cufftHandle ligandRPlan; cufftResult ligandRRes = cufftPlanMany(&ligandRPlan, 3, n, onembed, ostride, odist, inembed, istride, idist, CUFFT_C2R, numOfQuaternionsOneBatch); if (ligandRRes != CUFFT_SUCCESS) { std::cout << "ligand reverse plan creat failed!"; return 1; } //// // host energy float* energy; energy = new float[numOfQuaternionsOneBatch*idist]; // coordinates for one conformation float* conformerCoor; conformerCoor = new float[nAtom*3]; // ignore quaterions, whose end structures' dimenstion is larger than the grids size_t maxNQuaternionsUsed = maxNumOfConformations * numOfRotaPerConformation / numOfConformations + 1; if (maxNQuaternionsUsed > maxNumOfRotaPerConf) { maxNQuaternionsUsed = maxNumOfRotaPerConf; } size_t numOfQuaternionsUsed; float* quaternionsUsed = 0; int *minEnergyIdxX = 0; int *minEnergyIdxY = 0; int *minEnergyIdxZ = 0; float *coorsUsed = 0; float *mincoorsUsed = 0; float *maxcoorsUsed = 0; float *ligandLengthUsed = 0; // mol for saving lowest energy pose OpenBabel::OBMol finalPoses[numOfConformations * nLowest]; double energyOfFinalPoses[numOfConformations * nLowest]; for(int i = 0; i < numOfConformations * nLowest; i++) { finalPoses[i] = ligandOBMol; } // file for saving energy values 
of end poses std::ofstream energyFile("energy.txt", std::ofstream::out); //////////////////////// //// start searching /// //////////////////////// for (int idxOfConformer = 0; idxOfConformer < numOfConformations; idxOfConformer++) { std::cout << "idxOfConformer: " << idxOfConformer << std::endl; // get coordinates for one conformer for(int i = 0; i < nAtom; i++) { conformerCoor[i*3 + 0] = (float) coorsConformations[(idxOfConformer*nAtom + i)*3 + 0]; conformerCoor[i*3 + 1] = (float) coorsConformations[(idxOfConformer*nAtom + i)*3 + 1]; conformerCoor[i*3 + 2] = (float) coorsConformations[(idxOfConformer*nAtom + i)*3 + 2]; } // generate all quaternions and ignore some quaternions, which will rotate the ligand // to have larger dimension than the grid if (mode == 0) // one search traslation, so make all quaterions unity. { quaternions = new float[numOfRotaSample * 4]; for(int i = 0; i < numOfRotaSample; i++) { quaternions[i*4 + 0] = 1; quaternions[i*4 + 1] = 0; quaternions[i*4 + 2] = 0; quaternions[i*4 + 3] = 0; } } if (mode == 1 || mode == 2) { QuaternionUniformSampling(gen, quaternions, numOfRotaSample); } numOfQuaternionsUsed = FilterQuaternions(conformerCoor, nAtom, numOfRotaSample, quaternions, xlen, ylen, zlen, maxNQuaternionsUsed, quaternionsUsed); delete[] minEnergyIdxX; delete[] minEnergyIdxY; delete[] minEnergyIdxZ; minEnergyIdxX = new int[numOfQuaternionsUsed]; minEnergyIdxY = new int[numOfQuaternionsUsed]; minEnergyIdxZ = new int[numOfQuaternionsUsed]; std::vector <float> minEnergyQuaternionsUsed(numOfQuaternionsUsed); for(int i = 0; i < numOfQuaternionsUsed; i++) { minEnergyQuaternionsUsed[i] = INFINITY; } delete[] coorsUsed; delete[] mincoorsUsed; delete[] maxcoorsUsed; delete[] ligandLengthUsed; coorsUsed = new float[numOfQuaternionsUsed*nAtom*3]; mincoorsUsed = new float[numOfQuaternionsUsed*3]; maxcoorsUsed = new float[numOfQuaternionsUsed*3]; ligandLengthUsed = new float[numOfQuaternionsUsed*3]; for(int i = 0; i < numOfQuaternionsUsed; i++) { for(int 
j = 0; j < nAtom; j++) { Rotate(&quaternionsUsed[i*4], &conformerCoor[j*3], &coorsUsed[i*nAtom*3+j*3]); } } // calculate minimum coor for each quaternions GetMinCoors(numOfQuaternionsUsed, nAtom, coorsUsed, mincoorsUsed); // calculate maximum coor for each quaternions GetMaxCoors(numOfQuaternionsUsed, nAtom, coorsUsed, maxcoorsUsed); // calculate the length for each quaternion for(int i = 0; i < numOfQuaternionsUsed; i++) { ligandLengthUsed[i*3 + 0] = maxcoorsUsed[i*3 + 0] - mincoorsUsed[i*3 + 0]; ligandLengthUsed[i*3 + 1] = maxcoorsUsed[i*3 + 1] - mincoorsUsed[i*3 + 1]; ligandLengthUsed[i*3 + 2] = maxcoorsUsed[i*3 + 2] - mincoorsUsed[i*3 + 2]; } // loop over batches of quaternions // num of batches if (numOfQuaternionsUsed % numOfQuaternionsOneBatch == 0) { numOfBatches = numOfQuaternionsUsed / numOfQuaternionsOneBatch; } else { numOfBatches = numOfQuaternionsUsed / numOfQuaternionsOneBatch + 1; } for(int idxOfBatch = 0; idxOfBatch < numOfBatches; idxOfBatch++) { std::cout << "idxOfBatch: " << idxOfBatch << std::endl; // fill ligand grid memset(ligandGridValues, 0, sizeof(float)*numOfQuaternionsOneBatch*numOfGridsUsed*xdim*ydim*zdim); if ((idxOfBatch + 1) * numOfQuaternionsOneBatch > numOfQuaternionsUsed) { FillLigandGrid(numOfQuaternionsUsed - idxOfBatch * numOfQuaternionsOneBatch, nAtom, &coorsUsed[idxOfBatch*numOfQuaternionsOneBatch*nAtom*3], &mincoorsUsed[idxOfBatch*numOfQuaternionsOneBatch*3], atomCharges, atomEpsilons, numOfVdwGridsUsed, idxOfVdwUsed, idxOfAtomVdwRadius, xdim, ydim, zdim, spacing, ligandGridValues); } else { FillLigandGrid(numOfQuaternionsOneBatch, nAtom, &coorsUsed[idxOfBatch*numOfQuaternionsOneBatch*nAtom*3], &mincoorsUsed[idxOfBatch*numOfQuaternionsOneBatch*3], atomCharges, atomEpsilons, numOfVdwGridsUsed, idxOfVdwUsed, idxOfAtomVdwRadius, xdim, ydim, zdim, spacing, ligandGridValues); } // batch cudaFFT for ligand grid cudaMemcpy(d_ligand_f, ligandGridValues, sizeof(cufftReal)*nBatchLigand*idist, cudaMemcpyHostToDevice); ligandRes = 
cufftExecR2C(ligandPlan, d_ligand_f, d_ligand_F); if (ligandRes != CUFFT_SUCCESS) { std::cout << "ligand grid transform failed!"; return 1; } // calcualte energy using reverse FFT ConjMult <<<blocks_ConjMult, threads_ConjMult>>> (d_potential_F, d_ligand_F, odist, numOfGridsUsed); CUDA_CHECK(); SumGrids <<<blocks_SumGrids, threads_SumGrids>>> (d_ligand_F, d_ligand_sum_F, numOfGridsUsed, odist, idist); CUDA_CHECK(); ligandRRes = cufftExecC2R(ligandRPlan, d_ligand_sum_F, d_ligand_sum_f); if (ligandRRes != CUFFT_SUCCESS) { std::cout << "ligand grid reverse transform failed!"; return 1; } // copy energy back cudaMemcpy(energy, d_ligand_sum_f, sizeof(float)*numOfQuaternionsOneBatch*idist, cudaMemcpyDeviceToHost); // record the minimum energy pose in terms of quaternions, x, y and z for(int q = 0; q < numOfQuaternionsOneBatch; q++) { int idxOfQuaternions = idxOfBatch * numOfQuaternionsOneBatch + q; if(idxOfQuaternions < numOfQuaternionsUsed) { for(int i = 0; i < (xdim-int(ligandLengthUsed[idxOfQuaternions*3+0]/spacing)-2); i++) { for(int j = 0; j < (ydim-int(ligandLengthUsed[idxOfQuaternions*3+1]/spacing)-2); j++) { for(int k = 0; k < (ydim-int(ligandLengthUsed[idxOfQuaternions*3+2]/spacing)-2); k++) { int tmp = q*idist + (i*ydim + j)*zdim + k; if(energy[tmp] / sqrt(idist) < minEnergyQuaternionsUsed[idxOfQuaternions]) { minEnergyQuaternionsUsed[idxOfQuaternions] = energy[tmp] / sqrt(idist); minEnergyIdxX[idxOfQuaternions] = i; minEnergyIdxY[idxOfQuaternions] = j; minEnergyIdxZ[idxOfQuaternions] = k; } } } } } } } // finish all batches of quaternions for one conformer // calculate the coordinates corresponding to lowest nLowest energy orientation std::vector<size_t> idxOfSortedQuater; idxOfSortedQuater = sort_index<float>(minEnergyQuaternionsUsed); for(int iLowest = 0; iLowest < nLowest && iLowest < numOfQuaternionsUsed; iLowest++) { int idxQ = idxOfSortedQuater[iLowest]; double minEnergyCoorDouble[nAtom*3]; // coordinate corresponding to the lowest energy pose in term of 
orientations for(int i = 0; i < nAtom; i++) { minEnergyCoorDouble[i*3 + 0] = (double) coorsUsed[idxQ*nAtom*3 + i*3 + 0]; minEnergyCoorDouble[i*3 + 1] = (double) coorsUsed[idxQ*nAtom*3 + i*3 + 1]; minEnergyCoorDouble[i*3 + 2] = (double) coorsUsed[idxQ*nAtom*3 + i*3 + 2]; } for(int i = 0; i < nAtom; i++) { minEnergyCoorDouble[i*3 + 0] += (gridMinX - mincoorsUsed[idxQ*3 + 0] + minEnergyIdxX[idxQ] * spacing); minEnergyCoorDouble[i*3 + 1] += (gridMinY - mincoorsUsed[idxQ*3 + 1] + minEnergyIdxY[idxQ] * spacing); minEnergyCoorDouble[i*3 + 2] += (gridMinZ - mincoorsUsed[idxQ*3 + 2] + minEnergyIdxZ[idxQ] * spacing); } // final minimize // minimize with presence of only grid if (mode == 2) { for(int i = 0; i < ligandOmmSys->getNumParticles(); i++) { ligandGridPosition[i] = OpenMM::Vec3(minEnergyCoorDouble[i*3+0]*OpenMM::NmPerAngstrom, minEnergyCoorDouble[i*3+1]*OpenMM::NmPerAngstrom, minEnergyCoorDouble[i*3+2]*OpenMM::NmPerAngstrom); } ligandGridContext.setPositions(ligandGridPosition); if (mode == 2) { ligandGridMinimizer.minimize(ligandGridContext, 0.001, 1000); } ligandGridState = ligandGridContext.getState(OpenMM::State::Energy); for(int i = 0; i < ligandOmmSys->getNumParticles(); i++) { minEnergyCoorDouble[i*3 + 0] = ligandGridPosition[i][0] * OpenMM::AngstromsPerNm; minEnergyCoorDouble[i*3 + 1] = ligandGridPosition[i][1] * OpenMM::AngstromsPerNm; minEnergyCoorDouble[i*3 + 2] = ligandGridPosition[i][2] * OpenMM::AngstromsPerNm; } energyOfFinalPoses[idxOfConformer * nLowest + iLowest] = ligandGridState.getPotentialEnergy() * OpenMM::KcalPerKJ; } // write nlowest energy pose out finalPoses[idxOfConformer * nLowest + iLowest].SetCoordinates(minEnergyCoorDouble); std::string fileName; fileName = "conformer_"; fileName += std::to_string(idxOfConformer); fileName += "_"; fileName += std::to_string(iLowest); fileName += ".pdb"; conv.WriteFile(&finalPoses[idxOfConformer*nLowest+iLowest], fileName); energyFile << fileName << "," << idxOfConformer << "," << iLowest << "," << 
energyOfFinalPoses[idxOfConformer * nLowest + iLowest] << std::endl; std::cout << "Conformer: " << idxOfConformer << ", IdxQ: " << idxQ << ", IdxX: " << minEnergyIdxX[idxQ] << ", IdxY: " << minEnergyIdxY[idxQ] << ", IdxZ: " << minEnergyIdxZ[idxQ] << ", MinEnergyTranRota:" << minEnergyQuaternionsUsed[idxQ] << ", Potential Energy: " << energyOfFinalPoses[idxOfConformer * nLowest + iLowest] << std::endl; } } energyFile.close(); return 0; }
8db8a02d780b94816f669fe1f8165e5aeaa7fa73.hip
// !!! This is a file automatically generated by hipify!!! /* standard libraries */ #include <conio.h> //for _getch #include <iostream> #include <float.h> #include <math.h> //for M_PI #include <stdio.h> #include <stdlib.h> #include <string.h> #include <tchar.h> #include <sys/timeb.h> /* CUDA-relevant includes */ #include <rocblas.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #include <cutil_inline.h> #include <multithreading.h> /* Project includes */ #include "Rohan.h" extern int iDebugLvl, iDevDebug, iTrace; extern float gElapsedTime, gKernelTimeTally; // device-global variables to facilitate data transfer //__device__ __align__(16) __constant__ struct rohanContext devSes; //__device__ __align__(16) __constant__ struct rohanLearningSet devLearn; //__device__ __align__(16) struct rohanNetwork devNet; //__device__ __align__(16) const hipDoubleComplex gpuZero = { 0, 0 }; //__device__ __align__(16) double devdReturn[1024*1024]; //__device__ __align__(16) double devdRMSE=0; //__device__ __align__(16) int devlReturn[1024*1024]; //__device__ __align__(16) int devlTrainable=0; //__device__ __align__(16) int iDevDebug=0;
8db8a02d780b94816f669fe1f8165e5aeaa7fa73.cu
/* standard libraries */ #include <conio.h> //for _getch #include <iostream> #include <float.h> #include <math.h> //for M_PI #include <stdio.h> #include <stdlib.h> #include <string.h> #include <tchar.h> #include <sys/timeb.h> /* CUDA-relevant includes */ #include <cublas.h> #include <cuda.h> #include <cuda_runtime_api.h> #include <cutil_inline.h> #include <multithreading.h> /* Project includes */ #include "Rohan.h" extern int iDebugLvl, iDevDebug, iTrace; extern float gElapsedTime, gKernelTimeTally; // device-global variables to facilitate data transfer //__device__ __align__(16) __constant__ struct rohanContext devSes; //__device__ __align__(16) __constant__ struct rohanLearningSet devLearn; //__device__ __align__(16) struct rohanNetwork devNet; //__device__ __align__(16) const cuDoubleComplex gpuZero = { 0, 0 }; //__device__ __align__(16) double devdReturn[1024*1024]; //__device__ __align__(16) double devdRMSE=0; //__device__ __align__(16) int devlReturn[1024*1024]; //__device__ __align__(16) int devlTrainable=0; //__device__ __align__(16) int iDevDebug=0;
b498589f83c98542cb4a3967dd0bb8efa196dad0.hip
// !!! This is a file automatically generated by hipify!!! // includes, system #include <stdlib.h> // includes CUDA #include <hip/hip_runtime.h> __global__ void testKernel(float *g_idata, float *g_odata) { } //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv) { unsigned int num_threads = 32; unsigned int mem_size = sizeof(float) * num_threads; // allocate host memory float *h_idata = (float *) malloc(mem_size); // initalize the memory for (unsigned int i = 0; i < num_threads; ++i) { h_idata[i] = (float) i; } // allocate device memory float *d_idata; hipMalloc((void **) &d_idata, mem_size); // copy host memory to device hipMemcpy(d_idata, h_idata, mem_size, hipMemcpyHostToDevice); // allocate device memory for result float *d_odata; hipMalloc((void **) &d_odata, mem_size); // setup execution parameters dim3 grid(1, 1, 1); dim3 threads(num_threads, 1, 1); // execute the kernel hipLaunchKernelGGL(( testKernel), dim3(grid), dim3(threads), mem_size , 0, d_idata, d_odata); // allocate mem for the result on host side float *h_odata = (float *) malloc(mem_size); // copy result from device to host hipMemcpy(h_odata, d_odata, sizeof(float) * num_threads, hipMemcpyDeviceToHost); // cleanup memory free(h_idata); free(h_odata); hipFree(d_idata); hipFree(d_odata); exit(EXIT_SUCCESS); }
b498589f83c98542cb4a3967dd0bb8efa196dad0.cu
// includes, system #include <stdlib.h> // includes CUDA #include <cuda_runtime.h> __global__ void testKernel(float *g_idata, float *g_odata) { } //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv) { unsigned int num_threads = 32; unsigned int mem_size = sizeof(float) * num_threads; // allocate host memory float *h_idata = (float *) malloc(mem_size); // initalize the memory for (unsigned int i = 0; i < num_threads; ++i) { h_idata[i] = (float) i; } // allocate device memory float *d_idata; cudaMalloc((void **) &d_idata, mem_size); // copy host memory to device cudaMemcpy(d_idata, h_idata, mem_size, cudaMemcpyHostToDevice); // allocate device memory for result float *d_odata; cudaMalloc((void **) &d_odata, mem_size); // setup execution parameters dim3 grid(1, 1, 1); dim3 threads(num_threads, 1, 1); // execute the kernel testKernel<<< grid, threads, mem_size >>>(d_idata, d_odata); // allocate mem for the result on host side float *h_odata = (float *) malloc(mem_size); // copy result from device to host cudaMemcpy(h_odata, d_odata, sizeof(float) * num_threads, cudaMemcpyDeviceToHost); // cleanup memory free(h_idata); free(h_odata); cudaFree(d_idata); cudaFree(d_odata); exit(EXIT_SUCCESS); }
0f4f452292d422b7b52a3d9824e13d72a1c38c1f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/ATen.h> #include <ATen/hip/HIPApplyUtils.cuh> #include <ATen/hip/HIPContext.h> #include <ATen/NativeFunctions.h> #include <ATen/TensorUtils.h> #include <ATen/Utils.h> #include <c10/util/Exception.h> #include <THH/THHAtomics.cuh> #include <THH/THHGeneral.h> #include <THH/THHNumerics.cuh> #include <THH/THHDeviceUtils.cuh> #include <algorithm> #include <cfloat> #include <cmath> namespace at { namespace native { __host__ __device__ __forceinline__ int imin(int a, int b) { return a > b ? b : a; } __host__ __device__ __forceinline__ int imax(int a, int b) { return a > b ? a : b; } namespace { template <typename scalar_t> __global__ void replication_pad_forward_kernel1d( PackedTensorAccessor64<scalar_t, 3> input, PackedTensorAccessor64<scalar_t, 3> output, int padL, int padR, int y_shift, int z_shift) { int outputPointId = threadIdx.x + blockIdx.x * blockDim.x; int plane = blockIdx.y + y_shift; int batch = blockIdx.z + z_shift; if (outputPointId >= output.size(2)) { return; } int outputPointX = outputPointId % output.size(2); int iStartX = imax(0, -padL); int oStartX = imax(0, padL); int inputPointX = imin(imax(padL, outputPointX), input.size(2) + padL - 1) - oStartX + iStartX; scalar_t valueToCopy = input[batch][plane][inputPointX]; output[batch][plane][outputPointX] = valueToCopy; } template <typename scalar_t> __global__ void replication_pad_backward_kernel( PackedTensorAccessor64<scalar_t, 3> gradInput, PackedTensorAccessor64<scalar_t, 3> gradOutput, int padL, int padR, int y_shift, int z_shift) { int outputPointId = threadIdx.x + blockIdx.x * blockDim.x; int plane = blockIdx.y + y_shift; int batch = blockIdx.z + z_shift; if (outputPointId >= gradOutput.size(2)) { return; } int outputPointX = outputPointId % gradOutput.size(2); int iStartX = imax(0, -padL); int oStartX = imax(0, padL); int inputPointX = imin(imax(padL, outputPointX), gradInput.size(2) + padL - 1) - 
oStartX + iStartX; scalar_t valueToCopy = gradOutput[batch][plane][outputPointX]; gpuAtomicAdd(&gradInput[batch][plane][inputPointX], valueToCopy); } template <typename scalar_t> __global__ void replication_pad_forward_kernel2d( PackedTensorAccessor64<scalar_t, 4> input, PackedTensorAccessor64<scalar_t, 4> output, int padT, int padB, int padL, int padR, int y_shift, int z_shift) { int outputPointId = threadIdx.x + blockIdx.x * blockDim.x; int plane = blockIdx.y + y_shift; int batch = blockIdx.z + z_shift; if (outputPointId >= output.size(2) * output.size(3)) { return; } int outputPointX = outputPointId % output.size(3); int outputPointY = outputPointId / output.size(3); int iStartX = imax(0, -padL); int iStartY = imax(0, -padT); int oStartX = imax(0, padL); int oStartY = imax(0, padT); int inputPointX = imin(imax(padL, outputPointX), input.size(3) + padL - 1) - oStartX + iStartX; int inputPointY = imin(imax(padT, outputPointY), input.size(2) + padT - 1) - oStartY + iStartY; scalar_t valueToCopy = input[batch][plane][inputPointY][inputPointX]; output[batch][plane][outputPointY][outputPointX] = valueToCopy; } template <typename scalar_t> __global__ void replication_pad_backward_kernel( PackedTensorAccessor64<scalar_t, 4> gradInput, PackedTensorAccessor64<scalar_t, 4> gradOutput, int padT, int padB, int padL, int padR, int y_shift, int z_shift) { int outputPointId = threadIdx.x + blockIdx.x * blockDim.x; int plane = blockIdx.y + y_shift; int batch = blockIdx.z + z_shift; if (outputPointId >= gradOutput.size(2) * gradOutput.size(3)) { return; } int outputPointX = outputPointId % gradOutput.size(3); int outputPointY = outputPointId / gradOutput.size(3); int iStartX = imax(0, -padL); int iStartY = imax(0, -padT); int oStartX = imax(0, padL); int oStartY = imax(0, padT); int inputPointX = imin(imax(padL, outputPointX), gradInput.size(3) + padL - 1) - oStartX + iStartX; int inputPointY = imin(imax(padT, outputPointY), gradInput.size(2) + padT - 1) - oStartY + iStartY; 
scalar_t valueToCopy = gradOutput[batch][plane][outputPointY][outputPointX]; gpuAtomicAdd(&gradInput[batch][plane][inputPointY][inputPointX], valueToCopy); } template <typename scalar_t> __global__ void replication_pad_forward_kernel3d( PackedTensorAccessor64<scalar_t, 5> input, PackedTensorAccessor64<scalar_t, 5> output, int pfront, int pback, int ptop, int pbottom, int pleft, int pright, int y_shift, int z_shift) { int outputPointId = threadIdx.x + blockIdx.x * blockDim.x; int plane = blockIdx.y + y_shift; int batch = blockIdx.z + z_shift; if (outputPointId >= (output.size(2) * output.size(3) * output.size(4))) { return; } int outputPointX = outputPointId % output.size(4); int outputPointY = (outputPointId / output.size(4)) % output.size(3); int outputPointZ = outputPointId / (output.size(3) * output.size(4)); int iStartX = imax(0, -pleft); int iStartY = imax(0, -ptop); int iStartZ = imax(0, -pfront); int oStartX = imax(0, pleft); int oStartY = imax(0, ptop); int oStartZ = imax(0, pfront); int inputPointX = imin(imax(pleft, outputPointX), input.size(4) + pleft - 1) - oStartX + iStartX; int inputPointY = imin(imax(ptop, outputPointY), input.size(3) + ptop - 1) - oStartY + iStartY; int inputPointZ = imin(imax(pfront, outputPointZ), input.size(2) + pfront - 1) - oStartZ + iStartZ; scalar_t valueToCopy = input[batch][plane][inputPointZ][inputPointY][inputPointX]; output[batch][plane][outputPointZ][outputPointY][outputPointX] = valueToCopy; } template <typename scalar_t> __global__ void replication_pad_backward_kernel( PackedTensorAccessor64<scalar_t, 5> gradInput, PackedTensorAccessor64<scalar_t, 5> gradOutput, int pfront, int pback, int ptop, int pbottom, int pleft, int pright, int y_shift, int z_shift) { int outputPointId = threadIdx.x + blockIdx.x * blockDim.x; int plane = blockIdx.y + y_shift; int batch = blockIdx.z + z_shift; if (outputPointId >= (gradOutput.size(2) * gradOutput.size(3) * gradOutput.size(4))) { return; } int outputPointX = outputPointId % 
gradOutput.size(4); int outputPointY = (outputPointId / gradOutput.size(4)) % gradOutput.size(3); int outputPointZ = outputPointId / (gradOutput.size(3) * gradOutput.size(4)); int iStartX = imax(0, -pleft); int iStartY = imax(0, -ptop); int iStartZ = imax(0, -pfront); int oStartX = imax(0, pleft); int oStartY = imax(0, ptop); int oStartZ = imax(0, pfront); int inputPointX = imin(imax(pleft, outputPointX), gradInput.size(4) + pleft - 1) - oStartX + iStartX; int inputPointY = imin(imax(ptop, outputPointY), gradInput.size(3) + ptop - 1) - oStartY + iStartY; int inputPointZ = imin(imax(pfront, outputPointZ), gradInput.size(2) + pfront - 1) - oStartZ + iStartZ; scalar_t valueToCopy = gradOutput[batch][plane][outputPointZ][outputPointY][outputPointX]; gpuAtomicAdd(&gradInput[batch][plane][inputPointZ][inputPointY][inputPointX], valueToCopy); } void replication_pad1d_out_cuda_template( Tensor& output, const Tensor& input, IntArrayRef paddingSize) { TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(input), "input tensor must fit into 32-bit index math"); TORCH_CHECK(paddingSize.size() == 2, "padding Size is expected to be 2"); int padL = paddingSize[0]; int padR = paddingSize[1]; int planeDim = 0; int dimw = 1; int numBatch = 1; int numInputDims = input.ndimension(); TORCH_CHECK( (numInputDims == 2 && input.size(0) != 0 && input.size(1) != 0) || (numInputDims == 3 && input.size(1) != 0 && input.size(2) != 0), "Expected 2D or 3D (batch mode) tensor with possibly 0 batch size and other non-zero dimensions for input, but got: ", input.sizes()); if (numInputDims == 3) { numBatch = input.size(0); planeDim++; dimw++; } int numPlanes = input.size(planeDim); int inputW = input.size(dimw); int outputW = inputW + padL + padR; TORCH_CHECK(outputW >= 1, "input (W: ", inputW, ")is too small." 
" Calculated output W: ", outputW); if (numInputDims == 2) { output.resize_({numPlanes, outputW}); } else { output.resize_({numBatch, numPlanes, outputW}); } if (input.numel() == 0) { return; } AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(kHalf, input.scalar_type(), "replication_pad1d_cuda", [&] { at::Tensor input_ = input; at::Tensor output_ = output; if (numInputDims == 2) { input_ = input.unsqueeze(0); output_ = output.unsqueeze(0); } auto devInput = input_.packed_accessor64<scalar_t, 3>(); auto devOutput = output_.packed_accessor64<scalar_t, 3>(); int64_t outputPlaneSize = devOutput.size(2); int64_t size1 = devOutput.size(1); int64_t size0 = devOutput.size(0); for (int64_t block_y = 0; block_y < size1; block_y += 65535) { int64_t block_y_size = ::min(size1 - block_y, static_cast<int64_t>(65535)); for (int64_t block_z = 0; block_z < size0; block_z += 65535) { int64_t block_z_size = ::min(size0 - block_z, static_cast<int64_t>(65535)); dim3 gridSize(THCCeilDiv(outputPlaneSize, static_cast<int64_t>(256)), block_y_size, block_z_size); dim3 blockSize(outputPlaneSize > 256 ? 
256 : outputPlaneSize); hipLaunchKernelGGL(( replication_pad_forward_kernel1d) , dim3(gridSize), dim3(blockSize), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), devInput, devOutput, padL, padR, block_y, block_z); C10_HIP_KERNEL_LAUNCH_CHECK(); } } } ); } void replication_pad1d_backward_out_cuda_template( Tensor& gradInput, const Tensor& gradOutput, const Tensor& input, IntArrayRef paddingSize) { TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(input), "input tensor must fit into 32-bit index math"); TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(gradOutput), "output gradient tensor must fit into 32-bit index math"); TORCH_CHECK(paddingSize.size() == 2, "padding Size is expected to be 2"); int padL = paddingSize[0]; int padR = paddingSize[1]; int planeDim = 0; int dimw = 1; int numInputDims = input.ndimension(); if (numInputDims == 3) { planeDim++; dimw++; } int iwidth = input.size(dimw); int owidth = iwidth + padL + padR; TORCH_CHECK(owidth == gradOutput.size(dimw), "gradOutput width unexpected. 
Expected: ", owidth, ", Got: ", gradOutput.size(dimw)); gradInput.resize_as_(input); if (gradInput.numel() == 0) { return; } gradInput.zero_(); AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(kHalf, input.scalar_type(), "replication_pad1d_backward_cuda", [&] { auto gradInput_ = gradInput; auto gradOutput_ = gradOutput; if (numInputDims == 2) { gradInput_ = gradInput.unsqueeze(0); gradOutput_ = gradOutput.unsqueeze(0); } auto devGradInput = gradInput_.packed_accessor64<scalar_t, 3>(); auto devGradOutput = gradOutput_.packed_accessor64<scalar_t, 3>(); int64_t outputPlaneSize = devGradOutput.size(2); int64_t size1 = devGradOutput.size(1); int64_t size0 = devGradOutput.size(0); for (int64_t block_y = 0; block_y < size1; block_y += 65535) { int64_t block_y_size = ::min(size1 - block_y, static_cast<int64_t>(65535)); for (int64_t block_z = 0; block_z < size0; block_z += 65535) { int64_t block_z_size = ::min(size0 - block_z, static_cast<int64_t>(65535)); dim3 gridSize(THCCeilDiv(outputPlaneSize, static_cast<int64_t>(256)), block_y_size, block_z_size); dim3 blockSize(outputPlaneSize > 256 ? 
256 : outputPlaneSize); hipLaunchKernelGGL(( replication_pad_backward_kernel) , dim3(gridSize), dim3(blockSize), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), devGradInput, devGradOutput, padL, padR, block_y, block_z); C10_HIP_KERNEL_LAUNCH_CHECK(); } } }); } void replication_pad2d_out_cuda_template( Tensor& output, const Tensor& input, IntArrayRef paddingSize) { TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(input), "input tensor must fit into 32-bit index math"); TORCH_CHECK(paddingSize.size() == 4, "padding Size is expected to be 4"); int padL = paddingSize[0]; int padR = paddingSize[1]; int padT = paddingSize[2]; int padB = paddingSize[3]; int planeDim = 0; int dimh = 1; int dimw = 2; int numBatch = 1; int numInputDims = input.dim(); bool valid_dims = input.size(1) != 0 && input.size(2) != 0; TORCH_CHECK( (numInputDims == 3 && input.size(0) != 0 && valid_dims) || (numInputDims == 4 && valid_dims && input.size(3) != 0), "Expected 3D or 4D (batch mode) tensor with possibly 0 batch size and other non-zero dimensions for input, but got: ", input.sizes()); if (numInputDims == 4) { numBatch = input.size(0); planeDim++; dimh++; dimw++; } int numPlanes = input.size(planeDim); int inputH = input.size(dimh); int inputW = input.size(dimw); int outputH = inputH + padT + padB; int outputW = inputW + padL + padR; TORCH_CHECK(outputW >= 1 || outputH >= 1, "input (H: ", inputH, ", W: ", inputW, ") is too small." 
" Calculated output H: ", outputH, " W: ", outputW); if (numInputDims == 3) { output.resize_({numPlanes, outputH, outputW}); } else { output.resize_({numBatch, numPlanes, outputH, outputW}); } if (input.numel() == 0) { return; } AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(kHalf, input.scalar_type(), "replication_pad2d_cuda", [&] { at::Tensor input_ = input; at::Tensor output_ = output; if (numInputDims == 3) { input_ = input.unsqueeze(0); output_ = output.unsqueeze(0); } auto devInput = input_.packed_accessor64<scalar_t, 4>(); auto devOutput = output_.packed_accessor64<scalar_t, 4>(); int64_t outputPlaneSize = devOutput.size(2) * devOutput.size(3); int64_t size1 = devOutput.size(1); int64_t size0 = devOutput.size(0); for (int64_t block_y = 0; block_y < size1; block_y += 65535) { int64_t block_y_size = ::min(size1 - block_y, static_cast<int64_t>(65535)); for (int64_t block_z = 0; block_z < size0; block_z += 65535) { int64_t block_z_size = ::min(size0 - block_z, static_cast<int64_t>(65535)); dim3 gridSize(THCCeilDiv(outputPlaneSize, static_cast<int64_t>(256)), block_y_size, block_z_size); dim3 blockSize(outputPlaneSize > 256 ? 
256 : outputPlaneSize); hipLaunchKernelGGL(( replication_pad_forward_kernel2d) , dim3(gridSize), dim3(blockSize), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), devInput, devOutput, padT, padB, padL, padR, block_y, block_z); C10_HIP_KERNEL_LAUNCH_CHECK(); } } } ); } void replication_pad2d_backward_out_cuda_template( Tensor& gradInput, const Tensor& gradOutput, const Tensor& input, IntArrayRef paddingSize) { TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(input), "input tensor must fit into 32-bit index math"); TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(gradOutput), "output gradient tensor must fit into 32-bit index math"); TORCH_CHECK(paddingSize.size() == 4, "padding Size is expected to be 4"); int padL = paddingSize[0]; int padR = paddingSize[1]; int padT = paddingSize[2]; int padB = paddingSize[3]; int planeDim = 0; int dimh = 1; int dimw = 2; int numInputDims = input.dim(); if (numInputDims == 4) { planeDim++; dimh++; dimw++; } int iheight = input.size(dimh); int iwidth = input.size(dimw); int oheight = iheight + padT + padB; int owidth = iwidth + padL + padR; TORCH_CHECK(owidth == gradOutput.size(dimw), "gradOutput width unexpected. Expected: ", owidth, ", Got: ", gradOutput.size(dimw)); TORCH_CHECK(oheight == gradOutput.size(dimh), "gradOutput height unexpected. 
Expected: ", oheight, ", Got: ", gradOutput.size(dimh)); gradInput.resize_as_(input); if (gradInput.numel() == 0) { return; } gradInput.zero_(); AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(kHalf, input.scalar_type(), "replication_pad2d_backward_cuda", [&] { auto gradInput_ = gradInput; auto gradOutput_ = gradOutput; if (numInputDims == 3) { gradInput_ = gradInput.unsqueeze(0); gradOutput_ = gradOutput.unsqueeze(0); } auto devGradInput = gradInput_.packed_accessor64<scalar_t, 4>(); auto devGradOutput = gradOutput_.packed_accessor64<scalar_t, 4>(); int64_t outputPlaneSize = devGradOutput.size(2) * devGradOutput.size(3); int64_t size1 = devGradOutput.size(1); int64_t size0 = devGradOutput.size(0); for (int64_t block_y = 0; block_y < size1; block_y += 65535) { int64_t block_y_size = ::min(size1 - block_y, static_cast<int64_t>(65535)); for (int64_t block_z = 0; block_z < size0; block_z += 65535) { int64_t block_z_size = ::min(size0 - block_z, static_cast<int64_t>(65535)); dim3 gridSize(THCCeilDiv(outputPlaneSize, static_cast<int64_t>(256)), block_y_size, block_z_size); dim3 blockSize(outputPlaneSize > 256 ? 
256 : outputPlaneSize); hipLaunchKernelGGL(( replication_pad_backward_kernel) , dim3(gridSize), dim3(blockSize), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), devGradInput, devGradOutput, padT, padB, padL, padR, block_y, block_z); C10_HIP_KERNEL_LAUNCH_CHECK(); } } } ); } static inline void shapeCheck3d( const Tensor& input, int pleft, int pright, int ptop, int pbottom, int pfront, int pback) { TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(input), "input tensor must fit into 32-bit index math"); int numInputDims = input.dim(); bool valid_dims = input.size(1) != 0 && input.size(2) != 0 && input.size(3) != 0; TORCH_CHECK( (numInputDims == 4 && input.size(0) != 0 && valid_dims) || (numInputDims == 5 && valid_dims && input.size(4) != 0), "Expected 4D or 5D (batch mode) tensor with possibly 0 batch size and other non-zero dimensions for input, but got: ", input.sizes()); int planeDim = 0; int dimd = 1; int dimh = 2; int dimw = 3; if (numInputDims == 5) { planeDim++; dimd++; dimh++; dimw++; } int numPlanes = input.size(planeDim); int idepth = input.size(dimd); int iheight = input.size(dimh); int iwidth = input.size(dimw); int odepth = idepth + pfront + pback; int oheight = iheight + ptop + pbottom; int owidth = iwidth + pleft + pright; TORCH_CHECK(owidth >= 1 || oheight >= 1 || odepth >= 1, "input (D: ", idepth, " H: ", iheight, ", W: ", iwidth, ") is too small." 
" Calculated output D: ", odepth, " H: ", oheight, " W: ", owidth); } static inline void shapeAndGradOutputCheck3d( const Tensor& input, const Tensor& gradOutput, int pleft, int pright, int ptop, int pbottom, int pfront, int pback) { TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(input), "input tensor must fit into 32-bit index math"); int numInputDims = input.dim(); bool valid_dims = input.size(1) != 0 && input.size(2) != 0 && input.size(3) != 0; TORCH_CHECK( (numInputDims == 4 && valid_dims) || (numInputDims == 5 && valid_dims && input.size(4) != 0), "Expected 4D or 5D (batch mode) tensor with possibly 0 batch size and other non-zero dimensions for input, but got: ", input.sizes()); int planeDim = 0; int dimd = 1; int dimh = 2; int dimw = 3; if (numInputDims == 5) { planeDim++; dimd++; dimh++; dimw++; } int numPlanes = input.size(planeDim); int idepth = input.size(dimd); int iheight = input.size(dimh); int iwidth = input.size(dimw); int odepth = idepth + pfront + pback; int oheight = iheight + ptop + pbottom; int owidth = iwidth + pleft + pright; TORCH_CHECK(owidth >= 1 || oheight >= 1 || odepth >= 1, "input (D: ", idepth, " H: ", iheight, ", W: ", iwidth, ") is too small." " Calculated output D: ", odepth, " H: ", oheight, " W: ", owidth); TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(gradOutput), "output gradient tensor must fit into 32-bit index math"); TORCH_CHECK(numPlanes == gradOutput.size(planeDim), "gradOutput width unexpected. Expected: ", numPlanes, ", Got: ", gradOutput.size(planeDim)); TORCH_CHECK(owidth == gradOutput.size(dimw), "gradOutput width unexpected. Expected: ", owidth, ", Got: ", gradOutput.size(dimw)); TORCH_CHECK(oheight == gradOutput.size(dimh), "gradOutput height unexpected. Expected: ", oheight, ", Got: ", gradOutput.size(dimh)); TORCH_CHECK(odepth == gradOutput.size(dimd), "gradOutput depth unexpected. 
Expected: ", odepth, ", Got: ", gradOutput.size(dimd)); } void replication_pad3d_out_cuda_template( Tensor& output, const Tensor& input, IntArrayRef paddingSize) { TORCH_CHECK(paddingSize.size() == 6, "padding Size is expected to be 6"); int pleft = paddingSize[0]; int pright = paddingSize[1]; int ptop = paddingSize[2]; int pbottom = paddingSize[3]; int pfront = paddingSize[4]; int pback = paddingSize[5]; shapeCheck3d(input, pleft, pright, ptop, pbottom, pfront, pback); int planeDim = 0; int dimd = 1; int dimh = 2; int dimw = 3; int numBatch = 1; int numInputDims = input.dim(); if (numInputDims == 5) { numBatch = input.size(0); planeDim++; dimd++; dimh++; dimw++; } int numPlanes = input.size(planeDim); int inputD = input.size(dimd); int inputH = input.size(dimh); int inputW = input.size(dimw); int outputD = inputD + pfront + pback; int outputH = inputH + ptop + pbottom; int outputW = inputW + pleft + pright; if (numInputDims == 4) { output.resize_({numPlanes, outputD, outputH, outputW}); } else { output.resize_({numBatch, numPlanes, outputD, outputH, outputW}); } if (input.numel() == 0) { return; } AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(kHalf, input.scalar_type(), "replication_pad3d_cuda", [&] { at::Tensor input_ = input; at::Tensor output_ = output; if (numInputDims == 4) { auto input_ = input.unsqueeze(0); auto output_ = output.unsqueeze(0); } auto devInput = input_.packed_accessor64<scalar_t, 5>(); auto devOutput = output_.packed_accessor64<scalar_t, 5>(); int64_t outputPlaneSize = devOutput.size(2) * devOutput.size(3) * devOutput.size(4); int64_t size1 = devOutput.size(1); int64_t size0 = devOutput.size(0); for (int64_t block_y = 0; block_y < size1; block_y += 65535) { int64_t block_y_size = ::min(size1 - block_y, static_cast<int64_t>(65535)); for (int64_t block_z = 0; block_z < size0; block_z += 65535) { int64_t block_z_size = ::min(size0 - block_z, static_cast<int64_t>(65535)); dim3 gridSize(THCCeilDiv(outputPlaneSize, static_cast<int64_t>(256)), 
block_y_size, block_z_size); dim3 blockSize(outputPlaneSize > 256 ? 256 : outputPlaneSize); hipLaunchKernelGGL(( replication_pad_forward_kernel3d) , dim3(gridSize), dim3(blockSize), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), devInput, devOutput, pfront, pback, ptop, pbottom, pleft, pright, block_y, block_z); C10_HIP_KERNEL_LAUNCH_CHECK(); } } } ); } void replication_pad3d_backward_out_cuda_template( Tensor& gradInput, const Tensor& gradOutput, const Tensor& input, IntArrayRef paddingSize) { TORCH_CHECK(paddingSize.size() == 6, "padding Size is expected to be 6"); int pleft = paddingSize[0]; int pright = paddingSize[1]; int ptop = paddingSize[2]; int pbottom = paddingSize[3]; int pfront = paddingSize[4]; int pback = paddingSize[5]; shapeAndGradOutputCheck3d(input, gradOutput, pleft, pright, ptop, pbottom, pfront, pback); int planeDim = 0; int dimd = 1; int dimh = 2; int dimw = 3; int numInputDims = input.dim(); if (numInputDims == 5) { planeDim++; dimd++; dimh++; dimw++; } gradInput.resize_as_(input); if (gradInput.numel() == 0) { return; } gradInput.zero_(); AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(kHalf, input.scalar_type(), "replication_pad3d_backward_cuda", [&] { auto gradInput_ = gradInput; auto gradOutput_ = gradOutput; if (numInputDims == 4) { gradInput_ = gradInput.unsqueeze(0); gradOutput_ = gradOutput.unsqueeze(0); } auto devGradInput = gradInput_.packed_accessor64<scalar_t, 5>(); auto devGradOutput = gradOutput_.packed_accessor64<scalar_t, 5>(); int64_t outputPlaneSize = devGradOutput.size(2) * devGradOutput.size(3) * devGradOutput.size(4); int64_t size1 = devGradOutput.size(1); int64_t size0 = devGradOutput.size(0); for (int64_t block_y = 0; block_y < size1; block_y += 65535) { int64_t block_y_size = ::min(size1 - block_y, static_cast<int64_t>(65535)); for (int64_t block_z = 0; block_z < size0; block_z += 65535) { int64_t block_z_size = ::min(size0 - block_z, static_cast<int64_t>(65535)); dim3 gridSize(THCCeilDiv(outputPlaneSize, 
static_cast<int64_t>(256)), block_y_size, block_z_size); dim3 blockSize(outputPlaneSize > 256 ? 256 : outputPlaneSize); hipLaunchKernelGGL(( replication_pad_backward_kernel) , dim3(gridSize), dim3(blockSize), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), devGradInput, devGradOutput, pfront, pback, ptop, pbottom, pleft, pright, block_y, block_z); C10_HIP_KERNEL_LAUNCH_CHECK(); } } } ); } } // namespace Tensor& replication_pad1d_out_cuda(const Tensor& input, IntArrayRef paddingSize, Tensor& output) { replication_pad1d_out_cuda_template( output, input, paddingSize); return output; } Tensor replication_pad1d_cuda( const Tensor& input, IntArrayRef paddingSize) { auto output = at::empty({0}, input.options()); replication_pad1d_out_cuda_template( output, input, paddingSize); return output; } Tensor& replication_pad1d_backward_out_cuda(const Tensor& gradOutput, const Tensor& input, IntArrayRef paddingSize, Tensor& gradInput) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("replication_pad1d_backward_out_cuda"); replication_pad1d_backward_out_cuda_template( gradInput, gradOutput, input, paddingSize); return gradInput; } Tensor replication_pad1d_backward_cuda( const Tensor& gradOutput, const Tensor& input, IntArrayRef paddingSize) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("replication_pad1d_backward_cuda"); auto gradInput = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); replication_pad1d_backward_out_cuda_template( gradInput, gradOutput, input, paddingSize); return gradInput; } Tensor& replication_pad2d_out_cuda(const Tensor& input, IntArrayRef paddingSize, Tensor& output) { replication_pad2d_out_cuda_template( output, input, paddingSize); return output; } Tensor replication_pad2d_cuda( const Tensor& input, IntArrayRef paddingSize) { auto output = at::empty({0}, 
input.options()); replication_pad2d_out_cuda_template( output, input, paddingSize); return output; } Tensor& replication_pad2d_backward_out_cuda(const Tensor& gradOutput, const Tensor& input, IntArrayRef paddingSize, Tensor& gradInput) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("replication_pad2d_backward_out_cuda"); replication_pad2d_backward_out_cuda_template( gradInput, gradOutput, input, paddingSize); return gradInput; } Tensor replication_pad2d_backward_cuda( const Tensor& gradOutput, const Tensor& input, IntArrayRef paddingSize) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("replication_pad2d_backward_cuda"); auto gradInput = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); replication_pad2d_backward_out_cuda_template( gradInput, gradOutput, input, paddingSize); return gradInput; } Tensor& replication_pad3d_out_cuda(const Tensor& input, IntArrayRef paddingSize, Tensor& output) { replication_pad3d_out_cuda_template( output, input, paddingSize); return output; } Tensor replication_pad3d_cuda( const Tensor& input, IntArrayRef paddingSize) { auto output = at::empty({0}, input.options()); replication_pad3d_out_cuda_template( output, input, paddingSize); return output; } Tensor& replication_pad3d_backward_out_cuda(const Tensor& gradOutput, const Tensor& input, IntArrayRef paddingSize, Tensor& gradInput) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("replication_pad3d_backward_out_cuda"); replication_pad3d_backward_out_cuda_template( gradInput, gradOutput, input, paddingSize); return gradInput; } Tensor replication_pad3d_backward_cuda( const Tensor& gradOutput, const Tensor& input, IntArrayRef paddingSize) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of 
atomicAdd usage globalContext().alertNotDeterministic("replication_pad3d_backward_cuda"); auto gradInput = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); replication_pad3d_backward_out_cuda_template( gradInput, gradOutput, input, paddingSize); return gradInput; } } // at::native } // at
0f4f452292d422b7b52a3d9824e13d72a1c38c1f.cu
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/NativeFunctions.h>
#include <ATen/TensorUtils.h>
#include <ATen/Utils.h>
#include <c10/util/Exception.h>
#include <THC/THCAtomics.cuh>
#include <THC/THCGeneral.h>
#include <THC/THCNumerics.cuh>
#include <THC/THCDeviceUtils.cuh>

#include <algorithm>
#include <cfloat>
#include <cmath>

namespace at {
namespace native {

// Branchless min/max helpers usable from both host and device code.
__host__ __device__ __forceinline__ int imin(int a, int b) {
  return a > b ? b : a;
}

__host__ __device__ __forceinline__ int imax(int a, int b) {
  return a > b ? a : b;
}

namespace {

// Forward 1-D replication padding.
// Launch layout: grid.x covers the output width (one thread per output
// element, guarded below), grid.y spans a chunk of planes (offset y_shift),
// grid.z spans a chunk of batches (offset z_shift).  The shifts exist because
// grid.y/grid.z are limited to 65535 blocks, so the host loops over chunks.
template <typename scalar_t>
__global__ void replication_pad_forward_kernel1d(
    PackedTensorAccessor64<scalar_t, 3> input,
    PackedTensorAccessor64<scalar_t, 3> output,
    int padL, int padR, int y_shift, int z_shift) {
  int outputPointId = threadIdx.x + blockIdx.x * blockDim.x;
  int plane = blockIdx.y + y_shift;
  int batch = blockIdx.z + z_shift;
  if (outputPointId >= output.size(2)) {
    return;  // tail guard: grid.x * blockDim.x may exceed the output width
  }
  int outputPointX = outputPointId % output.size(2);

  int iStartX = imax(0, -padL);
  int oStartX = imax(0, padL);
  // Clamp the output coordinate into the valid input range (replication).
  int inputPointX =
      imin(imax(padL, outputPointX), input.size(2) + padL - 1) -
      oStartX + iStartX;
  scalar_t valueToCopy = input[batch][plane][inputPointX];
  output[batch][plane][outputPointX] = valueToCopy;
}

// Backward 1-D replication padding (overload selected by accessor rank 3).
// Many output positions map to the same input position at the borders, so
// gradients are accumulated with atomics -> nondeterministic float sums.
template <typename scalar_t>
__global__ void replication_pad_backward_kernel(
    PackedTensorAccessor64<scalar_t, 3> gradInput,
    PackedTensorAccessor64<scalar_t, 3> gradOutput,
    int padL, int padR, int y_shift, int z_shift) {
  int outputPointId = threadIdx.x + blockIdx.x * blockDim.x;
  int plane = blockIdx.y + y_shift;
  int batch = blockIdx.z + z_shift;
  if (outputPointId >= gradOutput.size(2)) {
    return;
  }
  int outputPointX = outputPointId % gradOutput.size(2);

  int iStartX = imax(0, -padL);
  int oStartX = imax(0, padL);
  int inputPointX =
      imin(imax(padL, outputPointX), gradInput.size(2) + padL - 1) -
      oStartX + iStartX;
  scalar_t valueToCopy = gradOutput[batch][plane][outputPointX];
  gpuAtomicAdd(&gradInput[batch][plane][inputPointX], valueToCopy);
}

// Forward 2-D replication padding.  Same launch layout as the 1-D kernel,
// except grid.x covers the flattened H*W output plane.
template <typename scalar_t>
__global__ void replication_pad_forward_kernel2d(
    PackedTensorAccessor64<scalar_t, 4> input,
    PackedTensorAccessor64<scalar_t, 4> output,
    int padT, int padB, int padL, int padR, int y_shift, int z_shift) {
  int outputPointId = threadIdx.x + blockIdx.x * blockDim.x;
  int plane = blockIdx.y + y_shift;
  int batch = blockIdx.z + z_shift;
  if (outputPointId >= output.size(2) * output.size(3)) {
    return;
  }
  // Unflatten the plane-local id into (y, x).
  int outputPointX = outputPointId % output.size(3);
  int outputPointY = outputPointId / output.size(3);

  int iStartX = imax(0, -padL);
  int iStartY = imax(0, -padT);
  int oStartX = imax(0, padL);
  int oStartY = imax(0, padT);

  int inputPointX =
      imin(imax(padL, outputPointX), input.size(3) + padL - 1) -
      oStartX + iStartX;
  int inputPointY =
      imin(imax(padT, outputPointY), input.size(2) + padT - 1) -
      oStartY + iStartY;

  scalar_t valueToCopy = input[batch][plane][inputPointY][inputPointX];
  output[batch][plane][outputPointY][outputPointX] = valueToCopy;
}

// Backward 2-D replication padding (overload selected by accessor rank 4).
template <typename scalar_t>
__global__ void replication_pad_backward_kernel(
    PackedTensorAccessor64<scalar_t, 4> gradInput,
    PackedTensorAccessor64<scalar_t, 4> gradOutput,
    int padT, int padB, int padL, int padR, int y_shift, int z_shift) {
  int outputPointId = threadIdx.x + blockIdx.x * blockDim.x;
  int plane = blockIdx.y + y_shift;
  int batch = blockIdx.z + z_shift;
  if (outputPointId >= gradOutput.size(2) * gradOutput.size(3)) {
    return;
  }
  int outputPointX = outputPointId % gradOutput.size(3);
  int outputPointY = outputPointId / gradOutput.size(3);

  int iStartX = imax(0, -padL);
  int iStartY = imax(0, -padT);
  int oStartX = imax(0, padL);
  int oStartY = imax(0, padT);

  int inputPointX =
      imin(imax(padL, outputPointX), gradInput.size(3) + padL - 1) -
      oStartX + iStartX;
  int inputPointY =
      imin(imax(padT, outputPointY), gradInput.size(2) + padT - 1) -
      oStartY + iStartY;

  scalar_t valueToCopy = gradOutput[batch][plane][outputPointY][outputPointX];
  gpuAtomicAdd(&gradInput[batch][plane][inputPointY][inputPointX], valueToCopy);
}

// Forward 3-D replication padding; grid.x covers the flattened D*H*W plane.
template <typename scalar_t>
__global__ void replication_pad_forward_kernel3d(
    PackedTensorAccessor64<scalar_t, 5> input,
    PackedTensorAccessor64<scalar_t, 5> output,
    int pfront, int pback, int ptop, int pbottom, int pleft, int pright,
    int y_shift, int z_shift) {
  int outputPointId = threadIdx.x + blockIdx.x * blockDim.x;
  int plane = blockIdx.y + y_shift;
  int batch = blockIdx.z + z_shift;
  if (outputPointId >= (output.size(2) * output.size(3) * output.size(4))) {
    return;
  }
  int outputPointX = outputPointId % output.size(4);
  int outputPointY = (outputPointId / output.size(4)) % output.size(3);
  int outputPointZ = outputPointId / (output.size(3) * output.size(4));

  int iStartX = imax(0, -pleft);
  int iStartY = imax(0, -ptop);
  int iStartZ = imax(0, -pfront);
  int oStartX = imax(0, pleft);
  int oStartY = imax(0, ptop);
  int oStartZ = imax(0, pfront);

  int inputPointX =
      imin(imax(pleft, outputPointX), input.size(4) + pleft - 1) -
      oStartX + iStartX;
  int inputPointY =
      imin(imax(ptop, outputPointY), input.size(3) + ptop - 1) -
      oStartY + iStartY;
  int inputPointZ =
      imin(imax(pfront, outputPointZ), input.size(2) + pfront - 1) -
      oStartZ + iStartZ;

  scalar_t valueToCopy =
      input[batch][plane][inputPointZ][inputPointY][inputPointX];
  output[batch][plane][outputPointZ][outputPointY][outputPointX] = valueToCopy;
}

// Backward 3-D replication padding (overload selected by accessor rank 5).
template <typename scalar_t>
__global__ void replication_pad_backward_kernel(
    PackedTensorAccessor64<scalar_t, 5> gradInput,
    PackedTensorAccessor64<scalar_t, 5> gradOutput,
    int pfront, int pback, int ptop, int pbottom, int pleft, int pright,
    int y_shift, int z_shift) {
  int outputPointId = threadIdx.x + blockIdx.x * blockDim.x;
  int plane = blockIdx.y + y_shift;
  int batch = blockIdx.z + z_shift;
  if (outputPointId >=
      (gradOutput.size(2) * gradOutput.size(3) * gradOutput.size(4))) {
    return;
  }
  int outputPointX = outputPointId % gradOutput.size(4);
  int outputPointY =
      (outputPointId / gradOutput.size(4)) % gradOutput.size(3);
  int outputPointZ =
      outputPointId / (gradOutput.size(3) * gradOutput.size(4));

  int iStartX = imax(0, -pleft);
  int iStartY = imax(0, -ptop);
  int iStartZ = imax(0, -pfront);
  int oStartX = imax(0, pleft);
  int oStartY = imax(0, ptop);
  int oStartZ = imax(0, pfront);

  int inputPointX =
      imin(imax(pleft, outputPointX), gradInput.size(4) + pleft - 1) -
      oStartX + iStartX;
  int inputPointY =
      imin(imax(ptop, outputPointY), gradInput.size(3) + ptop - 1) -
      oStartY + iStartY;
  int inputPointZ =
      imin(imax(pfront, outputPointZ), gradInput.size(2) + pfront - 1) -
      oStartZ + iStartZ;

  scalar_t valueToCopy =
      gradOutput[batch][plane][outputPointZ][outputPointY][outputPointX];
  gpuAtomicAdd(
      &gradInput[batch][plane][inputPointZ][inputPointY][inputPointX],
      valueToCopy);
}

// Host driver for 1-D forward padding.  Accepts a 2-D (C, W) or 3-D
// (N, C, W) input; paddingSize is {padL, padR}.  Resizes `output` and
// launches the kernel in (<=65535)-sized plane/batch chunks because
// gridDim.y and gridDim.z are capped at 65535.
void replication_pad1d_out_cuda_template(
    Tensor& output,
    const Tensor& input,
    IntArrayRef paddingSize) {
  TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(input),
      "input tensor must fit into 32-bit index math");
  TORCH_CHECK(paddingSize.size() == 2, "padding Size is expected to be 2");

  int padL = paddingSize[0];
  int padR = paddingSize[1];
  int planeDim = 0;
  int dimw = 1;
  int numBatch = 1;

  int numInputDims = input.ndimension();
  TORCH_CHECK(
      (numInputDims == 2 && input.size(0) != 0 && input.size(1) != 0) ||
      (numInputDims == 3 && input.size(1) != 0 && input.size(2) != 0),
      "Expected 2D or 3D (batch mode) tensor with possibly 0 batch size and other non-zero dimensions for input, but got: ",
      input.sizes());

  if (numInputDims == 3) {
    numBatch = input.size(0);
    planeDim++;
    dimw++;
  }

  int numPlanes = input.size(planeDim);
  int inputW = input.size(dimw);
  int outputW = inputW + padL + padR;

  TORCH_CHECK(outputW >= 1,
      "input (W: ", inputW, ")is too small."
      " Calculated output W: ", outputW);

  if (numInputDims == 2) {
    output.resize_({numPlanes, outputW});
  } else {
    output.resize_({numBatch, numPlanes, outputW});
  }

  if (input.numel() == 0) {
    return;  // nothing to copy; output already has the right (empty) shape
  }

  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(kHalf,
      input.scalar_type(), "replication_pad1d_cuda", [&] {
        // Normalize to batched (N, C, W) so the kernel sees rank-3 accessors.
        at::Tensor input_ = input;
        at::Tensor output_ = output;
        if (numInputDims == 2) {
          input_ = input.unsqueeze(0);
          output_ = output.unsqueeze(0);
        }

        auto devInput = input_.packed_accessor64<scalar_t, 3>();
        auto devOutput = output_.packed_accessor64<scalar_t, 3>();

        int64_t outputPlaneSize = devOutput.size(2);
        int64_t size1 = devOutput.size(1);
        int64_t size0 = devOutput.size(0);

        for (int64_t block_y = 0; block_y < size1; block_y += 65535) {
          int64_t block_y_size =
              std::min(size1 - block_y, static_cast<int64_t>(65535));
          for (int64_t block_z = 0; block_z < size0; block_z += 65535) {
            int64_t block_z_size =
                std::min(size0 - block_z, static_cast<int64_t>(65535));

            dim3 gridSize(
                THCCeilDiv(outputPlaneSize, static_cast<int64_t>(256)),
                block_y_size,
                block_z_size);
            dim3 blockSize(outputPlaneSize > 256 ? 256 : outputPlaneSize);

            replication_pad_forward_kernel1d<<<gridSize, blockSize, 0,
                at::cuda::getCurrentCUDAStream()>>>(
                devInput, devOutput, padL, padR, block_y, block_z);
            C10_CUDA_KERNEL_LAUNCH_CHECK();
          }
        }
      });
}

// Host driver for 1-D backward padding.  Zero-fills gradInput and scatters
// gradOutput back with atomic adds (nondeterministic; callers alert).
void replication_pad1d_backward_out_cuda_template(
    Tensor& gradInput,
    const Tensor& gradOutput,
    const Tensor& input,
    IntArrayRef paddingSize) {
  TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(input),
      "input tensor must fit into 32-bit index math");
  TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(gradOutput),
      "output gradient tensor must fit into 32-bit index math");
  TORCH_CHECK(paddingSize.size() == 2, "padding Size is expected to be 2");

  int padL = paddingSize[0];
  int padR = paddingSize[1];
  int planeDim = 0;
  int dimw = 1;

  int numInputDims = input.ndimension();
  if (numInputDims == 3) {
    planeDim++;
    dimw++;
  }
  int iwidth = input.size(dimw);
  int owidth = iwidth + padL + padR;

  TORCH_CHECK(owidth == gradOutput.size(dimw),
      "gradOutput width unexpected. Expected: ", owidth, ", Got: ",
      gradOutput.size(dimw));

  gradInput.resize_as_(input);
  if (gradInput.numel() == 0) {
    return;
  }
  gradInput.zero_();

  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(kHalf,
      input.scalar_type(), "replication_pad1d_backward_cuda", [&] {
        auto gradInput_ = gradInput;
        auto gradOutput_ = gradOutput;
        if (numInputDims == 2) {
          gradInput_ = gradInput.unsqueeze(0);
          gradOutput_ = gradOutput.unsqueeze(0);
        }
        auto devGradInput = gradInput_.packed_accessor64<scalar_t, 3>();
        auto devGradOutput = gradOutput_.packed_accessor64<scalar_t, 3>();

        int64_t outputPlaneSize = devGradOutput.size(2);
        int64_t size1 = devGradOutput.size(1);
        int64_t size0 = devGradOutput.size(0);

        for (int64_t block_y = 0; block_y < size1; block_y += 65535) {
          int64_t block_y_size =
              std::min(size1 - block_y, static_cast<int64_t>(65535));
          for (int64_t block_z = 0; block_z < size0; block_z += 65535) {
            int64_t block_z_size =
                std::min(size0 - block_z, static_cast<int64_t>(65535));

            dim3 gridSize(
                THCCeilDiv(outputPlaneSize, static_cast<int64_t>(256)),
                block_y_size,
                block_z_size);
            dim3 blockSize(outputPlaneSize > 256 ? 256 : outputPlaneSize);

            replication_pad_backward_kernel<<<gridSize, blockSize, 0,
                at::cuda::getCurrentCUDAStream()>>>(
                devGradInput, devGradOutput, padL, padR, block_y, block_z);
            C10_CUDA_KERNEL_LAUNCH_CHECK();
          }
        }
      });
}

// Host driver for 2-D forward padding.  Accepts 3-D (C, H, W) or 4-D
// (N, C, H, W) input; paddingSize is {padL, padR, padT, padB}.
void replication_pad2d_out_cuda_template(
    Tensor& output,
    const Tensor& input,
    IntArrayRef paddingSize) {
  TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(input),
      "input tensor must fit into 32-bit index math");
  TORCH_CHECK(paddingSize.size() == 4, "padding Size is expected to be 4");

  int padL = paddingSize[0];
  int padR = paddingSize[1];
  int padT = paddingSize[2];
  int padB = paddingSize[3];
  int planeDim = 0;
  int dimh = 1;
  int dimw = 2;
  int numBatch = 1;

  int numInputDims = input.dim();
  bool valid_dims = input.size(1) != 0 && input.size(2) != 0;
  TORCH_CHECK(
      (numInputDims == 3 && input.size(0) != 0 && valid_dims) ||
      (numInputDims == 4 && valid_dims && input.size(3) != 0),
      "Expected 3D or 4D (batch mode) tensor with possibly 0 batch size and other non-zero dimensions for input, but got: ",
      input.sizes());

  if (numInputDims == 4) {
    numBatch = input.size(0);
    planeDim++;
    dimh++;
    dimw++;
  }

  int numPlanes = input.size(planeDim);
  int inputH = input.size(dimh);
  int inputW = input.size(dimw);
  int outputH = inputH + padT + padB;
  int outputW = inputW + padL + padR;

  TORCH_CHECK(outputW >= 1 || outputH >= 1,
      "input (H: ", inputH, ", W: ", inputW, ") is too small."
      " Calculated output H: ", outputH, " W: ", outputW);

  if (numInputDims == 3) {
    output.resize_({numPlanes, outputH, outputW});
  } else {
    output.resize_({numBatch, numPlanes, outputH, outputW});
  }

  if (input.numel() == 0) {
    return;
  }

  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(kHalf,
      input.scalar_type(), "replication_pad2d_cuda", [&] {
        at::Tensor input_ = input;
        at::Tensor output_ = output;
        if (numInputDims == 3) {
          input_ = input.unsqueeze(0);
          output_ = output.unsqueeze(0);
        }
        auto devInput = input_.packed_accessor64<scalar_t, 4>();
        auto devOutput = output_.packed_accessor64<scalar_t, 4>();

        int64_t outputPlaneSize = devOutput.size(2) * devOutput.size(3);
        int64_t size1 = devOutput.size(1);
        int64_t size0 = devOutput.size(0);

        for (int64_t block_y = 0; block_y < size1; block_y += 65535) {
          int64_t block_y_size =
              std::min(size1 - block_y, static_cast<int64_t>(65535));
          for (int64_t block_z = 0; block_z < size0; block_z += 65535) {
            int64_t block_z_size =
                std::min(size0 - block_z, static_cast<int64_t>(65535));

            dim3 gridSize(
                THCCeilDiv(outputPlaneSize, static_cast<int64_t>(256)),
                block_y_size,
                block_z_size);
            dim3 blockSize(outputPlaneSize > 256 ? 256 : outputPlaneSize);

            replication_pad_forward_kernel2d<<<gridSize, blockSize, 0,
                at::cuda::getCurrentCUDAStream()>>>(
                devInput, devOutput, padT, padB, padL, padR,
                block_y, block_z);
            C10_CUDA_KERNEL_LAUNCH_CHECK();
          }
        }
      });
}

// Host driver for 2-D backward padding (atomic scatter, nondeterministic).
void replication_pad2d_backward_out_cuda_template(
    Tensor& gradInput,
    const Tensor& gradOutput,
    const Tensor& input,
    IntArrayRef paddingSize) {
  TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(input),
      "input tensor must fit into 32-bit index math");
  TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(gradOutput),
      "output gradient tensor must fit into 32-bit index math");
  TORCH_CHECK(paddingSize.size() == 4, "padding Size is expected to be 4");

  int padL = paddingSize[0];
  int padR = paddingSize[1];
  int padT = paddingSize[2];
  int padB = paddingSize[3];
  int planeDim = 0;
  int dimh = 1;
  int dimw = 2;

  int numInputDims = input.dim();
  if (numInputDims == 4) {
    planeDim++;
    dimh++;
    dimw++;
  }
  int iheight = input.size(dimh);
  int iwidth = input.size(dimw);
  int oheight = iheight + padT + padB;
  int owidth = iwidth + padL + padR;

  TORCH_CHECK(owidth == gradOutput.size(dimw),
      "gradOutput width unexpected. Expected: ", owidth, ", Got: ",
      gradOutput.size(dimw));
  TORCH_CHECK(oheight == gradOutput.size(dimh),
      "gradOutput height unexpected. Expected: ", oheight, ", Got: ",
      gradOutput.size(dimh));

  gradInput.resize_as_(input);
  if (gradInput.numel() == 0) {
    return;
  }
  gradInput.zero_();

  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(kHalf,
      input.scalar_type(), "replication_pad2d_backward_cuda", [&] {
        auto gradInput_ = gradInput;
        auto gradOutput_ = gradOutput;
        if (numInputDims == 3) {
          gradInput_ = gradInput.unsqueeze(0);
          gradOutput_ = gradOutput.unsqueeze(0);
        }
        auto devGradInput = gradInput_.packed_accessor64<scalar_t, 4>();
        auto devGradOutput = gradOutput_.packed_accessor64<scalar_t, 4>();

        int64_t outputPlaneSize =
            devGradOutput.size(2) * devGradOutput.size(3);
        int64_t size1 = devGradOutput.size(1);
        int64_t size0 = devGradOutput.size(0);

        for (int64_t block_y = 0; block_y < size1; block_y += 65535) {
          int64_t block_y_size =
              std::min(size1 - block_y, static_cast<int64_t>(65535));
          for (int64_t block_z = 0; block_z < size0; block_z += 65535) {
            int64_t block_z_size =
                std::min(size0 - block_z, static_cast<int64_t>(65535));

            dim3 gridSize(
                THCCeilDiv(outputPlaneSize, static_cast<int64_t>(256)),
                block_y_size,
                block_z_size);
            dim3 blockSize(outputPlaneSize > 256 ? 256 : outputPlaneSize);

            replication_pad_backward_kernel<<<gridSize, blockSize, 0,
                at::cuda::getCurrentCUDAStream()>>>(
                devGradInput, devGradOutput, padT, padB, padL, padR,
                block_y, block_z);
            C10_CUDA_KERNEL_LAUNCH_CHECK();
          }
        }
      });
}

// Validates a 3-D padding input: 4-D (C, D, H, W) or 5-D (N, C, D, H, W),
// 32-bit indexable, with a non-degenerate computed output volume.
static inline void shapeCheck3d(
    const Tensor& input,
    int pleft, int pright,
    int ptop, int pbottom,
    int pfront, int pback) {
  TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(input),
      "input tensor must fit into 32-bit index math");
  int numInputDims = input.dim();

  bool valid_dims =
      input.size(1) != 0 && input.size(2) != 0 && input.size(3) != 0;
  TORCH_CHECK(
      (numInputDims == 4 && input.size(0) != 0 && valid_dims) ||
      (numInputDims == 5 && valid_dims && input.size(4) != 0),
      "Expected 4D or 5D (batch mode) tensor with possibly 0 batch size and other non-zero dimensions for input, but got: ",
      input.sizes());

  int dimd = 1;
  int dimh = 2;
  int dimw = 3;
  if (numInputDims == 5) {
    dimd++;
    dimh++;
    dimw++;
  }

  int idepth = input.size(dimd);
  int iheight = input.size(dimh);
  int iwidth = input.size(dimw);
  int odepth = idepth + pfront + pback;
  int oheight = iheight + ptop + pbottom;
  int owidth = iwidth + pleft + pright;
  TORCH_CHECK(owidth >= 1 || oheight >= 1 || odepth >= 1,
      "input (D: ", idepth, " H: ", iheight, ", W: ", iwidth,
      ") is too small."
      " Calculated output D: ", odepth, " H: ", oheight, " W: ", owidth);
}

// Like shapeCheck3d, but additionally checks that gradOutput matches the
// expected padded output shape (plane count and all three spatial dims).
static inline void shapeAndGradOutputCheck3d(
    const Tensor& input,
    const Tensor& gradOutput,
    int pleft, int pright,
    int ptop, int pbottom,
    int pfront, int pback) {
  TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(input),
      "input tensor must fit into 32-bit index math");
  int numInputDims = input.dim();

  bool valid_dims =
      input.size(1) != 0 && input.size(2) != 0 && input.size(3) != 0;
  TORCH_CHECK(
      (numInputDims == 4 && valid_dims) ||
      (numInputDims == 5 && valid_dims && input.size(4) != 0),
      "Expected 4D or 5D (batch mode) tensor with possibly 0 batch size and other non-zero dimensions for input, but got: ",
      input.sizes());

  int planeDim = 0;
  int dimd = 1;
  int dimh = 2;
  int dimw = 3;
  if (numInputDims == 5) {
    planeDim++;
    dimd++;
    dimh++;
    dimw++;
  }

  int numPlanes = input.size(planeDim);
  int idepth = input.size(dimd);
  int iheight = input.size(dimh);
  int iwidth = input.size(dimw);
  int odepth = idepth + pfront + pback;
  int oheight = iheight + ptop + pbottom;
  int owidth = iwidth + pleft + pright;
  TORCH_CHECK(owidth >= 1 || oheight >= 1 || odepth >= 1,
      "input (D: ", idepth, " H: ", iheight, ", W: ", iwidth,
      ") is too small."
      " Calculated output D: ", odepth, " H: ", oheight, " W: ", owidth);

  TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(gradOutput),
      "output gradient tensor must fit into 32-bit index math");
  TORCH_CHECK(numPlanes == gradOutput.size(planeDim),
      "gradOutput width unexpected. Expected: ", numPlanes, ", Got: ",
      gradOutput.size(planeDim));
  TORCH_CHECK(owidth == gradOutput.size(dimw),
      "gradOutput width unexpected. Expected: ", owidth, ", Got: ",
      gradOutput.size(dimw));
  TORCH_CHECK(oheight == gradOutput.size(dimh),
      "gradOutput height unexpected. Expected: ", oheight, ", Got: ",
      gradOutput.size(dimh));
  TORCH_CHECK(odepth == gradOutput.size(dimd),
      "gradOutput depth unexpected. Expected: ", odepth, ", Got: ",
      gradOutput.size(dimd));
}

// Host driver for 3-D forward padding.  paddingSize is
// {pleft, pright, ptop, pbottom, pfront, pback}.
void replication_pad3d_out_cuda_template(
    Tensor& output,
    const Tensor& input,
    IntArrayRef paddingSize) {
  TORCH_CHECK(paddingSize.size() == 6, "padding Size is expected to be 6");
  int pleft = paddingSize[0];
  int pright = paddingSize[1];
  int ptop = paddingSize[2];
  int pbottom = paddingSize[3];
  int pfront = paddingSize[4];
  int pback = paddingSize[5];
  shapeCheck3d(input, pleft, pright, ptop, pbottom, pfront, pback);

  int planeDim = 0;
  int dimd = 1;
  int dimh = 2;
  int dimw = 3;
  int numBatch = 1;

  int numInputDims = input.dim();
  if (numInputDims == 5) {
    numBatch = input.size(0);
    planeDim++;
    dimd++;
    dimh++;
    dimw++;
  }

  int numPlanes = input.size(planeDim);
  int inputD = input.size(dimd);
  int inputH = input.size(dimh);
  int inputW = input.size(dimw);
  int outputD = inputD + pfront + pback;
  int outputH = inputH + ptop + pbottom;
  int outputW = inputW + pleft + pright;

  if (numInputDims == 4) {
    output.resize_({numPlanes, outputD, outputH, outputW});
  } else {
    output.resize_({numBatch, numPlanes, outputD, outputH, outputW});
  }

  if (input.numel() == 0) {
    return;
  }

  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(kHalf,
      input.scalar_type(), "replication_pad3d_cuda", [&] {
        at::Tensor input_ = input;
        at::Tensor output_ = output;
        if (numInputDims == 4) {
          // BUG FIX: these assignments previously used `auto`, declaring new
          // if-scoped locals that shadowed input_/output_.  The unsqueezed
          // views were discarded, so the rank-5 accessors below were taken
          // on 4-D tensors for non-batched input.
          input_ = input.unsqueeze(0);
          output_ = output.unsqueeze(0);
        }
        auto devInput = input_.packed_accessor64<scalar_t, 5>();
        auto devOutput = output_.packed_accessor64<scalar_t, 5>();

        int64_t outputPlaneSize =
            devOutput.size(2) * devOutput.size(3) * devOutput.size(4);
        int64_t size1 = devOutput.size(1);
        int64_t size0 = devOutput.size(0);

        for (int64_t block_y = 0; block_y < size1; block_y += 65535) {
          int64_t block_y_size =
              std::min(size1 - block_y, static_cast<int64_t>(65535));
          for (int64_t block_z = 0; block_z < size0; block_z += 65535) {
            int64_t block_z_size =
                std::min(size0 - block_z, static_cast<int64_t>(65535));

            dim3 gridSize(
                THCCeilDiv(outputPlaneSize, static_cast<int64_t>(256)),
                block_y_size,
                block_z_size);
            dim3 blockSize(outputPlaneSize > 256 ? 256 : outputPlaneSize);

            replication_pad_forward_kernel3d<<<gridSize, blockSize, 0,
                at::cuda::getCurrentCUDAStream()>>>(
                devInput, devOutput, pfront, pback, ptop, pbottom,
                pleft, pright, block_y, block_z);
            C10_CUDA_KERNEL_LAUNCH_CHECK();
          }
        }
      });
}

// Host driver for 3-D backward padding (atomic scatter, nondeterministic).
void replication_pad3d_backward_out_cuda_template(
    Tensor& gradInput,
    const Tensor& gradOutput,
    const Tensor& input,
    IntArrayRef paddingSize) {
  TORCH_CHECK(paddingSize.size() == 6, "padding Size is expected to be 6");
  int pleft = paddingSize[0];
  int pright = paddingSize[1];
  int ptop = paddingSize[2];
  int pbottom = paddingSize[3];
  int pfront = paddingSize[4];
  int pback = paddingSize[5];
  shapeAndGradOutputCheck3d(
      input, gradOutput, pleft, pright, ptop, pbottom, pfront, pback);

  int planeDim = 0;
  int dimd = 1;
  int dimh = 2;
  int dimw = 3;

  int numInputDims = input.dim();
  if (numInputDims == 5) {
    planeDim++;
    dimd++;
    dimh++;
    dimw++;
  }

  gradInput.resize_as_(input);
  if (gradInput.numel() == 0) {
    return;
  }
  gradInput.zero_();

  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(kHalf,
      input.scalar_type(), "replication_pad3d_backward_cuda", [&] {
        auto gradInput_ = gradInput;
        auto gradOutput_ = gradOutput;
        if (numInputDims == 4) {
          gradInput_ = gradInput.unsqueeze(0);
          gradOutput_ = gradOutput.unsqueeze(0);
        }
        auto devGradInput = gradInput_.packed_accessor64<scalar_t, 5>();
        auto devGradOutput = gradOutput_.packed_accessor64<scalar_t, 5>();

        int64_t outputPlaneSize = devGradOutput.size(2) *
            devGradOutput.size(3) * devGradOutput.size(4);
        int64_t size1 = devGradOutput.size(1);
        int64_t size0 = devGradOutput.size(0);

        for (int64_t block_y = 0; block_y < size1; block_y += 65535) {
          int64_t block_y_size =
              std::min(size1 - block_y, static_cast<int64_t>(65535));
          for (int64_t block_z = 0; block_z < size0; block_z += 65535) {
            int64_t block_z_size =
                std::min(size0 - block_z, static_cast<int64_t>(65535));

            dim3 gridSize(
                THCCeilDiv(outputPlaneSize, static_cast<int64_t>(256)),
                block_y_size,
                block_z_size);
            dim3 blockSize(outputPlaneSize > 256 ? 256 : outputPlaneSize);

            replication_pad_backward_kernel<<<gridSize, blockSize, 0,
                at::cuda::getCurrentCUDAStream()>>>(
                devGradInput, devGradOutput, pfront, pback, ptop, pbottom,
                pleft, pright, block_y, block_z);
            C10_CUDA_KERNEL_LAUNCH_CHECK();
          }
        }
      });
}

} // namespace

// Public out-variant: 1-D forward into a caller-provided tensor.
Tensor& replication_pad1d_out_cuda(const Tensor& input,
    IntArrayRef paddingSize,
    Tensor& output) {
  replication_pad1d_out_cuda_template(output, input, paddingSize);
  return output;
}

// Public functional variant: allocates the output tensor itself.
Tensor replication_pad1d_cuda(
    const Tensor& input,
    IntArrayRef paddingSize) {
  auto output = at::empty({0}, input.options());
  replication_pad1d_out_cuda_template(output, input, paddingSize);
  return output;
}

Tensor& replication_pad1d_backward_out_cuda(const Tensor& gradOutput,
    const Tensor& input,
    IntArrayRef paddingSize,
    Tensor& gradInput) {
  // See Note [Writing Nondeterministic Operations]
  // Nondeterministic because of atomicAdd usage
  globalContext().alertNotDeterministic("replication_pad1d_backward_out_cuda");
  replication_pad1d_backward_out_cuda_template(
      gradInput, gradOutput, input, paddingSize);
  return gradInput;
}

Tensor replication_pad1d_backward_cuda(
    const Tensor& gradOutput,
    const Tensor& input,
    IntArrayRef paddingSize) {
  // See Note [Writing Nondeterministic Operations]
  // Nondeterministic because of atomicAdd usage
  globalContext().alertNotDeterministic("replication_pad1d_backward_cuda");
  auto gradInput = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
  replication_pad1d_backward_out_cuda_template(
      gradInput, gradOutput, input, paddingSize);
  return gradInput;
}

Tensor& replication_pad2d_out_cuda(const Tensor& input,
    IntArrayRef paddingSize,
    Tensor& output) {
  replication_pad2d_out_cuda_template(output, input, paddingSize);
  return output;
}

Tensor replication_pad2d_cuda(
    const Tensor& input,
    IntArrayRef paddingSize) {
  auto output = at::empty({0}, input.options());
  replication_pad2d_out_cuda_template(output, input, paddingSize);
  return output;
}
// Public 2-D backward out-variant: writes into a caller-provided gradInput.
Tensor& replication_pad2d_backward_out_cuda(const Tensor& gradOutput,
    const Tensor& input,
    IntArrayRef paddingSize,
    Tensor& gradInput) {
  // See Note [Writing Nondeterministic Operations]
  // Nondeterministic because of atomicAdd usage
  globalContext().alertNotDeterministic("replication_pad2d_backward_out_cuda");
  replication_pad2d_backward_out_cuda_template(
      gradInput, gradOutput, input, paddingSize);
  return gradInput;
}

// Public 2-D backward functional variant: allocates gradInput itself.
Tensor replication_pad2d_backward_cuda(
    const Tensor& gradOutput,
    const Tensor& input,
    IntArrayRef paddingSize) {
  // See Note [Writing Nondeterministic Operations]
  // Nondeterministic because of atomicAdd usage
  globalContext().alertNotDeterministic("replication_pad2d_backward_cuda");
  auto gradInput = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
  replication_pad2d_backward_out_cuda_template(
      gradInput, gradOutput, input, paddingSize);
  return gradInput;
}

// Public 3-D forward out-variant.
Tensor& replication_pad3d_out_cuda(const Tensor& input,
    IntArrayRef paddingSize,
    Tensor& output) {
  replication_pad3d_out_cuda_template(output, input, paddingSize);
  return output;
}

// Public 3-D forward functional variant.
Tensor replication_pad3d_cuda(
    const Tensor& input,
    IntArrayRef paddingSize) {
  auto output = at::empty({0}, input.options());
  replication_pad3d_out_cuda_template(output, input, paddingSize);
  return output;
}

// Public 3-D backward out-variant.
Tensor& replication_pad3d_backward_out_cuda(const Tensor& gradOutput,
    const Tensor& input,
    IntArrayRef paddingSize,
    Tensor& gradInput) {
  // See Note [Writing Nondeterministic Operations]
  // Nondeterministic because of atomicAdd usage
  globalContext().alertNotDeterministic("replication_pad3d_backward_out_cuda");
  replication_pad3d_backward_out_cuda_template(
      gradInput, gradOutput, input, paddingSize);
  return gradInput;
}

// Public 3-D backward functional variant.
Tensor replication_pad3d_backward_cuda(
    const Tensor& gradOutput,
    const Tensor& input,
    IntArrayRef paddingSize) {
  // See Note [Writing Nondeterministic Operations]
  // Nondeterministic because of atomicAdd usage
  globalContext().alertNotDeterministic("replication_pad3d_backward_cuda");
  auto gradInput = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
  replication_pad3d_backward_out_cuda_template(
      gradInput, gradOutput, input, paddingSize);
  return gradInput;
}

} // namespace native
} // namespace at
1b7da91b5450389eb8428c66d8e15a8d97b71e1c.hip
// !!! This is a file automatically generated by hipify!!! #include <vector> #include <cstdlib> #include <iostream> #include <fstream> #include <math.h> #include <cstring> #include <hip/hip_runtime.h> #define ITER_LIMIT 500 #define MATRIX_MAX 10 using namespace std; void nmfgpu(float *a, int r, int c, int k, int niters, float *w, float *h); void matrix_print(float *a, int r, int c); void nmf_seed(float *out, float *a, int r, int c, int k); void surface_matrix_multi(float *out, float *a, float *b, int r1, int c1, int r2, int c2); __global__ void kernel(float *a, int r, int c, int k, int niters, float *w, float *h, float* wt, float* ht, float* wta, float* wtw, float* wtwh, float* hdiv, float* aht, float* wh, float* whht, float* wdiv, int total); __device__ void matrix_multi(float *out, float *a, float *b, int r1, int c1, int r2, int c2, int total); __device__ void matrix_trans(float *out, float *a, int r, int c, int total); __device__ float matrix_findmax(float *a, int r, int c, int total); __device__ float matrix_distance(float *a, float *b, int r, int c, int total); __device__ void matrix_elemproduct(float* out, float *a, float *b, int r, int c, int total); __device__ void matrix_elemdivison(float* out, float *a, float *b, int r ,int c, int total); void surface_matrix_multi(float *out, float *a, float *b, int r1, int c1, int r2, int c2){ if (c1!=r2){ return; } // int total = omp_get_thread_num(); // int total = omp_get_num_threads(); int nth = 0; int total = 1; int division = r1/total; for (int row = (division*total); row < (nth==(total-1) ? 
r1 : (division*nth+division)); row++) { for (int col = 0; col < c2; col++) { out[row*c2+col] = 0; for (int inner = 0; inner < c1; inner++) { out[row*c2+col] += a[row*c1+inner] * b[inner*c2+col]*1.0; // printf("%d %d :%.2f += %.2f\n", // row, col, out[row*c2+col]-a[row*c1+inner] * b[inner*c2+col]*1.0, a[row*c1+inner] * b[inner*c2+col]*1.0); } } } } void nmfgpu(float *a, int r, int c, int k, int niters, float *w, float *h){ // setup cuda float *dm; float *dm_w; float *dm_h; int msize = r*c*sizeof(float); int msize_w = r*k*sizeof(float); int msize_h = k*c*sizeof(float); float *dm_wt = new float[r*k]; float *dm_ht = new float[c*k]; float *dm_wta = new float[k*c]; float *dm_wtw = new float[k*k]; float *dm_wtwh = new float[k*c]; float *dm_hdiv = new float[k*c]; float *dm_aht = new float[r*k]; float *dm_wh = new float[r*c]; float *dm_whht = new float[r*k]; float *dm_wdiv = new float[r*k]; nmf_seed(w, a, r, c, k); memcpy(h, w, sizeof(float)*r*k); hipMalloc((void**)&dm, msize); hipMalloc((void**)&dm_h, msize_h); hipMalloc((void**)&dm_w, msize_w); hipMalloc((void**)&dm_wt, r*k*sizeof(float)); hipMalloc((void**)&dm_ht, c*k*sizeof(float)); hipMalloc((void**)&dm_wta, k*c*sizeof(float)); hipMalloc((void**)&dm_wtw, k*k*sizeof(float)); hipMalloc((void**)&dm_wtwh, k*c*sizeof(float)); hipMalloc((void**)&dm_hdiv, k*c*sizeof(float)); hipMalloc((void**)&dm_aht, r*k*sizeof(float)); hipMalloc((void**)&dm_wh, r*c*sizeof(float)); hipMalloc((void**)&dm_whht,r*k*sizeof(float)); hipMalloc((void**)&dm_wdiv,r*k*sizeof(float)); int total = 4; dim3 dimGrid(1,1); dim3 dimBlock(total, 1, 1); hipMemcpy(dm, a, msize, hipMemcpyHostToDevice); hipLaunchKernelGGL(( kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, dm, r, c, k, niters , dm_w, dm_h, dm_wt, dm_ht, dm_wta, dm_wtw, dm_wtwh, dm_hdiv, dm_aht, dm_wh, dm_whht, dm_wdiv, total); hipMemcpy(w, dm_w, msize_w, hipMemcpyDeviceToHost); hipMemcpy(h, dm_h, msize_h, hipMemcpyDeviceToHost); hipFree(dm); hipFree(dm_h); hipFree(dm_w); hipFree(dm_wt); 
hipFree(dm_ht); hipFree(dm_wta); hipFree(dm_wtw); hipFree(dm_wtwh); hipFree(dm_hdiv); hipFree(dm_aht); hipFree(dm_wh); hipFree(dm_whht); hipFree(dm_wdiv); } __global__ void kernel(float *a, int r, int c, int k, int niters, float *w, float *h, float* wt, float* ht, float* wta, float* wtw, float* wtwh, float* hdiv, float* aht, float* wh, float* whht, float* wdiv, int total){ // calculate h // nominator // printf("NMF: 1\n"); for (int i=0;i<niters;i++){ matrix_trans(wt, w, r, k, total); matrix_multi(wta, wt, a, k, r, r, c, total); // denominator // printf("NMF: 2\n"); matrix_multi(wtw, wt, w, k, r, r, k, total); matrix_multi(wtwh, wtw, h, k, k, k, c, total); // new h // printf("NMF: 3\n"); matrix_elemdivison(hdiv, wta, wtwh, k, c, total); matrix_elemproduct(h, h, hdiv, k, c, total); // calcualte w // nominator // printf("NMF: 4\n"); matrix_trans(ht, h, k, c, total); matrix_multi(aht, a, ht, r, c, c, k, total); // calculate w // printf("NMF: 5\n"); matrix_multi(wh, w, h, r, k, k, c, total); matrix_multi(whht, wh, ht, r, c, c, k, total); matrix_elemdivison(wdiv, aht, whht, r, k, total); // new w // printf("NMF: 6\n"); matrix_elemproduct(w, w, wdiv, r, k, total); } } __device__ void matrix_multi(float *out, float *a, float *b, int r1, int c1, int r2, int c2, int total){ if (c1!=r2){ return; } // int total = omp_get_thread_num(); // int total = omp_get_num_threads(); int nth = threadIdx.x; int division = r1/total; for (int row = (division*nth); row < (nth==(total-1) ? 
r1 : (division*nth+division)); row++) { for (int col = 0; col < c2; col++) { out[row*c2+col] = 0; for (int inner = 0; inner < c1; inner++) { out[row*c2+col] += a[row*c1+inner] * b[inner*c2+col]*1.0; // printf("%d %d :%.2f += %.2f\n", // row, col, out[row*c2+col]-a[row*c1+inner] * b[inner*c2+col]*1.0, a[row*c1+inner] * b[inner*c2+col]*1.0); } } } } __device__ void matrix_trans(float *out, float *a, int r, int c, int total){ // This trans function can be boosted up by multi thread // int total = omp_get_thread_num(); // int total = omp_get_num_threads(); int nth = threadIdx.x; int division = c/total; for (int j=(division*nth);j<(nth==(total-1) ? c : (division*nth+division));j++){ for (int i=0;i<r;i++){ out[j*r+i]=a[i*c+j]; } } } void matrix_print(float *a, int r, int c){ for(int i=0;i<r;i++){ for (int j=0;j<c;j++){ printf("%.2f\t", a[i*c+j]); } printf("\n"); } } __device__ float matrix_findmax(float *a, int r, int c, int total){ // this function can be boosted by find max in row/col and compare them if (r==0 && c==0){ return 0; } float max = a[0]; for (int i = 0;i<r*c;i++){ if (a[i]>max){ max=a[i]; } } return max; } void nmf_seed(float *out, float *a, int r, int c, int k){ // setup generator srand(time(NULL)); for (int i=0;i<r*k;i++){ out[i] = (float)(MATRIX_MAX*((rand()%100/100.0))); } } __device__ float matrix_distance(float *a, float *b, int r, int c, int total){ float distance=0; for (int i=0;i<r*c;i++){ distance += abs(a[i]-b[i])*abs(a[i]-b[i]); } return distance; } __device__ void matrix_elemproduct(float* out, float *a, float *b, int r, int c, int total){ // int total = omp_get_thread_num(); // int total = omp_get_num_threads(); int nth = threadIdx.x; int division = r*c/total; // printf("Its %d/%d\n", total, total); for (int i=(division*nth);i<(nth==(total-1) ? 
r*c : (division*nth+division));i++){ out[i]=1.0*a[i]*b[i]; } } __device__ void matrix_elemdivison(float* out, float *a, float *b, int r ,int c, int total){ // int total = omp_get_thread_num(); // int total = omp_get_num_threads(); int nth = threadIdx.x; int division = r*c/total; for (int i=(division*nth);i<(nth==(total-1) ? r*c : (division*nth+division));i++){ if (b[i]==0){ return; } out[i]=1.0*a[i]/b[i]; // printf("Now is %d\n", i); } } int main(int argc, char const *argv[]) { // omp_set_num_threads(8); /* code */ // float a[12]={2, 3, 4, 1, 2, 10, 11, 32, // 3.1, 4.1, 31, 0.2}; // float b[12]={1, 2, 1, 3, 1, 4, 1, 5, // 1, 6, 1, 7}; // matrix for nmf. rxc (row, col) // int r = 300; // int c = 200; // int k = 150; int r = 5; int c = 4; int k = 3; float B[20]={1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20}; // matrix generator srand(time(NULL)); float A[r*c]; for (int i=0;i<r;i++){ for (int j=0;j<c;j++){ A[i*c+j]=(float)(1.0*MATRIX_MAX*((float)(rand()%100/100.0))); } } // nmf using originall float w[r*k]; float h[k*c]; // double time_begin=get_wall_time(); for (int i=0;i<1;i++){ nmfgpu(A, r, c, k, 500, w, h); printf("It's run: \t%d\n", i); } // double time_end=get_wall_time(); // printf("Matrix w is: \n"); // matrix_print(w, 5, 3); // printf("Matrix h is: \n"); // matrix_print(h, 3, 4); printf("Matrix A is: \n"); matrix_print(A, r, c); surface_matrix_multi(A, w, h, r, k, k, c); printf("Matrix w*h is: \n"); matrix_print(A, r, c); return 0; }
1b7da91b5450389eb8428c66d8e15a8d97b71e1c.cu
#include <vector> #include <cstdlib> #include <iostream> #include <fstream> #include <math.h> #include <cstring> #include <cuda.h> #define ITER_LIMIT 500 #define MATRIX_MAX 10 using namespace std; void nmfgpu(float *a, int r, int c, int k, int niters, float *w, float *h); void matrix_print(float *a, int r, int c); void nmf_seed(float *out, float *a, int r, int c, int k); void surface_matrix_multi(float *out, float *a, float *b, int r1, int c1, int r2, int c2); __global__ void kernel(float *a, int r, int c, int k, int niters, float *w, float *h, float* wt, float* ht, float* wta, float* wtw, float* wtwh, float* hdiv, float* aht, float* wh, float* whht, float* wdiv, int total); __device__ void matrix_multi(float *out, float *a, float *b, int r1, int c1, int r2, int c2, int total); __device__ void matrix_trans(float *out, float *a, int r, int c, int total); __device__ float matrix_findmax(float *a, int r, int c, int total); __device__ float matrix_distance(float *a, float *b, int r, int c, int total); __device__ void matrix_elemproduct(float* out, float *a, float *b, int r, int c, int total); __device__ void matrix_elemdivison(float* out, float *a, float *b, int r ,int c, int total); void surface_matrix_multi(float *out, float *a, float *b, int r1, int c1, int r2, int c2){ if (c1!=r2){ return; } // int total = omp_get_thread_num(); // int total = omp_get_num_threads(); int nth = 0; int total = 1; int division = r1/total; for (int row = (division*total); row < (nth==(total-1) ? 
r1 : (division*nth+division)); row++) { for (int col = 0; col < c2; col++) { out[row*c2+col] = 0; for (int inner = 0; inner < c1; inner++) { out[row*c2+col] += a[row*c1+inner] * b[inner*c2+col]*1.0; // printf("%d %d :%.2f += %.2f\n", // row, col, out[row*c2+col]-a[row*c1+inner] * b[inner*c2+col]*1.0, a[row*c1+inner] * b[inner*c2+col]*1.0); } } } } void nmfgpu(float *a, int r, int c, int k, int niters, float *w, float *h){ // setup cuda float *dm; float *dm_w; float *dm_h; int msize = r*c*sizeof(float); int msize_w = r*k*sizeof(float); int msize_h = k*c*sizeof(float); float *dm_wt = new float[r*k]; float *dm_ht = new float[c*k]; float *dm_wta = new float[k*c]; float *dm_wtw = new float[k*k]; float *dm_wtwh = new float[k*c]; float *dm_hdiv = new float[k*c]; float *dm_aht = new float[r*k]; float *dm_wh = new float[r*c]; float *dm_whht = new float[r*k]; float *dm_wdiv = new float[r*k]; nmf_seed(w, a, r, c, k); memcpy(h, w, sizeof(float)*r*k); cudaMalloc((void**)&dm, msize); cudaMalloc((void**)&dm_h, msize_h); cudaMalloc((void**)&dm_w, msize_w); cudaMalloc((void**)&dm_wt, r*k*sizeof(float)); cudaMalloc((void**)&dm_ht, c*k*sizeof(float)); cudaMalloc((void**)&dm_wta, k*c*sizeof(float)); cudaMalloc((void**)&dm_wtw, k*k*sizeof(float)); cudaMalloc((void**)&dm_wtwh, k*c*sizeof(float)); cudaMalloc((void**)&dm_hdiv, k*c*sizeof(float)); cudaMalloc((void**)&dm_aht, r*k*sizeof(float)); cudaMalloc((void**)&dm_wh, r*c*sizeof(float)); cudaMalloc((void**)&dm_whht,r*k*sizeof(float)); cudaMalloc((void**)&dm_wdiv,r*k*sizeof(float)); int total = 4; dim3 dimGrid(1,1); dim3 dimBlock(total, 1, 1); cudaMemcpy(dm, a, msize, cudaMemcpyHostToDevice); kernel<<<dimGrid, dimBlock>>>(dm, r, c, k, niters , dm_w, dm_h, dm_wt, dm_ht, dm_wta, dm_wtw, dm_wtwh, dm_hdiv, dm_aht, dm_wh, dm_whht, dm_wdiv, total); cudaMemcpy(w, dm_w, msize_w, cudaMemcpyDeviceToHost); cudaMemcpy(h, dm_h, msize_h, cudaMemcpyDeviceToHost); cudaFree(dm); cudaFree(dm_h); cudaFree(dm_w); cudaFree(dm_wt); cudaFree(dm_ht); 
cudaFree(dm_wta); cudaFree(dm_wtw); cudaFree(dm_wtwh); cudaFree(dm_hdiv); cudaFree(dm_aht); cudaFree(dm_wh); cudaFree(dm_whht); cudaFree(dm_wdiv); } __global__ void kernel(float *a, int r, int c, int k, int niters, float *w, float *h, float* wt, float* ht, float* wta, float* wtw, float* wtwh, float* hdiv, float* aht, float* wh, float* whht, float* wdiv, int total){ // calculate h // nominator // printf("NMF: 1\n"); for (int i=0;i<niters;i++){ matrix_trans(wt, w, r, k, total); matrix_multi(wta, wt, a, k, r, r, c, total); // denominator // printf("NMF: 2\n"); matrix_multi(wtw, wt, w, k, r, r, k, total); matrix_multi(wtwh, wtw, h, k, k, k, c, total); // new h // printf("NMF: 3\n"); matrix_elemdivison(hdiv, wta, wtwh, k, c, total); matrix_elemproduct(h, h, hdiv, k, c, total); // calcualte w // nominator // printf("NMF: 4\n"); matrix_trans(ht, h, k, c, total); matrix_multi(aht, a, ht, r, c, c, k, total); // calculate w // printf("NMF: 5\n"); matrix_multi(wh, w, h, r, k, k, c, total); matrix_multi(whht, wh, ht, r, c, c, k, total); matrix_elemdivison(wdiv, aht, whht, r, k, total); // new w // printf("NMF: 6\n"); matrix_elemproduct(w, w, wdiv, r, k, total); } } __device__ void matrix_multi(float *out, float *a, float *b, int r1, int c1, int r2, int c2, int total){ if (c1!=r2){ return; } // int total = omp_get_thread_num(); // int total = omp_get_num_threads(); int nth = threadIdx.x; int division = r1/total; for (int row = (division*nth); row < (nth==(total-1) ? 
r1 : (division*nth+division)); row++) { for (int col = 0; col < c2; col++) { out[row*c2+col] = 0; for (int inner = 0; inner < c1; inner++) { out[row*c2+col] += a[row*c1+inner] * b[inner*c2+col]*1.0; // printf("%d %d :%.2f += %.2f\n", // row, col, out[row*c2+col]-a[row*c1+inner] * b[inner*c2+col]*1.0, a[row*c1+inner] * b[inner*c2+col]*1.0); } } } } __device__ void matrix_trans(float *out, float *a, int r, int c, int total){ // This trans function can be boosted up by multi thread // int total = omp_get_thread_num(); // int total = omp_get_num_threads(); int nth = threadIdx.x; int division = c/total; for (int j=(division*nth);j<(nth==(total-1) ? c : (division*nth+division));j++){ for (int i=0;i<r;i++){ out[j*r+i]=a[i*c+j]; } } } void matrix_print(float *a, int r, int c){ for(int i=0;i<r;i++){ for (int j=0;j<c;j++){ printf("%.2f\t", a[i*c+j]); } printf("\n"); } } __device__ float matrix_findmax(float *a, int r, int c, int total){ // this function can be boosted by find max in row/col and compare them if (r==0 && c==0){ return 0; } float max = a[0]; for (int i = 0;i<r*c;i++){ if (a[i]>max){ max=a[i]; } } return max; } void nmf_seed(float *out, float *a, int r, int c, int k){ // setup generator srand(time(NULL)); for (int i=0;i<r*k;i++){ out[i] = (float)(MATRIX_MAX*((rand()%100/100.0))); } } __device__ float matrix_distance(float *a, float *b, int r, int c, int total){ float distance=0; for (int i=0;i<r*c;i++){ distance += abs(a[i]-b[i])*abs(a[i]-b[i]); } return distance; } __device__ void matrix_elemproduct(float* out, float *a, float *b, int r, int c, int total){ // int total = omp_get_thread_num(); // int total = omp_get_num_threads(); int nth = threadIdx.x; int division = r*c/total; // printf("Its %d/%d\n", total, total); for (int i=(division*nth);i<(nth==(total-1) ? 
r*c : (division*nth+division));i++){ out[i]=1.0*a[i]*b[i]; } } __device__ void matrix_elemdivison(float* out, float *a, float *b, int r ,int c, int total){ // int total = omp_get_thread_num(); // int total = omp_get_num_threads(); int nth = threadIdx.x; int division = r*c/total; for (int i=(division*nth);i<(nth==(total-1) ? r*c : (division*nth+division));i++){ if (b[i]==0){ return; } out[i]=1.0*a[i]/b[i]; // printf("Now is %d\n", i); } } int main(int argc, char const *argv[]) { // omp_set_num_threads(8); /* code */ // float a[12]={2, 3, 4, 1, 2, 10, 11, 32, // 3.1, 4.1, 31, 0.2}; // float b[12]={1, 2, 1, 3, 1, 4, 1, 5, // 1, 6, 1, 7}; // matrix for nmf. rxc (row, col) // int r = 300; // int c = 200; // int k = 150; int r = 5; int c = 4; int k = 3; float B[20]={1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20}; // matrix generator srand(time(NULL)); float A[r*c]; for (int i=0;i<r;i++){ for (int j=0;j<c;j++){ A[i*c+j]=(float)(1.0*MATRIX_MAX*((float)(rand()%100/100.0))); } } // nmf using originall float w[r*k]; float h[k*c]; // double time_begin=get_wall_time(); for (int i=0;i<1;i++){ nmfgpu(A, r, c, k, 500, w, h); printf("It's run: \t%d\n", i); } // double time_end=get_wall_time(); // printf("Matrix w is: \n"); // matrix_print(w, 5, 3); // printf("Matrix h is: \n"); // matrix_print(h, 3, 4); printf("Matrix A is: \n"); matrix_print(A, r, c); surface_matrix_multi(A, w, h, r, k, k, c); printf("Matrix w*h is: \n"); matrix_print(A, r, c); return 0; }
0046abfea5ef300dceb0c9874e413b2516f6801a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cutil_inline.h> #include <stdio.h> #include <stdint.h> #include "../xsr_rng.h" typedef uint32_t u32; __global__ static void test_inline_xsr_rand_u32(u32 *seed, int N, u32 *out) { int i; int tid=blockIdx.x*blockDim.x+threadIdx.x; inline_xsr_def_u32(); inline_xsr_srand_u32(seed[tid]); for(i=0;i<N-1;i++) { inline_xsr_rand_u32(); } out[tid]=inline_xsr_rand_u32(); } int test_inline_xsr(int dev, int nr_threads, int nr_blocks,int N) { int i,j; u32 *seed_h,*seed_d; u32 *out_h,*out_d; hipDeviceProp_t deviceProp; unsigned int timer = 0; double proc_time=-1.0; size_t size=(nr_threads*nr_blocks)*sizeof(u32); hipGetDeviceProperties(&deviceProp, dev); printf("\nUsing device %d: \"%s\"\n", dev, deviceProp.name); printf("\nClock rate: %d\n",deviceProp.clockRate); hipSetDevice(dev); cutilSafeCall(hipHostMalloc((void**)&(seed_h),size,hipHostMallocPortable)); cutilSafeCall(hipMalloc((void**)&(seed_d),size)); cutilSafeCall(hipHostMalloc((void**)&(out_h),size,hipHostMallocPortable)); cutilSafeCall(hipMalloc((void**)&(out_d),size)); { FILE *fp; if(!(fp = fopen("/dev/urandom", "r"))) { fprintf(stderr,"Failed to open /dev/urandom"); return -1; } fread(seed_h,sizeof(u32),nr_threads*nr_blocks,fp); fclose(fp); } cutilCheckError(cutCreateTimer( &timer)); cutilCheckError(cutStartTimer( timer)); #define NR_TEST_ITERATIONS 1//000 for(int nr_test_iter=0;nr_test_iter<NR_TEST_ITERATIONS;nr_test_iter++) { cutilSafeCall(hipMemcpy(seed_d,seed_h,size,hipMemcpyHostToDevice)); hipLaunchKernelGGL(( test_inline_xsr_rand_u32), dim3(nr_blocks),dim3(nr_threads), 0, 0, seed_d,N,out_d); cutilCheckMsg("Kernel execution failed"); hipDeviceSynchronize(); cutilSafeCall(hipMemcpy(out_h,out_d,size,hipMemcpyDeviceToHost)); } cutilCheckError(cutStopTimer( timer)); proc_time=cutGetTimerValue( timer); cutilCheckError(cutDeleteTimer( timer)); for(i=0;i<nr_threads*nr_blocks;i++) { u32 out_val=0; xsr_srand_u32(seed_h[i]); 
for(j=0;j<N;j++) { out_val=xsr_rand_u32(); } if(out_val!=out_h[i]) { printf("failed @ [%4d] -> seed=%08x cpu=%08x gpu=%08x\n",i, seed_h[i],out_val,out_h[i]); } } cutilSafeCall(hipFree(seed_d)); cutilSafeCall(hipFree(out_d)); cutilSafeCall(hipHostFree(seed_h)); cutilSafeCall(hipHostFree(out_h)); printf( "\nProcessing time: %8g (ms), %g (us/byte) %g (cycles/byte)\n" ,proc_time ,(1000.0*proc_time)/(NR_TEST_ITERATIONS*N)/ (nr_threads*nr_blocks*sizeof(u32)) ,((proc_time)*(deviceProp.clockRate))/(NR_TEST_ITERATIONS*N)/ (nr_threads*nr_blocks*sizeof(u32))); return 0; } int main(void) { return test_inline_xsr(3,256,256,100000); }
0046abfea5ef300dceb0c9874e413b2516f6801a.cu
#include <cutil_inline.h> #include <stdio.h> #include <stdint.h> #include "../xsr_rng.h" typedef uint32_t u32; __global__ static void test_inline_xsr_rand_u32(u32 *seed, int N, u32 *out) { int i; int tid=blockIdx.x*blockDim.x+threadIdx.x; inline_xsr_def_u32(); inline_xsr_srand_u32(seed[tid]); for(i=0;i<N-1;i++) { inline_xsr_rand_u32(); } out[tid]=inline_xsr_rand_u32(); } int test_inline_xsr(int dev, int nr_threads, int nr_blocks,int N) { int i,j; u32 *seed_h,*seed_d; u32 *out_h,*out_d; cudaDeviceProp deviceProp; unsigned int timer = 0; double proc_time=-1.0; size_t size=(nr_threads*nr_blocks)*sizeof(u32); cudaGetDeviceProperties(&deviceProp, dev); printf("\nUsing device %d: \"%s\"\n", dev, deviceProp.name); printf("\nClock rate: %d\n",deviceProp.clockRate); cudaSetDevice(dev); cutilSafeCall(cudaHostAlloc((void**)&(seed_h),size,cudaHostAllocPortable)); cutilSafeCall(cudaMalloc((void**)&(seed_d),size)); cutilSafeCall(cudaHostAlloc((void**)&(out_h),size,cudaHostAllocPortable)); cutilSafeCall(cudaMalloc((void**)&(out_d),size)); { FILE *fp; if(!(fp = fopen("/dev/urandom", "r"))) { fprintf(stderr,"Failed to open /dev/urandom"); return -1; } fread(seed_h,sizeof(u32),nr_threads*nr_blocks,fp); fclose(fp); } cutilCheckError(cutCreateTimer( &timer)); cutilCheckError(cutStartTimer( timer)); #define NR_TEST_ITERATIONS 1//000 for(int nr_test_iter=0;nr_test_iter<NR_TEST_ITERATIONS;nr_test_iter++) { cutilSafeCall(cudaMemcpy(seed_d,seed_h,size,cudaMemcpyHostToDevice)); test_inline_xsr_rand_u32<<<nr_blocks,nr_threads>>>(seed_d,N,out_d); cutilCheckMsg("Kernel execution failed"); cudaThreadSynchronize(); cutilSafeCall(cudaMemcpy(out_h,out_d,size,cudaMemcpyDeviceToHost)); } cutilCheckError(cutStopTimer( timer)); proc_time=cutGetTimerValue( timer); cutilCheckError(cutDeleteTimer( timer)); for(i=0;i<nr_threads*nr_blocks;i++) { u32 out_val=0; xsr_srand_u32(seed_h[i]); for(j=0;j<N;j++) { out_val=xsr_rand_u32(); } if(out_val!=out_h[i]) { printf("failed @ [%4d] -> seed=%08x cpu=%08x 
gpu=%08x\n",i, seed_h[i],out_val,out_h[i]); } } cutilSafeCall(cudaFree(seed_d)); cutilSafeCall(cudaFree(out_d)); cutilSafeCall(cudaFreeHost(seed_h)); cutilSafeCall(cudaFreeHost(out_h)); printf( "\nProcessing time: %8g (ms), %g (us/byte) %g (cycles/byte)\n" ,proc_time ,(1000.0*proc_time)/(NR_TEST_ITERATIONS*N)/ (nr_threads*nr_blocks*sizeof(u32)) ,((proc_time)*(deviceProp.clockRate))/(NR_TEST_ITERATIONS*N)/ (nr_threads*nr_blocks*sizeof(u32))); return 0; } int main(void) { return test_inline_xsr(3,256,256,100000); }
2746f793051f10d439a91109eddc4503115197b8.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> int main() { hipDeviceProp_t devProp; hipGetDeviceProperties(&devProp, 0); std::cout << devProp.major * 10 + devProp.minor << ";" << devProp.multiProcessorCount; }
2746f793051f10d439a91109eddc4503115197b8.cu
#include <iostream> int main() { cudaDeviceProp devProp; cudaGetDeviceProperties(&devProp, 0); std::cout << devProp.major * 10 + devProp.minor << ";" << devProp.multiProcessorCount; }
f9681156b50c15e3bbf7bfdc3bd63a3efcd9aa4f.hip
// !!! This is a file automatically generated by hipify!!! // //#include "hip/hip_runtime.h" //#include "device_launch_parameters.h" // //#include <stdio.h> //#include <conio.h> // //hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size); // //__global__ void addKernel(int *c, const int *a, const int *b) //{ // int i = threadIdx.x; // c[i] = a[i] + b[i]; //} // //int main() //{ // const int arraySize = 5; // const int a[arraySize] = { 1, 2, 3, 4, 5 }; // const int b[arraySize] = { 10, 20, 30, 40, 50 }; // int c[arraySize] = { 0 }; // // // Add vectors in parallel. // hipError_t cudaStatus = addWithCuda(c, a, b, arraySize); // if (cudaStatus != hipSuccess) { // fprintf(stderr, "addWithCuda failed!"); // return 1; // } // // printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n", // c[0], c[1], c[2], c[3], c[4]); // // // hipDeviceReset must be called before exiting in order for profiling and // // tracing tools such as Nsight and Visual Profiler to show complete traces. // cudaStatus = hipDeviceReset(); // if (cudaStatus != hipSuccess) { // fprintf(stderr, "hipDeviceReset failed!"); // return 1; // } // _getch(); // return 0; //} // //// Helper function for using CUDA to add vectors in parallel. //hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size) //{ // int *dev_a = 0; // int *dev_b = 0; // int *dev_c = 0; // hipError_t cudaStatus; // // // Choose which GPU to run on, change this on a multi-GPU system. // cudaStatus = hipSetDevice(0); // if (cudaStatus != hipSuccess) { // fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?"); // goto Error; // } // // // Allocate GPU buffers for three vectors (two input, one output) . 
// cudaStatus = hipMalloc((void**)&dev_c, size * sizeof(int)); // if (cudaStatus != hipSuccess) { // fprintf(stderr, "hipMalloc failed!"); // goto Error; // } // // cudaStatus = hipMalloc((void**)&dev_a, size * sizeof(int)); // if (cudaStatus != hipSuccess) { // fprintf(stderr, "hipMalloc failed!"); // goto Error; // } // // cudaStatus = hipMalloc((void**)&dev_b, size * sizeof(int)); // if (cudaStatus != hipSuccess) { // fprintf(stderr, "hipMalloc failed!"); // goto Error; // } // // // Copy input vectors from host memory to GPU buffers. // cudaStatus = hipMemcpy(dev_a, a, size * sizeof(int), hipMemcpyHostToDevice); // if (cudaStatus != hipSuccess) { // fprintf(stderr, "hipMemcpy failed!"); // goto Error; // } // // cudaStatus = hipMemcpy(dev_b, b, size * sizeof(int), hipMemcpyHostToDevice); // if (cudaStatus != hipSuccess) { // fprintf(stderr, "hipMemcpy failed!"); // goto Error; // } // // // Launch a kernel on the GPU with one thread for each element. // addKernel<<<1, size>>>(dev_c, dev_a, dev_b); // // // Check for any errors launching the kernel // cudaStatus = hipGetLastError(); // if (cudaStatus != hipSuccess) { // fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus)); // goto Error; // } // // // hipDeviceSynchronize waits for the kernel to finish, and returns // // any errors encountered during the launch. // cudaStatus = hipDeviceSynchronize(); // if (cudaStatus != hipSuccess) { // fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); // goto Error; // } // // // Copy output vector from GPU buffer to host memory. // cudaStatus = hipMemcpy(c, dev_c, size * sizeof(int), hipMemcpyDeviceToHost); // if (cudaStatus != hipSuccess) { // fprintf(stderr, "hipMemcpy failed!"); // goto Error; // } // // // //Error: // hipFree(dev_c); // hipFree(dev_a); // hipFree(dev_b); // // return cudaStatus; //}
f9681156b50c15e3bbf7bfdc3bd63a3efcd9aa4f.cu
// //#include "cuda_runtime.h" //#include "device_launch_parameters.h" // //#include <stdio.h> //#include <conio.h> // //cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size); // //__global__ void addKernel(int *c, const int *a, const int *b) //{ // int i = threadIdx.x; // c[i] = a[i] + b[i]; //} // //int main() //{ // const int arraySize = 5; // const int a[arraySize] = { 1, 2, 3, 4, 5 }; // const int b[arraySize] = { 10, 20, 30, 40, 50 }; // int c[arraySize] = { 0 }; // // // Add vectors in parallel. // cudaError_t cudaStatus = addWithCuda(c, a, b, arraySize); // if (cudaStatus != cudaSuccess) { // fprintf(stderr, "addWithCuda failed!"); // return 1; // } // // printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n", // c[0], c[1], c[2], c[3], c[4]); // // // cudaDeviceReset must be called before exiting in order for profiling and // // tracing tools such as Nsight and Visual Profiler to show complete traces. // cudaStatus = cudaDeviceReset(); // if (cudaStatus != cudaSuccess) { // fprintf(stderr, "cudaDeviceReset failed!"); // return 1; // } // _getch(); // return 0; //} // //// Helper function for using CUDA to add vectors in parallel. //cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size) //{ // int *dev_a = 0; // int *dev_b = 0; // int *dev_c = 0; // cudaError_t cudaStatus; // // // Choose which GPU to run on, change this on a multi-GPU system. // cudaStatus = cudaSetDevice(0); // if (cudaStatus != cudaSuccess) { // fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?"); // goto Error; // } // // // Allocate GPU buffers for three vectors (two input, one output) . 
// cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int)); // if (cudaStatus != cudaSuccess) { // fprintf(stderr, "cudaMalloc failed!"); // goto Error; // } // // cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int)); // if (cudaStatus != cudaSuccess) { // fprintf(stderr, "cudaMalloc failed!"); // goto Error; // } // // cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int)); // if (cudaStatus != cudaSuccess) { // fprintf(stderr, "cudaMalloc failed!"); // goto Error; // } // // // Copy input vectors from host memory to GPU buffers. // cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice); // if (cudaStatus != cudaSuccess) { // fprintf(stderr, "cudaMemcpy failed!"); // goto Error; // } // // cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice); // if (cudaStatus != cudaSuccess) { // fprintf(stderr, "cudaMemcpy failed!"); // goto Error; // } // // // Launch a kernel on the GPU with one thread for each element. // addKernel<<<1, size>>>(dev_c, dev_a, dev_b); // // // Check for any errors launching the kernel // cudaStatus = cudaGetLastError(); // if (cudaStatus != cudaSuccess) { // fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus)); // goto Error; // } // // // cudaDeviceSynchronize waits for the kernel to finish, and returns // // any errors encountered during the launch. // cudaStatus = cudaDeviceSynchronize(); // if (cudaStatus != cudaSuccess) { // fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); // goto Error; // } // // // Copy output vector from GPU buffer to host memory. // cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost); // if (cudaStatus != cudaSuccess) { // fprintf(stderr, "cudaMemcpy failed!"); // goto Error; // } // // // //Error: // cudaFree(dev_c); // cudaFree(dev_a); // cudaFree(dev_b); // // return cudaStatus; //}
897723015c1d16cf7b63c9ad8858784390729fe0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <string.h> #include "utilities.h" #include "bKernels.cuh" #include "bCommon.h" #include "bSimulator.h" #include "bRenderer.h" #define OPPOSITE(j) ((j) < 5) ? (((j) - 1) + 2) % 4 + 1 : (((j) - 5) + 2) % 4 + 5 __device__ inline bool inside(long long int x, long long int y, unsigned long long int maxX, unsigned long long int maxY){ return (x >= 0 && x < maxX && y >= 0 && y < maxY); } __global__ void cudaComputeVelocity(bSimulator* sim) { unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= sim->dimX || y >= sim->dimY) return; unsigned long long int elementIdx = y * sim->dimX + x; node& n = *(sim->nodes + elementIdx); if (n.ntype == nodeType::BASE) { float macroVel[2]; float density = sum(n.newDensities, 9); if (density > 0.f) { matMul(n.newDensities, sim->speeds, macroVel, 1, 9, 2); scalarProd((float) sim->c / density, macroVel, macroVel, 2); n.vel = { macroVel[0], macroVel[1] }; } else { n.vel = {0.f, 0.f}; } } } __global__ void cudaComputeEquilibrium(bSimulator* sim) { unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= sim->dimX || y >= sim->dimY) return; unsigned long long int elementIdx = y * sim->dimX + x; node& n = *(sim->nodes + elementIdx); if (n.ntype == nodeType::BASE) { float density = sum(n.newDensities, 9); float macroVel[2] = { n.vel.x, n.vel.y }; for (auto j = 0; j < 9; j++) { float dotProd = dot(&sim->speeds[2 * j], macroVel, 2); n.eqDensities[j] = density * sim->weights[j] * (1.f + 3.f * dotProd + 9.f * (pow(dotProd, 2)) / 2.f - 3.f * dot(macroVel, macroVel, 2) / 2.f); } } } __global__ void cudaComputeNew(bSimulator* sim) { unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= sim->dimX || y >= 
sim->dimY) return; unsigned long long int elementIdx = y * sim->dimX + x; node& n = *(sim->nodes + elementIdx); if (n.ntype == nodeType::BASE) { float newDensities[9]; vecSub(n.eqDensities, n.newDensities, newDensities, 9); scalarProd((float)sim->viscosity, newDensities, newDensities, 9); vecSum(newDensities, n.newDensities, newDensities, 9); memcpy(n.densities, newDensities, 9 * sizeof(float)); memset(n.newDensities, 0.f, 9 * sizeof(float)); } } __global__ void cudaStream(bSimulator* sim) { unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= sim->dimX || y >= sim->dimY) return; unsigned long long int elementIdx = y * sim->dimX + x; node& n = *(sim->nodes + elementIdx); switch (n.ntype) { case nodeType::BASE: { for (int j = 0; j < 9; j++) { int dx = sim->directions[j][0]; int dy = sim->directions[j][1]; if (dx == 0 && dy == 0) { n.newDensities[j] = n.densities[j]; continue; } int opposite = OPPOSITE(j); long long int newX = n.x + dx; long long int newY = n.y + dy; node* nn = nullptr; if (!inside(newX, newY, sim->dimX, sim->dimY)) { switch (sim->doAtEdge) { case bSimulator::edgeBehaviour::LOOP: { newX = (newX + sim->dimX) % sim->dimX; newY = (newY + sim->dimY) % sim->dimY; nn = (sim->nodes + newY * sim->dimX + newX); break; } case bSimulator::edgeBehaviour::EXIT: { n.newDensities[j] = 0.f; continue; } case bSimulator::edgeBehaviour::WALL: { goto wall; } } } else { nn = (sim->nodes + newY * sim->dimX + newX); } switch (nn->ntype) { case nodeType::BASE: { n.newDensities[opposite] += nn->densities[opposite]; break; } case nodeType::WALL: { wall: n.newDensities[opposite] += n.densities[j]; break; } case nodeType::SOURCE: { n.newDensities[opposite] += (nn->densities[opposite] + n.densities[j]); break; } case nodeType::SINK: { n.newDensities[j] = 0; break; } } } break; } case nodeType::WALL: { break; } } } __global__ void cudaUpdateGraphics(bRenderer* simR) { unsigned int x = blockIdx.x * blockDim.x + 
threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= simR->sim->dimX || y >= simR->sim->dimY) return; unsigned long long int elementIdx = y * simR->sim->dimX + x; node& n = *(simR->sim->nodes + elementIdx); bRenderer::displayNode& dn = *(simR->cudaGLNodes + elementIdx); switch (n.ntype) { case nodeType::BASE: { float totalDensity = sum(&n.densities[0], 9); dn.density = 1.f; float newSpeeds[2] = { n.vel.x, n.vel.y }; double mag = magnitude(newSpeeds, 2); if (mag > 0) { dn.vel.x = mapNumber<float>(newSpeeds[0] / mag, -1.f, 1.f, 0.f, 1.f); dn.vel.y = mapNumber<float>(newSpeeds[1] / mag, -1.f, 1.f, 0.f, 1.f); } else { dn.vel.x = mapNumber<float>(0.f, -1.f, 1.f, 0.f, 1.f); dn.vel.y = mapNumber<float>(0.f, -1.f, 1.f, 0.f, 1.f); } break; } case nodeType::WALL: { dn.density = 0.f; dn.vel = { 0.f, 0.f }; break; } case nodeType::SOURCE: { dn.density = 1.f; dn.vel = { 1.f, 0.f }; break; } case nodeType::SINK: { dn.density = 1.f; dn.vel = { 0.f, 1.f }; break; } case nodeType::FAN: { dn.density = 1.f; dn.vel = { .7f, .7f }; break; } } } extern "C" { void computeVelocity(bSimulator* sim) { cudaComputeVelocity << < sim->gridDim, sim->blockDim >> > (sim); } void computeEquilibrium(bSimulator* sim){ cudaComputeEquilibrium << < sim->gridDim, sim->blockDim >> > (sim); } void computeNew(bSimulator* sim){ cudaComputeNew << < sim->gridDim, sim->blockDim >> > (sim); } void stream(bSimulator* sim){ hipLaunchKernelGGL(( cudaStream) , dim3(sim->gridDim), dim3(sim->blockDim) , 0, 0, sim); } void updateGraphics(bRenderer* simR) { cudaUpdateGraphics << < simR->sim->gridDim, simR->sim->blockDim >> > (simR); } }
897723015c1d16cf7b63c9ad8858784390729fe0.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <string.h> #include "utilities.h" #include "bKernels.cuh" #include "bCommon.h" #include "bSimulator.h" #include "bRenderer.h" #define OPPOSITE(j) ((j) < 5) ? (((j) - 1) + 2) % 4 + 1 : (((j) - 5) + 2) % 4 + 5 __device__ inline bool inside(long long int x, long long int y, unsigned long long int maxX, unsigned long long int maxY){ return (x >= 0 && x < maxX && y >= 0 && y < maxY); } __global__ void cudaComputeVelocity(bSimulator* sim) { unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= sim->dimX || y >= sim->dimY) return; unsigned long long int elementIdx = y * sim->dimX + x; node& n = *(sim->nodes + elementIdx); if (n.ntype == nodeType::BASE) { float macroVel[2]; float density = sum(n.newDensities, 9); if (density > 0.f) { matMul(n.newDensities, sim->speeds, macroVel, 1, 9, 2); scalarProd((float) sim->c / density, macroVel, macroVel, 2); n.vel = { macroVel[0], macroVel[1] }; } else { n.vel = {0.f, 0.f}; } } } __global__ void cudaComputeEquilibrium(bSimulator* sim) { unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= sim->dimX || y >= sim->dimY) return; unsigned long long int elementIdx = y * sim->dimX + x; node& n = *(sim->nodes + elementIdx); if (n.ntype == nodeType::BASE) { float density = sum(n.newDensities, 9); float macroVel[2] = { n.vel.x, n.vel.y }; for (auto j = 0; j < 9; j++) { float dotProd = dot(&sim->speeds[2 * j], macroVel, 2); n.eqDensities[j] = density * sim->weights[j] * (1.f + 3.f * dotProd + 9.f * (pow(dotProd, 2)) / 2.f - 3.f * dot(macroVel, macroVel, 2) / 2.f); } } } __global__ void cudaComputeNew(bSimulator* sim) { unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= sim->dimX || y >= sim->dimY) return; unsigned long long int elementIdx = y * 
sim->dimX + x; node& n = *(sim->nodes + elementIdx); if (n.ntype == nodeType::BASE) { float newDensities[9]; vecSub(n.eqDensities, n.newDensities, newDensities, 9); scalarProd((float)sim->viscosity, newDensities, newDensities, 9); vecSum(newDensities, n.newDensities, newDensities, 9); memcpy(n.densities, newDensities, 9 * sizeof(float)); memset(n.newDensities, 0.f, 9 * sizeof(float)); } } __global__ void cudaStream(bSimulator* sim) { unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= sim->dimX || y >= sim->dimY) return; unsigned long long int elementIdx = y * sim->dimX + x; node& n = *(sim->nodes + elementIdx); switch (n.ntype) { case nodeType::BASE: { for (int j = 0; j < 9; j++) { int dx = sim->directions[j][0]; int dy = sim->directions[j][1]; if (dx == 0 && dy == 0) { n.newDensities[j] = n.densities[j]; continue; } int opposite = OPPOSITE(j); long long int newX = n.x + dx; long long int newY = n.y + dy; node* nn = nullptr; if (!inside(newX, newY, sim->dimX, sim->dimY)) { switch (sim->doAtEdge) { case bSimulator::edgeBehaviour::LOOP: { newX = (newX + sim->dimX) % sim->dimX; newY = (newY + sim->dimY) % sim->dimY; nn = (sim->nodes + newY * sim->dimX + newX); break; } case bSimulator::edgeBehaviour::EXIT: { n.newDensities[j] = 0.f; continue; } case bSimulator::edgeBehaviour::WALL: { goto wall; } } } else { nn = (sim->nodes + newY * sim->dimX + newX); } switch (nn->ntype) { case nodeType::BASE: { n.newDensities[opposite] += nn->densities[opposite]; break; } case nodeType::WALL: { wall: n.newDensities[opposite] += n.densities[j]; break; } case nodeType::SOURCE: { n.newDensities[opposite] += (nn->densities[opposite] + n.densities[j]); break; } case nodeType::SINK: { n.newDensities[j] = 0; break; } } } break; } case nodeType::WALL: { break; } } } __global__ void cudaUpdateGraphics(bRenderer* simR) { unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + 
threadIdx.y; if (x >= simR->sim->dimX || y >= simR->sim->dimY) return; unsigned long long int elementIdx = y * simR->sim->dimX + x; node& n = *(simR->sim->nodes + elementIdx); bRenderer::displayNode& dn = *(simR->cudaGLNodes + elementIdx); switch (n.ntype) { case nodeType::BASE: { float totalDensity = sum(&n.densities[0], 9); dn.density = 1.f; float newSpeeds[2] = { n.vel.x, n.vel.y }; double mag = magnitude(newSpeeds, 2); if (mag > 0) { dn.vel.x = mapNumber<float>(newSpeeds[0] / mag, -1.f, 1.f, 0.f, 1.f); dn.vel.y = mapNumber<float>(newSpeeds[1] / mag, -1.f, 1.f, 0.f, 1.f); } else { dn.vel.x = mapNumber<float>(0.f, -1.f, 1.f, 0.f, 1.f); dn.vel.y = mapNumber<float>(0.f, -1.f, 1.f, 0.f, 1.f); } break; } case nodeType::WALL: { dn.density = 0.f; dn.vel = { 0.f, 0.f }; break; } case nodeType::SOURCE: { dn.density = 1.f; dn.vel = { 1.f, 0.f }; break; } case nodeType::SINK: { dn.density = 1.f; dn.vel = { 0.f, 1.f }; break; } case nodeType::FAN: { dn.density = 1.f; dn.vel = { .7f, .7f }; break; } } } extern "C" { void computeVelocity(bSimulator* sim) { cudaComputeVelocity << < sim->gridDim, sim->blockDim >> > (sim); } void computeEquilibrium(bSimulator* sim){ cudaComputeEquilibrium << < sim->gridDim, sim->blockDim >> > (sim); } void computeNew(bSimulator* sim){ cudaComputeNew << < sim->gridDim, sim->blockDim >> > (sim); } void stream(bSimulator* sim){ cudaStream <<< sim->gridDim, sim->blockDim >>> (sim); } void updateGraphics(bRenderer* simR) { cudaUpdateGraphics << < simR->sim->gridDim, simR->sim->blockDim >> > (simR); } }
221cd26d75f1a4ec2fa6e9c07ffbb2699d60b249.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // Created by xuyufeng1 on 2021/6/25. // #include "affineGridLayer.h" #include <assert.h> #include <math.h> namespace Tn { template<typename T> void write(char* &buffer, const T& val) { *reinterpret_cast<T*>(buffer) = val; buffer += sizeof(T); } template<typename T> void read(const char* &buffer, T& val) { val = *reinterpret_cast<const T*>(buffer); buffer += sizeof(T); } } namespace nvinfer1 { AffineGridPlugin::AffineGridPlugin(int outputHeight, int outputWidth, int outputChannel) { mHeight = outputHeight; mWidth = outputWidth; mChannel = outputChannel; } AffineGridPlugin::~AffineGridPlugin() { } AffineGridPlugin::AffineGridPlugin(const void *data, size_t length) { using namespace Tn; const char* d = reinterpret_cast<const char*>(data), *a = d; read(d, mHeight); read(d, mWidth); read(d, mChannel); read(d, mThreadCount); assert(d == a + length); } void AffineGridPlugin::serialize(void *buffer) const { using namespace Tn; char* d = static_cast<char*>(buffer), *a = d; write(d, mHeight); write(d, mWidth); write(d, mChannel); write(d, mThreadCount); assert(d == a + getSerializationSize()); } size_t AffineGridPlugin::getSerializationSize() const { return sizeof(mHeight) + sizeof(mWidth) + sizeof(mChannel) + sizeof(mThreadCount); } int AffineGridPlugin::initialize() { return 0; } Dims AffineGridPlugin::getOutputDimensions(int index, const Dims* inputs, int nbInputDims) { return Dims3(mHeight, mWidth, mChannel); }; void AffineGridPlugin::setPluginNamespace(const char *pluginNamespace) { mPluginNamespace = pluginNamespace; } const char* AffineGridPlugin::getPluginNamespace() const { return mPluginNamespace; } DataType AffineGridPlugin::getOutputDataType(int index, const nvinfer1::DataType* inputTypes, int nbInputs) const { return DataType::kFLOAT; } bool AffineGridPlugin::isOutputBroadcastAcrossBatch(int outputIndex, const bool *inputIsBroadcasted, int nbInputs) const { return false; 
} bool AffineGridPlugin::canBroadcastInputAcrossBatch(int inputIndex) const { return false; } void AffineGridPlugin::configurePlugin(const PluginTensorDesc* in, int nbInput, const PluginTensorDesc* out, int nbOutput) { assert(nbInput == 1); assert(nbOutput == 1); } void AffineGridPlugin::attachToContext(cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator) { } void AffineGridPlugin::detachFromContext() { } const char* AffineGridPlugin::getPluginType() const { return "AffineGridLayer_TRT"; } const char* AffineGridPlugin::getPluginVersion() const { return "1"; } void AffineGridPlugin::destroy() { delete this; } // Clone the plugin IPluginV2IOExt* AffineGridPlugin::clone() const { AffineGridPlugin *p = new AffineGridPlugin(mHeight, mWidth, mChannel); p->setPluginNamespace(mPluginNamespace); return p; } __global__ void affineGrid(const float *input, float *output, const int height, const int width) { float* grid; grid = new float[height * width * 3]; for (int i = 0; i < width; i++) { float tmp = -1.0 + (2.0 * i + 1.0) / width; for (int j = 0; j < height; j++) { grid[i * 3 + j * 3 * width] = tmp; // x } } for (int i = 0; i < height; i++) { float tmp = -1.0 + (2.0 * i + 1.0) / height; for (int j = 0; j < width; j++) { grid[i * width * 3 + j * 3 + 1] = tmp; // y grid[i * width * 3 + j * 3 + 2] = 1.0; // z = 1.0 } } float theta[6] = {0}; theta[0] = 1.0f / (1.0f + exp(-input[0])); theta[3] = 1.0f / (1.0f + exp(-input[1])); theta[4] = tanh(input[2]); theta[5] = tanh(input[3]); for (int i = 0; i < width * height; i++) { for (int j = 0; j < 2; j++) { float sum = 0; for (int k = 0; k < 3; k++) { sum += grid[i * 3 + k] * theta[k * 2 + j]; } output[i * 2 + j] = sum; } } delete[] grid; } void AffineGridPlugin::forwardGpu(const float* const* inputs, float* output, hipStream_t stream, int batchSize) { hipLaunchKernelGGL(( affineGrid), dim3(1), dim3(1), 0, 0, inputs[0], output, mHeight, mWidth); } int AffineGridPlugin::enqueue(int batchSize, const 
void* const *inputs, void** outputs, void* workspace, hipStream_t stream) { forwardGpu((const float* const*)inputs, (float*)outputs[0], stream, batchSize); return 0; } PluginFieldCollection AffineGridPluginCreator::mFC{}; std::vector<PluginField> AffineGridPluginCreator::mPluginAttributes; AffineGridPluginCreator::AffineGridPluginCreator() { mPluginAttributes.clear(); mFC.nbFields = mPluginAttributes.size(); mFC.fields = mPluginAttributes.data(); } const char* AffineGridPluginCreator::getPluginName() const { return "AffineGridLayer_TRT"; } const char* AffineGridPluginCreator::getPluginVersion() const { return "1"; } const PluginFieldCollection* AffineGridPluginCreator::getFieldNames() { return &mFC; } IPluginV2IOExt* AffineGridPluginCreator::createPlugin(const char* name, const PluginFieldCollection* fc) { assert(fc->nbFields == 1); assert(strcmp(fc->fields[0].name, "featureMapShape") == 0); int *p_featureMapShape = (int*)(fc->fields[0].data); int outputHeight = p_featureMapShape[0]; int outputWidth = p_featureMapShape[1]; int outputChannel = p_featureMapShape[2]; AffineGridPlugin* obj = new AffineGridPlugin(outputHeight, outputWidth, outputChannel); obj->setPluginNamespace(mNamespace.c_str()); return obj; } IPluginV2IOExt* AffineGridPluginCreator::deserializePlugin(const char *name, const void *serialData, size_t serialLength) { AffineGridPlugin* obj = new AffineGridPlugin(serialData, serialLength); obj->setPluginNamespace(mNamespace.c_str()); return obj; } }
221cd26d75f1a4ec2fa6e9c07ffbb2699d60b249.cu
// // Created by xuyufeng1 on 2021/6/25. // #include "affineGridLayer.h" #include <assert.h> #include <math.h> namespace Tn { template<typename T> void write(char* &buffer, const T& val) { *reinterpret_cast<T*>(buffer) = val; buffer += sizeof(T); } template<typename T> void read(const char* &buffer, T& val) { val = *reinterpret_cast<const T*>(buffer); buffer += sizeof(T); } } namespace nvinfer1 { AffineGridPlugin::AffineGridPlugin(int outputHeight, int outputWidth, int outputChannel) { mHeight = outputHeight; mWidth = outputWidth; mChannel = outputChannel; } AffineGridPlugin::~AffineGridPlugin() { } AffineGridPlugin::AffineGridPlugin(const void *data, size_t length) { using namespace Tn; const char* d = reinterpret_cast<const char*>(data), *a = d; read(d, mHeight); read(d, mWidth); read(d, mChannel); read(d, mThreadCount); assert(d == a + length); } void AffineGridPlugin::serialize(void *buffer) const { using namespace Tn; char* d = static_cast<char*>(buffer), *a = d; write(d, mHeight); write(d, mWidth); write(d, mChannel); write(d, mThreadCount); assert(d == a + getSerializationSize()); } size_t AffineGridPlugin::getSerializationSize() const { return sizeof(mHeight) + sizeof(mWidth) + sizeof(mChannel) + sizeof(mThreadCount); } int AffineGridPlugin::initialize() { return 0; } Dims AffineGridPlugin::getOutputDimensions(int index, const Dims* inputs, int nbInputDims) { return Dims3(mHeight, mWidth, mChannel); }; void AffineGridPlugin::setPluginNamespace(const char *pluginNamespace) { mPluginNamespace = pluginNamespace; } const char* AffineGridPlugin::getPluginNamespace() const { return mPluginNamespace; } DataType AffineGridPlugin::getOutputDataType(int index, const nvinfer1::DataType* inputTypes, int nbInputs) const { return DataType::kFLOAT; } bool AffineGridPlugin::isOutputBroadcastAcrossBatch(int outputIndex, const bool *inputIsBroadcasted, int nbInputs) const { return false; } bool AffineGridPlugin::canBroadcastInputAcrossBatch(int inputIndex) const { return 
false; } void AffineGridPlugin::configurePlugin(const PluginTensorDesc* in, int nbInput, const PluginTensorDesc* out, int nbOutput) { assert(nbInput == 1); assert(nbOutput == 1); } void AffineGridPlugin::attachToContext(cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator) { } void AffineGridPlugin::detachFromContext() { } const char* AffineGridPlugin::getPluginType() const { return "AffineGridLayer_TRT"; } const char* AffineGridPlugin::getPluginVersion() const { return "1"; } void AffineGridPlugin::destroy() { delete this; } // Clone the plugin IPluginV2IOExt* AffineGridPlugin::clone() const { AffineGridPlugin *p = new AffineGridPlugin(mHeight, mWidth, mChannel); p->setPluginNamespace(mPluginNamespace); return p; } __global__ void affineGrid(const float *input, float *output, const int height, const int width) { float* grid; grid = new float[height * width * 3]; for (int i = 0; i < width; i++) { float tmp = -1.0 + (2.0 * i + 1.0) / width; for (int j = 0; j < height; j++) { grid[i * 3 + j * 3 * width] = tmp; // x } } for (int i = 0; i < height; i++) { float tmp = -1.0 + (2.0 * i + 1.0) / height; for (int j = 0; j < width; j++) { grid[i * width * 3 + j * 3 + 1] = tmp; // y grid[i * width * 3 + j * 3 + 2] = 1.0; // z = 1.0 } } float theta[6] = {0}; theta[0] = 1.0f / (1.0f + exp(-input[0])); theta[3] = 1.0f / (1.0f + exp(-input[1])); theta[4] = tanh(input[2]); theta[5] = tanh(input[3]); for (int i = 0; i < width * height; i++) { for (int j = 0; j < 2; j++) { float sum = 0; for (int k = 0; k < 3; k++) { sum += grid[i * 3 + k] * theta[k * 2 + j]; } output[i * 2 + j] = sum; } } delete[] grid; } void AffineGridPlugin::forwardGpu(const float* const* inputs, float* output, cudaStream_t stream, int batchSize) { affineGrid<<<1, 1>>>(inputs[0], output, mHeight, mWidth); } int AffineGridPlugin::enqueue(int batchSize, const void* const *inputs, void** outputs, void* workspace, cudaStream_t stream) { forwardGpu((const float* const*)inputs, 
(float*)outputs[0], stream, batchSize); return 0; } PluginFieldCollection AffineGridPluginCreator::mFC{}; std::vector<PluginField> AffineGridPluginCreator::mPluginAttributes; AffineGridPluginCreator::AffineGridPluginCreator() { mPluginAttributes.clear(); mFC.nbFields = mPluginAttributes.size(); mFC.fields = mPluginAttributes.data(); } const char* AffineGridPluginCreator::getPluginName() const { return "AffineGridLayer_TRT"; } const char* AffineGridPluginCreator::getPluginVersion() const { return "1"; } const PluginFieldCollection* AffineGridPluginCreator::getFieldNames() { return &mFC; } IPluginV2IOExt* AffineGridPluginCreator::createPlugin(const char* name, const PluginFieldCollection* fc) { assert(fc->nbFields == 1); assert(strcmp(fc->fields[0].name, "featureMapShape") == 0); int *p_featureMapShape = (int*)(fc->fields[0].data); int outputHeight = p_featureMapShape[0]; int outputWidth = p_featureMapShape[1]; int outputChannel = p_featureMapShape[2]; AffineGridPlugin* obj = new AffineGridPlugin(outputHeight, outputWidth, outputChannel); obj->setPluginNamespace(mNamespace.c_str()); return obj; } IPluginV2IOExt* AffineGridPluginCreator::deserializePlugin(const char *name, const void *serialData, size_t serialLength) { AffineGridPlugin* obj = new AffineGridPlugin(serialData, serialLength); obj->setPluginNamespace(mNamespace.c_str()); return obj; } }
a9aedd116dc31d5c77e18c400d157773859fb74a.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "vec_ceilf.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; size_t n = XSIZE*YSIZE; float *result = NULL; hipMalloc(&result, XSIZE*YSIZE); float *x = NULL; hipMalloc(&x, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( vec_ceilf), dim3(gridBlock),dim3(threadBlock), 0, 0, n,result,x); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( vec_ceilf), dim3(gridBlock),dim3(threadBlock), 0, 0, n,result,x); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( vec_ceilf), dim3(gridBlock),dim3(threadBlock), 0, 0, n,result,x); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << 
endl; } }}
a9aedd116dc31d5c77e18c400d157773859fb74a.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "vec_ceilf.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; size_t n = XSIZE*YSIZE; float *result = NULL; cudaMalloc(&result, XSIZE*YSIZE); float *x = NULL; cudaMalloc(&x, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); vec_ceilf<<<gridBlock,threadBlock>>>(n,result,x); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { vec_ceilf<<<gridBlock,threadBlock>>>(n,result,x); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { vec_ceilf<<<gridBlock,threadBlock>>>(n,result,x); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
b5759ada61b843f6c216685f19059f16de9f9d7d.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hip/hip_fp16.h> #include <thrust/device_ptr.h> #include <thrust/transform.h> #include <thrust/functional.h> #include <thrust/system/hip/execution_policy.h> #include "AttentionMask.h" namespace cuBERT { template<typename T> __host__ void _not(const int8_t *in, T *out, const int N, void *stream) { thrust::device_ptr<const int8_t> in_ptr(in); thrust::device_ptr<T> out_ptr(out); thrust::transform(thrust::hip::par.on((hipStream_t) stream), in_ptr, in_ptr + N, out_ptr, thrust::logical_not<const int8_t>()); } template __host__ void _not<float>(const int8_t *in, float *out, const int N, void *stream); template __host__ void _not<half >(const int8_t *in, half *out, const int N, void *stream); }
b5759ada61b843f6c216685f19059f16de9f9d7d.cu
#include <cuda_runtime.h> #include <cuda_fp16.h> #include <thrust/device_ptr.h> #include <thrust/transform.h> #include <thrust/functional.h> #include <thrust/system/cuda/execution_policy.h> #include "AttentionMask.h" namespace cuBERT { template<typename T> __host__ void _not(const int8_t *in, T *out, const int N, void *stream) { thrust::device_ptr<const int8_t> in_ptr(in); thrust::device_ptr<T> out_ptr(out); thrust::transform(thrust::cuda::par.on((cudaStream_t) stream), in_ptr, in_ptr + N, out_ptr, thrust::logical_not<const int8_t>()); } template __host__ void _not<float>(const int8_t *in, float *out, const int N, void *stream); template __host__ void _not<half >(const int8_t *in, half *out, const int N, void *stream); }
c06cdc85b87d948a1da833bd176e5c87c70f0ffa.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "auxiliary.hpp" #include "NetStruct.hpp" #include "aes_locl.hpp" extern "C" { __device__ static const u32 Te0[256] = { 0xc66363a5U, 0xf87c7c84U, 0xee777799U, 0xf67b7b8dU, 0xfff2f20dU, 0xd66b6bbdU, 0xde6f6fb1U, 0x91c5c554U, 0x60303050U, 0x02010103U, 0xce6767a9U, 0x562b2b7dU, 0xe7fefe19U, 0xb5d7d762U, 0x4dababe6U, 0xec76769aU, 0x8fcaca45U, 0x1f82829dU, 0x89c9c940U, 0xfa7d7d87U, 0xeffafa15U, 0xb25959ebU, 0x8e4747c9U, 0xfbf0f00bU, 0x41adadecU, 0xb3d4d467U, 0x5fa2a2fdU, 0x45afafeaU, 0x239c9cbfU, 0x53a4a4f7U, 0xe4727296U, 0x9bc0c05bU, 0x75b7b7c2U, 0xe1fdfd1cU, 0x3d9393aeU, 0x4c26266aU, 0x6c36365aU, 0x7e3f3f41U, 0xf5f7f702U, 0x83cccc4fU, 0x6834345cU, 0x51a5a5f4U, 0xd1e5e534U, 0xf9f1f108U, 0xe2717193U, 0xabd8d873U, 0x62313153U, 0x2a15153fU, 0x0804040cU, 0x95c7c752U, 0x46232365U, 0x9dc3c35eU, 0x30181828U, 0x379696a1U, 0x0a05050fU, 0x2f9a9ab5U, 0x0e070709U, 0x24121236U, 0x1b80809bU, 0xdfe2e23dU, 0xcdebeb26U, 0x4e272769U, 0x7fb2b2cdU, 0xea75759fU, 0x1209091bU, 0x1d83839eU, 0x582c2c74U, 0x341a1a2eU, 0x361b1b2dU, 0xdc6e6eb2U, 0xb45a5aeeU, 0x5ba0a0fbU, 0xa45252f6U, 0x763b3b4dU, 0xb7d6d661U, 0x7db3b3ceU, 0x5229297bU, 0xdde3e33eU, 0x5e2f2f71U, 0x13848497U, 0xa65353f5U, 0xb9d1d168U, 0x00000000U, 0xc1eded2cU, 0x40202060U, 0xe3fcfc1fU, 0x79b1b1c8U, 0xb65b5bedU, 0xd46a6abeU, 0x8dcbcb46U, 0x67bebed9U, 0x7239394bU, 0x944a4adeU, 0x984c4cd4U, 0xb05858e8U, 0x85cfcf4aU, 0xbbd0d06bU, 0xc5efef2aU, 0x4faaaae5U, 0xedfbfb16U, 0x864343c5U, 0x9a4d4dd7U, 0x66333355U, 0x11858594U, 0x8a4545cfU, 0xe9f9f910U, 0x04020206U, 0xfe7f7f81U, 0xa05050f0U, 0x783c3c44U, 0x259f9fbaU, 0x4ba8a8e3U, 0xa25151f3U, 0x5da3a3feU, 0x804040c0U, 0x058f8f8aU, 0x3f9292adU, 0x219d9dbcU, 0x70383848U, 0xf1f5f504U, 0x63bcbcdfU, 0x77b6b6c1U, 0xafdada75U, 0x42212163U, 0x20101030U, 0xe5ffff1aU, 0xfdf3f30eU, 0xbfd2d26dU, 0x81cdcd4cU, 0x180c0c14U, 0x26131335U, 0xc3ecec2fU, 0xbe5f5fe1U, 0x359797a2U, 0x884444ccU, 0x2e171739U, 0x93c4c457U, 
0x55a7a7f2U, 0xfc7e7e82U, 0x7a3d3d47U, 0xc86464acU, 0xba5d5de7U, 0x3219192bU, 0xe6737395U, 0xc06060a0U, 0x19818198U, 0x9e4f4fd1U, 0xa3dcdc7fU, 0x44222266U, 0x542a2a7eU, 0x3b9090abU, 0x0b888883U, 0x8c4646caU, 0xc7eeee29U, 0x6bb8b8d3U, 0x2814143cU, 0xa7dede79U, 0xbc5e5ee2U, 0x160b0b1dU, 0xaddbdb76U, 0xdbe0e03bU, 0x64323256U, 0x743a3a4eU, 0x140a0a1eU, 0x924949dbU, 0x0c06060aU, 0x4824246cU, 0xb85c5ce4U, 0x9fc2c25dU, 0xbdd3d36eU, 0x43acacefU, 0xc46262a6U, 0x399191a8U, 0x319595a4U, 0xd3e4e437U, 0xf279798bU, 0xd5e7e732U, 0x8bc8c843U, 0x6e373759U, 0xda6d6db7U, 0x018d8d8cU, 0xb1d5d564U, 0x9c4e4ed2U, 0x49a9a9e0U, 0xd86c6cb4U, 0xac5656faU, 0xf3f4f407U, 0xcfeaea25U, 0xca6565afU, 0xf47a7a8eU, 0x47aeaee9U, 0x10080818U, 0x6fbabad5U, 0xf0787888U, 0x4a25256fU, 0x5c2e2e72U, 0x381c1c24U, 0x57a6a6f1U, 0x73b4b4c7U, 0x97c6c651U, 0xcbe8e823U, 0xa1dddd7cU, 0xe874749cU, 0x3e1f1f21U, 0x964b4bddU, 0x61bdbddcU, 0x0d8b8b86U, 0x0f8a8a85U, 0xe0707090U, 0x7c3e3e42U, 0x71b5b5c4U, 0xcc6666aaU, 0x904848d8U, 0x06030305U, 0xf7f6f601U, 0x1c0e0e12U, 0xc26161a3U, 0x6a35355fU, 0xae5757f9U, 0x69b9b9d0U, 0x17868691U, 0x99c1c158U, 0x3a1d1d27U, 0x279e9eb9U, 0xd9e1e138U, 0xebf8f813U, 0x2b9898b3U, 0x22111133U, 0xd26969bbU, 0xa9d9d970U, 0x078e8e89U, 0x339494a7U, 0x2d9b9bb6U, 0x3c1e1e22U, 0x15878792U, 0xc9e9e920U, 0x87cece49U, 0xaa5555ffU, 0x50282878U, 0xa5dfdf7aU, 0x038c8c8fU, 0x59a1a1f8U, 0x09898980U, 0x1a0d0d17U, 0x65bfbfdaU, 0xd7e6e631U, 0x844242c6U, 0xd06868b8U, 0x824141c3U, 0x299999b0U, 0x5a2d2d77U, 0x1e0f0f11U, 0x7bb0b0cbU, 0xa85454fcU, 0x6dbbbbd6U, 0x2c16163aU, }; __device__ static const u32 Te1[256] = { 0xa5c66363U, 0x84f87c7cU, 0x99ee7777U, 0x8df67b7bU, 0x0dfff2f2U, 0xbdd66b6bU, 0xb1de6f6fU, 0x5491c5c5U, 0x50603030U, 0x03020101U, 0xa9ce6767U, 0x7d562b2bU, 0x19e7fefeU, 0x62b5d7d7U, 0xe64dababU, 0x9aec7676U, 0x458fcacaU, 0x9d1f8282U, 0x4089c9c9U, 0x87fa7d7dU, 0x15effafaU, 0xebb25959U, 0xc98e4747U, 0x0bfbf0f0U, 0xec41adadU, 0x67b3d4d4U, 0xfd5fa2a2U, 0xea45afafU, 0xbf239c9cU, 0xf753a4a4U, 0x96e47272U, 
0x5b9bc0c0U, 0xc275b7b7U, 0x1ce1fdfdU, 0xae3d9393U, 0x6a4c2626U, 0x5a6c3636U, 0x417e3f3fU, 0x02f5f7f7U, 0x4f83ccccU, 0x5c683434U, 0xf451a5a5U, 0x34d1e5e5U, 0x08f9f1f1U, 0x93e27171U, 0x73abd8d8U, 0x53623131U, 0x3f2a1515U, 0x0c080404U, 0x5295c7c7U, 0x65462323U, 0x5e9dc3c3U, 0x28301818U, 0xa1379696U, 0x0f0a0505U, 0xb52f9a9aU, 0x090e0707U, 0x36241212U, 0x9b1b8080U, 0x3ddfe2e2U, 0x26cdebebU, 0x694e2727U, 0xcd7fb2b2U, 0x9fea7575U, 0x1b120909U, 0x9e1d8383U, 0x74582c2cU, 0x2e341a1aU, 0x2d361b1bU, 0xb2dc6e6eU, 0xeeb45a5aU, 0xfb5ba0a0U, 0xf6a45252U, 0x4d763b3bU, 0x61b7d6d6U, 0xce7db3b3U, 0x7b522929U, 0x3edde3e3U, 0x715e2f2fU, 0x97138484U, 0xf5a65353U, 0x68b9d1d1U, 0x00000000U, 0x2cc1ededU, 0x60402020U, 0x1fe3fcfcU, 0xc879b1b1U, 0xedb65b5bU, 0xbed46a6aU, 0x468dcbcbU, 0xd967bebeU, 0x4b723939U, 0xde944a4aU, 0xd4984c4cU, 0xe8b05858U, 0x4a85cfcfU, 0x6bbbd0d0U, 0x2ac5efefU, 0xe54faaaaU, 0x16edfbfbU, 0xc5864343U, 0xd79a4d4dU, 0x55663333U, 0x94118585U, 0xcf8a4545U, 0x10e9f9f9U, 0x06040202U, 0x81fe7f7fU, 0xf0a05050U, 0x44783c3cU, 0xba259f9fU, 0xe34ba8a8U, 0xf3a25151U, 0xfe5da3a3U, 0xc0804040U, 0x8a058f8fU, 0xad3f9292U, 0xbc219d9dU, 0x48703838U, 0x04f1f5f5U, 0xdf63bcbcU, 0xc177b6b6U, 0x75afdadaU, 0x63422121U, 0x30201010U, 0x1ae5ffffU, 0x0efdf3f3U, 0x6dbfd2d2U, 0x4c81cdcdU, 0x14180c0cU, 0x35261313U, 0x2fc3ececU, 0xe1be5f5fU, 0xa2359797U, 0xcc884444U, 0x392e1717U, 0x5793c4c4U, 0xf255a7a7U, 0x82fc7e7eU, 0x477a3d3dU, 0xacc86464U, 0xe7ba5d5dU, 0x2b321919U, 0x95e67373U, 0xa0c06060U, 0x98198181U, 0xd19e4f4fU, 0x7fa3dcdcU, 0x66442222U, 0x7e542a2aU, 0xab3b9090U, 0x830b8888U, 0xca8c4646U, 0x29c7eeeeU, 0xd36bb8b8U, 0x3c281414U, 0x79a7dedeU, 0xe2bc5e5eU, 0x1d160b0bU, 0x76addbdbU, 0x3bdbe0e0U, 0x56643232U, 0x4e743a3aU, 0x1e140a0aU, 0xdb924949U, 0x0a0c0606U, 0x6c482424U, 0xe4b85c5cU, 0x5d9fc2c2U, 0x6ebdd3d3U, 0xef43acacU, 0xa6c46262U, 0xa8399191U, 0xa4319595U, 0x37d3e4e4U, 0x8bf27979U, 0x32d5e7e7U, 0x438bc8c8U, 0x596e3737U, 0xb7da6d6dU, 0x8c018d8dU, 0x64b1d5d5U, 0xd29c4e4eU, 0xe049a9a9U, 
0xb4d86c6cU, 0xfaac5656U, 0x07f3f4f4U, 0x25cfeaeaU, 0xafca6565U, 0x8ef47a7aU, 0xe947aeaeU, 0x18100808U, 0xd56fbabaU, 0x88f07878U, 0x6f4a2525U, 0x725c2e2eU, 0x24381c1cU, 0xf157a6a6U, 0xc773b4b4U, 0x5197c6c6U, 0x23cbe8e8U, 0x7ca1ddddU, 0x9ce87474U, 0x213e1f1fU, 0xdd964b4bU, 0xdc61bdbdU, 0x860d8b8bU, 0x850f8a8aU, 0x90e07070U, 0x427c3e3eU, 0xc471b5b5U, 0xaacc6666U, 0xd8904848U, 0x05060303U, 0x01f7f6f6U, 0x121c0e0eU, 0xa3c26161U, 0x5f6a3535U, 0xf9ae5757U, 0xd069b9b9U, 0x91178686U, 0x5899c1c1U, 0x273a1d1dU, 0xb9279e9eU, 0x38d9e1e1U, 0x13ebf8f8U, 0xb32b9898U, 0x33221111U, 0xbbd26969U, 0x70a9d9d9U, 0x89078e8eU, 0xa7339494U, 0xb62d9b9bU, 0x223c1e1eU, 0x92158787U, 0x20c9e9e9U, 0x4987ceceU, 0xffaa5555U, 0x78502828U, 0x7aa5dfdfU, 0x8f038c8cU, 0xf859a1a1U, 0x80098989U, 0x171a0d0dU, 0xda65bfbfU, 0x31d7e6e6U, 0xc6844242U, 0xb8d06868U, 0xc3824141U, 0xb0299999U, 0x775a2d2dU, 0x111e0f0fU, 0xcb7bb0b0U, 0xfca85454U, 0xd66dbbbbU, 0x3a2c1616U, }; __device__ static const u32 Te2[256] = { 0x63a5c663U, 0x7c84f87cU, 0x7799ee77U, 0x7b8df67bU, 0xf20dfff2U, 0x6bbdd66bU, 0x6fb1de6fU, 0xc55491c5U, 0x30506030U, 0x01030201U, 0x67a9ce67U, 0x2b7d562bU, 0xfe19e7feU, 0xd762b5d7U, 0xabe64dabU, 0x769aec76U, 0xca458fcaU, 0x829d1f82U, 0xc94089c9U, 0x7d87fa7dU, 0xfa15effaU, 0x59ebb259U, 0x47c98e47U, 0xf00bfbf0U, 0xadec41adU, 0xd467b3d4U, 0xa2fd5fa2U, 0xafea45afU, 0x9cbf239cU, 0xa4f753a4U, 0x7296e472U, 0xc05b9bc0U, 0xb7c275b7U, 0xfd1ce1fdU, 0x93ae3d93U, 0x266a4c26U, 0x365a6c36U, 0x3f417e3fU, 0xf702f5f7U, 0xcc4f83ccU, 0x345c6834U, 0xa5f451a5U, 0xe534d1e5U, 0xf108f9f1U, 0x7193e271U, 0xd873abd8U, 0x31536231U, 0x153f2a15U, 0x040c0804U, 0xc75295c7U, 0x23654623U, 0xc35e9dc3U, 0x18283018U, 0x96a13796U, 0x050f0a05U, 0x9ab52f9aU, 0x07090e07U, 0x12362412U, 0x809b1b80U, 0xe23ddfe2U, 0xeb26cdebU, 0x27694e27U, 0xb2cd7fb2U, 0x759fea75U, 0x091b1209U, 0x839e1d83U, 0x2c74582cU, 0x1a2e341aU, 0x1b2d361bU, 0x6eb2dc6eU, 0x5aeeb45aU, 0xa0fb5ba0U, 0x52f6a452U, 0x3b4d763bU, 0xd661b7d6U, 0xb3ce7db3U, 0x297b5229U, 0xe33edde3U, 
0x2f715e2fU, 0x84971384U, 0x53f5a653U, 0xd168b9d1U, 0x00000000U, 0xed2cc1edU, 0x20604020U, 0xfc1fe3fcU, 0xb1c879b1U, 0x5bedb65bU, 0x6abed46aU, 0xcb468dcbU, 0xbed967beU, 0x394b7239U, 0x4ade944aU, 0x4cd4984cU, 0x58e8b058U, 0xcf4a85cfU, 0xd06bbbd0U, 0xef2ac5efU, 0xaae54faaU, 0xfb16edfbU, 0x43c58643U, 0x4dd79a4dU, 0x33556633U, 0x85941185U, 0x45cf8a45U, 0xf910e9f9U, 0x02060402U, 0x7f81fe7fU, 0x50f0a050U, 0x3c44783cU, 0x9fba259fU, 0xa8e34ba8U, 0x51f3a251U, 0xa3fe5da3U, 0x40c08040U, 0x8f8a058fU, 0x92ad3f92U, 0x9dbc219dU, 0x38487038U, 0xf504f1f5U, 0xbcdf63bcU, 0xb6c177b6U, 0xda75afdaU, 0x21634221U, 0x10302010U, 0xff1ae5ffU, 0xf30efdf3U, 0xd26dbfd2U, 0xcd4c81cdU, 0x0c14180cU, 0x13352613U, 0xec2fc3ecU, 0x5fe1be5fU, 0x97a23597U, 0x44cc8844U, 0x17392e17U, 0xc45793c4U, 0xa7f255a7U, 0x7e82fc7eU, 0x3d477a3dU, 0x64acc864U, 0x5de7ba5dU, 0x192b3219U, 0x7395e673U, 0x60a0c060U, 0x81981981U, 0x4fd19e4fU, 0xdc7fa3dcU, 0x22664422U, 0x2a7e542aU, 0x90ab3b90U, 0x88830b88U, 0x46ca8c46U, 0xee29c7eeU, 0xb8d36bb8U, 0x143c2814U, 0xde79a7deU, 0x5ee2bc5eU, 0x0b1d160bU, 0xdb76addbU, 0xe03bdbe0U, 0x32566432U, 0x3a4e743aU, 0x0a1e140aU, 0x49db9249U, 0x060a0c06U, 0x246c4824U, 0x5ce4b85cU, 0xc25d9fc2U, 0xd36ebdd3U, 0xacef43acU, 0x62a6c462U, 0x91a83991U, 0x95a43195U, 0xe437d3e4U, 0x798bf279U, 0xe732d5e7U, 0xc8438bc8U, 0x37596e37U, 0x6db7da6dU, 0x8d8c018dU, 0xd564b1d5U, 0x4ed29c4eU, 0xa9e049a9U, 0x6cb4d86cU, 0x56faac56U, 0xf407f3f4U, 0xea25cfeaU, 0x65afca65U, 0x7a8ef47aU, 0xaee947aeU, 0x08181008U, 0xbad56fbaU, 0x7888f078U, 0x256f4a25U, 0x2e725c2eU, 0x1c24381cU, 0xa6f157a6U, 0xb4c773b4U, 0xc65197c6U, 0xe823cbe8U, 0xdd7ca1ddU, 0x749ce874U, 0x1f213e1fU, 0x4bdd964bU, 0xbddc61bdU, 0x8b860d8bU, 0x8a850f8aU, 0x7090e070U, 0x3e427c3eU, 0xb5c471b5U, 0x66aacc66U, 0x48d89048U, 0x03050603U, 0xf601f7f6U, 0x0e121c0eU, 0x61a3c261U, 0x355f6a35U, 0x57f9ae57U, 0xb9d069b9U, 0x86911786U, 0xc15899c1U, 0x1d273a1dU, 0x9eb9279eU, 0xe138d9e1U, 0xf813ebf8U, 0x98b32b98U, 0x11332211U, 0x69bbd269U, 0xd970a9d9U, 0x8e89078eU, 
0x94a73394U, 0x9bb62d9bU, 0x1e223c1eU, 0x87921587U, 0xe920c9e9U, 0xce4987ceU, 0x55ffaa55U, 0x28785028U, 0xdf7aa5dfU, 0x8c8f038cU, 0xa1f859a1U, 0x89800989U, 0x0d171a0dU, 0xbfda65bfU, 0xe631d7e6U, 0x42c68442U, 0x68b8d068U, 0x41c38241U, 0x99b02999U, 0x2d775a2dU, 0x0f111e0fU, 0xb0cb7bb0U, 0x54fca854U, 0xbbd66dbbU, 0x163a2c16U, }; __device__ static const u32 Te3[256] = { 0x6363a5c6U, 0x7c7c84f8U, 0x777799eeU, 0x7b7b8df6U, 0xf2f20dffU, 0x6b6bbdd6U, 0x6f6fb1deU, 0xc5c55491U, 0x30305060U, 0x01010302U, 0x6767a9ceU, 0x2b2b7d56U, 0xfefe19e7U, 0xd7d762b5U, 0xababe64dU, 0x76769aecU, 0xcaca458fU, 0x82829d1fU, 0xc9c94089U, 0x7d7d87faU, 0xfafa15efU, 0x5959ebb2U, 0x4747c98eU, 0xf0f00bfbU, 0xadadec41U, 0xd4d467b3U, 0xa2a2fd5fU, 0xafafea45U, 0x9c9cbf23U, 0xa4a4f753U, 0x727296e4U, 0xc0c05b9bU, 0xb7b7c275U, 0xfdfd1ce1U, 0x9393ae3dU, 0x26266a4cU, 0x36365a6cU, 0x3f3f417eU, 0xf7f702f5U, 0xcccc4f83U, 0x34345c68U, 0xa5a5f451U, 0xe5e534d1U, 0xf1f108f9U, 0x717193e2U, 0xd8d873abU, 0x31315362U, 0x15153f2aU, 0x04040c08U, 0xc7c75295U, 0x23236546U, 0xc3c35e9dU, 0x18182830U, 0x9696a137U, 0x05050f0aU, 0x9a9ab52fU, 0x0707090eU, 0x12123624U, 0x80809b1bU, 0xe2e23ddfU, 0xebeb26cdU, 0x2727694eU, 0xb2b2cd7fU, 0x75759feaU, 0x09091b12U, 0x83839e1dU, 0x2c2c7458U, 0x1a1a2e34U, 0x1b1b2d36U, 0x6e6eb2dcU, 0x5a5aeeb4U, 0xa0a0fb5bU, 0x5252f6a4U, 0x3b3b4d76U, 0xd6d661b7U, 0xb3b3ce7dU, 0x29297b52U, 0xe3e33eddU, 0x2f2f715eU, 0x84849713U, 0x5353f5a6U, 0xd1d168b9U, 0x00000000U, 0xeded2cc1U, 0x20206040U, 0xfcfc1fe3U, 0xb1b1c879U, 0x5b5bedb6U, 0x6a6abed4U, 0xcbcb468dU, 0xbebed967U, 0x39394b72U, 0x4a4ade94U, 0x4c4cd498U, 0x5858e8b0U, 0xcfcf4a85U, 0xd0d06bbbU, 0xefef2ac5U, 0xaaaae54fU, 0xfbfb16edU, 0x4343c586U, 0x4d4dd79aU, 0x33335566U, 0x85859411U, 0x4545cf8aU, 0xf9f910e9U, 0x02020604U, 0x7f7f81feU, 0x5050f0a0U, 0x3c3c4478U, 0x9f9fba25U, 0xa8a8e34bU, 0x5151f3a2U, 0xa3a3fe5dU, 0x4040c080U, 0x8f8f8a05U, 0x9292ad3fU, 0x9d9dbc21U, 0x38384870U, 0xf5f504f1U, 0xbcbcdf63U, 0xb6b6c177U, 0xdada75afU, 0x21216342U, 0x10103020U, 
0xffff1ae5U, 0xf3f30efdU, 0xd2d26dbfU, 0xcdcd4c81U, 0x0c0c1418U, 0x13133526U, 0xecec2fc3U, 0x5f5fe1beU, 0x9797a235U, 0x4444cc88U, 0x1717392eU, 0xc4c45793U, 0xa7a7f255U, 0x7e7e82fcU, 0x3d3d477aU, 0x6464acc8U, 0x5d5de7baU, 0x19192b32U, 0x737395e6U, 0x6060a0c0U, 0x81819819U, 0x4f4fd19eU, 0xdcdc7fa3U, 0x22226644U, 0x2a2a7e54U, 0x9090ab3bU, 0x8888830bU, 0x4646ca8cU, 0xeeee29c7U, 0xb8b8d36bU, 0x14143c28U, 0xdede79a7U, 0x5e5ee2bcU, 0x0b0b1d16U, 0xdbdb76adU, 0xe0e03bdbU, 0x32325664U, 0x3a3a4e74U, 0x0a0a1e14U, 0x4949db92U, 0x06060a0cU, 0x24246c48U, 0x5c5ce4b8U, 0xc2c25d9fU, 0xd3d36ebdU, 0xacacef43U, 0x6262a6c4U, 0x9191a839U, 0x9595a431U, 0xe4e437d3U, 0x79798bf2U, 0xe7e732d5U, 0xc8c8438bU, 0x3737596eU, 0x6d6db7daU, 0x8d8d8c01U, 0xd5d564b1U, 0x4e4ed29cU, 0xa9a9e049U, 0x6c6cb4d8U, 0x5656faacU, 0xf4f407f3U, 0xeaea25cfU, 0x6565afcaU, 0x7a7a8ef4U, 0xaeaee947U, 0x08081810U, 0xbabad56fU, 0x787888f0U, 0x25256f4aU, 0x2e2e725cU, 0x1c1c2438U, 0xa6a6f157U, 0xb4b4c773U, 0xc6c65197U, 0xe8e823cbU, 0xdddd7ca1U, 0x74749ce8U, 0x1f1f213eU, 0x4b4bdd96U, 0xbdbddc61U, 0x8b8b860dU, 0x8a8a850fU, 0x707090e0U, 0x3e3e427cU, 0xb5b5c471U, 0x6666aaccU, 0x4848d890U, 0x03030506U, 0xf6f601f7U, 0x0e0e121cU, 0x6161a3c2U, 0x35355f6aU, 0x5757f9aeU, 0xb9b9d069U, 0x86869117U, 0xc1c15899U, 0x1d1d273aU, 0x9e9eb927U, 0xe1e138d9U, 0xf8f813ebU, 0x9898b32bU, 0x11113322U, 0x6969bbd2U, 0xd9d970a9U, 0x8e8e8907U, 0x9494a733U, 0x9b9bb62dU, 0x1e1e223cU, 0x87879215U, 0xe9e920c9U, 0xcece4987U, 0x5555ffaaU, 0x28287850U, 0xdfdf7aa5U, 0x8c8c8f03U, 0xa1a1f859U, 0x89898009U, 0x0d0d171aU, 0xbfbfda65U, 0xe6e631d7U, 0x4242c684U, 0x6868b8d0U, 0x4141c382U, 0x9999b029U, 0x2d2d775aU, 0x0f0f111eU, 0xb0b0cb7bU, 0x5454fca8U, 0xbbbbd66dU, 0x16163a2cU, }; /* * Encrypt a single block * in and out can overlap */ __device__ static void dev_AES_encrypt(const unsigned char *in, unsigned char *out, const AES_KEY *key) { const u32 *rk; u32 s0, s1, s2, s3, t0, t1, t2, t3; #ifndef FULL_UNROLL int r; #endif /* ?FULL_UNROLL */ assert(in && out && key); 
rk = key->rd_key; /* * map byte array block to cipher state * and add initial round key: */ s0 = GETU32(in) ^ rk[0]; s1 = GETU32(in + 4) ^ rk[1]; s2 = GETU32(in + 8) ^ rk[2]; s3 = GETU32(in + 12) ^ rk[3]; #ifdef FULL_UNROLL /* round 1: */ t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^ Te3[s3 & 0xff] ^ rk[4]; t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^ Te3[s0 & 0xff] ^ rk[5]; t2 = Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >> 8) & 0xff] ^ Te3[s1 & 0xff] ^ rk[6]; t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^ Te3[s2 & 0xff] ^ rk[7]; /* round 2: */ s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff] ^ Te3[t3 & 0xff] ^ rk[8]; s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff] ^ Te3[t0 & 0xff] ^ rk[9]; s2 = Te0[t2 >> 24] ^ Te1[(t3 >> 16) & 0xff] ^ Te2[(t0 >> 8) & 0xff] ^ Te3[t1 & 0xff] ^ rk[10]; s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^ Te3[t2 & 0xff] ^ rk[11]; /* round 3: */ t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^ Te3[s3 & 0xff] ^ rk[12]; t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^ Te3[s0 & 0xff] ^ rk[13]; t2 = Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >> 8) & 0xff] ^ Te3[s1 & 0xff] ^ rk[14]; t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^ Te3[s2 & 0xff] ^ rk[15]; /* round 4: */ s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff] ^ Te3[t3 & 0xff] ^ rk[16]; s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff] ^ Te3[t0 & 0xff] ^ rk[17]; s2 = Te0[t2 >> 24] ^ Te1[(t3 >> 16) & 0xff] ^ Te2[(t0 >> 8) & 0xff] ^ Te3[t1 & 0xff] ^ rk[18]; s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^ Te3[t2 & 0xff] ^ rk[19]; /* round 5: */ t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^ Te3[s3 & 0xff] ^ rk[20]; t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^ Te3[s0 & 0xff] ^ 
rk[21]; t2 = Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >> 8) & 0xff] ^ Te3[s1 & 0xff] ^ rk[22]; t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^ Te3[s2 & 0xff] ^ rk[23]; /* round 6: */ s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff] ^ Te3[t3 & 0xff] ^ rk[24]; s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff] ^ Te3[t0 & 0xff] ^ rk[25]; s2 = Te0[t2 >> 24] ^ Te1[(t3 >> 16) & 0xff] ^ Te2[(t0 >> 8) & 0xff] ^ Te3[t1 & 0xff] ^ rk[26]; s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^ Te3[t2 & 0xff] ^ rk[27]; /* round 7: */ t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^ Te3[s3 & 0xff] ^ rk[28]; t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^ Te3[s0 & 0xff] ^ rk[29]; t2 = Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >> 8) & 0xff] ^ Te3[s1 & 0xff] ^ rk[30]; t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^ Te3[s2 & 0xff] ^ rk[31]; /* round 8: */ s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff] ^ Te3[t3 & 0xff] ^ rk[32]; s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff] ^ Te3[t0 & 0xff] ^ rk[33]; s2 = Te0[t2 >> 24] ^ Te1[(t3 >> 16) & 0xff] ^ Te2[(t0 >> 8) & 0xff] ^ Te3[t1 & 0xff] ^ rk[34]; s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^ Te3[t2 & 0xff] ^ rk[35]; /* round 9: */ t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^ Te3[s3 & 0xff] ^ rk[36]; t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^ Te3[s0 & 0xff] ^ rk[37]; t2 = Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >> 8) & 0xff] ^ Te3[s1 & 0xff] ^ rk[38]; t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^ Te3[s2 & 0xff] ^ rk[39]; if (key->rounds > 10) { /* round 10: */ s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff] ^ Te3[t3 & 0xff] ^ rk[40]; s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff] ^ Te3[t0 & 0xff] ^ 
rk[41]; s2 = Te0[t2 >> 24] ^ Te1[(t3 >> 16) & 0xff] ^ Te2[(t0 >> 8) & 0xff] ^ Te3[t1 & 0xff] ^ rk[42]; s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^ Te3[t2 & 0xff] ^ rk[43]; /* round 11: */ t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^ Te3[s3 & 0xff] ^ rk[44]; t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^ Te3[s0 & 0xff] ^ rk[45]; t2 = Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >> 8) & 0xff] ^ Te3[s1 & 0xff] ^ rk[46]; t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^ Te3[s2 & 0xff] ^ rk[47]; if (key->rounds > 12) { /* round 12: */ s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff] ^ Te3[t3 & 0xff] ^ rk[48]; s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff] ^ Te3[t0 & 0xff] ^ rk[49]; s2 = Te0[t2 >> 24] ^ Te1[(t3 >> 16) & 0xff] ^ Te2[(t0 >> 8) & 0xff] ^ Te3[t1 & 0xff] ^ rk[50]; s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^ Te3[t2 & 0xff] ^ rk[51]; /* round 13: */ t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^ Te3[s3 & 0xff] ^ rk[52]; t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^ Te3[s0 & 0xff] ^ rk[53]; t2 = Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >> 8) & 0xff] ^ Te3[s1 & 0xff] ^ rk[54]; t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^ Te3[s2 & 0xff] ^ rk[55]; } } rk += key->rounds << 2; #else /* !FULL_UNROLL */ /* * Nr - 1 full rounds: */ r = key->rounds >> 1; for (;;) { t0 = Te0[(s0 >> 24)] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^ Te3[(s3)&0xff] ^ rk[4]; t1 = Te0[(s1 >> 24)] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^ Te3[(s0)&0xff] ^ rk[5]; t2 = Te0[(s2 >> 24)] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >> 8) & 0xff] ^ Te3[(s1)&0xff] ^ rk[6]; t3 = Te0[(s3 >> 24)] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^ Te3[(s2)&0xff] ^ rk[7]; rk += 8; if (--r == 0) { break; } s0 = Te0[(t0 >> 24)] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 
>> 8) & 0xff] ^ Te3[(t3)&0xff] ^ rk[0]; s1 = Te0[(t1 >> 24)] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff] ^ Te3[(t0)&0xff] ^ rk[1]; s2 = Te0[(t2 >> 24)] ^ Te1[(t3 >> 16) & 0xff] ^ Te2[(t0 >> 8) & 0xff] ^ Te3[(t1)&0xff] ^ rk[2]; s3 = Te0[(t3 >> 24)] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^ Te3[(t2)&0xff] ^ rk[3]; } #endif /* ?FULL_UNROLL */ /* * apply last round and * map cipher state to byte array block: */ s0 = (Te2[(t0 >> 24)] & 0xff000000) ^ (Te3[(t1 >> 16) & 0xff] & 0x00ff0000) ^ (Te0[(t2 >> 8) & 0xff] & 0x0000ff00) ^ (Te1[(t3)&0xff] & 0x000000ff) ^ rk[0]; PUTU32(out, s0); s1 = (Te2[(t1 >> 24)] & 0xff000000) ^ (Te3[(t2 >> 16) & 0xff] & 0x00ff0000) ^ (Te0[(t3 >> 8) & 0xff] & 0x0000ff00) ^ (Te1[(t0)&0xff] & 0x000000ff) ^ rk[1]; PUTU32(out + 4, s1); s2 = (Te2[(t2 >> 24)] & 0xff000000) ^ (Te3[(t3 >> 16) & 0xff] & 0x00ff0000) ^ (Te0[(t0 >> 8) & 0xff] & 0x0000ff00) ^ (Te1[(t1)&0xff] & 0x000000ff) ^ rk[2]; PUTU32(out + 8, s2); s3 = (Te2[(t3 >> 24)] & 0xff000000) ^ (Te3[(t0 >> 16) & 0xff] & 0x00ff0000) ^ (Te0[(t1 >> 8) & 0xff] & 0x0000ff00) ^ (Te1[(t2)&0xff] & 0x000000ff) ^ rk[3]; PUTU32(out + 12, s3); } __device__ static void dev_AES_ctr128_inc(unsigned char *counter) { unsigned long c; /* Grab 3rd dword of counter and increment */ #ifdef L_ENDIAN c = GETU32(counter + 8); c++; PUTU32(counter + 8, c); #else c = GETU32(counter + 4); c++; PUTU32(counter + 4, c); #endif /* if no overflow, we're done */ if (c) return; /* Grab top dword of counter and increment */ #ifdef L_ENDIAN c = GETU32(counter + 12); c++; PUTU32(counter + 12, c); #else c = GETU32(counter + 0); c++; PUTU32(counter + 0, c); #endif } __device__ static inline uint16_t u8tu16(uint8_t *data) { uint16_t result = 0; for (int i = 0; i < 2; i++) { result += data[i] * pow(16, 2 - 2 * i); } return result; } __global__ static void AES_ctr128_encrypt_chunk( const int batch_size, const int block_num_y, unsigned char *__restrict__ iv, uint8_t *__restrict__ pac_data, unsigned int *__restrict__ pac_sign, 
AES_KEY *__restrict__ aes_key) { int I = blockIdx.x * blockDim.x + threadIdx.x; int J = blockIdx.y * blockDim.y + threadIdx.y; int N = I * block_num_y * blockDim.y + J; if (N < batch_size) { uint8_t *esp_iv = pac_data + pac_sign[N] + sizeof(struct ether_header) + sizeof(struct iphdr) + 2 * sizeof(uint32_t); uint8_t *in, *out = pac_data + pac_sign[N] + sizeof(struct ether_header) + sizeof(struct iphdr) + sizeof(struct esphdr); uint8_t *ip_len = pac_data + pac_sign[N] + sizeof(struct ether_header) + 2 * sizeof(uint8_t); in = out; for (int i = 0; i < ESP_IV_LENGTH; i++) { iv[i] = esp_iv[i]; } int encrypted_len = u8tu16(ip_len) - sizeof(struct iphdr) - sizeof(struct esphdr) - SHA_DIGEST_LENGTH; int pad_len = AES_BLOCK_SIZE - (encrypted_len + 2) % AES_BLOCK_SIZE; int length = encrypted_len + pad_len + 2; uint8_t ecount_buf[AES_BLOCK_SIZE] = {0}; unsigned mode = 0; assert(in && out && aes_key && iv); assert(mode < AES_BLOCK_SIZE); while (length--) { if (mode == 0) { dev_AES_encrypt(iv, ecount_buf, aes_key); dev_AES_ctr128_inc(iv); } *(out++) = *(in++) ^ ecount_buf[mode]; mode = (mode + 1) % AES_BLOCK_SIZE; } } __syncthreads(); } } void ipsec_aes_encryption_get_cuda_kernel( uint8_t *pac_data, const int total_len, const int batch_size, const unsigned int *pac_sign, AES_KEY *aes_key) { // CheckGPUinfo(); //device unsigned char *dev_iv; AES_KEY *dev_aes_key; uint8_t *dev_pac_data; unsigned int *dev_pac_sign; //device hipMalloc((void **)&dev_iv, ESP_IV_LENGTH * sizeof(unsigned char)); hipMalloc((void **)&dev_pac_data, total_len * sizeof(uint8_t)); hipMalloc((void **)&dev_pac_sign, batch_size * sizeof(unsigned int)); hipMalloc((void **)&dev_aes_key, sizeof(AES_KEY)); //hostdevice hipMemcpy(dev_pac_data, pac_data, total_len * sizeof(uint8_t), hipMemcpyHostToDevice); hipMemcpy(dev_pac_sign, pac_sign, batch_size * sizeof(unsigned int), hipMemcpyHostToDevice); hipMemcpy(dev_aes_key, aes_key, sizeof(AES_KEY), hipMemcpyHostToDevice); //kernel dim3 threads_per_block(16, 16); float 
block_x = sqrt((float)batch_size) / (float)threads_per_block.x; int block_num_x = (block_x == int(block_x) ? block_x : int(block_x) + 1); float block_y = sqrt((float)batch_size) / (float)threads_per_block.y; int block_num_y = (block_y == int(block_y) ? block_y : int(block_y) + 1); dim3 block_num(block_num_x, block_num_y); hipLaunchKernelGGL(( AES_ctr128_encrypt_chunk), dim3(block_num), dim3(threads_per_block), 0, 0, batch_size, block_num_y, dev_iv, dev_pac_data, dev_pac_sign, dev_aes_key); hipDeviceSynchronize(); //devicehost hipMemcpy(pac_data, dev_pac_data, total_len * sizeof(uint8_t), hipMemcpyDeviceToHost); hipFree(dev_iv); hipFree(dev_pac_data); hipFree(dev_pac_sign); hipFree(dev_aes_key); }
c06cdc85b87d948a1da833bd176e5c87c70f0ffa.cu
#include "auxiliary.hpp" #include "NetStruct.hpp" #include "aes_locl.hpp" extern "C" { __device__ static const u32 Te0[256] = { 0xc66363a5U, 0xf87c7c84U, 0xee777799U, 0xf67b7b8dU, 0xfff2f20dU, 0xd66b6bbdU, 0xde6f6fb1U, 0x91c5c554U, 0x60303050U, 0x02010103U, 0xce6767a9U, 0x562b2b7dU, 0xe7fefe19U, 0xb5d7d762U, 0x4dababe6U, 0xec76769aU, 0x8fcaca45U, 0x1f82829dU, 0x89c9c940U, 0xfa7d7d87U, 0xeffafa15U, 0xb25959ebU, 0x8e4747c9U, 0xfbf0f00bU, 0x41adadecU, 0xb3d4d467U, 0x5fa2a2fdU, 0x45afafeaU, 0x239c9cbfU, 0x53a4a4f7U, 0xe4727296U, 0x9bc0c05bU, 0x75b7b7c2U, 0xe1fdfd1cU, 0x3d9393aeU, 0x4c26266aU, 0x6c36365aU, 0x7e3f3f41U, 0xf5f7f702U, 0x83cccc4fU, 0x6834345cU, 0x51a5a5f4U, 0xd1e5e534U, 0xf9f1f108U, 0xe2717193U, 0xabd8d873U, 0x62313153U, 0x2a15153fU, 0x0804040cU, 0x95c7c752U, 0x46232365U, 0x9dc3c35eU, 0x30181828U, 0x379696a1U, 0x0a05050fU, 0x2f9a9ab5U, 0x0e070709U, 0x24121236U, 0x1b80809bU, 0xdfe2e23dU, 0xcdebeb26U, 0x4e272769U, 0x7fb2b2cdU, 0xea75759fU, 0x1209091bU, 0x1d83839eU, 0x582c2c74U, 0x341a1a2eU, 0x361b1b2dU, 0xdc6e6eb2U, 0xb45a5aeeU, 0x5ba0a0fbU, 0xa45252f6U, 0x763b3b4dU, 0xb7d6d661U, 0x7db3b3ceU, 0x5229297bU, 0xdde3e33eU, 0x5e2f2f71U, 0x13848497U, 0xa65353f5U, 0xb9d1d168U, 0x00000000U, 0xc1eded2cU, 0x40202060U, 0xe3fcfc1fU, 0x79b1b1c8U, 0xb65b5bedU, 0xd46a6abeU, 0x8dcbcb46U, 0x67bebed9U, 0x7239394bU, 0x944a4adeU, 0x984c4cd4U, 0xb05858e8U, 0x85cfcf4aU, 0xbbd0d06bU, 0xc5efef2aU, 0x4faaaae5U, 0xedfbfb16U, 0x864343c5U, 0x9a4d4dd7U, 0x66333355U, 0x11858594U, 0x8a4545cfU, 0xe9f9f910U, 0x04020206U, 0xfe7f7f81U, 0xa05050f0U, 0x783c3c44U, 0x259f9fbaU, 0x4ba8a8e3U, 0xa25151f3U, 0x5da3a3feU, 0x804040c0U, 0x058f8f8aU, 0x3f9292adU, 0x219d9dbcU, 0x70383848U, 0xf1f5f504U, 0x63bcbcdfU, 0x77b6b6c1U, 0xafdada75U, 0x42212163U, 0x20101030U, 0xe5ffff1aU, 0xfdf3f30eU, 0xbfd2d26dU, 0x81cdcd4cU, 0x180c0c14U, 0x26131335U, 0xc3ecec2fU, 0xbe5f5fe1U, 0x359797a2U, 0x884444ccU, 0x2e171739U, 0x93c4c457U, 0x55a7a7f2U, 0xfc7e7e82U, 0x7a3d3d47U, 0xc86464acU, 0xba5d5de7U, 0x3219192bU, 0xe6737395U, 
0xc06060a0U, 0x19818198U, 0x9e4f4fd1U, 0xa3dcdc7fU, 0x44222266U, 0x542a2a7eU, 0x3b9090abU, 0x0b888883U, 0x8c4646caU, 0xc7eeee29U, 0x6bb8b8d3U, 0x2814143cU, 0xa7dede79U, 0xbc5e5ee2U, 0x160b0b1dU, 0xaddbdb76U, 0xdbe0e03bU, 0x64323256U, 0x743a3a4eU, 0x140a0a1eU, 0x924949dbU, 0x0c06060aU, 0x4824246cU, 0xb85c5ce4U, 0x9fc2c25dU, 0xbdd3d36eU, 0x43acacefU, 0xc46262a6U, 0x399191a8U, 0x319595a4U, 0xd3e4e437U, 0xf279798bU, 0xd5e7e732U, 0x8bc8c843U, 0x6e373759U, 0xda6d6db7U, 0x018d8d8cU, 0xb1d5d564U, 0x9c4e4ed2U, 0x49a9a9e0U, 0xd86c6cb4U, 0xac5656faU, 0xf3f4f407U, 0xcfeaea25U, 0xca6565afU, 0xf47a7a8eU, 0x47aeaee9U, 0x10080818U, 0x6fbabad5U, 0xf0787888U, 0x4a25256fU, 0x5c2e2e72U, 0x381c1c24U, 0x57a6a6f1U, 0x73b4b4c7U, 0x97c6c651U, 0xcbe8e823U, 0xa1dddd7cU, 0xe874749cU, 0x3e1f1f21U, 0x964b4bddU, 0x61bdbddcU, 0x0d8b8b86U, 0x0f8a8a85U, 0xe0707090U, 0x7c3e3e42U, 0x71b5b5c4U, 0xcc6666aaU, 0x904848d8U, 0x06030305U, 0xf7f6f601U, 0x1c0e0e12U, 0xc26161a3U, 0x6a35355fU, 0xae5757f9U, 0x69b9b9d0U, 0x17868691U, 0x99c1c158U, 0x3a1d1d27U, 0x279e9eb9U, 0xd9e1e138U, 0xebf8f813U, 0x2b9898b3U, 0x22111133U, 0xd26969bbU, 0xa9d9d970U, 0x078e8e89U, 0x339494a7U, 0x2d9b9bb6U, 0x3c1e1e22U, 0x15878792U, 0xc9e9e920U, 0x87cece49U, 0xaa5555ffU, 0x50282878U, 0xa5dfdf7aU, 0x038c8c8fU, 0x59a1a1f8U, 0x09898980U, 0x1a0d0d17U, 0x65bfbfdaU, 0xd7e6e631U, 0x844242c6U, 0xd06868b8U, 0x824141c3U, 0x299999b0U, 0x5a2d2d77U, 0x1e0f0f11U, 0x7bb0b0cbU, 0xa85454fcU, 0x6dbbbbd6U, 0x2c16163aU, }; __device__ static const u32 Te1[256] = { 0xa5c66363U, 0x84f87c7cU, 0x99ee7777U, 0x8df67b7bU, 0x0dfff2f2U, 0xbdd66b6bU, 0xb1de6f6fU, 0x5491c5c5U, 0x50603030U, 0x03020101U, 0xa9ce6767U, 0x7d562b2bU, 0x19e7fefeU, 0x62b5d7d7U, 0xe64dababU, 0x9aec7676U, 0x458fcacaU, 0x9d1f8282U, 0x4089c9c9U, 0x87fa7d7dU, 0x15effafaU, 0xebb25959U, 0xc98e4747U, 0x0bfbf0f0U, 0xec41adadU, 0x67b3d4d4U, 0xfd5fa2a2U, 0xea45afafU, 0xbf239c9cU, 0xf753a4a4U, 0x96e47272U, 0x5b9bc0c0U, 0xc275b7b7U, 0x1ce1fdfdU, 0xae3d9393U, 0x6a4c2626U, 0x5a6c3636U, 0x417e3f3fU, 
0x02f5f7f7U, 0x4f83ccccU, 0x5c683434U, 0xf451a5a5U, 0x34d1e5e5U, 0x08f9f1f1U, 0x93e27171U, 0x73abd8d8U, 0x53623131U, 0x3f2a1515U, 0x0c080404U, 0x5295c7c7U, 0x65462323U, 0x5e9dc3c3U, 0x28301818U, 0xa1379696U, 0x0f0a0505U, 0xb52f9a9aU, 0x090e0707U, 0x36241212U, 0x9b1b8080U, 0x3ddfe2e2U, 0x26cdebebU, 0x694e2727U, 0xcd7fb2b2U, 0x9fea7575U, 0x1b120909U, 0x9e1d8383U, 0x74582c2cU, 0x2e341a1aU, 0x2d361b1bU, 0xb2dc6e6eU, 0xeeb45a5aU, 0xfb5ba0a0U, 0xf6a45252U, 0x4d763b3bU, 0x61b7d6d6U, 0xce7db3b3U, 0x7b522929U, 0x3edde3e3U, 0x715e2f2fU, 0x97138484U, 0xf5a65353U, 0x68b9d1d1U, 0x00000000U, 0x2cc1ededU, 0x60402020U, 0x1fe3fcfcU, 0xc879b1b1U, 0xedb65b5bU, 0xbed46a6aU, 0x468dcbcbU, 0xd967bebeU, 0x4b723939U, 0xde944a4aU, 0xd4984c4cU, 0xe8b05858U, 0x4a85cfcfU, 0x6bbbd0d0U, 0x2ac5efefU, 0xe54faaaaU, 0x16edfbfbU, 0xc5864343U, 0xd79a4d4dU, 0x55663333U, 0x94118585U, 0xcf8a4545U, 0x10e9f9f9U, 0x06040202U, 0x81fe7f7fU, 0xf0a05050U, 0x44783c3cU, 0xba259f9fU, 0xe34ba8a8U, 0xf3a25151U, 0xfe5da3a3U, 0xc0804040U, 0x8a058f8fU, 0xad3f9292U, 0xbc219d9dU, 0x48703838U, 0x04f1f5f5U, 0xdf63bcbcU, 0xc177b6b6U, 0x75afdadaU, 0x63422121U, 0x30201010U, 0x1ae5ffffU, 0x0efdf3f3U, 0x6dbfd2d2U, 0x4c81cdcdU, 0x14180c0cU, 0x35261313U, 0x2fc3ececU, 0xe1be5f5fU, 0xa2359797U, 0xcc884444U, 0x392e1717U, 0x5793c4c4U, 0xf255a7a7U, 0x82fc7e7eU, 0x477a3d3dU, 0xacc86464U, 0xe7ba5d5dU, 0x2b321919U, 0x95e67373U, 0xa0c06060U, 0x98198181U, 0xd19e4f4fU, 0x7fa3dcdcU, 0x66442222U, 0x7e542a2aU, 0xab3b9090U, 0x830b8888U, 0xca8c4646U, 0x29c7eeeeU, 0xd36bb8b8U, 0x3c281414U, 0x79a7dedeU, 0xe2bc5e5eU, 0x1d160b0bU, 0x76addbdbU, 0x3bdbe0e0U, 0x56643232U, 0x4e743a3aU, 0x1e140a0aU, 0xdb924949U, 0x0a0c0606U, 0x6c482424U, 0xe4b85c5cU, 0x5d9fc2c2U, 0x6ebdd3d3U, 0xef43acacU, 0xa6c46262U, 0xa8399191U, 0xa4319595U, 0x37d3e4e4U, 0x8bf27979U, 0x32d5e7e7U, 0x438bc8c8U, 0x596e3737U, 0xb7da6d6dU, 0x8c018d8dU, 0x64b1d5d5U, 0xd29c4e4eU, 0xe049a9a9U, 0xb4d86c6cU, 0xfaac5656U, 0x07f3f4f4U, 0x25cfeaeaU, 0xafca6565U, 0x8ef47a7aU, 0xe947aeaeU, 
0x18100808U, 0xd56fbabaU, 0x88f07878U, 0x6f4a2525U, 0x725c2e2eU, 0x24381c1cU, 0xf157a6a6U, 0xc773b4b4U, 0x5197c6c6U, 0x23cbe8e8U, 0x7ca1ddddU, 0x9ce87474U, 0x213e1f1fU, 0xdd964b4bU, 0xdc61bdbdU, 0x860d8b8bU, 0x850f8a8aU, 0x90e07070U, 0x427c3e3eU, 0xc471b5b5U, 0xaacc6666U, 0xd8904848U, 0x05060303U, 0x01f7f6f6U, 0x121c0e0eU, 0xa3c26161U, 0x5f6a3535U, 0xf9ae5757U, 0xd069b9b9U, 0x91178686U, 0x5899c1c1U, 0x273a1d1dU, 0xb9279e9eU, 0x38d9e1e1U, 0x13ebf8f8U, 0xb32b9898U, 0x33221111U, 0xbbd26969U, 0x70a9d9d9U, 0x89078e8eU, 0xa7339494U, 0xb62d9b9bU, 0x223c1e1eU, 0x92158787U, 0x20c9e9e9U, 0x4987ceceU, 0xffaa5555U, 0x78502828U, 0x7aa5dfdfU, 0x8f038c8cU, 0xf859a1a1U, 0x80098989U, 0x171a0d0dU, 0xda65bfbfU, 0x31d7e6e6U, 0xc6844242U, 0xb8d06868U, 0xc3824141U, 0xb0299999U, 0x775a2d2dU, 0x111e0f0fU, 0xcb7bb0b0U, 0xfca85454U, 0xd66dbbbbU, 0x3a2c1616U, }; __device__ static const u32 Te2[256] = { 0x63a5c663U, 0x7c84f87cU, 0x7799ee77U, 0x7b8df67bU, 0xf20dfff2U, 0x6bbdd66bU, 0x6fb1de6fU, 0xc55491c5U, 0x30506030U, 0x01030201U, 0x67a9ce67U, 0x2b7d562bU, 0xfe19e7feU, 0xd762b5d7U, 0xabe64dabU, 0x769aec76U, 0xca458fcaU, 0x829d1f82U, 0xc94089c9U, 0x7d87fa7dU, 0xfa15effaU, 0x59ebb259U, 0x47c98e47U, 0xf00bfbf0U, 0xadec41adU, 0xd467b3d4U, 0xa2fd5fa2U, 0xafea45afU, 0x9cbf239cU, 0xa4f753a4U, 0x7296e472U, 0xc05b9bc0U, 0xb7c275b7U, 0xfd1ce1fdU, 0x93ae3d93U, 0x266a4c26U, 0x365a6c36U, 0x3f417e3fU, 0xf702f5f7U, 0xcc4f83ccU, 0x345c6834U, 0xa5f451a5U, 0xe534d1e5U, 0xf108f9f1U, 0x7193e271U, 0xd873abd8U, 0x31536231U, 0x153f2a15U, 0x040c0804U, 0xc75295c7U, 0x23654623U, 0xc35e9dc3U, 0x18283018U, 0x96a13796U, 0x050f0a05U, 0x9ab52f9aU, 0x07090e07U, 0x12362412U, 0x809b1b80U, 0xe23ddfe2U, 0xeb26cdebU, 0x27694e27U, 0xb2cd7fb2U, 0x759fea75U, 0x091b1209U, 0x839e1d83U, 0x2c74582cU, 0x1a2e341aU, 0x1b2d361bU, 0x6eb2dc6eU, 0x5aeeb45aU, 0xa0fb5ba0U, 0x52f6a452U, 0x3b4d763bU, 0xd661b7d6U, 0xb3ce7db3U, 0x297b5229U, 0xe33edde3U, 0x2f715e2fU, 0x84971384U, 0x53f5a653U, 0xd168b9d1U, 0x00000000U, 0xed2cc1edU, 0x20604020U, 
0xfc1fe3fcU, 0xb1c879b1U, 0x5bedb65bU, 0x6abed46aU, 0xcb468dcbU, 0xbed967beU, 0x394b7239U, 0x4ade944aU, 0x4cd4984cU, 0x58e8b058U, 0xcf4a85cfU, 0xd06bbbd0U, 0xef2ac5efU, 0xaae54faaU, 0xfb16edfbU, 0x43c58643U, 0x4dd79a4dU, 0x33556633U, 0x85941185U, 0x45cf8a45U, 0xf910e9f9U, 0x02060402U, 0x7f81fe7fU, 0x50f0a050U, 0x3c44783cU, 0x9fba259fU, 0xa8e34ba8U, 0x51f3a251U, 0xa3fe5da3U, 0x40c08040U, 0x8f8a058fU, 0x92ad3f92U, 0x9dbc219dU, 0x38487038U, 0xf504f1f5U, 0xbcdf63bcU, 0xb6c177b6U, 0xda75afdaU, 0x21634221U, 0x10302010U, 0xff1ae5ffU, 0xf30efdf3U, 0xd26dbfd2U, 0xcd4c81cdU, 0x0c14180cU, 0x13352613U, 0xec2fc3ecU, 0x5fe1be5fU, 0x97a23597U, 0x44cc8844U, 0x17392e17U, 0xc45793c4U, 0xa7f255a7U, 0x7e82fc7eU, 0x3d477a3dU, 0x64acc864U, 0x5de7ba5dU, 0x192b3219U, 0x7395e673U, 0x60a0c060U, 0x81981981U, 0x4fd19e4fU, 0xdc7fa3dcU, 0x22664422U, 0x2a7e542aU, 0x90ab3b90U, 0x88830b88U, 0x46ca8c46U, 0xee29c7eeU, 0xb8d36bb8U, 0x143c2814U, 0xde79a7deU, 0x5ee2bc5eU, 0x0b1d160bU, 0xdb76addbU, 0xe03bdbe0U, 0x32566432U, 0x3a4e743aU, 0x0a1e140aU, 0x49db9249U, 0x060a0c06U, 0x246c4824U, 0x5ce4b85cU, 0xc25d9fc2U, 0xd36ebdd3U, 0xacef43acU, 0x62a6c462U, 0x91a83991U, 0x95a43195U, 0xe437d3e4U, 0x798bf279U, 0xe732d5e7U, 0xc8438bc8U, 0x37596e37U, 0x6db7da6dU, 0x8d8c018dU, 0xd564b1d5U, 0x4ed29c4eU, 0xa9e049a9U, 0x6cb4d86cU, 0x56faac56U, 0xf407f3f4U, 0xea25cfeaU, 0x65afca65U, 0x7a8ef47aU, 0xaee947aeU, 0x08181008U, 0xbad56fbaU, 0x7888f078U, 0x256f4a25U, 0x2e725c2eU, 0x1c24381cU, 0xa6f157a6U, 0xb4c773b4U, 0xc65197c6U, 0xe823cbe8U, 0xdd7ca1ddU, 0x749ce874U, 0x1f213e1fU, 0x4bdd964bU, 0xbddc61bdU, 0x8b860d8bU, 0x8a850f8aU, 0x7090e070U, 0x3e427c3eU, 0xb5c471b5U, 0x66aacc66U, 0x48d89048U, 0x03050603U, 0xf601f7f6U, 0x0e121c0eU, 0x61a3c261U, 0x355f6a35U, 0x57f9ae57U, 0xb9d069b9U, 0x86911786U, 0xc15899c1U, 0x1d273a1dU, 0x9eb9279eU, 0xe138d9e1U, 0xf813ebf8U, 0x98b32b98U, 0x11332211U, 0x69bbd269U, 0xd970a9d9U, 0x8e89078eU, 0x94a73394U, 0x9bb62d9bU, 0x1e223c1eU, 0x87921587U, 0xe920c9e9U, 0xce4987ceU, 0x55ffaa55U, 
0x28785028U, 0xdf7aa5dfU, 0x8c8f038cU, 0xa1f859a1U, 0x89800989U, 0x0d171a0dU, 0xbfda65bfU, 0xe631d7e6U, 0x42c68442U, 0x68b8d068U, 0x41c38241U, 0x99b02999U, 0x2d775a2dU, 0x0f111e0fU, 0xb0cb7bb0U, 0x54fca854U, 0xbbd66dbbU, 0x163a2c16U, }; __device__ static const u32 Te3[256] = { 0x6363a5c6U, 0x7c7c84f8U, 0x777799eeU, 0x7b7b8df6U, 0xf2f20dffU, 0x6b6bbdd6U, 0x6f6fb1deU, 0xc5c55491U, 0x30305060U, 0x01010302U, 0x6767a9ceU, 0x2b2b7d56U, 0xfefe19e7U, 0xd7d762b5U, 0xababe64dU, 0x76769aecU, 0xcaca458fU, 0x82829d1fU, 0xc9c94089U, 0x7d7d87faU, 0xfafa15efU, 0x5959ebb2U, 0x4747c98eU, 0xf0f00bfbU, 0xadadec41U, 0xd4d467b3U, 0xa2a2fd5fU, 0xafafea45U, 0x9c9cbf23U, 0xa4a4f753U, 0x727296e4U, 0xc0c05b9bU, 0xb7b7c275U, 0xfdfd1ce1U, 0x9393ae3dU, 0x26266a4cU, 0x36365a6cU, 0x3f3f417eU, 0xf7f702f5U, 0xcccc4f83U, 0x34345c68U, 0xa5a5f451U, 0xe5e534d1U, 0xf1f108f9U, 0x717193e2U, 0xd8d873abU, 0x31315362U, 0x15153f2aU, 0x04040c08U, 0xc7c75295U, 0x23236546U, 0xc3c35e9dU, 0x18182830U, 0x9696a137U, 0x05050f0aU, 0x9a9ab52fU, 0x0707090eU, 0x12123624U, 0x80809b1bU, 0xe2e23ddfU, 0xebeb26cdU, 0x2727694eU, 0xb2b2cd7fU, 0x75759feaU, 0x09091b12U, 0x83839e1dU, 0x2c2c7458U, 0x1a1a2e34U, 0x1b1b2d36U, 0x6e6eb2dcU, 0x5a5aeeb4U, 0xa0a0fb5bU, 0x5252f6a4U, 0x3b3b4d76U, 0xd6d661b7U, 0xb3b3ce7dU, 0x29297b52U, 0xe3e33eddU, 0x2f2f715eU, 0x84849713U, 0x5353f5a6U, 0xd1d168b9U, 0x00000000U, 0xeded2cc1U, 0x20206040U, 0xfcfc1fe3U, 0xb1b1c879U, 0x5b5bedb6U, 0x6a6abed4U, 0xcbcb468dU, 0xbebed967U, 0x39394b72U, 0x4a4ade94U, 0x4c4cd498U, 0x5858e8b0U, 0xcfcf4a85U, 0xd0d06bbbU, 0xefef2ac5U, 0xaaaae54fU, 0xfbfb16edU, 0x4343c586U, 0x4d4dd79aU, 0x33335566U, 0x85859411U, 0x4545cf8aU, 0xf9f910e9U, 0x02020604U, 0x7f7f81feU, 0x5050f0a0U, 0x3c3c4478U, 0x9f9fba25U, 0xa8a8e34bU, 0x5151f3a2U, 0xa3a3fe5dU, 0x4040c080U, 0x8f8f8a05U, 0x9292ad3fU, 0x9d9dbc21U, 0x38384870U, 0xf5f504f1U, 0xbcbcdf63U, 0xb6b6c177U, 0xdada75afU, 0x21216342U, 0x10103020U, 0xffff1ae5U, 0xf3f30efdU, 0xd2d26dbfU, 0xcdcd4c81U, 0x0c0c1418U, 0x13133526U, 0xecec2fc3U, 
0x5f5fe1beU, 0x9797a235U, 0x4444cc88U, 0x1717392eU, 0xc4c45793U, 0xa7a7f255U, 0x7e7e82fcU, 0x3d3d477aU, 0x6464acc8U, 0x5d5de7baU, 0x19192b32U, 0x737395e6U, 0x6060a0c0U, 0x81819819U, 0x4f4fd19eU, 0xdcdc7fa3U, 0x22226644U, 0x2a2a7e54U, 0x9090ab3bU, 0x8888830bU, 0x4646ca8cU, 0xeeee29c7U, 0xb8b8d36bU, 0x14143c28U, 0xdede79a7U, 0x5e5ee2bcU, 0x0b0b1d16U, 0xdbdb76adU, 0xe0e03bdbU, 0x32325664U, 0x3a3a4e74U, 0x0a0a1e14U, 0x4949db92U, 0x06060a0cU, 0x24246c48U, 0x5c5ce4b8U, 0xc2c25d9fU, 0xd3d36ebdU, 0xacacef43U, 0x6262a6c4U, 0x9191a839U, 0x9595a431U, 0xe4e437d3U, 0x79798bf2U, 0xe7e732d5U, 0xc8c8438bU, 0x3737596eU, 0x6d6db7daU, 0x8d8d8c01U, 0xd5d564b1U, 0x4e4ed29cU, 0xa9a9e049U, 0x6c6cb4d8U, 0x5656faacU, 0xf4f407f3U, 0xeaea25cfU, 0x6565afcaU, 0x7a7a8ef4U, 0xaeaee947U, 0x08081810U, 0xbabad56fU, 0x787888f0U, 0x25256f4aU, 0x2e2e725cU, 0x1c1c2438U, 0xa6a6f157U, 0xb4b4c773U, 0xc6c65197U, 0xe8e823cbU, 0xdddd7ca1U, 0x74749ce8U, 0x1f1f213eU, 0x4b4bdd96U, 0xbdbddc61U, 0x8b8b860dU, 0x8a8a850fU, 0x707090e0U, 0x3e3e427cU, 0xb5b5c471U, 0x6666aaccU, 0x4848d890U, 0x03030506U, 0xf6f601f7U, 0x0e0e121cU, 0x6161a3c2U, 0x35355f6aU, 0x5757f9aeU, 0xb9b9d069U, 0x86869117U, 0xc1c15899U, 0x1d1d273aU, 0x9e9eb927U, 0xe1e138d9U, 0xf8f813ebU, 0x9898b32bU, 0x11113322U, 0x6969bbd2U, 0xd9d970a9U, 0x8e8e8907U, 0x9494a733U, 0x9b9bb62dU, 0x1e1e223cU, 0x87879215U, 0xe9e920c9U, 0xcece4987U, 0x5555ffaaU, 0x28287850U, 0xdfdf7aa5U, 0x8c8c8f03U, 0xa1a1f859U, 0x89898009U, 0x0d0d171aU, 0xbfbfda65U, 0xe6e631d7U, 0x4242c684U, 0x6868b8d0U, 0x4141c382U, 0x9999b029U, 0x2d2d775aU, 0x0f0f111eU, 0xb0b0cb7bU, 0x5454fca8U, 0xbbbbd66dU, 0x16163a2cU, }; /* * Encrypt a single block * in and out can overlap */ __device__ static void dev_AES_encrypt(const unsigned char *in, unsigned char *out, const AES_KEY *key) { const u32 *rk; u32 s0, s1, s2, s3, t0, t1, t2, t3; #ifndef FULL_UNROLL int r; #endif /* ?FULL_UNROLL */ assert(in && out && key); rk = key->rd_key; /* * map byte array block to cipher state * and add initial round key: */ 
s0 = GETU32(in) ^ rk[0]; s1 = GETU32(in + 4) ^ rk[1]; s2 = GETU32(in + 8) ^ rk[2]; s3 = GETU32(in + 12) ^ rk[3]; #ifdef FULL_UNROLL /* round 1: */ t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^ Te3[s3 & 0xff] ^ rk[4]; t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^ Te3[s0 & 0xff] ^ rk[5]; t2 = Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >> 8) & 0xff] ^ Te3[s1 & 0xff] ^ rk[6]; t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^ Te3[s2 & 0xff] ^ rk[7]; /* round 2: */ s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff] ^ Te3[t3 & 0xff] ^ rk[8]; s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff] ^ Te3[t0 & 0xff] ^ rk[9]; s2 = Te0[t2 >> 24] ^ Te1[(t3 >> 16) & 0xff] ^ Te2[(t0 >> 8) & 0xff] ^ Te3[t1 & 0xff] ^ rk[10]; s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^ Te3[t2 & 0xff] ^ rk[11]; /* round 3: */ t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^ Te3[s3 & 0xff] ^ rk[12]; t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^ Te3[s0 & 0xff] ^ rk[13]; t2 = Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >> 8) & 0xff] ^ Te3[s1 & 0xff] ^ rk[14]; t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^ Te3[s2 & 0xff] ^ rk[15]; /* round 4: */ s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff] ^ Te3[t3 & 0xff] ^ rk[16]; s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff] ^ Te3[t0 & 0xff] ^ rk[17]; s2 = Te0[t2 >> 24] ^ Te1[(t3 >> 16) & 0xff] ^ Te2[(t0 >> 8) & 0xff] ^ Te3[t1 & 0xff] ^ rk[18]; s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^ Te3[t2 & 0xff] ^ rk[19]; /* round 5: */ t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^ Te3[s3 & 0xff] ^ rk[20]; t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^ Te3[s0 & 0xff] ^ rk[21]; t2 = Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >> 8) & 0xff] ^ Te3[s1 & 0xff] ^ 
rk[22]; t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^ Te3[s2 & 0xff] ^ rk[23]; /* round 6: */ s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff] ^ Te3[t3 & 0xff] ^ rk[24]; s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff] ^ Te3[t0 & 0xff] ^ rk[25]; s2 = Te0[t2 >> 24] ^ Te1[(t3 >> 16) & 0xff] ^ Te2[(t0 >> 8) & 0xff] ^ Te3[t1 & 0xff] ^ rk[26]; s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^ Te3[t2 & 0xff] ^ rk[27]; /* round 7: */ t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^ Te3[s3 & 0xff] ^ rk[28]; t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^ Te3[s0 & 0xff] ^ rk[29]; t2 = Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >> 8) & 0xff] ^ Te3[s1 & 0xff] ^ rk[30]; t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^ Te3[s2 & 0xff] ^ rk[31]; /* round 8: */ s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff] ^ Te3[t3 & 0xff] ^ rk[32]; s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff] ^ Te3[t0 & 0xff] ^ rk[33]; s2 = Te0[t2 >> 24] ^ Te1[(t3 >> 16) & 0xff] ^ Te2[(t0 >> 8) & 0xff] ^ Te3[t1 & 0xff] ^ rk[34]; s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^ Te3[t2 & 0xff] ^ rk[35]; /* round 9: */ t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^ Te3[s3 & 0xff] ^ rk[36]; t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^ Te3[s0 & 0xff] ^ rk[37]; t2 = Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >> 8) & 0xff] ^ Te3[s1 & 0xff] ^ rk[38]; t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^ Te3[s2 & 0xff] ^ rk[39]; if (key->rounds > 10) { /* round 10: */ s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff] ^ Te3[t3 & 0xff] ^ rk[40]; s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff] ^ Te3[t0 & 0xff] ^ rk[41]; s2 = Te0[t2 >> 24] ^ Te1[(t3 >> 16) & 0xff] ^ Te2[(t0 >> 8) & 0xff] ^ Te3[t1 & 0xff] ^ 
rk[42]; s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^ Te3[t2 & 0xff] ^ rk[43]; /* round 11: */ t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^ Te3[s3 & 0xff] ^ rk[44]; t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^ Te3[s0 & 0xff] ^ rk[45]; t2 = Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >> 8) & 0xff] ^ Te3[s1 & 0xff] ^ rk[46]; t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^ Te3[s2 & 0xff] ^ rk[47]; if (key->rounds > 12) { /* round 12: */ s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff] ^ Te3[t3 & 0xff] ^ rk[48]; s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff] ^ Te3[t0 & 0xff] ^ rk[49]; s2 = Te0[t2 >> 24] ^ Te1[(t3 >> 16) & 0xff] ^ Te2[(t0 >> 8) & 0xff] ^ Te3[t1 & 0xff] ^ rk[50]; s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^ Te3[t2 & 0xff] ^ rk[51]; /* round 13: */ t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^ Te3[s3 & 0xff] ^ rk[52]; t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^ Te3[s0 & 0xff] ^ rk[53]; t2 = Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >> 8) & 0xff] ^ Te3[s1 & 0xff] ^ rk[54]; t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^ Te3[s2 & 0xff] ^ rk[55]; } } rk += key->rounds << 2; #else /* !FULL_UNROLL */ /* * Nr - 1 full rounds: */ r = key->rounds >> 1; for (;;) { t0 = Te0[(s0 >> 24)] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^ Te3[(s3)&0xff] ^ rk[4]; t1 = Te0[(s1 >> 24)] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^ Te3[(s0)&0xff] ^ rk[5]; t2 = Te0[(s2 >> 24)] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >> 8) & 0xff] ^ Te3[(s1)&0xff] ^ rk[6]; t3 = Te0[(s3 >> 24)] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^ Te3[(s2)&0xff] ^ rk[7]; rk += 8; if (--r == 0) { break; } s0 = Te0[(t0 >> 24)] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff] ^ Te3[(t3)&0xff] ^ rk[0]; s1 = Te0[(t1 >> 24)] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 
>> 8) & 0xff] ^ Te3[(t0)&0xff] ^ rk[1]; s2 = Te0[(t2 >> 24)] ^ Te1[(t3 >> 16) & 0xff] ^ Te2[(t0 >> 8) & 0xff] ^ Te3[(t1)&0xff] ^ rk[2]; s3 = Te0[(t3 >> 24)] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^ Te3[(t2)&0xff] ^ rk[3]; } #endif /* ?FULL_UNROLL */ /* * apply last round and * map cipher state to byte array block: */ s0 = (Te2[(t0 >> 24)] & 0xff000000) ^ (Te3[(t1 >> 16) & 0xff] & 0x00ff0000) ^ (Te0[(t2 >> 8) & 0xff] & 0x0000ff00) ^ (Te1[(t3)&0xff] & 0x000000ff) ^ rk[0]; PUTU32(out, s0); s1 = (Te2[(t1 >> 24)] & 0xff000000) ^ (Te3[(t2 >> 16) & 0xff] & 0x00ff0000) ^ (Te0[(t3 >> 8) & 0xff] & 0x0000ff00) ^ (Te1[(t0)&0xff] & 0x000000ff) ^ rk[1]; PUTU32(out + 4, s1); s2 = (Te2[(t2 >> 24)] & 0xff000000) ^ (Te3[(t3 >> 16) & 0xff] & 0x00ff0000) ^ (Te0[(t0 >> 8) & 0xff] & 0x0000ff00) ^ (Te1[(t1)&0xff] & 0x000000ff) ^ rk[2]; PUTU32(out + 8, s2); s3 = (Te2[(t3 >> 24)] & 0xff000000) ^ (Te3[(t0 >> 16) & 0xff] & 0x00ff0000) ^ (Te0[(t1 >> 8) & 0xff] & 0x0000ff00) ^ (Te1[(t2)&0xff] & 0x000000ff) ^ rk[3]; PUTU32(out + 12, s3); } __device__ static void dev_AES_ctr128_inc(unsigned char *counter) { unsigned long c; /* Grab 3rd dword of counter and increment */ #ifdef L_ENDIAN c = GETU32(counter + 8); c++; PUTU32(counter + 8, c); #else c = GETU32(counter + 4); c++; PUTU32(counter + 4, c); #endif /* if no overflow, we're done */ if (c) return; /* Grab top dword of counter and increment */ #ifdef L_ENDIAN c = GETU32(counter + 12); c++; PUTU32(counter + 12, c); #else c = GETU32(counter + 0); c++; PUTU32(counter + 0, c); #endif } __device__ static inline uint16_t u8tu16(uint8_t *data) { uint16_t result = 0; for (int i = 0; i < 2; i++) { result += data[i] * pow(16, 2 - 2 * i); } return result; } __global__ static void AES_ctr128_encrypt_chunk( const int batch_size, const int block_num_y, unsigned char *__restrict__ iv, uint8_t *__restrict__ pac_data, unsigned int *__restrict__ pac_sign, AES_KEY *__restrict__ aes_key) { int I = blockIdx.x * blockDim.x + threadIdx.x; int J = 
blockIdx.y * blockDim.y + threadIdx.y; int N = I * block_num_y * blockDim.y + J; if (N < batch_size) { uint8_t *esp_iv = pac_data + pac_sign[N] + sizeof(struct ether_header) + sizeof(struct iphdr) + 2 * sizeof(uint32_t); uint8_t *in, *out = pac_data + pac_sign[N] + sizeof(struct ether_header) + sizeof(struct iphdr) + sizeof(struct esphdr); uint8_t *ip_len = pac_data + pac_sign[N] + sizeof(struct ether_header) + 2 * sizeof(uint8_t); in = out; for (int i = 0; i < ESP_IV_LENGTH; i++) { iv[i] = esp_iv[i]; } int encrypted_len = u8tu16(ip_len) - sizeof(struct iphdr) - sizeof(struct esphdr) - SHA_DIGEST_LENGTH; int pad_len = AES_BLOCK_SIZE - (encrypted_len + 2) % AES_BLOCK_SIZE; int length = encrypted_len + pad_len + 2; uint8_t ecount_buf[AES_BLOCK_SIZE] = {0}; unsigned mode = 0; assert(in && out && aes_key && iv); assert(mode < AES_BLOCK_SIZE); while (length--) { if (mode == 0) { dev_AES_encrypt(iv, ecount_buf, aes_key); dev_AES_ctr128_inc(iv); } *(out++) = *(in++) ^ ecount_buf[mode]; mode = (mode + 1) % AES_BLOCK_SIZE; } } __syncthreads(); } } void ipsec_aes_encryption_get_cuda_kernel( uint8_t *pac_data, const int total_len, const int batch_size, const unsigned int *pac_sign, AES_KEY *aes_key) { // CheckGPUinfo(); //定义device变量 unsigned char *dev_iv; AES_KEY *dev_aes_key; uint8_t *dev_pac_data; unsigned int *dev_pac_sign; //申请device内存 cudaMalloc((void **)&dev_iv, ESP_IV_LENGTH * sizeof(unsigned char)); cudaMalloc((void **)&dev_pac_data, total_len * sizeof(uint8_t)); cudaMalloc((void **)&dev_pac_sign, batch_size * sizeof(unsigned int)); cudaMalloc((void **)&dev_aes_key, sizeof(AES_KEY)); //将host数据拷贝到device cudaMemcpy(dev_pac_data, pac_data, total_len * sizeof(uint8_t), cudaMemcpyHostToDevice); cudaMemcpy(dev_pac_sign, pac_sign, batch_size * sizeof(unsigned int), cudaMemcpyHostToDevice); cudaMemcpy(dev_aes_key, aes_key, sizeof(AES_KEY), cudaMemcpyHostToDevice); //定义kernel的执行配置 dim3 threads_per_block(16, 16); float block_x = sqrt((float)batch_size) / 
(float)threads_per_block.x; int block_num_x = (block_x == int(block_x) ? block_x : int(block_x) + 1); float block_y = sqrt((float)batch_size) / (float)threads_per_block.y; int block_num_y = (block_y == int(block_y) ? block_y : int(block_y) + 1); dim3 block_num(block_num_x, block_num_y); AES_ctr128_encrypt_chunk<<<block_num, threads_per_block>>>( batch_size, block_num_y, dev_iv, dev_pac_data, dev_pac_sign, dev_aes_key); cudaDeviceSynchronize(); //将device的结果拷贝到host cudaMemcpy(pac_data, dev_pac_data, total_len * sizeof(uint8_t), cudaMemcpyDeviceToHost); cudaFree(dev_iv); cudaFree(dev_pac_data); cudaFree(dev_pac_sign); cudaFree(dev_aes_key); }
69aa9879055e99541dcb61182d2bfa7773955f9a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void findDiffLabelsAtomicFree(float* devDiff, int diffPitchInFloats, int nPoints, int nClusters, int* devClusters, int* devChanges) { int x = blockDim.x * blockIdx.x + threadIdx.x; if (x < nPoints) { int index = x; float minDistance = 10000000; int minCluster = -1; for(int cluster = 0; cluster < nClusters; cluster++) { float clusterDistance = devDiff[index]; if (clusterDistance < minDistance) { minDistance = clusterDistance; minCluster = cluster; } index += diffPitchInFloats; } int previousCluster = devClusters[x]; devClusters[x] = minCluster; if (minCluster != previousCluster) { //int change=*devChanges; //change++; //*devChanges = change; } } }
69aa9879055e99541dcb61182d2bfa7773955f9a.cu
#include "includes.h" __global__ void findDiffLabelsAtomicFree(float* devDiff, int diffPitchInFloats, int nPoints, int nClusters, int* devClusters, int* devChanges) { int x = blockDim.x * blockIdx.x + threadIdx.x; if (x < nPoints) { int index = x; float minDistance = 10000000; int minCluster = -1; for(int cluster = 0; cluster < nClusters; cluster++) { float clusterDistance = devDiff[index]; if (clusterDistance < minDistance) { minDistance = clusterDistance; minCluster = cluster; } index += diffPitchInFloats; } int previousCluster = devClusters[x]; devClusters[x] = minCluster; if (minCluster != previousCluster) { //int change=*devChanges; //change++; //*devChanges = change; } } }
646c215ee3b52eefab6dc358bcc569710231dc9e.hip
// !!! This is a file automatically generated by hipify!!! #include "colorconvert.h" #include "imageprocess.h" // nvccC++ #define BLOCK_X 32 #define BLOCK_Y 8 // nvcc, // using namespace aoce; // using namespace aoce::cuda; namespace aoce { namespace cuda { const dim3 block = dim3(BLOCK_X, BLOCK_Y); void rgb2rgba_gpu(PtrStepSz<uchar3> source, PtrStepSz<uchar4> dest, hipStream_t stream) { dim3 grid(divUp(dest.width, block.x), divUp(dest.height, block.y)); rgb2rgba << <grid, block, 0, stream >> > (source, dest); } void rgba2bgr_gpu(PtrStepSz<uchar4> source, PtrStepSz<uchar3> dest, hipStream_t stream) { dim3 grid(divUp(dest.width, block.x), divUp(dest.height, block.y)); rgba2bgr << <grid, block, 0, stream >> > (source, dest); } void argb2rgba_gpu(PtrStepSz<uchar4> source, PtrStepSz<uchar4> dest, hipStream_t stream) { dim3 grid(divUp(dest.width, block.x), divUp(dest.height, block.y)); argb2rgba << <grid, block, 0, stream >> > (source, dest); } //yuv planerrgb void yuv2rgb_gpu(PtrStepSz<uchar> source, PtrStepSz<uchar4> dest, int32_t yuvtype, hipStream_t stream) { dim3 grid(divUp(dest.width/2, block.x), divUp(dest.height/2, block.y)); if (yuvtype == 1) yuv2rgb<1> << <grid, block, 0, stream >> > (source, dest); else if (yuvtype == 2) yuv2rgb<2> << <grid, block, 0, stream >> > (source, dest); else if (yuvtype == 3){ dim3 grid(divUp(dest.width/2, block.x), divUp(dest.height, block.y)); yuv2rgb<3> << <grid, block, 0, stream >> > (source, dest); } } //packed ufront/yfront (yuyv true/true)/(yvyu false/true)/(uyvy true/false) void yuv2rgb_gpu(PtrStepSz<uchar4> source, PtrStepSz<uchar4> dest, bool ufront, bool yfront, hipStream_t stream) { dim3 grid(divUp(source.width, block.x), divUp(source.height, block.y)); int bitx = ufront ? 0 : 2; int yoffset = yfront ? 
0 : 1; yuv2rgb << <grid, block, 0, stream >> > (source, dest, bitx, yoffset); } void rgb2yuv_gpu(PtrStepSz<uchar4> source, PtrStepSz<uchar> dest, int32_t yuvtype, hipStream_t stream) { dim3 grid(divUp(source.width/2, block.x), divUp(source.height/2, block.y)); if (yuvtype == 1) rgb2yuv<1> << <grid, block, 0, stream >> > (source, dest); else if (yuvtype == 2) rgb2yuv<2> << <grid, block, 0, stream >> > (source, dest); else if (yuvtype == 3){ dim3 grid(divUp(source.width/2, block.x), divUp(source.height, block.y)); rgb2yuv<3> << <grid, block, 0, stream >> > (source, dest); } } //packed ufront/yfront (yuyv true/true)/(yvyu false/true)/(uyvy true/false) void rgb2yuv_gpu(PtrStepSz<uchar4> source, PtrStepSz<uchar4> dest, bool ufront, bool yfront, hipStream_t stream) { dim3 grid(divUp(dest.width, block.x), divUp(dest.height, block.y)); int bitx = ufront ? 0 : 2; int yoffset = yfront ? 0 : 1; rgb2yuv << <grid, block, 0, stream >> > (source, dest, bitx, yoffset); } void textureMap_gpu(PtrStepSz<uchar4> source, PtrStepSz<uchar4> dest, MapChannel paramt, hipStream_t stream) { dim3 grid(divUp(source.width, block.x), divUp(source.height, block.y)); textureMap << <grid, block, 0, stream >> > (source, dest, paramt); } void blend_gpu(PtrStepSz<uchar4> source, PtrStepSz<uchar4> blendTex, PtrStepSz<uchar4> dest, int32_t left, int32_t top, float opacity, hipStream_t stream) { dim3 grid(divUp(source.width, block.x), divUp(source.height, block.y)); blend << <grid, block, 0, stream >> > (source, blendTex, dest, left, top, opacity); } void operate_gpu(PtrStepSz<uchar4> source, PtrStepSz<uchar4> dest, Operate paramt, hipStream_t stream) { dim3 grid(divUp(source.width, block.x), divUp(source.height, block.y)); operate << <grid, block, 0, stream >> > (source, dest, paramt); } void uchar2float_gpu(PtrStepSz<uchar4> source, PtrStepSz<float4> dest, hipStream_t stream){ dim3 grid(divUp(source.width, block.x), divUp(source.height, block.y)); uchar2float << <grid, block, 0, stream >> > (source, 
dest); } template <typename T> void resize_gpu(PtrStepSz<T> source, PtrStepSz<T> dest, bool bLinear, hipStream_t stream) { float fx = static_cast<float>(source.width) / dest.width; float fy = static_cast<float>(source.height) / dest.height; dim3 grid(divUp(dest.width, block.x), divUp(dest.height, block.y)); if (bLinear) { resize_linear<T> << <grid, block, 0, stream >> > (source, dest, fx, fy); } else { resize_nearest<T> << <grid, block, 0, stream >> > (source, dest, fx, fy); } } // template void resize_gpu<uchar4>(PtrStepSz<uchar4> source, PtrStepSz<uchar4> dest, bool bLinear, hipStream_t stream); template void resize_gpu<uchar>(PtrStepSz<uchar> source, PtrStepSz<uchar> dest, bool bLinear, hipStream_t stream); template void resize_gpu<float4>(PtrStepSz<float4> source, PtrStepSz<float4> dest, bool bLinear, hipStream_t stream); } }
646c215ee3b52eefab6dc358bcc569710231dc9e.cu
#include "colorconvert.h" #include "imageprocess.h" // nvcc与C++编译的转接文件 #define BLOCK_X 32 #define BLOCK_Y 8 // 这几个文件只用于nvcc编译,不会污染别的头文件 // using namespace aoce; // using namespace aoce::cuda; namespace aoce { namespace cuda { const dim3 block = dim3(BLOCK_X, BLOCK_Y); void rgb2rgba_gpu(PtrStepSz<uchar3> source, PtrStepSz<uchar4> dest, cudaStream_t stream) { dim3 grid(divUp(dest.width, block.x), divUp(dest.height, block.y)); rgb2rgba << <grid, block, 0, stream >> > (source, dest); } void rgba2bgr_gpu(PtrStepSz<uchar4> source, PtrStepSz<uchar3> dest, cudaStream_t stream) { dim3 grid(divUp(dest.width, block.x), divUp(dest.height, block.y)); rgba2bgr << <grid, block, 0, stream >> > (source, dest); } void argb2rgba_gpu(PtrStepSz<uchar4> source, PtrStepSz<uchar4> dest, cudaStream_t stream) { dim3 grid(divUp(dest.width, block.x), divUp(dest.height, block.y)); argb2rgba << <grid, block, 0, stream >> > (source, dest); } //yuv planer转换成rgb void yuv2rgb_gpu(PtrStepSz<uchar> source, PtrStepSz<uchar4> dest, int32_t yuvtype, cudaStream_t stream) { dim3 grid(divUp(dest.width/2, block.x), divUp(dest.height/2, block.y)); if (yuvtype == 1) yuv2rgb<1> << <grid, block, 0, stream >> > (source, dest); else if (yuvtype == 2) yuv2rgb<2> << <grid, block, 0, stream >> > (source, dest); else if (yuvtype == 3){ dim3 grid(divUp(dest.width/2, block.x), divUp(dest.height, block.y)); yuv2rgb<3> << <grid, block, 0, stream >> > (source, dest); } } //packed ufront/yfront (yuyv true/true)/(yvyu false/true)/(uyvy true/false) void yuv2rgb_gpu(PtrStepSz<uchar4> source, PtrStepSz<uchar4> dest, bool ufront, bool yfront, cudaStream_t stream) { dim3 grid(divUp(source.width, block.x), divUp(source.height, block.y)); int bitx = ufront ? 0 : 2; int yoffset = yfront ? 
0 : 1; yuv2rgb << <grid, block, 0, stream >> > (source, dest, bitx, yoffset); } void rgb2yuv_gpu(PtrStepSz<uchar4> source, PtrStepSz<uchar> dest, int32_t yuvtype, cudaStream_t stream) { dim3 grid(divUp(source.width/2, block.x), divUp(source.height/2, block.y)); if (yuvtype == 1) rgb2yuv<1> << <grid, block, 0, stream >> > (source, dest); else if (yuvtype == 2) rgb2yuv<2> << <grid, block, 0, stream >> > (source, dest); else if (yuvtype == 3){ dim3 grid(divUp(source.width/2, block.x), divUp(source.height, block.y)); rgb2yuv<3> << <grid, block, 0, stream >> > (source, dest); } } //packed ufront/yfront (yuyv true/true)/(yvyu false/true)/(uyvy true/false) void rgb2yuv_gpu(PtrStepSz<uchar4> source, PtrStepSz<uchar4> dest, bool ufront, bool yfront, cudaStream_t stream) { dim3 grid(divUp(dest.width, block.x), divUp(dest.height, block.y)); int bitx = ufront ? 0 : 2; int yoffset = yfront ? 0 : 1; rgb2yuv << <grid, block, 0, stream >> > (source, dest, bitx, yoffset); } void textureMap_gpu(PtrStepSz<uchar4> source, PtrStepSz<uchar4> dest, MapChannel paramt, cudaStream_t stream) { dim3 grid(divUp(source.width, block.x), divUp(source.height, block.y)); textureMap << <grid, block, 0, stream >> > (source, dest, paramt); } void blend_gpu(PtrStepSz<uchar4> source, PtrStepSz<uchar4> blendTex, PtrStepSz<uchar4> dest, int32_t left, int32_t top, float opacity, cudaStream_t stream) { dim3 grid(divUp(source.width, block.x), divUp(source.height, block.y)); blend << <grid, block, 0, stream >> > (source, blendTex, dest, left, top, opacity); } void operate_gpu(PtrStepSz<uchar4> source, PtrStepSz<uchar4> dest, Operate paramt, cudaStream_t stream) { dim3 grid(divUp(source.width, block.x), divUp(source.height, block.y)); operate << <grid, block, 0, stream >> > (source, dest, paramt); } void uchar2float_gpu(PtrStepSz<uchar4> source, PtrStepSz<float4> dest, cudaStream_t stream){ dim3 grid(divUp(source.width, block.x), divUp(source.height, block.y)); uchar2float << <grid, block, 0, stream >> > 
(source, dest); } template <typename T> void resize_gpu(PtrStepSz<T> source, PtrStepSz<T> dest, bool bLinear, cudaStream_t stream) { float fx = static_cast<float>(source.width) / dest.width; float fy = static_cast<float>(source.height) / dest.height; dim3 grid(divUp(dest.width, block.x), divUp(dest.height, block.y)); if (bLinear) { resize_linear<T> << <grid, block, 0, stream >> > (source, dest, fx, fy); } else { resize_nearest<T> << <grid, block, 0, stream >> > (source, dest, fx, fy); } } //实例化几个 template void resize_gpu<uchar4>(PtrStepSz<uchar4> source, PtrStepSz<uchar4> dest, bool bLinear, cudaStream_t stream); template void resize_gpu<uchar>(PtrStepSz<uchar> source, PtrStepSz<uchar> dest, bool bLinear, cudaStream_t stream); template void resize_gpu<float4>(PtrStepSz<float4> source, PtrStepSz<float4> dest, bool bLinear, cudaStream_t stream); } }
34680f40019a3a729e18f9a49624738183c2b5af.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2012 The Trustees of Indiana University. All rights reserved. CGL MapReduce Framework on GPUs and CPUs. Code Name: Panda File: PandaLib.cu First Version: 2012-07-01 V0.1 Current Version: 2012-09-01 V0.3 Last Updates: 2012-09-016 Developer: Hui Li (lihui@indiana.edu) This is the source code for Panda, a MapReduce runtime on GPUs and CPUs. */ #ifndef __PANDALIB_CU__ #define __PANDALIB_CU__ #include "Panda.h" #include "UserAPI.cu" //---------------------------------------------- //Get default job configuration //---------------------------------------------- job_configuration *CreateJobConf(){ job_configuration *job_conf = (job_configuration *)malloc(sizeof(job_configuration)); if (job_conf == NULL) exit(-1); memset(job_conf, 0, sizeof(job_configuration)); job_conf->num_input_record = 0; job_conf->input_keyval_arr = NULL; job_conf->auto_tuning = false; job_conf->iterative_support = false; job_conf->local_combiner = false; job_conf->num_mappers = 0; job_conf->num_reducers = 0; job_conf->num_gpu_core_groups = 0; job_conf->num_cpus_cores = 0; job_conf->num_cpus_groups = 0; return job_conf; }//gpu_context gpu_card_context *CreateGPUCardContext(){ gpu_card_context *d_g_state = (gpu_card_context*)malloc(sizeof(gpu_card_context)); if (d_g_state == NULL) exit(-1); memset(d_g_state, 0, sizeof(gpu_card_context)); d_g_state->iterative_support = false; d_g_state->input_keyval_arr = NULL; //d_g_state->num_mappers = 0; //d_g_state->num_reducers = 0; d_g_state->local_combiner = false; return d_g_state; }//gpu_context gpu_context *CreateGPUCoreContext(){ gpu_context *d_g_state = (gpu_context*)malloc(sizeof(gpu_context)); if (d_g_state == NULL) exit(-1); memset(d_g_state, 0, sizeof(gpu_context)); d_g_state->iterative_support = false; d_g_state->h_input_keyval_arr = NULL; d_g_state->num_mappers = 0; d_g_state->num_reducers = 0; d_g_state->local_combiner = false; return d_g_state; 
}//gpu_context cpu_context *CreateCPUContext(){ cpu_context *d_g_state = (cpu_context*)malloc(sizeof(cpu_context)); if (d_g_state == NULL) exit(-1); memset(d_g_state, 0, sizeof(cpu_context)); d_g_state->iterative_support = false; d_g_state->local_combiner = false; d_g_state->input_keyval_arr = NULL; return d_g_state; }//gpu_context panda_context *CreatePandaContext(){ panda_context *d_g_state = (panda_context*)malloc(sizeof(panda_context)); if (d_g_state == NULL) exit(-1); d_g_state->input_keyval_arr = NULL; d_g_state->intermediate_keyval_arr_arr_p = NULL; d_g_state->sorted_intermediate_keyvals_arr = NULL; d_g_state->sorted_keyvals_arr_len = 0; d_g_state->num_gpu_core_groups = 0; d_g_state->num_gpu_card_groups = 0; d_g_state->num_cpus_groups = 0; d_g_state->gpu_core_context = NULL; d_g_state->gpu_card_context = NULL; d_g_state->cpu_context = NULL; return d_g_state; }//panda_context //For version 0.3 void InitCPUMapReduce2(thread_info_t * thread_info){ cpu_context *d_g_state = (cpu_context *)(thread_info->d_g_state); job_configuration *job_conf = (job_configuration *)(thread_info->job_conf); if (job_conf->num_input_record<=0) { ShowError("Error: no any input keys"); exit(-1);} if (job_conf->input_keyval_arr == NULL) { ShowError("Error: input_keyval_arr == NULL"); exit(-1);} if (d_g_state->num_cpus_cores <= 0) { ShowError("Error: d_g_state->num_cpus == 0"); exit(-1);} int totalKeySize = 0; int totalValSize = 0; for(int i=0;i<job_conf->num_input_record;i++){ totalKeySize += job_conf->input_keyval_arr[i].keySize; totalValSize += job_conf->input_keyval_arr[i].valSize; }//for ShowLog("CPU_GROUP_ID:[%d] num_input_record:%d, totalKeySize:%d KB totalValSize:%d KB num_cpus:%d", d_g_state->cpu_group_id, job_conf->num_input_record, totalKeySize/1024, totalValSize/1024, d_g_state->num_cpus_cores); //TODO determin num_cpus int num_cpus_cores = d_g_state->num_cpus_cores; d_g_state->panda_cpu_task = (pthread_t *)malloc(sizeof(pthread_t)*(num_cpus_cores)); 
d_g_state->panda_cpu_task_info = (panda_cpu_task_info_t *)malloc(sizeof(panda_cpu_task_info_t)*(num_cpus_cores)); d_g_state->intermediate_keyval_arr_arr_p = (keyval_arr_t *)malloc(sizeof(keyval_arr_t)*job_conf->num_input_record); memset(d_g_state->intermediate_keyval_arr_arr_p, 0, sizeof(keyval_arr_t)*job_conf->num_input_record); for (int i=0;i<num_cpus_cores;i++){ d_g_state->panda_cpu_task_info[i].d_g_state = d_g_state; d_g_state->panda_cpu_task_info[i].cpu_job_conf = job_conf; d_g_state->panda_cpu_task_info[i].num_cpus_cores = num_cpus_cores; d_g_state->panda_cpu_task_info[i].start_row_idx = 0; d_g_state->panda_cpu_task_info[i].end_row_idx = 0; }//for d_g_state->iterative_support = true; ShowLog("CPU_GROUP_ID:[%d] DONE",d_g_state->cpu_group_id); } #ifdef DEV_MODE //For Version 0.3 test depressed void InitGPUMapReduce4(thread_info_t* thread_info) { gpu_context *d_g_state = (gpu_context *)(thread_info->d_g_state); job_configuration* gpu_job_conf = (job_configuration*)(thread_info->job_conf); keyval_t * kv_p = gpu_job_conf->input_keyval_arr; ShowLog("d_g_state->configured:%s enable for iterative applications",d_g_state->configured? 
"true" : "false"); //if (d_g_state->configured) // return; ShowLog("copy %d input records from Host to GPU memory",gpu_job_conf->num_input_record); //checkCudaErrors(hipMalloc((void **)&d_g_state->d_input_keyval_arr,sizeof(keyval_t)*d_g_state->num_input_record)); int totalKeySize = 0; int totalValSize = 0; for(int i=0;i<gpu_job_conf->num_input_record;i++){ totalKeySize += kv_p[i].keySize; totalValSize += kv_p[i].valSize; }//for ShowLog("totalKeySize:%d totalValSize:%d", totalKeySize, totalValSize); void *input_vals_shared_buff = malloc(totalValSize); void *input_keys_shared_buff = malloc(totalKeySize); keyval_pos_t *input_keyval_pos_arr = (keyval_pos_t *)malloc(sizeof(keyval_pos_t)*gpu_job_conf->num_input_record); int keyPos = 0; int valPos = 0; int keySize = 0; int valSize = 0; for(int i=0; i<gpu_job_conf->num_input_record; i++){ keySize = kv_p[i].keySize; valSize = kv_p[i].valSize; memcpy((char *)input_keys_shared_buff + keyPos,(char *)(kv_p[i].key), keySize); memcpy((char *)input_vals_shared_buff + valPos,(char *)(kv_p[i].val), valSize); input_keyval_pos_arr[i].keySize = keySize; input_keyval_pos_arr[i].keyPos = keyPos; input_keyval_pos_arr[i].valPos = valPos; input_keyval_pos_arr[i].valSize = valSize; keyPos += keySize; valPos += valSize; }//for checkCudaErrors(hipMalloc((void **)&d_g_state->d_input_keyval_pos_arr,sizeof(keyval_pos_t)*gpu_job_conf->num_input_record)); checkCudaErrors(hipMalloc((void **)&d_g_state->d_input_keys_shared_buff, totalKeySize)); checkCudaErrors(hipMalloc((void **)&d_g_state->d_input_vals_shared_buff, totalValSize)); checkCudaErrors(hipMemcpy(d_g_state->d_input_keyval_pos_arr, input_keyval_pos_arr,sizeof(keyval_pos_t)*gpu_job_conf->num_input_record ,hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_g_state->d_input_keys_shared_buff, input_keys_shared_buff,totalKeySize ,hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_g_state->d_input_vals_shared_buff, input_vals_shared_buff,totalValSize ,hipMemcpyHostToDevice)); 
//checkCudaErrors(hipMemcpy(d_g_state->d_input_keyval_arr,h_buff,sizeof(keyval_t)*d_g_state->num_input_record,hipMemcpyHostToDevice)); hipDeviceSynchronize(); d_g_state->configured = true; }//void #endif void InitGPUCardMapReduce(gpu_card_context* d_g_state){ //cpu_context *d_g_state = (cpu_context *)(thread_info->d_g_state); //job_configuration *job_conf = (job_configuration *)(thread_info->job_conf); ////////////////// if (d_g_state->num_input_record<=0) { ShowError("Error: no any input keys"); exit(-1);} if (d_g_state->input_keyval_arr == NULL) { ShowError("Error: input_keyval_arr == NULL"); exit(-1);} //if (d_g_state->num_cpus_cores <= 0) { ShowError("Error: d_g_state->num_cpus == 0"); exit(-1);} int totalKeySize = 0; int totalValSize = 0; for(int i=0;i<d_g_state->num_input_record;i++){ totalKeySize += d_g_state->input_keyval_arr[i].keySize; totalValSize += d_g_state->input_keyval_arr[i].valSize; }//for ShowLog("GPU_CARD_GROUP_ID:[%d] num_input_record:%d, totalKeySize:%d totalValSize:%d ", d_g_state->gpu_group_id, d_g_state->num_input_record, totalKeySize, totalValSize); //TODO determin num_cpus //int num_cpus_cores = d_g_state->num_cpus_cores; int num_task_per_gpu_card = d_g_state->num_input_record; d_g_state->panda_gpu_task = (pthread_t *)malloc(sizeof(pthread_t)*(num_task_per_gpu_card)); d_g_state->panda_cpu_task_info = (panda_cpu_task_info_t *)malloc(sizeof(panda_cpu_task_info_t)*(num_task_per_gpu_card)); d_g_state->intermediate_keyval_arr_arr_p = (keyval_arr_t *)malloc(sizeof(keyval_arr_t)*d_g_state->num_input_record); memset(d_g_state->intermediate_keyval_arr_arr_p, 0, sizeof(keyval_arr_t)*d_g_state->num_input_record); for (int i=0;i<num_task_per_gpu_card;i++){ //d_g_state->panda_cpu_task_info[i].d_g_state = d_g_state; //d_g_state->panda_cpu_task_info[i].cpu_job_conf = job_conf; //d_g_state->panda_cpu_task_info[i].num_cpus_cores = num_cpus_cores; d_g_state->panda_cpu_task_info[i].start_row_idx = 0; d_g_state->panda_cpu_task_info[i].end_row_idx = 0; }//for 
d_g_state->iterative_support = true; ShowLog("GPU_CARD_GROUP_ID:[%d] DONE",d_g_state->gpu_group_id); }//void void InitGPUMapReduce3(gpu_context* d_g_state) { //ShowLog("d_g_state->iterative_support:%s enable for iterative applications",d_g_state->iterative_support? "true" : "false"); //if (d_g_state->iterative_support){ //ShowLog("d_g_state->configured:%s skip configuration...",d_g_state->iterative_support? "true" : "false"); //return; //} int totalKeySize = 0; int totalValSize = 0; for(int i=0;i<d_g_state->num_input_record;i++){ totalKeySize += d_g_state->h_input_keyval_arr[i].keySize; totalValSize += d_g_state->h_input_keyval_arr[i].valSize; }//for ShowLog("GPU_ID:[%d] copy %d input records from Host to GPU memory totalKeySize:%d KB totalValSize:%d KB",d_g_state->gpu_id, d_g_state->num_input_record, totalKeySize/1024, totalValSize/1024); double t1 = PandaTimer(); void *input_vals_shared_buff = malloc(totalValSize); void *input_keys_shared_buff = malloc(totalKeySize); keyval_pos_t *input_keyval_pos_arr = (keyval_pos_t *)malloc(sizeof(keyval_pos_t)*d_g_state->num_input_record); int keyPos = 0; int valPos = 0; int keySize = 0; int valSize = 0; for(int i=0;i<d_g_state->num_input_record;i++){ keySize = d_g_state->h_input_keyval_arr[i].keySize; valSize = d_g_state->h_input_keyval_arr[i].valSize; memcpy((char *)input_keys_shared_buff + keyPos,(char *)(d_g_state->h_input_keyval_arr[i].key), keySize); memcpy((char *)input_vals_shared_buff + valPos,(char *)(d_g_state->h_input_keyval_arr[i].val), valSize); input_keyval_pos_arr[i].keySize = keySize; input_keyval_pos_arr[i].keyPos = keyPos; input_keyval_pos_arr[i].valPos = valPos; input_keyval_pos_arr[i].valSize = valSize; keyPos += keySize; valPos += valSize; }//for checkCudaErrors(hipMalloc((void **)&d_g_state->d_input_keyval_pos_arr,sizeof(keyval_pos_t)*d_g_state->num_input_record)); checkCudaErrors(hipMalloc((void **)&d_g_state->d_input_keys_shared_buff, totalKeySize)); checkCudaErrors(hipMalloc((void 
**)&d_g_state->d_input_vals_shared_buff, totalValSize)); checkCudaErrors(hipMemcpy(d_g_state->d_input_keyval_pos_arr, input_keyval_pos_arr,sizeof(keyval_pos_t)*d_g_state->num_input_record ,hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_g_state->d_input_keys_shared_buff, input_keys_shared_buff,totalKeySize ,hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_g_state->d_input_vals_shared_buff, input_vals_shared_buff,totalValSize ,hipMemcpyHostToDevice)); //checkCudaErrors(hipMemcpy(d_g_state->d_input_keyval_arr,h_buff,sizeof(keyval_t)*d_g_state->num_input_record,hipMemcpyHostToDevice)); hipDeviceSynchronize(); double t2 = PandaTimer(); ShowLog("GPU_ID:[%d] copy keyvalue pairs done. Take:%f sec",d_g_state->gpu_id, t2-t1); //d_g_state->iterative_support = true; }//void #ifdef DEV_MODE void InitGPUMapReduce2(gpu_context* d_g_state) { ShowLog("d_g_state->num_input_record:%d",d_g_state->num_input_record); //checkCudaErrors(hipMalloc((void **)&d_g_state->d_input_keyval_arr,sizeof(keyval_t)*d_g_state->num_input_record)); int totalKeySize = 0; int totalValSize = 0; for(int i=0;i<d_g_state->num_input_record;i++){ totalKeySize += d_g_state->h_input_keyval_arr[i].keySize; totalValSize += d_g_state->h_input_keyval_arr[i].valSize; }//for void *input_vals_shared_buff = malloc(totalValSize); void *input_keys_shared_buff = malloc(totalKeySize); keyval_pos_t *input_keyval_pos_arr = (keyval_pos_t *)malloc(sizeof(keyval_pos_t)*d_g_state->num_input_record); int keyPos = 0; int valPos = 0; int keySize = 0; int valSize = 0; for(int i=0;i<d_g_state->num_input_record;i++){ keySize = d_g_state->h_input_keyval_arr[i].keySize; valSize = d_g_state->h_input_keyval_arr[i].valSize; memcpy((char *)input_keys_shared_buff + keyPos,(char *)(d_g_state->h_input_keyval_arr[i].key), keySize); memcpy((char *)input_vals_shared_buff + valPos,(char *)(d_g_state->h_input_keyval_arr[i].val), valSize); input_keyval_pos_arr[i].keySize = keySize; input_keyval_pos_arr[i].keyPos = keyPos; 
input_keyval_pos_arr[i].valPos = valPos; input_keyval_pos_arr[i].valSize = valSize; keyPos += keySize; valPos += valSize; }//for checkCudaErrors(hipMalloc((void **)&d_g_state->d_input_keyval_pos_arr,sizeof(keyval_pos_t)*d_g_state->num_input_record)); checkCudaErrors(hipMalloc((void **)&d_g_state->d_input_keys_shared_buff, totalKeySize)); checkCudaErrors(hipMalloc((void **)&d_g_state->d_input_vals_shared_buff, totalValSize)); checkCudaErrors(hipMemcpy(d_g_state->d_input_keyval_pos_arr, input_keyval_pos_arr,sizeof(keyval_pos_t)*d_g_state->num_input_record ,hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_g_state->d_input_keys_shared_buff, input_keys_shared_buff,totalKeySize ,hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_g_state->d_input_vals_shared_buff, input_vals_shared_buff,totalValSize ,hipMemcpyHostToDevice)); //checkCudaErrors(hipMemcpy(d_g_state->d_input_keyval_arr,h_buff,sizeof(keyval_t)*d_g_state->num_input_record,hipMemcpyHostToDevice)); hipDeviceSynchronize(); }//void #endif void InitCPUDevice(thread_info_t*thread_info){ //------------------------------------------ //1, init CPU device //------------------------------------------ cpu_context *d_g_state = (cpu_context *)(thread_info->d_g_state); if (d_g_state->num_cpus_cores<=0) d_g_state->num_cpus_cores = getCPUCoresNum(); //int tid = thread_info->tid; ShowLog( "CPU_GROUP_ID:[%d] Init CPU Deivce Num cpus cores:%d",d_g_state->cpu_group_id, d_g_state->num_cpus_cores); } void InitGPUDevice(thread_info_t*thread_info){ //------------------------------------------ //1, init device //------------------------------------------ int tid, assigned_gpu_id; if (thread_info->device_type == GPU_CORE_ACC){ gpu_context *d_g_state = (gpu_context *)(thread_info->d_g_state); tid = thread_info->tid; assigned_gpu_id = d_g_state->gpu_id; int num_gpu_core_groups = d_g_state->num_gpu_core_groups; if (num_gpu_core_groups == 0) { ShowError("error num_gpu_core_groups == 0"); exit(-1); }//gpu_context } if 
(thread_info->device_type == GPU_CARD_ACC){ gpu_card_context *d_g_state = (gpu_card_context *)(thread_info->d_g_state); //int tid = thread_info->tid; assigned_gpu_id = d_g_state->gpu_id; int num_gpu_card_groups = d_g_state->num_gpu_card_groups; if (num_gpu_card_groups == 0) { ShowError("error num_gpu_core_groups == 0"); exit(-1); }//gpu_context }//if //int tid = thread_info->tid; int gpu_id; hipGetDevice(&gpu_id); int gpu_count = 0; hipGetDeviceCount(&gpu_count); hipDeviceProp_t gpu_dev; hipGetDeviceProperties(&gpu_dev, gpu_id); ShowLog("TID:[%d] check GPU ids: cur_gpu_id:[%d] assig_gpu_id:[%d] hipGetDeviceCount:[%d] GPU name:%s", tid, gpu_id, assigned_gpu_id, gpu_count, gpu_dev.name); //TODO int num_gpus = 1; if ( gpu_id != assigned_gpu_id ){ //ShowLog("hipSetDevice gpu_id %d == (tid num_gpu_core_groups) %d ", gpu_id, tid%num_gpu_core_groups); hipSetDevice(assigned_gpu_id % num_gpus); }//if size_t total_mem,avail_mem, heap_limit; checkCudaErrors(hipMemGetInfo( &avail_mem, &total_mem )); size_t heap_size = (avail_mem*0.8); hipDeviceSetLimit(hipLimitMallocHeapSize, heap_size); hipDeviceGetLimit(&heap_limit, hipLimitMallocHeapSize); int numGPUCores = getGPUCoresNum(); ShowLog("GPU_ID:[%d] numGPUCores:%d total_mem:%d MB HeapSize:%d MB avail_mem:%d MB ", gpu_id, numGPUCores,total_mem/1024/1024, heap_limit/1024/1024, avail_mem/1024/1024); } void AddPandaTask(job_configuration* job_conf, void* key, void* val, int keySize, int valSize){ int len = job_conf->num_input_record; if (len<0) return; if (len == 0) job_conf->input_keyval_arr = NULL; job_conf->input_keyval_arr = (keyval_t *)realloc(job_conf->input_keyval_arr, sizeof(keyval_t)*(len+1)); job_conf->input_keyval_arr[len].keySize = keySize; job_conf->input_keyval_arr[len].valSize = valSize; job_conf->input_keyval_arr[len].key = malloc(keySize); job_conf->input_keyval_arr[len].val = malloc(valSize); memcpy(job_conf->input_keyval_arr[len].key,key,keySize); memcpy(job_conf->input_keyval_arr[len].val,val,valSize); 
job_conf->num_input_record++; } void AddReduceInputRecordGPU(gpu_context* d_g_state, keyvals_t * sorted_intermediate_keyvals_arr, int start_row_id, int end_row_id){ int total_count = 0; for(int i=start_row_id;i<end_row_id;i++){ total_count += sorted_intermediate_keyvals_arr[i].val_arr_len; }//for int totalKeySize = 0; int totalValSize = 0; for(int i=start_row_id;i<end_row_id;i++){ totalKeySize += (sorted_intermediate_keyvals_arr[i].keySize+3)/4*4; for (int j=0;j<sorted_intermediate_keyvals_arr[i].val_arr_len;j++) totalValSize += (sorted_intermediate_keyvals_arr[i].vals[j].valSize+3)/4*4; }//for checkCudaErrors(hipMalloc((void **)&d_g_state->d_sorted_keys_shared_buff,totalKeySize)); checkCudaErrors(hipMalloc((void **)&d_g_state->d_sorted_vals_shared_buff,totalValSize)); checkCudaErrors(hipMalloc((void **)&d_g_state->d_keyval_pos_arr,sizeof(keyval_pos_t)*total_count)); d_g_state->h_sorted_keys_shared_buff = malloc(sizeof(char)*totalKeySize); d_g_state->h_sorted_vals_shared_buff = malloc(sizeof(char)*totalValSize); char *sorted_keys_shared_buff = (char *)d_g_state->h_sorted_keys_shared_buff; char *sorted_vals_shared_buff = (char *)d_g_state->h_sorted_vals_shared_buff; char *keyval_pos_arr = (char *)malloc(sizeof(keyval_pos_t)*total_count); int sorted_key_arr_len = (end_row_id-start_row_id); keyval_pos_t *tmp_keyval_pos_arr = (keyval_pos_t *)malloc(sizeof(keyval_pos_t)*total_count); ShowLog("GPU_ID:[%d] total #different intermediate records:%d total records:%d totalKeySize:%d KB totalValSize:%d KB", d_g_state->gpu_id, end_row_id - start_row_id, total_count, totalKeySize/1024, totalValSize/1024); int *pos_arr_4_pos_arr = (int*)malloc(sizeof(int)*(sorted_key_arr_len)); memset(pos_arr_4_pos_arr,0,sizeof(int)*sorted_key_arr_len); int index = 0; int keyPos = 0; int valPos = 0; for (int i=start_row_id;i<end_row_id;i++){ keyvals_t* p = (keyvals_t*)&(sorted_intermediate_keyvals_arr[i]); memcpy(sorted_keys_shared_buff+keyPos,p->key, p->keySize); for (int 
j=0;j<p->val_arr_len;j++){ tmp_keyval_pos_arr[index].keyPos = keyPos; tmp_keyval_pos_arr[index].keySize = p->keySize; tmp_keyval_pos_arr[index].valPos = valPos; tmp_keyval_pos_arr[index].valSize = p->vals[j].valSize; memcpy(sorted_vals_shared_buff + valPos,p->vals[j].val,p->vals[j].valSize); valPos += (p->vals[j].valSize+3)/4*4; index++; }//for keyPos += (p->keySize+3)/4*4; pos_arr_4_pos_arr[i-start_row_id] = index; }// d_g_state->d_sorted_keyvals_arr_len = end_row_id-start_row_id; checkCudaErrors(hipMemcpy(d_g_state->d_keyval_pos_arr,tmp_keyval_pos_arr,sizeof(keyval_pos_t)*total_count,hipMemcpyHostToDevice)); checkCudaErrors(hipMalloc((void**)&(d_g_state->d_pos_arr_4_sorted_keyval_pos_arr),sizeof(int)*sorted_key_arr_len)); checkCudaErrors(hipMemcpy(d_g_state->d_pos_arr_4_sorted_keyval_pos_arr,pos_arr_4_pos_arr,sizeof(int)*sorted_key_arr_len,hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_g_state->d_sorted_keys_shared_buff, sorted_keys_shared_buff, sizeof(char)*totalKeySize,hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_g_state->d_sorted_vals_shared_buff, sorted_vals_shared_buff, sizeof(char)*totalValSize,hipMemcpyHostToDevice)); } void AddMapInputRecord4GPUCore(gpu_context* d_g_state, keyval_t *kv_p, int start_row_id, int end_row_id){ if (end_row_id<=start_row_id) { ShowError("error! end_row_id:%d <=start_row_id:%d",end_row_id, start_row_id); return; } int len = d_g_state->num_input_record; if (len<0) { ShowError("error! 
len<0"); return; } if (len == 0) d_g_state->h_input_keyval_arr = NULL; ShowLog("GPU_ID:[%d] add map tasks into gpu; #total input:%d #added input:%d",d_g_state->gpu_id, len, end_row_id-start_row_id); d_g_state->h_input_keyval_arr = (keyval_t *)realloc(d_g_state->h_input_keyval_arr, sizeof(keyval_t)*(len + end_row_id - start_row_id)); //assert(d_g_state->h_input_keyval_arr != NULL); for (int i=start_row_id;i<end_row_id;i++){ d_g_state->h_input_keyval_arr[len].keySize = kv_p[i].keySize; d_g_state->h_input_keyval_arr[len].valSize = kv_p[i].valSize; d_g_state->h_input_keyval_arr[len].key = kv_p[i].key; d_g_state->h_input_keyval_arr[len].val = kv_p[i].val; d_g_state->num_input_record++; len++; } } void AddMapInputRecord4GPUCard(gpu_card_context* d_g_state, keyval_t *kv_p, int start_row_id, int end_row_id){ if (end_row_id<=start_row_id) { ShowError("error! end_row_id[%d] <= start_row_id[%d]",end_row_id, start_row_id); return; } int len = d_g_state->num_input_record; if (len<0) { ShowError("error! d_g_state->num_input_record<0"); return; } if (len == 0) d_g_state->input_keyval_arr = NULL; ShowLog("GPU_CARD_GROUP_ID:[%d] add map input record for cpu device current #input:%d added #input:%d",d_g_state->gpu_group_id,len,end_row_id-start_row_id); d_g_state->input_keyval_arr = (keyval_t *)realloc(d_g_state->input_keyval_arr, sizeof(keyval_t)*(len+end_row_id-start_row_id)); for (int i=start_row_id;i<end_row_id;i++){ d_g_state->input_keyval_arr[len].keySize = kv_p[i].keySize; d_g_state->input_keyval_arr[len].valSize = kv_p[i].valSize; d_g_state->input_keyval_arr[len].key = kv_p[i].key; d_g_state->input_keyval_arr[len].val = kv_p[i].val; d_g_state->num_input_record++; len++; }//for } void AddMapInputRecordCPU(cpu_context* d_g_state, keyval_t *kv_p, int start_row_id, int end_row_id){ if (end_row_id<=start_row_id) { ShowError("error! end_row_id[%d] <= start_row_id[%d]",end_row_id, start_row_id); return; } int len = d_g_state->num_input_record; if (len<0) { ShowError("error! 
len<0"); return; }
	if (len == 0) d_g_state->input_keyval_arr = NULL;
	ShowLog("CPU_GROUP_ID:[%d] add map input record for cpu device current #input:%d added #input:%d",d_g_state->cpu_group_id,len,end_row_id-start_row_id);
	d_g_state->input_keyval_arr = (keyval_t *)realloc(d_g_state->input_keyval_arr, sizeof(keyval_t)*(len+end_row_id-start_row_id));
	for (int i=start_row_id;i<end_row_id;i++){
		// shallow copy: key/val pointers are shared with the caller's kv_p
		d_g_state->input_keyval_arr[len].keySize = kv_p[i].keySize;
		d_g_state->input_keyval_arr[len].valSize = kv_p[i].valSize;
		d_g_state->input_keyval_arr[len].key = kv_p[i].key;
		d_g_state->input_keyval_arr[len].val = kv_p[i].val;
		d_g_state->num_input_record++;
		len++;
	}//for
}

//--------------------------------------------------------------------------
// Append records [start_row_id, end_row_id) of kv_p to the CPU context's
// sorted intermediate key/vals array (shallow copy of key/vals pointers)
// and update sorted_keyvals_arr_len.
// FIX: the original called malloc() here, which left entries 0..len-1 of
// the new buffer uninitialized and leaked the previous allocation whenever
// this function was called more than once. realloc() preserves existing
// entries, matching the pattern used by AddMapInputRecordCPU above.
//--------------------------------------------------------------------------
void AddReduceInputRecordCPU(cpu_context* d_g_state, keyvals_t *kv_p, int start_row_id, int end_row_id){
	if (end_row_id<start_row_id){ ShowError("error! end_row_id<=start_row_id"); return; }
	int len = d_g_state->sorted_keyvals_arr_len;
	if (len<0) { ShowError("error! len<0"); return; }
	if (len == 0) d_g_state->sorted_intermediate_keyvals_arr = NULL;
	d_g_state->sorted_intermediate_keyvals_arr = (keyvals_t *)realloc(d_g_state->sorted_intermediate_keyvals_arr, sizeof(keyvals_t)*(len+end_row_id-start_row_id));
	for (int i = len; i< len+end_row_id-start_row_id; i++){
		// shallow copy: key/vals pointers are shared with kv_p, not duplicated
		d_g_state->sorted_intermediate_keyvals_arr[i].keySize = kv_p[start_row_id+i-len].keySize;
		d_g_state->sorted_intermediate_keyvals_arr[i].key = kv_p[start_row_id+i-len].key;
		d_g_state->sorted_intermediate_keyvals_arr[i].vals = kv_p[start_row_id+i-len].vals;
		d_g_state->sorted_intermediate_keyvals_arr[i].val_arr_len = kv_p[start_row_id+i-len].val_arr_len;
	}//for
	d_g_state->sorted_keyvals_arr_len = len + end_row_id-start_row_id;
}

//--------------------------------------------------------------------------
// Device-side emit for the reduce stage: store one (key,val) pair into the
// per-thread slot d_reduced_keyval_arr[TID], duplicating key and val on the
// device heap.
// NOTE(review): device-heap malloc can return NULL under heap pressure; the
// original performed no check and that behavior is preserved here — confirm
// the configured device heap size covers worst-case reduce output.
//--------------------------------------------------------------------------
__device__ void GPUEmitReduceOuput (void* key, void* val, int keySize, int valSize, gpu_context *d_g_state){
	keyval_t *p = &(d_g_state->d_reduced_keyval_arr[TID]);
	p->keySize = keySize;
	p->key = malloc(keySize);
	memcpy(p->key,key,keySize);
	p->valSize = valSize;
	p->val = malloc(valSize);
	memcpy(p->val,val,valSize);
	printf("[gpu output]: key:%s val:%d\n",key,*(int *)val);
}//__device__

//--------------------------------------------------------------------------
// Host-side emit for the reduce stage. The per-record store is currently
// disabled (kept commented out, as in the original); only the result is
// printed.
//--------------------------------------------------------------------------
void CPUEmitReduceOutput (void* key, void* val, int keySize, int valSize, cpu_context *d_g_state){
	/*keyval_t *p = &(d_g_state->d_reduced_keyval_arr[TID]);
	p->keySize = keySize;
	p->key = malloc(keySize);
	memcpy(p->key,key,keySize);
	p->valSize = valSize;
	p->val = malloc(valSize);
	memcpy(p->val,val,valSize);*/
	printf("[cpu output]: key:%s val:%d\n",(char*)key,*(int *)val);
}//void

//--------------------------------------------------------------------------
// Emit one intermediate (key,val) pair for a GPU-card map task into the
// shared buffer owned jointly by this task's "buddy" group. Payload bytes
// grow from the front of the buffer; keyval_pos_t descriptors grow from the
// back. When the two regions would collide, the buffer is doubled and every
// buddy's shared_buff pointer is repointed at the new allocation.
// Key and val positions are rounded up to 4-byte alignment.
//--------------------------------------------------------------------------
void GPUCardEmitMapOutput(void *key, void *val, int keySize, int valSize, gpu_card_context *d_g_state, int map_task_idx){
	if(map_task_idx >= d_g_state->num_input_record) { ShowError("error ! map_task_idx >= d_g_state->num_input_record"); return; }
	keyval_arr_t *kv_arr_p = &(d_g_state->intermediate_keyval_arr_arr_p[map_task_idx]);
	char *buff = (char*)(kv_arr_p->shared_buff);
	if (!((*kv_arr_p->shared_buff_pos) + keySize + valSize < (*kv_arr_p->shared_buff_len) - sizeof(keyval_pos_t)*((*kv_arr_p->shared_arr_len)+1))){
		ShowWarn("Warning! not enough memory at GPU task:%d *kv_arr_p->shared_arr_len:%d current buff_size:%d KB",
			map_task_idx,*kv_arr_p->shared_arr_len,(*kv_arr_p->shared_buff_len)/1024);
		char *new_buff = (char*)malloc(sizeof(char)*((*kv_arr_p->shared_buff_len)*2));
		if(new_buff==NULL){ ShowError("Error ! There is not enough memory to allocat!"); return; }
		// front region: payload bytes; back region: descriptor array
		memcpy(new_buff, buff, sizeof(char)*(*kv_arr_p->shared_buff_pos));
		int blockSize = sizeof(keyval_pos_t)*(*kv_arr_p->shared_arr_len);
		memcpy(new_buff + (*kv_arr_p->shared_buff_len)*2 - blockSize,
			(char*)buff + (*kv_arr_p->shared_buff_len) - blockSize,
			blockSize);
		(*kv_arr_p->shared_buff_len) = 2*(*kv_arr_p->shared_buff_len);
		for(int idx = 0; idx < (kv_arr_p->shared_buddy_len); idx++){
			int cur_map_task_idx = kv_arr_p->shared_buddy[idx]; //the buddy relationship won't be changed
			keyval_arr_t *cur_kv_arr_p = &(d_g_state->intermediate_keyval_arr_arr_p[cur_map_task_idx]);
			cur_kv_arr_p->shared_buff = new_buff;
		}//for
		free(buff); // all buddies now reference new_buff
		buff = new_buff;
	}//if
	keyval_pos_t *kv_p = (keyval_pos_t *)((char *)buff + *kv_arr_p->shared_buff_len - sizeof(keyval_pos_t)*((*kv_arr_p->shared_arr_len)+1));
	(*kv_arr_p->shared_arr_len)++;
	kv_p->task_idx = map_task_idx;
	kv_p->next_idx = _MAP;
	kv_p->keyPos = (*kv_arr_p->shared_buff_pos);
	*kv_arr_p->shared_buff_pos += ((keySize+3)/4)*4; //alignment 4 bytes for reading and writing
	memcpy((char *)(buff) + kv_p->keyPos, key, keySize);
	kv_p->keySize = keySize;
	kv_p->valPos = (*kv_arr_p->shared_buff_pos);
	*kv_arr_p->shared_buff_pos += ((valSize+3)/4)*4;
	memcpy((char *)(buff) + kv_p->valPos, val, valSize);
	kv_p->valSize = valSize;
	(kv_arr_p->arr) = kv_p;
}//void

//Last update 9/1/2012
//--------------------------------------------------------------------------
// CPU-side counterpart of GPUCardEmitMapOutput: append one intermediate
// (key,val) pair to the shared buffer of this map task's buddy group,
// doubling the buffer (and repointing every buddy) when full.
//--------------------------------------------------------------------------
void CPUEmitMapOutput(void *key, void *val, int keySize, int valSize, cpu_context *d_g_state, int map_task_idx){
	if(map_task_idx >= d_g_state->num_input_record) { ShowError("error ! map_task_idx >= d_g_state->num_input_record"); return; }
	keyval_arr_t *kv_arr_p = &(d_g_state->intermediate_keyval_arr_arr_p[map_task_idx]);
	char *buff = (char*)(kv_arr_p->shared_buff);
	if (!((*kv_arr_p->shared_buff_pos) + keySize + valSize < (*kv_arr_p->shared_buff_len) - sizeof(keyval_pos_t)*((*kv_arr_p->shared_arr_len)+1))){
		ShowWarn("Warning! not enough memory at CPU task:%d *kv_arr_p->shared_arr_len:%d current buff_size:%d KB",
			map_task_idx,*kv_arr_p->shared_arr_len,(*kv_arr_p->shared_buff_len)/1024);
		char *new_buff = (char*)malloc(sizeof(char)*((*kv_arr_p->shared_buff_len)*2));
		if(new_buff==NULL){ ShowError("Error ! There is not enough memory to allocat!"); return; }
		memcpy(new_buff, buff, sizeof(char)*(*kv_arr_p->shared_buff_pos));
		int blockSize = sizeof(keyval_pos_t)*(*kv_arr_p->shared_arr_len);
		memcpy(new_buff + (*kv_arr_p->shared_buff_len)*2 - blockSize,
			(char*)buff + (*kv_arr_p->shared_buff_len) - blockSize,
			blockSize);
		(*kv_arr_p->shared_buff_len) = 2*(*kv_arr_p->shared_buff_len);
		for(int idx = 0; idx < (kv_arr_p->shared_buddy_len); idx++){
			int cur_map_task_idx = kv_arr_p->shared_buddy[idx]; //the buddy relationship won't be changed
			keyval_arr_t *cur_kv_arr_p = &(d_g_state->intermediate_keyval_arr_arr_p[cur_map_task_idx]);
			cur_kv_arr_p->shared_buff = new_buff;
		}//for
		free(buff); // all buddies now reference new_buff
		buff = new_buff;
	}//if
	keyval_pos_t *kv_p = (keyval_pos_t *)((char *)buff + *kv_arr_p->shared_buff_len - sizeof(keyval_pos_t)*((*kv_arr_p->shared_arr_len)+1));
	(*kv_arr_p->shared_arr_len)++;
	kv_p->task_idx = map_task_idx;
	kv_p->next_idx = _MAP;
	kv_p->keyPos = (*kv_arr_p->shared_buff_pos);
	*kv_arr_p->shared_buff_pos += ((keySize+3)/4)*4; //alignment 4 bytes for reading and writing
	memcpy((char *)(buff) + kv_p->keyPos, key, keySize);
	kv_p->keySize = keySize;
	kv_p->valPos = (*kv_arr_p->shared_buff_pos);
	*kv_arr_p->shared_buff_pos += ((valSize+3)/4)*4;
	memcpy((char *)(buff) + kv_p->valPos, val, valSize);
	kv_p->valSize = valSize;
	(kv_arr_p->arr) = kv_p;
}//void

//--------------------------------------------------------------------------
// Emit one combined (key,val) pair for a CPU map task, tagged _COMBINE, into
// the buddy group's shared buffer; doubles the buffer on overflow.
// FIX: the original did not return after ShowError when malloc of the
// doubled buffer failed, so it fell through into memcpy(NULL, ...).
// NOTE(review): a single doubling is assumed sufficient for one record
// (keySize+valSize <= old buffer length) — confirm against buffer sizing.
//--------------------------------------------------------------------------
void CPUEmitCombinerOutput(void *key, void *val, int keySize, int valSize, cpu_context *d_g_state, int map_task_idx){
	keyval_arr_t *kv_arr_p = &(d_g_state->intermediate_keyval_arr_arr_p[map_task_idx]);
	void *shared_buff = kv_arr_p->shared_buff;
	int shared_buff_len = *kv_arr_p->shared_buff_len;
	int shared_arr_len = *kv_arr_p->shared_arr_len;
	int shared_buff_pos = *kv_arr_p->shared_buff_pos;
	int required_mem_len = (shared_buff_pos) + keySize + valSize + sizeof(keyval_pos_t)*(shared_arr_len+1);
	if (required_mem_len> shared_buff_len){
		// "CPU task" (was "GPU task"): this is the host-side combiner
		ShowWarn("Warning! no enough memory in CPU task:%d need:%d KB KeySize:%d ValSize:%d shared_arr_len:%d shared_buff_pos:%d shared_buff_len:%d",
			map_task_idx, required_mem_len/1024,keySize,valSize,shared_arr_len,shared_buff_pos,shared_buff_len);
		char *new_buff = (char*)malloc(sizeof(char)*((*kv_arr_p->shared_buff_len)*2));
		if(new_buff==NULL){ ShowError(" There is not enough memory to allocat!"); return; }
		memcpy(new_buff, shared_buff, sizeof(char)*(*kv_arr_p->shared_buff_pos));
		memcpy(new_buff + (*kv_arr_p->shared_buff_len)*2 - sizeof(keyval_pos_t)*(*kv_arr_p->shared_arr_len),
			(char*)shared_buff + (*kv_arr_p->shared_buff_len) - sizeof(keyval_pos_t)*(*kv_arr_p->shared_arr_len),
			sizeof(keyval_pos_t)*(*kv_arr_p->shared_arr_len));
		shared_buff_len = 2*(*kv_arr_p->shared_buff_len);
		(*kv_arr_p->shared_buff_len) = shared_buff_len;
		for(int idx = 0; idx < (kv_arr_p->shared_buddy_len); idx++){
			int cur_map_task_idx = kv_arr_p->shared_buddy[idx]; //the buddy relationship won't be changed
			keyval_arr_t *cur_kv_arr_p = &(d_g_state->intermediate_keyval_arr_arr_p[cur_map_task_idx]);
			cur_kv_arr_p->shared_buff = new_buff;
		}//for
		free(shared_buff);
		shared_buff = new_buff;
	}//if
	keyval_pos_t *kv_p = (keyval_pos_t *)((char *)shared_buff + shared_buff_len - sizeof(keyval_pos_t)*(shared_arr_len + 1));
	kv_p->keySize = keySize;
	kv_p->valSize = valSize;
	kv_p->task_idx = map_task_idx;
	kv_p->next_idx = _COMBINE; //merged results
	memcpy( (char*)shared_buff + *kv_arr_p->shared_buff_pos, key, keySize);
	kv_p->keyPos = *kv_arr_p->shared_buff_pos;
	*kv_arr_p->shared_buff_pos += (keySize+3)/4*4;
	memcpy( (char*)shared_buff + *kv_arr_p->shared_buff_pos, val, valSize);
	kv_p->valPos = *kv_arr_p->shared_buff_pos;
	*kv_arr_p->shared_buff_pos += (valSize+3)/4*4;
	(*kv_arr_p->shared_arr_len)++;
}//void
//--------------------------------------------------------------------------
// Device-side combiner emit: append one combined (key,val) pair, tagged
// _COMBINE, to this map task's buddy-group shared buffer; doubles the
// buffer (and repoints every buddy) on overflow.
// FIX: the original did not return after ShowError when device-heap malloc
// of the doubled buffer failed, so it fell through into memcpy(NULL, ...).
// NOTE(review): a single doubling is assumed sufficient for one record —
// confirm against SHARED_BUFF_LEN sizing.
//--------------------------------------------------------------------------
__device__ void GPUEmitCombinerOutput(void *key, void *val, int keySize, int valSize, gpu_context *d_g_state, int map_task_idx){
	keyval_arr_t *kv_arr_p = d_g_state->d_intermediate_keyval_arr_arr_p[map_task_idx];
	void *shared_buff = kv_arr_p->shared_buff;
	int shared_buff_len = *kv_arr_p->shared_buff_len;
	int shared_arr_len = *kv_arr_p->shared_arr_len;
	int shared_buff_pos = *kv_arr_p->shared_buff_pos;
	int required_mem_len = (shared_buff_pos) + keySize + valSize + sizeof(keyval_pos_t)*(shared_arr_len+1);
	if (required_mem_len> shared_buff_len){
		ShowWarn("Warning! no enough memory in GPU task:%d need:%d KB KeySize:%d ValSize:%d shared_arr_len:%d shared_buff_pos:%d shared_buff_len:%d",
			map_task_idx, required_mem_len/1024,keySize,valSize,shared_arr_len,shared_buff_pos,shared_buff_len);
		char *new_buff = (char*)malloc(sizeof(char)*((*kv_arr_p->shared_buff_len)*2));
		if(new_buff==NULL){ ShowError(" There is not enough memory to allocat!"); return; }
		// front region: payload bytes; back region: keyval_pos_t descriptors
		memcpy(new_buff, shared_buff, sizeof(char)*(*kv_arr_p->shared_buff_pos));
		memcpy(new_buff + (*kv_arr_p->shared_buff_len)*2 - sizeof(keyval_pos_t)*(*kv_arr_p->shared_arr_len),
			(char*)shared_buff + (*kv_arr_p->shared_buff_len) - sizeof(keyval_pos_t)*(*kv_arr_p->shared_arr_len),
			sizeof(keyval_pos_t)*(*kv_arr_p->shared_arr_len));
		shared_buff_len = 2*(*kv_arr_p->shared_buff_len);
		(*kv_arr_p->shared_buff_len) = shared_buff_len;
		for(int idx = 0; idx < (kv_arr_p->shared_buddy_len); idx++){
			int cur_map_task_idx = kv_arr_p->shared_buddy[idx]; //the buddy relationship won't be changed
			keyval_arr_t *cur_kv_arr_p = d_g_state->d_intermediate_keyval_arr_arr_p[cur_map_task_idx];
			cur_kv_arr_p->shared_buff = new_buff;
		}//for
		free(shared_buff);
		shared_buff = new_buff;
	}//if
	keyval_pos_t *kv_p = (keyval_pos_t *)((char *)shared_buff + shared_buff_len - sizeof(keyval_pos_t)*(shared_arr_len + 1));
	kv_p->keySize = keySize;
	kv_p->valSize = valSize;
	kv_p->task_idx = map_task_idx;
	kv_p->next_idx = _COMBINE; //merged results
	memcpy( (char*)shared_buff + *kv_arr_p->shared_buff_pos, key, keySize);
	kv_p->keyPos = *kv_arr_p->shared_buff_pos;
	*kv_arr_p->shared_buff_pos += (keySize+3)/4*4;
	memcpy( (char*)shared_buff + *kv_arr_p->shared_buff_pos, val, valSize);
	kv_p->valPos = *kv_arr_p->shared_buff_pos;
	*kv_arr_p->shared_buff_pos += (valSize+3)/4*4;
	(*kv_arr_p->shared_arr_len)++;
}//__device__

//Last update 9/16/2012
//--------------------------------------------------------------------------
// Device-side map emit: append one intermediate (key,val) pair, tagged _MAP,
// to this map task's buddy-group shared buffer. Payload bytes grow from the
// front; keyval_pos_t descriptors grow from the back; the buffer is doubled
// (and every buddy repointed) when the regions would collide. Key and val
// positions are rounded up to 4-byte alignment.
//--------------------------------------------------------------------------
__device__ void GPUEmitMapOutput(void *key, void *val, int keySize, int valSize, gpu_context *d_g_state, int map_task_idx){
	keyval_arr_t *kv_arr_p = d_g_state->d_intermediate_keyval_arr_arr_p[map_task_idx];
	char *buff = (char*)(kv_arr_p->shared_buff);
	if (!((*kv_arr_p->shared_buff_pos) + keySize + valSize < (*kv_arr_p->shared_buff_len) - sizeof(keyval_pos_t)*((*kv_arr_p->shared_arr_len)+1))){
		ShowWarn("Warning! not enough memory at GPU task:%d *kv_arr_p->shared_arr_len:%d current buff_size:%d KB",
			map_task_idx,*kv_arr_p->shared_arr_len,(*kv_arr_p->shared_buff_len)/1024);
		char *new_buff = (char*)malloc(sizeof(char)*((*kv_arr_p->shared_buff_len)*2));
		if(new_buff==NULL){ ShowError("Error ! There is not enough memory to allocat!"); return; }
		memcpy(new_buff, buff, sizeof(char)*(*kv_arr_p->shared_buff_pos));
		memcpy(new_buff + (*kv_arr_p->shared_buff_len)*2 - sizeof(keyval_pos_t)*(*kv_arr_p->shared_arr_len),
			(char*)buff + (*kv_arr_p->shared_buff_len) - sizeof(keyval_pos_t)*(*kv_arr_p->shared_arr_len),
			sizeof(keyval_pos_t)*(*kv_arr_p->shared_arr_len));
		(*kv_arr_p->shared_buff_len) = 2*(*kv_arr_p->shared_buff_len);
		for(int idx = 0; idx < (kv_arr_p->shared_buddy_len); idx++){
			int cur_map_task_idx = kv_arr_p->shared_buddy[idx]; //the buddy relationship won't be changed
			keyval_arr_t *cur_kv_arr_p = d_g_state->d_intermediate_keyval_arr_arr_p[cur_map_task_idx];
			cur_kv_arr_p->shared_buff = new_buff;
		}//for
		free(buff); // all buddies now reference new_buff
		buff = new_buff;
	}//if
	keyval_pos_t *kv_p = (keyval_pos_t *)((char *)buff + *kv_arr_p->shared_buff_len - sizeof(keyval_pos_t)*((*kv_arr_p->shared_arr_len)+1));
	(*kv_arr_p->shared_arr_len)++;
	kv_p->task_idx = map_task_idx;
	kv_p->next_idx = _MAP;
	kv_p->keyPos = (*kv_arr_p->shared_buff_pos);
	*kv_arr_p->shared_buff_pos += ((keySize+3)/4)*4; //alignment 4 bytes for reading and writing
	memcpy((char *)(buff) + kv_p->keyPos,key,keySize);
	kv_p->keySize = keySize;
	kv_p->valPos = (*kv_arr_p->shared_buff_pos);
	*kv_arr_p->shared_buff_pos += ((valSize+3)/4)*4;
	memcpy((char *)(buff) + kv_p->valPos, val, valSize);
	kv_p->valSize = valSize;
	(kv_arr_p->arr) = kv_p;
	//kv_arr_p->arr_len++;
	//d_g_state->d_intermediate_keyval_total_count[map_task_idx] = kv_arr_p->arr_len;
}//__device__

#if 0
__global__ void GPUCardMapPartitioner(gpu_context d_g_state){
	int num_records_per_thread = (d_g_state.num_input_record);
	//if(TID==0) ShowWarn("hi 0 -- num_records_per_thread:%d",num_records_per_thread);
	int buddy_arr_len = num_records_per_thread;
	int * int_arr = (int*)malloc((4+buddy_arr_len)*sizeof(int));
	if(int_arr==NULL){ ShowError("there is not enough GPU memory\n"); return;}
	int *shared_arr_len = int_arr;
	int *shared_buff_len = int_arr+1;
	int *shared_buff_pos = int_arr+2;
	//int *num_buddy = int_arr+3;
	int *buddy = int_arr+4;
	//if(TID==0) ShowWarn("hi 1");
	(*shared_buff_len) = SHARED_BUFF_LEN;
	(*shared_arr_len) = 0;
	(*shared_buff_pos) = 0;
	char * buff = (char *)malloc(sizeof(char)*(*shared_buff_len));
	keyval_arr_t *kv_arr_t_arr = (keyval_arr_t *)malloc(sizeof(keyval_arr_t)*(d_g_state.num_input_record));
	int index = 0;
	index = 0;
	ShowWarn("d_g_state.num_input_record:%d",d_g_state.num_input_record);
	for(int map_task_idx = 0; map_task_idx < d_g_state.num_input_record; map_task_idx ++){
	keyval_arr_t *kv_arr_t = (keyval_arr_t *)&(kv_arr_t_arr[map_task_idx]);
	kv_arr_t->shared_buff = buff;
	kv_arr_t->shared_arr_len = shared_arr_len;
	kv_arr_t->shared_buff_len
= shared_buff_len; kv_arr_t->shared_buff_pos = shared_buff_pos; kv_arr_t->shared_buddy = buddy; kv_arr_t->shared_buddy_len = buddy_arr_len; kv_arr_t->arr = NULL; kv_arr_t->arr_len = 0; d_g_state.d_intermediate_keyval_arr_arr_p[map_task_idx] = kv_arr_t; }//for }//void #endif //------------------------------------------------- //called by user defined map function //------------------------------------------------- //TODO 9/11/2012 merge threads and blocks code into the same place. __global__ void GPUMapPartitioner(gpu_context d_g_state) { //ShowLog("gridDim.x:%d gridDim.y:%d gridDim.z:%d blockDim.x:%d blockDim.y:%d blockDim.z:%d blockIdx.x:%d blockIdx.y:%d blockIdx.z:%d\n", // gridDim.x,gridDim.y,gridDim.z,blockDim.x,blockDim.y,blockDim.z,blockIdx.x,blockIdx.y,blockIdx.z); int num_records_per_thread = (d_g_state.num_input_record + (gridDim.x*blockDim.x*blockDim.y)-1)/(gridDim.x*blockDim.x*blockDim.y); int block_start_idx = num_records_per_thread * blockIdx.x * blockDim.x * blockDim.y; int thread_start_idx = block_start_idx + ((threadIdx.y*blockDim.x + threadIdx.x)/STRIDE)*num_records_per_thread*STRIDE + ((threadIdx.y*blockDim.x + threadIdx.x)%STRIDE); int thread_end_idx = thread_start_idx + num_records_per_thread*STRIDE; if (thread_end_idx > d_g_state.num_input_record) thread_end_idx = d_g_state.num_input_record; if (thread_start_idx >= thread_end_idx) return; //if(TID==0) ShowWarn("hi 0 -- num_records_per_thread:%d",num_records_per_thread); int buddy_arr_len = num_records_per_thread; int * int_arr = (int*)malloc((4+buddy_arr_len)*sizeof(int)); if(int_arr==NULL){ ShowError("there is not enough GPU memory\n"); return;} int *shared_arr_len = int_arr; int *shared_buff_len = int_arr+1; int *shared_buff_pos = int_arr+2; //int *num_buddy = int_arr+3; int *buddy = int_arr+4; //if(TID==0) ShowWarn("hi 1"); (*shared_buff_len) = SHARED_BUFF_LEN; (*shared_arr_len) = 0; (*shared_buff_pos) = 0; char * buff = (char *)malloc(sizeof(char)*(*shared_buff_len)); keyval_arr_t 
*kv_arr_t_arr = (keyval_arr_t *)malloc(sizeof(keyval_arr_t)*(thread_end_idx-thread_start_idx+STRIDE-1)/STRIDE); int index = 0; for(int idx = thread_start_idx; idx < thread_end_idx; idx += STRIDE){ buddy[index] = idx; index ++; }//for index = 0; //if(TID==0) ShowWarn("hi 2"); for(int map_task_idx = thread_start_idx; map_task_idx < thread_end_idx; map_task_idx += STRIDE){ keyval_arr_t *kv_arr_t = (keyval_arr_t *)&(kv_arr_t_arr[index]); index++; kv_arr_t->shared_buff = buff; kv_arr_t->shared_arr_len = shared_arr_len; kv_arr_t->shared_buff_len = shared_buff_len; kv_arr_t->shared_buff_pos = shared_buff_pos; kv_arr_t->shared_buddy = buddy; kv_arr_t->shared_buddy_len = buddy_arr_len; kv_arr_t->arr = NULL; kv_arr_t->arr_len = 0; d_g_state.d_intermediate_keyval_arr_arr_p[map_task_idx] = kv_arr_t; }//for //if(TID==0) ShowWarn("hi 3"); }//GPUMapPartitioner void RunGPUCardMapFunction(gpu_card_context* d_g_state,int curIter, int totalIter){ int start_row_idx = 0; //panda_cpu_task_info->start_row_idx; int end_row_idx = d_g_state->num_input_record; //panda_cpu_task_info->end_row_idx; char *buff = (char *)malloc(sizeof(char)*CPU_SHARED_BUFF_SIZE); int *int_arr = (int *)malloc(sizeof(int)*(end_row_idx-start_row_idx+3)); int *buddy = int_arr+3; int buddy_len = end_row_idx-start_row_idx; for (int i=0;i<buddy_len;i++){ buddy [i]=i+start_row_idx; }//for //ShowLog("start_idx:%d end_idx:%d",start_row_idx, end_row_idx); for (int map_idx = start_row_idx; map_idx < end_row_idx; map_idx++){ d_g_state->intermediate_keyval_arr_arr_p[map_idx].shared_buff = buff; (d_g_state->intermediate_keyval_arr_arr_p[map_idx].shared_buff_len) = int_arr; (d_g_state->intermediate_keyval_arr_arr_p[map_idx].shared_buff_pos) = int_arr+1; (d_g_state->intermediate_keyval_arr_arr_p[map_idx].shared_arr_len) = int_arr+2; *(d_g_state->intermediate_keyval_arr_arr_p[map_idx].shared_buff_len) = CPU_SHARED_BUFF_SIZE; *(d_g_state->intermediate_keyval_arr_arr_p[map_idx].shared_buff_pos) = 0; 
*(d_g_state->intermediate_keyval_arr_arr_p[map_idx].shared_arr_len) = 0; (d_g_state->intermediate_keyval_arr_arr_p[map_idx].shared_buddy) = buddy; (d_g_state->intermediate_keyval_arr_arr_p[map_idx].shared_buddy_len) = buddy_len; //ShowWarn("---->(d_g_state->intermediate_keyval_arr_arr_p[%d].shared_buddy_len=:%d)", // map_idx,(d_g_state->intermediate_keyval_arr_arr_p[map_idx].shared_buddy_len)); }//for for (int map_idx = start_row_idx; map_idx < end_row_idx; map_idx++){ keyval_t *kv_p = (keyval_t *)(&(d_g_state->input_keyval_arr[map_idx])); char *key = (char *)(kv_p->key); char *val = (char *)(kv_p->val); int keySize = kv_p->keySize; int valSize = kv_p->valSize; gpu_card_map(key, val, keySize, valSize, d_g_state, map_idx); }//for ShowLog("CPU_GROUP_ID:[%d] Done :%d tasks",d_g_state->gpu_group_id, 1); //////////////////////// //int thread_start_idx = 0; //keyval_arr_t *kv_arr_p = d_g_state.d_intermediate_keyval_arr_arr_p[thread_start_idx]; //char *shared_buff = (char *)(kv_arr_p->shared_buff); //int shared_arr_len = *kv_arr_p->shared_arr_len; //int shared_buff_len = *kv_arr_p->shared_buff_len; //d_g_state.d_intermediate_keyval_total_count[thread_start_idx] = *kv_arr_p->shared_arr_len; } __global__ void RunGPUMapTasks(gpu_context d_g_state, int curIter, int totalIter) { //ShowLog("gridDim.x:%d gridDim.y:%d gridDim.z:%d blockDim.x:%d blockDim.y:%d blockDim.z:%d blockIdx.x:%d blockIdx.y:%d blockIdx.z:%d\n", // gridDim.x,gridDim.y,gridDim.z,blockDim.x,blockDim.y,blockDim.z,blockIdx.x,blockIdx.y,blockIdx.z); int num_records_per_thread = (d_g_state.num_input_record + (gridDim.x*blockDim.x*blockDim.y)-1)/(gridDim.x*blockDim.x*blockDim.y); int block_start_idx = num_records_per_thread * blockIdx.x * blockDim.x * blockDim.y; int thread_start_idx = block_start_idx + ((threadIdx.y*blockDim.x + threadIdx.x)/STRIDE)*num_records_per_thread*STRIDE + ((threadIdx.y*blockDim.x + threadIdx.x)%STRIDE); //ShowLog("num_records_per_thread:%d block_start_idx:%d gridDim.x:%d gridDim.y:%d 
gridDim.z:%d blockDim.x:%d blockDim.y:%d blockDim.z:%d",num_records_per_thread, block_start_idx, gridDim.x,gridDim.y,gridDim.z,blockDim.x,blockDim.y,blockDim.z); int thread_end_idx = thread_start_idx + num_records_per_thread*STRIDE; if (thread_end_idx > d_g_state.num_input_record) thread_end_idx = d_g_state.num_input_record; if (thread_start_idx + curIter*STRIDE >= thread_end_idx) return; for(int map_task_idx = thread_start_idx + curIter*STRIDE; map_task_idx < thread_end_idx; map_task_idx += totalIter*STRIDE){ char *key = (char *)(d_g_state.d_input_keys_shared_buff) + d_g_state.d_input_keyval_pos_arr[map_task_idx].keyPos; char *val = (char *)(d_g_state.d_input_vals_shared_buff) + d_g_state.d_input_keyval_pos_arr[map_task_idx].valPos; int valSize = d_g_state.d_input_keyval_pos_arr[map_task_idx].valSize; int keySize = d_g_state.d_input_keyval_pos_arr[map_task_idx].keySize; //ShowWarn("valSize:%d keySize:%d",valSize,keySize); //////////////////////////////////////////////////////////////// gpu_core_map(key, val, keySize, valSize, &d_g_state, map_task_idx);// //////////////////////////////////////////////////////////////// }//for keyval_arr_t *kv_arr_p = d_g_state.d_intermediate_keyval_arr_arr_p[thread_start_idx]; //char *shared_buff = (char *)(kv_arr_p->shared_buff); //int shared_arr_len = *kv_arr_p->shared_arr_len; //int shared_buff_len = *kv_arr_p->shared_buff_len; d_g_state.d_intermediate_keyval_total_count[thread_start_idx] = *kv_arr_p->shared_arr_len; //__syncthreads(); }//GPUMapPartitioner //NOTE: gpu_combiner will affect the later program results //Last update 9/16/2012 void StartCPUCombiner(thread_info_t *thread_info){ cpu_context *d_g_state = (cpu_context*)(thread_info->d_g_state); job_configuration *cpu_job_conf = (job_configuration*)(thread_info->job_conf); if (d_g_state->intermediate_keyval_arr_arr_p == NULL) { ShowError("intermediate_keyval_arr_arr_p == NULL"); exit(-1); } if (cpu_job_conf->num_input_record <= 0) { ShowError("no any input keys"); 
exit(-1); } if (d_g_state->num_cpus_cores <= 0) { ShowError("d_g_state->num_cpus == 0"); exit(-1); } //------------------------------------------------------- //1, prepare buffer to store intermediate results //------------------------------------------------------- keyval_arr_t *d_keyval_arr_p; int *count = NULL; //--------------------------------------------- //3, determine the number of threads to run //--------------------------------------------- ShowLog("CPU_GROUP_ID:[%d] the number of cpus used in computation:%d",d_g_state->cpu_group_id, d_g_state->num_cpus_cores); //-------------------------------------------------- //4, start_row_id map //-------------------------------------------------- int num_threads = d_g_state->num_cpus_cores; ShowLog("num_threads:%d",num_threads); int num_records_per_thread = (cpu_job_conf->num_input_record + num_threads-1)/(num_threads); int start_row_idx = 0; int end_row_idx = 0; for (int tid = 0;tid<num_threads;tid++){ end_row_idx = start_row_idx + num_records_per_thread; if (tid < (cpu_job_conf->num_input_record % num_threads) ) end_row_idx++; if (end_row_idx > cpu_job_conf->num_input_record) end_row_idx = cpu_job_conf->num_input_record; d_g_state->panda_cpu_task_info[tid].start_row_idx = start_row_idx; d_g_state->panda_cpu_task_info[tid].end_row_idx = end_row_idx; //ShowLog("hi-1"); if (pthread_create(&(d_g_state->panda_cpu_task[tid]),NULL,RunPandaCPUCombinerThread,(char *)&(d_g_state->panda_cpu_task_info[tid]))!=0) perror("Thread creation failed!\n"); start_row_idx = end_row_idx; }//for for (int tid = 0;tid<num_threads;tid++){ void *exitstat; if (pthread_join(d_g_state->panda_cpu_task[tid],&exitstat)!=0) perror("joining failed"); }//for ShowLog("CPU_GROUP_ID:[%d] DONE", d_g_state->cpu_group_id); } __global__ void GPUCombiner(gpu_context d_g_state) { //ShowLog("gridDim.x:%d gridDim.y:%d gridDim.z:%d blockDim.x:%d blockDim.y:%d blockDim.z:%d blockIdx.x:%d blockIdx.y:%d blockIdx.z:%d", // 
gridDim.x,gridDim.y,gridDim.z,blockDim.x,blockDim.y,blockDim.z,blockIdx.x,blockIdx.y,blockIdx.z); int num_records_per_thread = (d_g_state.num_input_record + (gridDim.x*blockDim.x*blockDim.y)-1)/(gridDim.x*blockDim.x*blockDim.y); int block_start_idx = num_records_per_thread * blockIdx.x * blockDim.x * blockDim.y; int thread_start_idx = block_start_idx + ((threadIdx.y*blockDim.x + threadIdx.x)/STRIDE)*num_records_per_thread*STRIDE + ((threadIdx.y*blockDim.x + threadIdx.x)%STRIDE); int thread_end_idx = thread_start_idx + num_records_per_thread*STRIDE; if (thread_end_idx > d_g_state.num_input_record) thread_end_idx = d_g_state.num_input_record; if (thread_start_idx >= thread_end_idx) return; keyval_arr_t *kv_arr_p = d_g_state.d_intermediate_keyval_arr_arr_p[thread_start_idx]; int *buddy = kv_arr_p->shared_buddy; //TODO use host function set /*for (int idx=0;idx<kv_arr_p->shared_buddy_len;idx++){ d_g_state.d_intermediate_keyval_total_count[idx] = 0; }*/ int unmerged_shared_arr_len = *kv_arr_p->shared_arr_len; val_t *val_t_arr = (val_t *)malloc(sizeof(val_t)*unmerged_shared_arr_len); if (val_t_arr == NULL) ShowError("there is no enough memory"); int num_keyval_pairs_after_combiner = 0; for (int i=0; i<unmerged_shared_arr_len;i++){ char *shared_buff = (kv_arr_p->shared_buff); int shared_buff_len = *kv_arr_p->shared_buff_len; keyval_pos_t *head_kv_p = (keyval_pos_t *)(shared_buff + shared_buff_len - sizeof(keyval_pos_t)*(unmerged_shared_arr_len-i)); keyval_pos_t *first_kv_p = head_kv_p; if (first_kv_p->next_idx != _MAP) continue; int iKeySize = first_kv_p->keySize; char *iKey = shared_buff + first_kv_p->keyPos; char *iVal = shared_buff + first_kv_p->valPos; if((first_kv_p->keyPos%4!=0)||(first_kv_p->valPos%4!=0)){ ShowError("keyPos or valPos is not aligned with 4 bytes, results could be wrong"); } int index = 0; first_kv_p = head_kv_p; (val_t_arr[index]).valSize = first_kv_p->valSize; (val_t_arr[index]).val = (char*)shared_buff + first_kv_p->valPos; for (int 
j=i+1;j<unmerged_shared_arr_len;j++){ keyval_pos_t *next_kv_p = (keyval_pos_t *)((char *)shared_buff + shared_buff_len - sizeof(keyval_pos_t)*(unmerged_shared_arr_len-j)); char *jKey = (char *)shared_buff+next_kv_p->keyPos; int jKeySize = next_kv_p->keySize; if (gpu_compare(iKey,iKeySize,jKey,jKeySize)!=0){ continue; } index++; first_kv_p->next_idx = j; first_kv_p = next_kv_p; (val_t_arr[index]).valSize = next_kv_p->valSize; (val_t_arr[index]).val = (char*)shared_buff + next_kv_p->valPos; } int valCount = index+1; if(valCount>1) gpu_combiner(iKey,val_t_arr,iKeySize,(valCount),&d_g_state,thread_start_idx); else{ first_kv_p->next_idx = _COMBINE; first_kv_p->task_idx = thread_start_idx; } num_keyval_pairs_after_combiner++; }//for free(val_t_arr); d_g_state.d_intermediate_keyval_total_count[thread_start_idx] = num_keyval_pairs_after_combiner; //////////////////////////////////////////////////////////////////// __syncthreads(); }//GPUMapPartitioner int StartCPUMap2(thread_info_t* thread_info) { cpu_context *d_g_state = (cpu_context*)(thread_info->d_g_state); job_configuration *cpu_job_conf = (job_configuration*)(thread_info->job_conf); if (cpu_job_conf->num_input_record<=0) { ShowError("Error: no any input keys"); exit(-1);} if (cpu_job_conf->input_keyval_arr == NULL) { ShowError("Error: input_keyval_arr == NULL"); exit(-1);} if (d_g_state->num_cpus_cores <= 0) { ShowError("Error: d_g_state->num_cpus == 0"); exit(-1);} d_g_state->intermediate_keyval_total_count = (int *)malloc(d_g_state->num_input_record*sizeof(int)); memset(d_g_state->intermediate_keyval_total_count, 0, d_g_state->num_input_record*sizeof(int)); //------------------------------------------------------- //1, prepare buffer to store intermediate results //------------------------------------------------------- keyval_arr_t *d_keyval_arr_p; int *count = NULL; //--------------------------------------------- //3, determine the number of threads to run //--------------------------------------------- 
ShowLog("CPU_GROUP_ID:[%d] #num_cpus:%d num_input_record:%d", d_g_state->cpu_group_id, d_g_state->num_cpus_cores, cpu_job_conf->num_input_record); //-------------------------------------------------- //4, start_row_id map //-------------------------------------------------- int num_threads = d_g_state->num_cpus_cores; int num_records_per_thread = (cpu_job_conf->num_input_record)/(num_threads); int start_row_idx = 0; int end_row_idx = 0; for (int tid = 0;tid<num_threads;tid++){ end_row_idx = start_row_idx + num_records_per_thread; if (tid < (cpu_job_conf->num_input_record % num_threads) ) end_row_idx++; d_g_state->panda_cpu_task_info[tid].start_row_idx = start_row_idx; if (end_row_idx > cpu_job_conf->num_input_record) end_row_idx = cpu_job_conf->num_input_record; d_g_state->panda_cpu_task_info[tid].end_row_idx = end_row_idx; if (pthread_create(&(d_g_state->panda_cpu_task[tid]),NULL,RunPandaCPUMapThread,(char *)&(d_g_state->panda_cpu_task_info[tid]))!=0) perror("Thread creation failed!\n"); start_row_idx = end_row_idx; }//for for (int tid = 0;tid<num_threads;tid++){ void *exitstat; if (pthread_join(d_g_state->panda_cpu_task[tid],&exitstat)!=0) perror("joining failed"); }//for ShowLog("CPU_GROUP_ID:[%d] DONE", d_g_state->cpu_group_id); return 0; }//int //-------------------------------------------------- // StartGPUCardMap // Last Update 12/9/2012 //-------------------------------------------------- int StartGPUCardMap(gpu_card_context *d_g_state) { if (d_g_state->num_input_record<=0) { ShowError("Error: no any input keys"); exit(-1);} if (d_g_state->input_keyval_arr == NULL) { ShowError("Error: input_keyval_arr == NULL"); exit(-1);} //if (d_g_state->num_cpus_cores <= 0) { ShowError("Error: d_g_state->num_cpus == 0"); exit(-1);} //if (d_g_state->num_mappers<=0) {d_g_state->num_mappers = (NUM_BLOCKS)*(NUM_THREADS);} //if (d_g_state->num_reducers<=0) {d_g_state->num_reducers = (NUM_BLOCKS)*(NUM_THREADS);} d_g_state->intermediate_keyval_total_count = (int 
*)malloc(d_g_state->num_input_record*sizeof(int));
// Zero the per-record intermediate key/value counters before the map pass.
memset(d_g_state->intermediate_keyval_total_count, 0, d_g_state->num_input_record*sizeof(int));
//-------------------------------------------------------
//1, prepare buffer to store intermediate results
//-------------------------------------------------------
keyval_arr_t *d_keyval_arr_p;
int *count = NULL;
//---------------------------------------------
//3, determine the number of threads to run
//---------------------------------------------
ShowLog("GPU_CARD_GROUP_ID:[%d] num_input_record:%d", d_g_state->gpu_group_id, d_g_state->num_input_record);
//--------------------------------------------------
//4, start_row_id map
//--------------------------------------------------
//TODO: num_threads is hard-coded to 1 here; the multi-threaded dispatch below is commented out.
int num_threads = 1;
int num_records_per_thread = (d_g_state->num_input_record)/(num_threads);
int start_row_idx = 0;
int end_row_idx = 0;
/*for (int iter = 0; iter< totalIter; iter++){*/
// Run the whole record range [0, 1) of iterations on the GPU card in one call.
RunGPUCardMapFunction(d_g_state, 0, 1);
/*for (int tid = 0;tid<num_threads;tid++){
    end_row_idx = start_row_idx + num_records_per_thread;
    if (tid < (d_g_state->num_input_record % num_threads) ) end_row_idx++;
    d_g_state->panda_cpu_task_info[tid].start_row_idx = start_row_idx;
    if (end_row_idx > cpu_job_conf->num_input_record) end_row_idx = cpu_job_conf->num_input_record;
    d_g_state->panda_cpu_task_info[tid].end_row_idx = end_row_idx;
    if (pthread_create(&(d_g_state->panda_cpu_task[tid]),NULL,RunPandaCPUMapThread,(char *)&(d_g_state->panda_cpu_task_info[tid]))!=0)
        perror("Thread creation failed!\n");
    start_row_idx = end_row_idx;
}//for*/
/*for (int tid = 0;tid<num_threads;tid++){
    void *exitstat;
    if (pthread_join(d_g_state->panda_cpu_task[tid],&exitstat)!=0) perror("joining failed");
}//for*/
//----------------------------------------------
//0, Check status of d_g_state;
//----------------------------------------------
//-------------------------------------------------------
//1, prepare buffer to store intermediate results
//-------------------------------------------------------
//GPUCardMapPartitioner<<<1,1>>>(*d_g_state);
hipDeviceSynchronize();
double t2 = PandaTimer();
//int num_records_per_thread = (d_g_state->num_input_record/d_g_state->num_input_record);
//int totalIter = num_records_per_thread;
//ShowLog("GPUMapPartitioner:%f totalIter:%d",t2-t1, totalIter);
/*for (int iter = 0; iter< totalIter; iter++){
    double t3 = PandaTimer();
    //RunGPUMapTasks<<<grids,blocks>>>(*d_g_state, totalIter -1 - iter, totalIter);
    //RunGPUCardMapFunction(*d_g_state, totalIter -1 - iter, totalIter);
    //////////////////////////////////////////////////////
    ////////////////////////////////////
    hipDeviceSynchronize();
    double t4 = PandaTimer();
    size_t total_mem,avail_mem;
    checkCudaErrors(hipMemGetInfo( &avail_mem, &total_mem ));
    ShowLog("GPU_ID:[%d] RunGPUMapTasks take %f sec at iter [%d/%d] remain %d mb GPU mem processed", d_g_state->gpu_id, t4-t3,iter,totalIter, avail_mem/1024/1024);
}*///for
ShowLog("GPU_CARD_ID:[%d] Done %d Tasks",d_g_state->gpu_id,d_g_state->num_input_record);
return 0;
}//int

//--------------------------------------------------
// StartGPUCoreMap
// Last Update 9/2/2012
//
// Host driver for the GPU-core map phase: allocates device-side buffers for
// intermediate key/value results, partitions input records across GPU threads
// with GPUMapPartitioner, then runs the user map via RunGPUMapTasks in
// totalIter passes (one record per thread per pass). Returns 0.
//--------------------------------------------------
int StartGPUCoreMap(gpu_context *d_g_state)
{
//-------------------------------------------------------
//0, Check status of d_g_state;
//-------------------------------------------------------
ShowLog("GPU_ID:[%d] num_input_record %d", d_g_state->gpu_id, d_g_state->num_input_record);
if (d_g_state->num_input_record<0) { ShowLog("Error: no any input keys"); exit(-1);}
if (d_g_state->h_input_keyval_arr == NULL) { ShowLog("Error: h_input_keyval_arr == NULL"); exit(-1);}
if (d_g_state->num_mappers<=0) {d_g_state->num_mappers = (NUM_BLOCKS)*(NUM_THREADS);}
if (d_g_state->num_reducers<=0) {d_g_state->num_reducers = (NUM_BLOCKS)*(NUM_THREADS);}
//-------------------------------------------------------
//1, prepare buffer to store intermediate results
//-------------------------------------------------------
keyval_arr_t *h_keyval_arr_arr = (keyval_arr_t *)malloc(sizeof(keyval_arr_t)*d_g_state->num_input_record);
keyval_arr_t *d_keyval_arr_arr;
checkCudaErrors(hipMalloc((void**)&(d_keyval_arr_arr),d_g_state->num_input_record*sizeof(keyval_arr_t)));
for (int i=0; i<d_g_state->num_input_record;i++){
h_keyval_arr_arr[i].arr = NULL;
h_keyval_arr_arr[i].arr_len = 0;
}//for
// One keyval_arr_t* per input record; filled in by the kernels on the device.
keyval_arr_t **d_keyval_arr_arr_p;
checkCudaErrors(hipMalloc((void***)&(d_keyval_arr_arr_p),d_g_state->num_input_record*sizeof(keyval_arr_t*)));
d_g_state->d_intermediate_keyval_arr_arr_p = d_keyval_arr_arr_p;
int *count = NULL;
checkCudaErrors(hipMalloc((void**)&(count),d_g_state->num_input_record*sizeof(int)));
d_g_state->d_intermediate_keyval_total_count = count;
checkCudaErrors(hipMemset(d_g_state->d_intermediate_keyval_total_count,0,d_g_state->num_input_record*sizeof(int)));
//TODO
//printData3<<<1,1>>>(*d_g_state);
//----------------------------------------------
//3, determine the number of threads to run
//----------------------------------------------
//--------------------------------------------------
//4, start_row_id map
//Note: DO *NOT* set large number of threads within block (512), which lead to too many invocation of malloc in the kernel.
//--------------------------------------------------
hipDeviceSynchronize();
int numGPUCores = getGPUCoresNum();
dim3 blocks(THREAD_BLOCK_SIZE, THREAD_BLOCK_SIZE);
// Enough blocks to cover 16 threads per GPU core (ceil division).
int numBlocks = (numGPUCores*16+(blocks.x*blocks.y)-1)/(blocks.x*blocks.y);
dim3 grids(numBlocks, 1);
int total_gpu_threads = (grids.x*grids.y*blocks.x*blocks.y);
ShowLog("GridDim.X:%d GridDim.Y:%d BlockDim.X:%d BlockDim.Y:%d TotalGPUThreads:%d",grids.x,grids.y,blocks.x,blocks.y,total_gpu_threads);
hipDeviceSynchronize();
double t1 = PandaTimer();
hipLaunchKernelGGL(( GPUMapPartitioner), dim3(grids),dim3(blocks), 0, 0, *d_g_state);
hipDeviceSynchronize();
double t2 = PandaTimer();
// Ceil(num_input_record / total_gpu_threads): number of map passes needed.
int num_records_per_thread = (d_g_state->num_input_record + (total_gpu_threads)-1)/(total_gpu_threads);
int totalIter = num_records_per_thread;
ShowLog("GPUMapPartitioner:%f totalIter:%d",t2-t1, totalIter);
for (int iter = 0; iter< totalIter; iter++){
double t3 = PandaTimer();
// Iterations are launched in reverse order (totalIter-1-iter) — presumably
// intentional scheduling; confirm against RunGPUMapTasks before changing.
hipLaunchKernelGGL(( RunGPUMapTasks), dim3(grids),dim3(blocks), 0, 0, *d_g_state, totalIter -1 - iter, totalIter);
hipDeviceSynchronize();
double t4 = PandaTimer();
size_t total_mem,avail_mem;
checkCudaErrors(hipMemGetInfo( &avail_mem, &total_mem ));
ShowLog("GPU_ID:[%d] RunGPUMapTasks take %f sec at iter [%d/%d] remain %d mb GPU mem processed", d_g_state->gpu_id, t4-t3,iter,totalIter, avail_mem/1024/1024);
}//for
ShowLog("GPU_ID:[%d] Done %d Tasks",d_g_state->gpu_id,d_g_state->num_input_record);
return 0;
}//int

// No-op placeholder: device-side state is not released here (TODO upstream).
void DestroyDGlobalState(gpu_context * d_g_state){
}//void

// Launches the GPUCombiner kernel over all intermediate records after
// resetting the per-record counters; blocks until the kernel completes.
void StartGPUCombiner(gpu_context * state){
double t1 = PandaTimer();
ShowLog("state->num_input_record:%d",state->num_input_record);
checkCudaErrors(hipMemset(state->d_intermediate_keyval_total_count,0,state->num_input_record*sizeof(int)));
int numGPUCores = getGPUCoresNum();
dim3 blocks(THREAD_BLOCK_SIZE, THREAD_BLOCK_SIZE);
int numBlocks = (numGPUCores*16+(blocks.x*blocks.y)-1)/(blocks.x*blocks.y);
dim3 grids(numBlocks, 1);
hipLaunchKernelGGL(( GPUCombiner), dim3(grids),dim3(blocks), 0, 0, *state);
hipDeviceSynchronize();
double t2 = PandaTimer();
ShowLog("GPU_ID:[%d] GPUCombiner take:%f sec",state->gpu_id, t2-t1);
}

// Sorts/groups this GPU's intermediate output via Shuffle4GPUOutput.
void StartGPUShuffle(gpu_context * state){
gpu_context* d_g_state = state;
double t1 = PandaTimer();
Shuffle4GPUOutput(d_g_state);
double t2 = PandaTimer();
ShowLog("GPU_ID:[%d] GPUShuffle take %f sec", state->gpu_id,t2-t1);
}//void

// pthread entry: runs the CPU-side combiner over the shared intermediate
// buffer belonging to the map task at panda_cpu_task_info->start_row_idx.
// Groups equal keys (cpu_compare) and either calls cpu_combiner on multi-value
// groups or re-tags single-value pairs as _COMBINE. Returns NULL.
void *RunPandaCPUCombinerThread(void *ptr){
//ShowLog("hi0");
panda_cpu_task_info_t *panda_cpu_task_info = (panda_cpu_task_info_t *)ptr;
cpu_context *d_g_state = (cpu_context *)(panda_cpu_task_info->d_g_state);
job_configuration *cpu_job_conf = (job_configuration *)(panda_cpu_task_info->cpu_job_conf);
//keyval_t * input_keyval_arr;
//keyval_arr_t *intermediate_keyval_arr_arr_p = d_g_state->intermediate_keyval_arr_arr_p;
int index = 0;
keyvals_t * merged_keyvals_arr = NULL;
int merged_key_arr_len = 0;
int start_idx = panda_cpu_task_info->start_row_idx;
keyval_arr_t *kv_arr_p = (keyval_arr_t *)&(d_g_state->intermediate_keyval_arr_arr_p[start_idx]);
int unmerged_shared_arr_len = *kv_arr_p->shared_arr_len;
int *shared_buddy = kv_arr_p->shared_buddy;
int shared_buddy_len = kv_arr_p->shared_buddy_len;
//ShowLog("hi1");
char *shared_buff = kv_arr_p->shared_buff;
int shared_buff_len = *kv_arr_p->shared_buff_len;
int shared_buff_pos = *kv_arr_p->shared_buff_pos;
val_t *val_t_arr = (val_t *)malloc(sizeof(val_t)*unmerged_shared_arr_len);
if (val_t_arr == NULL) ShowError("there is no enough memory");
int num_keyval_pairs_after_combiner = 0;
int total_intermediate_keyvalue_pairs = 0;
//ShowLog("hi2");
// keyval_pos_t records are packed at the END of shared_buff, newest first;
// entry i lives at shared_buff_len - sizeof(keyval_pos_t)*(len - i).
for (int i = 0; i < unmerged_shared_arr_len; i++){
keyval_pos_t *head_kv_p = (keyval_pos_t *)(shared_buff + shared_buff_len - sizeof(keyval_pos_t)*(unmerged_shared_arr_len-i));
keyval_pos_t *first_kv_p = head_kv_p;
// Only process pairs still tagged _MAP (not yet claimed by another group).
if (first_kv_p->next_idx != _MAP) continue;
//ShowLog("hi3");
int iKeySize = first_kv_p->keySize;
char *iKey = shared_buff + first_kv_p->keyPos;
char *iVal = shared_buff + first_kv_p->valPos;
if((first_kv_p->keyPos%4!=0)||(first_kv_p->valPos%4!=0)){
ShowError("keyPos or valPos is not aligned with 4 bytes, results could be wrong");
}//
int index = 0;
first_kv_p = head_kv_p;
(val_t_arr[index]).valSize = first_kv_p->valSize;
(val_t_arr[index]).val = (char*)shared_buff + first_kv_p->valPos;
//ShowLog("hi i:%d",i);
// Chain all later pairs with an equal key into this group via next_idx.
for (int j=i+1;j<unmerged_shared_arr_len;j++){
keyval_pos_t *next_kv_p = (keyval_pos_t *)((char *)shared_buff + shared_buff_len - sizeof(keyval_pos_t)*(unmerged_shared_arr_len-j));
char *jKey = (char *)shared_buff+next_kv_p->keyPos;
int jKeySize = next_kv_p->keySize;
if (cpu_compare(iKey,iKeySize,jKey,jKeySize)!=0){
continue;
}
index++;
first_kv_p->next_idx = j;
first_kv_p = next_kv_p;
(val_t_arr[index]).valSize = next_kv_p->valSize;
(val_t_arr[index]).val = (char*)shared_buff + next_kv_p->valPos;
}
int valCount = index+1;
total_intermediate_keyvalue_pairs += valCount;
if(valCount>1)
cpu_combiner(iKey,val_t_arr,iKeySize,(valCount),d_g_state,start_idx);
else{
first_kv_p->next_idx = _COMBINE;
first_kv_p->task_idx = start_idx;
}
num_keyval_pairs_after_combiner++;
}//for
free(val_t_arr);
d_g_state->intermediate_keyval_total_count[start_idx] = num_keyval_pairs_after_combiner;
ShowLog("CPU_GROUP_ID:[%d] Map_Idx:%d Done:%d Combiner: %d => %d Compress Ratio:%f",
d_g_state->cpu_group_id,
panda_cpu_task_info->start_row_idx,
panda_cpu_task_info->end_row_idx - panda_cpu_task_info->start_row_idx,
total_intermediate_keyvalue_pairs, num_keyval_pairs_after_combiner,
(num_keyval_pairs_after_combiner/(float)total_intermediate_keyvalue_pairs) );
return NULL;
}

// NOTE(review): these file-scope globals mirror the fields of keyval_arr_t;
// they appear to be leftover declarations and are presumably unused — verify
// against the rest of the project before removing.
int *shared_arr_len;
int *shared_buddy;
int shared_buddy_len;
char *shared_buff;
int *shared_buff_len;
int *shared_buff_pos;
//int keyval_pos;
int arr_len;
keyval_pos_t *arr;
keyval_t *cpu_arr;

// pthread entry: runs cpu_map over input records [start_row_idx, end_row_idx).
// All records in the range share one CPU_SHARED_BUFF_SIZE scratch buffer plus
// a 3-int header (buff_len, buff_pos, arr_len) followed by the buddy index
// list; each record's keyval_arr_t is pointed at this shared state before the
// map calls begin. Returns NULL.
void *RunPandaCPUMapThread(void *ptr){
panda_cpu_task_info_t *panda_cpu_task_info = (panda_cpu_task_info_t *)ptr;
cpu_context *d_g_state = (cpu_context *)(panda_cpu_task_info->d_g_state);
job_configuration *cpu_job_conf = (job_configuration *)(panda_cpu_task_info->cpu_job_conf);
int start_row_idx = panda_cpu_task_info->start_row_idx;
int end_row_idx = panda_cpu_task_info->end_row_idx;
char *buff = (char *)malloc(sizeof(char)*CPU_SHARED_BUFF_SIZE);
// int_arr[0..2] hold shared_buff_len/pos/arr_len; int_arr+3 is the buddy list.
int *int_arr = (int *)malloc(sizeof(int)*(end_row_idx-start_row_idx+3));
int *buddy = int_arr+3;
int buddy_len = end_row_idx-start_row_idx;
for (int i=0;i<buddy_len;i++){
buddy [i]=i+start_row_idx;
}//for
//ShowLog("start_idx:%d end_idx:%d",start_row_idx, end_row_idx);
for (int map_idx = start_row_idx; map_idx < end_row_idx; map_idx++){
d_g_state->intermediate_keyval_arr_arr_p[map_idx].shared_buff = buff;
(d_g_state->intermediate_keyval_arr_arr_p[map_idx].shared_buff_len) = int_arr;
(d_g_state->intermediate_keyval_arr_arr_p[map_idx].shared_buff_pos) = int_arr+1;
(d_g_state->intermediate_keyval_arr_arr_p[map_idx].shared_arr_len) = int_arr+2;
*(d_g_state->intermediate_keyval_arr_arr_p[map_idx].shared_buff_len) = CPU_SHARED_BUFF_SIZE;
*(d_g_state->intermediate_keyval_arr_arr_p[map_idx].shared_buff_pos) = 0;
*(d_g_state->intermediate_keyval_arr_arr_p[map_idx].shared_arr_len) = 0;
(d_g_state->intermediate_keyval_arr_arr_p[map_idx].shared_buddy) = buddy;
(d_g_state->intermediate_keyval_arr_arr_p[map_idx].shared_buddy_len) = buddy_len;
//ShowWarn("---->(d_g_state->intermediate_keyval_arr_arr_p[%d].shared_buddy_len=:%d)",
// map_idx,(d_g_state->intermediate_keyval_arr_arr_p[map_idx].shared_buddy_len));
}//for
for (int map_idx = panda_cpu_task_info->start_row_idx; map_idx < panda_cpu_task_info->end_row_idx; map_idx++){
keyval_t *kv_p = (keyval_t *)(&(cpu_job_conf->input_keyval_arr[map_idx]));
cpu_map(kv_p->key,kv_p->val,kv_p->keySize,kv_p->valSize,d_g_state,map_idx);
}//for
ShowLog("CPU_GROUP_ID:[%d] Done :%d tasks",d_g_state->cpu_group_id, panda_cpu_task_info->end_row_idx - panda_cpu_task_info->start_row_idx);
return NULL;
}

//Use Pthread to process Panda_Reduce GPU Context
//http://stackoverflow.com/questions/9139932/cuda-kernels-using-pthreads-missing-configuration-error
//-------------------------------------------------------
// Panda_Reduce
// pthread entry point for the reduce phase; `ptr` must be a thread_info_t*.
// GPU_CORE_ACC: stage this thread's slice of the sorted intermediate records
// onto the device and launch the GPU reduce. CPU_ACC: run cpu_reduce
// sequentially over the sorted intermediate records. Always returns NULL.
//-------------------------------------------------------
void * Panda_Reduce(void *ptr)
{
	//GPU Context of Threads may conflict with each other.
	thread_info_t *thread_info = (thread_info_t *)ptr;

	if(thread_info->device_type == GPU_CORE_ACC){
		InitGPUDevice(thread_info);
		panda_context *panda = (panda_context *)(thread_info->panda);
		gpu_context *d_g_state = (gpu_context *)(thread_info->d_g_state);
		int num_gpu_core_groups = d_g_state->num_gpu_core_groups;
		if ( num_gpu_core_groups <= 0){
			ShowError("num_gpu_core_groups == 0 return");
			return NULL;
		}//if
		// Copy records [start_idx, end_idx) of the sorted intermediate
		// key/value array from host to device for this GPU.
		AddReduceInputRecordGPU(d_g_state,(panda->sorted_intermediate_keyvals_arr), thread_info->start_idx, thread_info->end_idx);
		int tid = thread_info->tid;
		int assigned_gpu_id = d_g_state->gpu_id;
		int gpu_id;
		hipGetDevice(&gpu_id);
		ShowLog("Start GPU Reduce Tasks. Number of Reduce Tasks:%d Tid:%d gpu_id:%d num_gpu_core_groups:%d",d_g_state->d_sorted_keyvals_arr_len, tid, gpu_id, num_gpu_core_groups);
		StartGPUReduce(d_g_state);
	}//if

	if(thread_info->device_type == CPU_ACC){
		cpu_context *d_g_state = (cpu_context *)(thread_info->d_g_state);
		if (d_g_state->num_cpus_cores == 0){
			ShowError("num_cpus_cores == 0 return");
			return NULL;
		}//if
		ShowLog("Start CPU Reduce Tasks. Number of Reduce Tasks:%d",d_g_state->sorted_keyvals_arr_len);
		for (int map_idx = 0; map_idx < d_g_state->sorted_keyvals_arr_len; map_idx++){
			keyvals_t *kv_p = (keyvals_t *)(&(d_g_state->sorted_intermediate_keyvals_arr[map_idx]));
			if (kv_p->val_arr_len <=0)
				ShowError("kv_p->val_arr_len <=0");
			else
				cpu_reduce(kv_p->key, kv_p->vals, kv_p->keySize, kv_p->val_arr_len, d_g_state);
		}//for
	}//if
	return NULL;
}//void

// Stub accessors (not implemented). FIX: the originals fell off the end of a
// non-void function, which is undefined behavior; return NULL explicitly so
// any caller gets a well-defined (if unusable) value.
__device__ void *GetVal(void *vals, int4* interOffsetSizes, int keyIndex, int valStartIndex)
{
	return NULL;
}

__device__ void *GetKey(void *key, int4* interOffsetSizes, int keyIndex, int valStartIndex)
{
	return NULL;
}

//-------------------------------------------------------
//Reducer
//-------------------------------------------------------
// Each GPU thread walks its strided share of the sorted reduce tasks.
// d_pos_arr_4_sorted_keyval_pos_arr[t] is the exclusive end index (into
// d_keyval_pos_arr) of task t's value group; the start is the previous
// entry (0 for task 0). A temporary val_t array is built on the device
// heap and handed to the user gpu_reduce.
__global__ void ReducePartitioner(gpu_context d_g_state)
{
	int num_records_per_thread = (d_g_state.d_sorted_keyvals_arr_len + (gridDim.x*blockDim.x*blockDim.y)-1)/(gridDim.x*blockDim.x*blockDim.y);
	int block_start_idx = num_records_per_thread * blockIdx.x * blockDim.x * blockDim.y;
	int thread_start_idx = block_start_idx
		+ ((threadIdx.y*blockDim.x + threadIdx.x)/STRIDE)*num_records_per_thread*STRIDE
		+ ((threadIdx.y*blockDim.x + threadIdx.x)%STRIDE);
	int thread_end_idx = thread_start_idx + num_records_per_thread*STRIDE;
	if (thread_end_idx > d_g_state.d_sorted_keyvals_arr_len)
		thread_end_idx = d_g_state.d_sorted_keyvals_arr_len;
	if (thread_start_idx >= thread_end_idx)
		return;

	int start_idx, end_idx;
	for(int reduce_task_idx=thread_start_idx; reduce_task_idx < thread_end_idx; reduce_task_idx+=STRIDE){
		if (reduce_task_idx==0)
			start_idx = 0;
		else
			start_idx = d_g_state.d_pos_arr_4_sorted_keyval_pos_arr[reduce_task_idx-1];
		end_idx = d_g_state.d_pos_arr_4_sorted_keyval_pos_arr[reduce_task_idx];
		// Device-heap allocation (requires the malloc heap limit set by
		// InitGPUDevice via *DeviceSetLimit on the host side).
		val_t *val_t_arr = (val_t*)malloc(sizeof(val_t)*(end_idx-start_idx));
		int keySize = d_g_state.d_keyval_pos_arr[start_idx].keySize;
		int keyPos = d_g_state.d_keyval_pos_arr[start_idx].keyPos;
		void *key = (char*)d_g_state.d_sorted_keys_shared_buff+keyPos;
		for (int index = start_idx;index<end_idx;index++){
			int valSize = d_g_state.d_keyval_pos_arr[index].valSize;
			int valPos = d_g_state.d_keyval_pos_arr[index].valPos;
			val_t_arr[index-start_idx].valSize = valSize;
			val_t_arr[index-start_idx].val = (char*)d_g_state.d_sorted_vals_shared_buff + valPos;
		}//for
		if( end_idx - start_idx == 0)
			ShowError("gpu_reduce valCount ==0");
		else
			gpu_reduce(key, val_t_arr, keySize, end_idx-start_idx, d_g_state);
		// FIX: the original leaked val_t_arr every iteration, steadily
		// exhausting the device malloc heap; free(NULL) is a safe no-op
		// if the allocation failed.
		free(val_t_arr);
	}//for
}

// Host driver for the reduce phase: allocates the reduced-output array on the
// device sized to the number of distinct sorted keys, then launches
// ReducePartitioner and blocks until it completes.
void StartGPUReduce(gpu_context *d_g_state)
{
	hipDeviceSynchronize();
	d_g_state->d_reduced_keyval_arr_len = d_g_state->d_sorted_keyvals_arr_len;
	checkCudaErrors(hipMalloc((void **)&(d_g_state->d_reduced_keyval_arr), sizeof(keyval_t)*d_g_state->d_reduced_keyval_arr_len));
	hipDeviceSynchronize();

	int numGPUCores = getGPUCoresNum();
	dim3 blocks(THREAD_BLOCK_SIZE, THREAD_BLOCK_SIZE);
	int numBlocks = (numGPUCores*16+(blocks.x*blocks.y)-1)/(blocks.x*blocks.y);
	dim3 grids(numBlocks, 1);
	int total_gpu_threads = (grids.x*grids.y*blocks.x*blocks.y);
	ShowLog("number of reduce tasks:%d total gpu threads:%d",d_g_state->d_sorted_keyvals_arr_len, total_gpu_threads);

	hipLaunchKernelGGL(( ReducePartitioner), dim3(grids),dim3(blocks), 0, 0, *d_g_state);
	hipDeviceSynchronize();
}//void

// pthread entry point for the map phase; `ptr` must be a thread_info_t*.
// Dispatches on device_type (GPU core / GPU card / CPU): initializes the
// device and its MapReduce state, runs the map tasks, then the optional
// local combiner and the shuffle, logging phase timings to disk.
// Always returns NULL.
void* Panda_Map(void *ptr)
{
	thread_info_t *thread_info = (thread_info_t *)ptr;

	if(thread_info->device_type == GPU_CORE_ACC){
		double t1 = PandaTimer();
		gpu_context *d_g_state = (gpu_context *)(thread_info->d_g_state);
		InitGPUDevice(thread_info);
		//ShowLog("GPU_ID:[%d] Init GPU MapReduce Load Data From Host to GPU memory",d_g_state->gpu_id);
		InitGPUMapReduce3(d_g_state);
		ShowLog("GPU_ID:[%d] Start GPU CORE Map Tasks",d_g_state->gpu_id);
		StartGPUCoreMap(d_g_state);
		double t2 = PandaTimer();
		//Local combiner
		if(d_g_state->local_combiner){
			StartGPUCombiner(d_g_state);
		}
		double t3 = PandaTimer();
		StartGPUShuffle(d_g_state);
		double t4 = PandaTimer();
		DoLog2Disk(" GPU Map take %f sec",t2-t1);
		DoLog2Disk(" GPU Combiner take %f sec",t3-t2);
		DoLog2Disk(" GPU Shuffle take %f sec",t4-t3);
	}//if

	if(thread_info->device_type == GPU_CARD_ACC){
		double t1 = PandaTimer();
		gpu_card_context *d_g_state = (gpu_card_context *)(thread_info->d_g_state);
		InitGPUDevice(thread_info);
		//ShowLog("GPU_ID:[%d] Init GPU MapReduce Load Data From Host to GPU memory",d_g_state->gpu_id);
		InitGPUCardMapReduce(d_g_state);
		ShowLog("GPU_ID:[%d] Start GPU CARD Map Tasks",d_g_state->gpu_id);
		StartGPUCardMap(d_g_state);
		double t2 = PandaTimer();
		//Local combiner (not yet supported on the GPU-card path).
		if(d_g_state->local_combiner){
			//StartGPUCombiner(d_g_state);
		}
		double t3 = PandaTimer();
		//StartGPUShuffle(d_g_state);
		double t4 = PandaTimer();
		DoLog2Disk(" GPU Map take %f sec",t2-t1);
		DoLog2Disk(" GPU Combiner take %f sec",t3-t2);
		DoLog2Disk(" GPU Shuffle take %f sec",t4-t3);
	}//if

	if(thread_info->device_type == CPU_ACC){
		double t1 = PandaTimer();
		cpu_context *d_g_state = (cpu_context *)(thread_info->d_g_state);
		//ShowLog("CPU_GROUP_ID:[%d] Init CPU Device",d_g_state->cpu_group_id);
		InitCPUDevice(thread_info);
		//ShowLog("Init CPU MapReduce");
		InitCPUMapReduce2(thread_info);
		ShowLog("CPU_GROUP_ID:[%d] Start CPU Map Tasks",d_g_state->cpu_group_id);
		StartCPUMap2(thread_info);
		double t2 = PandaTimer();
		if(d_g_state->local_combiner){
			StartCPUCombiner(thread_info);
		}
		ShowLog("CPU_GROUP_ID:[%d] Start CPU Shuffle2",d_g_state->cpu_group_id);
		double t3 = PandaTimer();
		StartCPUShuffle2(thread_info);
		double t4 = PandaTimer();
		DoLog2Disk(" CPU Map take %f sec",t2-t1);
		DoLog2Disk(" CPU Combiner take %f sec",t3-t2);
		DoLog2Disk(" CPU Shuffle take %f sec",t4-t3);
	}
	return NULL;
}//FinishMapReduce2(d_g_state);

// Marks the end of a MapReduce run (log only; spec is unused).
void FinishMapReduce(Spec_t* spec)
{
	ShowLog( "=====finish panda mapreduce=====");
}//void

// Logs remaining device memory at the end of a GPU run.
// NOTE(review): avail_mem is size_t but is printed with %d — harmless on
// small values, truncating on large; confirm ShowLog is printf-style before
// switching to %zu.
void FinishMapReduce2(gpu_context* state)
{
	size_t total_mem,avail_mem, heap_limit;
	checkCudaErrors(hipMemGetInfo( &avail_mem, &total_mem ));
	ShowLog("avail_mem:%d",avail_mem);
}//void

#endif //__PANDALIB_CU__
34680f40019a3a729e18f9a49624738183c2b5af.cu
/*
Copyright 2012 The Trustees of Indiana University. All rights reserved.
CGL MapReduce Framework on GPUs and CPUs.
Code Name: Panda
File: PandaLib.cu
First Version: 2012-07-01 V0.1
Current Version: 2012-09-01 V0.3
Last Updates: 2012-09-016
Developer: Hui Li (lihui@indiana.edu)
This is the source code for Panda, a MapReduce runtime on GPUs and CPUs.
*/
#ifndef __PANDALIB_CU__
#define __PANDALIB_CU__

#include "Panda.h"
#include "UserAPI.cu"

//----------------------------------------------
//Get default job configuration
//----------------------------------------------
// Allocates and zero-initializes a job_configuration with all features
// (auto tuning, iterative support, local combiner) disabled. Exits on OOM.
job_configuration *CreateJobConf(){
job_configuration *job_conf = (job_configuration *)malloc(sizeof(job_configuration));
if (job_conf == NULL) exit(-1);
memset(job_conf, 0, sizeof(job_configuration));
job_conf->num_input_record = 0;
job_conf->input_keyval_arr = NULL;
job_conf->auto_tuning = false;
job_conf->iterative_support = false;
job_conf->local_combiner = false;
job_conf->num_mappers = 0;
job_conf->num_reducers = 0;
job_conf->num_gpu_core_groups = 0;
job_conf->num_cpus_cores = 0;
job_conf->num_cpus_groups = 0;
return job_conf;
}//gpu_context

// Allocates and zero-initializes a GPU-card execution context. Exits on OOM.
gpu_card_context *CreateGPUCardContext(){
gpu_card_context *d_g_state = (gpu_card_context*)malloc(sizeof(gpu_card_context));
if (d_g_state == NULL) exit(-1);
memset(d_g_state, 0, sizeof(gpu_card_context));
d_g_state->iterative_support = false;
d_g_state->input_keyval_arr = NULL;
//d_g_state->num_mappers = 0;
//d_g_state->num_reducers = 0;
d_g_state->local_combiner = false;
return d_g_state;
}//gpu_context

// Allocates and zero-initializes a GPU-core execution context. Exits on OOM.
gpu_context *CreateGPUCoreContext(){
gpu_context *d_g_state = (gpu_context*)malloc(sizeof(gpu_context));
if (d_g_state == NULL) exit(-1);
memset(d_g_state, 0, sizeof(gpu_context));
d_g_state->iterative_support = false;
d_g_state->h_input_keyval_arr = NULL;
d_g_state->num_mappers = 0;
d_g_state->num_reducers = 0;
d_g_state->local_combiner = false;
return d_g_state;
}//gpu_context

// Allocates and zero-initializes a CPU execution context. Exits on OOM.
cpu_context *CreateCPUContext(){
cpu_context *d_g_state = (cpu_context*)malloc(sizeof(cpu_context));
if (d_g_state == NULL) exit(-1);
memset(d_g_state, 0, sizeof(cpu_context));
d_g_state->iterative_support = false;
d_g_state->local_combiner = false;
d_g_state->input_keyval_arr = NULL;
return d_g_state;
}//gpu_context

// Allocates the top-level Panda context and nulls all sub-context pointers.
// NOTE(review): unlike the other Create* functions this one does not memset
// the struct, so any field not assigned below starts uninitialized.
panda_context *CreatePandaContext(){
panda_context *d_g_state = (panda_context*)malloc(sizeof(panda_context));
if (d_g_state == NULL) exit(-1);
d_g_state->input_keyval_arr = NULL;
d_g_state->intermediate_keyval_arr_arr_p = NULL;
d_g_state->sorted_intermediate_keyvals_arr = NULL;
d_g_state->sorted_keyvals_arr_len = 0;
d_g_state->num_gpu_core_groups = 0;
d_g_state->num_gpu_card_groups = 0;
d_g_state->num_cpus_groups = 0;
d_g_state->gpu_core_context = NULL;
d_g_state->gpu_card_context = NULL;
d_g_state->cpu_context = NULL;
return d_g_state;
}//panda_context

//For version 0.3
// Prepares a CPU group for the map phase: validates the job configuration,
// then allocates one pthread handle + task-info slot per CPU core and one
// intermediate keyval_arr_t per input record. Exits on invalid input.
void InitCPUMapReduce2(thread_info_t * thread_info){
cpu_context *d_g_state = (cpu_context *)(thread_info->d_g_state);
job_configuration *job_conf = (job_configuration *)(thread_info->job_conf);
if (job_conf->num_input_record<=0) { ShowError("Error: no any input keys"); exit(-1);}
if (job_conf->input_keyval_arr == NULL) { ShowError("Error: input_keyval_arr == NULL"); exit(-1);}
if (d_g_state->num_cpus_cores <= 0) { ShowError("Error: d_g_state->num_cpus == 0"); exit(-1);}
int totalKeySize = 0;
int totalValSize = 0;
for(int i=0;i<job_conf->num_input_record;i++){
totalKeySize += job_conf->input_keyval_arr[i].keySize;
totalValSize += job_conf->input_keyval_arr[i].valSize;
}//for
ShowLog("CPU_GROUP_ID:[%d] num_input_record:%d, totalKeySize:%d KB totalValSize:%d KB num_cpus:%d", d_g_state->cpu_group_id, job_conf->num_input_record, totalKeySize/1024, totalValSize/1024, d_g_state->num_cpus_cores);
//TODO determin num_cpus
int num_cpus_cores = d_g_state->num_cpus_cores;
d_g_state->panda_cpu_task = (pthread_t *)malloc(sizeof(pthread_t)*(num_cpus_cores));
d_g_state->panda_cpu_task_info = (panda_cpu_task_info_t *)malloc(sizeof(panda_cpu_task_info_t)*(num_cpus_cores));
d_g_state->intermediate_keyval_arr_arr_p = (keyval_arr_t *)malloc(sizeof(keyval_arr_t)*job_conf->num_input_record);
memset(d_g_state->intermediate_keyval_arr_arr_p, 0, sizeof(keyval_arr_t)*job_conf->num_input_record);
for (int i=0;i<num_cpus_cores;i++){
d_g_state->panda_cpu_task_info[i].d_g_state = d_g_state;
d_g_state->panda_cpu_task_info[i].cpu_job_conf = job_conf;
d_g_state->panda_cpu_task_info[i].num_cpus_cores = num_cpus_cores;
d_g_state->panda_cpu_task_info[i].start_row_idx = 0;
d_g_state->panda_cpu_task_info[i].end_row_idx = 0;
}//for
d_g_state->iterative_support = true;
ShowLog("CPU_GROUP_ID:[%d] DONE",d_g_state->cpu_group_id);
}

#ifdef DEV_MODE
//For Version 0.3 test depressed
// Dev-only variant: packs all input key/value records into two contiguous
// host buffers (keys and values) plus a position array, then copies all
// three to the device in bulk.
void InitGPUMapReduce4(thread_info_t* thread_info)
{
gpu_context *d_g_state = (gpu_context *)(thread_info->d_g_state);
job_configuration* gpu_job_conf = (job_configuration*)(thread_info->job_conf);
keyval_t * kv_p = gpu_job_conf->input_keyval_arr;
ShowLog("d_g_state->configured:%s enable for iterative applications",d_g_state->configured? "true" : "false");
//if (d_g_state->configured)
// return;
ShowLog("copy %d input records from Host to GPU memory",gpu_job_conf->num_input_record);
//checkCudaErrors(cudaMalloc((void **)&d_g_state->d_input_keyval_arr,sizeof(keyval_t)*d_g_state->num_input_record));
int totalKeySize = 0;
int totalValSize = 0;
for(int i=0;i<gpu_job_conf->num_input_record;i++){
totalKeySize += kv_p[i].keySize;
totalValSize += kv_p[i].valSize;
}//for
ShowLog("totalKeySize:%d totalValSize:%d", totalKeySize, totalValSize);
void *input_vals_shared_buff = malloc(totalValSize);
void *input_keys_shared_buff = malloc(totalKeySize);
keyval_pos_t *input_keyval_pos_arr = (keyval_pos_t *)malloc(sizeof(keyval_pos_t)*gpu_job_conf->num_input_record);
int keyPos = 0;
int valPos = 0;
int keySize = 0;
int valSize = 0;
for(int i=0; i<gpu_job_conf->num_input_record; i++){
keySize = kv_p[i].keySize;
valSize = kv_p[i].valSize;
memcpy((char *)input_keys_shared_buff + keyPos,(char *)(kv_p[i].key), keySize);
memcpy((char *)input_vals_shared_buff + valPos,(char *)(kv_p[i].val), valSize);
input_keyval_pos_arr[i].keySize = keySize;
input_keyval_pos_arr[i].keyPos = keyPos;
input_keyval_pos_arr[i].valPos = valPos;
input_keyval_pos_arr[i].valSize = valSize;
keyPos += keySize;
valPos += valSize;
}//for
checkCudaErrors(cudaMalloc((void **)&d_g_state->d_input_keyval_pos_arr,sizeof(keyval_pos_t)*gpu_job_conf->num_input_record));
checkCudaErrors(cudaMalloc((void **)&d_g_state->d_input_keys_shared_buff, totalKeySize));
checkCudaErrors(cudaMalloc((void **)&d_g_state->d_input_vals_shared_buff, totalValSize));
checkCudaErrors(cudaMemcpy(d_g_state->d_input_keyval_pos_arr, input_keyval_pos_arr,sizeof(keyval_pos_t)*gpu_job_conf->num_input_record ,cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_g_state->d_input_keys_shared_buff, input_keys_shared_buff,totalKeySize ,cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_g_state->d_input_vals_shared_buff, input_vals_shared_buff,totalValSize ,cudaMemcpyHostToDevice));
//checkCudaErrors(cudaMemcpy(d_g_state->d_input_keyval_arr,h_buff,sizeof(keyval_t)*d_g_state->num_input_record,cudaMemcpyHostToDevice));
cudaThreadSynchronize();
d_g_state->configured = true;
}//void
#endif

// Prepares a GPU-card group for the map phase: validates input, then
// allocates one task handle/info slot per input record and one intermediate
// keyval_arr_t per input record. Exits on invalid input.
void InitGPUCardMapReduce(gpu_card_context* d_g_state){
//cpu_context *d_g_state = (cpu_context *)(thread_info->d_g_state);
//job_configuration *job_conf = (job_configuration *)(thread_info->job_conf);
//////////////////
if (d_g_state->num_input_record<=0) { ShowError("Error: no any input keys"); exit(-1);}
if (d_g_state->input_keyval_arr == NULL) { ShowError("Error: input_keyval_arr == NULL"); exit(-1);}
//if (d_g_state->num_cpus_cores <= 0) { ShowError("Error: d_g_state->num_cpus == 0"); exit(-1);}
int totalKeySize = 0;
int totalValSize = 0;
for(int i=0;i<d_g_state->num_input_record;i++){
totalKeySize += d_g_state->input_keyval_arr[i].keySize;
totalValSize += d_g_state->input_keyval_arr[i].valSize;
}//for
ShowLog("GPU_CARD_GROUP_ID:[%d] num_input_record:%d, totalKeySize:%d totalValSize:%d ", d_g_state->gpu_group_id, d_g_state->num_input_record, totalKeySize, totalValSize);
//TODO determin num_cpus
//int num_cpus_cores = d_g_state->num_cpus_cores;
int num_task_per_gpu_card = d_g_state->num_input_record;
d_g_state->panda_gpu_task = (pthread_t *)malloc(sizeof(pthread_t)*(num_task_per_gpu_card));
d_g_state->panda_cpu_task_info = (panda_cpu_task_info_t *)malloc(sizeof(panda_cpu_task_info_t)*(num_task_per_gpu_card));
d_g_state->intermediate_keyval_arr_arr_p = (keyval_arr_t *)malloc(sizeof(keyval_arr_t)*d_g_state->num_input_record);
memset(d_g_state->intermediate_keyval_arr_arr_p, 0, sizeof(keyval_arr_t)*d_g_state->num_input_record);
for (int i=0;i<num_task_per_gpu_card;i++){
//d_g_state->panda_cpu_task_info[i].d_g_state = d_g_state;
//d_g_state->panda_cpu_task_info[i].cpu_job_conf = job_conf;
//d_g_state->panda_cpu_task_info[i].num_cpus_cores = num_cpus_cores;
d_g_state->panda_cpu_task_info[i].start_row_idx = 0;
d_g_state->panda_cpu_task_info[i].end_row_idx = 0;
}//for
d_g_state->iterative_support = true;
ShowLog("GPU_CARD_GROUP_ID:[%d] DONE",d_g_state->gpu_group_id);
}//void

// Packs all host input key/value records into two contiguous buffers plus a
// keyval_pos_t position array and copies them to the device in three bulk
// transfers. This is the production (non-DEV_MODE) input-staging path.
void InitGPUMapReduce3(gpu_context* d_g_state)
{
//ShowLog("d_g_state->iterative_support:%s enable for iterative applications",d_g_state->iterative_support? "true" : "false");
//if (d_g_state->iterative_support){
//ShowLog("d_g_state->configured:%s skip configuration...",d_g_state->iterative_support? "true" : "false");
//return;
//}
int totalKeySize = 0;
int totalValSize = 0;
for(int i=0;i<d_g_state->num_input_record;i++){
totalKeySize += d_g_state->h_input_keyval_arr[i].keySize;
totalValSize += d_g_state->h_input_keyval_arr[i].valSize;
}//for
ShowLog("GPU_ID:[%d] copy %d input records from Host to GPU memory totalKeySize:%d KB totalValSize:%d KB",d_g_state->gpu_id, d_g_state->num_input_record, totalKeySize/1024, totalValSize/1024);
double t1 = PandaTimer();
void *input_vals_shared_buff = malloc(totalValSize);
void *input_keys_shared_buff = malloc(totalKeySize);
keyval_pos_t *input_keyval_pos_arr = (keyval_pos_t *)malloc(sizeof(keyval_pos_t)*d_g_state->num_input_record);
int keyPos = 0;
int valPos = 0;
int keySize = 0;
int valSize = 0;
for(int i=0;i<d_g_state->num_input_record;i++){
keySize = d_g_state->h_input_keyval_arr[i].keySize;
valSize = d_g_state->h_input_keyval_arr[i].valSize;
memcpy((char *)input_keys_shared_buff + keyPos,(char *)(d_g_state->h_input_keyval_arr[i].key), keySize);
memcpy((char *)input_vals_shared_buff + valPos,(char *)(d_g_state->h_input_keyval_arr[i].val), valSize);
input_keyval_pos_arr[i].keySize = keySize;
input_keyval_pos_arr[i].keyPos = keyPos;
input_keyval_pos_arr[i].valPos = valPos;
input_keyval_pos_arr[i].valSize = valSize;
keyPos += keySize;
valPos += valSize;
}//for
checkCudaErrors(cudaMalloc((void **)&d_g_state->d_input_keyval_pos_arr,sizeof(keyval_pos_t)*d_g_state->num_input_record));
checkCudaErrors(cudaMalloc((void **)&d_g_state->d_input_keys_shared_buff, totalKeySize));
checkCudaErrors(cudaMalloc((void **)&d_g_state->d_input_vals_shared_buff, totalValSize));
checkCudaErrors(cudaMemcpy(d_g_state->d_input_keyval_pos_arr, input_keyval_pos_arr,sizeof(keyval_pos_t)*d_g_state->num_input_record ,cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_g_state->d_input_keys_shared_buff, input_keys_shared_buff,totalKeySize ,cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_g_state->d_input_vals_shared_buff, input_vals_shared_buff,totalValSize ,cudaMemcpyHostToDevice));
//checkCudaErrors(cudaMemcpy(d_g_state->d_input_keyval_arr,h_buff,sizeof(keyval_t)*d_g_state->num_input_record,cudaMemcpyHostToDevice));
cudaThreadSynchronize();
double t2 = PandaTimer();
ShowLog("GPU_ID:[%d] copy keyvalue pairs done. Take:%f sec",d_g_state->gpu_id, t2-t1);
//d_g_state->iterative_support = true;
}//void

#ifdef DEV_MODE
// Dev-only variant of the input-staging path (no timing, no configured flag).
void InitGPUMapReduce2(gpu_context* d_g_state)
{
ShowLog("d_g_state->num_input_record:%d",d_g_state->num_input_record);
//checkCudaErrors(cudaMalloc((void **)&d_g_state->d_input_keyval_arr,sizeof(keyval_t)*d_g_state->num_input_record));
int totalKeySize = 0;
int totalValSize = 0;
for(int i=0;i<d_g_state->num_input_record;i++){
totalKeySize += d_g_state->h_input_keyval_arr[i].keySize;
totalValSize += d_g_state->h_input_keyval_arr[i].valSize;
}//for
void *input_vals_shared_buff = malloc(totalValSize);
void *input_keys_shared_buff = malloc(totalKeySize);
keyval_pos_t *input_keyval_pos_arr = (keyval_pos_t *)malloc(sizeof(keyval_pos_t)*d_g_state->num_input_record);
int keyPos = 0;
int valPos = 0;
int keySize = 0;
int valSize = 0;
for(int i=0;i<d_g_state->num_input_record;i++){
keySize = d_g_state->h_input_keyval_arr[i].keySize;
valSize = d_g_state->h_input_keyval_arr[i].valSize;
memcpy((char *)input_keys_shared_buff + keyPos,(char *)(d_g_state->h_input_keyval_arr[i].key), keySize);
memcpy((char *)input_vals_shared_buff + valPos,(char *)(d_g_state->h_input_keyval_arr[i].val), valSize);
input_keyval_pos_arr[i].keySize = keySize;
input_keyval_pos_arr[i].keyPos = keyPos;
input_keyval_pos_arr[i].valPos = valPos;
input_keyval_pos_arr[i].valSize = valSize;
keyPos += keySize;
valPos += valSize;
}//for
checkCudaErrors(cudaMalloc((void **)&d_g_state->d_input_keyval_pos_arr,sizeof(keyval_pos_t)*d_g_state->num_input_record));
checkCudaErrors(cudaMalloc((void **)&d_g_state->d_input_keys_shared_buff, totalKeySize));
checkCudaErrors(cudaMalloc((void **)&d_g_state->d_input_vals_shared_buff, totalValSize));
checkCudaErrors(cudaMemcpy(d_g_state->d_input_keyval_pos_arr, input_keyval_pos_arr,sizeof(keyval_pos_t)*d_g_state->num_input_record ,cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_g_state->d_input_keys_shared_buff, input_keys_shared_buff,totalKeySize ,cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_g_state->d_input_vals_shared_buff, input_vals_shared_buff,totalValSize ,cudaMemcpyHostToDevice));
//checkCudaErrors(cudaMemcpy(d_g_state->d_input_keyval_arr,h_buff,sizeof(keyval_t)*d_g_state->num_input_record,cudaMemcpyHostToDevice));
cudaThreadSynchronize();
}//void
#endif

void InitCPUDevice(thread_info_t*thread_info){
//------------------------------------------
//1, init CPU device
//------------------------------------------
// Defaults num_cpus_cores to the machine's core count when unset.
cpu_context *d_g_state = (cpu_context *)(thread_info->d_g_state);
if (d_g_state->num_cpus_cores<=0) d_g_state->num_cpus_cores = getCPUCoresNum();
//int tid = thread_info->tid;
ShowLog( "CPU_GROUP_ID:[%d] Init CPU Deivce Num cpus cores:%d",d_g_state->cpu_group_id, d_g_state->num_cpus_cores);
}

// Selects and configures the GPU assigned to this worker thread: validates
// the group count for the device type, switches to the assigned device if
// needed, and raises the device malloc-heap limit to 80% of free memory
// (required by kernels that call malloc, e.g. ReducePartitioner).
// NOTE(review): `tid` is only assigned on the GPU_CORE_ACC path but is logged
// unconditionally below — on the GPU_CARD_ACC path it is read uninitialized.
void InitGPUDevice(thread_info_t*thread_info){
//------------------------------------------
//1, init device
//------------------------------------------
int tid, assigned_gpu_id;
if (thread_info->device_type == GPU_CORE_ACC){
gpu_context *d_g_state = (gpu_context *)(thread_info->d_g_state);
tid = thread_info->tid;
assigned_gpu_id = d_g_state->gpu_id;
int num_gpu_core_groups = d_g_state->num_gpu_core_groups;
if (num_gpu_core_groups == 0) {
ShowError("error num_gpu_core_groups == 0");
exit(-1);
}//gpu_context
}
if (thread_info->device_type == GPU_CARD_ACC){
gpu_card_context *d_g_state = (gpu_card_context *)(thread_info->d_g_state);
//int tid = thread_info->tid;
assigned_gpu_id = d_g_state->gpu_id;
int num_gpu_card_groups = d_g_state->num_gpu_card_groups;
if (num_gpu_card_groups == 0) {
ShowError("error num_gpu_core_groups == 0");
exit(-1);
}//gpu_context
}//if
//int tid = thread_info->tid;
int gpu_id;
cudaGetDevice(&gpu_id);
int gpu_count = 0;
cudaGetDeviceCount(&gpu_count);
cudaDeviceProp gpu_dev;
cudaGetDeviceProperties(&gpu_dev, gpu_id);
ShowLog("TID:[%d] check GPU ids: cur_gpu_id:[%d] assig_gpu_id:[%d] cudaGetDeviceCount:[%d] GPU name:%s", tid, gpu_id, assigned_gpu_id, gpu_count, gpu_dev.name);
//TODO: num_gpus is hard-coded to 1, so the modulo below always picks device 0.
int num_gpus = 1;
if ( gpu_id != assigned_gpu_id ){
//ShowLog("cudaSetDevice gpu_id %d == (tid num_gpu_core_groups) %d ", gpu_id, tid%num_gpu_core_groups);
cudaSetDevice(assigned_gpu_id % num_gpus);
}//if
size_t total_mem,avail_mem, heap_limit;
checkCudaErrors(cudaMemGetInfo( &avail_mem, &total_mem ));
size_t heap_size = (avail_mem*0.8);
cudaDeviceSetLimit(cudaLimitMallocHeapSize, heap_size);
cudaDeviceGetLimit(&heap_limit, cudaLimitMallocHeapSize);
int numGPUCores = getGPUCoresNum();
ShowLog("GPU_ID:[%d] numGPUCores:%d total_mem:%d MB HeapSize:%d MB avail_mem:%d MB ", gpu_id, numGPUCores,total_mem/1024/1024, heap_limit/1024/1024, avail_mem/1024/1024);
}

// Appends one input record (deep copies of key and val) to the job's input
// array, growing it by one element per call via realloc.
void AddPandaTask(job_configuration* job_conf, void* key, void* val, int keySize, int valSize){
int len = job_conf->num_input_record;
if (len<0) return;
if (len == 0) job_conf->input_keyval_arr = NULL;
job_conf->input_keyval_arr = (keyval_t *)realloc(job_conf->input_keyval_arr, sizeof(keyval_t)*(len+1));
job_conf->input_keyval_arr[len].keySize = keySize;
job_conf->input_keyval_arr[len].valSize = valSize;
job_conf->input_keyval_arr[len].key = malloc(keySize);
job_conf->input_keyval_arr[len].val = malloc(valSize);
memcpy(job_conf->input_keyval_arr[len].key,key,keySize);
memcpy(job_conf->input_keyval_arr[len].val,val,valSize);
job_conf->num_input_record++;
}

// Stages reduce input rows [start_row_id, end_row_id) on the device: packs
// keys and values into contiguous buffers (each entry padded to a 4-byte
// boundary), builds a keyval_pos_t per value and a prefix array of group end
// indices, and copies everything host-to-device.
void AddReduceInputRecordGPU(gpu_context* d_g_state, keyvals_t * sorted_intermediate_keyvals_arr, int start_row_id, int end_row_id){
int total_count = 0;
for(int i=start_row_id;i<end_row_id;i++){
total_count += sorted_intermediate_keyvals_arr[i].val_arr_len;
}//for
int totalKeySize = 0;
int totalValSize = 0;
for(int i=start_row_id;i<end_row_id;i++){
// (x+3)/4*4 rounds each size up to the next multiple of 4.
totalKeySize += (sorted_intermediate_keyvals_arr[i].keySize+3)/4*4;
for (int j=0;j<sorted_intermediate_keyvals_arr[i].val_arr_len;j++)
totalValSize += (sorted_intermediate_keyvals_arr[i].vals[j].valSize+3)/4*4;
}//for
checkCudaErrors(cudaMalloc((void **)&d_g_state->d_sorted_keys_shared_buff,totalKeySize));
checkCudaErrors(cudaMalloc((void **)&d_g_state->d_sorted_vals_shared_buff,totalValSize));
checkCudaErrors(cudaMalloc((void **)&d_g_state->d_keyval_pos_arr,sizeof(keyval_pos_t)*total_count));
d_g_state->h_sorted_keys_shared_buff = malloc(sizeof(char)*totalKeySize);
d_g_state->h_sorted_vals_shared_buff = malloc(sizeof(char)*totalValSize);
char *sorted_keys_shared_buff = (char *)d_g_state->h_sorted_keys_shared_buff;
char *sorted_vals_shared_buff = (char *)d_g_state->h_sorted_vals_shared_buff;
char *keyval_pos_arr = (char *)malloc(sizeof(keyval_pos_t)*total_count);
int sorted_key_arr_len = (end_row_id-start_row_id);
keyval_pos_t *tmp_keyval_pos_arr = (keyval_pos_t *)malloc(sizeof(keyval_pos_t)*total_count);
ShowLog("GPU_ID:[%d] total #different intermediate records:%d total records:%d totalKeySize:%d KB totalValSize:%d KB", d_g_state->gpu_id, end_row_id - start_row_id, total_count, totalKeySize/1024, totalValSize/1024);
int *pos_arr_4_pos_arr = (int*)malloc(sizeof(int)*(sorted_key_arr_len));
memset(pos_arr_4_pos_arr,0,sizeof(int)*sorted_key_arr_len);
int index = 0;
int keyPos = 0;
int valPos = 0;
for (int i=start_row_id;i<end_row_id;i++){
keyvals_t* p = (keyvals_t*)&(sorted_intermediate_keyvals_arr[i]);
memcpy(sorted_keys_shared_buff+keyPos,p->key, p->keySize);
for (int j=0;j<p->val_arr_len;j++){
tmp_keyval_pos_arr[index].keyPos = keyPos;
tmp_keyval_pos_arr[index].keySize = p->keySize;
tmp_keyval_pos_arr[index].valPos = valPos;
tmp_keyval_pos_arr[index].valSize = p->vals[j].valSize;
memcpy(sorted_vals_shared_buff + valPos,p->vals[j].val,p->vals[j].valSize);
valPos += (p->vals[j].valSize+3)/4*4;
index++;
}//for
keyPos += (p->keySize+3)/4*4;
// Running end index (exclusive) of this key's value group; consumed by
// ReducePartitioner on the device.
pos_arr_4_pos_arr[i-start_row_id] = index;
}//
d_g_state->d_sorted_keyvals_arr_len = end_row_id-start_row_id;
checkCudaErrors(cudaMemcpy(d_g_state->d_keyval_pos_arr,tmp_keyval_pos_arr,sizeof(keyval_pos_t)*total_count,cudaMemcpyHostToDevice));
checkCudaErrors(cudaMalloc((void**)&(d_g_state->d_pos_arr_4_sorted_keyval_pos_arr),sizeof(int)*sorted_key_arr_len));
checkCudaErrors(cudaMemcpy(d_g_state->d_pos_arr_4_sorted_keyval_pos_arr,pos_arr_4_pos_arr,sizeof(int)*sorted_key_arr_len,cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_g_state->d_sorted_keys_shared_buff, sorted_keys_shared_buff, sizeof(char)*totalKeySize,cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_g_state->d_sorted_vals_shared_buff, sorted_vals_shared_buff, sizeof(char)*totalValSize,cudaMemcpyHostToDevice));
}

void AddMapInputRecord4GPUCore(gpu_context* d_g_state, keyval_t *kv_p, int start_row_id, int end_row_id){
if (end_row_id<=start_row_id) {
ShowError("error! end_row_id:%d <=start_row_id:%d",end_row_id, start_row_id);
return;
}
int len = d_g_state->num_input_record;
if (len<0) { ShowError("error!
len<0"); return; } if (len == 0) d_g_state->h_input_keyval_arr = NULL; ShowLog("GPU_ID:[%d] add map tasks into gpu; #total input:%d #added input:%d",d_g_state->gpu_id, len, end_row_id-start_row_id); d_g_state->h_input_keyval_arr = (keyval_t *)realloc(d_g_state->h_input_keyval_arr, sizeof(keyval_t)*(len + end_row_id - start_row_id)); //assert(d_g_state->h_input_keyval_arr != NULL); for (int i=start_row_id;i<end_row_id;i++){ d_g_state->h_input_keyval_arr[len].keySize = kv_p[i].keySize; d_g_state->h_input_keyval_arr[len].valSize = kv_p[i].valSize; d_g_state->h_input_keyval_arr[len].key = kv_p[i].key; d_g_state->h_input_keyval_arr[len].val = kv_p[i].val; d_g_state->num_input_record++; len++; } } void AddMapInputRecord4GPUCard(gpu_card_context* d_g_state, keyval_t *kv_p, int start_row_id, int end_row_id){ if (end_row_id<=start_row_id) { ShowError("error! end_row_id[%d] <= start_row_id[%d]",end_row_id, start_row_id); return; } int len = d_g_state->num_input_record; if (len<0) { ShowError("error! d_g_state->num_input_record<0"); return; } if (len == 0) d_g_state->input_keyval_arr = NULL; ShowLog("GPU_CARD_GROUP_ID:[%d] add map input record for cpu device current #input:%d added #input:%d",d_g_state->gpu_group_id,len,end_row_id-start_row_id); d_g_state->input_keyval_arr = (keyval_t *)realloc(d_g_state->input_keyval_arr, sizeof(keyval_t)*(len+end_row_id-start_row_id)); for (int i=start_row_id;i<end_row_id;i++){ d_g_state->input_keyval_arr[len].keySize = kv_p[i].keySize; d_g_state->input_keyval_arr[len].valSize = kv_p[i].valSize; d_g_state->input_keyval_arr[len].key = kv_p[i].key; d_g_state->input_keyval_arr[len].val = kv_p[i].val; d_g_state->num_input_record++; len++; }//for } void AddMapInputRecordCPU(cpu_context* d_g_state, keyval_t *kv_p, int start_row_id, int end_row_id){ if (end_row_id<=start_row_id) { ShowError("error! end_row_id[%d] <= start_row_id[%d]",end_row_id, start_row_id); return; } int len = d_g_state->num_input_record; if (len<0) { ShowError("error! 
len<0"); return; } if (len == 0) d_g_state->input_keyval_arr = NULL; ShowLog("CPU_GROUP_ID:[%d] add map input record for cpu device current #input:%d added #input:%d",d_g_state->cpu_group_id,len,end_row_id-start_row_id); d_g_state->input_keyval_arr = (keyval_t *)realloc(d_g_state->input_keyval_arr, sizeof(keyval_t)*(len+end_row_id-start_row_id)); for (int i=start_row_id;i<end_row_id;i++){ d_g_state->input_keyval_arr[len].keySize = kv_p[i].keySize; d_g_state->input_keyval_arr[len].valSize = kv_p[i].valSize; d_g_state->input_keyval_arr[len].key = kv_p[i].key; d_g_state->input_keyval_arr[len].val = kv_p[i].val; d_g_state->num_input_record++; len++; }//for } void AddReduceInputRecordCPU(cpu_context* d_g_state, keyvals_t *kv_p, int start_row_id, int end_row_id){ if (end_row_id<start_row_id){ ShowError("error! end_row_id<=start_row_id"); return; } int len = d_g_state->sorted_keyvals_arr_len; if (len<0) { ShowError("error! len<0"); return; } if (len == 0) d_g_state->sorted_intermediate_keyvals_arr = NULL; d_g_state->sorted_intermediate_keyvals_arr = (keyvals_t *)malloc(sizeof(keyvals_t)*(len+end_row_id-start_row_id)); for (int i = len; i< len+end_row_id-start_row_id; i++){ d_g_state->sorted_intermediate_keyvals_arr[i].keySize = kv_p[start_row_id+i-len].keySize; d_g_state->sorted_intermediate_keyvals_arr[i].key = kv_p[start_row_id+i-len].key; d_g_state->sorted_intermediate_keyvals_arr[i].vals = kv_p[start_row_id+i-len].vals; d_g_state->sorted_intermediate_keyvals_arr[i].val_arr_len = kv_p[start_row_id+i-len].val_arr_len; //ShowLog("key:%s vals_arr_len:%d", // d_g_state->sorted_intermediate_keyvals_arr[i].key, d_g_state->sorted_intermediate_keyvals_arr[i].val_arr_len); //for (int j=0;j<d_g_state->sorted_intermediate_keyvals_arr[i].val_arr_len;j++) // printf("val:%d ",*(int*)(kv_p[start_row_id+i-len].vals[j].val)); //printf("\n"); }//for d_g_state->sorted_keyvals_arr_len = len + end_row_id-start_row_id; } __device__ void GPUEmitReduceOuput (void* key, void* val, int keySize, 
int valSize, gpu_context *d_g_state){ keyval_t *p = &(d_g_state->d_reduced_keyval_arr[TID]); p->keySize = keySize; p->key = malloc(keySize); memcpy(p->key,key,keySize); p->valSize = valSize; p->val = malloc(valSize); memcpy(p->val,val,valSize); printf("[gpu output]: key:%s val:%d\n",key,*(int *)val); }//__device__ void CPUEmitReduceOutput (void* key, void* val, int keySize, int valSize, cpu_context *d_g_state){ /*keyval_t *p = &(d_g_state->d_reduced_keyval_arr[TID]); p->keySize = keySize; p->key = malloc(keySize); memcpy(p->key,key,keySize); p->valSize = valSize; p->val = malloc(valSize); memcpy(p->val,val,valSize);*/ printf("[cpu output]: key:%s val:%d\n",(char*)key,*(int *)val); }//__device__ void GPUCardEmitMapOutput(void *key, void *val, int keySize, int valSize, gpu_card_context *d_g_state, int map_task_idx){ if(map_task_idx >= d_g_state->num_input_record) { ShowError("error ! map_task_idx >= d_g_state->num_input_record"); return; } keyval_arr_t *kv_arr_p = &(d_g_state->intermediate_keyval_arr_arr_p[map_task_idx]); char *buff = (char*)(kv_arr_p->shared_buff); if (!((*kv_arr_p->shared_buff_pos) + keySize + valSize < (*kv_arr_p->shared_buff_len) - sizeof(keyval_pos_t)*((*kv_arr_p->shared_arr_len)+1))){ ShowWarn("Warning! not enough memory at GPU task:%d *kv_arr_p->shared_arr_len:%d current buff_size:%d KB", map_task_idx,*kv_arr_p->shared_arr_len,(*kv_arr_p->shared_buff_len)/1024); char *new_buff = (char*)malloc(sizeof(char)*((*kv_arr_p->shared_buff_len)*2)); if(new_buff==NULL){ ShowError("Error ! 
There is not enough memory to allocat!"); return; } memcpy(new_buff, buff, sizeof(char)*(*kv_arr_p->shared_buff_pos)); int blockSize = sizeof(keyval_pos_t)*(*kv_arr_p->shared_arr_len); memcpy(new_buff + (*kv_arr_p->shared_buff_len)*2 - blockSize, (char*)buff + (*kv_arr_p->shared_buff_len) - blockSize, blockSize); (*kv_arr_p->shared_buff_len) = 2*(*kv_arr_p->shared_buff_len); for(int idx = 0; idx < (kv_arr_p->shared_buddy_len); idx++){ int cur_map_task_idx = kv_arr_p->shared_buddy[idx]; //the buddy relationship won't be changed keyval_arr_t *cur_kv_arr_p = &(d_g_state->intermediate_keyval_arr_arr_p[cur_map_task_idx]); cur_kv_arr_p->shared_buff = new_buff; }//for free(buff);// buff = new_buff; }//if keyval_pos_t *kv_p = (keyval_pos_t *)((char *)buff + *kv_arr_p->shared_buff_len - sizeof(keyval_pos_t)*((*kv_arr_p->shared_arr_len)+1)); (*kv_arr_p->shared_arr_len)++; kv_p->task_idx = map_task_idx; kv_p->next_idx = _MAP; kv_p->keyPos = (*kv_arr_p->shared_buff_pos); *kv_arr_p->shared_buff_pos += ((keySize+3)/4)*4; //alignment 4 bytes for reading and writing memcpy((char *)(buff) + kv_p->keyPos, key, keySize); kv_p->keySize = keySize; kv_p->valPos = (*kv_arr_p->shared_buff_pos); *kv_arr_p->shared_buff_pos += ((valSize+3)/4)*4; char *val_p = (char *)(buff) + kv_p->valPos; memcpy((char *)(buff) + kv_p->valPos, val, valSize); kv_p->valSize = valSize; (kv_arr_p->arr) = kv_p; }//__device__ //Last update 9/1/2012 void CPUEmitMapOutput(void *key, void *val, int keySize, int valSize, cpu_context *d_g_state, int map_task_idx){ if(map_task_idx >= d_g_state->num_input_record) { ShowError("error ! map_task_idx >= d_g_state->num_input_record"); return; } keyval_arr_t *kv_arr_p = &(d_g_state->intermediate_keyval_arr_arr_p[map_task_idx]); char *buff = (char*)(kv_arr_p->shared_buff); if (!((*kv_arr_p->shared_buff_pos) + keySize + valSize < (*kv_arr_p->shared_buff_len) - sizeof(keyval_pos_t)*((*kv_arr_p->shared_arr_len)+1))){ ShowWarn("Warning! 
not enough memory at CPU task:%d *kv_arr_p->shared_arr_len:%d current buff_size:%d KB", map_task_idx,*kv_arr_p->shared_arr_len,(*kv_arr_p->shared_buff_len)/1024); char *new_buff = (char*)malloc(sizeof(char)*((*kv_arr_p->shared_buff_len)*2)); if(new_buff==NULL){ ShowError("Error ! There is not enough memory to allocat!"); return; } memcpy(new_buff, buff, sizeof(char)*(*kv_arr_p->shared_buff_pos)); int blockSize = sizeof(keyval_pos_t)*(*kv_arr_p->shared_arr_len); memcpy(new_buff + (*kv_arr_p->shared_buff_len)*2 - blockSize, (char*)buff + (*kv_arr_p->shared_buff_len) - blockSize, blockSize); (*kv_arr_p->shared_buff_len) = 2*(*kv_arr_p->shared_buff_len); for(int idx = 0; idx < (kv_arr_p->shared_buddy_len); idx++){ int cur_map_task_idx = kv_arr_p->shared_buddy[idx]; //the buddy relationship won't be changed keyval_arr_t *cur_kv_arr_p = &(d_g_state->intermediate_keyval_arr_arr_p[cur_map_task_idx]); cur_kv_arr_p->shared_buff = new_buff; }//for free(buff);// buff = new_buff; }//if keyval_pos_t *kv_p = (keyval_pos_t *)((char *)buff + *kv_arr_p->shared_buff_len - sizeof(keyval_pos_t)*((*kv_arr_p->shared_arr_len)+1)); (*kv_arr_p->shared_arr_len)++; kv_p->task_idx = map_task_idx; kv_p->next_idx = _MAP; kv_p->keyPos = (*kv_arr_p->shared_buff_pos); *kv_arr_p->shared_buff_pos += ((keySize+3)/4)*4; //alignment 4 bytes for reading and writing memcpy((char *)(buff) + kv_p->keyPos, key, keySize); kv_p->keySize = keySize; kv_p->valPos = (*kv_arr_p->shared_buff_pos); *kv_arr_p->shared_buff_pos += ((valSize+3)/4)*4; char *val_p = (char *)(buff) + kv_p->valPos; memcpy((char *)(buff) + kv_p->valPos, val, valSize); kv_p->valSize = valSize; (kv_arr_p->arr) = kv_p; }//__device__ void CPUEmitCombinerOutput(void *key, void *val, int keySize, int valSize, cpu_context *d_g_state, int map_task_idx){ keyval_arr_t *kv_arr_p = &(d_g_state->intermediate_keyval_arr_arr_p[map_task_idx]); void *shared_buff = kv_arr_p->shared_buff; int shared_buff_len = *kv_arr_p->shared_buff_len; int shared_arr_len = 
*kv_arr_p->shared_arr_len; int shared_buff_pos = *kv_arr_p->shared_buff_pos; int required_mem_len = (shared_buff_pos) + keySize + valSize + sizeof(keyval_pos_t)*(shared_arr_len+1); if (required_mem_len> shared_buff_len){ ShowWarn("Warning! no enough memory in GPU task:%d need:%d KB KeySize:%d ValSize:%d shared_arr_len:%d shared_buff_pos:%d shared_buff_len:%d", map_task_idx, required_mem_len/1024,keySize,valSize,shared_arr_len,shared_buff_pos,shared_buff_len); char *new_buff = (char*)malloc(sizeof(char)*((*kv_arr_p->shared_buff_len)*2)); if(new_buff==NULL)ShowError(" There is not enough memory to allocat!"); memcpy(new_buff, shared_buff, sizeof(char)*(*kv_arr_p->shared_buff_pos)); memcpy(new_buff + (*kv_arr_p->shared_buff_len)*2 - sizeof(keyval_pos_t)*(*kv_arr_p->shared_arr_len), (char*)shared_buff + (*kv_arr_p->shared_buff_len) - sizeof(keyval_pos_t)*(*kv_arr_p->shared_arr_len), sizeof(keyval_pos_t)*(*kv_arr_p->shared_arr_len)); shared_buff_len = 2*(*kv_arr_p->shared_buff_len); (*kv_arr_p->shared_buff_len) = shared_buff_len; for(int idx = 0; idx < (kv_arr_p->shared_buddy_len); idx++){ int cur_map_task_idx = kv_arr_p->shared_buddy[idx]; //the buddy relationship won't be changed keyval_arr_t *cur_kv_arr_p = &(d_g_state->intermediate_keyval_arr_arr_p[cur_map_task_idx]); cur_kv_arr_p->shared_buff = new_buff; }//for free(shared_buff); shared_buff = new_buff; }//if keyval_pos_t *kv_p = (keyval_pos_t *)((char *)shared_buff + shared_buff_len - sizeof(keyval_pos_t)*(shared_arr_len + 1)); kv_p->keySize = keySize; kv_p->valSize = valSize; kv_p->task_idx = map_task_idx; kv_p->next_idx = _COMBINE; //merged results memcpy( (char*)shared_buff + *kv_arr_p->shared_buff_pos, key, keySize); kv_p->keyPos = *kv_arr_p->shared_buff_pos; *kv_arr_p->shared_buff_pos += (keySize+3)/4*4; memcpy( (char*)shared_buff + *kv_arr_p->shared_buff_pos, val, valSize); kv_p->valPos = *kv_arr_p->shared_buff_pos; *kv_arr_p->shared_buff_pos += (valSize+3)/4*4; (*kv_arr_p->shared_arr_len)++; }//void 
// Device-side combiner emit: append a merged (key,val) into the buddy group's
// shared buffer, tagged _COMBINE. Payload grows from the front of the buffer,
// keyval_pos_t descriptors from the back; doubles the buffer on overflow and
// repoints every buddy task. Mirrors CPUEmitCombinerOutput.
__device__ void GPUEmitCombinerOutput(void *key, void *val, int keySize, int valSize, gpu_context *d_g_state, int map_task_idx){

	keyval_arr_t *kv_arr_p = d_g_state->d_intermediate_keyval_arr_arr_p[map_task_idx];
	void *shared_buff = kv_arr_p->shared_buff;
	int shared_buff_len = *kv_arr_p->shared_buff_len;
	int shared_arr_len = *kv_arr_p->shared_arr_len;
	int shared_buff_pos = *kv_arr_p->shared_buff_pos;

	// Need room for the payload plus one more descriptor slot at the tail.
	int required_mem_len = (shared_buff_pos) + keySize + valSize + sizeof(keyval_pos_t)*(shared_arr_len+1);
	if (required_mem_len> shared_buff_len){
		ShowWarn("Warning! no enough memory in GPU task:%d need:%d KB KeySize:%d ValSize:%d shared_arr_len:%d shared_buff_pos:%d shared_buff_len:%d",
			map_task_idx, required_mem_len/1024,keySize,valSize,shared_arr_len,shared_buff_pos,shared_buff_len);

		char *new_buff = (char*)malloc(sizeof(char)*((*kv_arr_p->shared_buff_len)*2));
		// NOTE(review): on allocation failure this only logs and falls through;
		// the memcpy below would then dereference NULL — confirm intended.
		if(new_buff==NULL)ShowError(" There is not enough memory to allocat!");

		// Copy payload (front) and descriptor block (tail) into the doubled buffer.
		memcpy(new_buff, shared_buff, sizeof(char)*(*kv_arr_p->shared_buff_pos));
		memcpy(new_buff + (*kv_arr_p->shared_buff_len)*2 - sizeof(keyval_pos_t)*(*kv_arr_p->shared_arr_len),
			(char*)shared_buff + (*kv_arr_p->shared_buff_len) - sizeof(keyval_pos_t)*(*kv_arr_p->shared_arr_len),
			sizeof(keyval_pos_t)*(*kv_arr_p->shared_arr_len));

		shared_buff_len = 2*(*kv_arr_p->shared_buff_len);
		(*kv_arr_p->shared_buff_len) = shared_buff_len;

		// All buddies share one buffer; repoint each at the replacement.
		for(int idx = 0; idx < (kv_arr_p->shared_buddy_len); idx++){
			int cur_map_task_idx = kv_arr_p->shared_buddy[idx]; //the buddy relationship won't be changed
			keyval_arr_t *cur_kv_arr_p = d_g_state->d_intermediate_keyval_arr_arr_p[cur_map_task_idx];
			cur_kv_arr_p->shared_buff = new_buff;
		}//for
		free(shared_buff);
		shared_buff = new_buff;
	}//if

	// New descriptor occupies the next free slot from the tail.
	keyval_pos_t *kv_p = (keyval_pos_t *)((char *)shared_buff + shared_buff_len - sizeof(keyval_pos_t)*(shared_arr_len + 1));
	kv_p->keySize = keySize;
	kv_p->valSize = valSize;
	kv_p->task_idx = map_task_idx;
	kv_p->next_idx = _COMBINE; //merged results

	memcpy( (char*)shared_buff + *kv_arr_p->shared_buff_pos, key, keySize);
	kv_p->keyPos = *kv_arr_p->shared_buff_pos;
	*kv_arr_p->shared_buff_pos += (keySize+3)/4*4;  // 4-byte alignment
	memcpy( (char*)shared_buff + *kv_arr_p->shared_buff_pos, val, valSize);
	kv_p->valPos = *kv_arr_p->shared_buff_pos;
	*kv_arr_p->shared_buff_pos += (valSize+3)/4*4;
	(*kv_arr_p->shared_arr_len)++;
}//__device__

//Last update 9/16/2012
// Device-side map emit: same shared-buffer append protocol as GPUEmitCombinerOutput,
// but records are tagged _MAP (raw, not yet combined).
__device__ void GPUEmitMapOutput(void *key, void *val, int keySize, int valSize, gpu_context *d_g_state, int map_task_idx){

	keyval_arr_t *kv_arr_p = d_g_state->d_intermediate_keyval_arr_arr_p[map_task_idx];
	char *buff = (char*)(kv_arr_p->shared_buff);

	// Grow when the payload front would collide with the descriptor tail.
	if (!((*kv_arr_p->shared_buff_pos) + keySize + valSize < (*kv_arr_p->shared_buff_len) - sizeof(keyval_pos_t)*((*kv_arr_p->shared_arr_len)+1))){
		ShowWarn("Warning! not enough memory at GPU task:%d *kv_arr_p->shared_arr_len:%d current buff_size:%d KB",
			map_task_idx,*kv_arr_p->shared_arr_len,(*kv_arr_p->shared_buff_len)/1024);

		char *new_buff = (char*)malloc(sizeof(char)*((*kv_arr_p->shared_buff_len)*2));
		if(new_buff==NULL){ ShowError("Error ! There is not enough memory to allocat!"); return; }

		memcpy(new_buff, buff, sizeof(char)*(*kv_arr_p->shared_buff_pos));
		memcpy(new_buff + (*kv_arr_p->shared_buff_len)*2 - sizeof(keyval_pos_t)*(*kv_arr_p->shared_arr_len),
			(char*)buff + (*kv_arr_p->shared_buff_len) - sizeof(keyval_pos_t)*(*kv_arr_p->shared_arr_len),
			sizeof(keyval_pos_t)*(*kv_arr_p->shared_arr_len));
		(*kv_arr_p->shared_buff_len) = 2*(*kv_arr_p->shared_buff_len);

		for(int idx = 0; idx < (kv_arr_p->shared_buddy_len); idx++){
			int cur_map_task_idx = kv_arr_p->shared_buddy[idx]; //the buddy relationship won't be changed
			keyval_arr_t *cur_kv_arr_p = d_g_state->d_intermediate_keyval_arr_arr_p[cur_map_task_idx];
			cur_kv_arr_p->shared_buff = new_buff;
		}//for
		// NOTE(review): freeing while buddy threads may still hold the old pointer
		// looks race-prone (the original author flagged it too) — confirm lifetime.
		free(buff);
		buff = new_buff;
	}//if

	keyval_pos_t *kv_p = (keyval_pos_t *)((char *)buff + *kv_arr_p->shared_buff_len - sizeof(keyval_pos_t)*((*kv_arr_p->shared_arr_len)+1));
	(*kv_arr_p->shared_arr_len)++;
	kv_p->task_idx = map_task_idx;
	kv_p->next_idx = _MAP;

	kv_p->keyPos = (*kv_arr_p->shared_buff_pos);
	*kv_arr_p->shared_buff_pos += ((keySize+3)/4)*4; //alignment 4 bytes for reading and writing
	memcpy((char *)(buff) + kv_p->keyPos,key,keySize);
	kv_p->keySize = keySize;

	kv_p->valPos = (*kv_arr_p->shared_buff_pos);
	*kv_arr_p->shared_buff_pos += ((valSize+3)/4)*4;
	char *val_p = (char *)(buff) + kv_p->valPos;  // NOTE(review): unused local
	memcpy((char *)(buff) + kv_p->valPos, val, valSize);
	kv_p->valSize = valSize;
	(kv_arr_p->arr) = kv_p;
	//kv_arr_p->arr_len++;
	//d_g_state->d_intermediate_keyval_total_count[map_task_idx] = kv_arr_p->arr_len;
}//__device__

#if 0
// Disabled single-group variant of the map partitioner; kept for reference only.
__global__ void GPUCardMapPartitioner(gpu_context d_g_state)
{
	int num_records_per_thread = (d_g_state.num_input_record);
	//if(TID==0) ShowWarn("hi 0 -- num_records_per_thread:%d",num_records_per_thread);
	int buddy_arr_len = num_records_per_thread;
	int * int_arr = (int*)malloc((4+buddy_arr_len)*sizeof(int));
	if(int_arr==NULL){ ShowError("there is not enough GPU memory\n"); return;}
	int *shared_arr_len = int_arr;
	int *shared_buff_len = int_arr+1;
	int *shared_buff_pos = int_arr+2;
	//int *num_buddy = int_arr+3;
	int *buddy = int_arr+4;
	//if(TID==0) ShowWarn("hi 1");
	(*shared_buff_len) = SHARED_BUFF_LEN;
	(*shared_arr_len) = 0;
	(*shared_buff_pos) = 0;
	char * buff = (char *)malloc(sizeof(char)*(*shared_buff_len));
	keyval_arr_t *kv_arr_t_arr = (keyval_arr_t *)malloc(sizeof(keyval_arr_t)*(d_g_state.num_input_record));
	int index = 0;
	index = 0;
	ShowWarn("d_g_state.num_input_record:%d",d_g_state.num_input_record);
	for(int map_task_idx = 0; map_task_idx < d_g_state.num_input_record; map_task_idx ++){
		keyval_arr_t *kv_arr_t = (keyval_arr_t *)&(kv_arr_t_arr[map_task_idx]);
		kv_arr_t->shared_buff = buff;
		kv_arr_t->shared_arr_len = shared_arr_len;
		kv_arr_t->shared_buff_len = shared_buff_len;
		kv_arr_t->shared_buff_pos = shared_buff_pos;
		kv_arr_t->shared_buddy = buddy;
		kv_arr_t->shared_buddy_len = buddy_arr_len;
		kv_arr_t->arr = NULL;
		kv_arr_t->arr_len = 0;
		d_g_state.d_intermediate_keyval_arr_arr_p[map_task_idx] = kv_arr_t;
	}//for
}//void
#endif

//-------------------------------------------------
//called by user defined map function
//-------------------------------------------------
//TODO 9/11/2012 merge threads and blocks code into the same place.
// Kernel: give each worker thread a device-heap shared buffer and register every
// map task it will own (a STRIDE-interleaved slice of the input) as a "buddy"
// sharing that buffer. Must run before RunGPUMapTasks with the same launch config.
__global__ void GPUMapPartitioner(gpu_context d_g_state)
{
	//ShowLog("gridDim.x:%d gridDim.y:%d gridDim.z:%d blockDim.x:%d blockDim.y:%d blockDim.z:%d blockIdx.x:%d blockIdx.y:%d blockIdx.z:%d\n",
	//	gridDim.x,gridDim.y,gridDim.z,blockDim.x,blockDim.y,blockDim.z,blockIdx.x,blockIdx.y,blockIdx.z);

	// Ceil-divide the input across all threads of the launch.
	int num_records_per_thread = (d_g_state.num_input_record + (gridDim.x*blockDim.x*blockDim.y)-1)/(gridDim.x*blockDim.x*blockDim.y);
	int block_start_idx = num_records_per_thread * blockIdx.x * blockDim.x * blockDim.y;
	int thread_start_idx = block_start_idx
		+ ((threadIdx.y*blockDim.x + threadIdx.x)/STRIDE)*num_records_per_thread*STRIDE
		+ ((threadIdx.y*blockDim.x + threadIdx.x)%STRIDE);
	int thread_end_idx = thread_start_idx + num_records_per_thread*STRIDE;
	if (thread_end_idx > d_g_state.num_input_record) thread_end_idx = d_g_state.num_input_record;
	if (thread_start_idx >= thread_end_idx) return;

	//if(TID==0) ShowWarn("hi 0 -- num_records_per_thread:%d",num_records_per_thread);
	// Control block layout: [arr_len][buff_len][buff_pos][spare][buddy indices...]
	int buddy_arr_len = num_records_per_thread;
	int * int_arr = (int*)malloc((4+buddy_arr_len)*sizeof(int));
	if(int_arr==NULL){ ShowError("there is not enough GPU memory\n"); return;}
	int *shared_arr_len = int_arr;
	int *shared_buff_len = int_arr+1;
	int *shared_buff_pos = int_arr+2;
	//int *num_buddy = int_arr+3;
	int *buddy = int_arr+4;
	//if(TID==0) ShowWarn("hi 1");
	(*shared_buff_len) = SHARED_BUFF_LEN;
	(*shared_arr_len) = 0;
	(*shared_buff_pos) = 0;

	char * buff = (char *)malloc(sizeof(char)*(*shared_buff_len));
	keyval_arr_t *kv_arr_t_arr = (keyval_arr_t *)malloc(sizeof(keyval_arr_t)*(thread_end_idx-thread_start_idx+STRIDE-1)/STRIDE);

	// Record which map tasks share this buffer.
	int index = 0;
	for(int idx = thread_start_idx; idx < thread_end_idx; idx += STRIDE){
		buddy[index] = idx;
		index ++;
	}//for
	index = 0;
	//if(TID==0) ShowWarn("hi 2");
	for(int map_task_idx = thread_start_idx; map_task_idx < thread_end_idx; map_task_idx += STRIDE){
		keyval_arr_t *kv_arr_t = (keyval_arr_t *)&(kv_arr_t_arr[index]);
		index++;
		kv_arr_t->shared_buff = buff;
		kv_arr_t->shared_arr_len = shared_arr_len;
		kv_arr_t->shared_buff_len = shared_buff_len;
		kv_arr_t->shared_buff_pos = shared_buff_pos;
		kv_arr_t->shared_buddy = buddy;
		kv_arr_t->shared_buddy_len = buddy_arr_len;
		kv_arr_t->arr = NULL;
		kv_arr_t->arr_len = 0;
		d_g_state.d_intermediate_keyval_arr_arr_p[map_task_idx] = kv_arr_t;
	}//for
	//if(TID==0) ShowWarn("hi 3");
}//GPUMapPartitioner

// Host-side map for the GPU-card path: set up one shared intermediate buffer for
// all input records, then run the user's gpu_card_map over each record serially.
void RunGPUCardMapFunction(gpu_card_context* d_g_state,int curIter, int totalIter){

	int start_row_idx = 0; //panda_cpu_task_info->start_row_idx;
	int end_row_idx = d_g_state->num_input_record; //panda_cpu_task_info->end_row_idx;

	char *buff = (char *)malloc(sizeof(char)*CPU_SHARED_BUFF_SIZE);
	// Control block: [buff_len][buff_pos][arr_len][buddy indices...]
	int *int_arr = (int *)malloc(sizeof(int)*(end_row_idx-start_row_idx+3));
	int *buddy = int_arr+3;
	int buddy_len = end_row_idx-start_row_idx;
	for (int i=0;i<buddy_len;i++){
		buddy [i]=i+start_row_idx;
	}//for
	//ShowLog("start_idx:%d end_idx:%d",start_row_idx, end_row_idx);

	// Every map task shares the same buffer/counters (single host thread here).
	for (int map_idx = start_row_idx; map_idx < end_row_idx; map_idx++){
		d_g_state->intermediate_keyval_arr_arr_p[map_idx].shared_buff = buff;
		(d_g_state->intermediate_keyval_arr_arr_p[map_idx].shared_buff_len) = int_arr;
		(d_g_state->intermediate_keyval_arr_arr_p[map_idx].shared_buff_pos) = int_arr+1;
		(d_g_state->intermediate_keyval_arr_arr_p[map_idx].shared_arr_len) = int_arr+2;
		*(d_g_state->intermediate_keyval_arr_arr_p[map_idx].shared_buff_len) = CPU_SHARED_BUFF_SIZE;
		*(d_g_state->intermediate_keyval_arr_arr_p[map_idx].shared_buff_pos) = 0;
		*(d_g_state->intermediate_keyval_arr_arr_p[map_idx].shared_arr_len) = 0;
		(d_g_state->intermediate_keyval_arr_arr_p[map_idx].shared_buddy) = buddy;
		(d_g_state->intermediate_keyval_arr_arr_p[map_idx].shared_buddy_len) = buddy_len;
		//ShowWarn("---->(d_g_state->intermediate_keyval_arr_arr_p[%d].shared_buddy_len=:%d)",
		//	map_idx,(d_g_state->intermediate_keyval_arr_arr_p[map_idx].shared_buddy_len));
	}//for

	// Invoke the user map function on each record.
	for (int map_idx = start_row_idx; map_idx < end_row_idx; map_idx++){
		keyval_t *kv_p = (keyval_t *)(&(d_g_state->input_keyval_arr[map_idx]));
		char *key = (char *)(kv_p->key);
		char *val = (char *)(kv_p->val);
		int keySize = kv_p->keySize;
		int valSize = kv_p->valSize;
		gpu_card_map(key, val, keySize, valSize, d_g_state, map_idx);
	}//for
	ShowLog("CPU_GROUP_ID:[%d] Done :%d tasks",d_g_state->gpu_group_id, 1);

	//int thread_start_idx = 0;
	//keyval_arr_t *kv_arr_p = d_g_state.d_intermediate_keyval_arr_arr_p[thread_start_idx];
	//char *shared_buff = (char *)(kv_arr_p->shared_buff);
	//int shared_arr_len = *kv_arr_p->shared_arr_len;
	//int shared_buff_len = *kv_arr_p->shared_buff_len;
	//d_g_state.d_intermediate_keyval_total_count[thread_start_idx] = *kv_arr_p->shared_arr_len;
}

// Kernel: run the user's gpu_core_map over this thread's slice of map tasks for
// iteration curIter of totalIter (must match GPUMapPartitioner's launch config
// so each thread sees the buffers the partitioner created for it).
__global__ void RunGPUMapTasks(gpu_context d_g_state, int curIter, int totalIter)
{
	//ShowLog("gridDim.x:%d gridDim.y:%d gridDim.z:%d blockDim.x:%d blockDim.y:%d blockDim.z:%d blockIdx.x:%d blockIdx.y:%d blockIdx.z:%d\n",
	//	gridDim.x,gridDim.y,gridDim.z,blockDim.x,blockDim.y,blockDim.z,blockIdx.x,blockIdx.y,blockIdx.z);

	int num_records_per_thread = (d_g_state.num_input_record + (gridDim.x*blockDim.x*blockDim.y)-1)/(gridDim.x*blockDim.x*blockDim.y);
	int block_start_idx = num_records_per_thread * blockIdx.x * blockDim.x * blockDim.y;
	int thread_start_idx = block_start_idx
		+ ((threadIdx.y*blockDim.x + threadIdx.x)/STRIDE)*num_records_per_thread*STRIDE
		+ ((threadIdx.y*blockDim.x + threadIdx.x)%STRIDE);
	//ShowLog("num_records_per_thread:%d block_start_idx:%d gridDim.x:%d gridDim.y:%d gridDim.z:%d blockDim.x:%d blockDim.y:%d blockDim.z:%d",num_records_per_thread, block_start_idx, gridDim.x,gridDim.y,gridDim.z,blockDim.x,blockDim.y,blockDim.z);
	int thread_end_idx = thread_start_idx + num_records_per_thread*STRIDE;
	if (thread_end_idx > d_g_state.num_input_record) thread_end_idx = d_g_state.num_input_record;
	if (thread_start_idx + curIter*STRIDE >= thread_end_idx) return;

	// Process every totalIter-th task of this thread's slice, offset by curIter.
	for(int map_task_idx = thread_start_idx + curIter*STRIDE; map_task_idx < thread_end_idx; map_task_idx += totalIter*STRIDE){
		char *key = (char *)(d_g_state.d_input_keys_shared_buff) + d_g_state.d_input_keyval_pos_arr[map_task_idx].keyPos;
		char *val = (char *)(d_g_state.d_input_vals_shared_buff) + d_g_state.d_input_keyval_pos_arr[map_task_idx].valPos;
		int valSize = d_g_state.d_input_keyval_pos_arr[map_task_idx].valSize;
		int keySize = d_g_state.d_input_keyval_pos_arr[map_task_idx].keySize;
		//ShowWarn("valSize:%d keySize:%d",valSize,keySize);
		////////////////////////////////////////////////////////////////
		gpu_core_map(key, val, keySize, valSize, &d_g_state, map_task_idx);//
		////////////////////////////////////////////////////////////////
	}//for

	// Publish this slice's intermediate-record count (shared across buddies).
	keyval_arr_t *kv_arr_p = d_g_state.d_intermediate_keyval_arr_arr_p[thread_start_idx];
	//char *shared_buff = (char *)(kv_arr_p->shared_buff);
	//int shared_arr_len = *kv_arr_p->shared_arr_len;
	//int shared_buff_len = *kv_arr_p->shared_buff_len;
	d_g_state.d_intermediate_keyval_total_count[thread_start_idx] = *kv_arr_p->shared_arr_len;
	//__syncthreads();
}//GPUMapPartitioner

//NOTE: gpu_combiner will affect the later program results
//Last update 9/16/2012
// Launch one pthread per CPU core to run the combiner over contiguous slices of
// the intermediate records, then join them all.
void StartCPUCombiner(thread_info_t *thread_info){

	cpu_context *d_g_state = (cpu_context*)(thread_info->d_g_state);
	job_configuration *cpu_job_conf = (job_configuration*)(thread_info->job_conf);

	if (d_g_state->intermediate_keyval_arr_arr_p == NULL) { ShowError("intermediate_keyval_arr_arr_p == NULL"); exit(-1); }
	if (cpu_job_conf->num_input_record <= 0) { ShowError("no any input keys"); exit(-1); }
	if (d_g_state->num_cpus_cores <= 0) { ShowError("d_g_state->num_cpus == 0"); exit(-1); }

	//-------------------------------------------------------
	//1, prepare buffer to store intermediate results
	//-------------------------------------------------------
	keyval_arr_t *d_keyval_arr_p;
	int *count = NULL;

	//---------------------------------------------
	//3, determine the number of threads to run
	//---------------------------------------------
	ShowLog("CPU_GROUP_ID:[%d] the number of cpus used in computation:%d",d_g_state->cpu_group_id, d_g_state->num_cpus_cores);

	//--------------------------------------------------
	//4, start_row_id map
	//--------------------------------------------------
	int num_threads = d_g_state->num_cpus_cores;
	ShowLog("num_threads:%d",num_threads);
	// NOTE(review): ceil-divide plus the remainder increment below over-assigns,
	// but end_row_idx is clamped to num_input_record so later threads get empty ranges.
	int num_records_per_thread = (cpu_job_conf->num_input_record + num_threads-1)/(num_threads);
	int start_row_idx = 0;
	int end_row_idx = 0;

	for (int tid = 0;tid<num_threads;tid++){
		end_row_idx = start_row_idx + num_records_per_thread;
		if (tid < (cpu_job_conf->num_input_record % num_threads) ) end_row_idx++;
		if (end_row_idx > cpu_job_conf->num_input_record) end_row_idx = cpu_job_conf->num_input_record;
		d_g_state->panda_cpu_task_info[tid].start_row_idx = start_row_idx;
		d_g_state->panda_cpu_task_info[tid].end_row_idx = end_row_idx;
		//ShowLog("hi-1");
		if (pthread_create(&(d_g_state->panda_cpu_task[tid]),NULL,RunPandaCPUCombinerThread,(char *)&(d_g_state->panda_cpu_task_info[tid]))!=0)
			perror("Thread creation failed!\n");
		start_row_idx = end_row_idx;
	}//for

	for (int tid = 0;tid<num_threads;tid++){
		void *exitstat;
		if (pthread_join(d_g_state->panda_cpu_task[tid],&exitstat)!=0) perror("joining failed");
	}//for
	ShowLog("CPU_GROUP_ID:[%d] DONE", d_g_state->cpu_group_id);
}

// Kernel: in-place combiner. For each thread's slice, group _MAP records with
// equal keys (gpu_compare), pass multi-value groups to the user's gpu_combiner,
// and retag singletons as _COMBINE. next_idx is reused to chain duplicate keys.
__global__ void GPUCombiner(gpu_context d_g_state)
{
	//ShowLog("gridDim.x:%d gridDim.y:%d gridDim.z:%d blockDim.x:%d blockDim.y:%d blockDim.z:%d blockIdx.x:%d blockIdx.y:%d blockIdx.z:%d",
	//	gridDim.x,gridDim.y,gridDim.z,blockDim.x,blockDim.y,blockDim.z,blockIdx.x,blockIdx.y,blockIdx.z);

	int num_records_per_thread = (d_g_state.num_input_record + (gridDim.x*blockDim.x*blockDim.y)-1)/(gridDim.x*blockDim.x*blockDim.y);
	int block_start_idx = num_records_per_thread * blockIdx.x * blockDim.x * blockDim.y;
	int thread_start_idx = block_start_idx
		+ ((threadIdx.y*blockDim.x + threadIdx.x)/STRIDE)*num_records_per_thread*STRIDE
		+ ((threadIdx.y*blockDim.x + threadIdx.x)%STRIDE);
	int thread_end_idx = thread_start_idx + num_records_per_thread*STRIDE;
	if (thread_end_idx > d_g_state.num_input_record) thread_end_idx = d_g_state.num_input_record;
	if (thread_start_idx >= thread_end_idx) return;

	keyval_arr_t *kv_arr_p = d_g_state.d_intermediate_keyval_arr_arr_p[thread_start_idx];
	int *buddy = kv_arr_p->shared_buddy;

	//TODO use host function set
	/*for (int idx=0;idx<kv_arr_p->shared_buddy_len;idx++){
		d_g_state.d_intermediate_keyval_total_count[idx] = 0;
	}*/

	int unmerged_shared_arr_len = *kv_arr_p->shared_arr_len;
	// Scratch array of value views for the user combiner.
	val_t *val_t_arr = (val_t *)malloc(sizeof(val_t)*unmerged_shared_arr_len);
	if (val_t_arr == NULL) ShowError("there is no enough memory");
	int num_keyval_pairs_after_combiner = 0;

	for (int i=0; i<unmerged_shared_arr_len;i++){
		// Re-read buffer pointer/len each iteration: gpu_combiner may grow it.
		char *shared_buff = (kv_arr_p->shared_buff);
		int shared_buff_len = *kv_arr_p->shared_buff_len;

		// Descriptor i, counted from the buffer tail.
		keyval_pos_t *head_kv_p = (keyval_pos_t *)(shared_buff + shared_buff_len - sizeof(keyval_pos_t)*(unmerged_shared_arr_len-i));
		keyval_pos_t *first_kv_p = head_kv_p;

		// Already consumed into an earlier group (next_idx rewritten) — skip.
		if (first_kv_p->next_idx != _MAP) continue;

		int iKeySize = first_kv_p->keySize;
		char *iKey = shared_buff + first_kv_p->keyPos;
		char *iVal = shared_buff + first_kv_p->valPos;

		if((first_kv_p->keyPos%4!=0)||(first_kv_p->valPos%4!=0)){
			ShowError("keyPos or valPos is not aligned with 4 bytes, results could be wrong");
		}

		int index = 0;
		first_kv_p = head_kv_p;
		(val_t_arr[index]).valSize = first_kv_p->valSize;
		(val_t_arr[index]).val = (char*)shared_buff + first_kv_p->valPos;

		// Chain all later records with an equal key onto this group.
		for (int j=i+1;j<unmerged_shared_arr_len;j++){
			keyval_pos_t *next_kv_p = (keyval_pos_t *)((char *)shared_buff + shared_buff_len - sizeof(keyval_pos_t)*(unmerged_shared_arr_len-j));
			char *jKey = (char *)shared_buff+next_kv_p->keyPos;
			int jKeySize = next_kv_p->keySize;
			if (gpu_compare(iKey,iKeySize,jKey,jKeySize)!=0){
				continue;
			}
			index++;
			first_kv_p->next_idx = j;  // marks record j as consumed
			first_kv_p = next_kv_p;
			(val_t_arr[index]).valSize = next_kv_p->valSize;
			(val_t_arr[index]).val = (char*)shared_buff + next_kv_p->valPos;
		}

		int valCount = index+1;
		if(valCount>1)
			gpu_combiner(iKey,val_t_arr,iKeySize,(valCount),&d_g_state,thread_start_idx);
		else{
			// Singleton: keep the record, just retag it as combined output.
			first_kv_p->next_idx = _COMBINE;
			first_kv_p->task_idx = thread_start_idx;
		}
		num_keyval_pairs_after_combiner++;
	}//for
	free(val_t_arr);
	d_g_state.d_intermediate_keyval_total_count[thread_start_idx] = num_keyval_pairs_after_combiner;
	////////////////////////////////////////////////////////////////////
	__syncthreads();
}//GPUMapPartitioner

// Launch one pthread per CPU core to run the user map function over contiguous
// slices of the input, then join. Returns 0 on completion (exits on bad config).
int StartCPUMap2(thread_info_t* thread_info)
{
	cpu_context *d_g_state = (cpu_context*)(thread_info->d_g_state);
	job_configuration *cpu_job_conf = (job_configuration*)(thread_info->job_conf);

	if (cpu_job_conf->num_input_record<=0) { ShowError("Error: no any input keys"); exit(-1);}
	if (cpu_job_conf->input_keyval_arr == NULL) { ShowError("Error: input_keyval_arr == NULL"); exit(-1);}
	if (d_g_state->num_cpus_cores <= 0) { ShowError("Error: d_g_state->num_cpus == 0"); exit(-1);}

	d_g_state->intermediate_keyval_total_count = (int *)malloc(d_g_state->num_input_record*sizeof(int));
	memset(d_g_state->intermediate_keyval_total_count, 0, d_g_state->num_input_record*sizeof(int));

	//-------------------------------------------------------
	//1, prepare buffer to store intermediate results
	//-------------------------------------------------------
	keyval_arr_t *d_keyval_arr_p;
	int *count = NULL;

	//---------------------------------------------
	//3, determine the number of threads to run
	//---------------------------------------------
	ShowLog("CPU_GROUP_ID:[%d] #num_cpus:%d num_input_record:%d", d_g_state->cpu_group_id, d_g_state->num_cpus_cores, cpu_job_conf->num_input_record);

	//--------------------------------------------------
	//4, start_row_id map
	//--------------------------------------------------
	int num_threads = d_g_state->num_cpus_cores;
	// Floor-divide; the remainder is spread one record each over the first threads.
	int num_records_per_thread = (cpu_job_conf->num_input_record)/(num_threads);
	int start_row_idx = 0;
	int end_row_idx = 0;

	for (int tid = 0;tid<num_threads;tid++){
		end_row_idx = start_row_idx + num_records_per_thread;
		if (tid < (cpu_job_conf->num_input_record % num_threads) ) end_row_idx++;
		d_g_state->panda_cpu_task_info[tid].start_row_idx = start_row_idx;
		if (end_row_idx > cpu_job_conf->num_input_record) end_row_idx = cpu_job_conf->num_input_record;
		d_g_state->panda_cpu_task_info[tid].end_row_idx = end_row_idx;
		if (pthread_create(&(d_g_state->panda_cpu_task[tid]),NULL,RunPandaCPUMapThread,(char *)&(d_g_state->panda_cpu_task_info[tid]))!=0)
			perror("Thread creation failed!\n");
		start_row_idx = end_row_idx;
	}//for

	for (int tid = 0;tid<num_threads;tid++){
		void *exitstat;
		if (pthread_join(d_g_state->panda_cpu_task[tid],&exitstat)!=0) perror("joining failed");
	}//for
	ShowLog("CPU_GROUP_ID:[%d] DONE", d_g_state->cpu_group_id);
	return 0;
}//int

//--------------------------------------------------
// StartGPUCardMap
// Last Update 12/9/2012
//--------------------------------------------------
// Entry point for the GPU-card map stage (continues beyond this chunk).
int StartGPUCardMap(gpu_card_context *d_g_state)
{
	if (d_g_state->num_input_record<=0) { ShowError("Error: no any input keys"); exit(-1);}
	if (d_g_state->input_keyval_arr == NULL) { ShowError("Error: input_keyval_arr == NULL"); exit(-1);}
	//if (d_g_state->num_cpus_cores <= 0) { ShowError("Error: d_g_state->num_cpus == 0"); exit(-1);}
	//if (d_g_state->num_mappers<=0) {d_g_state->num_mappers = (NUM_BLOCKS)*(NUM_THREADS);}
	//if (d_g_state->num_reducers<=0) {d_g_state->num_reducers = (NUM_BLOCKS)*(NUM_THREADS);}

	d_g_state->intermediate_keyval_total_count = (int
*)malloc(d_g_state->num_input_record*sizeof(int)); memset(d_g_state->intermediate_keyval_total_count, 0, d_g_state->num_input_record*sizeof(int)); //------------------------------------------------------- //1, prepare buffer to store intermediate results //------------------------------------------------------- keyval_arr_t *d_keyval_arr_p; int *count = NULL; //--------------------------------------------- //3, determine the number of threads to run //--------------------------------------------- ShowLog("GPU_CARD_GROUP_ID:[%d] num_input_record:%d", d_g_state->gpu_group_id, d_g_state->num_input_record); //-------------------------------------------------- //4, start_row_id map //-------------------------------------------------- //TODO int num_threads = 1; int num_records_per_thread = (d_g_state->num_input_record)/(num_threads); int start_row_idx = 0; int end_row_idx = 0; /*for (int iter = 0; iter< totalIter; iter++){*/ RunGPUCardMapFunction(d_g_state, 0, 1); /*for (int tid = 0;tid<num_threads;tid++){ end_row_idx = start_row_idx + num_records_per_thread; if (tid < (d_g_state->num_input_record % num_threads) ) end_row_idx++; d_g_state->panda_cpu_task_info[tid].start_row_idx = start_row_idx; if (end_row_idx > cpu_job_conf->num_input_record) end_row_idx = cpu_job_conf->num_input_record; d_g_state->panda_cpu_task_info[tid].end_row_idx = end_row_idx; if (pthread_create(&(d_g_state->panda_cpu_task[tid]),NULL,RunPandaCPUMapThread,(char *)&(d_g_state->panda_cpu_task_info[tid]))!=0) perror("Thread creation failed!\n"); start_row_idx = end_row_idx; }//for*/ /*for (int tid = 0;tid<num_threads;tid++){ void *exitstat; if (pthread_join(d_g_state->panda_cpu_task[tid],&exitstat)!=0) perror("joining failed"); }//for*/ //---------------------------------------------- //0, Check status of d_g_state; //---------------------------------------------- //------------------------------------------------------- //1, prepare buffer to store intermediate results 
//------------------------------------------------------- //GPUCardMapPartitioner<<<1,1>>>(*d_g_state); cudaThreadSynchronize(); double t2 = PandaTimer(); //int num_records_per_thread = (d_g_state->num_input_record/d_g_state->num_input_record); //int totalIter = num_records_per_thread; //ShowLog("GPUMapPartitioner:%f totalIter:%d",t2-t1, totalIter); /*for (int iter = 0; iter< totalIter; iter++){ double t3 = PandaTimer(); //RunGPUMapTasks<<<grids,blocks>>>(*d_g_state, totalIter -1 - iter, totalIter); //RunGPUCardMapFunction(*d_g_state, totalIter -1 - iter, totalIter); ////////////////////////////////////////////////////// //////////////////////////////////// cudaThreadSynchronize(); double t4 = PandaTimer(); size_t total_mem,avail_mem; checkCudaErrors(cudaMemGetInfo( &avail_mem, &total_mem )); ShowLog("GPU_ID:[%d] RunGPUMapTasks take %f sec at iter [%d/%d] remain %d mb GPU mem processed", d_g_state->gpu_id, t4-t3,iter,totalIter, avail_mem/1024/1024); }*///for ShowLog("GPU_CARD_ID:[%d] Done %d Tasks",d_g_state->gpu_id,d_g_state->num_input_record); return 0; }//int //-------------------------------------------------- // StartGPUCoreMap // Last Update 9/2/2012 //-------------------------------------------------- int StartGPUCoreMap(gpu_context *d_g_state) { //------------------------------------------------------- //0, Check status of d_g_state; //------------------------------------------------------- ShowLog("GPU_ID:[%d] num_input_record %d", d_g_state->gpu_id, d_g_state->num_input_record); if (d_g_state->num_input_record<0) { ShowLog("Error: no any input keys"); exit(-1);} if (d_g_state->h_input_keyval_arr == NULL) { ShowLog("Error: h_input_keyval_arr == NULL"); exit(-1);} if (d_g_state->num_mappers<=0) {d_g_state->num_mappers = (NUM_BLOCKS)*(NUM_THREADS);} if (d_g_state->num_reducers<=0) {d_g_state->num_reducers = (NUM_BLOCKS)*(NUM_THREADS);} //------------------------------------------------------- //1, prepare buffer to store intermediate results 
//------------------------------------------------------- keyval_arr_t *h_keyval_arr_arr = (keyval_arr_t *)malloc(sizeof(keyval_arr_t)*d_g_state->num_input_record); keyval_arr_t *d_keyval_arr_arr; checkCudaErrors(cudaMalloc((void**)&(d_keyval_arr_arr),d_g_state->num_input_record*sizeof(keyval_arr_t))); for (int i=0; i<d_g_state->num_input_record;i++){ h_keyval_arr_arr[i].arr = NULL; h_keyval_arr_arr[i].arr_len = 0; }//for keyval_arr_t **d_keyval_arr_arr_p; checkCudaErrors(cudaMalloc((void***)&(d_keyval_arr_arr_p),d_g_state->num_input_record*sizeof(keyval_arr_t*))); d_g_state->d_intermediate_keyval_arr_arr_p = d_keyval_arr_arr_p; int *count = NULL; checkCudaErrors(cudaMalloc((void**)&(count),d_g_state->num_input_record*sizeof(int))); d_g_state->d_intermediate_keyval_total_count = count; checkCudaErrors(cudaMemset(d_g_state->d_intermediate_keyval_total_count,0,d_g_state->num_input_record*sizeof(int))); //TODO //printData3<<<1,1>>>(*d_g_state); //---------------------------------------------- //3, determine the number of threads to run //---------------------------------------------- //-------------------------------------------------- //4, start_row_id map //Note: DO *NOT* set large number of threads within block (512), which lead to too many invocation of malloc in the kernel. 
//-------------------------------------------------- cudaThreadSynchronize(); int numGPUCores = getGPUCoresNum(); dim3 blocks(THREAD_BLOCK_SIZE, THREAD_BLOCK_SIZE); int numBlocks = (numGPUCores*16+(blocks.x*blocks.y)-1)/(blocks.x*blocks.y); dim3 grids(numBlocks, 1); int total_gpu_threads = (grids.x*grids.y*blocks.x*blocks.y); ShowLog("GridDim.X:%d GridDim.Y:%d BlockDim.X:%d BlockDim.Y:%d TotalGPUThreads:%d",grids.x,grids.y,blocks.x,blocks.y,total_gpu_threads); cudaDeviceSynchronize(); double t1 = PandaTimer(); GPUMapPartitioner<<<grids,blocks>>>(*d_g_state); cudaThreadSynchronize(); double t2 = PandaTimer(); int num_records_per_thread = (d_g_state->num_input_record + (total_gpu_threads)-1)/(total_gpu_threads); int totalIter = num_records_per_thread; ShowLog("GPUMapPartitioner:%f totalIter:%d",t2-t1, totalIter); for (int iter = 0; iter< totalIter; iter++){ double t3 = PandaTimer(); RunGPUMapTasks<<<grids,blocks>>>(*d_g_state, totalIter -1 - iter, totalIter); cudaThreadSynchronize(); double t4 = PandaTimer(); size_t total_mem,avail_mem; checkCudaErrors(cudaMemGetInfo( &avail_mem, &total_mem )); ShowLog("GPU_ID:[%d] RunGPUMapTasks take %f sec at iter [%d/%d] remain %d mb GPU mem processed", d_g_state->gpu_id, t4-t3,iter,totalIter, avail_mem/1024/1024); }//for ShowLog("GPU_ID:[%d] Done %d Tasks",d_g_state->gpu_id,d_g_state->num_input_record); return 0; }//int void DestroyDGlobalState(gpu_context * d_g_state){ }//void void StartGPUCombiner(gpu_context * state){ double t1 = PandaTimer(); ShowLog("state->num_input_record:%d",state->num_input_record); checkCudaErrors(cudaMemset(state->d_intermediate_keyval_total_count,0,state->num_input_record*sizeof(int))); int numGPUCores = getGPUCoresNum(); dim3 blocks(THREAD_BLOCK_SIZE, THREAD_BLOCK_SIZE); int numBlocks = (numGPUCores*16+(blocks.x*blocks.y)-1)/(blocks.x*blocks.y); dim3 grids(numBlocks, 1); GPUCombiner<<<grids,blocks>>>(*state); cudaThreadSynchronize(); double t2 = PandaTimer(); ShowLog("GPU_ID:[%d] GPUCombiner take:%f 
sec",state->gpu_id, t2-t1); } void StartGPUShuffle(gpu_context * state){ gpu_context* d_g_state = state; double t1 = PandaTimer(); Shuffle4GPUOutput(d_g_state); double t2 = PandaTimer(); ShowLog("GPU_ID:[%d] GPUShuffle take %f sec", state->gpu_id,t2-t1); }//void void *RunPandaCPUCombinerThread(void *ptr){ //ShowLog("hi0"); panda_cpu_task_info_t *panda_cpu_task_info = (panda_cpu_task_info_t *)ptr; cpu_context *d_g_state = (cpu_context *)(panda_cpu_task_info->d_g_state); job_configuration *cpu_job_conf = (job_configuration *)(panda_cpu_task_info->cpu_job_conf); //keyval_t * input_keyval_arr; //keyval_arr_t *intermediate_keyval_arr_arr_p = d_g_state->intermediate_keyval_arr_arr_p; int index = 0; keyvals_t * merged_keyvals_arr = NULL; int merged_key_arr_len = 0; int start_idx = panda_cpu_task_info->start_row_idx; keyval_arr_t *kv_arr_p = (keyval_arr_t *)&(d_g_state->intermediate_keyval_arr_arr_p[start_idx]); int unmerged_shared_arr_len = *kv_arr_p->shared_arr_len; int *shared_buddy = kv_arr_p->shared_buddy; int shared_buddy_len = kv_arr_p->shared_buddy_len; //ShowLog("hi1"); char *shared_buff = kv_arr_p->shared_buff; int shared_buff_len = *kv_arr_p->shared_buff_len; int shared_buff_pos = *kv_arr_p->shared_buff_pos; val_t *val_t_arr = (val_t *)malloc(sizeof(val_t)*unmerged_shared_arr_len); if (val_t_arr == NULL) ShowError("there is no enough memory"); int num_keyval_pairs_after_combiner = 0; int total_intermediate_keyvalue_pairs = 0; //ShowLog("hi2"); for (int i = 0; i < unmerged_shared_arr_len; i++){ keyval_pos_t *head_kv_p = (keyval_pos_t *)(shared_buff + shared_buff_len - sizeof(keyval_pos_t)*(unmerged_shared_arr_len-i)); keyval_pos_t *first_kv_p = head_kv_p; if (first_kv_p->next_idx != _MAP) continue; //ShowLog("hi3"); int iKeySize = first_kv_p->keySize; char *iKey = shared_buff + first_kv_p->keyPos; char *iVal = shared_buff + first_kv_p->valPos; if((first_kv_p->keyPos%4!=0)||(first_kv_p->valPos%4!=0)){ ShowError("keyPos or valPos is not aligned with 4 bytes, 
results could be wrong"); }// int index = 0; first_kv_p = head_kv_p; (val_t_arr[index]).valSize = first_kv_p->valSize; (val_t_arr[index]).val = (char*)shared_buff + first_kv_p->valPos; //ShowLog("hi i:%d",i); for (int j=i+1;j<unmerged_shared_arr_len;j++){ keyval_pos_t *next_kv_p = (keyval_pos_t *)((char *)shared_buff + shared_buff_len - sizeof(keyval_pos_t)*(unmerged_shared_arr_len-j)); char *jKey = (char *)shared_buff+next_kv_p->keyPos; int jKeySize = next_kv_p->keySize; if (cpu_compare(iKey,iKeySize,jKey,jKeySize)!=0){ continue; } index++; first_kv_p->next_idx = j; first_kv_p = next_kv_p; (val_t_arr[index]).valSize = next_kv_p->valSize; (val_t_arr[index]).val = (char*)shared_buff + next_kv_p->valPos; } int valCount = index+1; total_intermediate_keyvalue_pairs += valCount; if(valCount>1) cpu_combiner(iKey,val_t_arr,iKeySize,(valCount),d_g_state,start_idx); else{ first_kv_p->next_idx = _COMBINE; first_kv_p->task_idx = start_idx; } num_keyval_pairs_after_combiner++; }//for free(val_t_arr); d_g_state->intermediate_keyval_total_count[start_idx] = num_keyval_pairs_after_combiner; ShowLog("CPU_GROUP_ID:[%d] Map_Idx:%d Done:%d Combiner: %d => %d Compress Ratio:%f", d_g_state->cpu_group_id, panda_cpu_task_info->start_row_idx, panda_cpu_task_info->end_row_idx - panda_cpu_task_info->start_row_idx, total_intermediate_keyvalue_pairs, num_keyval_pairs_after_combiner, (num_keyval_pairs_after_combiner/(float)total_intermediate_keyvalue_pairs) ); return NULL; } int *shared_arr_len; int *shared_buddy; int shared_buddy_len; char *shared_buff; int *shared_buff_len; int *shared_buff_pos; //int keyval_pos; int arr_len; keyval_pos_t *arr; keyval_t *cpu_arr; void *RunPandaCPUMapThread(void *ptr){ panda_cpu_task_info_t *panda_cpu_task_info = (panda_cpu_task_info_t *)ptr; cpu_context *d_g_state = (cpu_context *)(panda_cpu_task_info->d_g_state); job_configuration *cpu_job_conf = (job_configuration *)(panda_cpu_task_info->cpu_job_conf); int start_row_idx = 
panda_cpu_task_info->start_row_idx; int end_row_idx = panda_cpu_task_info->end_row_idx; char *buff = (char *)malloc(sizeof(char)*CPU_SHARED_BUFF_SIZE); int *int_arr = (int *)malloc(sizeof(int)*(end_row_idx-start_row_idx+3)); int *buddy = int_arr+3; int buddy_len = end_row_idx-start_row_idx; for (int i=0;i<buddy_len;i++){ buddy [i]=i+start_row_idx; }//for //ShowLog("start_idx:%d end_idx:%d",start_row_idx, end_row_idx); for (int map_idx = start_row_idx; map_idx < end_row_idx; map_idx++){ d_g_state->intermediate_keyval_arr_arr_p[map_idx].shared_buff = buff; (d_g_state->intermediate_keyval_arr_arr_p[map_idx].shared_buff_len) = int_arr; (d_g_state->intermediate_keyval_arr_arr_p[map_idx].shared_buff_pos) = int_arr+1; (d_g_state->intermediate_keyval_arr_arr_p[map_idx].shared_arr_len) = int_arr+2; *(d_g_state->intermediate_keyval_arr_arr_p[map_idx].shared_buff_len) = CPU_SHARED_BUFF_SIZE; *(d_g_state->intermediate_keyval_arr_arr_p[map_idx].shared_buff_pos) = 0; *(d_g_state->intermediate_keyval_arr_arr_p[map_idx].shared_arr_len) = 0; (d_g_state->intermediate_keyval_arr_arr_p[map_idx].shared_buddy) = buddy; (d_g_state->intermediate_keyval_arr_arr_p[map_idx].shared_buddy_len) = buddy_len; //ShowWarn("---->(d_g_state->intermediate_keyval_arr_arr_p[%d].shared_buddy_len=:%d)", // map_idx,(d_g_state->intermediate_keyval_arr_arr_p[map_idx].shared_buddy_len)); }//for for (int map_idx = panda_cpu_task_info->start_row_idx; map_idx < panda_cpu_task_info->end_row_idx; map_idx++){ keyval_t *kv_p = (keyval_t *)(&(cpu_job_conf->input_keyval_arr[map_idx])); cpu_map(kv_p->key,kv_p->val,kv_p->keySize,kv_p->valSize,d_g_state,map_idx); }//for ShowLog("CPU_GROUP_ID:[%d] Done :%d tasks",d_g_state->cpu_group_id, panda_cpu_task_info->end_row_idx - panda_cpu_task_info->start_row_idx); return NULL; } //Use Pthread to process Panda_Reduce GPU Context //http://stackoverflow.com/questions/9139932/cuda-kernels-using-pthreads-missing-configuration-error void * Panda_Reduce(void *ptr){ //GPU Context of 
Threads may conflict with each other. thread_info_t *thread_info = (thread_info_t *)ptr; if(thread_info->device_type == GPU_CORE_ACC){ InitGPUDevice(thread_info); panda_context *panda = (panda_context *)(thread_info->panda); gpu_context *d_g_state = (gpu_context *)(thread_info->d_g_state); int num_gpu_core_groups = d_g_state->num_gpu_core_groups; if ( num_gpu_core_groups <= 0){ ShowError("num_gpu_core_groups == 0 return"); return NULL; }//if AddReduceInputRecordGPU(d_g_state,(panda->sorted_intermediate_keyvals_arr), thread_info->start_idx, thread_info->end_idx); int tid = thread_info->tid; int assigned_gpu_id = d_g_state->gpu_id; int gpu_id; cudaGetDevice(&gpu_id); ShowLog("Start GPU Reduce Tasks. Number of Reduce Tasks:%d Tid:%d gpu_id:%d num_gpu_core_groups:%d",d_g_state->d_sorted_keyvals_arr_len, tid, gpu_id, num_gpu_core_groups); StartGPUReduce(d_g_state); }//if if(thread_info->device_type == CPU_ACC){ cpu_context *d_g_state = (cpu_context *)(thread_info->d_g_state); if (d_g_state->num_cpus_cores == 0){ ShowError("num_cpus_cores == 0 return"); return NULL; }//if ShowLog("Start CPU Reduce Tasks. 
Number of Reduce Tasks:%d",d_g_state->sorted_keyvals_arr_len); for (int map_idx = 0; map_idx < d_g_state->sorted_keyvals_arr_len; map_idx++){ keyvals_t *kv_p = (keyvals_t *)(&(d_g_state->sorted_intermediate_keyvals_arr[map_idx])); if (kv_p->val_arr_len <=0) ShowError("kv_p->val_arr_len <=0"); else cpu_reduce(kv_p->key, kv_p->vals, kv_p->keySize, kv_p->val_arr_len, d_g_state); }//for }//if return NULL; }//void __device__ void *GetVal(void *vals, int4* interOffsetSizes, int keyIndex, int valStartIndex) { } __device__ void *GetKey(void *key, int4* interOffsetSizes, int keyIndex, int valStartIndex) { } //------------------------------------------------------- //Reducer //------------------------------------------------------- __global__ void ReducePartitioner(gpu_context d_g_state) { int num_records_per_thread = (d_g_state.d_sorted_keyvals_arr_len + (gridDim.x*blockDim.x*blockDim.y)-1)/(gridDim.x*blockDim.x*blockDim.y); int block_start_idx = num_records_per_thread * blockIdx.x * blockDim.x * blockDim.y; int thread_start_idx = block_start_idx + ((threadIdx.y*blockDim.x + threadIdx.x)/STRIDE)*num_records_per_thread*STRIDE + ((threadIdx.y*blockDim.x + threadIdx.x)%STRIDE); int thread_end_idx = thread_start_idx + num_records_per_thread*STRIDE; if (thread_end_idx > d_g_state.d_sorted_keyvals_arr_len) thread_end_idx = d_g_state.d_sorted_keyvals_arr_len; if (thread_start_idx >= thread_end_idx) return; int start_idx, end_idx; for(int reduce_task_idx=thread_start_idx; reduce_task_idx < thread_end_idx; reduce_task_idx+=STRIDE){ if (reduce_task_idx==0) start_idx = 0; else start_idx = d_g_state.d_pos_arr_4_sorted_keyval_pos_arr[reduce_task_idx-1]; end_idx = d_g_state.d_pos_arr_4_sorted_keyval_pos_arr[reduce_task_idx]; val_t *val_t_arr = (val_t*)malloc(sizeof(val_t)*(end_idx-start_idx)); int keySize = d_g_state.d_keyval_pos_arr[start_idx].keySize; int keyPos = d_g_state.d_keyval_pos_arr[start_idx].keyPos; void *key = (char*)d_g_state.d_sorted_keys_shared_buff+keyPos; for (int index 
= start_idx;index<end_idx;index++){ int valSize = d_g_state.d_keyval_pos_arr[index].valSize; int valPos = d_g_state.d_keyval_pos_arr[index].valPos; val_t_arr[index-start_idx].valSize = valSize; val_t_arr[index-start_idx].val = (char*)d_g_state.d_sorted_vals_shared_buff + valPos; } //for if( end_idx - start_idx == 0) ShowError("gpu_reduce valCount ==0"); else gpu_reduce(key, val_t_arr, keySize, end_idx-start_idx, d_g_state); }//for } void StartGPUReduce(gpu_context *d_g_state) { cudaThreadSynchronize(); d_g_state->d_reduced_keyval_arr_len = d_g_state->d_sorted_keyvals_arr_len; checkCudaErrors(cudaMalloc((void **)&(d_g_state->d_reduced_keyval_arr), sizeof(keyval_t)*d_g_state->d_reduced_keyval_arr_len)); cudaThreadSynchronize(); int numGPUCores = getGPUCoresNum(); dim3 blocks(THREAD_BLOCK_SIZE, THREAD_BLOCK_SIZE); int numBlocks = (numGPUCores*16+(blocks.x*blocks.y)-1)/(blocks.x*blocks.y); dim3 grids(numBlocks, 1); int total_gpu_threads = (grids.x*grids.y*blocks.x*blocks.y); ShowLog("number of reduce tasks:%d total gpu threads:%d",d_g_state->d_sorted_keyvals_arr_len, total_gpu_threads); ReducePartitioner<<<grids,blocks>>>(*d_g_state); cudaThreadSynchronize(); }//void void* Panda_Map(void *ptr){ thread_info_t *thread_info = (thread_info_t *)ptr; if(thread_info->device_type == GPU_CORE_ACC){ double t1 = PandaTimer(); gpu_context *d_g_state = (gpu_context *)(thread_info->d_g_state); InitGPUDevice(thread_info); //ShowLog("GPU_ID:[%d] Init GPU MapReduce Load Data From Host to GPU memory",d_g_state->gpu_id); InitGPUMapReduce3(d_g_state); ShowLog("GPU_ID:[%d] Start GPU CORE Map Tasks",d_g_state->gpu_id); StartGPUCoreMap(d_g_state); double t2 = PandaTimer(); //Local combiner if(d_g_state->local_combiner){ StartGPUCombiner(d_g_state); } double t3 = PandaTimer(); StartGPUShuffle(d_g_state); double t4 = PandaTimer(); DoLog2Disk(" GPU Map take %f sec",t2-t1); DoLog2Disk(" GPU Combiner take %f sec",t3-t2); DoLog2Disk(" GPU Shuffle take %f sec",t4-t3); }//if 
if(thread_info->device_type == GPU_CARD_ACC){ double t1 = PandaTimer(); gpu_card_context *d_g_state = (gpu_card_context *)(thread_info->d_g_state); InitGPUDevice(thread_info); //ShowLog("GPU_ID:[%d] Init GPU MapReduce Load Data From Host to GPU memory",d_g_state->gpu_id); InitGPUCardMapReduce(d_g_state); ShowLog("GPU_ID:[%d] Start GPU CARD Map Tasks",d_g_state->gpu_id); StartGPUCardMap(d_g_state); double t2 = PandaTimer(); //Local combiner if(d_g_state->local_combiner){ //StartGPUCombiner(d_g_state); } double t3 = PandaTimer(); //StartGPUShuffle(d_g_state); double t4 = PandaTimer(); DoLog2Disk(" GPU Map take %f sec",t2-t1); DoLog2Disk(" GPU Combiner take %f sec",t3-t2); DoLog2Disk(" GPU Shuffle take %f sec",t4-t3); }//if if(thread_info->device_type == CPU_ACC){ double t1 = PandaTimer(); cpu_context *d_g_state = (cpu_context *)(thread_info->d_g_state); //ShowLog("CPU_GROUP_ID:[%d] Init CPU Device",d_g_state->cpu_group_id); InitCPUDevice(thread_info); //ShowLog("Init CPU MapReduce"); InitCPUMapReduce2(thread_info); ShowLog("CPU_GROUP_ID:[%d] Start CPU Map Tasks",d_g_state->cpu_group_id); StartCPUMap2(thread_info); double t2 = PandaTimer(); if(d_g_state->local_combiner){ StartCPUCombiner(thread_info); } ShowLog("CPU_GROUP_ID:[%d] Start CPU Shuffle2",d_g_state->cpu_group_id); double t3 = PandaTimer(); StartCPUShuffle2(thread_info); double t4 = PandaTimer(); DoLog2Disk(" CPU Map take %f sec",t2-t1); DoLog2Disk(" CPU Combiner take %f sec",t3-t2); DoLog2Disk(" CPU Shuffle take %f sec",t4-t3); } return NULL; }//FinishMapReduce2(d_g_state); void FinishMapReduce(Spec_t* spec) { ShowLog( "=====finish panda mapreduce====="); }//void void FinishMapReduce2(gpu_context* state) { size_t total_mem,avail_mem, heap_limit; checkCudaErrors(cudaMemGetInfo( &avail_mem, &total_mem )); ShowLog("avail_mem:%d",avail_mem); }//void #endif //__PANDALIB_CU__
638c92ed191c460e13473d5befe4d6cf8a40f5a9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2020 Facebook * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "model.h" #include "cuda_helper.h" Tensor FFModel::transpose(const Tensor& input, const std::vector<int>& perm, const char* name) { Transpose* transpose = new Transpose(*this, input, perm, name); layers.push_back(transpose); return transpose->outputs[0]; } Transpose::Transpose(FFModel& model, const Tensor& input, const std::vector<int>& _perm, const char* name) : Op(model, OP_TRANSPOSE, name, input) { assert(_perm.size() == input.numDim); // Use Legion indexing to store perm for (int i = 0; i < input.numDim; i++) perm[i] = input.numDim - 1 - _perm[input.numDim - 1 - i]; outputs[0].numDim = input.numDim; for (int i = 0; i < outputs[0].numDim; i++) outputs[0].adim[i] = input.adim[perm[i]]; numOutputs = 1; numWeights = 0; } void Transpose::create_weights(FFModel& model) { // Do nothing } void Transpose::create_output_and_partition(FFModel& model) { int dim = inputs[0].numDim; switch (dim) { #define DIMFUNC(DIM) \ case DIM: \ { \ task_is = model.get_or_create_task_is(DIM, name); \ create_output_and_partition_with_dim<DIM>(model); \ break; \ } LEGION_FOREACH_N(DIMFUNC) #undef DIMFUNC default: { // Unsupported dim for Transpose operator assert(false); } } } template<int NDIM> void Transpose::create_output_and_partition_with_dim(FFModel& model) { // Retrive the task indexspace for the op task_is = 
IndexSpaceT<NDIM>(model.get_or_create_task_is(NDIM, name)); Context ctx = model.config.lg_ctx; Runtime* runtime = model.config.lg_hlr; Rect<NDIM> part_rect = runtime->get_index_space_domain(ctx, task_is); // Current require all dimensions being transposed should not be partitioned for (int i = 0; i < NDIM; i++) if (i != perm[i]) assert(part_rect.hi[i] == part_rect.lo[i]); int dims[NDIM]; for (int i = 0; i < NDIM; i++) dims[i] = outputs[0].adim[NDIM-1-i]; outputs[0] = model.create_tensor<NDIM>(dims, DT_FLOAT, this); outputs[0].owner_op = this; outputs[0].owner_idx = 0; Rect<NDIM> input_rect; input_rect = runtime->get_index_partition_color_space( ctx, inputs[0].part.get_index_partition()); if (input_rect == part_rect) { input_lps[0] = inputs[0].part; input_grad_lps[0] = inputs[0].part_grad; } else { model.create_disjoint_partition<NDIM>( inputs[0], IndexSpaceT<NDIM>(task_is), input_lps[0], input_grad_lps[0]); } } void Transpose::init_meta(TransposeMeta *m, Domain const &in_domain, Domain const &out_domain) const { for (int i = 0; i < out_domain.get_dim(); i++) { assert(out_domain.hi()[i] == in_domain.hi()[this->perm[i]]); assert(out_domain.lo()[i] == in_domain.lo()[this->perm[i]]); } m->num_dim = out_domain.get_dim(); for (int i = 0; i < m->num_dim; i++) m->perm[i] = this->perm[i]; } OpMeta* Transpose::init_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { assert(regions.size() == 2); assert(task->regions.size() == 2); const Transpose* transpose = (const Transpose*) task->args; FFHandler handle = *((const FFHandler*) task->local_args); Domain in_domain = runtime->get_index_space_domain( ctx, task->regions[0].region.get_index_space()); Domain out_domain = runtime->get_index_space_domain( ctx, task->regions[1].region.get_index_space()); TransposeMeta* m = new TransposeMeta(handle); transpose->init_meta(m, in_domain, out_domain); m->profiling = transpose->profiling; return m; } void Transpose::init(const FFModel& ff) { 
ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; Domain domain = runtime->get_index_space_domain(ctx, task_is); switch (domain.get_dim()) { #define DIMFUNC(DIM) \ case DIM: \ { \ Rect<DIM> rect = domain; \ ParallelConfig pc; \ std::string pcname = name; \ ff.config.find_parallel_config(DIM, pcname, pc); \ int idx = 0; \ for (PointInRectIterator<DIM> it(rect); it(); it++) { \ FFHandler handle = ff.handlers[pc.device_ids[idx++]]; \ argmap.set_point(*it, TaskArgument(&handle, sizeof(FFHandler))); \ } \ break; \ } LEGION_FOREACH_N(DIMFUNC) #undef DIMFUNC default: assert(false); } IndexLauncher launcher(TRANSPOSE_INIT_TASK_ID, task_is, TaskArgument(this, sizeof(Transpose)), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); launcher.add_region_requirement( RegionRequirement(input_lps[0], 0/*projection id*/, READ_ONLY, EXCLUSIVE, inputs[0].region)); launcher.add_field(0, FID_DATA); launcher.add_region_requirement( RegionRequirement(outputs[0].part, 0/*projection id*/, WRITE_ONLY, EXCLUSIVE, outputs[0].region)); launcher.add_field(1, FID_DATA); FutureMap fm = runtime->execute_index_space(ctx, launcher); fm.wait_all_results(); switch (domain.get_dim()) { #define DIMFUNC(DIM) \ case DIM: \ { \ Rect<DIM> rect = domain; \ int idx = 0; \ for (PointInRectIterator<DIM> it(rect); it(); it++) { \ meta[idx++] = fm.get_result<OpMeta*>(*it); \ } \ break; \ } LEGION_FOREACH_N(DIMFUNC) #undef DIMFUNC default: assert(false); } } struct TransposeStrides { int num_dim; int in_strides[MAX_TENSOR_DIM], out_strides[MAX_TENSOR_DIM], perm[MAX_TENSOR_DIM]; }; __global__ void transpose_simple_kernel(coord_t volume, const float* in_ptr, float* out_ptr, const TransposeStrides info, const float beta) { CUDA_KERNEL_LOOP(o_idx, volume) { coord_t i_idx = 0; coord_t t = o_idx; for (int i = info.num_dim-1; i >= 0; i--) { coord_t ratio = t / info.out_strides[i]; t -= ratio * info.out_strides[i]; i_idx += ratio * 
info.in_strides[info.perm[i]]; } out_ptr[o_idx] += out_ptr[o_idx] * beta + in_ptr[i_idx]; } } /*static*/ void Transpose::forward_kernel(const TransposeMeta* m, const float* input_ptr, float* output_ptr, Domain in_domain, Domain out_domain) { TransposeStrides info; info.num_dim = out_domain.get_dim(); assert(info.num_dim == m->num_dim); for (int i = 0; i < info.num_dim; i++) { int in_dim_size = (in_domain.hi()[i] - in_domain.lo()[i] + 1); int out_dim_size = (out_domain.hi()[i] - out_domain.lo()[i] + 1); info.in_strides[i] = (i == 0) ? 1 : info.in_strides[i-1] * in_dim_size; info.out_strides[i] = (i == 0) ? 1 : info.out_strides[i-1] * out_dim_size; info.perm[i] = m->perm[i]; } hipLaunchKernelGGL(( transpose_simple_kernel), dim3(GET_BLOCKS(out_domain.get_volume())), dim3(CUDA_NUM_THREADS), 0, 0, out_domain.get_volume(), input_ptr, output_ptr, info, 0.0f/*beta*/); } __host__ void Transpose::forward_task(const Task* task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime* runtime) { assert(regions.size() == 2); assert(task->regions.size() == 2); //const Transpose* transpose = (const Transpose*) task->args; const TransposeMeta* m = *((TransposeMeta**) task->local_args); Domain in_domain = runtime->get_index_space_domain( ctx, task->regions[0].region.get_index_space()); Domain out_domain = runtime->get_index_space_domain( ctx, task->regions[1].region.get_index_space()); for (int i = 0; i < out_domain.get_dim(); i++) { assert(out_domain.hi()[i] == in_domain.hi()[m->perm[i]]); assert(out_domain.lo()[i] == in_domain.lo()[m->perm[i]]); } const float* in_ptr = helperGetTensorPointerRO<float>( regions[0], task->regions[0], FID_DATA, ctx, runtime); float* out_ptr = helperGetTensorPointerWO<float>( regions[1], task->regions[1], FID_DATA, ctx, runtime); forward_kernel(m, in_ptr, out_ptr, in_domain, out_domain); } void Transpose::forward(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; Domain domain = 
runtime->get_index_space_domain(ctx, task_is); switch (domain.get_dim()) { #define DIMFUNC(DIM) \ case DIM: \ { \ Rect<DIM> rect = domain; \ int idx = 0; \ for (PointInRectIterator<DIM> it(rect); it(); it++) { \ OpMeta* mp = meta[idx++]; \ argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*))); \ } \ break; \ } LEGION_FOREACH_N(DIMFUNC) #undef DIMFUNC default: assert(false); } IndexLauncher launcher(TRANSPOSE_FWD_TASK_ID, task_is, TaskArgument(NULL, false), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); launcher.add_region_requirement( RegionRequirement(input_lps[0], 0/*projection id*/, READ_ONLY, EXCLUSIVE, inputs[0].region)); launcher.add_field(0, FID_DATA); launcher.add_region_requirement( RegionRequirement(outputs[0].part, 0/*projection id*/, WRITE_ONLY, EXCLUSIVE, outputs[0].region)); launcher.add_field(1, FID_DATA); runtime->execute_index_space(ctx, launcher); } /*static*/ void Transpose::backward_kernel(const TransposeMeta* m, float* input_grad_ptr, const float* output_grad_ptr, Domain in_grad_domain, Domain out_grad_domain) { TransposeStrides info; info.num_dim = in_grad_domain.get_dim(); assert(info.num_dim == m->num_dim); for (int i = 0; i < info.num_dim; i++) { int in_dim_size = (out_grad_domain.hi()[i] - out_grad_domain.lo()[i] + 1); int out_dim_size = (in_grad_domain.hi()[i] - in_grad_domain.lo()[i] + 1); info.in_strides[i] = (i == 0) ? 1 : info.in_strides[i-1] * in_dim_size; info.out_strides[i] = (i == 0) ? 
1 : info.out_strides[i-1] * out_dim_size; info.perm[m->perm[i]] = i; } hipLaunchKernelGGL(( transpose_simple_kernel), dim3(GET_BLOCKS(in_grad_domain.get_volume())), dim3(CUDA_NUM_THREADS), 0, 0, in_grad_domain.get_volume(), output_grad_ptr, input_grad_ptr, info, 1.0f/*beta*/); } __host__ void Transpose::backward_task(const Task* task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime* runtime) { assert(regions.size() == 2); assert(task->regions.size() == 2); //const Transpose* transpose = (const Transpose*) task->args; const TransposeMeta* m = *((TransposeMeta**) task->local_args); Domain out_grad_domain = runtime->get_index_space_domain( ctx, task->regions[0].region.get_index_space()); Domain in_grad_domain = runtime->get_index_space_domain( ctx, task->regions[1].region.get_index_space()); for (int i = 0; i < out_grad_domain.get_dim(); i++) { assert(out_grad_domain.hi()[i] == in_grad_domain.hi()[m->perm[i]]); assert(out_grad_domain.lo()[i] == in_grad_domain.lo()[m->perm[i]]); } const float* out_grad_ptr = helperGetTensorPointerRO<float>( regions[0], task->regions[0], FID_DATA, ctx, runtime); float* in_grad_ptr = helperGetTensorPointerRW<float>( regions[1], task->regions[1], FID_DATA, ctx, runtime); backward_kernel(m, in_grad_ptr, out_grad_ptr, in_grad_domain, out_grad_domain); } void Transpose::backward(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; Domain domain = runtime->get_index_space_domain(ctx, task_is); switch (domain.get_dim()) { #define DIMFUNC(DIM) \ case DIM: \ { \ Rect<DIM> rect = domain; \ int idx = 0; \ for (PointInRectIterator<DIM> it(rect); it(); it++) { \ OpMeta* mp = meta[idx++]; \ argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*))); \ } \ break; \ } LEGION_FOREACH_N(DIMFUNC) #undef DIMFUNC default: assert(false); } IndexLauncher launcher(TRANSPOSE_BWD_TASK_ID, task_is, TaskArgument(NULL, 0), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, 
FFConfig::get_hash_id(std::string(name))); // regions[0](I): output_grad launcher.add_region_requirement( RegionRequirement(outputs[0].part_grad, 0/*projection id*/, READ_ONLY, EXCLUSIVE, outputs[0].region_grad)); launcher.add_field(0, FID_DATA); // regions[1](I/O): input_grad launcher.add_region_requirement( RegionRequirement(input_grad_lps[0], 0/*projection id*/, READ_WRITE, EXCLUSIVE, inputs[0].region_grad)); launcher.add_field(1, FID_DATA); runtime->execute_index_space(ctx, launcher); } bool Transpose::measure_operator_cost(Simulator* sim, const ParallelConfig& pc, CostMetrics& cost_metrics) { Tensor sub_input, sub_output; if (!outputs[0].get_output_sub_tensor(pc, sub_output, op_type)) { return false; } if (!inputs[0].get_input_sub_tensor(pc, sub_input, op_type)) { return false; } TransposeMeta *m = sim->transpose_meta; this->init_meta(m, sub_input.get_domain(), sub_output.get_domain()); sim->free_all(); float *input_ptr = (float *)sim->allocate(sub_input.get_volume(), DT_FLOAT); assert (input_ptr != NULL); float *output_ptr = (float *)sim->allocate(sub_output.get_volume(), DT_FLOAT); assert (output_ptr != NULL); std::function<void()> forward, backward; forward = [&] { forward_kernel(m, input_ptr, output_ptr, sub_input.get_domain(), sub_output.get_domain()); }; if (sim->computationMode == COMP_MODE_TRAINING) { float *input_grad_ptr = (float *)sim->allocate(sub_input.get_volume(), DT_FLOAT); assert (input_grad_ptr != NULL); float *output_grad_ptr = (float *)sim->allocate(sub_output.get_volume(), DT_FLOAT); assert (output_grad_ptr != NULL); backward = [&] { backward_kernel(m, input_grad_ptr, output_grad_ptr, sub_input.get_domain(), sub_output.get_domain()); }; } inner_measure_operator_cost(sim, forward, backward, cost_metrics); if (sim->computationMode == COMP_MODE_TRAINING) { printf("[Measure Transpose] name(%s) forward_time(%.4lf) backward_time(%.4lf)\n", name, cost_metrics.forward_time, cost_metrics.backward_time); } else { printf("[Measure Transpose] name(%s) 
forward_time(%.4lf)\n", name, cost_metrics.forward_time); } return true; }
638c92ed191c460e13473d5befe4d6cf8a40f5a9.cu
/* Copyright 2020 Facebook * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "model.h" #include "cuda_helper.h" Tensor FFModel::transpose(const Tensor& input, const std::vector<int>& perm, const char* name) { Transpose* transpose = new Transpose(*this, input, perm, name); layers.push_back(transpose); return transpose->outputs[0]; } Transpose::Transpose(FFModel& model, const Tensor& input, const std::vector<int>& _perm, const char* name) : Op(model, OP_TRANSPOSE, name, input) { assert(_perm.size() == input.numDim); // Use Legion indexing to store perm for (int i = 0; i < input.numDim; i++) perm[i] = input.numDim - 1 - _perm[input.numDim - 1 - i]; outputs[0].numDim = input.numDim; for (int i = 0; i < outputs[0].numDim; i++) outputs[0].adim[i] = input.adim[perm[i]]; numOutputs = 1; numWeights = 0; } void Transpose::create_weights(FFModel& model) { // Do nothing } void Transpose::create_output_and_partition(FFModel& model) { int dim = inputs[0].numDim; switch (dim) { #define DIMFUNC(DIM) \ case DIM: \ { \ task_is = model.get_or_create_task_is(DIM, name); \ create_output_and_partition_with_dim<DIM>(model); \ break; \ } LEGION_FOREACH_N(DIMFUNC) #undef DIMFUNC default: { // Unsupported dim for Transpose operator assert(false); } } } template<int NDIM> void Transpose::create_output_and_partition_with_dim(FFModel& model) { // Retrive the task indexspace for the op task_is = IndexSpaceT<NDIM>(model.get_or_create_task_is(NDIM, name)); Context ctx = model.config.lg_ctx; Runtime* 
runtime = model.config.lg_hlr; Rect<NDIM> part_rect = runtime->get_index_space_domain(ctx, task_is); // Current require all dimensions being transposed should not be partitioned for (int i = 0; i < NDIM; i++) if (i != perm[i]) assert(part_rect.hi[i] == part_rect.lo[i]); int dims[NDIM]; for (int i = 0; i < NDIM; i++) dims[i] = outputs[0].adim[NDIM-1-i]; outputs[0] = model.create_tensor<NDIM>(dims, DT_FLOAT, this); outputs[0].owner_op = this; outputs[0].owner_idx = 0; Rect<NDIM> input_rect; input_rect = runtime->get_index_partition_color_space( ctx, inputs[0].part.get_index_partition()); if (input_rect == part_rect) { input_lps[0] = inputs[0].part; input_grad_lps[0] = inputs[0].part_grad; } else { model.create_disjoint_partition<NDIM>( inputs[0], IndexSpaceT<NDIM>(task_is), input_lps[0], input_grad_lps[0]); } } void Transpose::init_meta(TransposeMeta *m, Domain const &in_domain, Domain const &out_domain) const { for (int i = 0; i < out_domain.get_dim(); i++) { assert(out_domain.hi()[i] == in_domain.hi()[this->perm[i]]); assert(out_domain.lo()[i] == in_domain.lo()[this->perm[i]]); } m->num_dim = out_domain.get_dim(); for (int i = 0; i < m->num_dim; i++) m->perm[i] = this->perm[i]; } OpMeta* Transpose::init_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { assert(regions.size() == 2); assert(task->regions.size() == 2); const Transpose* transpose = (const Transpose*) task->args; FFHandler handle = *((const FFHandler*) task->local_args); Domain in_domain = runtime->get_index_space_domain( ctx, task->regions[0].region.get_index_space()); Domain out_domain = runtime->get_index_space_domain( ctx, task->regions[1].region.get_index_space()); TransposeMeta* m = new TransposeMeta(handle); transpose->init_meta(m, in_domain, out_domain); m->profiling = transpose->profiling; return m; } void Transpose::init(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; Domain domain = 
runtime->get_index_space_domain(ctx, task_is); switch (domain.get_dim()) { #define DIMFUNC(DIM) \ case DIM: \ { \ Rect<DIM> rect = domain; \ ParallelConfig pc; \ std::string pcname = name; \ ff.config.find_parallel_config(DIM, pcname, pc); \ int idx = 0; \ for (PointInRectIterator<DIM> it(rect); it(); it++) { \ FFHandler handle = ff.handlers[pc.device_ids[idx++]]; \ argmap.set_point(*it, TaskArgument(&handle, sizeof(FFHandler))); \ } \ break; \ } LEGION_FOREACH_N(DIMFUNC) #undef DIMFUNC default: assert(false); } IndexLauncher launcher(TRANSPOSE_INIT_TASK_ID, task_is, TaskArgument(this, sizeof(Transpose)), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); launcher.add_region_requirement( RegionRequirement(input_lps[0], 0/*projection id*/, READ_ONLY, EXCLUSIVE, inputs[0].region)); launcher.add_field(0, FID_DATA); launcher.add_region_requirement( RegionRequirement(outputs[0].part, 0/*projection id*/, WRITE_ONLY, EXCLUSIVE, outputs[0].region)); launcher.add_field(1, FID_DATA); FutureMap fm = runtime->execute_index_space(ctx, launcher); fm.wait_all_results(); switch (domain.get_dim()) { #define DIMFUNC(DIM) \ case DIM: \ { \ Rect<DIM> rect = domain; \ int idx = 0; \ for (PointInRectIterator<DIM> it(rect); it(); it++) { \ meta[idx++] = fm.get_result<OpMeta*>(*it); \ } \ break; \ } LEGION_FOREACH_N(DIMFUNC) #undef DIMFUNC default: assert(false); } } struct TransposeStrides { int num_dim; int in_strides[MAX_TENSOR_DIM], out_strides[MAX_TENSOR_DIM], perm[MAX_TENSOR_DIM]; }; __global__ void transpose_simple_kernel(coord_t volume, const float* in_ptr, float* out_ptr, const TransposeStrides info, const float beta) { CUDA_KERNEL_LOOP(o_idx, volume) { coord_t i_idx = 0; coord_t t = o_idx; for (int i = info.num_dim-1; i >= 0; i--) { coord_t ratio = t / info.out_strides[i]; t -= ratio * info.out_strides[i]; i_idx += ratio * info.in_strides[info.perm[i]]; } out_ptr[o_idx] += out_ptr[o_idx] * beta + in_ptr[i_idx]; } } /*static*/ 
void Transpose::forward_kernel(const TransposeMeta* m, const float* input_ptr, float* output_ptr, Domain in_domain, Domain out_domain) { TransposeStrides info; info.num_dim = out_domain.get_dim(); assert(info.num_dim == m->num_dim); for (int i = 0; i < info.num_dim; i++) { int in_dim_size = (in_domain.hi()[i] - in_domain.lo()[i] + 1); int out_dim_size = (out_domain.hi()[i] - out_domain.lo()[i] + 1); info.in_strides[i] = (i == 0) ? 1 : info.in_strides[i-1] * in_dim_size; info.out_strides[i] = (i == 0) ? 1 : info.out_strides[i-1] * out_dim_size; info.perm[i] = m->perm[i]; } transpose_simple_kernel<<<GET_BLOCKS(out_domain.get_volume()), CUDA_NUM_THREADS>>>( out_domain.get_volume(), input_ptr, output_ptr, info, 0.0f/*beta*/); } __host__ void Transpose::forward_task(const Task* task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime* runtime) { assert(regions.size() == 2); assert(task->regions.size() == 2); //const Transpose* transpose = (const Transpose*) task->args; const TransposeMeta* m = *((TransposeMeta**) task->local_args); Domain in_domain = runtime->get_index_space_domain( ctx, task->regions[0].region.get_index_space()); Domain out_domain = runtime->get_index_space_domain( ctx, task->regions[1].region.get_index_space()); for (int i = 0; i < out_domain.get_dim(); i++) { assert(out_domain.hi()[i] == in_domain.hi()[m->perm[i]]); assert(out_domain.lo()[i] == in_domain.lo()[m->perm[i]]); } const float* in_ptr = helperGetTensorPointerRO<float>( regions[0], task->regions[0], FID_DATA, ctx, runtime); float* out_ptr = helperGetTensorPointerWO<float>( regions[1], task->regions[1], FID_DATA, ctx, runtime); forward_kernel(m, in_ptr, out_ptr, in_domain, out_domain); } void Transpose::forward(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; Domain domain = runtime->get_index_space_domain(ctx, task_is); switch (domain.get_dim()) { #define DIMFUNC(DIM) \ case DIM: \ { \ Rect<DIM> rect = domain; \ int 
idx = 0; \ for (PointInRectIterator<DIM> it(rect); it(); it++) { \ OpMeta* mp = meta[idx++]; \ argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*))); \ } \ break; \ } LEGION_FOREACH_N(DIMFUNC) #undef DIMFUNC default: assert(false); } IndexLauncher launcher(TRANSPOSE_FWD_TASK_ID, task_is, TaskArgument(NULL, false), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); launcher.add_region_requirement( RegionRequirement(input_lps[0], 0/*projection id*/, READ_ONLY, EXCLUSIVE, inputs[0].region)); launcher.add_field(0, FID_DATA); launcher.add_region_requirement( RegionRequirement(outputs[0].part, 0/*projection id*/, WRITE_ONLY, EXCLUSIVE, outputs[0].region)); launcher.add_field(1, FID_DATA); runtime->execute_index_space(ctx, launcher); } /*static*/ void Transpose::backward_kernel(const TransposeMeta* m, float* input_grad_ptr, const float* output_grad_ptr, Domain in_grad_domain, Domain out_grad_domain) { TransposeStrides info; info.num_dim = in_grad_domain.get_dim(); assert(info.num_dim == m->num_dim); for (int i = 0; i < info.num_dim; i++) { int in_dim_size = (out_grad_domain.hi()[i] - out_grad_domain.lo()[i] + 1); int out_dim_size = (in_grad_domain.hi()[i] - in_grad_domain.lo()[i] + 1); info.in_strides[i] = (i == 0) ? 1 : info.in_strides[i-1] * in_dim_size; info.out_strides[i] = (i == 0) ? 
1 : info.out_strides[i-1] * out_dim_size; info.perm[m->perm[i]] = i; } transpose_simple_kernel<<<GET_BLOCKS(in_grad_domain.get_volume()), CUDA_NUM_THREADS>>>( in_grad_domain.get_volume(), output_grad_ptr, input_grad_ptr, info, 1.0f/*beta*/); } __host__ void Transpose::backward_task(const Task* task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime* runtime) { assert(regions.size() == 2); assert(task->regions.size() == 2); //const Transpose* transpose = (const Transpose*) task->args; const TransposeMeta* m = *((TransposeMeta**) task->local_args); Domain out_grad_domain = runtime->get_index_space_domain( ctx, task->regions[0].region.get_index_space()); Domain in_grad_domain = runtime->get_index_space_domain( ctx, task->regions[1].region.get_index_space()); for (int i = 0; i < out_grad_domain.get_dim(); i++) { assert(out_grad_domain.hi()[i] == in_grad_domain.hi()[m->perm[i]]); assert(out_grad_domain.lo()[i] == in_grad_domain.lo()[m->perm[i]]); } const float* out_grad_ptr = helperGetTensorPointerRO<float>( regions[0], task->regions[0], FID_DATA, ctx, runtime); float* in_grad_ptr = helperGetTensorPointerRW<float>( regions[1], task->regions[1], FID_DATA, ctx, runtime); backward_kernel(m, in_grad_ptr, out_grad_ptr, in_grad_domain, out_grad_domain); } void Transpose::backward(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; Domain domain = runtime->get_index_space_domain(ctx, task_is); switch (domain.get_dim()) { #define DIMFUNC(DIM) \ case DIM: \ { \ Rect<DIM> rect = domain; \ int idx = 0; \ for (PointInRectIterator<DIM> it(rect); it(); it++) { \ OpMeta* mp = meta[idx++]; \ argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*))); \ } \ break; \ } LEGION_FOREACH_N(DIMFUNC) #undef DIMFUNC default: assert(false); } IndexLauncher launcher(TRANSPOSE_BWD_TASK_ID, task_is, TaskArgument(NULL, 0), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); // 
regions[0](I): output_grad launcher.add_region_requirement( RegionRequirement(outputs[0].part_grad, 0/*projection id*/, READ_ONLY, EXCLUSIVE, outputs[0].region_grad)); launcher.add_field(0, FID_DATA); // regions[1](I/O): input_grad launcher.add_region_requirement( RegionRequirement(input_grad_lps[0], 0/*projection id*/, READ_WRITE, EXCLUSIVE, inputs[0].region_grad)); launcher.add_field(1, FID_DATA); runtime->execute_index_space(ctx, launcher); } bool Transpose::measure_operator_cost(Simulator* sim, const ParallelConfig& pc, CostMetrics& cost_metrics) { Tensor sub_input, sub_output; if (!outputs[0].get_output_sub_tensor(pc, sub_output, op_type)) { return false; } if (!inputs[0].get_input_sub_tensor(pc, sub_input, op_type)) { return false; } TransposeMeta *m = sim->transpose_meta; this->init_meta(m, sub_input.get_domain(), sub_output.get_domain()); sim->free_all(); float *input_ptr = (float *)sim->allocate(sub_input.get_volume(), DT_FLOAT); assert (input_ptr != NULL); float *output_ptr = (float *)sim->allocate(sub_output.get_volume(), DT_FLOAT); assert (output_ptr != NULL); std::function<void()> forward, backward; forward = [&] { forward_kernel(m, input_ptr, output_ptr, sub_input.get_domain(), sub_output.get_domain()); }; if (sim->computationMode == COMP_MODE_TRAINING) { float *input_grad_ptr = (float *)sim->allocate(sub_input.get_volume(), DT_FLOAT); assert (input_grad_ptr != NULL); float *output_grad_ptr = (float *)sim->allocate(sub_output.get_volume(), DT_FLOAT); assert (output_grad_ptr != NULL); backward = [&] { backward_kernel(m, input_grad_ptr, output_grad_ptr, sub_input.get_domain(), sub_output.get_domain()); }; } inner_measure_operator_cost(sim, forward, backward, cost_metrics); if (sim->computationMode == COMP_MODE_TRAINING) { printf("[Measure Transpose] name(%s) forward_time(%.4lf) backward_time(%.4lf)\n", name, cost_metrics.forward_time, cost_metrics.backward_time); } else { printf("[Measure Transpose] name(%s) forward_time(%.4lf)\n", name, 
cost_metrics.forward_time); } return true; }
a69db1e1666b4df7ab7296255ab9607786ac8254.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 2020 NVIDIA Corporation. All rights reserved. * * NOTICE TO LICENSEE: * * This source code and/or documentation ("Licensed Deliverables") are * subject to NVIDIA intellectual property rights under U.S. and * international Copyright laws. * * These Licensed Deliverables contained herein is PROPRIETARY and * CONFIDENTIAL to NVIDIA and is being provided under the terms and * conditions of a form of NVIDIA software license agreement by and * between NVIDIA and Licensee ("License Agreement") or electronically * accepted by Licensee. Notwithstanding any terms or conditions to * the contrary in the License Agreement, reproduction or disclosure * of the Licensed Deliverables to any third party without the express * written consent of NVIDIA is prohibited. * * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE * OF THESE LICENSED DELIVERABLES. * * U.S. Government End Users. These Licensed Deliverables are a * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT * 1995), consisting of "commercial computer software" and "commercial * computer software documentation" as such terms are used in 48 * C.F.R. 
12.212 (SEPT 1995) and is provided to the U.S. Government * only as a commercial end item. Consistent with 48 C.F.R.12.212 and * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all * U.S. Government End Users acquire the Licensed Deliverables with * only those rights set forth herein. * * Any use of the Licensed Deliverables in individual and commercial * software must include, in the user documentation and internal * comments to the code, the above Disclaimer and U.S. Government End * Users Notice. */ #include <cstdio> #include <cstdlib> #include <vector> #include <hip/hip_runtime.h> #include <cusolverMg.h> #include "cusolverMg_utils.h" #include "cusolver_utils.h" /* compute |x|_inf */ template <typename T> static T vec_nrm_inf(int n, const T *x) { T max_nrm = 0.0; for (int row = 1; row <= n; row++) { T xi = x[IDX1F(row)]; max_nrm = (max_nrm > fabs(xi)) ? max_nrm : fabs(xi); } return max_nrm; } /* A is 1D laplacian, return A(N:-1:1, :) */ template <typename T> static void gen_1d_laplacian(int N, T *A, int lda) { for (int J = 1; J <= N; J++) { /* A(J,J) = 2 */ A[IDX2F(N - J + 1, J, lda)] = 2.0; if ((J - 1) >= 1) { /* A(J, J-1) = -1*/ A[IDX2F(N - J + 1, J - 1, lda)] = -1.0; } if ((J + 1) <= N) { /* A(J, J+1) = -1*/ A[IDX2F(N - J + 1, J + 1, lda)] = -1.0; } } } int main(int argc, char *argv[]) { cusolverMgHandle_t cusolverH = NULL; using data_type = double; /* maximum number of GPUs */ const int MAX_NUM_DEVICES = 16; int nbGpus = 0; std::vector<int> deviceList(MAX_NUM_DEVICES); const int N = 611; const int IA = 1; const int JA = 1; const int T_A = 256; /* tile size */ const int lda = N; const int IB = 1; const int JB = 1; const int T_B = 100; /* tile size of B */ const int ldb = N; int info = 0; cudaLibMgMatrixDesc_t descrA; cudaLibMgMatrixDesc_t descrB; cudaLibMgGrid_t gridA; cudaLibMgGrid_t gridB; cusolverMgGridMapping_t mapping = CUDALIBMG_GRID_MAPPING_COL_MAJOR; int64_t lwork_getrf = 0; int64_t lwork_getrs = 0; int64_t lwork = 0; /* workspace: number of 
elements per device */ std::printf("Test 1D Laplacian of order %d\n", N); std::printf("Step 1: Create Mg handle and select devices \n"); CUSOLVER_CHECK(cusolverMgCreate(&cusolverH)); CUDA_CHECK(hipGetDeviceCount(&nbGpus)); nbGpus = (nbGpus < MAX_NUM_DEVICES) ? nbGpus : MAX_NUM_DEVICES; std::printf("\tThere are %d GPUs \n", nbGpus); for (int j = 0; j < nbGpus; j++) { deviceList[j] = j; hipDeviceProp_t prop; CUDA_CHECK(hipGetDeviceProperties(&prop, j)); std::printf("\tDevice %d, %s, cc %d.%d \n", j, prop.name, prop.major, prop.minor); } CUSOLVER_CHECK(cusolverMgDeviceSelect(cusolverH, nbGpus, deviceList.data())); std::printf("step 2: Enable peer access.\n"); enablePeerAccess(nbGpus, deviceList.data()); std::printf("Step 3: Allocate host memory A \n"); std::vector<data_type> A(lda * N, 0); std::vector<data_type> B(ldb, 0); std::vector<data_type> X(ldb, 0); std::vector<int> IPIV(N, 0); std::printf("Step 4: Prepare 1D Laplacian \n"); gen_1d_laplacian<data_type>(N, &A[IDX2F(IA, JA, lda)], lda); #ifdef SHOW_FORMAT std::printf("A = matlab base-1\n"); print_matrix(N, N, A.data(), lda); #endif /* B = ones(N,1) */ for (int row = 1; row <= N; row++) { B[IDX1F(row)] = 1.0; } #ifdef SHOW_FORMAT std::printf("B = matlab base-1\n"); print_matrix(N, 1, B.data(), ldb, HIPBLAS_OP_T); #endif std::printf("Step 5: Create matrix descriptors for A and B \n"); CUSOLVER_CHECK(cusolverMgCreateDeviceGrid(&gridA, 1, nbGpus, deviceList.data(), mapping)); CUSOLVER_CHECK(cusolverMgCreateDeviceGrid(&gridB, 1, nbGpus, deviceList.data(), mapping)); /* (global) A is N-by-N */ CUSOLVER_CHECK(cusolverMgCreateMatrixDesc(&descrA, N, /* nubmer of rows of (global) A */ N, /* number of columns of (global) A */ N, /* number or rows in a tile */ T_A, /* number of columns in a tile */ traits<data_type>::cuda_data_type, gridA)); /* (global) B is N-by-1 */ CUSOLVER_CHECK(cusolverMgCreateMatrixDesc(&descrB, N, /* nubmer of rows of (global) B */ 1, /* number of columns of (global) B */ N, /* number or rows in a 
tile */ T_B, /* number of columns in a tile */ traits<data_type>::cuda_data_type, gridB)); std::printf("Step 6: Allocate distributed matrices A and D \n"); std::vector<data_type *> array_d_A(nbGpus, nullptr); std::vector<data_type *> array_d_B(nbGpus, nullptr); std::vector<int *> array_d_IPIV(nbGpus, nullptr); /* A := 0 */ createMat<data_type>(nbGpus, deviceList.data(), N, /* number of columns of global A */ T_A, /* number of columns per column tile */ lda, /* leading dimension of local A */ array_d_A.data()); /* B := 0 */ createMat<data_type>(nbGpus, deviceList.data(), 1, /* number of columns of global B */ T_B, /* number of columns per column tile */ ldb, /* leading dimension of local B */ array_d_B.data()); /* IPIV := 0, IPIV is consistent with A */ createMat<int>(nbGpus, deviceList.data(), N, /* number of columns of global IPIV */ T_A, /* number of columns per column tile */ 1, /* leading dimension of local IPIV */ array_d_IPIV.data()); std::printf("Step 7: Prepare data on devices \n"); memcpyH2D<data_type>(nbGpus, deviceList.data(), N, N, /* input */ A.data(), lda, /* output */ N, /* number of columns of global A */ T_A, /* number of columns per column tile */ lda, /* leading dimension of local A */ array_d_A.data(), /* host pointer array of dimension nbGpus */ IA, JA); memcpyH2D<data_type>(nbGpus, deviceList.data(), N, 1, /* input */ B.data(), ldb, /* output */ 1, /* number of columns of global A */ T_B, /* number of columns per column tile */ ldb, /* leading dimension of local A */ array_d_B.data(), /* host pointer array of dimension nbGpus */ IB, JB); std::printf("Step 8: Allocate workspace space \n"); CUSOLVER_CHECK(cusolverMgGetrf_bufferSize( cusolverH, N, N, reinterpret_cast<void **>(array_d_A.data()), IA, /* base-1 */ JA, /* base-1 */ descrA, array_d_IPIV.data(), traits<data_type>::cuda_data_type, &lwork_getrf)); CUSOLVER_CHECK(cusolverMgGetrs_bufferSize( cusolverH, HIPBLAS_OP_N, N, 1, /* NRHS */ reinterpret_cast<void **>(array_d_A.data()), IA, JA, 
descrA, array_d_IPIV.data(), reinterpret_cast<void **>(array_d_B.data()), IB, JB, descrB, traits<data_type>::cuda_data_type, &lwork_getrs)); lwork = ::max(lwork_getrf, lwork_getrs); std::printf("\tAllocate device workspace, lwork = %lld \n", static_cast<long long>(lwork)); std::vector<data_type *> array_d_work(nbGpus, nullptr); /* array_d_work[j] points to device workspace of device j */ workspaceAlloc(nbGpus, deviceList.data(), sizeof(data_type) * lwork, /* number of bytes per device */ reinterpret_cast<void **>(array_d_work.data())); /* sync all devices */ CUDA_CHECK(hipDeviceSynchronize()); std::printf("Step 9: Solve A*X = B by GETRF and GETRS \n"); CUSOLVER_CHECK( cusolverMgGetrf(cusolverH, N, N, reinterpret_cast<void **>(array_d_A.data()), IA, JA, descrA, array_d_IPIV.data(), traits<data_type>::cuda_data_type, reinterpret_cast<void **>(array_d_work.data()), lwork, &info /* host */ )); /* sync all devices */ CUDA_CHECK(hipDeviceSynchronize()); /* check if A is singular */ if (0 > info) { std::printf("%d-th parameter is wrong \n", -info); exit(1); } CUSOLVER_CHECK(cusolverMgGetrs(cusolverH, HIPBLAS_OP_N, N, 1, /* NRHS */ reinterpret_cast<void **>(array_d_A.data()), IA, JA, descrA, array_d_IPIV.data(), reinterpret_cast<void **>(array_d_B.data()), IB, JB, descrB, traits<data_type>::cuda_data_type, reinterpret_cast<void **>(array_d_work.data()), lwork, &info /* host */ )); /* sync all devices */ CUDA_CHECK(hipDeviceSynchronize()); /* check if parameters are valid */ if (0 > info) { std::printf("%d-th parameter is wrong \n", -info); exit(1); } std::printf("Step 10: Retrieve IPIV and solution vector X\n"); memcpyD2H<data_type>(nbGpus, deviceList.data(), N, 1, /* input */ 1, /* number of columns of global B */ T_B, /* number of columns per column tile */ ldb, /* leading dimension of local B */ array_d_B.data(), IB, JB, /* output */ X.data(), /* N-by-1 */ ldb); /* IPIV is consistent with A, use JA and T_A */ memcpyD2H<int>(nbGpus, deviceList.data(), 1, N, /* input */ 
N, /* number of columns of global IPIV */ T_A, /* number of columns per column tile */ 1, /* leading dimension of local IPIV */ array_d_IPIV.data(), 1, JA, /* output */ IPIV.data(), /* 1-by-N */ 1); #ifdef SHOW_FORMAT /* X is N-by-1 */ std::printf("X = matlab base-1\n"); print_matrix(N, 1, X.data(), ldb, HIPBLAS_OP_T); #endif #ifdef SHOW_FORMAT /* IPIV is 1-by-N */ std::printf("IPIV = matlab base-1, 1-by-%d matrix\n", N); for (int row = 1; row <= N; row++) { std::printf("IPIV(%d) = %d \n", row, IPIV[IDX1F(row)]); } #endif std::printf("Step 11: Measure residual error |b - A*x| \n"); data_type max_err = 0; for (int row = 1; row <= N; row++) { data_type sum = 0.0; for (int col = 1; col <= N; col++) { data_type Aij = A[IDX2F(row, col, lda)]; data_type xj = X[IDX1F(col)]; sum += Aij * xj; } data_type bi = B[IDX1F(row)]; data_type err = fabs(bi - sum); max_err = (max_err > err) ? max_err : err; } data_type x_nrm_inf = vec_nrm_inf(N, X.data()); data_type b_nrm_inf = vec_nrm_inf(N, B.data()); data_type A_nrm_inf = 4.0; data_type rel_err = max_err / (A_nrm_inf * x_nrm_inf + b_nrm_inf); std::printf("\n|b - A*x|_inf = %E\n", max_err); std::printf("|x|_inf = %E\n", x_nrm_inf); std::printf("|b|_inf = %E\n", b_nrm_inf); std::printf("|A|_inf = %E\n", A_nrm_inf); /* relative error is around machine zero */ /* the user can use |b - A*x|/(N*|A|*|x|+|b|) as well */ std::printf("|b - A*x|/(|A|*|x|+|b|) = %E\n\n", rel_err); std::printf("step 12: Free resources \n"); destroyMat(nbGpus, deviceList.data(), N, /* number of columns of global A */ T_A, /* number of columns per column tile */ reinterpret_cast<void **>(array_d_A.data())); destroyMat(nbGpus, deviceList.data(), 1, /* number of columns of global B */ T_B, /* number of columns per column tile */ reinterpret_cast<void **>(array_d_B.data())); destroyMat(nbGpus, deviceList.data(), N, /* number of columns of global IPIV */ T_A, /* number of columns per column tile */ reinterpret_cast<void **>(array_d_IPIV.data())); 
workspaceFree(nbGpus, deviceList.data(), reinterpret_cast<void **>(array_d_work.data())); return EXIT_SUCCESS; }
a69db1e1666b4df7ab7296255ab9607786ac8254.cu
/* * Copyright 2020 NVIDIA Corporation. All rights reserved. * * NOTICE TO LICENSEE: * * This source code and/or documentation ("Licensed Deliverables") are * subject to NVIDIA intellectual property rights under U.S. and * international Copyright laws. * * These Licensed Deliverables contained herein is PROPRIETARY and * CONFIDENTIAL to NVIDIA and is being provided under the terms and * conditions of a form of NVIDIA software license agreement by and * between NVIDIA and Licensee ("License Agreement") or electronically * accepted by Licensee. Notwithstanding any terms or conditions to * the contrary in the License Agreement, reproduction or disclosure * of the Licensed Deliverables to any third party without the express * written consent of NVIDIA is prohibited. * * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE * OF THESE LICENSED DELIVERABLES. * * U.S. Government End Users. These Licensed Deliverables are a * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT * 1995), consisting of "commercial computer software" and "commercial * computer software documentation" as such terms are used in 48 * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. 
Government * only as a commercial end item. Consistent with 48 C.F.R.12.212 and * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all * U.S. Government End Users acquire the Licensed Deliverables with * only those rights set forth herein. * * Any use of the Licensed Deliverables in individual and commercial * software must include, in the user documentation and internal * comments to the code, the above Disclaimer and U.S. Government End * Users Notice. */ #include <cstdio> #include <cstdlib> #include <vector> #include <cuda_runtime.h> #include <cusolverMg.h> #include "cusolverMg_utils.h" #include "cusolver_utils.h" /* compute |x|_inf */ template <typename T> static T vec_nrm_inf(int n, const T *x) { T max_nrm = 0.0; for (int row = 1; row <= n; row++) { T xi = x[IDX1F(row)]; max_nrm = (max_nrm > fabs(xi)) ? max_nrm : fabs(xi); } return max_nrm; } /* A is 1D laplacian, return A(N:-1:1, :) */ template <typename T> static void gen_1d_laplacian(int N, T *A, int lda) { for (int J = 1; J <= N; J++) { /* A(J,J) = 2 */ A[IDX2F(N - J + 1, J, lda)] = 2.0; if ((J - 1) >= 1) { /* A(J, J-1) = -1*/ A[IDX2F(N - J + 1, J - 1, lda)] = -1.0; } if ((J + 1) <= N) { /* A(J, J+1) = -1*/ A[IDX2F(N - J + 1, J + 1, lda)] = -1.0; } } } int main(int argc, char *argv[]) { cusolverMgHandle_t cusolverH = NULL; using data_type = double; /* maximum number of GPUs */ const int MAX_NUM_DEVICES = 16; int nbGpus = 0; std::vector<int> deviceList(MAX_NUM_DEVICES); const int N = 611; const int IA = 1; const int JA = 1; const int T_A = 256; /* tile size */ const int lda = N; const int IB = 1; const int JB = 1; const int T_B = 100; /* tile size of B */ const int ldb = N; int info = 0; cudaLibMgMatrixDesc_t descrA; cudaLibMgMatrixDesc_t descrB; cudaLibMgGrid_t gridA; cudaLibMgGrid_t gridB; cusolverMgGridMapping_t mapping = CUDALIBMG_GRID_MAPPING_COL_MAJOR; int64_t lwork_getrf = 0; int64_t lwork_getrs = 0; int64_t lwork = 0; /* workspace: number of elements per device */ std::printf("Test 1D Laplacian 
of order %d\n", N); std::printf("Step 1: Create Mg handle and select devices \n"); CUSOLVER_CHECK(cusolverMgCreate(&cusolverH)); CUDA_CHECK(cudaGetDeviceCount(&nbGpus)); nbGpus = (nbGpus < MAX_NUM_DEVICES) ? nbGpus : MAX_NUM_DEVICES; std::printf("\tThere are %d GPUs \n", nbGpus); for (int j = 0; j < nbGpus; j++) { deviceList[j] = j; cudaDeviceProp prop; CUDA_CHECK(cudaGetDeviceProperties(&prop, j)); std::printf("\tDevice %d, %s, cc %d.%d \n", j, prop.name, prop.major, prop.minor); } CUSOLVER_CHECK(cusolverMgDeviceSelect(cusolverH, nbGpus, deviceList.data())); std::printf("step 2: Enable peer access.\n"); enablePeerAccess(nbGpus, deviceList.data()); std::printf("Step 3: Allocate host memory A \n"); std::vector<data_type> A(lda * N, 0); std::vector<data_type> B(ldb, 0); std::vector<data_type> X(ldb, 0); std::vector<int> IPIV(N, 0); std::printf("Step 4: Prepare 1D Laplacian \n"); gen_1d_laplacian<data_type>(N, &A[IDX2F(IA, JA, lda)], lda); #ifdef SHOW_FORMAT std::printf("A = matlab base-1\n"); print_matrix(N, N, A.data(), lda); #endif /* B = ones(N,1) */ for (int row = 1; row <= N; row++) { B[IDX1F(row)] = 1.0; } #ifdef SHOW_FORMAT std::printf("B = matlab base-1\n"); print_matrix(N, 1, B.data(), ldb, CUBLAS_OP_T); #endif std::printf("Step 5: Create matrix descriptors for A and B \n"); CUSOLVER_CHECK(cusolverMgCreateDeviceGrid(&gridA, 1, nbGpus, deviceList.data(), mapping)); CUSOLVER_CHECK(cusolverMgCreateDeviceGrid(&gridB, 1, nbGpus, deviceList.data(), mapping)); /* (global) A is N-by-N */ CUSOLVER_CHECK(cusolverMgCreateMatrixDesc(&descrA, N, /* nubmer of rows of (global) A */ N, /* number of columns of (global) A */ N, /* number or rows in a tile */ T_A, /* number of columns in a tile */ traits<data_type>::cuda_data_type, gridA)); /* (global) B is N-by-1 */ CUSOLVER_CHECK(cusolverMgCreateMatrixDesc(&descrB, N, /* nubmer of rows of (global) B */ 1, /* number of columns of (global) B */ N, /* number or rows in a tile */ T_B, /* number of columns in a tile */ 
traits<data_type>::cuda_data_type, gridB)); std::printf("Step 6: Allocate distributed matrices A and D \n"); std::vector<data_type *> array_d_A(nbGpus, nullptr); std::vector<data_type *> array_d_B(nbGpus, nullptr); std::vector<int *> array_d_IPIV(nbGpus, nullptr); /* A := 0 */ createMat<data_type>(nbGpus, deviceList.data(), N, /* number of columns of global A */ T_A, /* number of columns per column tile */ lda, /* leading dimension of local A */ array_d_A.data()); /* B := 0 */ createMat<data_type>(nbGpus, deviceList.data(), 1, /* number of columns of global B */ T_B, /* number of columns per column tile */ ldb, /* leading dimension of local B */ array_d_B.data()); /* IPIV := 0, IPIV is consistent with A */ createMat<int>(nbGpus, deviceList.data(), N, /* number of columns of global IPIV */ T_A, /* number of columns per column tile */ 1, /* leading dimension of local IPIV */ array_d_IPIV.data()); std::printf("Step 7: Prepare data on devices \n"); memcpyH2D<data_type>(nbGpus, deviceList.data(), N, N, /* input */ A.data(), lda, /* output */ N, /* number of columns of global A */ T_A, /* number of columns per column tile */ lda, /* leading dimension of local A */ array_d_A.data(), /* host pointer array of dimension nbGpus */ IA, JA); memcpyH2D<data_type>(nbGpus, deviceList.data(), N, 1, /* input */ B.data(), ldb, /* output */ 1, /* number of columns of global A */ T_B, /* number of columns per column tile */ ldb, /* leading dimension of local A */ array_d_B.data(), /* host pointer array of dimension nbGpus */ IB, JB); std::printf("Step 8: Allocate workspace space \n"); CUSOLVER_CHECK(cusolverMgGetrf_bufferSize( cusolverH, N, N, reinterpret_cast<void **>(array_d_A.data()), IA, /* base-1 */ JA, /* base-1 */ descrA, array_d_IPIV.data(), traits<data_type>::cuda_data_type, &lwork_getrf)); CUSOLVER_CHECK(cusolverMgGetrs_bufferSize( cusolverH, CUBLAS_OP_N, N, 1, /* NRHS */ reinterpret_cast<void **>(array_d_A.data()), IA, JA, descrA, array_d_IPIV.data(), reinterpret_cast<void 
**>(array_d_B.data()), IB, JB, descrB, traits<data_type>::cuda_data_type, &lwork_getrs)); lwork = std::max(lwork_getrf, lwork_getrs); std::printf("\tAllocate device workspace, lwork = %lld \n", static_cast<long long>(lwork)); std::vector<data_type *> array_d_work(nbGpus, nullptr); /* array_d_work[j] points to device workspace of device j */ workspaceAlloc(nbGpus, deviceList.data(), sizeof(data_type) * lwork, /* number of bytes per device */ reinterpret_cast<void **>(array_d_work.data())); /* sync all devices */ CUDA_CHECK(cudaDeviceSynchronize()); std::printf("Step 9: Solve A*X = B by GETRF and GETRS \n"); CUSOLVER_CHECK( cusolverMgGetrf(cusolverH, N, N, reinterpret_cast<void **>(array_d_A.data()), IA, JA, descrA, array_d_IPIV.data(), traits<data_type>::cuda_data_type, reinterpret_cast<void **>(array_d_work.data()), lwork, &info /* host */ )); /* sync all devices */ CUDA_CHECK(cudaDeviceSynchronize()); /* check if A is singular */ if (0 > info) { std::printf("%d-th parameter is wrong \n", -info); exit(1); } CUSOLVER_CHECK(cusolverMgGetrs(cusolverH, CUBLAS_OP_N, N, 1, /* NRHS */ reinterpret_cast<void **>(array_d_A.data()), IA, JA, descrA, array_d_IPIV.data(), reinterpret_cast<void **>(array_d_B.data()), IB, JB, descrB, traits<data_type>::cuda_data_type, reinterpret_cast<void **>(array_d_work.data()), lwork, &info /* host */ )); /* sync all devices */ CUDA_CHECK(cudaDeviceSynchronize()); /* check if parameters are valid */ if (0 > info) { std::printf("%d-th parameter is wrong \n", -info); exit(1); } std::printf("Step 10: Retrieve IPIV and solution vector X\n"); memcpyD2H<data_type>(nbGpus, deviceList.data(), N, 1, /* input */ 1, /* number of columns of global B */ T_B, /* number of columns per column tile */ ldb, /* leading dimension of local B */ array_d_B.data(), IB, JB, /* output */ X.data(), /* N-by-1 */ ldb); /* IPIV is consistent with A, use JA and T_A */ memcpyD2H<int>(nbGpus, deviceList.data(), 1, N, /* input */ N, /* number of columns of global IPIV */ T_A, 
/* number of columns per column tile */ 1, /* leading dimension of local IPIV */ array_d_IPIV.data(), 1, JA, /* output */ IPIV.data(), /* 1-by-N */ 1); #ifdef SHOW_FORMAT /* X is N-by-1 */ std::printf("X = matlab base-1\n"); print_matrix(N, 1, X.data(), ldb, CUBLAS_OP_T); #endif #ifdef SHOW_FORMAT /* IPIV is 1-by-N */ std::printf("IPIV = matlab base-1, 1-by-%d matrix\n", N); for (int row = 1; row <= N; row++) { std::printf("IPIV(%d) = %d \n", row, IPIV[IDX1F(row)]); } #endif std::printf("Step 11: Measure residual error |b - A*x| \n"); data_type max_err = 0; for (int row = 1; row <= N; row++) { data_type sum = 0.0; for (int col = 1; col <= N; col++) { data_type Aij = A[IDX2F(row, col, lda)]; data_type xj = X[IDX1F(col)]; sum += Aij * xj; } data_type bi = B[IDX1F(row)]; data_type err = fabs(bi - sum); max_err = (max_err > err) ? max_err : err; } data_type x_nrm_inf = vec_nrm_inf(N, X.data()); data_type b_nrm_inf = vec_nrm_inf(N, B.data()); data_type A_nrm_inf = 4.0; data_type rel_err = max_err / (A_nrm_inf * x_nrm_inf + b_nrm_inf); std::printf("\n|b - A*x|_inf = %E\n", max_err); std::printf("|x|_inf = %E\n", x_nrm_inf); std::printf("|b|_inf = %E\n", b_nrm_inf); std::printf("|A|_inf = %E\n", A_nrm_inf); /* relative error is around machine zero */ /* the user can use |b - A*x|/(N*|A|*|x|+|b|) as well */ std::printf("|b - A*x|/(|A|*|x|+|b|) = %E\n\n", rel_err); std::printf("step 12: Free resources \n"); destroyMat(nbGpus, deviceList.data(), N, /* number of columns of global A */ T_A, /* number of columns per column tile */ reinterpret_cast<void **>(array_d_A.data())); destroyMat(nbGpus, deviceList.data(), 1, /* number of columns of global B */ T_B, /* number of columns per column tile */ reinterpret_cast<void **>(array_d_B.data())); destroyMat(nbGpus, deviceList.data(), N, /* number of columns of global IPIV */ T_A, /* number of columns per column tile */ reinterpret_cast<void **>(array_d_IPIV.data())); workspaceFree(nbGpus, deviceList.data(), reinterpret_cast<void 
**>(array_d_work.data())); return EXIT_SUCCESS; }
13d34e4aa694655f334487bb820c628c785c6842.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <vector> #include "caffe/layers/crop_layer.hpp" #include "caffe/vision_layers.hpp" namespace caffe { __device__ int compute_uncropped_index( int index, const int ndims, const int* src_strides, const int* dest_strides, const int* offsets) { int dest_index = index; int src_index = 0; for (int i = 0; i < ndims; ++i) { int coord = dest_index / dest_strides[i]; dest_index -= coord * dest_strides[i]; src_index += src_strides[i] * (coord + offsets[i]); } return src_index; } template <typename Dtype> __global__ void crop_kernel_forward(const int nthreads, const int ndims, const int* src_strides, const int* dest_strides, const int* offsets, const Dtype* src, Dtype* dest) { CUDA_KERNEL_LOOP(index, nthreads) { int src_index = compute_uncropped_index( index, ndims, src_strides, dest_strides, offsets); dest[index] = src[src_index]; } } template <typename Dtype> __global__ void crop_kernel_backward(const int nthreads, const int ndims, const int* src_strides, const int* dest_strides, const int* offsets, Dtype* src, const Dtype* dest) { CUDA_KERNEL_LOOP(index, nthreads) { int src_index = compute_uncropped_index( index, ndims, src_strides, dest_strides, offsets); src[src_index] = dest[index]; } } template <typename Dtype> void CropLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); int n = top[0]->count(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( crop_kernel_forward), dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, n, bottom[0]->num_axes(), src_strides_.gpu_data(), dest_strides_.gpu_data(), offsets.gpu_data(), bottom_data, top_data); } template <typename Dtype> void CropLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const 
Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); int n = top[0]->count(); if (propagate_down[0]) { caffe_gpu_set(bottom[0]->count(), static_cast<Dtype>(0), bottom_diff); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( crop_kernel_backward), dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, n, bottom[0]->num_axes(), src_strides_.gpu_data(), dest_strides_.gpu_data(), offsets.gpu_data(), bottom_diff, top_diff); } } INSTANTIATE_LAYER_GPU_FUNCS(CropLayer); } // namespace caffe
13d34e4aa694655f334487bb820c628c785c6842.cu
#include <vector> #include "caffe/layers/crop_layer.hpp" #include "caffe/vision_layers.hpp" namespace caffe { __device__ int compute_uncropped_index( int index, const int ndims, const int* src_strides, const int* dest_strides, const int* offsets) { int dest_index = index; int src_index = 0; for (int i = 0; i < ndims; ++i) { int coord = dest_index / dest_strides[i]; dest_index -= coord * dest_strides[i]; src_index += src_strides[i] * (coord + offsets[i]); } return src_index; } template <typename Dtype> __global__ void crop_kernel_forward(const int nthreads, const int ndims, const int* src_strides, const int* dest_strides, const int* offsets, const Dtype* src, Dtype* dest) { CUDA_KERNEL_LOOP(index, nthreads) { int src_index = compute_uncropped_index( index, ndims, src_strides, dest_strides, offsets); dest[index] = src[src_index]; } } template <typename Dtype> __global__ void crop_kernel_backward(const int nthreads, const int ndims, const int* src_strides, const int* dest_strides, const int* offsets, Dtype* src, const Dtype* dest) { CUDA_KERNEL_LOOP(index, nthreads) { int src_index = compute_uncropped_index( index, ndims, src_strides, dest_strides, offsets); src[src_index] = dest[index]; } } template <typename Dtype> void CropLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); int n = top[0]->count(); // NOLINT_NEXT_LINE(whitespace/operators) crop_kernel_forward<<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS>>>(n, bottom[0]->num_axes(), src_strides_.gpu_data(), dest_strides_.gpu_data(), offsets.gpu_data(), bottom_data, top_data); } template <typename Dtype> void CropLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); int n = top[0]->count(); if 
(propagate_down[0]) { caffe_gpu_set(bottom[0]->count(), static_cast<Dtype>(0), bottom_diff); // NOLINT_NEXT_LINE(whitespace/operators) crop_kernel_backward<<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS>>>(n, bottom[0]->num_axes(), src_strides_.gpu_data(), dest_strides_.gpu_data(), offsets.gpu_data(), bottom_diff, top_diff); } } INSTANTIATE_LAYER_GPU_FUNCS(CropLayer); } // namespace caffe
90ed8521d68ba5effae1ac942742c96fb90d561c.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "rgb2yuv_kernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int img_size = XSIZE*YSIZE; unsigned char *img_r = NULL; hipMalloc(&img_r, XSIZE*YSIZE); unsigned char *img_g = NULL; hipMalloc(&img_g, XSIZE*YSIZE); unsigned char *img_b = NULL; hipMalloc(&img_b, XSIZE*YSIZE); unsigned char *img_y = NULL; hipMalloc(&img_y, XSIZE*YSIZE); unsigned char *img_u = NULL; hipMalloc(&img_u, XSIZE*YSIZE); unsigned char *img_v = NULL; hipMalloc(&img_v, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( rgb2yuv_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, img_size,img_r,img_g,img_b,img_y,img_u,img_v); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( rgb2yuv_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, img_size,img_r,img_g,img_b,img_y,img_u,img_v); } auto start = steady_clock::now(); for (int 
loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( rgb2yuv_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, img_size,img_r,img_g,img_b,img_y,img_u,img_v); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
90ed8521d68ba5effae1ac942742c96fb90d561c.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "rgb2yuv_kernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int img_size = XSIZE*YSIZE; unsigned char *img_r = NULL; cudaMalloc(&img_r, XSIZE*YSIZE); unsigned char *img_g = NULL; cudaMalloc(&img_g, XSIZE*YSIZE); unsigned char *img_b = NULL; cudaMalloc(&img_b, XSIZE*YSIZE); unsigned char *img_y = NULL; cudaMalloc(&img_y, XSIZE*YSIZE); unsigned char *img_u = NULL; cudaMalloc(&img_u, XSIZE*YSIZE); unsigned char *img_v = NULL; cudaMalloc(&img_v, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); rgb2yuv_kernel<<<gridBlock,threadBlock>>>(img_size,img_r,img_g,img_b,img_y,img_u,img_v); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { rgb2yuv_kernel<<<gridBlock,threadBlock>>>(img_size,img_r,img_g,img_b,img_y,img_u,img_v); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { rgb2yuv_kernel<<<gridBlock,threadBlock>>>(img_size,img_r,img_g,img_b,img_y,img_u,img_v); } auto 
end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
c8790ab9ef17e966021c2eaff8ed269cd5365d32.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.1.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date August 2016 @generated from magmablas/ztranspose_inplace.cu, normal z -> d, Tue Aug 30 09:38:34 2016 @author Stan Tomov @author Mark Gates */ #include "magma_internal.h" #define NB 16 /******************************************************************************/ // grid is (n/nb) x ((n/nb)/2 + 1), where n/nb is odd. // lower indicates blocks in lower triangle of grid, including diagonal. // lower blocks cover left side of matrix, including diagonal. // upper blocks swap block indices (x,y) and shift by grid width (or width-1) // to cover right side of matrix. // [ A00 A01 A02 ] [ A00 . . | . . ] // [ A10 A11 A12 ] [ A10 A11 . | . . ] // grid [ A20 A21 A22 ] covers matrix as [ A20 A21 A22 | . . ] // [ A30 A31 A32 ] [ A30 A31 A32 | A01 . ] // [ A40 A41 A42 ] [ A40 A41 A42 | A02 A12 ] // // See dtranspose_inplace_even for description of threads. __global__ void dtranspose_inplace_odd( int n, double *matrix, int lda ) { __shared__ double sA[ NB ][ NB+1 ]; __shared__ double sB[ NB ][ NB+1 ]; int i = threadIdx.x; int j = threadIdx.y; bool lower = (blockIdx.x >= blockIdx.y); int ii = (lower ? blockIdx.x : (blockIdx.y + gridDim.y - 1)); int jj = (lower ? 
blockIdx.y : (blockIdx.x + gridDim.y )); ii *= NB; jj *= NB; double *A = matrix + ii+i + (jj+j)*lda; if ( ii == jj ) { if ( ii+i < n && jj+j < n ) { sA[j][i] = *A; } __syncthreads(); if ( ii+i < n && jj+j < n ) { *A = sA[i][j]; } } else { double *B = matrix + jj+i + (ii+j)*lda; if ( ii+i < n && jj+j < n ) { sA[j][i] = *A; } if ( jj+i < n && ii+j < n ) { sB[j][i] = *B; } __syncthreads(); if ( ii+i < n && jj+j < n ) { *A = sB[i][j]; } if ( jj+i < n && ii+j < n ) { *B = sA[i][j]; } } } /******************************************************************************/ // grid is ((n/nb) + 1) x (n/nb)/2, where n/nb is even. // lower indicates blocks in strictly lower triangle of grid, excluding diagonal. // lower blocks shift up by one to cover left side of matrix including diagonal. // upper blocks swap block indices (x,y) and shift by grid width // to cover right side of matrix. // [ A00 A01 ] [ A10 . | . . ] // [ A10 A11 ] [ A20 A21 | . . ] // grid [ A20 A21 ] covers matrix as [ A30 A31 | A00 . ] // [ A30 A31 ] [ A40 A41 | A01 A11 ] // [ A40 A41 ] // // Each block is NB x NB threads. // For non-diagonal block A, block B is symmetric block. // Thread (i,j) loads A(i,j) into sA(j,i) and B(i,j) into sB(j,i), i.e., transposed, // syncs, then saves sA(i,j) to B(i,j) and sB(i,j) to A(i,j). // Threads outside the matrix do not touch memory. __global__ void dtranspose_inplace_even( int n, double *matrix, int lda ) { __shared__ double sA[ NB ][ NB+1 ]; __shared__ double sB[ NB ][ NB+1 ]; int i = threadIdx.x; int j = threadIdx.y; bool lower = (blockIdx.x > blockIdx.y); int ii = (lower ? (blockIdx.x - 1) : (blockIdx.y + gridDim.y)); int jj = (lower ? 
(blockIdx.y ) : (blockIdx.x + gridDim.y)); ii *= NB; jj *= NB; double *A = matrix + ii+i + (jj+j)*lda; if ( ii == jj ) { if ( ii+i < n && jj+j < n ) { sA[j][i] = *A; } __syncthreads(); if ( ii+i < n && jj+j < n ) { *A = sA[i][j]; } } else { double *B = matrix + jj+i + (ii+j)*lda; if ( ii+i < n && jj+j < n ) { sA[j][i] = *A; } if ( jj+i < n && ii+j < n ) { sB[j][i] = *B; } __syncthreads(); if ( ii+i < n && jj+j < n ) { *A = sB[i][j]; } if ( jj+i < n && ii+j < n ) { *B = sA[i][j]; } } } /***************************************************************************//** Purpose ------- dtranspose_inplace_q transposes a square N-by-N matrix in-place. Same as dtranspose_inplace, but adds queue argument. Arguments --------- @param[in] n INTEGER The number of rows & columns of the matrix dA. N >= 0. @param[in] dA DOUBLE PRECISION array, dimension (LDDA,N) The N-by-N matrix dA. On exit, dA(j,i) = dA_original(i,j), for 0 <= i,j < N. @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= N. @param[in] queue magma_queue_t Queue to execute in. @ingroup magma_transpose *******************************************************************************/ extern "C" void magmablas_dtranspose_inplace_q( magma_int_t n, magmaDouble_ptr dA, magma_int_t ldda, magma_queue_t queue ) { magma_int_t info = 0; if ( n < 0 ) info = -1; else if ( ldda < n ) info = -3; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; //info; } dim3 threads( NB, NB ); int nblock = magma_ceildiv( n, NB ); // need 1/2 * (nblock+1) * nblock to cover lower triangle and diagonal of matrix. // block assignment differs depending on whether nblock is odd or even. if ( nblock % 2 == 1 ) { dim3 grid( nblock, (nblock+1)/2 ); hipLaunchKernelGGL(( dtranspose_inplace_odd), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA, ldda ); } else { dim3 grid( nblock+1, nblock/2 ); hipLaunchKernelGGL(( dtranspose_inplace_even), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA, ldda ); } }
c8790ab9ef17e966021c2eaff8ed269cd5365d32.cu
/* -- MAGMA (version 2.1.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date August 2016 @generated from magmablas/ztranspose_inplace.cu, normal z -> d, Tue Aug 30 09:38:34 2016 @author Stan Tomov @author Mark Gates */ #include "magma_internal.h" #define NB 16 /******************************************************************************/ // grid is (n/nb) x ((n/nb)/2 + 1), where n/nb is odd. // lower indicates blocks in lower triangle of grid, including diagonal. // lower blocks cover left side of matrix, including diagonal. // upper blocks swap block indices (x,y) and shift by grid width (or width-1) // to cover right side of matrix. // [ A00 A01 A02 ] [ A00 . . | . . ] // [ A10 A11 A12 ] [ A10 A11 . | . . ] // grid [ A20 A21 A22 ] covers matrix as [ A20 A21 A22 | . . ] // [ A30 A31 A32 ] [ A30 A31 A32 | A01 . ] // [ A40 A41 A42 ] [ A40 A41 A42 | A02 A12 ] // // See dtranspose_inplace_even for description of threads. __global__ void dtranspose_inplace_odd( int n, double *matrix, int lda ) { __shared__ double sA[ NB ][ NB+1 ]; __shared__ double sB[ NB ][ NB+1 ]; int i = threadIdx.x; int j = threadIdx.y; bool lower = (blockIdx.x >= blockIdx.y); int ii = (lower ? blockIdx.x : (blockIdx.y + gridDim.y - 1)); int jj = (lower ? blockIdx.y : (blockIdx.x + gridDim.y )); ii *= NB; jj *= NB; double *A = matrix + ii+i + (jj+j)*lda; if ( ii == jj ) { if ( ii+i < n && jj+j < n ) { sA[j][i] = *A; } __syncthreads(); if ( ii+i < n && jj+j < n ) { *A = sA[i][j]; } } else { double *B = matrix + jj+i + (ii+j)*lda; if ( ii+i < n && jj+j < n ) { sA[j][i] = *A; } if ( jj+i < n && ii+j < n ) { sB[j][i] = *B; } __syncthreads(); if ( ii+i < n && jj+j < n ) { *A = sB[i][j]; } if ( jj+i < n && ii+j < n ) { *B = sA[i][j]; } } } /******************************************************************************/ // grid is ((n/nb) + 1) x (n/nb)/2, where n/nb is even. // lower indicates blocks in strictly lower triangle of grid, excluding diagonal. 
// lower blocks shift up by one to cover left side of matrix including diagonal. // upper blocks swap block indices (x,y) and shift by grid width // to cover right side of matrix. // [ A00 A01 ] [ A10 . | . . ] // [ A10 A11 ] [ A20 A21 | . . ] // grid [ A20 A21 ] covers matrix as [ A30 A31 | A00 . ] // [ A30 A31 ] [ A40 A41 | A01 A11 ] // [ A40 A41 ] // // Each block is NB x NB threads. // For non-diagonal block A, block B is symmetric block. // Thread (i,j) loads A(i,j) into sA(j,i) and B(i,j) into sB(j,i), i.e., transposed, // syncs, then saves sA(i,j) to B(i,j) and sB(i,j) to A(i,j). // Threads outside the matrix do not touch memory. __global__ void dtranspose_inplace_even( int n, double *matrix, int lda ) { __shared__ double sA[ NB ][ NB+1 ]; __shared__ double sB[ NB ][ NB+1 ]; int i = threadIdx.x; int j = threadIdx.y; bool lower = (blockIdx.x > blockIdx.y); int ii = (lower ? (blockIdx.x - 1) : (blockIdx.y + gridDim.y)); int jj = (lower ? (blockIdx.y ) : (blockIdx.x + gridDim.y)); ii *= NB; jj *= NB; double *A = matrix + ii+i + (jj+j)*lda; if ( ii == jj ) { if ( ii+i < n && jj+j < n ) { sA[j][i] = *A; } __syncthreads(); if ( ii+i < n && jj+j < n ) { *A = sA[i][j]; } } else { double *B = matrix + jj+i + (ii+j)*lda; if ( ii+i < n && jj+j < n ) { sA[j][i] = *A; } if ( jj+i < n && ii+j < n ) { sB[j][i] = *B; } __syncthreads(); if ( ii+i < n && jj+j < n ) { *A = sB[i][j]; } if ( jj+i < n && ii+j < n ) { *B = sA[i][j]; } } } /***************************************************************************//** Purpose ------- dtranspose_inplace_q transposes a square N-by-N matrix in-place. Same as dtranspose_inplace, but adds queue argument. Arguments --------- @param[in] n INTEGER The number of rows & columns of the matrix dA. N >= 0. @param[in] dA DOUBLE PRECISION array, dimension (LDDA,N) The N-by-N matrix dA. On exit, dA(j,i) = dA_original(i,j), for 0 <= i,j < N. @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= N. 
@param[in] queue magma_queue_t Queue to execute in. @ingroup magma_transpose *******************************************************************************/ extern "C" void magmablas_dtranspose_inplace_q( magma_int_t n, magmaDouble_ptr dA, magma_int_t ldda, magma_queue_t queue ) { magma_int_t info = 0; if ( n < 0 ) info = -1; else if ( ldda < n ) info = -3; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; //info; } dim3 threads( NB, NB ); int nblock = magma_ceildiv( n, NB ); // need 1/2 * (nblock+1) * nblock to cover lower triangle and diagonal of matrix. // block assignment differs depending on whether nblock is odd or even. if ( nblock % 2 == 1 ) { dim3 grid( nblock, (nblock+1)/2 ); dtranspose_inplace_odd<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA, ldda ); } else { dim3 grid( nblock+1, nblock/2 ); dtranspose_inplace_even<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA, ldda ); } }
e38be11c1dfedd73a40cc47e69c292e0e56ea82e.hip
// !!! This is a file automatically generated by hipify!!! #ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "THH/generic/THHStorageCopy.hip" #else // conversions are delegated to THCTensor implementation #define THC_CUDA_STORAGE_IMPLEMENT_COPY(TYPEC, TYPECUDA) \ void THCStorage_(copyCuda##TYPEC)( \ THCState * state, \ THCStorage * self, \ struct THCuda##TYPECUDA##Storage * src) { \ size_t self_numel = self->nbytes() / sizeof(scalar_t); \ size_t src_numel = \ src->nbytes() / THCuda##TYPECUDA##Storage_elementSize(state); \ THArgCheck(self_numel == src_numel, 2, "size does not match"); \ at::Tensor selfTensor = tensor_reclaim( \ THCTensor_(newWithStorage1d)(state, self, 0, self_numel, 1)); \ at::Tensor srcTensor = tensor_reclaim( \ THCuda##TYPECUDA##Tensor_newWithStorage1d( \ state, src, 0, src_numel, 1)); \ selfTensor.copy_(srcTensor); \ } #if !defined(THC_REAL_IS_COMPLEXFLOAT) && !defined(THC_REAL_IS_COMPLEXDOUBLE) THC_CUDA_STORAGE_IMPLEMENT_COPY(Byte,Byte) THC_CUDA_STORAGE_IMPLEMENT_COPY(Char,Char) THC_CUDA_STORAGE_IMPLEMENT_COPY(Short,Short) THC_CUDA_STORAGE_IMPLEMENT_COPY(Int,Int) THC_CUDA_STORAGE_IMPLEMENT_COPY(Long,Long) THC_CUDA_STORAGE_IMPLEMENT_COPY(Float,) // i.e. float THC_CUDA_STORAGE_IMPLEMENT_COPY(Double,Double) THC_CUDA_STORAGE_IMPLEMENT_COPY(Half,Half) THC_CUDA_STORAGE_IMPLEMENT_COPY(Bool,Bool) THC_CUDA_STORAGE_IMPLEMENT_COPY(BFloat16,BFloat16) #else THC_CUDA_STORAGE_IMPLEMENT_COPY(ComplexFloat,ComplexFloat) THC_CUDA_STORAGE_IMPLEMENT_COPY(ComplexDouble,ComplexDouble) #endif #undef THC_CUDA_STORAGE_IMPLEMENT_COPY void THCStorage_(copyCuda)(THCState *state, THCStorage *self, THCStorage *src) { THCStorage_(TH_CONCAT_2(copyCuda, Real))(state, self, src); } void THCStorage_(copy)(THCState *state, THCStorage *self, THCStorage *src) { THCStorage_(copyCuda)(state, self, src); } #endif
e38be11c1dfedd73a40cc47e69c292e0e56ea82e.cu
#ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "THC/generic/THCStorageCopy.cu" #else // conversions are delegated to THCTensor implementation #define THC_CUDA_STORAGE_IMPLEMENT_COPY(TYPEC, TYPECUDA) \ void THCStorage_(copyCuda##TYPEC)( \ THCState * state, \ THCStorage * self, \ struct THCuda##TYPECUDA##Storage * src) { \ size_t self_numel = self->nbytes() / sizeof(scalar_t); \ size_t src_numel = \ src->nbytes() / THCuda##TYPECUDA##Storage_elementSize(state); \ THArgCheck(self_numel == src_numel, 2, "size does not match"); \ at::Tensor selfTensor = tensor_reclaim( \ THCTensor_(newWithStorage1d)(state, self, 0, self_numel, 1)); \ at::Tensor srcTensor = tensor_reclaim( \ THCuda##TYPECUDA##Tensor_newWithStorage1d( \ state, src, 0, src_numel, 1)); \ selfTensor.copy_(srcTensor); \ } #if !defined(THC_REAL_IS_COMPLEXFLOAT) && !defined(THC_REAL_IS_COMPLEXDOUBLE) THC_CUDA_STORAGE_IMPLEMENT_COPY(Byte,Byte) THC_CUDA_STORAGE_IMPLEMENT_COPY(Char,Char) THC_CUDA_STORAGE_IMPLEMENT_COPY(Short,Short) THC_CUDA_STORAGE_IMPLEMENT_COPY(Int,Int) THC_CUDA_STORAGE_IMPLEMENT_COPY(Long,Long) THC_CUDA_STORAGE_IMPLEMENT_COPY(Float,) // i.e. float THC_CUDA_STORAGE_IMPLEMENT_COPY(Double,Double) THC_CUDA_STORAGE_IMPLEMENT_COPY(Half,Half) THC_CUDA_STORAGE_IMPLEMENT_COPY(Bool,Bool) THC_CUDA_STORAGE_IMPLEMENT_COPY(BFloat16,BFloat16) #else THC_CUDA_STORAGE_IMPLEMENT_COPY(ComplexFloat,ComplexFloat) THC_CUDA_STORAGE_IMPLEMENT_COPY(ComplexDouble,ComplexDouble) #endif #undef THC_CUDA_STORAGE_IMPLEMENT_COPY void THCStorage_(copyCuda)(THCState *state, THCStorage *self, THCStorage *src) { THCStorage_(TH_CONCAT_2(copyCuda, Real))(state, self, src); } void THCStorage_(copy)(THCState *state, THCStorage *self, THCStorage *src) { THCStorage_(copyCuda)(state, self, src); } #endif
c437a497c68d5c47a767853948423232ce4ef71b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include "book.h" #include <stdio.h> __global__ void add(int a, int b, int *c, int *d, int *e, int *f){ int blocksPerGrid = gridDim.x; int threadsPerBlock = blockDim.x; int totalThreadNum = gridDim.x * blockDim.x;; int curThreadIdx = (blockIdx.x * blockDim.x) + threadIdx.x; *c = blocksPerGrid; *d = threadsPerBlock; *e = totalThreadNum; *f = curThreadIdx; } int main(void){ int c,d,e,f; int *dev_c, *dev_d, *dev_e, *dev_f; hipMalloc( (void**)&dev_c, sizeof(int) ); hipMalloc( (void**)&dev_d, sizeof(int) ); hipMalloc( (void**)&dev_e, sizeof(int) ); hipMalloc( (void**)&dev_f, sizeof(int) ); hipLaunchKernelGGL(( add), dim3(5),dim3(100), 0, 0, 2,7,dev_c, dev_d, dev_e, dev_f) ; hipMemcpy( &c, dev_c, sizeof(int), hipMemcpyDeviceToHost); hipMemcpy( &d, dev_d, sizeof(int), hipMemcpyDeviceToHost); hipMemcpy( &e, dev_e, sizeof(int), hipMemcpyDeviceToHost); hipMemcpy( &f, dev_f, sizeof(int), hipMemcpyDeviceToHost); printf("blocks Per Grid : %d\n",c); printf("threads per block: %d\n",d); printf("total number of threads: %d\n",e); printf("Current Thread Index: %d\n",f); hipFree(dev_c); hipFree(dev_d); hipFree(dev_e); hipFree(dev_f); return 0; }
c437a497c68d5c47a767853948423232ce4ef71b.cu
#include <iostream> #include "book.h" #include <stdio.h> __global__ void add(int a, int b, int *c, int *d, int *e, int *f){ int blocksPerGrid = gridDim.x; int threadsPerBlock = blockDim.x; int totalThreadNum = gridDim.x * blockDim.x;; int curThreadIdx = (blockIdx.x * blockDim.x) + threadIdx.x; *c = blocksPerGrid; *d = threadsPerBlock; *e = totalThreadNum; *f = curThreadIdx; } int main(void){ int c,d,e,f; int *dev_c, *dev_d, *dev_e, *dev_f; cudaMalloc( (void**)&dev_c, sizeof(int) ); cudaMalloc( (void**)&dev_d, sizeof(int) ); cudaMalloc( (void**)&dev_e, sizeof(int) ); cudaMalloc( (void**)&dev_f, sizeof(int) ); add<<<5,100>>>(2,7,dev_c, dev_d, dev_e, dev_f) ; cudaMemcpy( &c, dev_c, sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy( &d, dev_d, sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy( &e, dev_e, sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy( &f, dev_f, sizeof(int), cudaMemcpyDeviceToHost); printf("blocks Per Grid : %d\n",c); printf("threads per block: %d\n",d); printf("total number of threads: %d\n",e); printf("Current Thread Index: %d\n",f); cudaFree(dev_c); cudaFree(dev_d); cudaFree(dev_e); cudaFree(dev_f); return 0; }
be72a845882ba3bdb246e1573f072f21b7a33335.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) Facebook, Inc. and its affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. #include <stdio.h> #include "../EnumO.h" MY_ENUM_O(Fruits, int, (apple, banana, pear)); __global__ void add(int n, float a, float* x) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) { x[i] = a * x[i]; } } int main(int argc, char** args) { int N = 1 << 20; float *x, *d_x; x = (float*)malloc(N * sizeof(float)); hipMalloc(&d_x, N * sizeof(float)); for (int i = 0; i < N; i++) { x[i] = 1.0f; } hipMemcpy(d_x, x, N * sizeof(float), hipMemcpyHostToDevice); hipLaunchKernelGGL(( add), dim3((N + 255) / 256), dim3(256), 0, 0, N, 2.0f, d_x); hipFree(d_x); free(x); }
be72a845882ba3bdb246e1573f072f21b7a33335.cu
// Copyright (c) Facebook, Inc. and its affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. #include <stdio.h> #include "../EnumO.h" MY_ENUM_O(Fruits, int, (apple, banana, pear)); __global__ void add(int n, float a, float* x) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) { x[i] = a * x[i]; } } int main(int argc, char** args) { int N = 1 << 20; float *x, *d_x; x = (float*)malloc(N * sizeof(float)); cudaMalloc(&d_x, N * sizeof(float)); for (int i = 0; i < N; i++) { x[i] = 1.0f; } cudaMemcpy(d_x, x, N * sizeof(float), cudaMemcpyHostToDevice); add<<<(N + 255) / 256, 256>>>(N, 2.0f, d_x); cudaFree(d_x); free(x); }
bbbfe5d76fbd4d637faa4ee606761970bf02864f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 2020 Patrick Stotko * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <iostream> #include <thrust/copy.h> #include <thrust/reduce.h> #include <thrust/sequence.h> #include <stdgpu/iterator.h> // device_begin, device_end #include <stdgpu/memory.h> // createDeviceArray, destroyDeviceArray #include <stdgpu/platform.h> // STDGPU_HOST_DEVICE #include <stdgpu/unordered_map.cuh> // stdgpu::unordered_map struct is_odd { STDGPU_HOST_DEVICE bool operator()(const int x) const { return x % 2 == 1; } }; struct square { STDGPU_HOST_DEVICE int operator()(const int x) const { return x * x; } }; struct int_pair_plus { STDGPU_HOST_DEVICE stdgpu::pair<int, int> operator()(const stdgpu::pair<int, int>& lhs, const stdgpu::pair<int, int>& rhs) const { return { lhs.first + rhs.first, lhs.second + rhs.second }; } }; __global__ void insert_neighbors(const int* d_result, const stdgpu::index_t n, stdgpu::unordered_map<int, int> map) { stdgpu::index_t i = static_cast<stdgpu::index_t>(blockIdx.x * blockDim.x + threadIdx.x); if (i >= n) return; int num = d_result[i]; int num_neighborhood[3] = { num - 1, num, num + 1 }; for (int num_neighbor : num_neighborhood) { map.emplace(num_neighbor, square()(num_neighbor)); } } int main() { // // EXAMPLE DESCRIPTION // ------------------- // This example demonstrates how stdgpu::unordered_map is used to compute a duplicate-free set of numbers. 
// const stdgpu::index_t n = 100; int* d_input = createDeviceArray<int>(n); int* d_result = createDeviceArray<int>(n / 2); stdgpu::unordered_map<int, int> map = stdgpu::unordered_map<int, int>::createDeviceObject(n); thrust::sequence(stdgpu::device_begin(d_input), stdgpu::device_end(d_input), 1); // d_input : 1, 2, 3, ..., 100 thrust::copy_if(stdgpu::device_cbegin(d_input), stdgpu::device_cend(d_input), stdgpu::device_begin(d_result), is_odd()); // d_result : 1, 3, 5, ..., 99 stdgpu::index_t threads = 32; stdgpu::index_t blocks = (n / 2 + threads - 1) / threads; hipLaunchKernelGGL(( insert_neighbors), dim3(static_cast<unsigned int>(blocks)), dim3(static_cast<unsigned int>(threads)), 0, 0, d_result, n / 2, map); hipDeviceSynchronize(); // map : 0, 1, 2, 3, ..., 100 auto range_map = map.device_range(); stdgpu::pair<int, int> sum = thrust::reduce(range_map.begin(), range_map.end(), stdgpu::pair<int, int>(0, 0), int_pair_plus()); const stdgpu::pair<int, int> sum_closed_form = { n * (n + 1) / 2, n * (n + 1) * (2 * n + 1) / 6 }; std::cout << "The duplicate-free map of numbers contains " << map.size() << " elements (" << n + 1 << " expected) and the computed sums are (" << sum.first << ", " << sum.second << ") ((" << sum_closed_form.first << ", " << sum_closed_form.second << ") expected)" << std::endl; destroyDeviceArray<int>(d_input); destroyDeviceArray<int>(d_result); stdgpu::unordered_map<int, int>::destroyDeviceObject(map); }
bbbfe5d76fbd4d637faa4ee606761970bf02864f.cu
/* * Copyright 2020 Patrick Stotko * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <iostream> #include <thrust/copy.h> #include <thrust/reduce.h> #include <thrust/sequence.h> #include <stdgpu/iterator.h> // device_begin, device_end #include <stdgpu/memory.h> // createDeviceArray, destroyDeviceArray #include <stdgpu/platform.h> // STDGPU_HOST_DEVICE #include <stdgpu/unordered_map.cuh> // stdgpu::unordered_map struct is_odd { STDGPU_HOST_DEVICE bool operator()(const int x) const { return x % 2 == 1; } }; struct square { STDGPU_HOST_DEVICE int operator()(const int x) const { return x * x; } }; struct int_pair_plus { STDGPU_HOST_DEVICE stdgpu::pair<int, int> operator()(const stdgpu::pair<int, int>& lhs, const stdgpu::pair<int, int>& rhs) const { return { lhs.first + rhs.first, lhs.second + rhs.second }; } }; __global__ void insert_neighbors(const int* d_result, const stdgpu::index_t n, stdgpu::unordered_map<int, int> map) { stdgpu::index_t i = static_cast<stdgpu::index_t>(blockIdx.x * blockDim.x + threadIdx.x); if (i >= n) return; int num = d_result[i]; int num_neighborhood[3] = { num - 1, num, num + 1 }; for (int num_neighbor : num_neighborhood) { map.emplace(num_neighbor, square()(num_neighbor)); } } int main() { // // EXAMPLE DESCRIPTION // ------------------- // This example demonstrates how stdgpu::unordered_map is used to compute a duplicate-free set of numbers. 
// const stdgpu::index_t n = 100; int* d_input = createDeviceArray<int>(n); int* d_result = createDeviceArray<int>(n / 2); stdgpu::unordered_map<int, int> map = stdgpu::unordered_map<int, int>::createDeviceObject(n); thrust::sequence(stdgpu::device_begin(d_input), stdgpu::device_end(d_input), 1); // d_input : 1, 2, 3, ..., 100 thrust::copy_if(stdgpu::device_cbegin(d_input), stdgpu::device_cend(d_input), stdgpu::device_begin(d_result), is_odd()); // d_result : 1, 3, 5, ..., 99 stdgpu::index_t threads = 32; stdgpu::index_t blocks = (n / 2 + threads - 1) / threads; insert_neighbors<<<static_cast<unsigned int>(blocks), static_cast<unsigned int>(threads)>>>(d_result, n / 2, map); cudaDeviceSynchronize(); // map : 0, 1, 2, 3, ..., 100 auto range_map = map.device_range(); stdgpu::pair<int, int> sum = thrust::reduce(range_map.begin(), range_map.end(), stdgpu::pair<int, int>(0, 0), int_pair_plus()); const stdgpu::pair<int, int> sum_closed_form = { n * (n + 1) / 2, n * (n + 1) * (2 * n + 1) / 6 }; std::cout << "The duplicate-free map of numbers contains " << map.size() << " elements (" << n + 1 << " expected) and the computed sums are (" << sum.first << ", " << sum.second << ") ((" << sum_closed_form.first << ", " << sum_closed_form.second << ") expected)" << std::endl; destroyDeviceArray<int>(d_input); destroyDeviceArray<int>(d_result); stdgpu::unordered_map<int, int>::destroyDeviceObject(map); }
8dcbe8da290b1d2ff7b0c4d6e22b63736c2c9353.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include "Thinning.h" #include "ErrorCode.h" #include "Image.h" using namespace std; int main(int argc, char const **argv) { if(argc < 2) { cout << "Please input image!" << endl; return 0; } Thinning thin; Image *inimg; ImageBasicOp::newImage(&inimg); int errcode; errcode = ImageBasicOp::readFromFile(argv[1], inimg); if (errcode != NO_ERROR) { cout << "error: " << errcode << endl; return 0; } for(int i = 0; i < inimg->width * inimg->height; i++) if(inimg->imgData[i] != 0) inimg->imgData[i] = 255; Image *outimg1; ImageBasicOp::newImage(&outimg1); ImageBasicOp::makeAtHost(outimg1, inimg->width, inimg->height); hipEvent_t start, stop; float runTime; warmup(); // hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); thin.thinKwon(inimg, outimg1); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&runTime, start, stop); cout << "thin() time is " << runTime << " ms" << endl; ImageBasicOp::copyToHost(outimg1); ImageBasicOp::writeToFile("thin_outimg.bmp", outimg1); ImageBasicOp::deleteImage(inimg); ImageBasicOp::deleteImage(outimg1); return 0; }
8dcbe8da290b1d2ff7b0c4d6e22b63736c2c9353.cu
#include <iostream> #include "Thinning.h" #include "ErrorCode.h" #include "Image.h" using namespace std; int main(int argc, char const **argv) { if(argc < 2) { cout << "Please input image!" << endl; return 0; } Thinning thin; Image *inimg; ImageBasicOp::newImage(&inimg); int errcode; errcode = ImageBasicOp::readFromFile(argv[1], inimg); if (errcode != NO_ERROR) { cout << "error: " << errcode << endl; return 0; } for(int i = 0; i < inimg->width * inimg->height; i++) if(inimg->imgData[i] != 0) inimg->imgData[i] = 255; Image *outimg1; ImageBasicOp::newImage(&outimg1); ImageBasicOp::makeAtHost(outimg1, inimg->width, inimg->height); cudaEvent_t start, stop; float runTime; warmup(); // 直接并行 cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); thin.thinKwon(inimg, outimg1); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&runTime, start, stop); cout << "thin() time is " << runTime << " ms" << endl; ImageBasicOp::copyToHost(outimg1); ImageBasicOp::writeToFile("thin_outimg.bmp", outimg1); ImageBasicOp::deleteImage(inimg); ImageBasicOp::deleteImage(outimg1); return 0; }
a2a72f4ac1af06b2a13bad820970254f6a3b0282.hip
// !!! This is a file automatically generated by hipify!!! #include <torch/tensor.h> #include <ATen/ATen.h> #include <ATen/hip/HIPContext.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> namespace { #define CUDA_1D_KERNEL_LOOP(i, n) \ for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \ i += blockDim.x * gridDim.x) // The number of cuda threads to use. 512 is used for backward compatibility constexpr int ROI_CUDA_NUM_THREADS = 512; // The maximum number of blocks to use in the default kernel call. constexpr int ROI_MAXIMUM_NUM_BLOCKS = 4096; /** * @brief Compute the number of blocks needed to run N threads. */ inline int ROI_GET_BLOCKS(const int N) { return ::max( ::min( (N + ROI_CUDA_NUM_THREADS - 1) / ROI_CUDA_NUM_THREADS, ROI_MAXIMUM_NUM_BLOCKS), // Use at least 1 block, since CUDA does not allow empty block 1); } template <typename T> __device__ T bilinear_interpolate( const T* bottom_data, const int height, const int width, T y, T x, const int index /* index for debug only*/) { // deal with cases that inverse elements are out of feature map boundary if (y < -1.0 || y > height || x < -1.0 || x > width) { // empty return 0; } if (y <= 0) { y = 0; } if (x <= 0) { x = 0; } int y_low = static_cast<int>(y); int x_low = static_cast<int>(x); int y_high; int x_high; if (y_low >= height - 1) { y_high = y_low = height - 1; y = (T)y_low; } else { y_high = y_low + 1; } if (x_low >= width - 1) { x_high = x_low = width - 1; x = (T)x_low; } else { x_high = x_low + 1; } T ly = y - y_low; T lx = x - x_low; T hy = 1. - ly, hx = 1. 
- lx; // do bilinear interpolation T v1 = bottom_data[y_low * width + x_low]; T v2 = bottom_data[y_low * width + x_high]; T v3 = bottom_data[y_high * width + x_low]; T v4 = bottom_data[y_high * width + x_high]; T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); return val; } template <typename T> __global__ void RoIAlignForwardKernel( const int nthreads, const T* bottom_data, const T spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int sampling_ratio, const T* bottom_rois, T* top_data) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; const T* offset_bottom_rois = bottom_rois + n * 5; int roi_batch_ind = offset_bottom_rois[0]; // Do not using rounding; this implementation detail is critical T roi_start_w = offset_bottom_rois[1] * spatial_scale; T roi_start_h = offset_bottom_rois[2] * spatial_scale; T roi_end_w = offset_bottom_rois[3] * spatial_scale; T roi_end_h = offset_bottom_rois[4] * spatial_scale; // Force malformed ROIs to be 1x1 T roi_width = max(roi_end_w - roi_start_w, (T)1.); T roi_height = max(roi_end_h - roi_start_h, (T)1.); T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height); T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width); const T* offset_bottom_data = bottom_data + (roi_batch_ind * channels + c) * height * width; // We use roi_bin_grid to sample the grid and mimic integral int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2 int roi_bin_grid_w = (sampling_ratio > 0) ? 
sampling_ratio : ceil(roi_width / pooled_width); // We do average (integral) pooling inside a bin const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4 T output_val = 0.; for (int iy = 0; iy < roi_bin_grid_h; iy++) { // e.g., iy = 0, 1 const T y = roi_start_h + ph * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5 for (int ix = 0; ix < roi_bin_grid_w; ix++) { const T x = roi_start_w + pw * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w); T val = bilinear_interpolate( offset_bottom_data, height, width, y, x, index); output_val += val; } } output_val /= count; top_data[index] = output_val; } } template <typename T> __device__ void bilinear_interpolate_gradient( const int height, const int width, T y, T x, T* w1, T* w2, T* w3, T* w4, int* x_low, int* x_high, int* y_low, int* y_high, const int /*index*/ /* index for debug only*/) { // deal with cases that inverse elements are out of feature map boundary if (y < -1.0 || y > height || x < -1.0 || x > width) { // empty *w1 = *w2 = *w3 = *w4 = 0.; *x_low = *x_high = *y_low = *y_high = -1; return; } if (y <= 0) { y = 0; } if (x <= 0) { x = 0; } *y_low = static_cast<int>(y); *x_low = static_cast<int>(x); if (*y_low >= height - 1) { *y_high = *y_low = height - 1; y = (T)*y_low; } else { *y_high = *y_low + 1; } if (*x_low >= width - 1) { *x_high = *x_low = width - 1; x = (T)*x_low; } else { *x_high = *x_low + 1; } T ly = y - *y_low; T lx = x - *x_low; T hy = 1. - ly, hx = 1. 
- lx; // reference in forward *w1 = hy * hx, *w2 = hy * lx, *w3 = ly * hx, *w4 = ly * lx; return; } template <typename T> inline __device__ T gpu_atomic_add(const T val, T* address); template <> inline __device__ float gpu_atomic_add(const float val, float* address) { return atomicAdd(address, val); } template <> inline __device__ double gpu_atomic_add(const double val, double* address) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull; unsigned long long int assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN) } while (assumed != old); return val; } template <typename T> __global__ void RoIAlignBackwardKernel( const int nthreads, const T* top_diff, const int num_rois, const T spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int sampling_ratio, T* bottom_diff, const T* bottom_rois) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; const T* offset_bottom_rois = bottom_rois + n * 5; int roi_batch_ind = offset_bottom_rois[0]; // Do not using rounding; this implementation detail is critical T roi_start_w = offset_bottom_rois[1] * spatial_scale; T roi_start_h = offset_bottom_rois[2] * spatial_scale; T roi_end_w = offset_bottom_rois[3] * spatial_scale; T roi_end_h = offset_bottom_rois[4] * spatial_scale; // Force malformed ROIs to be 1x1 T roi_width = max(roi_end_w - roi_start_w, (T)1.); T roi_height = max(roi_end_h - roi_start_h, (T)1.); T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height); T bin_size_w = 
static_cast<T>(roi_width) / static_cast<T>(pooled_width); T* offset_bottom_diff = bottom_diff + (roi_batch_ind * channels + c) * height * width; int top_offset = (n * channels + c) * pooled_height * pooled_width; const T* offset_top_diff = top_diff + top_offset; const T top_diff_this_bin = offset_top_diff[ph * pooled_width + pw]; // We use roi_bin_grid to sample the grid and mimic integral int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2 int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); // We do average (integral) pooling inside a bin const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4 for (int iy = 0; iy < roi_bin_grid_h; iy++) { // e.g., iy = 0, 1 const T y = roi_start_h + ph * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5 for (int ix = 0; ix < roi_bin_grid_w; ix++) { const T x = roi_start_w + pw * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w); T w1, w2, w3, w4; int x_low, x_high, y_low, y_high; bilinear_interpolate_gradient( height, width, y, x, &w1, &w2, &w3, &w4, &x_low, &x_high, &y_low, &y_high, index); T g1 = top_diff_this_bin * w1 / count; T g2 = top_diff_this_bin * w2 / count; T g3 = top_diff_this_bin * w3 / count; T g4 = top_diff_this_bin * w4 / count; if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) { /* atomicAdd( offset_bottom_diff + y_low * width + x_low, static_cast<T>(g1)); atomicAdd( offset_bottom_diff + y_low * width + x_high, static_cast<T>(g2)); atomicAdd( offset_bottom_diff + y_high * width + x_low, static_cast<T>(g3)); atomicAdd( offset_bottom_diff + y_high * width + x_high, static_cast<T>(g4)); */ gpu_atomic_add( static_cast<T>(g1), offset_bottom_diff + y_low * width + x_low); gpu_atomic_add( static_cast<T>(g2), offset_bottom_diff + y_low * width + x_high); gpu_atomic_add( static_cast<T>(g3), offset_bottom_diff + y_high * width + 
x_low); gpu_atomic_add( static_cast<T>(g4), offset_bottom_diff + y_high * width + x_high); } // if } // ix } // iy } // CUDA_1D_KERNEL_LOOP } // RoIAlignBackward } // namespace at::Tensor ROIAlign_Forward_CUDA( const at::Tensor input, const at::Tensor rois, int64_t pooled_height, int64_t pooled_width, double spatial_scale, int64_t sampling_ratio) { AT_ASSERT(input.is_contiguous()); AT_ASSERT(rois.is_contiguous()); AT_ASSERT(input.ndimension() == 4); AT_ASSERT(rois.ndimension() == 2); AT_ASSERT(rois.size(1) == 5); auto proposals = rois.size(0); auto channels = input.size(1); auto height = input.size(2); auto width = input.size(3); // Output Tensor is (num_rois, C, pooled_height, pooled_width) auto output = torch::zeros({proposals, channels, pooled_height, pooled_width}, input.options()); auto count = output.numel(); AT_DISPATCH_FLOATING_TYPES(input.type(), "ROIAlign_Forward_CUDA", ([&] { hipLaunchKernelGGL(( RoIAlignForwardKernel<scalar_t>) , dim3(ROI_GET_BLOCKS(count)), dim3(ROI_CUDA_NUM_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), count, input.data<scalar_t>(), static_cast<scalar_t>(spatial_scale), channels, height, width, pooled_height, pooled_width, sampling_ratio, rois.data<scalar_t>(), output.data<scalar_t>()); })); AT_ASSERT(hipGetLastError() == hipSuccess); return output; } at::Tensor ROIAlign_Backward_CUDA( const at::Tensor rois, const at::Tensor grad_output, int64_t b_size, int64_t channels, int64_t height, int64_t width, int64_t pooled_height, int64_t pooled_width, double spatial_scale, int64_t sampling_ratio) { AT_ASSERT(rois.is_contiguous()); AT_ASSERT(rois.ndimension() == 2); AT_ASSERT(rois.size(1) == 5); auto roi_cols = rois.size(1); AT_ASSERT(roi_cols == 4 || roi_cols == 5); // Output Tensor is (num_rois, C, pooled_height, pooled_width) // gradient wrt input features auto grad_in = torch::zeros({b_size, channels, height, width}, rois.options()); auto num_rois = rois.size(0); auto count = grad_output.numel(); 
AT_DISPATCH_FLOATING_TYPES(rois.type(), "ROIAlign_Backward_CUDA", ([&] { hipLaunchKernelGGL(( RoIAlignBackwardKernel<scalar_t>) , dim3(ROI_GET_BLOCKS(count)), dim3(ROI_CUDA_NUM_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), count, grad_output.data<scalar_t>(), num_rois, static_cast<scalar_t>(spatial_scale), channels, height, width, pooled_height, pooled_width, sampling_ratio, grad_in.data<scalar_t>(), rois.data<scalar_t>()); })); AT_ASSERT(hipGetLastError() == hipSuccess); return grad_in; }
a2a72f4ac1af06b2a13bad820970254f6a3b0282.cu
#include <torch/tensor.h> #include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include <cuda.h> #include <cuda_runtime.h> namespace { #define CUDA_1D_KERNEL_LOOP(i, n) \ for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \ i += blockDim.x * gridDim.x) // The number of cuda threads to use. 512 is used for backward compatibility constexpr int ROI_CUDA_NUM_THREADS = 512; // The maximum number of blocks to use in the default kernel call. constexpr int ROI_MAXIMUM_NUM_BLOCKS = 4096; /** * @brief Compute the number of blocks needed to run N threads. */ inline int ROI_GET_BLOCKS(const int N) { return std::max( std::min( (N + ROI_CUDA_NUM_THREADS - 1) / ROI_CUDA_NUM_THREADS, ROI_MAXIMUM_NUM_BLOCKS), // Use at least 1 block, since CUDA does not allow empty block 1); } template <typename T> __device__ T bilinear_interpolate( const T* bottom_data, const int height, const int width, T y, T x, const int index /* index for debug only*/) { // deal with cases that inverse elements are out of feature map boundary if (y < -1.0 || y > height || x < -1.0 || x > width) { // empty return 0; } if (y <= 0) { y = 0; } if (x <= 0) { x = 0; } int y_low = static_cast<int>(y); int x_low = static_cast<int>(x); int y_high; int x_high; if (y_low >= height - 1) { y_high = y_low = height - 1; y = (T)y_low; } else { y_high = y_low + 1; } if (x_low >= width - 1) { x_high = x_low = width - 1; x = (T)x_low; } else { x_high = x_low + 1; } T ly = y - y_low; T lx = x - x_low; T hy = 1. - ly, hx = 1. 
- lx; // do bilinear interpolation T v1 = bottom_data[y_low * width + x_low]; T v2 = bottom_data[y_low * width + x_high]; T v3 = bottom_data[y_high * width + x_low]; T v4 = bottom_data[y_high * width + x_high]; T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); return val; } template <typename T> __global__ void RoIAlignForwardKernel( const int nthreads, const T* bottom_data, const T spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int sampling_ratio, const T* bottom_rois, T* top_data) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; const T* offset_bottom_rois = bottom_rois + n * 5; int roi_batch_ind = offset_bottom_rois[0]; // Do not using rounding; this implementation detail is critical T roi_start_w = offset_bottom_rois[1] * spatial_scale; T roi_start_h = offset_bottom_rois[2] * spatial_scale; T roi_end_w = offset_bottom_rois[3] * spatial_scale; T roi_end_h = offset_bottom_rois[4] * spatial_scale; // Force malformed ROIs to be 1x1 T roi_width = max(roi_end_w - roi_start_w, (T)1.); T roi_height = max(roi_end_h - roi_start_h, (T)1.); T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height); T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width); const T* offset_bottom_data = bottom_data + (roi_batch_ind * channels + c) * height * width; // We use roi_bin_grid to sample the grid and mimic integral int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2 int roi_bin_grid_w = (sampling_ratio > 0) ? 
sampling_ratio : ceil(roi_width / pooled_width); // We do average (integral) pooling inside a bin const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4 T output_val = 0.; for (int iy = 0; iy < roi_bin_grid_h; iy++) { // e.g., iy = 0, 1 const T y = roi_start_h + ph * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5 for (int ix = 0; ix < roi_bin_grid_w; ix++) { const T x = roi_start_w + pw * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w); T val = bilinear_interpolate( offset_bottom_data, height, width, y, x, index); output_val += val; } } output_val /= count; top_data[index] = output_val; } } template <typename T> __device__ void bilinear_interpolate_gradient( const int height, const int width, T y, T x, T* w1, T* w2, T* w3, T* w4, int* x_low, int* x_high, int* y_low, int* y_high, const int /*index*/ /* index for debug only*/) { // deal with cases that inverse elements are out of feature map boundary if (y < -1.0 || y > height || x < -1.0 || x > width) { // empty *w1 = *w2 = *w3 = *w4 = 0.; *x_low = *x_high = *y_low = *y_high = -1; return; } if (y <= 0) { y = 0; } if (x <= 0) { x = 0; } *y_low = static_cast<int>(y); *x_low = static_cast<int>(x); if (*y_low >= height - 1) { *y_high = *y_low = height - 1; y = (T)*y_low; } else { *y_high = *y_low + 1; } if (*x_low >= width - 1) { *x_high = *x_low = width - 1; x = (T)*x_low; } else { *x_high = *x_low + 1; } T ly = y - *y_low; T lx = x - *x_low; T hy = 1. - ly, hx = 1. 
- lx; // reference in forward *w1 = hy * hx, *w2 = hy * lx, *w3 = ly * hx, *w4 = ly * lx; return; } template <typename T> inline __device__ T gpu_atomic_add(const T val, T* address); template <> inline __device__ float gpu_atomic_add(const float val, float* address) { return atomicAdd(address, val); } template <> inline __device__ double gpu_atomic_add(const double val, double* address) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull; unsigned long long int assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN) } while (assumed != old); return val; } template <typename T> __global__ void RoIAlignBackwardKernel( const int nthreads, const T* top_diff, const int num_rois, const T spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int sampling_ratio, T* bottom_diff, const T* bottom_rois) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; const T* offset_bottom_rois = bottom_rois + n * 5; int roi_batch_ind = offset_bottom_rois[0]; // Do not using rounding; this implementation detail is critical T roi_start_w = offset_bottom_rois[1] * spatial_scale; T roi_start_h = offset_bottom_rois[2] * spatial_scale; T roi_end_w = offset_bottom_rois[3] * spatial_scale; T roi_end_h = offset_bottom_rois[4] * spatial_scale; // Force malformed ROIs to be 1x1 T roi_width = max(roi_end_w - roi_start_w, (T)1.); T roi_height = max(roi_end_h - roi_start_h, (T)1.); T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height); T bin_size_w = 
static_cast<T>(roi_width) / static_cast<T>(pooled_width); T* offset_bottom_diff = bottom_diff + (roi_batch_ind * channels + c) * height * width; int top_offset = (n * channels + c) * pooled_height * pooled_width; const T* offset_top_diff = top_diff + top_offset; const T top_diff_this_bin = offset_top_diff[ph * pooled_width + pw]; // We use roi_bin_grid to sample the grid and mimic integral int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2 int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); // We do average (integral) pooling inside a bin const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4 for (int iy = 0; iy < roi_bin_grid_h; iy++) { // e.g., iy = 0, 1 const T y = roi_start_h + ph * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5 for (int ix = 0; ix < roi_bin_grid_w; ix++) { const T x = roi_start_w + pw * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w); T w1, w2, w3, w4; int x_low, x_high, y_low, y_high; bilinear_interpolate_gradient( height, width, y, x, &w1, &w2, &w3, &w4, &x_low, &x_high, &y_low, &y_high, index); T g1 = top_diff_this_bin * w1 / count; T g2 = top_diff_this_bin * w2 / count; T g3 = top_diff_this_bin * w3 / count; T g4 = top_diff_this_bin * w4 / count; if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) { /* atomicAdd( offset_bottom_diff + y_low * width + x_low, static_cast<T>(g1)); atomicAdd( offset_bottom_diff + y_low * width + x_high, static_cast<T>(g2)); atomicAdd( offset_bottom_diff + y_high * width + x_low, static_cast<T>(g3)); atomicAdd( offset_bottom_diff + y_high * width + x_high, static_cast<T>(g4)); */ gpu_atomic_add( static_cast<T>(g1), offset_bottom_diff + y_low * width + x_low); gpu_atomic_add( static_cast<T>(g2), offset_bottom_diff + y_low * width + x_high); gpu_atomic_add( static_cast<T>(g3), offset_bottom_diff + y_high * width + 
x_low); gpu_atomic_add( static_cast<T>(g4), offset_bottom_diff + y_high * width + x_high); } // if } // ix } // iy } // CUDA_1D_KERNEL_LOOP } // RoIAlignBackward } // namespace at::Tensor ROIAlign_Forward_CUDA( const at::Tensor input, const at::Tensor rois, int64_t pooled_height, int64_t pooled_width, double spatial_scale, int64_t sampling_ratio) { AT_ASSERT(input.is_contiguous()); AT_ASSERT(rois.is_contiguous()); AT_ASSERT(input.ndimension() == 4); AT_ASSERT(rois.ndimension() == 2); AT_ASSERT(rois.size(1) == 5); auto proposals = rois.size(0); auto channels = input.size(1); auto height = input.size(2); auto width = input.size(3); // Output Tensor is (num_rois, C, pooled_height, pooled_width) auto output = torch::zeros({proposals, channels, pooled_height, pooled_width}, input.options()); auto count = output.numel(); AT_DISPATCH_FLOATING_TYPES(input.type(), "ROIAlign_Forward_CUDA", ([&] { RoIAlignForwardKernel<scalar_t> <<<ROI_GET_BLOCKS(count), ROI_CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream()>>>( count, input.data<scalar_t>(), static_cast<scalar_t>(spatial_scale), channels, height, width, pooled_height, pooled_width, sampling_ratio, rois.data<scalar_t>(), output.data<scalar_t>()); })); AT_ASSERT(cudaGetLastError() == cudaSuccess); return output; } at::Tensor ROIAlign_Backward_CUDA( const at::Tensor rois, const at::Tensor grad_output, int64_t b_size, int64_t channels, int64_t height, int64_t width, int64_t pooled_height, int64_t pooled_width, double spatial_scale, int64_t sampling_ratio) { AT_ASSERT(rois.is_contiguous()); AT_ASSERT(rois.ndimension() == 2); AT_ASSERT(rois.size(1) == 5); auto roi_cols = rois.size(1); AT_ASSERT(roi_cols == 4 || roi_cols == 5); // Output Tensor is (num_rois, C, pooled_height, pooled_width) // gradient wrt input features auto grad_in = torch::zeros({b_size, channels, height, width}, rois.options()); auto num_rois = rois.size(0); auto count = grad_output.numel(); AT_DISPATCH_FLOATING_TYPES(rois.type(), "ROIAlign_Backward_CUDA", 
([&] { RoIAlignBackwardKernel<scalar_t> <<<ROI_GET_BLOCKS(count), ROI_CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream()>>>( count, grad_output.data<scalar_t>(), num_rois, static_cast<scalar_t>(spatial_scale), channels, height, width, pooled_height, pooled_width, sampling_ratio, grad_in.data<scalar_t>(), rois.data<scalar_t>()); })); AT_ASSERT(cudaGetLastError() == cudaSuccess); return grad_in; }
f6004c08f0aabd50f22628e8896f39e416920b21.hip
// !!! This is a file automatically generated by hipify!!! #include <string.h> #include <stdio.h> #include <stdlib.h> #include <opencv2/highgui/highgui.hpp> #include <hip/hip_runtime.h> #include "posterize.h" #include "mode.h" #include "smooth.h" #include "serialposterize.c" #include "serialmode.c" #include <opencv2/objdetect/objdetect.hpp> #include <opencv2/highgui/highgui.hpp> #include <opencv2/imgproc/imgproc.hpp> int setupCam(); void process_image(IplImage* img, int colors, char* command, IplImage* out_img); int main(int argc, char **argv) { setupCam(); return 0; //uchar4 *h_image, *d_image; char* input_file; char* output_file; char* command; int colors; if (argc < 4) { printf("Provide command to run, input, and output files.\n"); exit(1); } if (argc == 5) { colors = atoi(argv[4]); } else { colors = 6; } command = argv[1]; input_file = argv[2]; output_file = argv[3]; IplImage* img = cvLoadImage(input_file, CV_LOAD_IMAGE_COLOR); IplImage* out_img = cvCreateImage(cvGetSize(img), img->depth, img->nChannels); process_image(img, colors, command, out_img); int p[3]; p[0] = CV_IMWRITE_JPEG_QUALITY; p[1] = 95; p[2] = 0; cvSaveImage(output_file, out_img, p); cvReleaseImage(&img); return 0; } void process_image(IplImage* img, int colors, char* command, IplImage* out_img) { size_t cols = img->width; size_t rows = img->height; int channels = img->nChannels; char* image_rgb; image_rgb = img->imageData; char* out_image_rgb; if (strcmp(command,"serial-posterize") == 0) { out_image_rgb = processSerialPosterize(image_rgb, cols, rows, channels, colors); } else if (strcmp(command, "posterize") == 0) { out_image_rgb = processPosterize(image_rgb, cols, rows, channels, colors); } else if (strcmp(command, "mode") == 0) { out_image_rgb = processMode(image_rgb, cols, rows, channels, colors); } else if (strcmp(command, "smooth") == 0) { out_image_rgb = processSmooth(image_rgb, cols, rows, channels, colors); } else if (strcmp(command, "serial-mode") == 0) { out_image_rgb = 
processSerialMode(image_rgb, cols, rows, channels, colors); } else { printf("Command '%s' is not valid\n", command); exit(1); } out_img->imageData = out_image_rgb; } using namespace cv; int setupCam() { CvCapture* capture = 0; Mat frame, frameCopy, image; capture = cvCaptureFromCAM( -1 ); if(!capture) printf("No camera detected\n"); cvNamedWindow( "result", CV_WINDOW_AUTOSIZE ); if (capture) { printf("In capture ...\n"); IplImage* out_img; for (;;) { IplImage* iplImg = cvQueryFrame(capture); out_img = cvCreateImage(cvGetSize(iplImg), iplImg->depth, iplImg->nChannels); process_image(iplImg, 6, "serial-mode", out_img); frame = iplImg; if (frame.empty()) break; if (iplImg->origin == IPL_ORIGIN_TL) frame.copyTo(frameCopy); else flip(frame, frameCopy, 0); cvShowImage("result", out_img); cvReleaseImage(&out_img); if (waitKey(10) >= 0) cvReleaseCapture(&capture); } waitKey(0); cvDestroyWindow("result"); return 0; } }
f6004c08f0aabd50f22628e8896f39e416920b21.cu
#include <string.h> #include <stdio.h> #include <stdlib.h> #include <opencv2/highgui/highgui.hpp> #include <cuda.h> #include "posterize.h" #include "mode.h" #include "smooth.h" #include "serialposterize.c" #include "serialmode.c" #include <opencv2/objdetect/objdetect.hpp> #include <opencv2/highgui/highgui.hpp> #include <opencv2/imgproc/imgproc.hpp> int setupCam(); void process_image(IplImage* img, int colors, char* command, IplImage* out_img); int main(int argc, char **argv) { setupCam(); return 0; //uchar4 *h_image, *d_image; char* input_file; char* output_file; char* command; int colors; if (argc < 4) { printf("Provide command to run, input, and output files.\n"); exit(1); } if (argc == 5) { colors = atoi(argv[4]); } else { colors = 6; } command = argv[1]; input_file = argv[2]; output_file = argv[3]; IplImage* img = cvLoadImage(input_file, CV_LOAD_IMAGE_COLOR); IplImage* out_img = cvCreateImage(cvGetSize(img), img->depth, img->nChannels); process_image(img, colors, command, out_img); int p[3]; p[0] = CV_IMWRITE_JPEG_QUALITY; p[1] = 95; p[2] = 0; cvSaveImage(output_file, out_img, p); cvReleaseImage(&img); return 0; } void process_image(IplImage* img, int colors, char* command, IplImage* out_img) { size_t cols = img->width; size_t rows = img->height; int channels = img->nChannels; char* image_rgb; image_rgb = img->imageData; char* out_image_rgb; if (strcmp(command,"serial-posterize") == 0) { out_image_rgb = processSerialPosterize(image_rgb, cols, rows, channels, colors); } else if (strcmp(command, "posterize") == 0) { out_image_rgb = processPosterize(image_rgb, cols, rows, channels, colors); } else if (strcmp(command, "mode") == 0) { out_image_rgb = processMode(image_rgb, cols, rows, channels, colors); } else if (strcmp(command, "smooth") == 0) { out_image_rgb = processSmooth(image_rgb, cols, rows, channels, colors); } else if (strcmp(command, "serial-mode") == 0) { out_image_rgb = processSerialMode(image_rgb, cols, rows, channels, colors); } else { printf("Command 
'%s' is not valid\n", command); exit(1); } out_img->imageData = out_image_rgb; } using namespace cv; int setupCam() { CvCapture* capture = 0; Mat frame, frameCopy, image; capture = cvCaptureFromCAM( -1 ); if(!capture) printf("No camera detected\n"); cvNamedWindow( "result", CV_WINDOW_AUTOSIZE ); if (capture) { printf("In capture ...\n"); IplImage* out_img; for (;;) { IplImage* iplImg = cvQueryFrame(capture); out_img = cvCreateImage(cvGetSize(iplImg), iplImg->depth, iplImg->nChannels); process_image(iplImg, 6, "serial-mode", out_img); frame = iplImg; if (frame.empty()) break; if (iplImg->origin == IPL_ORIGIN_TL) frame.copyTo(frameCopy); else flip(frame, frameCopy, 0); cvShowImage("result", out_img); cvReleaseImage(&out_img); if (waitKey(10) >= 0) cvReleaseCapture(&capture); } waitKey(0); cvDestroyWindow("result"); return 0; } }
032173958ba03cc82d0807be811e334bece23301.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver August 2013 @precisions normal s */ #include "common_magma.h" #include "commonblas_s.h" static __device__ void saxpy(float a,float *b, float *c) { c[0] += a * b[0]; c[1] += a * b[1]; c[2] += a * b[2]; c[3] += a * b[3]; c[4] += a * b[4]; c[5] += a * b[5]; c[6] += a * b[6]; c[7] += a * b[7]; c[8] += a * b[8]; c[9] += a * b[9]; c[10] += a * b[10]; c[11] += a * b[11]; c[12] += a * b[12]; c[13] += a * b[13]; c[14] += a * b[14]; c[15] += a * b[15]; } __global__ void sgemm_kernel_N_T_64_16_4_16_4(float *C, const float *A, const float *B, int m, int n, int k, int lda, int ldb, int ldc, float alpha, float beta) { /* -- MAGMA (version 1.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver August 2013 Purpose: ======== This routine computes C = alpha* A*B^T + beta * C B is put into shared memory Parameters Used: blk_M=64 blk_N=16 blk_K=4 nthd_x=16 nthd_y=4 This code should run for any matrix size. =============================================================== */ const int tx = threadIdx.x; const int ty = threadIdx.y; const int ibx = blockIdx.x * 64; const int iby = blockIdx.y *16; const int idt = ty * 16 + tx; if( iby + tx >=n ) B+= iby+0; else B+= iby+tx; /* Taking care of boundary cases where K<4. 
*/ if( ty >=k ) B+= __mul24( 0,ldb); else B+= __mul24( ty,ldb); if( ibx + idt >= m ) A += ibx + 0 ; else A += ibx + idt; int s2=lda, s3=2*lda, s4=3*lda ; switch (k){ case 1: s2=0; s3=0;s4=0 ; break ; case 2: s2=lda; s3=0;s4=0 ; break ; case 3: s2=lda; s3=2*lda;s4=0 ; break ; } C += ibx +idt +__mul24( iby,ldc); float Ap[4]={A[0], A[s2], A[s3], A[s4]}; float b=B[0]; const float *Bend = B + ldb*(k-k%4); B+=4*ldb; A+=4*lda; __shared__ float Bb[4][16]; float Cb[16] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}; if(k>7) do { float Ab[4] = {Ap[0], Ap[1], Ap[2], Ap[3]}; Bb[ty][tx]=b; __syncthreads(); Ap[0] = A[0]; Ap[1] = A[s2]; Ap[2] = A[s3]; Ap[3] = A[s4]; b=B[0]; saxpy(Ab[0], &Bb[0][0], Cb); saxpy(Ab[1], &Bb[1][0], Cb); saxpy(Ab[2], &Bb[2][0], Cb); saxpy(Ab[3], &Bb[3][0], Cb); A+=4*lda; B += 4*ldb; __syncthreads(); } while (B < Bend); if(k>3){ Bb[ty][tx]=b; int k1 = k-k%4; if( (k1+ty) >=k) B-=4*ldb; else B-=0*ldb; if( (k1+0) >= k ) {s2=0 ;s3=0*lda;s4=0;A-=4*lda;} else if( (k1+1) >= k ) {s2=0 ;s3=0*lda;s4=0;A-=0*lda;} else if( (k1+2) >= k ) {s2=lda;s3=0*lda;s4=0;A-=0*lda;} else if( (k1+3) >= k ) {s2=lda;s3=2*lda;s4=0;A-=0*lda;} __syncthreads(); b=B[0]; saxpy(Ap[0], &Bb[0][0], Cb); Ap[0] = A[0]; saxpy(Ap[1], &Bb[1][0], Cb); Ap[1] = A[s2]; saxpy(Ap[2], &Bb[2][0], Cb); Ap[2] = A[s3]; saxpy(Ap[3], &Bb[3][0], Cb); Ap[3] = A[s4]; } k=k%4; if ( k!=0){ __syncthreads(); Bb[ty][tx]=b; __syncthreads(); for(int i=0;i<k;i++){ saxpy(Ap[i],&Bb[i][0], Cb); } } if( (iby+16)>=n) { lda = n-iby; } else{ lda = 16; } if( (ibx+idt) >= m ) lda = 0 ; else lda = lda ; switch(lda){ case 16: C[0] =alpha*Cb[0] + beta * C[0]; C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc]; C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc]; C[3*ldc] =alpha*Cb[3] + beta * C[3*ldc]; C[4*ldc] =alpha*Cb[4] + beta * C[4*ldc]; C[5*ldc] =alpha*Cb[5] + beta * C[5*ldc]; C[6*ldc] =alpha*Cb[6] + beta * C[6*ldc]; C[7*ldc] =alpha*Cb[7] + beta * C[7*ldc]; C[8*ldc] =alpha*Cb[8] + beta * C[8*ldc]; C[9*ldc] =alpha*Cb[9] + beta * C[9*ldc]; C[10*ldc] 
=alpha*Cb[10] + beta * C[10*ldc]; C[11*ldc] =alpha*Cb[11] + beta * C[11*ldc]; C[12*ldc] =alpha*Cb[12] + beta * C[12*ldc]; C[13*ldc] =alpha*Cb[13] + beta * C[13*ldc]; C[14*ldc] =alpha*Cb[14] + beta * C[14*ldc]; C[15*ldc] =alpha*Cb[15] + beta * C[15*ldc]; break; case 15: C[0] =alpha*Cb[0] + beta * C[0]; C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc]; C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc]; C[3*ldc] =alpha*Cb[3] + beta * C[3*ldc]; C[4*ldc] =alpha*Cb[4] + beta * C[4*ldc]; C[5*ldc] =alpha*Cb[5] + beta * C[5*ldc]; C[6*ldc] =alpha*Cb[6] + beta * C[6*ldc]; C[7*ldc] =alpha*Cb[7] + beta * C[7*ldc]; C[8*ldc] =alpha*Cb[8] + beta * C[8*ldc]; C[9*ldc] =alpha*Cb[9] + beta * C[9*ldc]; C[10*ldc] =alpha*Cb[10] + beta * C[10*ldc]; C[11*ldc] =alpha*Cb[11] + beta * C[11*ldc]; C[12*ldc] =alpha*Cb[12] + beta * C[12*ldc]; C[13*ldc] =alpha*Cb[13] + beta * C[13*ldc]; C[14*ldc] =alpha*Cb[14] + beta * C[14*ldc]; break; case 14: C[0] =alpha*Cb[0] + beta * C[0]; C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc]; C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc]; C[3*ldc] =alpha*Cb[3] + beta * C[3*ldc]; C[4*ldc] =alpha*Cb[4] + beta * C[4*ldc]; C[5*ldc] =alpha*Cb[5] + beta * C[5*ldc]; C[6*ldc] =alpha*Cb[6] + beta * C[6*ldc]; C[7*ldc] =alpha*Cb[7] + beta * C[7*ldc]; C[8*ldc] =alpha*Cb[8] + beta * C[8*ldc]; C[9*ldc] =alpha*Cb[9] + beta * C[9*ldc]; C[10*ldc] =alpha*Cb[10] + beta * C[10*ldc]; C[11*ldc] =alpha*Cb[11] + beta * C[11*ldc]; C[12*ldc] =alpha*Cb[12] + beta * C[12*ldc]; C[13*ldc] =alpha*Cb[13] + beta * C[13*ldc]; break; case 13: C[0] =alpha*Cb[0] + beta * C[0]; C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc]; C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc]; C[3*ldc] =alpha*Cb[3] + beta * C[3*ldc]; C[4*ldc] =alpha*Cb[4] + beta * C[4*ldc]; C[5*ldc] =alpha*Cb[5] + beta * C[5*ldc]; C[6*ldc] =alpha*Cb[6] + beta * C[6*ldc]; C[7*ldc] =alpha*Cb[7] + beta * C[7*ldc]; C[8*ldc] =alpha*Cb[8] + beta * C[8*ldc]; C[9*ldc] =alpha*Cb[9] + beta * C[9*ldc]; C[10*ldc] =alpha*Cb[10] + beta * C[10*ldc]; C[11*ldc] =alpha*Cb[11] + beta * C[11*ldc]; 
C[12*ldc] =alpha*Cb[12] + beta * C[12*ldc]; break; case 12: C[0] =alpha*Cb[0] + beta * C[0]; C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc]; C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc]; C[3*ldc] =alpha*Cb[3] + beta * C[3*ldc]; C[4*ldc] =alpha*Cb[4] + beta * C[4*ldc]; C[5*ldc] =alpha*Cb[5] + beta * C[5*ldc]; C[6*ldc] =alpha*Cb[6] + beta * C[6*ldc]; C[7*ldc] =alpha*Cb[7] + beta * C[7*ldc]; C[8*ldc] =alpha*Cb[8] + beta * C[8*ldc]; C[9*ldc] =alpha*Cb[9] + beta * C[9*ldc]; C[10*ldc] =alpha*Cb[10] + beta * C[10*ldc]; C[11*ldc] =alpha*Cb[11] + beta * C[11*ldc]; break; case 11: C[0] =alpha*Cb[0] + beta * C[0]; C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc]; C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc]; C[3*ldc] =alpha*Cb[3] + beta * C[3*ldc]; C[4*ldc] =alpha*Cb[4] + beta * C[4*ldc]; C[5*ldc] =alpha*Cb[5] + beta * C[5*ldc]; C[6*ldc] =alpha*Cb[6] + beta * C[6*ldc]; C[7*ldc] =alpha*Cb[7] + beta * C[7*ldc]; C[8*ldc] =alpha*Cb[8] + beta * C[8*ldc]; C[9*ldc] =alpha*Cb[9] + beta * C[9*ldc]; C[10*ldc] =alpha*Cb[10] + beta * C[10*ldc]; break; case 10: C[0] =alpha*Cb[0] + beta * C[0]; C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc]; C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc]; C[3*ldc] =alpha*Cb[3] + beta * C[3*ldc]; C[4*ldc] =alpha*Cb[4] + beta * C[4*ldc]; C[5*ldc] =alpha*Cb[5] + beta * C[5*ldc]; C[6*ldc] =alpha*Cb[6] + beta * C[6*ldc]; C[7*ldc] =alpha*Cb[7] + beta * C[7*ldc]; C[8*ldc] =alpha*Cb[8] + beta * C[8*ldc]; C[9*ldc] =alpha*Cb[9] + beta * C[9*ldc]; break; case 9: C[0] =alpha*Cb[0] + beta * C[0]; C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc]; C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc]; C[3*ldc] =alpha*Cb[3] + beta * C[3*ldc]; C[4*ldc] =alpha*Cb[4] + beta * C[4*ldc]; C[5*ldc] =alpha*Cb[5] + beta * C[5*ldc]; C[6*ldc] =alpha*Cb[6] + beta * C[6*ldc]; C[7*ldc] =alpha*Cb[7] + beta * C[7*ldc]; C[8*ldc] =alpha*Cb[8] + beta * C[8*ldc]; break; case 8: C[0] =alpha*Cb[0] + beta * C[0]; C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc]; C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc]; C[3*ldc] =alpha*Cb[3] + beta * C[3*ldc]; C[4*ldc] =alpha*Cb[4] 
+ beta * C[4*ldc]; C[5*ldc] =alpha*Cb[5] + beta * C[5*ldc]; C[6*ldc] =alpha*Cb[6] + beta * C[6*ldc]; C[7*ldc] =alpha*Cb[7] + beta * C[7*ldc]; break; case 7: C[0] =alpha*Cb[0] + beta * C[0]; C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc]; C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc]; C[3*ldc] =alpha*Cb[3] + beta * C[3*ldc]; C[4*ldc] =alpha*Cb[4] + beta * C[4*ldc]; C[5*ldc] =alpha*Cb[5] + beta * C[5*ldc]; C[6*ldc] =alpha*Cb[6] + beta * C[6*ldc]; break; case 6: C[0] =alpha*Cb[0] + beta * C[0]; C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc]; C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc]; C[3*ldc] =alpha*Cb[3] + beta * C[3*ldc]; C[4*ldc] =alpha*Cb[4] + beta * C[4*ldc]; C[5*ldc] =alpha*Cb[5] + beta * C[5*ldc]; break; case 5: C[0] =alpha*Cb[0] + beta * C[0]; C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc]; C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc]; C[3*ldc] =alpha*Cb[3] + beta * C[3*ldc]; C[4*ldc] =alpha*Cb[4] + beta * C[4*ldc]; break; case 4: C[0] =alpha*Cb[0] + beta * C[0]; C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc]; C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc]; C[3*ldc] =alpha*Cb[3] + beta * C[3*ldc]; break; case 3: C[0] =alpha*Cb[0] + beta * C[0]; C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc]; C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc]; break; case 2: C[0] =alpha*Cb[0] + beta * C[0]; C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc]; break; case 1: C[0] =alpha*Cb[0] + beta * C[0]; break; case 0: break; } } extern "C" void magmablas_sgemm_kernel_N_T_64_16_4_16_4(float *C, const float *A, const float *B, magma_int_t m, magma_int_t n, magma_int_t k, magma_int_t lda, magma_int_t ldb, magma_int_t ldc, float alpha, float beta) { dim3 threads( 16, 4 ); dim3 grid(m/64+(m%64!=0),n/16+(n%16!=0)); hipLaunchKernelGGL(( sgemm_kernel_N_T_64_16_4_16_4), dim3(grid), dim3(threads), 0, magma_stream , C, A, B, m, n, k, lda, ldb, ldc, alpha, beta); }
032173958ba03cc82d0807be811e334bece23301.cu
/* -- MAGMA (version 1.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver August 2013 @precisions normal s */ #include "common_magma.h" #include "commonblas_s.h" static __device__ void saxpy(float a,float *b, float *c) { c[0] += a * b[0]; c[1] += a * b[1]; c[2] += a * b[2]; c[3] += a * b[3]; c[4] += a * b[4]; c[5] += a * b[5]; c[6] += a * b[6]; c[7] += a * b[7]; c[8] += a * b[8]; c[9] += a * b[9]; c[10] += a * b[10]; c[11] += a * b[11]; c[12] += a * b[12]; c[13] += a * b[13]; c[14] += a * b[14]; c[15] += a * b[15]; } __global__ void sgemm_kernel_N_T_64_16_4_16_4(float *C, const float *A, const float *B, int m, int n, int k, int lda, int ldb, int ldc, float alpha, float beta) { /* -- MAGMA (version 1.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver August 2013 Purpose: ======== This routine computes C = alpha* A*B^T + beta * C B is put into shared memory Parameters Used: blk_M=64 blk_N=16 blk_K=4 nthd_x=16 nthd_y=4 This code should run for any matrix size. =============================================================== */ const int tx = threadIdx.x; const int ty = threadIdx.y; const int ibx = blockIdx.x * 64; const int iby = blockIdx.y *16; const int idt = ty * 16 + tx; if( iby + tx >=n ) B+= iby+0; else B+= iby+tx; /* Taking care of boundary cases where K<4. 
*/ if( ty >=k ) B+= __mul24( 0,ldb); else B+= __mul24( ty,ldb); if( ibx + idt >= m ) A += ibx + 0 ; else A += ibx + idt; int s2=lda, s3=2*lda, s4=3*lda ; switch (k){ case 1: s2=0; s3=0;s4=0 ; break ; case 2: s2=lda; s3=0;s4=0 ; break ; case 3: s2=lda; s3=2*lda;s4=0 ; break ; } C += ibx +idt +__mul24( iby,ldc); float Ap[4]={A[0], A[s2], A[s3], A[s4]}; float b=B[0]; const float *Bend = B + ldb*(k-k%4); B+=4*ldb; A+=4*lda; __shared__ float Bb[4][16]; float Cb[16] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}; if(k>7) do { float Ab[4] = {Ap[0], Ap[1], Ap[2], Ap[3]}; Bb[ty][tx]=b; __syncthreads(); Ap[0] = A[0]; Ap[1] = A[s2]; Ap[2] = A[s3]; Ap[3] = A[s4]; b=B[0]; saxpy(Ab[0], &Bb[0][0], Cb); saxpy(Ab[1], &Bb[1][0], Cb); saxpy(Ab[2], &Bb[2][0], Cb); saxpy(Ab[3], &Bb[3][0], Cb); A+=4*lda; B += 4*ldb; __syncthreads(); } while (B < Bend); if(k>3){ Bb[ty][tx]=b; int k1 = k-k%4; if( (k1+ty) >=k) B-=4*ldb; else B-=0*ldb; if( (k1+0) >= k ) {s2=0 ;s3=0*lda;s4=0;A-=4*lda;} else if( (k1+1) >= k ) {s2=0 ;s3=0*lda;s4=0;A-=0*lda;} else if( (k1+2) >= k ) {s2=lda;s3=0*lda;s4=0;A-=0*lda;} else if( (k1+3) >= k ) {s2=lda;s3=2*lda;s4=0;A-=0*lda;} __syncthreads(); b=B[0]; saxpy(Ap[0], &Bb[0][0], Cb); Ap[0] = A[0]; saxpy(Ap[1], &Bb[1][0], Cb); Ap[1] = A[s2]; saxpy(Ap[2], &Bb[2][0], Cb); Ap[2] = A[s3]; saxpy(Ap[3], &Bb[3][0], Cb); Ap[3] = A[s4]; } k=k%4; if ( k!=0){ __syncthreads(); Bb[ty][tx]=b; __syncthreads(); for(int i=0;i<k;i++){ saxpy(Ap[i],&Bb[i][0], Cb); } } if( (iby+16)>=n) { lda = n-iby; } else{ lda = 16; } if( (ibx+idt) >= m ) lda = 0 ; else lda = lda ; switch(lda){ case 16: C[0] =alpha*Cb[0] + beta * C[0]; C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc]; C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc]; C[3*ldc] =alpha*Cb[3] + beta * C[3*ldc]; C[4*ldc] =alpha*Cb[4] + beta * C[4*ldc]; C[5*ldc] =alpha*Cb[5] + beta * C[5*ldc]; C[6*ldc] =alpha*Cb[6] + beta * C[6*ldc]; C[7*ldc] =alpha*Cb[7] + beta * C[7*ldc]; C[8*ldc] =alpha*Cb[8] + beta * C[8*ldc]; C[9*ldc] =alpha*Cb[9] + beta * C[9*ldc]; C[10*ldc] 
=alpha*Cb[10] + beta * C[10*ldc]; C[11*ldc] =alpha*Cb[11] + beta * C[11*ldc]; C[12*ldc] =alpha*Cb[12] + beta * C[12*ldc]; C[13*ldc] =alpha*Cb[13] + beta * C[13*ldc]; C[14*ldc] =alpha*Cb[14] + beta * C[14*ldc]; C[15*ldc] =alpha*Cb[15] + beta * C[15*ldc]; break; case 15: C[0] =alpha*Cb[0] + beta * C[0]; C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc]; C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc]; C[3*ldc] =alpha*Cb[3] + beta * C[3*ldc]; C[4*ldc] =alpha*Cb[4] + beta * C[4*ldc]; C[5*ldc] =alpha*Cb[5] + beta * C[5*ldc]; C[6*ldc] =alpha*Cb[6] + beta * C[6*ldc]; C[7*ldc] =alpha*Cb[7] + beta * C[7*ldc]; C[8*ldc] =alpha*Cb[8] + beta * C[8*ldc]; C[9*ldc] =alpha*Cb[9] + beta * C[9*ldc]; C[10*ldc] =alpha*Cb[10] + beta * C[10*ldc]; C[11*ldc] =alpha*Cb[11] + beta * C[11*ldc]; C[12*ldc] =alpha*Cb[12] + beta * C[12*ldc]; C[13*ldc] =alpha*Cb[13] + beta * C[13*ldc]; C[14*ldc] =alpha*Cb[14] + beta * C[14*ldc]; break; case 14: C[0] =alpha*Cb[0] + beta * C[0]; C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc]; C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc]; C[3*ldc] =alpha*Cb[3] + beta * C[3*ldc]; C[4*ldc] =alpha*Cb[4] + beta * C[4*ldc]; C[5*ldc] =alpha*Cb[5] + beta * C[5*ldc]; C[6*ldc] =alpha*Cb[6] + beta * C[6*ldc]; C[7*ldc] =alpha*Cb[7] + beta * C[7*ldc]; C[8*ldc] =alpha*Cb[8] + beta * C[8*ldc]; C[9*ldc] =alpha*Cb[9] + beta * C[9*ldc]; C[10*ldc] =alpha*Cb[10] + beta * C[10*ldc]; C[11*ldc] =alpha*Cb[11] + beta * C[11*ldc]; C[12*ldc] =alpha*Cb[12] + beta * C[12*ldc]; C[13*ldc] =alpha*Cb[13] + beta * C[13*ldc]; break; case 13: C[0] =alpha*Cb[0] + beta * C[0]; C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc]; C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc]; C[3*ldc] =alpha*Cb[3] + beta * C[3*ldc]; C[4*ldc] =alpha*Cb[4] + beta * C[4*ldc]; C[5*ldc] =alpha*Cb[5] + beta * C[5*ldc]; C[6*ldc] =alpha*Cb[6] + beta * C[6*ldc]; C[7*ldc] =alpha*Cb[7] + beta * C[7*ldc]; C[8*ldc] =alpha*Cb[8] + beta * C[8*ldc]; C[9*ldc] =alpha*Cb[9] + beta * C[9*ldc]; C[10*ldc] =alpha*Cb[10] + beta * C[10*ldc]; C[11*ldc] =alpha*Cb[11] + beta * C[11*ldc]; 
C[12*ldc] =alpha*Cb[12] + beta * C[12*ldc]; break; case 12: C[0] =alpha*Cb[0] + beta * C[0]; C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc]; C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc]; C[3*ldc] =alpha*Cb[3] + beta * C[3*ldc]; C[4*ldc] =alpha*Cb[4] + beta * C[4*ldc]; C[5*ldc] =alpha*Cb[5] + beta * C[5*ldc]; C[6*ldc] =alpha*Cb[6] + beta * C[6*ldc]; C[7*ldc] =alpha*Cb[7] + beta * C[7*ldc]; C[8*ldc] =alpha*Cb[8] + beta * C[8*ldc]; C[9*ldc] =alpha*Cb[9] + beta * C[9*ldc]; C[10*ldc] =alpha*Cb[10] + beta * C[10*ldc]; C[11*ldc] =alpha*Cb[11] + beta * C[11*ldc]; break; case 11: C[0] =alpha*Cb[0] + beta * C[0]; C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc]; C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc]; C[3*ldc] =alpha*Cb[3] + beta * C[3*ldc]; C[4*ldc] =alpha*Cb[4] + beta * C[4*ldc]; C[5*ldc] =alpha*Cb[5] + beta * C[5*ldc]; C[6*ldc] =alpha*Cb[6] + beta * C[6*ldc]; C[7*ldc] =alpha*Cb[7] + beta * C[7*ldc]; C[8*ldc] =alpha*Cb[8] + beta * C[8*ldc]; C[9*ldc] =alpha*Cb[9] + beta * C[9*ldc]; C[10*ldc] =alpha*Cb[10] + beta * C[10*ldc]; break; case 10: C[0] =alpha*Cb[0] + beta * C[0]; C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc]; C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc]; C[3*ldc] =alpha*Cb[3] + beta * C[3*ldc]; C[4*ldc] =alpha*Cb[4] + beta * C[4*ldc]; C[5*ldc] =alpha*Cb[5] + beta * C[5*ldc]; C[6*ldc] =alpha*Cb[6] + beta * C[6*ldc]; C[7*ldc] =alpha*Cb[7] + beta * C[7*ldc]; C[8*ldc] =alpha*Cb[8] + beta * C[8*ldc]; C[9*ldc] =alpha*Cb[9] + beta * C[9*ldc]; break; case 9: C[0] =alpha*Cb[0] + beta * C[0]; C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc]; C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc]; C[3*ldc] =alpha*Cb[3] + beta * C[3*ldc]; C[4*ldc] =alpha*Cb[4] + beta * C[4*ldc]; C[5*ldc] =alpha*Cb[5] + beta * C[5*ldc]; C[6*ldc] =alpha*Cb[6] + beta * C[6*ldc]; C[7*ldc] =alpha*Cb[7] + beta * C[7*ldc]; C[8*ldc] =alpha*Cb[8] + beta * C[8*ldc]; break; case 8: C[0] =alpha*Cb[0] + beta * C[0]; C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc]; C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc]; C[3*ldc] =alpha*Cb[3] + beta * C[3*ldc]; C[4*ldc] =alpha*Cb[4] 
+ beta * C[4*ldc]; C[5*ldc] =alpha*Cb[5] + beta * C[5*ldc]; C[6*ldc] =alpha*Cb[6] + beta * C[6*ldc]; C[7*ldc] =alpha*Cb[7] + beta * C[7*ldc]; break; case 7: C[0] =alpha*Cb[0] + beta * C[0]; C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc]; C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc]; C[3*ldc] =alpha*Cb[3] + beta * C[3*ldc]; C[4*ldc] =alpha*Cb[4] + beta * C[4*ldc]; C[5*ldc] =alpha*Cb[5] + beta * C[5*ldc]; C[6*ldc] =alpha*Cb[6] + beta * C[6*ldc]; break; case 6: C[0] =alpha*Cb[0] + beta * C[0]; C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc]; C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc]; C[3*ldc] =alpha*Cb[3] + beta * C[3*ldc]; C[4*ldc] =alpha*Cb[4] + beta * C[4*ldc]; C[5*ldc] =alpha*Cb[5] + beta * C[5*ldc]; break; case 5: C[0] =alpha*Cb[0] + beta * C[0]; C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc]; C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc]; C[3*ldc] =alpha*Cb[3] + beta * C[3*ldc]; C[4*ldc] =alpha*Cb[4] + beta * C[4*ldc]; break; case 4: C[0] =alpha*Cb[0] + beta * C[0]; C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc]; C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc]; C[3*ldc] =alpha*Cb[3] + beta * C[3*ldc]; break; case 3: C[0] =alpha*Cb[0] + beta * C[0]; C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc]; C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc]; break; case 2: C[0] =alpha*Cb[0] + beta * C[0]; C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc]; break; case 1: C[0] =alpha*Cb[0] + beta * C[0]; break; case 0: break; } } extern "C" void magmablas_sgemm_kernel_N_T_64_16_4_16_4(float *C, const float *A, const float *B, magma_int_t m, magma_int_t n, magma_int_t k, magma_int_t lda, magma_int_t ldb, magma_int_t ldc, float alpha, float beta) { dim3 threads( 16, 4 ); dim3 grid(m/64+(m%64!=0),n/16+(n%16!=0)); sgemm_kernel_N_T_64_16_4_16_4<<< grid, threads, 0, magma_stream >>>(C, A, B, m, n, k, lda, ldb, ldc, alpha, beta); }
a8756f572c3b0cc2783d5fe1cbb864943b91c473.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" extern "C" __global__ void Add(int *src, int delta) { src[threadIdx.x] += delta; }
a8756f572c3b0cc2783d5fe1cbb864943b91c473.cu
extern "C" __global__ void Add(int *src, int delta) { src[threadIdx.x] += delta; }
3a9e039c4dff8b453d52dcd1e8b06f7dac7d9b62.hip
// !!! This is a file automatically generated by hipify!!! /* * This CUDA-Cusparse code can handle/work with any type of the input mxArrays, * GPUarray or standard matlab CPU array as input {prhs[0]/prhs[1] := mxGPUArray or CPU Array}[double/complex double] * Sparse/Dense matrix-sparse/dense vector multiplication Z=CuMatlab_solve(Sparse/Dense(A),Sparse/Dense(Y)). * AZ=Y -->Z=A\Y * Developed at UCL, Institute of Neurology, 12 Queen Square, WC1N 3AR, London * Wellcome Trust Centre for Neuroimaging * Part of the project SPM(http://www.fil.ion.ucl.ac.uk/spm) * Copyright 2018 * Kevin Bronik */ #include "matrix.h" #include "mex.h" #include "gpu/mxGPUArray.h" #include <cusparse_v2.h> #include <cusolverSp.h> #include <hip/hip_runtime_api.h> #include "cusolverSp_LOWLEVEL_PREVIEW.h" #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include "SPARSEHELPER.h" #include "ERRORCHK.h" #include <omp.h> // Input Arguments #define INPUTSPARSEA prhs[0] #define INPUTDENSEB prhs[1] // Output Arguments #define OUTPUTMATRIX plhs[0] extern "C" static void mexCuMatlab_sparseSDC(int nlhs, mxArray *plhs[], int nrhs, mxArray const *prhs[]) { int nDevices; hipError_t errCode =hipGetDeviceCount(&nDevices); //int nDevices; //hipGetDeviceCount(&nDevices); if (errCode != hipSuccess){ printf("Error! No CUDA devices found! 
\n"); return; } char const * const InputErrMsg = "Invalid input to MEX file, number of input arguments must be two."; char const * const OutputErrMsg = "Invalid output to MEX file, number of output arguments must be one."; if ((nrhs!=2)) { mexErrMsgIdAndTxt("MATLAB:mexatexit:invalidInput", InputErrMsg); } if ((nlhs!=1)) { mexErrMsgIdAndTxt("MATLAB:mexatexit:invalidInput", OutputErrMsg); } char *input_buf0; input_buf0 = mxArrayToString(INPUTSPARSEA); if ((mxIsChar(INPUTSPARSEA))){ mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Input(FIRST ARGUMENT) must be array, or gpuArray object not %s\n",input_buf0); } char *input_buf1; input_buf1 = mxArrayToString(INPUTDENSEB); if ((mxIsChar(INPUTDENSEB))){ mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Input(SECOND ARGUMENT) must be array, or gpuArray object not %s\n",input_buf1); } if (mxIsGPUArray(INPUTSPARSEA) && mxIsGPUArray(INPUTDENSEB)) { mxGPUArray const *INPUTSPARSEGPUA; mxGPUArray const *INPUTDENSEGPUB; /* Initialize the MathWorks GPU API. 
*/ mxInitGPU(); INPUTSPARSEGPUA = mxGPUCreateFromMxArray(INPUTSPARSEA); INPUTDENSEGPUB = mxGPUCreateFromMxArray(INPUTDENSEB); if((mxGPUIsSparse(INPUTSPARSEGPUA))&& (!mxGPUIsSparse(INPUTDENSEGPUB)) ){ const mwSize *dimsGPUSA; dimsGPUSA=mxGPUGetDimensions(INPUTSPARSEGPUA); int numARows, numAColumns; numARows = (int)dimsGPUSA[0]; /* gets number of rows of A */ numAColumns = (int)dimsGPUSA[1]; /* gets number of columns of A */ const mwSize *dimsGPUSB; dimsGPUSB=mxGPUGetDimensions(INPUTDENSEGPUB); int numBRows, numBColumns; numBRows = (int)dimsGPUSB[0]; /* gets number of rows of B */ numBColumns = (int)dimsGPUSB[1]; /* gets number of columns of B */ if ( numARows != numAColumns ) { mxGPUDestroyGPUArray(INPUTDENSEGPUB); mxGPUDestroyGPUArray(INPUTSPARSEGPUA); mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Invalid input to MEX file,first argument must be a sparse/dense square matrix."); } if ( (numBColumns!= 1) ) { mxGPUDestroyGPUArray(INPUTDENSEGPUB); mxGPUDestroyGPUArray(INPUTSPARSEGPUA); mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Invalid input to MEX file, second argument must be a dense/sparse column vector."); } if ( (numBRows!= numARows) ) { mxGPUDestroyGPUArray(INPUTDENSEGPUB); mxGPUDestroyGPUArray(INPUTSPARSEGPUA); mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Invalid input to MEX file, array (matrix-vector) dimensions must agree."); } hipDoubleComplex const *d_B_dense; d_B_dense = (hipDoubleComplex const *)(mxGPUGetDataReadOnly(INPUTDENSEGPUB)); mwIndex nnz1; mxArray * VLSXY1 = mxGPUCreateMxArrayOnCPU(INPUTSPARSEGPUA); nnz1 = *(mxGetJc(VLSXY1) + numAColumns); int nnzA = (int)nnz1; mxArray * ROW_SORTA = mxCreateNumericMatrix(nnzA, 1,mxINT32_CLASS, mxREAL); int *ROWSORTA = (int *)mxGetInt32s(ROW_SORTA); SetIr_Data(VLSXY1, ROWSORTA); mxArray * COL_SORTA = mxCreateNumericMatrix(nnzA, 1, mxINT32_CLASS, mxREAL); int *COLSORTA = (int *)mxGetInt32s(COL_SORTA); SetJc_Int(VLSXY1, COLSORTA); hipDoubleComplex *VALSORTA = (hipDoubleComplex 
*)mxGetComplexDoubles(VLSXY1); hipsparseHandle_t handle; cusparseSafeCall(hipsparseCreate(&handle)); hipsparseMatDescr_t descrA; cusparseSafeCall(hipsparseCreateMatDescr(&descrA)); hipsparseSetMatType(descrA, HIPSPARSE_MATRIX_TYPE_GENERAL); hipsparseSetMatIndexBase(descrA, HIPSPARSE_INDEX_BASE_ONE); mxGPUDestroyGPUArray(INPUTSPARSEGPUA); mxGPUDestroyGPUArray(INPUTDENSEGPUB); //hipDoubleComplex *d_A; gpuErrchk(hipMalloc(&d_A, nnzA * sizeof(*d_A))); //int *d_A_RowIndices; gpuErrchk(hipMalloc(&d_A_RowIndices, (numARows + 1) * sizeof(*d_A_RowIndices))); //int *d_A_ColIndices; gpuErrchk(hipMalloc(&d_A_ColIndices, nnzA * sizeof(*d_A_ColIndices))); //int *d_cooRowIndA; gpuErrchk(hipMalloc(&d_cooRowIndA, nnzA * sizeof(*d_cooRowIndA))); size_t pivot_dimensA[1] = {nnzA}; size_t pivot_dimensROW_A[1] = {numARows+1}; size_t pivot_dimensCOL_A[1] = {nnzA}; size_t pivot_dimensCOO_A[1] = {nnzA}; mxGPUArray *A = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensA, mxDOUBLE_CLASS, mxCOMPLEX, MX_GPU_DO_NOT_INITIALIZE); hipDoubleComplex *d_A = (hipDoubleComplex *)mxGPUGetData(A); mxGPUArray * ROW_A = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensROW_A, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE); int *d_A_RowIndices = (int *)mxGPUGetData(ROW_A); mxGPUArray * COL_A = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensCOL_A, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE); int *d_A_ColIndices = (int *)mxGPUGetData(COL_A); mxGPUArray * COO_A = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensCOO_A, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE); int *d_cooRowIndA = (int *)mxGPUGetData(COO_A); // --- Descriptor for sparse matrix B gpuErrchk(hipMemcpy(d_A, VALSORTA, nnzA * sizeof(*d_A), hipMemcpyHostToDevice)); gpuErrchk(hipMemcpy(d_A_ColIndices, COLSORTA, nnzA * sizeof(*d_A_ColIndices), hipMemcpyHostToDevice)); gpuErrchk(hipMemcpy(d_cooRowIndA, ROWSORTA, nnzA * sizeof(*d_cooRowIndA), hipMemcpyHostToDevice)); mxDestroyArray(COL_SORTA); mxDestroyArray(ROW_SORTA); mxDestroyArray(VLSXY1); int *Pa = 
NULL; void *pBuffera = NULL; size_t pBufferSizeInBytesa = 0; hipsparseXcoosort_bufferSizeExt(handle, numARows, numAColumns, nnzA, d_cooRowIndA, d_A_ColIndices, &pBufferSizeInBytesa); gpuErrchk(hipMalloc(&pBuffera, sizeof(char)*pBufferSizeInBytesa)); gpuErrchk(hipMalloc(&Pa, sizeof(int)*nnzA)); hipsparseCreateIdentityPermutation(handle, nnzA, Pa); cusparseSafeCall(hipsparseXcoosortByRow(handle, numARows, numAColumns, nnzA, d_cooRowIndA, d_A_ColIndices, Pa, pBuffera)); cusparseSafeCall(hipsparseZgthr(handle, nnzA, d_A, d_A, Pa, HIPSPARSE_INDEX_BASE_ZERO)); cusparseSafeCall(hipsparseXcoo2csr(handle, d_cooRowIndA, nnzA, numARows, d_A_RowIndices, HIPSPARSE_INDEX_BASE_ONE)); mxGPUDestroyGPUArray(COO_A); gpuErrchk(hipFree(pBuffera)); gpuErrchk(hipFree(Pa)); cusolverSpHandle_t handle_cusolver; cusolverSpCreate(&handle_cusolver); csrcholInfo_t chl_info = NULL; const double tol = 1.e-14; int singularity = 0; size_t size_internal = 0; size_t size_chol = 0; cusolverSafeCall(cusolverSpCreateCsrcholInfo(&chl_info)); cusolverSafeCall(cusolverSpXcsrcholAnalysis( handle_cusolver, numARows, nnzA, descrA, d_A_RowIndices, d_A_ColIndices, chl_info)); cusolverSafeCall(cusolverSpZcsrcholBufferInfo( handle_cusolver, numARows, nnzA, descrA, d_A, d_A_RowIndices, d_A_ColIndices, chl_info, &size_internal, &size_chol)); void *buffer_gpu = NULL; gpuErrchk(hipMalloc(&buffer_gpu, sizeof(char)*size_chol)); cusolverSafeCall(cusolverSpZcsrcholFactor( handle_cusolver, numARows, nnzA, descrA, d_A, d_A_RowIndices, d_A_ColIndices, chl_info, buffer_gpu)); cusolverSafeCall(cusolverSpZcsrcholZeroPivot( handle_cusolver, chl_info, tol, &singularity)); if ( 0 <= singularity){ mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Invalid input to MEX file, (fatal error:) A is not invertible, singularity=%d\n", singularity); } size_t pivot_dimensionsvalueVa[1] = {numAColumns}; mxGPUArray *VAL = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionsvalueVa, mxDOUBLE_CLASS, mxCOMPLEX, MX_GPU_INITIALIZE_VALUES); 
hipDoubleComplex *VALOUT = (hipDoubleComplex *)mxGPUGetData(VAL); cusolverSafeCall(cusolverSpZcsrcholSolve( handle_cusolver, numARows, d_B_dense, VALOUT, chl_info, buffer_gpu)); mxGPUDestroyGPUArray(A); mxGPUDestroyGPUArray(ROW_A); mxGPUDestroyGPUArray(COL_A); OUTPUTMATRIX = mxGPUCreateMxArrayOnGPU(VAL); gpuErrchk(hipFree(buffer_gpu)); mxGPUDestroyGPUArray(VAL); cusolverSpDestroyCsrcholInfo(chl_info); hipsparseDestroyMatDescr(descrA); cusolverSpDestroy(handle_cusolver); hipsparseDestroy(handle); } else{ mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Incorrect input arguments! %s\n"); } } //////////////////////////////////////////////////////////////////////////////////// else if (!(mxIsGPUArray(INPUTSPARSEA)) && !(mxIsGPUArray(INPUTDENSEB))){ // if ((mxGetClassID(INPUTSPARSEA) != mxDOUBLE_CLASS) || (mxGetClassID(INPUTSPARSEB) != mxDOUBLE_CLASS)) { // mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", // "Invalid input to MEX file, input(FIRST and SECOND ARGUMENTS) must be hipDoubleComplex precision."); // } if((mxIsSparse(INPUTSPARSEA))&& (!mxIsSparse(INPUTDENSEB)) ){ mxInitGPU(); const mwSize *dimsCPUA; dimsCPUA=mxGetDimensions(INPUTSPARSEA); int numARows = (int)dimsCPUA[0]; /* gets number of rows of A */ int numAColumns = (int)dimsCPUA[1]; /* gets number of columns of A */ const mwSize *dimsCPUB; dimsCPUB=mxGetDimensions(INPUTDENSEB); int numBRows = (int)dimsCPUB[0]; /* gets number of rows of B */ int numBColumns = (int)dimsCPUB[1]; /* gets number of columns of B */ if ( numARows != numAColumns ) { mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Invalid input to MEX file,first argument must be a sparse/dense square matrix."); } if ( (numBColumns!= 1) ) { mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Invalid input to MEX file, second argument must be a dense/sparse column vector."); } if ( (numBRows!= numARows) ) { mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Invalid input to MEX file, array (matrix-vector) dimensions must agree."); } 
mwIndex nnz1; nnz1 = *(mxGetJc(INPUTSPARSEA) + numAColumns); int nnzA = (int)nnz1; mxArray * ROW_SORTA = mxCreateNumericMatrix(nnzA, 1,mxINT32_CLASS, mxREAL); int *ROWSORTA = (int *)mxGetInt32s(ROW_SORTA); SetIr_Data(INPUTSPARSEA, ROWSORTA); mxArray * COL_SORTA = mxCreateNumericMatrix(nnzA, 1, mxINT32_CLASS, mxREAL); int *COLSORTA = (int *)mxGetInt32s(COL_SORTA); SetJc_Int(INPUTSPARSEA, COLSORTA); hipDoubleComplex *VALSORTA = (hipDoubleComplex *)mxGetComplexDoubles(INPUTSPARSEA); hipsparseHandle_t handle; cusparseSafeCall(hipsparseCreate(&handle)); hipsparseMatDescr_t descrA; cusparseSafeCall(hipsparseCreateMatDescr(&descrA)); hipsparseSetMatType(descrA, HIPSPARSE_MATRIX_TYPE_GENERAL); hipsparseSetMatIndexBase(descrA, HIPSPARSE_INDEX_BASE_ONE); size_t pivot_dimensionsvalueDB[1] = {numBRows}; mxGPUArray *OUTMB = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionsvalueDB, mxDOUBLE_CLASS, mxCOMPLEX, MX_GPU_DO_NOT_INITIALIZE); hipDoubleComplex *d_B_dense = (hipDoubleComplex *)mxGPUGetData(OUTMB); hipDoubleComplex *h_B_dense1; h_B_dense1 = (hipDoubleComplex *)mxGetComplexDoubles(INPUTDENSEB); gpuErrchk(hipMemcpy(d_B_dense, h_B_dense1, numBRows * sizeof(*d_B_dense), hipMemcpyHostToDevice)); //hipDoubleComplex *d_A; gpuErrchk(hipMalloc(&d_A, nnzA * sizeof(*d_A))); //int *d_A_RowIndices; gpuErrchk(hipMalloc(&d_A_RowIndices, (numARows + 1) * sizeof(*d_A_RowIndices))); //int *d_A_ColIndices; gpuErrchk(hipMalloc(&d_A_ColIndices, nnzA * sizeof(*d_A_ColIndices))); //int *d_cooRowIndA; gpuErrchk(hipMalloc(&d_cooRowIndA, nnzA * sizeof(*d_cooRowIndA))); size_t pivot_dimensA[1] = {nnzA}; size_t pivot_dimensROW_A[1] = {numARows+1}; size_t pivot_dimensCOL_A[1] = {nnzA}; size_t pivot_dimensCOO_A[1] = {nnzA}; mxGPUArray *A = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensA, mxDOUBLE_CLASS, mxCOMPLEX, MX_GPU_DO_NOT_INITIALIZE); hipDoubleComplex *d_A = (hipDoubleComplex *)mxGPUGetData(A); mxGPUArray * ROW_A = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensROW_A, mxINT32_CLASS, mxREAL, 
MX_GPU_DO_NOT_INITIALIZE); int *d_A_RowIndices = (int *)mxGPUGetData(ROW_A); mxGPUArray * COL_A = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensCOL_A, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE); int *d_A_ColIndices = (int *)mxGPUGetData(COL_A); mxGPUArray * COO_A = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensCOO_A, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE); int *d_cooRowIndA = (int *)mxGPUGetData(COO_A); // --- Descriptor for sparse matrix B gpuErrchk(hipMemcpy(d_A, VALSORTA, nnzA * sizeof(*d_A), hipMemcpyHostToDevice)); gpuErrchk(hipMemcpy(d_A_ColIndices, COLSORTA, nnzA * sizeof(*d_A_ColIndices), hipMemcpyHostToDevice)); gpuErrchk(hipMemcpy(d_cooRowIndA, ROWSORTA, nnzA * sizeof(*d_cooRowIndA), hipMemcpyHostToDevice)); mxDestroyArray(COL_SORTA); mxDestroyArray(ROW_SORTA); int *Pa = NULL; void *pBuffera = NULL; size_t pBufferSizeInBytesa = 0; hipsparseXcoosort_bufferSizeExt(handle, numARows, numAColumns, nnzA, d_cooRowIndA, d_A_ColIndices, &pBufferSizeInBytesa); gpuErrchk(hipMalloc(&pBuffera, sizeof(char)*pBufferSizeInBytesa)); gpuErrchk(hipMalloc(&Pa, sizeof(int)*nnzA)); hipsparseCreateIdentityPermutation(handle, nnzA, Pa); cusparseSafeCall(hipsparseXcoosortByRow(handle, numARows, numAColumns, nnzA, d_cooRowIndA, d_A_ColIndices, Pa, pBuffera)); cusparseSafeCall(hipsparseZgthr(handle, nnzA, d_A, d_A, Pa, HIPSPARSE_INDEX_BASE_ZERO)); cusparseSafeCall(hipsparseXcoo2csr(handle, d_cooRowIndA, nnzA, numARows, d_A_RowIndices, HIPSPARSE_INDEX_BASE_ONE)); mxGPUDestroyGPUArray(COO_A); gpuErrchk(hipFree(pBuffera)); gpuErrchk(hipFree(Pa)); cusolverSpHandle_t handle_cusolver; cusolverSpCreate(&handle_cusolver); csrcholInfo_t chl_info = NULL; const double tol = 1.e-14; int singularity = 0; size_t size_internal = 0; size_t size_chol = 0; cusolverSafeCall(cusolverSpCreateCsrcholInfo(&chl_info)); cusolverSafeCall(cusolverSpXcsrcholAnalysis( handle_cusolver, numARows, nnzA, descrA, d_A_RowIndices, d_A_ColIndices, chl_info)); 
cusolverSafeCall(cusolverSpZcsrcholBufferInfo( handle_cusolver, numARows, nnzA, descrA, d_A, d_A_RowIndices, d_A_ColIndices, chl_info, &size_internal, &size_chol)); void *buffer_gpu = NULL; gpuErrchk(hipMalloc(&buffer_gpu, sizeof(char)*size_chol)); cusolverSafeCall(cusolverSpZcsrcholFactor( handle_cusolver, numARows, nnzA, descrA, d_A, d_A_RowIndices, d_A_ColIndices, chl_info, buffer_gpu)); cusolverSafeCall(cusolverSpZcsrcholZeroPivot( handle_cusolver, chl_info, tol, &singularity)); if ( 0 <= singularity){ mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Invalid input to MEX file, (fatal error:) A is not invertible, singularity=%d\n", singularity); } size_t pivot_dimensionsvalueVa[1] = {numAColumns}; mxGPUArray *VAL = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionsvalueVa, mxDOUBLE_CLASS, mxCOMPLEX, MX_GPU_INITIALIZE_VALUES); hipDoubleComplex *VALOUT = (hipDoubleComplex *)mxGPUGetData(VAL); cusolverSafeCall(cusolverSpZcsrcholSolve( handle_cusolver, numARows, d_B_dense, VALOUT, chl_info, buffer_gpu)); mxGPUDestroyGPUArray(A); mxGPUDestroyGPUArray(ROW_A); mxGPUDestroyGPUArray(COL_A); mxGPUDestroyGPUArray(OUTMB); OUTPUTMATRIX = mxGPUCreateMxArrayOnGPU(VAL); gpuErrchk(hipFree(buffer_gpu)); mxGPUDestroyGPUArray(VAL); cusolverSpDestroyCsrcholInfo(chl_info); hipsparseDestroyMatDescr(descrA); cusolverSpDestroy(handle_cusolver); hipsparseDestroy(handle); } else{ mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Incorrect input arguments! %s\n"); } } // else{ mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Incorrect input arguments! %s\n"); } }
3a9e039c4dff8b453d52dcd1e8b06f7dac7d9b62.cu
/* * This CUDA-Cusparse code can handle/work with any type of the input mxArrays, * GPUarray or standard matlab CPU array as input {prhs[0]/prhs[1] := mxGPUArray or CPU Array}[double/complex double] * Sparse/Dense matrix-sparse/dense vector multiplication Z=CuMatlab_solve(Sparse/Dense(A),Sparse/Dense(Y)). * AZ=Y -->Z=A\Y * Developed at UCL, Institute of Neurology, 12 Queen Square, WC1N 3AR, London * Wellcome Trust Centre for Neuroimaging * Part of the project SPM(http://www.fil.ion.ucl.ac.uk/spm) * Copyright 2018 * Kevin Bronik */ #include "matrix.h" #include "mex.h" #include "gpu/mxGPUArray.h" #include <cusparse_v2.h> #include <cusolverSp.h> #include <cuda_runtime_api.h> #include "cusolverSp_LOWLEVEL_PREVIEW.h" #include <cuda.h> #include <cuda_runtime.h> #include "SPARSEHELPER.h" #include "ERRORCHK.h" #include <omp.h> // Input Arguments #define INPUTSPARSEA prhs[0] #define INPUTDENSEB prhs[1] // Output Arguments #define OUTPUTMATRIX plhs[0] extern "C" static void mexCuMatlab_sparseSDC(int nlhs, mxArray *plhs[], int nrhs, mxArray const *prhs[]) { int nDevices; cudaError_t errCode =cudaGetDeviceCount(&nDevices); //int nDevices; //cudaGetDeviceCount(&nDevices); if (errCode != cudaSuccess){ printf("Error! No CUDA devices found! 
\n"); return; } char const * const InputErrMsg = "Invalid input to MEX file, number of input arguments must be two."; char const * const OutputErrMsg = "Invalid output to MEX file, number of output arguments must be one."; if ((nrhs!=2)) { mexErrMsgIdAndTxt("MATLAB:mexatexit:invalidInput", InputErrMsg); } if ((nlhs!=1)) { mexErrMsgIdAndTxt("MATLAB:mexatexit:invalidInput", OutputErrMsg); } char *input_buf0; input_buf0 = mxArrayToString(INPUTSPARSEA); if ((mxIsChar(INPUTSPARSEA))){ mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Input(FIRST ARGUMENT) must be array, or gpuArray object not %s\n",input_buf0); } char *input_buf1; input_buf1 = mxArrayToString(INPUTDENSEB); if ((mxIsChar(INPUTDENSEB))){ mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Input(SECOND ARGUMENT) must be array, or gpuArray object not %s\n",input_buf1); } if (mxIsGPUArray(INPUTSPARSEA) && mxIsGPUArray(INPUTDENSEB)) { mxGPUArray const *INPUTSPARSEGPUA; mxGPUArray const *INPUTDENSEGPUB; /* Initialize the MathWorks GPU API. 
*/ mxInitGPU(); INPUTSPARSEGPUA = mxGPUCreateFromMxArray(INPUTSPARSEA); INPUTDENSEGPUB = mxGPUCreateFromMxArray(INPUTDENSEB); if((mxGPUIsSparse(INPUTSPARSEGPUA))&& (!mxGPUIsSparse(INPUTDENSEGPUB)) ){ const mwSize *dimsGPUSA; dimsGPUSA=mxGPUGetDimensions(INPUTSPARSEGPUA); int numARows, numAColumns; numARows = (int)dimsGPUSA[0]; /* gets number of rows of A */ numAColumns = (int)dimsGPUSA[1]; /* gets number of columns of A */ const mwSize *dimsGPUSB; dimsGPUSB=mxGPUGetDimensions(INPUTDENSEGPUB); int numBRows, numBColumns; numBRows = (int)dimsGPUSB[0]; /* gets number of rows of B */ numBColumns = (int)dimsGPUSB[1]; /* gets number of columns of B */ if ( numARows != numAColumns ) { mxGPUDestroyGPUArray(INPUTDENSEGPUB); mxGPUDestroyGPUArray(INPUTSPARSEGPUA); mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Invalid input to MEX file,first argument must be a sparse/dense square matrix."); } if ( (numBColumns!= 1) ) { mxGPUDestroyGPUArray(INPUTDENSEGPUB); mxGPUDestroyGPUArray(INPUTSPARSEGPUA); mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Invalid input to MEX file, second argument must be a dense/sparse column vector."); } if ( (numBRows!= numARows) ) { mxGPUDestroyGPUArray(INPUTDENSEGPUB); mxGPUDestroyGPUArray(INPUTSPARSEGPUA); mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Invalid input to MEX file, array (matrix-vector) dimensions must agree."); } cuDoubleComplex const *d_B_dense; d_B_dense = (cuDoubleComplex const *)(mxGPUGetDataReadOnly(INPUTDENSEGPUB)); mwIndex nnz1; mxArray * VLSXY1 = mxGPUCreateMxArrayOnCPU(INPUTSPARSEGPUA); nnz1 = *(mxGetJc(VLSXY1) + numAColumns); int nnzA = (int)nnz1; mxArray * ROW_SORTA = mxCreateNumericMatrix(nnzA, 1,mxINT32_CLASS, mxREAL); int *ROWSORTA = (int *)mxGetInt32s(ROW_SORTA); SetIr_Data(VLSXY1, ROWSORTA); mxArray * COL_SORTA = mxCreateNumericMatrix(nnzA, 1, mxINT32_CLASS, mxREAL); int *COLSORTA = (int *)mxGetInt32s(COL_SORTA); SetJc_Int(VLSXY1, COLSORTA); cuDoubleComplex *VALSORTA = (cuDoubleComplex 
*)mxGetComplexDoubles(VLSXY1); cusparseHandle_t handle; cusparseSafeCall(cusparseCreate(&handle)); cusparseMatDescr_t descrA; cusparseSafeCall(cusparseCreateMatDescr(&descrA)); cusparseSetMatType(descrA, CUSPARSE_MATRIX_TYPE_GENERAL); cusparseSetMatIndexBase(descrA, CUSPARSE_INDEX_BASE_ONE); mxGPUDestroyGPUArray(INPUTSPARSEGPUA); mxGPUDestroyGPUArray(INPUTDENSEGPUB); //cuDoubleComplex *d_A; gpuErrchk(cudaMalloc(&d_A, nnzA * sizeof(*d_A))); //int *d_A_RowIndices; gpuErrchk(cudaMalloc(&d_A_RowIndices, (numARows + 1) * sizeof(*d_A_RowIndices))); //int *d_A_ColIndices; gpuErrchk(cudaMalloc(&d_A_ColIndices, nnzA * sizeof(*d_A_ColIndices))); //int *d_cooRowIndA; gpuErrchk(cudaMalloc(&d_cooRowIndA, nnzA * sizeof(*d_cooRowIndA))); size_t pivot_dimensA[1] = {nnzA}; size_t pivot_dimensROW_A[1] = {numARows+1}; size_t pivot_dimensCOL_A[1] = {nnzA}; size_t pivot_dimensCOO_A[1] = {nnzA}; mxGPUArray *A = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensA, mxDOUBLE_CLASS, mxCOMPLEX, MX_GPU_DO_NOT_INITIALIZE); cuDoubleComplex *d_A = (cuDoubleComplex *)mxGPUGetData(A); mxGPUArray * ROW_A = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensROW_A, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE); int *d_A_RowIndices = (int *)mxGPUGetData(ROW_A); mxGPUArray * COL_A = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensCOL_A, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE); int *d_A_ColIndices = (int *)mxGPUGetData(COL_A); mxGPUArray * COO_A = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensCOO_A, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE); int *d_cooRowIndA = (int *)mxGPUGetData(COO_A); // --- Descriptor for sparse matrix B gpuErrchk(cudaMemcpy(d_A, VALSORTA, nnzA * sizeof(*d_A), cudaMemcpyHostToDevice)); gpuErrchk(cudaMemcpy(d_A_ColIndices, COLSORTA, nnzA * sizeof(*d_A_ColIndices), cudaMemcpyHostToDevice)); gpuErrchk(cudaMemcpy(d_cooRowIndA, ROWSORTA, nnzA * sizeof(*d_cooRowIndA), cudaMemcpyHostToDevice)); mxDestroyArray(COL_SORTA); mxDestroyArray(ROW_SORTA); mxDestroyArray(VLSXY1); int *Pa = 
NULL; void *pBuffera = NULL; size_t pBufferSizeInBytesa = 0; cusparseXcoosort_bufferSizeExt(handle, numARows, numAColumns, nnzA, d_cooRowIndA, d_A_ColIndices, &pBufferSizeInBytesa); gpuErrchk(cudaMalloc(&pBuffera, sizeof(char)*pBufferSizeInBytesa)); gpuErrchk(cudaMalloc(&Pa, sizeof(int)*nnzA)); cusparseCreateIdentityPermutation(handle, nnzA, Pa); cusparseSafeCall(cusparseXcoosortByRow(handle, numARows, numAColumns, nnzA, d_cooRowIndA, d_A_ColIndices, Pa, pBuffera)); cusparseSafeCall(cusparseZgthr(handle, nnzA, d_A, d_A, Pa, CUSPARSE_INDEX_BASE_ZERO)); cusparseSafeCall(cusparseXcoo2csr(handle, d_cooRowIndA, nnzA, numARows, d_A_RowIndices, CUSPARSE_INDEX_BASE_ONE)); mxGPUDestroyGPUArray(COO_A); gpuErrchk(cudaFree(pBuffera)); gpuErrchk(cudaFree(Pa)); cusolverSpHandle_t handle_cusolver; cusolverSpCreate(&handle_cusolver); csrcholInfo_t chl_info = NULL; const double tol = 1.e-14; int singularity = 0; size_t size_internal = 0; size_t size_chol = 0; cusolverSafeCall(cusolverSpCreateCsrcholInfo(&chl_info)); cusolverSafeCall(cusolverSpXcsrcholAnalysis( handle_cusolver, numARows, nnzA, descrA, d_A_RowIndices, d_A_ColIndices, chl_info)); cusolverSafeCall(cusolverSpZcsrcholBufferInfo( handle_cusolver, numARows, nnzA, descrA, d_A, d_A_RowIndices, d_A_ColIndices, chl_info, &size_internal, &size_chol)); void *buffer_gpu = NULL; gpuErrchk(cudaMalloc(&buffer_gpu, sizeof(char)*size_chol)); cusolverSafeCall(cusolverSpZcsrcholFactor( handle_cusolver, numARows, nnzA, descrA, d_A, d_A_RowIndices, d_A_ColIndices, chl_info, buffer_gpu)); cusolverSafeCall(cusolverSpZcsrcholZeroPivot( handle_cusolver, chl_info, tol, &singularity)); if ( 0 <= singularity){ mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Invalid input to MEX file, (fatal error:) A is not invertible, singularity=%d\n", singularity); } size_t pivot_dimensionsvalueVa[1] = {numAColumns}; mxGPUArray *VAL = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionsvalueVa, mxDOUBLE_CLASS, mxCOMPLEX, MX_GPU_INITIALIZE_VALUES); 
cuDoubleComplex *VALOUT = (cuDoubleComplex *)mxGPUGetData(VAL); cusolverSafeCall(cusolverSpZcsrcholSolve( handle_cusolver, numARows, d_B_dense, VALOUT, chl_info, buffer_gpu)); mxGPUDestroyGPUArray(A); mxGPUDestroyGPUArray(ROW_A); mxGPUDestroyGPUArray(COL_A); OUTPUTMATRIX = mxGPUCreateMxArrayOnGPU(VAL); gpuErrchk(cudaFree(buffer_gpu)); mxGPUDestroyGPUArray(VAL); cusolverSpDestroyCsrcholInfo(chl_info); cusparseDestroyMatDescr(descrA); cusolverSpDestroy(handle_cusolver); cusparseDestroy(handle); } else{ mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Incorrect input arguments! %s\n"); } } //////////////////////////////////////////////////////////////////////////////////// else if (!(mxIsGPUArray(INPUTSPARSEA)) && !(mxIsGPUArray(INPUTDENSEB))){ // if ((mxGetClassID(INPUTSPARSEA) != mxDOUBLE_CLASS) || (mxGetClassID(INPUTSPARSEB) != mxDOUBLE_CLASS)) { // mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", // "Invalid input to MEX file, input(FIRST and SECOND ARGUMENTS) must be cuDoubleComplex precision."); // } if((mxIsSparse(INPUTSPARSEA))&& (!mxIsSparse(INPUTDENSEB)) ){ mxInitGPU(); const mwSize *dimsCPUA; dimsCPUA=mxGetDimensions(INPUTSPARSEA); int numARows = (int)dimsCPUA[0]; /* gets number of rows of A */ int numAColumns = (int)dimsCPUA[1]; /* gets number of columns of A */ const mwSize *dimsCPUB; dimsCPUB=mxGetDimensions(INPUTDENSEB); int numBRows = (int)dimsCPUB[0]; /* gets number of rows of B */ int numBColumns = (int)dimsCPUB[1]; /* gets number of columns of B */ if ( numARows != numAColumns ) { mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Invalid input to MEX file,first argument must be a sparse/dense square matrix."); } if ( (numBColumns!= 1) ) { mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Invalid input to MEX file, second argument must be a dense/sparse column vector."); } if ( (numBRows!= numARows) ) { mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Invalid input to MEX file, array (matrix-vector) dimensions must agree."); } 
mwIndex nnz1; nnz1 = *(mxGetJc(INPUTSPARSEA) + numAColumns); int nnzA = (int)nnz1; mxArray * ROW_SORTA = mxCreateNumericMatrix(nnzA, 1,mxINT32_CLASS, mxREAL); int *ROWSORTA = (int *)mxGetInt32s(ROW_SORTA); SetIr_Data(INPUTSPARSEA, ROWSORTA); mxArray * COL_SORTA = mxCreateNumericMatrix(nnzA, 1, mxINT32_CLASS, mxREAL); int *COLSORTA = (int *)mxGetInt32s(COL_SORTA); SetJc_Int(INPUTSPARSEA, COLSORTA); cuDoubleComplex *VALSORTA = (cuDoubleComplex *)mxGetComplexDoubles(INPUTSPARSEA); cusparseHandle_t handle; cusparseSafeCall(cusparseCreate(&handle)); cusparseMatDescr_t descrA; cusparseSafeCall(cusparseCreateMatDescr(&descrA)); cusparseSetMatType(descrA, CUSPARSE_MATRIX_TYPE_GENERAL); cusparseSetMatIndexBase(descrA, CUSPARSE_INDEX_BASE_ONE); size_t pivot_dimensionsvalueDB[1] = {numBRows}; mxGPUArray *OUTMB = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionsvalueDB, mxDOUBLE_CLASS, mxCOMPLEX, MX_GPU_DO_NOT_INITIALIZE); cuDoubleComplex *d_B_dense = (cuDoubleComplex *)mxGPUGetData(OUTMB); cuDoubleComplex *h_B_dense1; h_B_dense1 = (cuDoubleComplex *)mxGetComplexDoubles(INPUTDENSEB); gpuErrchk(cudaMemcpy(d_B_dense, h_B_dense1, numBRows * sizeof(*d_B_dense), cudaMemcpyHostToDevice)); //cuDoubleComplex *d_A; gpuErrchk(cudaMalloc(&d_A, nnzA * sizeof(*d_A))); //int *d_A_RowIndices; gpuErrchk(cudaMalloc(&d_A_RowIndices, (numARows + 1) * sizeof(*d_A_RowIndices))); //int *d_A_ColIndices; gpuErrchk(cudaMalloc(&d_A_ColIndices, nnzA * sizeof(*d_A_ColIndices))); //int *d_cooRowIndA; gpuErrchk(cudaMalloc(&d_cooRowIndA, nnzA * sizeof(*d_cooRowIndA))); size_t pivot_dimensA[1] = {nnzA}; size_t pivot_dimensROW_A[1] = {numARows+1}; size_t pivot_dimensCOL_A[1] = {nnzA}; size_t pivot_dimensCOO_A[1] = {nnzA}; mxGPUArray *A = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensA, mxDOUBLE_CLASS, mxCOMPLEX, MX_GPU_DO_NOT_INITIALIZE); cuDoubleComplex *d_A = (cuDoubleComplex *)mxGPUGetData(A); mxGPUArray * ROW_A = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensROW_A, mxINT32_CLASS, mxREAL, 
MX_GPU_DO_NOT_INITIALIZE); int *d_A_RowIndices = (int *)mxGPUGetData(ROW_A); mxGPUArray * COL_A = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensCOL_A, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE); int *d_A_ColIndices = (int *)mxGPUGetData(COL_A); mxGPUArray * COO_A = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensCOO_A, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE); int *d_cooRowIndA = (int *)mxGPUGetData(COO_A); // --- Descriptor for sparse matrix B gpuErrchk(cudaMemcpy(d_A, VALSORTA, nnzA * sizeof(*d_A), cudaMemcpyHostToDevice)); gpuErrchk(cudaMemcpy(d_A_ColIndices, COLSORTA, nnzA * sizeof(*d_A_ColIndices), cudaMemcpyHostToDevice)); gpuErrchk(cudaMemcpy(d_cooRowIndA, ROWSORTA, nnzA * sizeof(*d_cooRowIndA), cudaMemcpyHostToDevice)); mxDestroyArray(COL_SORTA); mxDestroyArray(ROW_SORTA); int *Pa = NULL; void *pBuffera = NULL; size_t pBufferSizeInBytesa = 0; cusparseXcoosort_bufferSizeExt(handle, numARows, numAColumns, nnzA, d_cooRowIndA, d_A_ColIndices, &pBufferSizeInBytesa); gpuErrchk(cudaMalloc(&pBuffera, sizeof(char)*pBufferSizeInBytesa)); gpuErrchk(cudaMalloc(&Pa, sizeof(int)*nnzA)); cusparseCreateIdentityPermutation(handle, nnzA, Pa); cusparseSafeCall(cusparseXcoosortByRow(handle, numARows, numAColumns, nnzA, d_cooRowIndA, d_A_ColIndices, Pa, pBuffera)); cusparseSafeCall(cusparseZgthr(handle, nnzA, d_A, d_A, Pa, CUSPARSE_INDEX_BASE_ZERO)); cusparseSafeCall(cusparseXcoo2csr(handle, d_cooRowIndA, nnzA, numARows, d_A_RowIndices, CUSPARSE_INDEX_BASE_ONE)); mxGPUDestroyGPUArray(COO_A); gpuErrchk(cudaFree(pBuffera)); gpuErrchk(cudaFree(Pa)); cusolverSpHandle_t handle_cusolver; cusolverSpCreate(&handle_cusolver); csrcholInfo_t chl_info = NULL; const double tol = 1.e-14; int singularity = 0; size_t size_internal = 0; size_t size_chol = 0; cusolverSafeCall(cusolverSpCreateCsrcholInfo(&chl_info)); cusolverSafeCall(cusolverSpXcsrcholAnalysis( handle_cusolver, numARows, nnzA, descrA, d_A_RowIndices, d_A_ColIndices, chl_info)); 
cusolverSafeCall(cusolverSpZcsrcholBufferInfo( handle_cusolver, numARows, nnzA, descrA, d_A, d_A_RowIndices, d_A_ColIndices, chl_info, &size_internal, &size_chol)); void *buffer_gpu = NULL; gpuErrchk(cudaMalloc(&buffer_gpu, sizeof(char)*size_chol)); cusolverSafeCall(cusolverSpZcsrcholFactor( handle_cusolver, numARows, nnzA, descrA, d_A, d_A_RowIndices, d_A_ColIndices, chl_info, buffer_gpu)); cusolverSafeCall(cusolverSpZcsrcholZeroPivot( handle_cusolver, chl_info, tol, &singularity)); if ( 0 <= singularity){ mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Invalid input to MEX file, (fatal error:) A is not invertible, singularity=%d\n", singularity); } size_t pivot_dimensionsvalueVa[1] = {numAColumns}; mxGPUArray *VAL = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionsvalueVa, mxDOUBLE_CLASS, mxCOMPLEX, MX_GPU_INITIALIZE_VALUES); cuDoubleComplex *VALOUT = (cuDoubleComplex *)mxGPUGetData(VAL); cusolverSafeCall(cusolverSpZcsrcholSolve( handle_cusolver, numARows, d_B_dense, VALOUT, chl_info, buffer_gpu)); mxGPUDestroyGPUArray(A); mxGPUDestroyGPUArray(ROW_A); mxGPUDestroyGPUArray(COL_A); mxGPUDestroyGPUArray(OUTMB); OUTPUTMATRIX = mxGPUCreateMxArrayOnGPU(VAL); gpuErrchk(cudaFree(buffer_gpu)); mxGPUDestroyGPUArray(VAL); cusolverSpDestroyCsrcholInfo(chl_info); cusparseDestroyMatDescr(descrA); cusolverSpDestroy(handle_cusolver); cusparseDestroy(handle); } else{ mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Incorrect input arguments! %s\n"); } } // else{ mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Incorrect input arguments! %s\n"); } }
dfd9f17924fc69452cce86bf679c02161df8234e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2018-2020 Zhixu Zhao * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include "tl_tensor_internal_cuda.h" template <typename T> static __global__ void dot_product_kernel(const T *src1, const T *src2, T *dst) { extern __shared__ int sdata[]; unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x * (blockDim.x * 2) + threadIdx.x; sdata[tid] = src1[i] * src2[i] + src1[i + blockDim.x] * src2[i + blockDim.x]; __syncthreads(); for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) { if (tid < s) { sdata[tid] += sdata[tid + s]; } __syncthreads(); } if (tid == 0) dst[blockIdx.x] = sdata[0]; } template <typename T> static void call_dot_product_kernel(const T *src1, const T *src2, T *dst, int n, int threads, int blocks) { dim3 dimBlock(threads, 1, 1); dim3 dimGrid(blocks, 1, 1); int smem_size = (threads <= 32) ? 
2 * threads * sizeof(T) : threads * sizeof(T); hipLaunchKernelGGL(( dot_product_kernel<T>), dim3(dimGrid), dim3(dimBlock), smem_size, 0, src1, src2, dst); tl_cuda_device_sync(); } template <typename T> static __global__ void reduce_sum_kernel(const T *src, T *dst) { extern __shared__ int sdata[]; unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x * (blockDim.x * 2) + threadIdx.x; sdata[tid] = src[i] + src[i + blockDim.x]; __syncthreads(); for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) { if (tid < s) { sdata[tid] += sdata[tid + s]; } __syncthreads(); } if (tid == 0) dst[blockIdx.x] = sdata[0]; } template <typename T> static void call_reduce_sum_kernel(const T *src, T *dst, int n, int threads, int blocks) { dim3 dimBlock(threads, 1, 1); dim3 dimGrid(blocks, 1, 1); int smem_size = (threads <= 32) ? 2 * threads * sizeof(T) : threads * sizeof(T); hipLaunchKernelGGL(( reduce_sum_kernel<T>), dim3(dimGrid), dim3(dimBlock), smem_size, 0, src, dst); tl_cuda_device_sync(); } static unsigned int next_pow2(unsigned int x) { --x; x |= x >> 1; x |= x >> 2; x |= x >> 4; x |= x >> 8; x |= x >> 16; return ++x; } static void reduce_get_num_blocks_and_threads(int n, int *blocks, int *threads) { *threads = (n < MAX_THREADS * 2) ? 
next_pow2((n + 1) / 2) : MAX_THREADS; *blocks = BLOCK_NUM(*threads * 2, n); } TL_EXPORT int tl_tensor_dot_product_cuda_ws_len(const tl_tensor *src) { int num_threads, num_blocks; reduce_get_num_blocks_and_threads(src->len, &num_blocks, &num_threads); return num_blocks; } TL_EXPORT tl_tensor *tl_tensor_dot_product_cuda(const tl_tensor *src1, const tl_tensor *src2, tl_tensor *ws1, tl_tensor *ws2, tl_tensor *dst) { int tmp_dim[1]; assert(tl_tensor_issameshape(src1, src2)); assert(tl_is_device_mem(src1->data) && tl_is_device_mem(src2->data)); assert(src1->dtype == src2->dtype); if (dst) { assert(tl_is_device_mem(dst->data)); assert(src1->dtype == dst->dtype); } else { tmp_dim[0] = 1; dst = tl_tensor_zeros_cuda(1, tmp_dim, src1->dtype); } int num_threads, num_blocks; reduce_get_num_blocks_and_threads(src1->len, &num_blocks, &num_threads); if (ws1) { assert(tl_is_device_mem(ws1->data)); assert(ws1->ndim == 1); assert(ws1->dims[0] == num_blocks); assert(ws1->dtype == src1->dtype); } else { tmp_dim[0] = num_blocks; ws1 = tl_tensor_zeros_cuda(1, tmp_dim, src1->dtype); } if (ws2) { assert(tl_is_device_mem(ws2->data)); assert(ws2->ndim == 1); assert(ws2->dims[0] == num_blocks); assert(ws2->dtype == src1->dtype); } else { tmp_dim[0] = num_blocks; ws2 = tl_tensor_zeros_cuda(1, tmp_dim, src1->dtype); } /* * Generated by tools/generic.pl with * $switchtype(src1->dtype, T) * $typenoset(T, TL_BOOL) * call_dot_product_kernel<T>((const T *)src1->data, (const T *)src2->data, (T *)ws1->data, src1->len, num_threads, * num_blocks); */ switch (src1->dtype) { case TL_DOUBLE: call_dot_product_kernel<double>((const double *)src1->data, (const double *)src2->data, (double *)ws1->data, src1->len, num_threads, num_blocks); break; case TL_FLOAT: call_dot_product_kernel<float>((const float *)src1->data, (const float *)src2->data, (float *)ws1->data, src1->len, num_threads, num_blocks); break; case TL_INT16: call_dot_product_kernel<int16_t>((const int16_t *)src1->data, (const int16_t *)src2->data, 
(int16_t *)ws1->data, src1->len, num_threads, num_blocks); break; case TL_INT32: call_dot_product_kernel<int32_t>((const int32_t *)src1->data, (const int32_t *)src2->data, (int32_t *)ws1->data, src1->len, num_threads, num_blocks); break; case TL_INT8: call_dot_product_kernel<int8_t>((const int8_t *)src1->data, (const int8_t *)src2->data, (int8_t *)ws1->data, src1->len, num_threads, num_blocks); break; case TL_UINT16: call_dot_product_kernel<uint16_t>((const uint16_t *)src1->data, (const uint16_t *)src2->data, (uint16_t *)ws1->data, src1->len, num_threads, num_blocks); break; case TL_UINT32: call_dot_product_kernel<uint32_t>((const uint32_t *)src1->data, (const uint32_t *)src2->data, (uint32_t *)ws1->data, src1->len, num_threads, num_blocks); break; case TL_UINT8: call_dot_product_kernel<uint8_t>((const uint8_t *)src1->data, (const uint8_t *)src2->data, (uint8_t *)ws1->data, src1->len, num_threads, num_blocks); break; default: assert(0 && "unsupported dtype for src1->dtype"); break; } int s = num_blocks; while (s > 1) { int threads = 0, blocks = 0; reduce_get_num_blocks_and_threads(s, &blocks, &threads); tl_memcpy_d2d(ws2->data, ws1->data, s * tl_size_of(ws1->dtype)); /* * Generated by tools/generic.pl with * $switchtype(src1->dtype,T) * $typenoset(T, TL_BOOL) * call_reduce_sum_kernel<T>((const T *)ws2->data, (T *)ws1->data, s, threads, blocks); */ switch (src1->dtype) { case TL_DOUBLE: call_reduce_sum_kernel<double>((const double *)ws2->data, (double *)ws1->data, s, threads, blocks); break; case TL_FLOAT: call_reduce_sum_kernel<float>((const float *)ws2->data, (float *)ws1->data, s, threads, blocks); break; case TL_INT16: call_reduce_sum_kernel<int16_t>((const int16_t *)ws2->data, (int16_t *)ws1->data, s, threads, blocks); break; case TL_INT32: call_reduce_sum_kernel<int32_t>((const int32_t *)ws2->data, (int32_t *)ws1->data, s, threads, blocks); break; case TL_INT8: call_reduce_sum_kernel<int8_t>((const int8_t *)ws2->data, (int8_t *)ws1->data, s, threads, blocks); 
break; case TL_UINT16: call_reduce_sum_kernel<uint16_t>((const uint16_t *)ws2->data, (uint16_t *)ws1->data, s, threads, blocks); break; case TL_UINT32: call_reduce_sum_kernel<uint32_t>((const uint32_t *)ws2->data, (uint32_t *)ws1->data, s, threads, blocks); break; case TL_UINT8: call_reduce_sum_kernel<uint8_t>((const uint8_t *)ws2->data, (uint8_t *)ws1->data, s, threads, blocks); break; default: assert(0 && "unsupported dtype for src1->dtype"); break; } s = (s + (threads * 2 - 1)) / (threads * 2); } tl_memcpy_d2d(dst->data, ws1->data, tl_size_of(src1->dtype)); if (!ws1) tl_tensor_free_data_too_cuda(ws1); if (!ws2) tl_tensor_free_data_too_cuda(ws2); return dst; }
dfd9f17924fc69452cce86bf679c02161df8234e.cu
/* * Copyright (c) 2018-2020 Zhixu Zhao * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include "tl_tensor_internal_cuda.h" template <typename T> static __global__ void dot_product_kernel(const T *src1, const T *src2, T *dst) { extern __shared__ int sdata[]; unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x * (blockDim.x * 2) + threadIdx.x; sdata[tid] = src1[i] * src2[i] + src1[i + blockDim.x] * src2[i + blockDim.x]; __syncthreads(); for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) { if (tid < s) { sdata[tid] += sdata[tid + s]; } __syncthreads(); } if (tid == 0) dst[blockIdx.x] = sdata[0]; } template <typename T> static void call_dot_product_kernel(const T *src1, const T *src2, T *dst, int n, int threads, int blocks) { dim3 dimBlock(threads, 1, 1); dim3 dimGrid(blocks, 1, 1); int smem_size = (threads <= 32) ? 
2 * threads * sizeof(T) : threads * sizeof(T); dot_product_kernel<T><<<dimGrid, dimBlock, smem_size>>>(src1, src2, dst); tl_cuda_device_sync(); } template <typename T> static __global__ void reduce_sum_kernel(const T *src, T *dst) { extern __shared__ int sdata[]; unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x * (blockDim.x * 2) + threadIdx.x; sdata[tid] = src[i] + src[i + blockDim.x]; __syncthreads(); for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) { if (tid < s) { sdata[tid] += sdata[tid + s]; } __syncthreads(); } if (tid == 0) dst[blockIdx.x] = sdata[0]; } template <typename T> static void call_reduce_sum_kernel(const T *src, T *dst, int n, int threads, int blocks) { dim3 dimBlock(threads, 1, 1); dim3 dimGrid(blocks, 1, 1); int smem_size = (threads <= 32) ? 2 * threads * sizeof(T) : threads * sizeof(T); reduce_sum_kernel<T><<<dimGrid, dimBlock, smem_size>>>(src, dst); tl_cuda_device_sync(); } static unsigned int next_pow2(unsigned int x) { --x; x |= x >> 1; x |= x >> 2; x |= x >> 4; x |= x >> 8; x |= x >> 16; return ++x; } static void reduce_get_num_blocks_and_threads(int n, int *blocks, int *threads) { *threads = (n < MAX_THREADS * 2) ? 
next_pow2((n + 1) / 2) : MAX_THREADS; *blocks = BLOCK_NUM(*threads * 2, n); } TL_EXPORT int tl_tensor_dot_product_cuda_ws_len(const tl_tensor *src) { int num_threads, num_blocks; reduce_get_num_blocks_and_threads(src->len, &num_blocks, &num_threads); return num_blocks; } TL_EXPORT tl_tensor *tl_tensor_dot_product_cuda(const tl_tensor *src1, const tl_tensor *src2, tl_tensor *ws1, tl_tensor *ws2, tl_tensor *dst) { int tmp_dim[1]; assert(tl_tensor_issameshape(src1, src2)); assert(tl_is_device_mem(src1->data) && tl_is_device_mem(src2->data)); assert(src1->dtype == src2->dtype); if (dst) { assert(tl_is_device_mem(dst->data)); assert(src1->dtype == dst->dtype); } else { tmp_dim[0] = 1; dst = tl_tensor_zeros_cuda(1, tmp_dim, src1->dtype); } int num_threads, num_blocks; reduce_get_num_blocks_and_threads(src1->len, &num_blocks, &num_threads); if (ws1) { assert(tl_is_device_mem(ws1->data)); assert(ws1->ndim == 1); assert(ws1->dims[0] == num_blocks); assert(ws1->dtype == src1->dtype); } else { tmp_dim[0] = num_blocks; ws1 = tl_tensor_zeros_cuda(1, tmp_dim, src1->dtype); } if (ws2) { assert(tl_is_device_mem(ws2->data)); assert(ws2->ndim == 1); assert(ws2->dims[0] == num_blocks); assert(ws2->dtype == src1->dtype); } else { tmp_dim[0] = num_blocks; ws2 = tl_tensor_zeros_cuda(1, tmp_dim, src1->dtype); } /* * Generated by tools/generic.pl with * $switchtype(src1->dtype, T) * $typenoset(T, TL_BOOL) * call_dot_product_kernel<T>((const T *)src1->data, (const T *)src2->data, (T *)ws1->data, src1->len, num_threads, * num_blocks); */ switch (src1->dtype) { case TL_DOUBLE: call_dot_product_kernel<double>((const double *)src1->data, (const double *)src2->data, (double *)ws1->data, src1->len, num_threads, num_blocks); break; case TL_FLOAT: call_dot_product_kernel<float>((const float *)src1->data, (const float *)src2->data, (float *)ws1->data, src1->len, num_threads, num_blocks); break; case TL_INT16: call_dot_product_kernel<int16_t>((const int16_t *)src1->data, (const int16_t *)src2->data, 
(int16_t *)ws1->data, src1->len, num_threads, num_blocks); break; case TL_INT32: call_dot_product_kernel<int32_t>((const int32_t *)src1->data, (const int32_t *)src2->data, (int32_t *)ws1->data, src1->len, num_threads, num_blocks); break; case TL_INT8: call_dot_product_kernel<int8_t>((const int8_t *)src1->data, (const int8_t *)src2->data, (int8_t *)ws1->data, src1->len, num_threads, num_blocks); break; case TL_UINT16: call_dot_product_kernel<uint16_t>((const uint16_t *)src1->data, (const uint16_t *)src2->data, (uint16_t *)ws1->data, src1->len, num_threads, num_blocks); break; case TL_UINT32: call_dot_product_kernel<uint32_t>((const uint32_t *)src1->data, (const uint32_t *)src2->data, (uint32_t *)ws1->data, src1->len, num_threads, num_blocks); break; case TL_UINT8: call_dot_product_kernel<uint8_t>((const uint8_t *)src1->data, (const uint8_t *)src2->data, (uint8_t *)ws1->data, src1->len, num_threads, num_blocks); break; default: assert(0 && "unsupported dtype for src1->dtype"); break; } int s = num_blocks; while (s > 1) { int threads = 0, blocks = 0; reduce_get_num_blocks_and_threads(s, &blocks, &threads); tl_memcpy_d2d(ws2->data, ws1->data, s * tl_size_of(ws1->dtype)); /* * Generated by tools/generic.pl with * $switchtype(src1->dtype,T) * $typenoset(T, TL_BOOL) * call_reduce_sum_kernel<T>((const T *)ws2->data, (T *)ws1->data, s, threads, blocks); */ switch (src1->dtype) { case TL_DOUBLE: call_reduce_sum_kernel<double>((const double *)ws2->data, (double *)ws1->data, s, threads, blocks); break; case TL_FLOAT: call_reduce_sum_kernel<float>((const float *)ws2->data, (float *)ws1->data, s, threads, blocks); break; case TL_INT16: call_reduce_sum_kernel<int16_t>((const int16_t *)ws2->data, (int16_t *)ws1->data, s, threads, blocks); break; case TL_INT32: call_reduce_sum_kernel<int32_t>((const int32_t *)ws2->data, (int32_t *)ws1->data, s, threads, blocks); break; case TL_INT8: call_reduce_sum_kernel<int8_t>((const int8_t *)ws2->data, (int8_t *)ws1->data, s, threads, blocks); 
break; case TL_UINT16: call_reduce_sum_kernel<uint16_t>((const uint16_t *)ws2->data, (uint16_t *)ws1->data, s, threads, blocks); break; case TL_UINT32: call_reduce_sum_kernel<uint32_t>((const uint32_t *)ws2->data, (uint32_t *)ws1->data, s, threads, blocks); break; case TL_UINT8: call_reduce_sum_kernel<uint8_t>((const uint8_t *)ws2->data, (uint8_t *)ws1->data, s, threads, blocks); break; default: assert(0 && "unsupported dtype for src1->dtype"); break; } s = (s + (threads * 2 - 1)) / (threads * 2); } tl_memcpy_d2d(dst->data, ws1->data, tl_size_of(src1->dtype)); if (!ws1) tl_tensor_free_data_too_cuda(ws1); if (!ws2) tl_tensor_free_data_too_cuda(ws2); return dst; }
68e92ff71a7e72feb43534e1580c463aaf125e79.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or bpied warranties, including, but not limited to, the bpied // warranties of merchantability and fitness for a particular purpose are disclaimed. 
// In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. // //M*/ #if !defined CUDA_DISABLER #include <thrust/device_ptr.h> #include <thrust/sort.h> #include "opencv2/gpu/device/common.hpp" #include "opencv2/gpu/device/emulation.hpp" #include "opencv2/gpu/device/vec_math.hpp" #include "opencv2/gpu/device/limits.hpp" #include "opencv2/gpu/device/dynamic_smem.hpp" namespace cv { namespace gpu { namespace device { namespace hough { __device__ int g_counter; //////////////////////////////////////////////////////////////////////// // buildPointList template <int PIXELS_PER_THREAD> __global__ void buildPointList(const PtrStepSzb src, unsigned int* list) { __shared__ unsigned int s_queues[4][32 * PIXELS_PER_THREAD]; __shared__ int s_qsize[4]; __shared__ int s_globStart[4]; const int x = blockIdx.x * blockDim.x * PIXELS_PER_THREAD + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (threadIdx.x == 0) s_qsize[threadIdx.y] = 0; __syncthreads(); if (y < src.rows) { // fill the queue const uchar* srcRow = src.ptr(y); for (int i = 0, xx = x; i < PIXELS_PER_THREAD && xx < src.cols; ++i, xx += blockDim.x) { if (srcRow[xx]) { const unsigned int val = (y << 16) | xx; const int qidx = Emulation::smem::atomicAdd(&s_qsize[threadIdx.y], 1); s_queues[threadIdx.y][qidx] = val; } } } __syncthreads(); // let one thread reserve the space required in the global list if (threadIdx.x == 0 && threadIdx.y == 0) { // find how many items are stored in each list int totalSize = 0; for (int i = 0; i < 
blockDim.y; ++i) { s_globStart[i] = totalSize; totalSize += s_qsize[i]; } // calculate the offset in the global list const int globalOffset = atomicAdd(&g_counter, totalSize); for (int i = 0; i < blockDim.y; ++i) s_globStart[i] += globalOffset; } __syncthreads(); // copy local queues to global queue const int qsize = s_qsize[threadIdx.y]; int gidx = s_globStart[threadIdx.y] + threadIdx.x; for(int i = threadIdx.x; i < qsize; i += blockDim.x, gidx += blockDim.x) list[gidx] = s_queues[threadIdx.y][i]; } int buildPointList_gpu(PtrStepSzb src, unsigned int* list) { const int PIXELS_PER_THREAD = 16; void* counterPtr; cudaSafeCall( hipGetSymbolAddress(&counterPtr, g_counter) ); cudaSafeCall( hipMemset(counterPtr, 0, sizeof(int)) ); const dim3 block(32, 4); const dim3 grid(divUp(src.cols, block.x * PIXELS_PER_THREAD), divUp(src.rows, block.y)); cudaSafeCall( hipFuncSetCacheConfig(buildPointList<PIXELS_PER_THREAD>, hipFuncCachePreferShared) ); hipLaunchKernelGGL(( buildPointList<PIXELS_PER_THREAD>), dim3(grid), dim3(block), 0, 0, src, list); cudaSafeCall( hipGetLastError() ); cudaSafeCall( hipDeviceSynchronize() ); int totalCount; cudaSafeCall( hipMemcpy(&totalCount, counterPtr, sizeof(int), hipMemcpyDeviceToHost) ); return totalCount; } //////////////////////////////////////////////////////////////////////// // linesAccum __global__ void linesAccumGlobal(const unsigned int* list, const int count, PtrStepi accum, const float irho, const float theta, const int numrho) { const int n = blockIdx.x; const float ang = n * theta; float sinVal; float cosVal; sincosf(ang, &sinVal, &cosVal); sinVal *= irho; cosVal *= irho; const int shift = (numrho - 1) / 2; int* accumRow = accum.ptr(n + 1); for (int i = threadIdx.x; i < count; i += blockDim.x) { const unsigned int val = list[i]; const int x = (val & 0xFFFF); const int y = (val >> 16) & 0xFFFF; int r = __float2int_rn(x * cosVal + y * sinVal); r += shift; ::atomicAdd(accumRow + r + 1, 1); } } __global__ void linesAccumShared(const 
unsigned int* list, const int count, PtrStepi accum, const float irho, const float theta, const int numrho) { int* smem = DynamicSharedMem<int>(); for (int i = threadIdx.x; i < numrho + 1; i += blockDim.x) smem[i] = 0; __syncthreads(); const int n = blockIdx.x; const float ang = n * theta; float sinVal; float cosVal; sincosf(ang, &sinVal, &cosVal); sinVal *= irho; cosVal *= irho; const int shift = (numrho - 1) / 2; for (int i = threadIdx.x; i < count; i += blockDim.x) { const unsigned int val = list[i]; const int x = (val & 0xFFFF); const int y = (val >> 16) & 0xFFFF; int r = __float2int_rn(x * cosVal + y * sinVal); r += shift; Emulation::smem::atomicAdd(&smem[r + 1], 1); } __syncthreads(); int* accumRow = accum.ptr(n + 1); for (int i = threadIdx.x; i < numrho + 1; i += blockDim.x) accumRow[i] = smem[i]; } void linesAccum_gpu(const unsigned int* list, int count, PtrStepSzi accum, float rho, float theta, size_t sharedMemPerBlock, bool has20) { const dim3 block(has20 ? 1024 : 512); const dim3 grid(accum.rows - 2); size_t smemSize = (accum.cols - 1) * sizeof(int); if (smemSize < sharedMemPerBlock - 1000) hipLaunchKernelGGL(( linesAccumShared), dim3(grid), dim3(block), smemSize, 0, list, count, accum, 1.0f / rho, theta, accum.cols - 2); else hipLaunchKernelGGL(( linesAccumGlobal), dim3(grid), dim3(block), 0, 0, list, count, accum, 1.0f / rho, theta, accum.cols - 2); cudaSafeCall( hipGetLastError() ); cudaSafeCall( hipDeviceSynchronize() ); } //////////////////////////////////////////////////////////////////////// // linesGetResult __global__ void linesGetResult(const PtrStepSzi accum, float2* out, int* votes, const int maxSize, const float rho, const float theta, const int threshold, const int numrho) { const int r = blockIdx.x * blockDim.x + threadIdx.x; const int n = blockIdx.y * blockDim.y + threadIdx.y; if (r >= accum.cols - 2 || n >= accum.rows - 2) return; const int curVotes = accum(n + 1, r + 1); if (curVotes > threshold && curVotes > accum(n + 1, r) && curVotes 
>= accum(n + 1, r + 2) && curVotes > accum(n, r + 1) && curVotes >= accum(n + 2, r + 1)) { const float radius = (r - (numrho - 1) * 0.5f) * rho; const float angle = n * theta; const int ind = ::atomicAdd(&g_counter, 1); if (ind < maxSize) { out[ind] = make_float2(radius, angle); votes[ind] = curVotes; } } } int linesGetResult_gpu(PtrStepSzi accum, float2* out, int* votes, int maxSize, float rho, float theta, int threshold, bool doSort) { void* counterPtr; cudaSafeCall( hipGetSymbolAddress(&counterPtr, g_counter) ); cudaSafeCall( hipMemset(counterPtr, 0, sizeof(int)) ); const dim3 block(32, 8); const dim3 grid(divUp(accum.cols - 2, block.x), divUp(accum.rows - 2, block.y)); cudaSafeCall( hipFuncSetCacheConfig(linesGetResult, hipFuncCachePreferL1) ); hipLaunchKernelGGL(( linesGetResult), dim3(grid), dim3(block), 0, 0, accum, out, votes, maxSize, rho, theta, threshold, accum.cols - 2); cudaSafeCall( hipGetLastError() ); cudaSafeCall( hipDeviceSynchronize() ); int totalCount; cudaSafeCall( hipMemcpy(&totalCount, counterPtr, sizeof(int), hipMemcpyDeviceToHost) ); totalCount = ::min(totalCount, maxSize); if (doSort && totalCount > 0) { thrust::device_ptr<float2> outPtr(out); thrust::device_ptr<int> votesPtr(votes); thrust::sort_by_key(votesPtr, votesPtr + totalCount, outPtr, thrust::greater<int>()); } return totalCount; } //////////////////////////////////////////////////////////////////////// // houghLinesProbabilistic texture<uchar, hipTextureType2D, hipReadModeElementType> tex_mask(false, hipFilterModePoint, hipAddressModeClamp); __global__ void houghLinesProbabilistic(const PtrStepSzi accum, int4* out, const int maxSize, const float rho, const float theta, const int lineGap, const int lineLength, const int rows, const int cols) { const int r = blockIdx.x * blockDim.x + threadIdx.x; const int n = blockIdx.y * blockDim.y + threadIdx.y; if (r >= accum.cols - 2 || n >= accum.rows - 2) return; const int curVotes = accum(n + 1, r + 1); if (curVotes >= lineLength && 
curVotes > accum(n, r) && curVotes > accum(n, r + 1) && curVotes > accum(n, r + 2) && curVotes > accum(n + 1, r) && curVotes > accum(n + 1, r + 2) && curVotes > accum(n + 2, r) && curVotes > accum(n + 2, r + 1) && curVotes > accum(n + 2, r + 2)) { const float radius = (r - (accum.cols - 2 - 1) * 0.5f) * rho; const float angle = n * theta; float cosa; float sina; sincosf(angle, &sina, &cosa); float2 p0 = make_float2(cosa * radius, sina * radius); float2 dir = make_float2(-sina, cosa); float2 pb[4] = {make_float2(-1, -1), make_float2(-1, -1), make_float2(-1, -1), make_float2(-1, -1)}; float a; if (dir.x != 0) { a = -p0.x / dir.x; pb[0].x = 0; pb[0].y = p0.y + a * dir.y; a = (cols - 1 - p0.x) / dir.x; pb[1].x = cols - 1; pb[1].y = p0.y + a * dir.y; } if (dir.y != 0) { a = -p0.y / dir.y; pb[2].x = p0.x + a * dir.x; pb[2].y = 0; a = (rows - 1 - p0.y) / dir.y; pb[3].x = p0.x + a * dir.x; pb[3].y = rows - 1; } if (pb[0].x == 0 && (pb[0].y >= 0 && pb[0].y < rows)) { p0 = pb[0]; if (dir.x < 0) dir = -dir; } else if (pb[1].x == cols - 1 && (pb[0].y >= 0 && pb[0].y < rows)) { p0 = pb[1]; if (dir.x > 0) dir = -dir; } else if (pb[2].y == 0 && (pb[2].x >= 0 && pb[2].x < cols)) { p0 = pb[2]; if (dir.y < 0) dir = -dir; } else if (pb[3].y == rows - 1 && (pb[3].x >= 0 && pb[3].x < cols)) { p0 = pb[3]; if (dir.y > 0) dir = -dir; } float2 d; if (::fabsf(dir.x) > ::fabsf(dir.y)) { d.x = dir.x > 0 ? 1 : -1; d.y = dir.y / ::fabsf(dir.x); } else { d.x = dir.x / ::fabsf(dir.y); d.y = dir.y > 0 ? 
1 : -1; } float2 line_end[2]; int gap; bool inLine = false; float2 p1 = p0; if (p1.x < 0 || p1.x >= cols || p1.y < 0 || p1.y >= rows) return; for (;;) { if (tex2D(tex_mask, p1.x, p1.y)) { gap = 0; if (!inLine) { line_end[0] = p1; line_end[1] = p1; inLine = true; } else { line_end[1] = p1; } } else if (inLine) { if (++gap > lineGap) { bool good_line = ::abs(line_end[1].x - line_end[0].x) >= lineLength || ::abs(line_end[1].y - line_end[0].y) >= lineLength; if (good_line) { const int ind = ::atomicAdd(&g_counter, 1); if (ind < maxSize) out[ind] = make_int4(line_end[0].x, line_end[0].y, line_end[1].x, line_end[1].y); } gap = 0; inLine = false; } } p1 = p1 + d; if (p1.x < 0 || p1.x >= cols || p1.y < 0 || p1.y >= rows) { if (inLine) { bool good_line = ::abs(line_end[1].x - line_end[0].x) >= lineLength || ::abs(line_end[1].y - line_end[0].y) >= lineLength; if (good_line) { const int ind = ::atomicAdd(&g_counter, 1); if (ind < maxSize) out[ind] = make_int4(line_end[0].x, line_end[0].y, line_end[1].x, line_end[1].y); } } break; } } } } int houghLinesProbabilistic_gpu(PtrStepSzb mask, PtrStepSzi accum, int4* out, int maxSize, float rho, float theta, int lineGap, int lineLength) { void* counterPtr; cudaSafeCall( hipGetSymbolAddress(&counterPtr, g_counter) ); cudaSafeCall( hipMemset(counterPtr, 0, sizeof(int)) ); const dim3 block(32, 8); const dim3 grid(divUp(accum.cols - 2, block.x), divUp(accum.rows - 2, block.y)); bindTexture(&tex_mask, mask); hipLaunchKernelGGL(( houghLinesProbabilistic), dim3(grid), dim3(block), 0, 0, accum, out, maxSize, rho, theta, lineGap, lineLength, mask.rows, mask.cols); cudaSafeCall( hipGetLastError() ); cudaSafeCall( hipDeviceSynchronize() ); int totalCount; cudaSafeCall( hipMemcpy(&totalCount, counterPtr, sizeof(int), hipMemcpyDeviceToHost) ); totalCount = ::min(totalCount, maxSize); return totalCount; } //////////////////////////////////////////////////////////////////////// // circlesAccumCenters __global__ void circlesAccumCenters(const 
unsigned int* list, const int count, const PtrStepi dx, const PtrStepi dy, PtrStepi accum, const int width, const int height, const int minRadius, const int maxRadius, const float idp) { const int SHIFT = 10; const int ONE = 1 << SHIFT; const int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid >= count) return; const unsigned int val = list[tid]; const int x = (val & 0xFFFF); const int y = (val >> 16) & 0xFFFF; const int vx = dx(y, x); const int vy = dy(y, x); if (vx == 0 && vy == 0) return; const float mag = ::sqrtf(vx * vx + vy * vy); const int x0 = __float2int_rn((x * idp) * ONE); const int y0 = __float2int_rn((y * idp) * ONE); int sx = __float2int_rn((vx * idp) * ONE / mag); int sy = __float2int_rn((vy * idp) * ONE / mag); // Step from minRadius to maxRadius in both directions of the gradient for (int k1 = 0; k1 < 2; ++k1) { int x1 = x0 + minRadius * sx; int y1 = y0 + minRadius * sy; for (int r = minRadius; r <= maxRadius; x1 += sx, y1 += sy, ++r) { const int x2 = x1 >> SHIFT; const int y2 = y1 >> SHIFT; if (x2 < 0 || x2 >= width || y2 < 0 || y2 >= height) break; ::atomicAdd(accum.ptr(y2 + 1) + x2 + 1, 1); } sx = -sx; sy = -sy; } } void circlesAccumCenters_gpu(const unsigned int* list, int count, PtrStepi dx, PtrStepi dy, PtrStepSzi accum, int minRadius, int maxRadius, float idp) { const dim3 block(256); const dim3 grid(divUp(count, block.x)); cudaSafeCall( hipFuncSetCacheConfig(circlesAccumCenters, hipFuncCachePreferL1) ); hipLaunchKernelGGL(( circlesAccumCenters), dim3(grid), dim3(block), 0, 0, list, count, dx, dy, accum, accum.cols - 2, accum.rows - 2, minRadius, maxRadius, idp); cudaSafeCall( hipGetLastError() ); cudaSafeCall( hipDeviceSynchronize() ); } //////////////////////////////////////////////////////////////////////// // buildCentersList __global__ void buildCentersList(const PtrStepSzi accum, unsigned int* centers, const int threshold) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; 
if (x < accum.cols - 2 && y < accum.rows - 2) { const int top = accum(y, x + 1); const int left = accum(y + 1, x); const int cur = accum(y + 1, x + 1); const int right = accum(y + 1, x + 2); const int bottom = accum(y + 2, x + 1); if (cur > threshold && cur > top && cur >= bottom && cur > left && cur >= right) { const unsigned int val = (y << 16) | x; const int idx = ::atomicAdd(&g_counter, 1); centers[idx] = val; } } } int buildCentersList_gpu(PtrStepSzi accum, unsigned int* centers, int threshold) { void* counterPtr; cudaSafeCall( hipGetSymbolAddress(&counterPtr, g_counter) ); cudaSafeCall( hipMemset(counterPtr, 0, sizeof(int)) ); const dim3 block(32, 8); const dim3 grid(divUp(accum.cols - 2, block.x), divUp(accum.rows - 2, block.y)); cudaSafeCall( hipFuncSetCacheConfig(buildCentersList, hipFuncCachePreferL1) ); hipLaunchKernelGGL(( buildCentersList), dim3(grid), dim3(block), 0, 0, accum, centers, threshold); cudaSafeCall( hipGetLastError() ); cudaSafeCall( hipDeviceSynchronize() ); int totalCount; cudaSafeCall( hipMemcpy(&totalCount, counterPtr, sizeof(int), hipMemcpyDeviceToHost) ); return totalCount; } //////////////////////////////////////////////////////////////////////// // circlesAccumRadius __global__ void circlesAccumRadius(const unsigned int* centers, const unsigned int* list, const int count, float3* circles, const int maxCircles, const float dp, const int minRadius, const int maxRadius, const int histSize, const int threshold) { int* smem = DynamicSharedMem<int>(); for (int i = threadIdx.x; i < histSize + 2; i += blockDim.x) smem[i] = 0; __syncthreads(); unsigned int val = centers[blockIdx.x]; float cx = (val & 0xFFFF); float cy = (val >> 16) & 0xFFFF; cx = (cx + 0.5f) * dp; cy = (cy + 0.5f) * dp; for (int i = threadIdx.x; i < count; i += blockDim.x) { val = list[i]; const int x = (val & 0xFFFF); const int y = (val >> 16) & 0xFFFF; const float rad = ::sqrtf((cx - x) * (cx - x) + (cy - y) * (cy - y)); if (rad >= minRadius && rad <= maxRadius) { const 
int r = __float2int_rn(rad - minRadius); Emulation::smem::atomicAdd(&smem[r + 1], 1); } } __syncthreads(); for (int i = threadIdx.x; i < histSize; i += blockDim.x) { const int curVotes = smem[i + 1]; if (curVotes >= threshold && curVotes > smem[i] && curVotes >= smem[i + 2]) { const int ind = ::atomicAdd(&g_counter, 1); if (ind < maxCircles) circles[ind] = make_float3(cx, cy, i + minRadius); } } } int circlesAccumRadius_gpu(const unsigned int* centers, int centersCount, const unsigned int* list, int count, float3* circles, int maxCircles, float dp, int minRadius, int maxRadius, int threshold, bool has20) { void* counterPtr; cudaSafeCall( hipGetSymbolAddress(&counterPtr, g_counter) ); cudaSafeCall( hipMemset(counterPtr, 0, sizeof(int)) ); const dim3 block(has20 ? 1024 : 512); const dim3 grid(centersCount); const int histSize = maxRadius - minRadius + 1; size_t smemSize = (histSize + 2) * sizeof(int); hipLaunchKernelGGL(( circlesAccumRadius), dim3(grid), dim3(block), smemSize, 0, centers, list, count, circles, maxCircles, dp, minRadius, maxRadius, histSize, threshold); cudaSafeCall( hipGetLastError() ); cudaSafeCall( hipDeviceSynchronize() ); int totalCount; cudaSafeCall( hipMemcpy(&totalCount, counterPtr, sizeof(int), hipMemcpyDeviceToHost) ); totalCount = ::min(totalCount, maxCircles); return totalCount; } //////////////////////////////////////////////////////////////////////// // Generalized Hough template <typename T, int PIXELS_PER_THREAD> __global__ void buildEdgePointList(const PtrStepSzb edges, const PtrStep<T> dx, const PtrStep<T> dy, unsigned int* coordList, float* thetaList) { __shared__ unsigned int s_coordLists[4][32 * PIXELS_PER_THREAD]; __shared__ float s_thetaLists[4][32 * PIXELS_PER_THREAD]; __shared__ int s_sizes[4]; __shared__ int s_globStart[4]; const int x = blockIdx.x * blockDim.x * PIXELS_PER_THREAD + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (threadIdx.x == 0) s_sizes[threadIdx.y] = 0; __syncthreads(); if (y < 
edges.rows) { // fill the queue const uchar* edgesRow = edges.ptr(y); const T* dxRow = dx.ptr(y); const T* dyRow = dy.ptr(y); for (int i = 0, xx = x; i < PIXELS_PER_THREAD && xx < edges.cols; ++i, xx += blockDim.x) { const T dxVal = dxRow[xx]; const T dyVal = dyRow[xx]; if (edgesRow[xx] && (dxVal != 0 || dyVal != 0)) { const unsigned int coord = (y << 16) | xx; float theta = ::atan2f(dyVal, dxVal); if (theta < 0) theta += 2.0f * CV_PI_F; const int qidx = Emulation::smem::atomicAdd(&s_sizes[threadIdx.y], 1); s_coordLists[threadIdx.y][qidx] = coord; s_thetaLists[threadIdx.y][qidx] = theta; } } } __syncthreads(); // let one thread reserve the space required in the global list if (threadIdx.x == 0 && threadIdx.y == 0) { // find how many items are stored in each list int totalSize = 0; for (int i = 0; i < blockDim.y; ++i) { s_globStart[i] = totalSize; totalSize += s_sizes[i]; } // calculate the offset in the global list const int globalOffset = atomicAdd(&g_counter, totalSize); for (int i = 0; i < blockDim.y; ++i) s_globStart[i] += globalOffset; } __syncthreads(); // copy local queues to global queue const int qsize = s_sizes[threadIdx.y]; int gidx = s_globStart[threadIdx.y] + threadIdx.x; for(int i = threadIdx.x; i < qsize; i += blockDim.x, gidx += blockDim.x) { coordList[gidx] = s_coordLists[threadIdx.y][i]; thetaList[gidx] = s_thetaLists[threadIdx.y][i]; } } template <typename T> int buildEdgePointList_gpu(PtrStepSzb edges, PtrStepSzb dx, PtrStepSzb dy, unsigned int* coordList, float* thetaList) { const int PIXELS_PER_THREAD = 8; void* counterPtr; cudaSafeCall( hipGetSymbolAddress(&counterPtr, g_counter) ); cudaSafeCall( hipMemset(counterPtr, 0, sizeof(int)) ); const dim3 block(32, 4); const dim3 grid(divUp(edges.cols, block.x * PIXELS_PER_THREAD), divUp(edges.rows, block.y)); cudaSafeCall( hipFuncSetCacheConfig(buildEdgePointList<T, PIXELS_PER_THREAD>, hipFuncCachePreferShared) ); hipLaunchKernelGGL(( buildEdgePointList<T, PIXELS_PER_THREAD>), dim3(grid), 
dim3(block), 0, 0, edges, (PtrStepSz<T>) dx, (PtrStepSz<T>) dy, coordList, thetaList); cudaSafeCall( hipGetLastError() ); cudaSafeCall( hipDeviceSynchronize() ); int totalCount; cudaSafeCall( hipMemcpy(&totalCount, counterPtr, sizeof(int), hipMemcpyDeviceToHost) ); return totalCount; } template int buildEdgePointList_gpu<short>(PtrStepSzb edges, PtrStepSzb dx, PtrStepSzb dy, unsigned int* coordList, float* thetaList); template int buildEdgePointList_gpu<int>(PtrStepSzb edges, PtrStepSzb dx, PtrStepSzb dy, unsigned int* coordList, float* thetaList); template int buildEdgePointList_gpu<float>(PtrStepSzb edges, PtrStepSzb dx, PtrStepSzb dy, unsigned int* coordList, float* thetaList); __global__ void buildRTable(const unsigned int* coordList, const float* thetaList, const int pointsCount, PtrStep<short2> r_table, int* r_sizes, int maxSize, const short2 templCenter, const float thetaScale) { const int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid >= pointsCount) return; const unsigned int coord = coordList[tid]; short2 p; p.x = (coord & 0xFFFF); p.y = (coord >> 16) & 0xFFFF; const float theta = thetaList[tid]; const int n = __float2int_rn(theta * thetaScale); const int ind = ::atomicAdd(r_sizes + n, 1); if (ind < maxSize) r_table(n, ind) = p - templCenter; } void buildRTable_gpu(const unsigned int* coordList, const float* thetaList, int pointsCount, PtrStepSz<short2> r_table, int* r_sizes, short2 templCenter, int levels) { const dim3 block(256); const dim3 grid(divUp(pointsCount, block.x)); const float thetaScale = levels / (2.0f * CV_PI_F); hipLaunchKernelGGL(( buildRTable), dim3(grid), dim3(block), 0, 0, coordList, thetaList, pointsCount, r_table, r_sizes, r_table.cols, templCenter, thetaScale); cudaSafeCall( hipGetLastError() ); cudaSafeCall( hipDeviceSynchronize() ); } //////////////////////////////////////////////////////////////////////// // GHT_Ballard_Pos __global__ void GHT_Ballard_Pos_calcHist(const unsigned int* coordList, const float* thetaList, const 
int pointsCount, const PtrStep<short2> r_table, const int* r_sizes, PtrStepSzi hist, const float idp, const float thetaScale) { const int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid >= pointsCount) return; const unsigned int coord = coordList[tid]; short2 p; p.x = (coord & 0xFFFF); p.y = (coord >> 16) & 0xFFFF; const float theta = thetaList[tid]; const int n = __float2int_rn(theta * thetaScale); const short2* r_row = r_table.ptr(n); const int r_row_size = r_sizes[n]; for (int j = 0; j < r_row_size; ++j) { short2 c = p - r_row[j]; c.x = __float2int_rn(c.x * idp); c.y = __float2int_rn(c.y * idp); if (c.x >= 0 && c.x < hist.cols - 2 && c.y >= 0 && c.y < hist.rows - 2) ::atomicAdd(hist.ptr(c.y + 1) + c.x + 1, 1); } } void GHT_Ballard_Pos_calcHist_gpu(const unsigned int* coordList, const float* thetaList, int pointsCount, PtrStepSz<short2> r_table, const int* r_sizes, PtrStepSzi hist, float dp, int levels) { const dim3 block(256); const dim3 grid(divUp(pointsCount, block.x)); const float idp = 1.0f / dp; const float thetaScale = levels / (2.0f * CV_PI_F); hipLaunchKernelGGL(( GHT_Ballard_Pos_calcHist), dim3(grid), dim3(block), 0, 0, coordList, thetaList, pointsCount, r_table, r_sizes, hist, idp, thetaScale); cudaSafeCall( hipGetLastError() ); cudaSafeCall( hipDeviceSynchronize() ); } __global__ void GHT_Ballard_Pos_findPosInHist(const PtrStepSzi hist, float4* out, int3* votes, const int maxSize, const float dp, const int threshold) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= hist.cols - 2 || y >= hist.rows - 2) return; const int curVotes = hist(y + 1, x + 1); if (curVotes > threshold && curVotes > hist(y + 1, x) && curVotes >= hist(y + 1, x + 2) && curVotes > hist(y, x + 1) && curVotes >= hist(y + 2, x + 1)) { const int ind = ::atomicAdd(&g_counter, 1); if (ind < maxSize) { out[ind] = make_float4(x * dp, y * dp, 1.0f, 0.0f); votes[ind] = make_int3(curVotes, 0, 0); } } } int 
GHT_Ballard_Pos_findPosInHist_gpu(PtrStepSzi hist, float4* out, int3* votes, int maxSize, float dp, int threshold) { void* counterPtr; cudaSafeCall( hipGetSymbolAddress(&counterPtr, g_counter) ); cudaSafeCall( hipMemset(counterPtr, 0, sizeof(int)) ); const dim3 block(32, 8); const dim3 grid(divUp(hist.cols - 2, block.x), divUp(hist.rows - 2, block.y)); cudaSafeCall( hipFuncSetCacheConfig(GHT_Ballard_Pos_findPosInHist, hipFuncCachePreferL1) ); hipLaunchKernelGGL(( GHT_Ballard_Pos_findPosInHist), dim3(grid), dim3(block), 0, 0, hist, out, votes, maxSize, dp, threshold); cudaSafeCall( hipGetLastError() ); cudaSafeCall( hipDeviceSynchronize() ); int totalCount; cudaSafeCall( hipMemcpy(&totalCount, counterPtr, sizeof(int), hipMemcpyDeviceToHost) ); totalCount = ::min(totalCount, maxSize); return totalCount; } //////////////////////////////////////////////////////////////////////// // GHT_Ballard_PosScale __global__ void GHT_Ballard_PosScale_calcHist(const unsigned int* coordList, const float* thetaList, PtrStep<short2> r_table, const int* r_sizes, PtrStepi hist, const int rows, const int cols, const float minScale, const float scaleStep, const int scaleRange, const float idp, const float thetaScale) { const unsigned int coord = coordList[blockIdx.x]; float2 p; p.x = (coord & 0xFFFF); p.y = (coord >> 16) & 0xFFFF; const float theta = thetaList[blockIdx.x]; const int n = __float2int_rn(theta * thetaScale); const short2* r_row = r_table.ptr(n); const int r_row_size = r_sizes[n]; for (int j = 0; j < r_row_size; ++j) { const float2 d = saturate_cast<float2>(r_row[j]); for (int s = threadIdx.x; s < scaleRange; s += blockDim.x) { const float scale = minScale + s * scaleStep; float2 c = p - scale * d; c.x *= idp; c.y *= idp; if (c.x >= 0 && c.x < cols && c.y >= 0 && c.y < rows) ::atomicAdd(hist.ptr((s + 1) * (rows + 2) + __float2int_rn(c.y + 1)) + __float2int_rn(c.x + 1), 1); } } } void GHT_Ballard_PosScale_calcHist_gpu(const unsigned int* coordList, const float* thetaList, int 
// --- tail of GHT_Ballard_PosScale_calcHist_gpu (signature opens on an earlier line) ---
pointsCount, PtrStepSz<short2> r_table, const int* r_sizes, PtrStepi hist,
                                               int rows, int cols, float minScale, float scaleStep, int scaleRange, float dp, int levels)
{
    // One block per edge point; 256 threads cooperate over the scale range / R-table rows.
    const dim3 block(256);
    const dim3 grid(pointsCount);

    const float idp = 1.0f / dp;                        // inverse accumulator resolution
    const float thetaScale = levels / (2.0f * CV_PI_F); // maps a gradient angle to an R-table row

    hipLaunchKernelGGL(( GHT_Ballard_PosScale_calcHist), dim3(grid), dim3(block), 0, 0, coordList, thetaList, r_table, r_sizes, hist, rows, cols, minScale, scaleStep, scaleRange, idp, thetaScale);
    cudaSafeCall( hipGetLastError() );
    cudaSafeCall( hipDeviceSynchronize() );
}

// Scans the (scale, y, x) vote histogram and emits local maxima above `threshold` as
// (x*dp, y*dp, scale, 0) candidates. `hist` is stored as scaleRange+2 stacked
// (rows+2)x(cols+2) planes with a one-cell border, so the +/-1 neighbour reads below
// never index out of bounds.
__global__ void GHT_Ballard_PosScale_findPosInHist(const PtrStepi hist, const int rows, const int cols, const int scaleRange, float4* out, int3* votes,
                                                   const int maxSize, const float minScale, const float scaleStep, const float dp, const int threshold)
{
    const int x = blockIdx.x * blockDim.x + threadIdx.x;
    const int y = blockIdx.y * blockDim.y + threadIdx.y;

    if (x >= cols || y >= rows)
        return;

    for (int s = 0; s < scaleRange; ++s)
    {
        const float scale = minScale + s * scaleStep;

        const int prevScaleIdx = (s) * (rows + 2);
        const int curScaleIdx = (s + 1) * (rows + 2);
        const int nextScaleIdx = (s + 2) * (rows + 2);

        const int curVotes = hist(curScaleIdx + y + 1, x + 1);

        // strict > on one side, >= on the other, so a flat plateau is reported only once
        if (curVotes > threshold &&
            curVotes >  hist(curScaleIdx + y + 1, x) &&
            curVotes >= hist(curScaleIdx + y + 1, x + 2) &&
            curVotes >  hist(curScaleIdx + y, x + 1) &&
            curVotes >= hist(curScaleIdx + y + 2, x + 1) &&
            curVotes >  hist(prevScaleIdx + y + 1, x + 1) &&
            curVotes >= hist(nextScaleIdx + y + 1, x + 1))
        {
            const int ind = ::atomicAdd(&g_counter, 1);

            if (ind < maxSize)
            {
                out[ind] = make_float4(x * dp, y * dp, scale, 0.0f);
                votes[ind] = make_int3(curVotes, curVotes, 0);
            }
        }
    }
}

// Host wrapper: resets the device-global candidate counter, launches the peak search,
// and returns the number of candidates actually written (clamped to maxSize).
int GHT_Ballard_PosScale_findPosInHist_gpu(PtrStepi hist, int rows, int cols, int scaleRange, float4* out, int3* votes, int maxSize, float minScale, float scaleStep, float dp, int threshold)
{
    void* counterPtr;
    cudaSafeCall( hipGetSymbolAddress(&counterPtr, g_counter) );
    cudaSafeCall( hipMemset(counterPtr, 0, sizeof(int)) );

    const dim3 block(32, 8);
    const dim3 grid(divUp(cols, block.x), divUp(rows, block.y));

    cudaSafeCall( hipFuncSetCacheConfig(GHT_Ballard_PosScale_findPosInHist, hipFuncCachePreferL1) );

    hipLaunchKernelGGL(( GHT_Ballard_PosScale_findPosInHist), dim3(grid), dim3(block), 0, 0, hist, rows, cols, scaleRange, out, votes, maxSize, minScale, scaleStep, dp, threshold);
    cudaSafeCall( hipGetLastError() );
    cudaSafeCall( hipDeviceSynchronize() );

    int totalCount;
    cudaSafeCall( hipMemcpy(&totalCount, counterPtr, sizeof(int), hipMemcpyDeviceToHost) );

    // g_counter counts every detected maximum, including those dropped by the ind < maxSize guard
    totalCount = ::min(totalCount, maxSize);

    return totalCount;
}

////////////////////////////////////////////////////////////////////////
// GHT_Ballard_PosRotation

// Votes into a (rotation, y, x) histogram: one block per edge point, threads stride
// over the rotation range; for each candidate rotation the R-table displacement is
// rotated and the implied reference point accumulated.
__global__ void GHT_Ballard_PosRotation_calcHist(const unsigned int* coordList, const float* thetaList,
                                                 PtrStep<short2> r_table, const int* r_sizes,
                                                 PtrStepi hist, const int rows, const int cols,
                                                 const float minAngle, const float angleStep, const int angleRange,
                                                 const float idp, const float thetaScale)
{
    // coordList packs (y << 16) | x per point
    const unsigned int coord = coordList[blockIdx.x];
    float2 p;
    p.x = (coord & 0xFFFF);
    p.y = (coord >> 16) & 0xFFFF;

    const float thetaVal = thetaList[blockIdx.x];

    for (int a = threadIdx.x; a < angleRange; a += blockDim.x)
    {
        const float angle = (minAngle + a * angleStep) * (CV_PI_F / 180.0f);
        float sinA, cosA;
        sincosf(angle, &sinA, &cosA);

        // gradient angle compensated by the candidate rotation selects the R-table row
        float theta = thetaVal - angle;
        if (theta < 0)
            theta += 2.0f * CV_PI_F;

        const int n = __float2int_rn(theta * thetaScale);

        const short2* r_row = r_table.ptr(n);
        const int r_row_size = r_sizes[n];

        for (int j = 0; j < r_row_size; ++j)
        {
            const float2 d = saturate_cast<float2>(r_row[j]);

            // rotate the stored displacement by the candidate angle
            const float2 dr = make_float2(d.x * cosA - d.y * sinA, d.x * sinA + d.y * cosA);

            float2 c = make_float2(p.x - dr.x, p.y - dr.y);
            c.x *= idp;
            c.y *= idp;

            if (c.x >= 0 && c.x < cols && c.y >= 0 && c.y < rows)
                ::atomicAdd(hist.ptr((a + 1) * (rows + 2) + __float2int_rn(c.y + 1)) + __float2int_rn(c.x + 1), 1);
        }
    }
}

// Host wrapper for GHT_Ballard_PosRotation_calcHist; `levels` fixes the R-table angular
// quantisation, `dp` the accumulator cell size.
void GHT_Ballard_PosRotation_calcHist_gpu(const unsigned int* coordList, const float* thetaList, int pointsCount,
                                          PtrStepSz<short2> r_table, const int* r_sizes,
                                          PtrStepi hist, int rows, int cols,
                                          float minAngle, float angleStep, int angleRange,
                                          float dp, int levels)
{
    const dim3 block(256);
    const dim3 grid(pointsCount);

    const float idp = 1.0f / dp;
    const float thetaScale = levels / (2.0f * CV_PI_F);

    hipLaunchKernelGGL(( GHT_Ballard_PosRotation_calcHist), dim3(grid), dim3(block), 0, 0, coordList, thetaList, r_table, r_sizes, hist, rows, cols, minAngle, angleStep, angleRange, idp, thetaScale);
    cudaSafeCall( hipGetLastError() );
    cudaSafeCall( hipDeviceSynchronize() );
}

// Same peak search as the PosScale variant, but over the rotation axis; emits
// (x*dp, y*dp, 1.0, angle) with votes packed as (position, 0, rotation).
__global__ void GHT_Ballard_PosRotation_findPosInHist(const PtrStepi hist, const int rows, const int cols, const int angleRange, float4* out, int3* votes,
                                                      const int maxSize, const float minAngle, const float angleStep, const float dp, const int threshold)
{
    const int x = blockIdx.x * blockDim.x + threadIdx.x;
    const int y = blockIdx.y * blockDim.y + threadIdx.y;

    if (x >= cols || y >= rows)
        return;

    for (int a = 0; a < angleRange; ++a)
    {
        const float angle = minAngle + a * angleStep;

        const int prevAngleIdx = (a) * (rows + 2);
        const int curAngleIdx = (a + 1) * (rows + 2);
        const int nextAngleIdx = (a + 2) * (rows + 2);

        const int curVotes = hist(curAngleIdx + y + 1, x + 1);

        if (curVotes > threshold &&
            curVotes > hist(curAngleIdx + y + 1, x) &&
            curVotes >= hist(curAngleIdx + y + 1, x + 2) &&
            curVotes > hist(curAngleIdx + y, x + 1) &&
            curVotes >= hist(curAngleIdx + y + 2, x + 1) &&
            curVotes > hist(prevAngleIdx + y + 1, x + 1) &&
            curVotes >= hist(nextAngleIdx + y + 1, x + 1))
        {
            const int ind = ::atomicAdd(&g_counter, 1);

            if (ind < maxSize)
            {
                out[ind] = make_float4(x * dp, y * dp, 1.0f, angle);
                votes[ind] = make_int3(curVotes, 0, curVotes);
            }
        }
    }
}

// Host wrapper: mirrors GHT_Ballard_PosScale_findPosInHist_gpu for the rotation axis.
int GHT_Ballard_PosRotation_findPosInHist_gpu(PtrStepi hist, int rows, int cols, int angleRange, float4* out, int3* votes, int maxSize, float minAngle, float angleStep, float dp, int threshold)
{
    void* counterPtr;
    cudaSafeCall( hipGetSymbolAddress(&counterPtr, g_counter) );
    cudaSafeCall( hipMemset(counterPtr, 0, sizeof(int)) );

    const dim3 block(32, 8);
    const dim3 grid(divUp(cols, block.x), divUp(rows, block.y));

    cudaSafeCall( hipFuncSetCacheConfig(GHT_Ballard_PosRotation_findPosInHist, hipFuncCachePreferL1) );

    hipLaunchKernelGGL(( GHT_Ballard_PosRotation_findPosInHist), dim3(grid), dim3(block), 0, 0, hist, rows, cols, angleRange, out, votes, maxSize, minAngle, angleStep, dp, threshold);
    cudaSafeCall( hipGetLastError() );
    cudaSafeCall( hipDeviceSynchronize() );

    int totalCount;
    cudaSafeCall( hipMemcpy(&totalCount, counterPtr, sizeof(int), hipMemcpyDeviceToHost) );

    totalCount = ::min(totalCount, maxSize);

    return totalCount;
}

////////////////////////////////////////////////////////////////////////
// GHT_Guil_Full

// Raw pointer/pitch views of the six per-level feature arrays (pairs of edge points
// plus derived geometry). Stored in __constant__ memory so device code can address
// either the template or the image table without extra kernel parameters.
struct FeatureTable
{
    uchar* p1_pos_data;
    size_t p1_pos_step;

    uchar* p1_theta_data;
    size_t p1_theta_step;

    uchar* p2_pos_data;
    size_t p2_pos_step;

    uchar* d12_data;
    size_t d12_step;

    uchar* r1_data;
    size_t r1_step;

    uchar* r2_data;
    size_t r2_step;
};

__constant__ FeatureTable c_templFeatures;
__constant__ FeatureTable c_imageFeatures;

// Uploads the template feature-table descriptors to constant memory.
void GHT_Guil_Full_setTemplFeatures(PtrStepb p1_pos, PtrStepb p1_theta, PtrStepb p2_pos, PtrStepb d12, PtrStepb r1, PtrStepb r2)
{
    FeatureTable tbl;

    tbl.p1_pos_data = p1_pos.data;
    tbl.p1_pos_step = p1_pos.step;

    tbl.p1_theta_data = p1_theta.data;
    tbl.p1_theta_step = p1_theta.step;

    tbl.p2_pos_data = p2_pos.data;
    tbl.p2_pos_step = p2_pos.step;

    tbl.d12_data = d12.data;
    tbl.d12_step = d12.step;

    tbl.r1_data = r1.data;
    tbl.r1_step = r1.step;

    tbl.r2_data = r2.data;
    tbl.r2_step = r2.step;

    cudaSafeCall( hipMemcpyToSymbol(c_templFeatures, &tbl, sizeof(FeatureTable)) );
}

// Uploads the image feature-table descriptors to constant memory (same layout as above).
void GHT_Guil_Full_setImageFeatures(PtrStepb p1_pos, PtrStepb p1_theta, PtrStepb p2_pos, PtrStepb d12, PtrStepb r1, PtrStepb r2)
{
    FeatureTable tbl;

    tbl.p1_pos_data = p1_pos.data;
    tbl.p1_pos_step = p1_pos.step;

    tbl.p1_theta_data = p1_theta.data;
    tbl.p1_theta_step = p1_theta.step;

    tbl.p2_pos_data = p2_pos.data;
    tbl.p2_pos_step = p2_pos.step;

    tbl.d12_data = d12.data;
    tbl.d12_step = d12.step;

    tbl.r1_data = r1.data;
    tbl.r1_step = r1.step;

    tbl.r2_data = r2.data;
    tbl.r2_step = r2.step;

    cudaSafeCall( hipMemcpyToSymbol(c_imageFeatures, &tbl, sizeof(FeatureTable)) );
}

// Typed row accessors over c_templFeatures; `n` selects the quantised-alpha level (row).
struct TemplFeatureTable
{
    static __device__ float2* p1_pos(int n)
    {
        return (float2*)(c_templFeatures.p1_pos_data + n * c_templFeatures.p1_pos_step);
    }
    static __device__ float* p1_theta(int n)
    {
        return (float*)(c_templFeatures.p1_theta_data + n * c_templFeatures.p1_theta_step);
    }

    static __device__ float2* p2_pos(int n)
    {
        return (float2*)(c_templFeatures.p2_pos_data + n * c_templFeatures.p2_pos_step);
    }

    static __device__ float* d12(int n)
    {
        return (float*)(c_templFeatures.d12_data + n * c_templFeatures.d12_step);
    }

    static __device__ float2* r1(int n)
    {
        return (float2*)(c_templFeatures.r1_data + n * c_templFeatures.r1_step);
    }
    static __device__ float2* r2(int n)
    {
        return (float2*)(c_templFeatures.r2_data + n * c_templFeatures.r2_step);
    }
};

// Typed row accessors over c_imageFeatures; mirrors TemplFeatureTable.
struct ImageFeatureTable
{
    static __device__ float2* p1_pos(int n)
    {
        return (float2*)(c_imageFeatures.p1_pos_data + n * c_imageFeatures.p1_pos_step);
    }
    static __device__ float* p1_theta(int n)
    {
        return (float*)(c_imageFeatures.p1_theta_data + n * c_imageFeatures.p1_theta_step);
    }

    static __device__ float2* p2_pos(int n)
    {
        return (float2*)(c_imageFeatures.p2_pos_data + n * c_imageFeatures.p2_pos_step);
    }

    static __device__ float* d12(int n)
    {
        return (float*)(c_imageFeatures.d12_data + n * c_imageFeatures.d12_step);
    }

    static __device__ float2* r1(int n)
    {
        return (float2*)(c_imageFeatures.r1_data + n * c_imageFeatures.r1_step);
    }
    static __device__ float2* r2(int n)
    {
        return (float2*)(c_imageFeatures.r2_data + n * c_imageFeatures.r2_step);
    }
};

// Wraps an angle into [0, 2*pi) by repeated addition/subtraction of 2*pi.
__device__ float clampAngle(float a)
{
    float res = a;

    while (res > 2.0f * CV_PI_F)
        res -= 2.0f * CV_PI_F;

    while (res < 0.0f)
        res += 2.0f * CV_PI_F;

    return res;
}
// True when angles a and b are equal modulo 2*pi within eps (radians).
__device__ bool angleEq(float a, float b, float eps)
{
    return (::fabs(clampAngle(a - b)) <= eps);
}

// Builds the Guil feature table: one block per edge point p1, threads stride over all
// other points p2. Pairs whose gradient angles differ by `xi` (within angleEpsilon) are
// binned by alpha12 (angle of p1->p2 relative to p1's gradient). Template tables keep
// the r1/r2 displacements from `center`; image tables keep the raw pair positions.
// `sizes[n]` counts candidates per bin; entries past `maxSize` are counted but dropped.
template <class FT, bool isTempl>
__global__ void GHT_Guil_Full_buildFeatureList(const unsigned int* coordList, const float* thetaList, const int pointsCount,
                                               int* sizes, const int maxSize,
                                               const float xi, const float angleEpsilon, const float alphaScale,
                                               const float2 center, const float maxDist)
{
    const float p1_theta = thetaList[blockIdx.x];
    const unsigned int coord1 = coordList[blockIdx.x];
    float2 p1_pos;
    p1_pos.x = (coord1 & 0xFFFF);
    p1_pos.y = (coord1 >> 16) & 0xFFFF;

    for (int i = threadIdx.x; i < pointsCount; i += blockDim.x)
    {
        const float p2_theta = thetaList[i];
        const unsigned int coord2 = coordList[i];
        float2 p2_pos;
        p2_pos.x = (coord2 & 0xFFFF);
        p2_pos.y = (coord2 >> 16) & 0xFFFF;

        if (angleEq(p1_theta - p2_theta, xi, angleEpsilon))
        {
            const float2 d = p1_pos - p2_pos;

            float alpha12 = clampAngle(::atan2(d.y, d.x) - p1_theta);
            float d12 = ::sqrtf(d.x * d.x + d.y * d.y);

            if (d12 > maxDist)
                continue;

            float2 r1 = p1_pos - center;
            float2 r2 = p2_pos - center;

            const int n = __float2int_rn(alpha12 * alphaScale);

            const int ind = ::atomicAdd(sizes + n, 1);

            if (ind < maxSize)
            {
                if (!isTempl)
                {
                    FT::p1_pos(n)[ind] = p1_pos;
                    FT::p2_pos(n)[ind] = p2_pos;
                }

                FT::p1_theta(n)[ind] = p1_theta;
                FT::d12(n)[ind] = d12;

                if (isTempl)
                {
                    FT::r1(n)[ind] = r1;
                    FT::r2(n)[ind] = r2;
                }
            }
        }
    }
}

// Shared launcher for the template/image instantiations; xi and angleEpsilon arrive in
// degrees and are converted to radians here. Afterwards clamps the per-bin counters to
// maxSize (the kernel lets them overcount).
template <class FT, bool isTempl>
void GHT_Guil_Full_buildFeatureList_caller(const unsigned int* coordList, const float* thetaList, int pointsCount,
                                           int* sizes, int maxSize,
                                           float xi, float angleEpsilon, int levels,
                                           float2 center, float maxDist)
{
    const dim3 block(256);
    const dim3 grid(pointsCount);

    const float alphaScale = levels / (2.0f * CV_PI_F);

    hipLaunchKernelGGL(( GHT_Guil_Full_buildFeatureList<FT, isTempl>), dim3(grid), dim3(block), 0, 0, coordList, thetaList, pointsCount, sizes, maxSize, xi * (CV_PI_F / 180.0f), angleEpsilon * (CV_PI_F / 180.0f), alphaScale, center, maxDist);
    cudaSafeCall( hipGetLastError() );
    cudaSafeCall( hipDeviceSynchronize() );

    thrust::device_ptr<int> sizesPtr(sizes);
    thrust::transform(sizesPtr, sizesPtr + levels + 1, sizesPtr, device::bind2nd(device::minimum<int>(), maxSize));
}

void GHT_Guil_Full_buildTemplFeatureList_gpu(const unsigned int* coordList, const float* thetaList, int pointsCount,
                                             int* sizes, int maxSize,
                                             float xi, float angleEpsilon, int levels,
                                             float2 center, float maxDist)
{
    GHT_Guil_Full_buildFeatureList_caller<TemplFeatureTable, true>(coordList, thetaList, pointsCount,
                                                                   sizes, maxSize,
                                                                   xi, angleEpsilon, levels,
                                                                   center, maxDist);
}

void GHT_Guil_Full_buildImageFeatureList_gpu(const unsigned int* coordList, const float* thetaList, int pointsCount,
                                             int* sizes, int maxSize,
                                             float xi, float angleEpsilon, int levels,
                                             float2 center, float maxDist)
{
    GHT_Guil_Full_buildFeatureList_caller<ImageFeatureTable, false>(coordList, thetaList, pointsCount,
                                                                    sizes, maxSize,
                                                                    xi, angleEpsilon, levels,
                                                                    center, maxDist);
}

// Orientation histogram: for each template/image feature pair in the same alpha bin,
// votes for the angle difference of their p1 gradients. Accumulates in a dynamic
// shared int histogram (angleRange + 1 bins), then flushes to global OHist.
__global__ void GHT_Guil_Full_calcOHist(const int* templSizes, const int* imageSizes, int* OHist,
                                        const float minAngle, const float maxAngle, const float iAngleStep, const int angleRange)
{
    extern __shared__ int s_OHist[];
    for (int i = threadIdx.x; i <= angleRange; i += blockDim.x)
        s_OHist[i] = 0;
    __syncthreads();

    const int tIdx = blockIdx.x;
    const int level = blockIdx.y;

    const int tSize = templSizes[level];

    if (tIdx < tSize)
    {
        const int imSize = imageSizes[level];

        const float t_p1_theta = TemplFeatureTable::p1_theta(level)[tIdx];

        for (int i = threadIdx.x; i < imSize; i += blockDim.x)
        {
            const float im_p1_theta = ImageFeatureTable::p1_theta(level)[i];

            const float angle = clampAngle(im_p1_theta - t_p1_theta);

            if (angle >= minAngle && angle <= maxAngle)
            {
                const int n = __float2int_rn((angle - minAngle) * iAngleStep);
                Emulation::smem::atomicAdd(&s_OHist[n], 1);
            }
        }
    }
    __syncthreads();

    for (int i = threadIdx.x; i <= angleRange; i += blockDim.x)
        ::atomicAdd(OHist + i, s_OHist[i]);
}

// Host wrapper: converts degree inputs to radians and launches the orientation
// histogram over a (tMaxSize x levels+1) grid.
void GHT_Guil_Full_calcOHist_gpu(const int* templSizes, const int* imageSizes, int* OHist,
                                 float minAngle, float maxAngle, float angleStep, int angleRange,
                                 int levels, int tMaxSize)
{
    const dim3 block(256);
    const dim3 grid(tMaxSize, levels + 1);

    minAngle *= (CV_PI_F / 180.0f);
    maxAngle *= (CV_PI_F / 180.0f);
    angleStep *= (CV_PI_F / 180.0f);

    // The kernel's dynamic shared array is `extern __shared__ int s_OHist[]` with
    // angleRange + 1 entries; size it with sizeof(int) (was sizeof(float) — same byte
    // count on CUDA/HIP, but the wrong element type obscured the intent).
    const size_t smemSize = (angleRange + 1) * sizeof(int);

    hipLaunchKernelGGL(( GHT_Guil_Full_calcOHist), dim3(grid), dim3(block), smemSize, 0, templSizes, imageSizes, OHist, minAngle, maxAngle, 1.0f / angleStep, angleRange);
    cudaSafeCall( hipGetLastError() );
    cudaSafeCall( hipDeviceSynchronize() );
}

// Scale histogram at a fixed rotation `angle`: pairs whose (rotation-compensated)
// p1 gradients match vote for the ratio of image-to-template pair distances.
__global__ void GHT_Guil_Full_calcSHist(const int* templSizes, const int* imageSizes, int* SHist,
                                        const float angle, const float angleEpsilon,
                                        const float minScale, const float maxScale, const float iScaleStep, const int scaleRange)
{
    extern __shared__ int s_SHist[];
    for (int i = threadIdx.x; i <= scaleRange; i += blockDim.x)
        s_SHist[i] = 0;
    __syncthreads();

    const int tIdx = blockIdx.x;
    const int level = blockIdx.y;

    const int tSize = templSizes[level];

    if (tIdx < tSize)
    {
        const int imSize = imageSizes[level];

        const float t_p1_theta = TemplFeatureTable::p1_theta(level)[tIdx] + angle;
        // NOTE(review): adding `angle` (radians) to the pairwise *distance* d12 looks
        // dimensionally wrong and skews scale = im_d12 / t_d12 below. Left untouched
        // pending confirmation against the upstream OpenCV source.
        const float t_d12 = TemplFeatureTable::d12(level)[tIdx] + angle;

        for (int i = threadIdx.x; i < imSize; i += blockDim.x)
        {
            const float im_p1_theta = ImageFeatureTable::p1_theta(level)[i];
            const float im_d12 = ImageFeatureTable::d12(level)[i];

            if (angleEq(im_p1_theta, t_p1_theta, angleEpsilon))
            {
                const float scale = im_d12 / t_d12;

                if (scale >= minScale && scale <= maxScale)
                {
                    const int s = __float2int_rn((scale - minScale) * iScaleStep);
                    Emulation::smem::atomicAdd(&s_SHist[s], 1);
                }
            }
        }
    }
    __syncthreads();

    for (int i = threadIdx.x; i <= scaleRange; i += blockDim.x)
        ::atomicAdd(SHist + i, s_SHist[i]);
}

// Host wrapper for the scale histogram; angle/angleEpsilon arrive in degrees,
// iScaleStep is already the inverse of the scale step.
void GHT_Guil_Full_calcSHist_gpu(const int* templSizes, const int* imageSizes, int* SHist,
                                 float angle, float angleEpsilon,
                                 float minScale, float maxScale, float iScaleStep, int scaleRange,
                                 int levels, int tMaxSize)
{
    const dim3 block(256);
    const dim3 grid(tMaxSize, levels + 1);

    angle *= (CV_PI_F / 180.0f);
    angleEpsilon *= (CV_PI_F / 180.0f);

    // int histogram of scaleRange + 1 bins (was sizeof(float); identical size, clearer intent)
    const size_t smemSize = (scaleRange + 1) * sizeof(int);

    hipLaunchKernelGGL(( GHT_Guil_Full_calcSHist), dim3(grid), dim3(block), smemSize, 0, templSizes, imageSizes, SHist, angle, angleEpsilon, minScale, maxScale, iScaleStep, scaleRange);
    cudaSafeCall( hipGetLastError() );
    cudaSafeCall( hipDeviceSynchronize() );
}

// Position histogram at fixed rotation and scale: each matching pair back-projects
// both endpoints through the scaled+rotated template displacements; a vote is cast
// only when the two implied reference points agree to within one cell.
__global__ void GHT_Guil_Full_calcPHist(const int* templSizes, const int* imageSizes, PtrStepSzi PHist,
                                        const float angle, const float sinVal, const float cosVal, const float angleEpsilon, const float scale,
                                        const float idp)
{
    const int tIdx = blockIdx.x;
    const int level = blockIdx.y;

    const int tSize = templSizes[level];

    if (tIdx < tSize)
    {
        const int imSize = imageSizes[level];

        const float t_p1_theta = TemplFeatureTable::p1_theta(level)[tIdx] + angle;

        float2 r1 = TemplFeatureTable::r1(level)[tIdx];
        float2 r2 = TemplFeatureTable::r2(level)[tIdx];

        r1 = r1 * scale;
        r2 = r2 * scale;

        r1 = make_float2(cosVal * r1.x - sinVal * r1.y, sinVal * r1.x + cosVal * r1.y);
        r2 = make_float2(cosVal * r2.x - sinVal * r2.y, sinVal * r2.x + cosVal * r2.y);

        for (int i = threadIdx.x; i < imSize; i += blockDim.x)
        {
            const float im_p1_theta = ImageFeatureTable::p1_theta(level)[i];
            const float2 im_p1_pos = ImageFeatureTable::p1_pos(level)[i];
            const float2 im_p2_pos = ImageFeatureTable::p2_pos(level)[i];

            if (angleEq(im_p1_theta, t_p1_theta, angleEpsilon))
            {
                float2 c1, c2;

                c1 = im_p1_pos - r1;
                c1 = c1 * idp;

                c2 = im_p2_pos - r2;
                c2 = c2 * idp;

                // both endpoints must agree on the reference point
                if (::fabs(c1.x - c2.x) > 1 || ::fabs(c1.y - c2.y) > 1)
                    continue;

                if (c1.y >= 0 && c1.y < PHist.rows - 2 && c1.x >= 0 && c1.x < PHist.cols - 2)
                    ::atomicAdd(PHist.ptr(__float2int_rn(c1.y) + 1) + __float2int_rn(c1.x) + 1, 1);
            }
        }
    }
}

// Host wrapper: precomputes sin/cos of the (degree->radian converted) rotation.
void GHT_Guil_Full_calcPHist_gpu(const int* templSizes, const int* imageSizes, PtrStepSzi PHist,
                                 float angle, float angleEpsilon, float scale,
                                 float dp,
                                 int levels, int tMaxSize)
{
    const dim3 block(256);
    const dim3 grid(tMaxSize, levels + 1);

    angle *= (CV_PI_F / 180.0f);
    angleEpsilon *= (CV_PI_F / 180.0f);

    const float sinVal = ::sinf(angle);
    const float cosVal = ::cosf(angle);

    cudaSafeCall( hipFuncSetCacheConfig(GHT_Guil_Full_calcPHist, hipFuncCachePreferL1) );

    hipLaunchKernelGGL(( GHT_Guil_Full_calcPHist), dim3(grid), dim3(block), 0, 0, templSizes, imageSizes, PHist, angle, sinVal, cosVal, angleEpsilon, scale, 1.0f / dp);
    cudaSafeCall( hipGetLastError() );
    cudaSafeCall( hipDeviceSynchronize() );
}

// 4-neighbour local-maximum search over the position histogram; emits full
// (x, y, scale, angle) poses with the per-axis vote counts.
__global__ void GHT_Guil_Full_findPosInHist(const PtrStepSzi hist, float4* out, int3* votes, const int maxSize,
                                            const float angle, const int angleVotes, const float scale, const int scaleVotes,
                                            const float dp, const int threshold)
{
    const int x = blockIdx.x * blockDim.x + threadIdx.x;
    const int y = blockIdx.y * blockDim.y + threadIdx.y;

    if (x >= hist.cols - 2 || y >= hist.rows - 2)
        return;

    const int curVotes = hist(y + 1, x + 1);

    if (curVotes > threshold &&
        curVotes > hist(y + 1, x) &&
        curVotes >= hist(y + 1, x + 2) &&
        curVotes > hist(y, x + 1) &&
        curVotes >= hist(y + 2, x + 1))
    {
        const int ind = ::atomicAdd(&g_counter, 1);

        if (ind < maxSize)
        {
            out[ind] = make_float4(x * dp, y * dp, scale, angle);
            votes[ind] = make_int3(curVotes, scaleVotes, angleVotes);
        }
    }
}

// Host wrapper: unlike the Ballard variants, seeds g_counter with `curSize` so the
// candidates found at this (angle, scale) are appended after previously found ones.
int GHT_Guil_Full_findPosInHist_gpu(PtrStepSzi hist, float4* out, int3* votes, int curSize, int maxSize,
                                    float angle, int angleVotes, float scale, int scaleVotes,
                                    float dp, int threshold)
{
    void* counterPtr;
    cudaSafeCall( hipGetSymbolAddress(&counterPtr, g_counter) );
    cudaSafeCall( hipMemcpy(counterPtr, &curSize, sizeof(int), hipMemcpyHostToDevice) );

    const dim3 block(32, 8);
    const dim3 grid(divUp(hist.cols - 2, block.x), divUp(hist.rows - 2, block.y));

    cudaSafeCall( hipFuncSetCacheConfig(GHT_Guil_Full_findPosInHist, hipFuncCachePreferL1) );

    hipLaunchKernelGGL(( GHT_Guil_Full_findPosInHist), dim3(grid), dim3(block), 0, 0, hist, out, votes, maxSize, angle, angleVotes, scale, scaleVotes, dp, threshold);
    cudaSafeCall( hipGetLastError() );
    cudaSafeCall( hipDeviceSynchronize() );

    int totalCount;
    cudaSafeCall( hipMemcpy(&totalCount, counterPtr, sizeof(int), hipMemcpyDeviceToHost) );

    totalCount = ::min(totalCount, maxSize);

    return totalCount;
}
    }
}}}

#endif /* CUDA_DISABLER */
// ==== original CUDA source file: 68e92ff71a7e72feb43534e1580c463aaf125e79.cu ====
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#if !defined CUDA_DISABLER

#include <thrust/device_ptr.h>
#include <thrust/sort.h>

#include "opencv2/gpu/device/common.hpp"
#include "opencv2/gpu/device/emulation.hpp"
#include "opencv2/gpu/device/vec_math.hpp"
#include "opencv2/gpu/device/limits.hpp"
#include "opencv2/gpu/device/dynamic_smem.hpp"

namespace cv { namespace gpu { namespace device
{
    namespace hough
    {
        // Device-global output counter shared by all kernels in this namespace;
        // each host wrapper resets (or seeds) it before its launch.
        __device__ int g_counter;

        ////////////////////////////////////////////////////////////////////////
        // buildPointList

        // Compacts non-zero pixels of `src` into `list` as packed (y << 16) | x words.
        // Each of the 4 thread rows of a block fills its own shared queue, then one
        // thread reserves a contiguous region of the global list via g_counter and all
        // rows copy their queues out.
        template <int PIXELS_PER_THREAD>
        __global__ void buildPointList(const PtrStepSzb src, unsigned int* list)
        {
            __shared__ unsigned int s_queues[4][32 * PIXELS_PER_THREAD];
            __shared__ int s_qsize[4];
            __shared__ int s_globStart[4];

            const int x = blockIdx.x * blockDim.x * PIXELS_PER_THREAD + threadIdx.x;
            const int y = blockIdx.y * blockDim.y + threadIdx.y;

            if (threadIdx.x == 0)
                s_qsize[threadIdx.y] = 0;
            __syncthreads();

            if (y < src.rows)
            {
                // fill the queue
                const uchar* srcRow = src.ptr(y);
                for (int i = 0, xx = x; i < PIXELS_PER_THREAD && xx < src.cols; ++i, xx += blockDim.x)
                {
                    if (srcRow[xx])
                    {
                        const unsigned int val = (y << 16) | xx;
                        const int qidx = Emulation::smem::atomicAdd(&s_qsize[threadIdx.y], 1);
                        s_queues[threadIdx.y][qidx] = val;
                    }
                }
            }

            __syncthreads();

            // let one thread reserve the space required in the global list
            if (threadIdx.x == 0 && threadIdx.y == 0)
            {
                // find how many items are stored in each list
                int totalSize = 0;
                for (int i = 0; i < blockDim.y; ++i)
                {
                    s_globStart[i] = totalSize;
                    totalSize += s_qsize[i];
                }

                // calculate the offset in the global list
                const int globalOffset = atomicAdd(&g_counter, totalSize);
                for (int i = 0; i < blockDim.y; ++i)
                    s_globStart[i] += globalOffset;
            }

            __syncthreads();

            // copy local queues to global queue
            const int qsize = s_qsize[threadIdx.y];
            int gidx = s_globStart[threadIdx.y] + threadIdx.x;
            for(int i = threadIdx.x; i < qsize; i += blockDim.x, gidx += blockDim.x)
                list[gidx] = s_queues[threadIdx.y][i];
        }

        // Host wrapper: resets g_counter, launches the compaction, and returns the
        // number of points written to `list`.
        int buildPointList_gpu(PtrStepSzb src, unsigned int* list)
        {
            const int PIXELS_PER_THREAD = 16;

            void* counterPtr;
            cudaSafeCall( cudaGetSymbolAddress(&counterPtr, g_counter) );
            cudaSafeCall( cudaMemset(counterPtr, 0, sizeof(int)) );

            const dim3 block(32, 4);
            const dim3 grid(divUp(src.cols, block.x * PIXELS_PER_THREAD), divUp(src.rows, block.y));

            cudaSafeCall( cudaFuncSetCacheConfig(buildPointList<PIXELS_PER_THREAD>, cudaFuncCachePreferShared) );

            buildPointList<PIXELS_PER_THREAD><<<grid, block>>>(src, list);
            cudaSafeCall( cudaGetLastError() );
            cudaSafeCall( cudaDeviceSynchronize() );

            int totalCount;
            cudaSafeCall( cudaMemcpy(&totalCount, counterPtr, sizeof(int), cudaMemcpyDeviceToHost) );

            return totalCount;
        }

        ////////////////////////////////////////////////////////////////////////
        // linesAccum

        // Hough line accumulation, global-memory variant: one block per theta row;
        // threads stride over the point list and vote with atomics into the
        // bordered accumulator row.
        __global__ void linesAccumGlobal(const unsigned int* list, const int count, PtrStepi accum, const float irho, const float theta, const int numrho)
        {
            const int n = blockIdx.x;
            const float ang = n * theta;

            float sinVal;
            float cosVal;
            sincosf(ang, &sinVal, &cosVal);
            sinVal *= irho;
            cosVal *= irho;

            const int shift = (numrho - 1) / 2;  // recenters rho so negative radii index >= 0

            int* accumRow = accum.ptr(n + 1);
            for (int i = threadIdx.x; i < count; i += blockDim.x)
            {
                const unsigned int val = list[i];

                const int x = (val & 0xFFFF);
                const int y = (val >> 16) & 0xFFFF;

                int r = __float2int_rn(x * cosVal + y * sinVal);
                r += shift;

                ::atomicAdd(accumRow + r + 1, 1);
            }
        }

        // Shared-memory variant: accumulates the theta row in dynamic shared memory
        // (numrho + 1 ints) and writes it out once at the end.
        __global__ void linesAccumShared(const unsigned int* list, const int count, PtrStepi accum, const float irho, const float theta, const int numrho)
        {
            int* smem = DynamicSharedMem<int>();

            for (int i = threadIdx.x; i < numrho + 1; i += blockDim.x)
                smem[i] = 0;
            __syncthreads();

            const int n = blockIdx.x;
            const float ang = n * theta;

            float sinVal;
            float cosVal;
            sincosf(ang, &sinVal, &cosVal);
            sinVal *= irho;
            cosVal *= irho;

            const int shift = (numrho - 1) / 2;

            for (int i = threadIdx.x; i < count; i += blockDim.x)
            {
                const unsigned int val = list[i];

                const int x = (val & 0xFFFF);
                const int y = (val >> 16) & 0xFFFF;

                int r = __float2int_rn(x * cosVal + y * sinVal);
                r += shift;

                Emulation::smem::atomicAdd(&smem[r + 1], 1);
            }
            __syncthreads();

            int* accumRow = accum.ptr(n + 1);
            for (int i = threadIdx.x; i < numrho + 1; i += blockDim.x)
                accumRow[i] = smem[i];
        }

        // Picks the shared-memory kernel when the row histogram fits in shared memory
        // (with a 1000-byte safety margin), otherwise falls back to global atomics.
        void linesAccum_gpu(const unsigned int* list, int count, PtrStepSzi accum, float rho, float theta, size_t sharedMemPerBlock, bool has20)
        {
            const dim3 block(has20 ? 1024 : 512);
            const dim3 grid(accum.rows - 2);

            size_t smemSize = (accum.cols - 1) * sizeof(int);

            if (smemSize < sharedMemPerBlock - 1000)
                linesAccumShared<<<grid, block, smemSize>>>(list, count, accum, 1.0f / rho, theta, accum.cols - 2);
            else
                linesAccumGlobal<<<grid, block>>>(list, count, accum, 1.0f / rho, theta, accum.cols - 2);

            cudaSafeCall( cudaGetLastError() );
            cudaSafeCall( cudaDeviceSynchronize() );
        }

        ////////////////////////////////////////////////////////////////////////
        // linesGetResult

        // Extracts 4-neighbour local maxima above `threshold` from the bordered
        // (theta, rho) accumulator as (radius, angle) pairs plus vote counts.
        __global__ void linesGetResult(const PtrStepSzi accum, float2* out, int* votes, const int maxSize, const float rho, const float theta, const int threshold, const int numrho)
        {
            const int r = blockIdx.x * blockDim.x + threadIdx.x;
            const int n = blockIdx.y * blockDim.y + threadIdx.y;

            if (r >= accum.cols - 2 || n >= accum.rows - 2)
                return;

            const int curVotes = accum(n + 1, r + 1);

            if (curVotes > threshold &&
                curVotes > accum(n + 1, r) &&
                curVotes >= accum(n + 1, r + 2) &&
                curVotes > accum(n, r + 1) &&
                curVotes >= accum(n + 2, r + 1))
            {
                const float radius = (r - (numrho - 1) * 0.5f) * rho;
                const float angle = n * theta;

                const int ind = ::atomicAdd(&g_counter, 1);
                if (ind < maxSize)
                {
                    out[ind] = make_float2(radius, angle);
                    votes[ind] = curVotes;
                }
            }
        }

        // Host wrapper: runs the peak search and optionally sorts results by
        // descending vote count (thrust sort_by_key on device).
        int linesGetResult_gpu(PtrStepSzi accum, float2* out, int* votes, int maxSize, float rho, float theta, int threshold, bool doSort)
        {
            void* counterPtr;
            cudaSafeCall( cudaGetSymbolAddress(&counterPtr, g_counter) );
            cudaSafeCall( cudaMemset(counterPtr, 0, sizeof(int)) );

            const dim3 block(32, 8);
            const dim3 grid(divUp(accum.cols - 2, block.x), divUp(accum.rows - 2, block.y));

            cudaSafeCall( cudaFuncSetCacheConfig(linesGetResult, cudaFuncCachePreferL1) );

            linesGetResult<<<grid, block>>>(accum, out, votes, maxSize, rho, theta, threshold, accum.cols - 2);
            cudaSafeCall( cudaGetLastError() );
            cudaSafeCall( cudaDeviceSynchronize() );

            int totalCount;
            cudaSafeCall( cudaMemcpy(&totalCount, counterPtr, sizeof(int), cudaMemcpyDeviceToHost) );

            totalCount = ::min(totalCount, maxSize);

            if (doSort && totalCount > 0)
            {
                thrust::device_ptr<float2> outPtr(out);
                thrust::device_ptr<int> votesPtr(votes);
                thrust::sort_by_key(votesPtr, votesPtr + totalCount, outPtr, thrust::greater<int>());
            }

            return totalCount;
        }

        ////////////////////////////////////////////////////////////////////////
        // houghLinesProbabilistic

        texture<uchar, cudaTextureType2D, cudaReadModeElementType> tex_mask(false, cudaFilterModePoint, cudaAddressModeClamp);

        // Probabilistic Hough: for each strong accumulator maximum, walks the
        // corresponding image line through the edge mask (via tex_mask) and emits
        // segments whose gap-tolerant runs are at least `lineLength` long.
        __global__ void houghLinesProbabilistic(const PtrStepSzi accum,
                                                int4* out, const int maxSize,
                                                const float rho, const float theta,
                                                const int lineGap, const int lineLength,
                                                const int rows, const int cols)
        {
            const int r = blockIdx.x * blockDim.x + threadIdx.x;
            const int n = blockIdx.y * blockDim.y + threadIdx.y;

            if (r >= accum.cols - 2 || n >= accum.rows - 2)
                return;

            const int curVotes = accum(n + 1, r + 1);

            // strict 8-neighbour maximum
            if (curVotes >= lineLength &&
                curVotes > accum(n, r) &&
                curVotes > accum(n, r + 1) &&
                curVotes > accum(n, r + 2) &&
                curVotes > accum(n + 1, r) &&
                curVotes > accum(n + 1, r + 2) &&
                curVotes > accum(n + 2, r) &&
                curVotes > accum(n + 2, r + 1) &&
                curVotes > accum(n + 2, r + 2))
            {
                const float radius = (r - (accum.cols - 2 - 1) * 0.5f) * rho;
                const float angle = n * theta;

                float cosa;
                float sina;
                sincosf(angle, &sina, &cosa);

                float2 p0 = make_float2(cosa * radius, sina * radius);
                float2 dir = make_float2(-sina, cosa);

                // pb[0..3]: intersections of the line with the left/right/top/bottom image borders
                float2 pb[4] = {make_float2(-1, -1), make_float2(-1, -1), make_float2(-1, -1), make_float2(-1, -1)};
                float a;

                if (dir.x != 0)
                {
                    a = -p0.x / dir.x;
                    pb[0].x = 0;
                    pb[0].y = p0.y + a * dir.y;

                    a = (cols - 1 - p0.x) / dir.x;
                    pb[1].x = cols - 1;
                    pb[1].y = p0.y + a * dir.y;
                }
                if (dir.y != 0)
                {
                    a = -p0.y / dir.y;
                    pb[2].x = p0.x + a * dir.x;
                    pb[2].y = 0;

                    a = (rows - 1 - p0.y) / dir.y;
                    pb[3].x = p0.x + a * dir.x;
                    pb[3].y = rows - 1;
                }

                // choose the border entry point and orient dir to walk into the image
                if (pb[0].x == 0 && (pb[0].y >= 0 && pb[0].y < rows))
                {
                    p0 = pb[0];
                    if (dir.x < 0)
                        dir = -dir;
                }
                // NOTE(review): this branch validates pb[0].y while selecting pb[1] —
                // pb[1].y was presumably intended; confirm against upstream before changing.
                else if (pb[1].x == cols - 1 && (pb[0].y >= 0 && pb[0].y < rows))
                {
                    p0 = pb[1];
                    if (dir.x > 0)
                        dir = -dir;
                }
                else if (pb[2].y == 0 && (pb[2].x >= 0 && pb[2].x < cols))
                {
                    p0 = pb[2];
                    if (dir.y < 0)
                        dir = -dir;
                }
                else if (pb[3].y == rows - 1 && (pb[3].x >= 0 && pb[3].x < cols))
                {
                    p0 = pb[3];
                    if (dir.y > 0)
                        dir = -dir;
                }

                // Bresenham-like unit step along the dominant axis
                float2 d;
                if (::fabsf(dir.x) > ::fabsf(dir.y))
                {
                    d.x = dir.x > 0 ? 1 : -1;
                    d.y = dir.y / ::fabsf(dir.x);
                }
                else
                {
                    d.x = dir.x / ::fabsf(dir.y);
                    d.y = dir.y > 0 ? 1 : -1;
                }

                float2 line_end[2];
                int gap;
                bool inLine = false;

                float2 p1 = p0;
                if (p1.x < 0 || p1.x >= cols || p1.y < 0 || p1.y >= rows)
                    return;

                for (;;)
                {
                    if (tex2D(tex_mask, p1.x, p1.y))
                    {
                        gap = 0;

                        if (!inLine)
                        {
                            line_end[0] = p1;
                            line_end[1] = p1;
                            inLine = true;
                        }
                        else
                        {
                            line_end[1] = p1;
                        }
                    }
                    else if (inLine)
                    {
                        if (++gap > lineGap)
                        {
                            // gap exceeded: close the current run and emit it if long enough
                            bool good_line = ::abs(line_end[1].x - line_end[0].x) >= lineLength ||
                                             ::abs(line_end[1].y - line_end[0].y) >= lineLength;

                            if (good_line)
                            {
                                const int ind = ::atomicAdd(&g_counter, 1);
                                if (ind < maxSize)
                                    out[ind] = make_int4(line_end[0].x, line_end[0].y, line_end[1].x, line_end[1].y);
                            }

                            gap = 0;
                            inLine = false;
                        }
                    }

                    p1 = p1 + d;
                    if (p1.x < 0 || p1.x >= cols || p1.y < 0 || p1.y >= rows)
                    {
                        // walked off the image: flush the run in progress
                        if (inLine)
                        {
                            bool good_line = ::abs(line_end[1].x - line_end[0].x) >= lineLength ||
                                             ::abs(line_end[1].y - line_end[0].y) >= lineLength;

                            if (good_line)
                            {
                                const int ind = ::atomicAdd(&g_counter, 1);
                                if (ind < maxSize)
                                    out[ind] = make_int4(line_end[0].x, line_end[0].y, line_end[1].x, line_end[1].y);
                            }
                        }
                        break;
                    }
                }
            }
        }

        // Host wrapper: binds the edge mask texture and returns the segment count.
        int houghLinesProbabilistic_gpu(PtrStepSzb mask, PtrStepSzi accum, int4* out, int maxSize, float rho, float theta, int lineGap, int lineLength)
        {
            void* counterPtr;
            cudaSafeCall( cudaGetSymbolAddress(&counterPtr, g_counter) );
            cudaSafeCall( cudaMemset(counterPtr, 0, sizeof(int)) );

            const dim3 block(32, 8);
            const dim3 grid(divUp(accum.cols - 2, block.x), divUp(accum.rows - 2, block.y));

            bindTexture(&tex_mask, mask);

            houghLinesProbabilistic<<<grid, block>>>(accum,
                                                     out, maxSize,
                                                     rho, theta,
                                                     lineGap, lineLength,
                                                     mask.rows, mask.cols);
            cudaSafeCall( cudaGetLastError() );
            cudaSafeCall( cudaDeviceSynchronize() );

            int totalCount;
            cudaSafeCall( cudaMemcpy(&totalCount, counterPtr, sizeof(int), cudaMemcpyDeviceToHost) );

            totalCount = ::min(totalCount, maxSize);

            return totalCount;
        }

        ////////////////////////////////////////////////////////////////////////
        // circlesAccumCenters

        // For each edge point, steps along its (fixed-point, SHIFT-bit) gradient
        // direction from minRadius to maxRadius in both orientations, voting for
        // candidate circle centres in the bordered accumulator.
        __global__ void circlesAccumCenters(const unsigned int* list, const int count, const PtrStepi dx, const PtrStepi dy,
                                            PtrStepi accum, const int width, const int height, const int minRadius, const int maxRadius, const float idp)
        {
            const int SHIFT = 10;
            const int ONE = 1 << SHIFT;  // fixed-point scale for the sub-cell stepping below

            const int tid = blockIdx.x * blockDim.x + threadIdx.x;

            if (tid >= count)
                return;

            const unsigned int val = list[tid];

            const int x = (val & 0xFFFF);
            const int y = (val >> 16) & 0xFFFF;

            const int vx = dx(y, x);
            const int vy = dy(y, x);

            if (vx == 0 && vy == 0)
                return;

            const float mag = ::sqrtf(vx * vx + vy * vy);

            const int x0 = __float2int_rn((x * idp) * ONE);
            const int y0 = __float2int_rn((y * idp) * ONE);

            int sx = __float2int_rn((vx * idp) * ONE / mag);
            int sy = __float2int_rn((vy * idp) * ONE / mag);

            // Step from minRadius to maxRadius in both directions of the gradient
            for (int k1 = 0; k1 < 2; ++k1)
            {
                int x1 = x0 + minRadius * sx;
                int y1 = y0 + minRadius * sy;

                for (int r = minRadius; r <= maxRadius; x1 += sx, y1 += sy, ++r)
                {
                    const int x2 = x1 >> SHIFT;
                    const int y2 = y1 >> SHIFT;

                    if (x2 < 0 || x2 >= width || y2 < 0 || y2 >= height)
                        break;

                    ::atomicAdd(accum.ptr(y2 + 1) + x2 + 1, 1);
                }

                sx = -sx;
                sy = -sy;
            }
        }

        // Host wrapper; the accumulator carries a one-cell border, hence cols/rows - 2.
        void circlesAccumCenters_gpu(const unsigned int* list, int count, PtrStepi dx, PtrStepi dy, PtrStepSzi accum, int minRadius, int maxRadius, float idp)
        {
            const dim3 block(256);
            const dim3 grid(divUp(count, block.x));

            cudaSafeCall( cudaFuncSetCacheConfig(circlesAccumCenters, cudaFuncCachePreferL1) );

            circlesAccumCenters<<<grid, block>>>(list, count, dx, dy, accum, accum.cols - 2, accum.rows - 2, minRadius, maxRadius, idp);
            cudaSafeCall( cudaGetLastError() );
            cudaSafeCall( cudaDeviceSynchronize() );
        }

        ////////////////////////////////////////////////////////////////////////
        // buildCentersList

        // Collects 4-neighbour local maxima of the centre accumulator above
        // `threshold` into `centers` as packed (y << 16) | x words.
        __global__ void buildCentersList(const PtrStepSzi accum, unsigned int* centers, const int threshold)
        {
            const int x = blockIdx.x * blockDim.x + threadIdx.x;
            const int y = blockIdx.y * blockDim.y + threadIdx.y;

            if (x < accum.cols - 2 && y < accum.rows - 2)
            {
                const int top = accum(y, x + 1);

                const int left = accum(y + 1, x);
                const int cur = accum(y + 1, x + 1);
                const int right = accum(y + 1, x + 2);

                const int bottom = accum(y + 2, x + 1);

                if (cur > threshold && cur > top && cur >= bottom && cur > left && cur >= right)
                {
                    const unsigned int val = (y << 16) | x;
                    const int idx = ::atomicAdd(&g_counter, 1);
                    // NOTE(review): no capacity guard on idx — `centers` must be sized
                    // for the worst case (one entry per accumulator cell).
                    centers[idx] = val;
                }
            }
        }

        // Host wrapper: returns the number of candidate centres found.
        int buildCentersList_gpu(PtrStepSzi accum, unsigned int* centers, int threshold)
        {
            void* counterPtr;
            cudaSafeCall( cudaGetSymbolAddress(&counterPtr, g_counter) );
            cudaSafeCall( cudaMemset(counterPtr, 0, sizeof(int)) );

            const dim3 block(32, 8);
            const dim3 grid(divUp(accum.cols - 2, block.x), divUp(accum.rows - 2, block.y));

            cudaSafeCall( cudaFuncSetCacheConfig(buildCentersList, cudaFuncCachePreferL1) );

            buildCentersList<<<grid, block>>>(accum, centers, threshold);
            cudaSafeCall( cudaGetLastError() );
            cudaSafeCall( cudaDeviceSynchronize() );

            int totalCount;
            cudaSafeCall( cudaMemcpy(&totalCount, counterPtr, sizeof(int), cudaMemcpyDeviceToHost) );

            return totalCount;
        }

        ////////////////////////////////////////////////////////////////////////
        // circlesAccumRadius

        // One block per candidate centre: builds a shared radius histogram over all
        // edge points within [minRadius, maxRadius] of the centre.
        // (This kernel continues beyond the end of this chunk of the file.)
        __global__ void circlesAccumRadius(const unsigned int* centers, const unsigned int* list, const int count,
                                           float3* circles, const int maxCircles, const float dp,
                                           const int minRadius, const int maxRadius, const int histSize, const int threshold)
        {
            int* smem = DynamicSharedMem<int>();

            for (int i = threadIdx.x; i < histSize + 2; i += blockDim.x)
                smem[i] = 0;
            __syncthreads();

            unsigned int val = centers[blockIdx.x];

            float cx = (val & 0xFFFF);
            float cy = (val >> 16) & 0xFFFF;

            // accumulator cells -> image coordinates (cell centre)
            cx = (cx + 0.5f) * dp;
            cy = (cy + 0.5f) * dp;

            for (int i = threadIdx.x; i < count; i += blockDim.x)
            {
                val = list[i];

                const int x = (val & 0xFFFF);
                const int y = (val >> 16) & 0xFFFF;

                const float rad = ::sqrtf((cx - x) * (cx - x) + (cy - y) * (cy - y));
                if (rad >= minRadius && rad <= maxRadius)
                {
                    const int r = __float2int_rn(rad - minRadius);
                    Emulation::smem::atomicAdd(&smem[r + 1], 1);
                }
            }
__syncthreads(); for (int i = threadIdx.x; i < histSize; i += blockDim.x) { const int curVotes = smem[i + 1]; if (curVotes >= threshold && curVotes > smem[i] && curVotes >= smem[i + 2]) { const int ind = ::atomicAdd(&g_counter, 1); if (ind < maxCircles) circles[ind] = make_float3(cx, cy, i + minRadius); } } } int circlesAccumRadius_gpu(const unsigned int* centers, int centersCount, const unsigned int* list, int count, float3* circles, int maxCircles, float dp, int minRadius, int maxRadius, int threshold, bool has20) { void* counterPtr; cudaSafeCall( cudaGetSymbolAddress(&counterPtr, g_counter) ); cudaSafeCall( cudaMemset(counterPtr, 0, sizeof(int)) ); const dim3 block(has20 ? 1024 : 512); const dim3 grid(centersCount); const int histSize = maxRadius - minRadius + 1; size_t smemSize = (histSize + 2) * sizeof(int); circlesAccumRadius<<<grid, block, smemSize>>>(centers, list, count, circles, maxCircles, dp, minRadius, maxRadius, histSize, threshold); cudaSafeCall( cudaGetLastError() ); cudaSafeCall( cudaDeviceSynchronize() ); int totalCount; cudaSafeCall( cudaMemcpy(&totalCount, counterPtr, sizeof(int), cudaMemcpyDeviceToHost) ); totalCount = ::min(totalCount, maxCircles); return totalCount; } //////////////////////////////////////////////////////////////////////// // Generalized Hough template <typename T, int PIXELS_PER_THREAD> __global__ void buildEdgePointList(const PtrStepSzb edges, const PtrStep<T> dx, const PtrStep<T> dy, unsigned int* coordList, float* thetaList) { __shared__ unsigned int s_coordLists[4][32 * PIXELS_PER_THREAD]; __shared__ float s_thetaLists[4][32 * PIXELS_PER_THREAD]; __shared__ int s_sizes[4]; __shared__ int s_globStart[4]; const int x = blockIdx.x * blockDim.x * PIXELS_PER_THREAD + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (threadIdx.x == 0) s_sizes[threadIdx.y] = 0; __syncthreads(); if (y < edges.rows) { // fill the queue const uchar* edgesRow = edges.ptr(y); const T* dxRow = dx.ptr(y); const T* dyRow = 
dy.ptr(y); for (int i = 0, xx = x; i < PIXELS_PER_THREAD && xx < edges.cols; ++i, xx += blockDim.x) { const T dxVal = dxRow[xx]; const T dyVal = dyRow[xx]; if (edgesRow[xx] && (dxVal != 0 || dyVal != 0)) { const unsigned int coord = (y << 16) | xx; float theta = ::atan2f(dyVal, dxVal); if (theta < 0) theta += 2.0f * CV_PI_F; const int qidx = Emulation::smem::atomicAdd(&s_sizes[threadIdx.y], 1); s_coordLists[threadIdx.y][qidx] = coord; s_thetaLists[threadIdx.y][qidx] = theta; } } } __syncthreads(); // let one thread reserve the space required in the global list if (threadIdx.x == 0 && threadIdx.y == 0) { // find how many items are stored in each list int totalSize = 0; for (int i = 0; i < blockDim.y; ++i) { s_globStart[i] = totalSize; totalSize += s_sizes[i]; } // calculate the offset in the global list const int globalOffset = atomicAdd(&g_counter, totalSize); for (int i = 0; i < blockDim.y; ++i) s_globStart[i] += globalOffset; } __syncthreads(); // copy local queues to global queue const int qsize = s_sizes[threadIdx.y]; int gidx = s_globStart[threadIdx.y] + threadIdx.x; for(int i = threadIdx.x; i < qsize; i += blockDim.x, gidx += blockDim.x) { coordList[gidx] = s_coordLists[threadIdx.y][i]; thetaList[gidx] = s_thetaLists[threadIdx.y][i]; } } template <typename T> int buildEdgePointList_gpu(PtrStepSzb edges, PtrStepSzb dx, PtrStepSzb dy, unsigned int* coordList, float* thetaList) { const int PIXELS_PER_THREAD = 8; void* counterPtr; cudaSafeCall( cudaGetSymbolAddress(&counterPtr, g_counter) ); cudaSafeCall( cudaMemset(counterPtr, 0, sizeof(int)) ); const dim3 block(32, 4); const dim3 grid(divUp(edges.cols, block.x * PIXELS_PER_THREAD), divUp(edges.rows, block.y)); cudaSafeCall( cudaFuncSetCacheConfig(buildEdgePointList<T, PIXELS_PER_THREAD>, cudaFuncCachePreferShared) ); buildEdgePointList<T, PIXELS_PER_THREAD><<<grid, block>>>(edges, (PtrStepSz<T>) dx, (PtrStepSz<T>) dy, coordList, thetaList); cudaSafeCall( cudaGetLastError() ); cudaSafeCall( 
cudaDeviceSynchronize() ); int totalCount; cudaSafeCall( cudaMemcpy(&totalCount, counterPtr, sizeof(int), cudaMemcpyDeviceToHost) ); return totalCount; } template int buildEdgePointList_gpu<short>(PtrStepSzb edges, PtrStepSzb dx, PtrStepSzb dy, unsigned int* coordList, float* thetaList); template int buildEdgePointList_gpu<int>(PtrStepSzb edges, PtrStepSzb dx, PtrStepSzb dy, unsigned int* coordList, float* thetaList); template int buildEdgePointList_gpu<float>(PtrStepSzb edges, PtrStepSzb dx, PtrStepSzb dy, unsigned int* coordList, float* thetaList); __global__ void buildRTable(const unsigned int* coordList, const float* thetaList, const int pointsCount, PtrStep<short2> r_table, int* r_sizes, int maxSize, const short2 templCenter, const float thetaScale) { const int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid >= pointsCount) return; const unsigned int coord = coordList[tid]; short2 p; p.x = (coord & 0xFFFF); p.y = (coord >> 16) & 0xFFFF; const float theta = thetaList[tid]; const int n = __float2int_rn(theta * thetaScale); const int ind = ::atomicAdd(r_sizes + n, 1); if (ind < maxSize) r_table(n, ind) = p - templCenter; } void buildRTable_gpu(const unsigned int* coordList, const float* thetaList, int pointsCount, PtrStepSz<short2> r_table, int* r_sizes, short2 templCenter, int levels) { const dim3 block(256); const dim3 grid(divUp(pointsCount, block.x)); const float thetaScale = levels / (2.0f * CV_PI_F); buildRTable<<<grid, block>>>(coordList, thetaList, pointsCount, r_table, r_sizes, r_table.cols, templCenter, thetaScale); cudaSafeCall( cudaGetLastError() ); cudaSafeCall( cudaDeviceSynchronize() ); } //////////////////////////////////////////////////////////////////////// // GHT_Ballard_Pos __global__ void GHT_Ballard_Pos_calcHist(const unsigned int* coordList, const float* thetaList, const int pointsCount, const PtrStep<short2> r_table, const int* r_sizes, PtrStepSzi hist, const float idp, const float thetaScale) { const int tid = blockIdx.x * blockDim.x 
+ threadIdx.x; if (tid >= pointsCount) return; const unsigned int coord = coordList[tid]; short2 p; p.x = (coord & 0xFFFF); p.y = (coord >> 16) & 0xFFFF; const float theta = thetaList[tid]; const int n = __float2int_rn(theta * thetaScale); const short2* r_row = r_table.ptr(n); const int r_row_size = r_sizes[n]; for (int j = 0; j < r_row_size; ++j) { short2 c = p - r_row[j]; c.x = __float2int_rn(c.x * idp); c.y = __float2int_rn(c.y * idp); if (c.x >= 0 && c.x < hist.cols - 2 && c.y >= 0 && c.y < hist.rows - 2) ::atomicAdd(hist.ptr(c.y + 1) + c.x + 1, 1); } } void GHT_Ballard_Pos_calcHist_gpu(const unsigned int* coordList, const float* thetaList, int pointsCount, PtrStepSz<short2> r_table, const int* r_sizes, PtrStepSzi hist, float dp, int levels) { const dim3 block(256); const dim3 grid(divUp(pointsCount, block.x)); const float idp = 1.0f / dp; const float thetaScale = levels / (2.0f * CV_PI_F); GHT_Ballard_Pos_calcHist<<<grid, block>>>(coordList, thetaList, pointsCount, r_table, r_sizes, hist, idp, thetaScale); cudaSafeCall( cudaGetLastError() ); cudaSafeCall( cudaDeviceSynchronize() ); } __global__ void GHT_Ballard_Pos_findPosInHist(const PtrStepSzi hist, float4* out, int3* votes, const int maxSize, const float dp, const int threshold) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= hist.cols - 2 || y >= hist.rows - 2) return; const int curVotes = hist(y + 1, x + 1); if (curVotes > threshold && curVotes > hist(y + 1, x) && curVotes >= hist(y + 1, x + 2) && curVotes > hist(y, x + 1) && curVotes >= hist(y + 2, x + 1)) { const int ind = ::atomicAdd(&g_counter, 1); if (ind < maxSize) { out[ind] = make_float4(x * dp, y * dp, 1.0f, 0.0f); votes[ind] = make_int3(curVotes, 0, 0); } } } int GHT_Ballard_Pos_findPosInHist_gpu(PtrStepSzi hist, float4* out, int3* votes, int maxSize, float dp, int threshold) { void* counterPtr; cudaSafeCall( cudaGetSymbolAddress(&counterPtr, g_counter) ); cudaSafeCall( 
cudaMemset(counterPtr, 0, sizeof(int)) ); const dim3 block(32, 8); const dim3 grid(divUp(hist.cols - 2, block.x), divUp(hist.rows - 2, block.y)); cudaSafeCall( cudaFuncSetCacheConfig(GHT_Ballard_Pos_findPosInHist, cudaFuncCachePreferL1) ); GHT_Ballard_Pos_findPosInHist<<<grid, block>>>(hist, out, votes, maxSize, dp, threshold); cudaSafeCall( cudaGetLastError() ); cudaSafeCall( cudaDeviceSynchronize() ); int totalCount; cudaSafeCall( cudaMemcpy(&totalCount, counterPtr, sizeof(int), cudaMemcpyDeviceToHost) ); totalCount = ::min(totalCount, maxSize); return totalCount; } //////////////////////////////////////////////////////////////////////// // GHT_Ballard_PosScale __global__ void GHT_Ballard_PosScale_calcHist(const unsigned int* coordList, const float* thetaList, PtrStep<short2> r_table, const int* r_sizes, PtrStepi hist, const int rows, const int cols, const float minScale, const float scaleStep, const int scaleRange, const float idp, const float thetaScale) { const unsigned int coord = coordList[blockIdx.x]; float2 p; p.x = (coord & 0xFFFF); p.y = (coord >> 16) & 0xFFFF; const float theta = thetaList[blockIdx.x]; const int n = __float2int_rn(theta * thetaScale); const short2* r_row = r_table.ptr(n); const int r_row_size = r_sizes[n]; for (int j = 0; j < r_row_size; ++j) { const float2 d = saturate_cast<float2>(r_row[j]); for (int s = threadIdx.x; s < scaleRange; s += blockDim.x) { const float scale = minScale + s * scaleStep; float2 c = p - scale * d; c.x *= idp; c.y *= idp; if (c.x >= 0 && c.x < cols && c.y >= 0 && c.y < rows) ::atomicAdd(hist.ptr((s + 1) * (rows + 2) + __float2int_rn(c.y + 1)) + __float2int_rn(c.x + 1), 1); } } } void GHT_Ballard_PosScale_calcHist_gpu(const unsigned int* coordList, const float* thetaList, int pointsCount, PtrStepSz<short2> r_table, const int* r_sizes, PtrStepi hist, int rows, int cols, float minScale, float scaleStep, int scaleRange, float dp, int levels) { const dim3 block(256); const dim3 grid(pointsCount); const float idp = 
1.0f / dp; const float thetaScale = levels / (2.0f * CV_PI_F); GHT_Ballard_PosScale_calcHist<<<grid, block>>>(coordList, thetaList, r_table, r_sizes, hist, rows, cols, minScale, scaleStep, scaleRange, idp, thetaScale); cudaSafeCall( cudaGetLastError() ); cudaSafeCall( cudaDeviceSynchronize() ); } __global__ void GHT_Ballard_PosScale_findPosInHist(const PtrStepi hist, const int rows, const int cols, const int scaleRange, float4* out, int3* votes, const int maxSize, const float minScale, const float scaleStep, const float dp, const int threshold) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= cols || y >= rows) return; for (int s = 0; s < scaleRange; ++s) { const float scale = minScale + s * scaleStep; const int prevScaleIdx = (s) * (rows + 2); const int curScaleIdx = (s + 1) * (rows + 2); const int nextScaleIdx = (s + 2) * (rows + 2); const int curVotes = hist(curScaleIdx + y + 1, x + 1); if (curVotes > threshold && curVotes > hist(curScaleIdx + y + 1, x) && curVotes >= hist(curScaleIdx + y + 1, x + 2) && curVotes > hist(curScaleIdx + y, x + 1) && curVotes >= hist(curScaleIdx + y + 2, x + 1) && curVotes > hist(prevScaleIdx + y + 1, x + 1) && curVotes >= hist(nextScaleIdx + y + 1, x + 1)) { const int ind = ::atomicAdd(&g_counter, 1); if (ind < maxSize) { out[ind] = make_float4(x * dp, y * dp, scale, 0.0f); votes[ind] = make_int3(curVotes, curVotes, 0); } } } } int GHT_Ballard_PosScale_findPosInHist_gpu(PtrStepi hist, int rows, int cols, int scaleRange, float4* out, int3* votes, int maxSize, float minScale, float scaleStep, float dp, int threshold) { void* counterPtr; cudaSafeCall( cudaGetSymbolAddress(&counterPtr, g_counter) ); cudaSafeCall( cudaMemset(counterPtr, 0, sizeof(int)) ); const dim3 block(32, 8); const dim3 grid(divUp(cols, block.x), divUp(rows, block.y)); cudaSafeCall( cudaFuncSetCacheConfig(GHT_Ballard_PosScale_findPosInHist, cudaFuncCachePreferL1) ); 
GHT_Ballard_PosScale_findPosInHist<<<grid, block>>>(hist, rows, cols, scaleRange, out, votes, maxSize, minScale, scaleStep, dp, threshold); cudaSafeCall( cudaGetLastError() ); cudaSafeCall( cudaDeviceSynchronize() ); int totalCount; cudaSafeCall( cudaMemcpy(&totalCount, counterPtr, sizeof(int), cudaMemcpyDeviceToHost) ); totalCount = ::min(totalCount, maxSize); return totalCount; } //////////////////////////////////////////////////////////////////////// // GHT_Ballard_PosRotation __global__ void GHT_Ballard_PosRotation_calcHist(const unsigned int* coordList, const float* thetaList, PtrStep<short2> r_table, const int* r_sizes, PtrStepi hist, const int rows, const int cols, const float minAngle, const float angleStep, const int angleRange, const float idp, const float thetaScale) { const unsigned int coord = coordList[blockIdx.x]; float2 p; p.x = (coord & 0xFFFF); p.y = (coord >> 16) & 0xFFFF; const float thetaVal = thetaList[blockIdx.x]; for (int a = threadIdx.x; a < angleRange; a += blockDim.x) { const float angle = (minAngle + a * angleStep) * (CV_PI_F / 180.0f); float sinA, cosA; sincosf(angle, &sinA, &cosA); float theta = thetaVal - angle; if (theta < 0) theta += 2.0f * CV_PI_F; const int n = __float2int_rn(theta * thetaScale); const short2* r_row = r_table.ptr(n); const int r_row_size = r_sizes[n]; for (int j = 0; j < r_row_size; ++j) { const float2 d = saturate_cast<float2>(r_row[j]); const float2 dr = make_float2(d.x * cosA - d.y * sinA, d.x * sinA + d.y * cosA); float2 c = make_float2(p.x - dr.x, p.y - dr.y); c.x *= idp; c.y *= idp; if (c.x >= 0 && c.x < cols && c.y >= 0 && c.y < rows) ::atomicAdd(hist.ptr((a + 1) * (rows + 2) + __float2int_rn(c.y + 1)) + __float2int_rn(c.x + 1), 1); } } } void GHT_Ballard_PosRotation_calcHist_gpu(const unsigned int* coordList, const float* thetaList, int pointsCount, PtrStepSz<short2> r_table, const int* r_sizes, PtrStepi hist, int rows, int cols, float minAngle, float angleStep, int angleRange, float dp, int levels) { 
const dim3 block(256); const dim3 grid(pointsCount); const float idp = 1.0f / dp; const float thetaScale = levels / (2.0f * CV_PI_F); GHT_Ballard_PosRotation_calcHist<<<grid, block>>>(coordList, thetaList, r_table, r_sizes, hist, rows, cols, minAngle, angleStep, angleRange, idp, thetaScale); cudaSafeCall( cudaGetLastError() ); cudaSafeCall( cudaDeviceSynchronize() ); } __global__ void GHT_Ballard_PosRotation_findPosInHist(const PtrStepi hist, const int rows, const int cols, const int angleRange, float4* out, int3* votes, const int maxSize, const float minAngle, const float angleStep, const float dp, const int threshold) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= cols || y >= rows) return; for (int a = 0; a < angleRange; ++a) { const float angle = minAngle + a * angleStep; const int prevAngleIdx = (a) * (rows + 2); const int curAngleIdx = (a + 1) * (rows + 2); const int nextAngleIdx = (a + 2) * (rows + 2); const int curVotes = hist(curAngleIdx + y + 1, x + 1); if (curVotes > threshold && curVotes > hist(curAngleIdx + y + 1, x) && curVotes >= hist(curAngleIdx + y + 1, x + 2) && curVotes > hist(curAngleIdx + y, x + 1) && curVotes >= hist(curAngleIdx + y + 2, x + 1) && curVotes > hist(prevAngleIdx + y + 1, x + 1) && curVotes >= hist(nextAngleIdx + y + 1, x + 1)) { const int ind = ::atomicAdd(&g_counter, 1); if (ind < maxSize) { out[ind] = make_float4(x * dp, y * dp, 1.0f, angle); votes[ind] = make_int3(curVotes, 0, curVotes); } } } } int GHT_Ballard_PosRotation_findPosInHist_gpu(PtrStepi hist, int rows, int cols, int angleRange, float4* out, int3* votes, int maxSize, float minAngle, float angleStep, float dp, int threshold) { void* counterPtr; cudaSafeCall( cudaGetSymbolAddress(&counterPtr, g_counter) ); cudaSafeCall( cudaMemset(counterPtr, 0, sizeof(int)) ); const dim3 block(32, 8); const dim3 grid(divUp(cols, block.x), divUp(rows, block.y)); cudaSafeCall( 
cudaFuncSetCacheConfig(GHT_Ballard_PosRotation_findPosInHist, cudaFuncCachePreferL1) ); GHT_Ballard_PosRotation_findPosInHist<<<grid, block>>>(hist, rows, cols, angleRange, out, votes, maxSize, minAngle, angleStep, dp, threshold); cudaSafeCall( cudaGetLastError() ); cudaSafeCall( cudaDeviceSynchronize() ); int totalCount; cudaSafeCall( cudaMemcpy(&totalCount, counterPtr, sizeof(int), cudaMemcpyDeviceToHost) ); totalCount = ::min(totalCount, maxSize); return totalCount; } //////////////////////////////////////////////////////////////////////// // GHT_Guil_Full struct FeatureTable { uchar* p1_pos_data; size_t p1_pos_step; uchar* p1_theta_data; size_t p1_theta_step; uchar* p2_pos_data; size_t p2_pos_step; uchar* d12_data; size_t d12_step; uchar* r1_data; size_t r1_step; uchar* r2_data; size_t r2_step; }; __constant__ FeatureTable c_templFeatures; __constant__ FeatureTable c_imageFeatures; void GHT_Guil_Full_setTemplFeatures(PtrStepb p1_pos, PtrStepb p1_theta, PtrStepb p2_pos, PtrStepb d12, PtrStepb r1, PtrStepb r2) { FeatureTable tbl; tbl.p1_pos_data = p1_pos.data; tbl.p1_pos_step = p1_pos.step; tbl.p1_theta_data = p1_theta.data; tbl.p1_theta_step = p1_theta.step; tbl.p2_pos_data = p2_pos.data; tbl.p2_pos_step = p2_pos.step; tbl.d12_data = d12.data; tbl.d12_step = d12.step; tbl.r1_data = r1.data; tbl.r1_step = r1.step; tbl.r2_data = r2.data; tbl.r2_step = r2.step; cudaSafeCall( cudaMemcpyToSymbol(c_templFeatures, &tbl, sizeof(FeatureTable)) ); } void GHT_Guil_Full_setImageFeatures(PtrStepb p1_pos, PtrStepb p1_theta, PtrStepb p2_pos, PtrStepb d12, PtrStepb r1, PtrStepb r2) { FeatureTable tbl; tbl.p1_pos_data = p1_pos.data; tbl.p1_pos_step = p1_pos.step; tbl.p1_theta_data = p1_theta.data; tbl.p1_theta_step = p1_theta.step; tbl.p2_pos_data = p2_pos.data; tbl.p2_pos_step = p2_pos.step; tbl.d12_data = d12.data; tbl.d12_step = d12.step; tbl.r1_data = r1.data; tbl.r1_step = r1.step; tbl.r2_data = r2.data; tbl.r2_step = r2.step; cudaSafeCall( 
cudaMemcpyToSymbol(c_imageFeatures, &tbl, sizeof(FeatureTable)) ); } struct TemplFeatureTable { static __device__ float2* p1_pos(int n) { return (float2*)(c_templFeatures.p1_pos_data + n * c_templFeatures.p1_pos_step); } static __device__ float* p1_theta(int n) { return (float*)(c_templFeatures.p1_theta_data + n * c_templFeatures.p1_theta_step); } static __device__ float2* p2_pos(int n) { return (float2*)(c_templFeatures.p2_pos_data + n * c_templFeatures.p2_pos_step); } static __device__ float* d12(int n) { return (float*)(c_templFeatures.d12_data + n * c_templFeatures.d12_step); } static __device__ float2* r1(int n) { return (float2*)(c_templFeatures.r1_data + n * c_templFeatures.r1_step); } static __device__ float2* r2(int n) { return (float2*)(c_templFeatures.r2_data + n * c_templFeatures.r2_step); } }; struct ImageFeatureTable { static __device__ float2* p1_pos(int n) { return (float2*)(c_imageFeatures.p1_pos_data + n * c_imageFeatures.p1_pos_step); } static __device__ float* p1_theta(int n) { return (float*)(c_imageFeatures.p1_theta_data + n * c_imageFeatures.p1_theta_step); } static __device__ float2* p2_pos(int n) { return (float2*)(c_imageFeatures.p2_pos_data + n * c_imageFeatures.p2_pos_step); } static __device__ float* d12(int n) { return (float*)(c_imageFeatures.d12_data + n * c_imageFeatures.d12_step); } static __device__ float2* r1(int n) { return (float2*)(c_imageFeatures.r1_data + n * c_imageFeatures.r1_step); } static __device__ float2* r2(int n) { return (float2*)(c_imageFeatures.r2_data + n * c_imageFeatures.r2_step); } }; __device__ float clampAngle(float a) { float res = a; while (res > 2.0f * CV_PI_F) res -= 2.0f * CV_PI_F; while (res < 0.0f) res += 2.0f * CV_PI_F; return res; } __device__ bool angleEq(float a, float b, float eps) { return (::fabs(clampAngle(a - b)) <= eps); } template <class FT, bool isTempl> __global__ void GHT_Guil_Full_buildFeatureList(const unsigned int* coordList, const float* thetaList, const int pointsCount, int* sizes, 
const int maxSize, const float xi, const float angleEpsilon, const float alphaScale, const float2 center, const float maxDist) { const float p1_theta = thetaList[blockIdx.x]; const unsigned int coord1 = coordList[blockIdx.x]; float2 p1_pos; p1_pos.x = (coord1 & 0xFFFF); p1_pos.y = (coord1 >> 16) & 0xFFFF; for (int i = threadIdx.x; i < pointsCount; i += blockDim.x) { const float p2_theta = thetaList[i]; const unsigned int coord2 = coordList[i]; float2 p2_pos; p2_pos.x = (coord2 & 0xFFFF); p2_pos.y = (coord2 >> 16) & 0xFFFF; if (angleEq(p1_theta - p2_theta, xi, angleEpsilon)) { const float2 d = p1_pos - p2_pos; float alpha12 = clampAngle(::atan2(d.y, d.x) - p1_theta); float d12 = ::sqrtf(d.x * d.x + d.y * d.y); if (d12 > maxDist) continue; float2 r1 = p1_pos - center; float2 r2 = p2_pos - center; const int n = __float2int_rn(alpha12 * alphaScale); const int ind = ::atomicAdd(sizes + n, 1); if (ind < maxSize) { if (!isTempl) { FT::p1_pos(n)[ind] = p1_pos; FT::p2_pos(n)[ind] = p2_pos; } FT::p1_theta(n)[ind] = p1_theta; FT::d12(n)[ind] = d12; if (isTempl) { FT::r1(n)[ind] = r1; FT::r2(n)[ind] = r2; } } } } } template <class FT, bool isTempl> void GHT_Guil_Full_buildFeatureList_caller(const unsigned int* coordList, const float* thetaList, int pointsCount, int* sizes, int maxSize, float xi, float angleEpsilon, int levels, float2 center, float maxDist) { const dim3 block(256); const dim3 grid(pointsCount); const float alphaScale = levels / (2.0f * CV_PI_F); GHT_Guil_Full_buildFeatureList<FT, isTempl><<<grid, block>>>(coordList, thetaList, pointsCount, sizes, maxSize, xi * (CV_PI_F / 180.0f), angleEpsilon * (CV_PI_F / 180.0f), alphaScale, center, maxDist); cudaSafeCall( cudaGetLastError() ); cudaSafeCall( cudaDeviceSynchronize() ); thrust::device_ptr<int> sizesPtr(sizes); thrust::transform(sizesPtr, sizesPtr + levels + 1, sizesPtr, device::bind2nd(device::minimum<int>(), maxSize)); } void GHT_Guil_Full_buildTemplFeatureList_gpu(const unsigned int* coordList, const float* 
thetaList, int pointsCount, int* sizes, int maxSize, float xi, float angleEpsilon, int levels, float2 center, float maxDist) { GHT_Guil_Full_buildFeatureList_caller<TemplFeatureTable, true>(coordList, thetaList, pointsCount, sizes, maxSize, xi, angleEpsilon, levels, center, maxDist); } void GHT_Guil_Full_buildImageFeatureList_gpu(const unsigned int* coordList, const float* thetaList, int pointsCount, int* sizes, int maxSize, float xi, float angleEpsilon, int levels, float2 center, float maxDist) { GHT_Guil_Full_buildFeatureList_caller<ImageFeatureTable, false>(coordList, thetaList, pointsCount, sizes, maxSize, xi, angleEpsilon, levels, center, maxDist); } __global__ void GHT_Guil_Full_calcOHist(const int* templSizes, const int* imageSizes, int* OHist, const float minAngle, const float maxAngle, const float iAngleStep, const int angleRange) { extern __shared__ int s_OHist[]; for (int i = threadIdx.x; i <= angleRange; i += blockDim.x) s_OHist[i] = 0; __syncthreads(); const int tIdx = blockIdx.x; const int level = blockIdx.y; const int tSize = templSizes[level]; if (tIdx < tSize) { const int imSize = imageSizes[level]; const float t_p1_theta = TemplFeatureTable::p1_theta(level)[tIdx]; for (int i = threadIdx.x; i < imSize; i += blockDim.x) { const float im_p1_theta = ImageFeatureTable::p1_theta(level)[i]; const float angle = clampAngle(im_p1_theta - t_p1_theta); if (angle >= minAngle && angle <= maxAngle) { const int n = __float2int_rn((angle - minAngle) * iAngleStep); Emulation::smem::atomicAdd(&s_OHist[n], 1); } } } __syncthreads(); for (int i = threadIdx.x; i <= angleRange; i += blockDim.x) ::atomicAdd(OHist + i, s_OHist[i]); } void GHT_Guil_Full_calcOHist_gpu(const int* templSizes, const int* imageSizes, int* OHist, float minAngle, float maxAngle, float angleStep, int angleRange, int levels, int tMaxSize) { const dim3 block(256); const dim3 grid(tMaxSize, levels + 1); minAngle *= (CV_PI_F / 180.0f); maxAngle *= (CV_PI_F / 180.0f); angleStep *= (CV_PI_F / 180.0f); 
const size_t smemSize = (angleRange + 1) * sizeof(float); GHT_Guil_Full_calcOHist<<<grid, block, smemSize>>>(templSizes, imageSizes, OHist, minAngle, maxAngle, 1.0f / angleStep, angleRange); cudaSafeCall( cudaGetLastError() ); cudaSafeCall( cudaDeviceSynchronize() ); } __global__ void GHT_Guil_Full_calcSHist(const int* templSizes, const int* imageSizes, int* SHist, const float angle, const float angleEpsilon, const float minScale, const float maxScale, const float iScaleStep, const int scaleRange) { extern __shared__ int s_SHist[]; for (int i = threadIdx.x; i <= scaleRange; i += blockDim.x) s_SHist[i] = 0; __syncthreads(); const int tIdx = blockIdx.x; const int level = blockIdx.y; const int tSize = templSizes[level]; if (tIdx < tSize) { const int imSize = imageSizes[level]; const float t_p1_theta = TemplFeatureTable::p1_theta(level)[tIdx] + angle; const float t_d12 = TemplFeatureTable::d12(level)[tIdx] + angle; for (int i = threadIdx.x; i < imSize; i += blockDim.x) { const float im_p1_theta = ImageFeatureTable::p1_theta(level)[i]; const float im_d12 = ImageFeatureTable::d12(level)[i]; if (angleEq(im_p1_theta, t_p1_theta, angleEpsilon)) { const float scale = im_d12 / t_d12; if (scale >= minScale && scale <= maxScale) { const int s = __float2int_rn((scale - minScale) * iScaleStep); Emulation::smem::atomicAdd(&s_SHist[s], 1); } } } } __syncthreads(); for (int i = threadIdx.x; i <= scaleRange; i += blockDim.x) ::atomicAdd(SHist + i, s_SHist[i]); } void GHT_Guil_Full_calcSHist_gpu(const int* templSizes, const int* imageSizes, int* SHist, float angle, float angleEpsilon, float minScale, float maxScale, float iScaleStep, int scaleRange, int levels, int tMaxSize) { const dim3 block(256); const dim3 grid(tMaxSize, levels + 1); angle *= (CV_PI_F / 180.0f); angleEpsilon *= (CV_PI_F / 180.0f); const size_t smemSize = (scaleRange + 1) * sizeof(float); GHT_Guil_Full_calcSHist<<<grid, block, smemSize>>>(templSizes, imageSizes, SHist, angle, angleEpsilon, minScale, maxScale, 
iScaleStep, scaleRange); cudaSafeCall( cudaGetLastError() ); cudaSafeCall( cudaDeviceSynchronize() ); } __global__ void GHT_Guil_Full_calcPHist(const int* templSizes, const int* imageSizes, PtrStepSzi PHist, const float angle, const float sinVal, const float cosVal, const float angleEpsilon, const float scale, const float idp) { const int tIdx = blockIdx.x; const int level = blockIdx.y; const int tSize = templSizes[level]; if (tIdx < tSize) { const int imSize = imageSizes[level]; const float t_p1_theta = TemplFeatureTable::p1_theta(level)[tIdx] + angle; float2 r1 = TemplFeatureTable::r1(level)[tIdx]; float2 r2 = TemplFeatureTable::r2(level)[tIdx]; r1 = r1 * scale; r2 = r2 * scale; r1 = make_float2(cosVal * r1.x - sinVal * r1.y, sinVal * r1.x + cosVal * r1.y); r2 = make_float2(cosVal * r2.x - sinVal * r2.y, sinVal * r2.x + cosVal * r2.y); for (int i = threadIdx.x; i < imSize; i += blockDim.x) { const float im_p1_theta = ImageFeatureTable::p1_theta(level)[i]; const float2 im_p1_pos = ImageFeatureTable::p1_pos(level)[i]; const float2 im_p2_pos = ImageFeatureTable::p2_pos(level)[i]; if (angleEq(im_p1_theta, t_p1_theta, angleEpsilon)) { float2 c1, c2; c1 = im_p1_pos - r1; c1 = c1 * idp; c2 = im_p2_pos - r2; c2 = c2 * idp; if (::fabs(c1.x - c2.x) > 1 || ::fabs(c1.y - c2.y) > 1) continue; if (c1.y >= 0 && c1.y < PHist.rows - 2 && c1.x >= 0 && c1.x < PHist.cols - 2) ::atomicAdd(PHist.ptr(__float2int_rn(c1.y) + 1) + __float2int_rn(c1.x) + 1, 1); } } } } void GHT_Guil_Full_calcPHist_gpu(const int* templSizes, const int* imageSizes, PtrStepSzi PHist, float angle, float angleEpsilon, float scale, float dp, int levels, int tMaxSize) { const dim3 block(256); const dim3 grid(tMaxSize, levels + 1); angle *= (CV_PI_F / 180.0f); angleEpsilon *= (CV_PI_F / 180.0f); const float sinVal = ::sinf(angle); const float cosVal = ::cosf(angle); cudaSafeCall( cudaFuncSetCacheConfig(GHT_Guil_Full_calcPHist, cudaFuncCachePreferL1) ); GHT_Guil_Full_calcPHist<<<grid, block>>>(templSizes, 
imageSizes, PHist, angle, sinVal, cosVal, angleEpsilon, scale, 1.0f / dp); cudaSafeCall( cudaGetLastError() ); cudaSafeCall( cudaDeviceSynchronize() ); } __global__ void GHT_Guil_Full_findPosInHist(const PtrStepSzi hist, float4* out, int3* votes, const int maxSize, const float angle, const int angleVotes, const float scale, const int scaleVotes, const float dp, const int threshold) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= hist.cols - 2 || y >= hist.rows - 2) return; const int curVotes = hist(y + 1, x + 1); if (curVotes > threshold && curVotes > hist(y + 1, x) && curVotes >= hist(y + 1, x + 2) && curVotes > hist(y, x + 1) && curVotes >= hist(y + 2, x + 1)) { const int ind = ::atomicAdd(&g_counter, 1); if (ind < maxSize) { out[ind] = make_float4(x * dp, y * dp, scale, angle); votes[ind] = make_int3(curVotes, scaleVotes, angleVotes); } } } int GHT_Guil_Full_findPosInHist_gpu(PtrStepSzi hist, float4* out, int3* votes, int curSize, int maxSize, float angle, int angleVotes, float scale, int scaleVotes, float dp, int threshold) { void* counterPtr; cudaSafeCall( cudaGetSymbolAddress(&counterPtr, g_counter) ); cudaSafeCall( cudaMemcpy(counterPtr, &curSize, sizeof(int), cudaMemcpyHostToDevice) ); const dim3 block(32, 8); const dim3 grid(divUp(hist.cols - 2, block.x), divUp(hist.rows - 2, block.y)); cudaSafeCall( cudaFuncSetCacheConfig(GHT_Guil_Full_findPosInHist, cudaFuncCachePreferL1) ); GHT_Guil_Full_findPosInHist<<<grid, block>>>(hist, out, votes, maxSize, angle, angleVotes, scale, scaleVotes, dp, threshold); cudaSafeCall( cudaGetLastError() ); cudaSafeCall( cudaDeviceSynchronize() ); int totalCount; cudaSafeCall( cudaMemcpy(&totalCount, counterPtr, sizeof(int), cudaMemcpyDeviceToHost) ); totalCount = ::min(totalCount, maxSize); return totalCount; } } }}} #endif /* CUDA_DISABLER */
8b3ceb9801ab6a523bead54e8dce958a018d9a8b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* NiuTrans.Tensor - an open-source tensor library * Copyright (C) 2017, Natural Language Processing Lab, Northeastern University. * All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* * $Created by: XIAO Tong (email: xiaotong@mail.neu.edu.cn) 2018-04-24 */ #include "../../XDevice.h" #include "../../XUtility.h" #include "../../XTensor.h" #include "Sort.h" #include "Sort.cuh" namespace nts { // namespace nts(NiuTrans.Tensor) #ifdef USE_ROCM /* bitonic sort (for each row in a matrix) >> data - pointer to the data array >> index - index data array >> j - segment/distance for comparsion >> k - length of the monotonic sequence >> m - column number of the matrix >> n - row number of the matrix */ template<class T> __global__ void KernelBitonicSort2D(void * data, int j, int k, int m, int n) { const unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x; const unsigned int row = blockDim.y * blockIdx.y + threadIdx.y; if (idx >= m || row >= n) return; T * items = (T*)data + m * row; int ixj = idx^j; if (ixj > idx) { if ((idx&k) == 0 && items[idx] < items[ixj]) { T tmp = items[idx]; items[idx] = items[ixj]; items[ixj] = tmp; } if ((idx&k) != 0 && items[idx] > items[ixj]) { T tmp = items[idx]; items[idx] = items[ixj]; items[ixj] = tmp; } } } /* bitonic sort (for each row in a matrix) with index >> data - pointer to the data array >> index - index data array >> j - segment/distance 
for comparsion >> k - length of the monotonic sequence >> m - column number of the matrix >> n - row number of the matrix */ template<class T> __global__ void KernelBitonicSort2D(void * data, int * index, int j, int k, int m, int n) { const unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x; const unsigned int row = blockDim.y * blockIdx.y + threadIdx.y; if (idx >= m || row >= n) return; T * items = (T*)data + m * row; int * indexOnSite = index + m * row; int ixj = idx^j; if (ixj > idx) { if ((idx&k) == 0 && items[idx] < items[ixj]) { T tmp = items[idx]; items[idx] = items[ixj]; items[ixj] = tmp; int tmp2 = indexOnSite[idx]; indexOnSite[idx] = indexOnSite[ixj]; indexOnSite[ixj] = tmp2; } if ((idx&k) != 0 && items[idx] > items[ixj]) { T tmp = items[idx]; items[idx] = items[ixj]; items[ixj] = tmp; int tmp2 = indexOnSite[idx]; indexOnSite[idx] = indexOnSite[ixj]; indexOnSite[ixj] = tmp2; } } } /* reorganize data blocks (in a tensor) into a matrix. In each (source) block we have stride * strideNum items, where strideNum means the items along the leading dimension. In the target matrix, each row keeps strideNum items along the leading dimension in each source block. 
>> source - source data array >> target - target data array >> srcStride - how many items we need to go over we move to the next >> srcStrideNum - size of the leading dimension >> srcBlockNum - number of the source blocks >> tgtColNum - number of columns in the target matrix >> tgtRowNum - number of rows in the target matrix */ template<class T> __global__ void KernelReorganize(void * source, void * target, int srcStride, int srcStrideNum, int srcBlockNum, int tgtColNum, int tgtRowNum) { __shared__ int iBlock[MAX_CUDA_THREAD_NUM_PER_BLOCK]; __shared__ int iOffset[MAX_CUDA_THREAD_NUM_PER_BLOCK]; /* index along the "stride" dimension */ int i = blockDim.x * blockIdx.x + threadIdx.x; /* index along the leading dimension */ int j = blockDim.y * blockIdx.y + threadIdx.y; if (i >= srcStride * srcBlockNum || j >= srcStrideNum) return; if (threadIdx.y == 0) { iBlock[threadIdx.x] = i / srcStride; iOffset[threadIdx.x] = i % srcStride; } __syncthreads(); T * s = (T*)source + (iBlock[threadIdx.x] * srcStrideNum + j) * srcStride + iOffset[threadIdx.x]; T * t = (T*)target + (iBlock[threadIdx.x] * srcStride + iOffset[threadIdx.x]) * tgtColNum + j; *t = *s; } /* copy back for "KernelReorganize" >> source - source data array >> target - target data array >> srcColNum - number of columns in the source matrix >> srcRowNum - number of rows in the source matrix >> tgtStride - how many items we need to go over we move to the next >> tgtStrideNum - size of the leading dimension >> tgtBlockNum - number of the target blocks */ template<class T> __global__ void KernelReorganizeBack(void * source, void * target, int srcColNum, int srcRowNum, int tgtStride, int tgtStrideNum, int tgtBlockNum) { __shared__ int iBlock[MAX_CUDA_THREAD_NUM_PER_BLOCK]; __shared__ int iOffset[MAX_CUDA_THREAD_NUM_PER_BLOCK]; /* index along the "stride" dimension */ int i = blockDim.x * blockIdx.x + threadIdx.x; /* index along the leading dimension */ int j = blockDim.y * blockIdx.y + threadIdx.y; if (i >= tgtStride * 
tgtBlockNum || j >= tgtStrideNum) return; if (threadIdx.y == 0) { iBlock[threadIdx.x] = i / tgtStride; iOffset[threadIdx.x] = i % tgtStride; } __syncthreads(); T * s = (T*)source + (iBlock[threadIdx.x] * tgtStride + iOffset[threadIdx.x]) * srcColNum + j; T * t = (T*)target + (iBlock[threadIdx.x] * tgtStrideNum + j) * tgtStride + iOffset[threadIdx.x]; *t = *s; } /* set the data arrry with a default value >> data - data array >> value - default value >> size - size of the array */ template<class T> __global__ void KernelSetDataArray(T * data, T value, int size) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < size) data[i] = value; } /* sort the tensor along a given dimension >> a - input >> b - output >> indexA - input index tensor >> indexB - output index tensor >> dim - specified dimension >> k - top-k results are returned */ void _CudaSortBig(const XTensor * a, XTensor * b, XTensor * indexA, XTensor * indexB, int dim, int k) { CheckNTErrors((a && b), "Empty input tensor!"); CheckNTErrors((a->unitSize == b->unitSize), "Unmatched tensors!"); CheckNTErrors((a->order > dim && dim >= 0), "Incorrect dimension specified!"); CheckNTErrors((a->dataType == DEFAULT_DTYPE), "TODO!"); if (k < 0 || k > b->dimSize[dim]) k = b->dimSize[dim]; XMem * mem = a->mem; int stride = 1; int blockNum = 1; int strideNum = a->dimSize[dim]; for (int i = 0; i < dim; i++) blockNum *= a->dimSize[i]; for (int i = dim + 1; i < a->order; i++) stride *= a->dimSize[i]; int m = GetNextPower2(strideNum); int n = stride * blockNum; //void * buf = mem != NULL ? mem->AllocBuf(a->devID, n * m * a->unitSize) : XMemAlloc(a->devID, n * m * a->unitSize); void * buf; if (mem != NULL) { mem->LockBuf(); buf = mem->AllocBuf(a->devID, n * m * a->unitSize); } else { buf = XMemAlloc(a->devID, n * m * a->unitSize); } void * bufIndex = NULL; if (indexA != NULL && indexB != NULL) { bufIndex = mem != NULL ? 
mem->AllocBuf(a->devID, n * m * sizeof(int)) : XMemAlloc(a->devID, n * m * sizeof(int)); } int cudaGrids[3]; int cudaBlocks[3]; GDevs.GetCudaThread(a->devID, m * n, cudaGrids, cudaBlocks); int devIDBackup; ProtectCudaDev(a->devID, devIDBackup); /* set the buffer to the "min" value */ KernelSetDataArray<DTYPE> << <dim3(cudaGrids[0]), dim3(cudaBlocks[0]) >> > ((DTYPE*)buf, DTYPE_MIN, m * n); GDevs.GetCudaThread2D(a->devID, strideNum, n, MAX_INT, cudaGrids, cudaBlocks); /* reorganize the data into a matrix */ KernelReorganize<DTYPE> << <dim3(cudaGrids[1], cudaGrids[0]), dim3(cudaBlocks[1], cudaBlocks[0]) >> > (a->data, buf, stride, strideNum, blockNum, m, n); /* reorganize the index into a matrix */ if (indexA != NULL && indexB != NULL) KernelReorganize<int> << <dim3(cudaGrids[1], cudaGrids[0]), dim3(cudaBlocks[1], cudaBlocks[0]) >> > (indexA->data, bufIndex, stride, strideNum, blockNum, m, n); GDevs.GetCudaThread2D(a->devID, m, n, MAX_INT, cudaGrids, cudaBlocks); /* bitonic sorting */ for (int i = 2; i <= m; i <<= 1) { for (int j = i >> 1; j > 0; j = j >> 1) { if (indexA != NULL && indexB != NULL) { KernelBitonicSort2D<DTYPE> << <dim3(cudaGrids[0], cudaGrids[1]), dim3(cudaBlocks[0], cudaBlocks[1]) >> > (buf, (int*)bufIndex, j, i, m, n); } else { KernelBitonicSort2D<DTYPE> << <dim3(cudaGrids[0], cudaGrids[1]), dim3(cudaBlocks[0], cudaBlocks[1]) >> > (buf, j, i, m, n); } } } GDevs.GetCudaThread2D(a->devID, k, n, MAX_INT, cudaGrids, cudaBlocks); /* copy result to the output tensor */ KernelReorganizeBack<DTYPE> << <dim3(cudaGrids[1], cudaGrids[0]), dim3(cudaBlocks[1], cudaBlocks[0]) >> > (buf, b->data, m, n, stride, k, blockNum); if (indexA != NULL && indexB != NULL) KernelReorganizeBack<int> << <dim3(cudaGrids[1], cudaGrids[0]), dim3(cudaBlocks[1], cudaBlocks[0]) >> > (bufIndex, indexB->data, m, n, stride, k, blockNum); if (mem != NULL) { mem->ReleaseBuf(a->devID, n * m * a->unitSize); mem->UnlockBuf(); } else XMemFree(a->devID, buf); if (indexA != NULL && indexB != 
NULL) if (mem != NULL) mem->ReleaseBuf(a->devID, n * m * sizeof(int)); else XMemFree(a->devID, bufIndex); ProtectCudaDev(a->devID, devIDBackup); } #endif // USE_ROCM } // namespace nts(NiuTrans.Tensor)
8b3ceb9801ab6a523bead54e8dce958a018d9a8b.cu
/* NiuTrans.Tensor - an open-source tensor library * Copyright (C) 2017, Natural Language Processing Lab, Northeastern University. * All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* * $Created by: XIAO Tong (email: xiaotong@mail.neu.edu.cn) 2018-04-24 */ #include "../../XDevice.h" #include "../../XUtility.h" #include "../../XTensor.h" #include "Sort.h" #include "Sort.cuh" namespace nts { // namespace nts(NiuTrans.Tensor) #ifdef USE_CUDA /* bitonic sort (for each row in a matrix) >> data - pointer to the data array >> index - index data array >> j - segment/distance for comparsion >> k - length of the monotonic sequence >> m - column number of the matrix >> n - row number of the matrix */ template<class T> __global__ void KernelBitonicSort2D(void * data, int j, int k, int m, int n) { const unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x; const unsigned int row = blockDim.y * blockIdx.y + threadIdx.y; if (idx >= m || row >= n) return; T * items = (T*)data + m * row; int ixj = idx^j; if (ixj > idx) { if ((idx&k) == 0 && items[idx] < items[ixj]) { T tmp = items[idx]; items[idx] = items[ixj]; items[ixj] = tmp; } if ((idx&k) != 0 && items[idx] > items[ixj]) { T tmp = items[idx]; items[idx] = items[ixj]; items[ixj] = tmp; } } } /* bitonic sort (for each row in a matrix) with index >> data - pointer to the data array >> index - index data array >> j - segment/distance for comparsion >> k - length of the monotonic sequence >> m - column number of the 
matrix >> n - row number of the matrix */ template<class T> __global__ void KernelBitonicSort2D(void * data, int * index, int j, int k, int m, int n) { const unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x; const unsigned int row = blockDim.y * blockIdx.y + threadIdx.y; if (idx >= m || row >= n) return; T * items = (T*)data + m * row; int * indexOnSite = index + m * row; int ixj = idx^j; if (ixj > idx) { if ((idx&k) == 0 && items[idx] < items[ixj]) { T tmp = items[idx]; items[idx] = items[ixj]; items[ixj] = tmp; int tmp2 = indexOnSite[idx]; indexOnSite[idx] = indexOnSite[ixj]; indexOnSite[ixj] = tmp2; } if ((idx&k) != 0 && items[idx] > items[ixj]) { T tmp = items[idx]; items[idx] = items[ixj]; items[ixj] = tmp; int tmp2 = indexOnSite[idx]; indexOnSite[idx] = indexOnSite[ixj]; indexOnSite[ixj] = tmp2; } } } /* reorganize data blocks (in a tensor) into a matrix. In each (source) block we have stride * strideNum items, where strideNum means the items along the leading dimension. In the target matrix, each row keeps strideNum items along the leading dimension in each source block. 
>> source - source data array >> target - target data array >> srcStride - how many items we need to go over we move to the next >> srcStrideNum - size of the leading dimension >> srcBlockNum - number of the source blocks >> tgtColNum - number of columns in the target matrix >> tgtRowNum - number of rows in the target matrix */ template<class T> __global__ void KernelReorganize(void * source, void * target, int srcStride, int srcStrideNum, int srcBlockNum, int tgtColNum, int tgtRowNum) { __shared__ int iBlock[MAX_CUDA_THREAD_NUM_PER_BLOCK]; __shared__ int iOffset[MAX_CUDA_THREAD_NUM_PER_BLOCK]; /* index along the "stride" dimension */ int i = blockDim.x * blockIdx.x + threadIdx.x; /* index along the leading dimension */ int j = blockDim.y * blockIdx.y + threadIdx.y; if (i >= srcStride * srcBlockNum || j >= srcStrideNum) return; if (threadIdx.y == 0) { iBlock[threadIdx.x] = i / srcStride; iOffset[threadIdx.x] = i % srcStride; } __syncthreads(); T * s = (T*)source + (iBlock[threadIdx.x] * srcStrideNum + j) * srcStride + iOffset[threadIdx.x]; T * t = (T*)target + (iBlock[threadIdx.x] * srcStride + iOffset[threadIdx.x]) * tgtColNum + j; *t = *s; } /* copy back for "KernelReorganize" >> source - source data array >> target - target data array >> srcColNum - number of columns in the source matrix >> srcRowNum - number of rows in the source matrix >> tgtStride - how many items we need to go over we move to the next >> tgtStrideNum - size of the leading dimension >> tgtBlockNum - number of the target blocks */ template<class T> __global__ void KernelReorganizeBack(void * source, void * target, int srcColNum, int srcRowNum, int tgtStride, int tgtStrideNum, int tgtBlockNum) { __shared__ int iBlock[MAX_CUDA_THREAD_NUM_PER_BLOCK]; __shared__ int iOffset[MAX_CUDA_THREAD_NUM_PER_BLOCK]; /* index along the "stride" dimension */ int i = blockDim.x * blockIdx.x + threadIdx.x; /* index along the leading dimension */ int j = blockDim.y * blockIdx.y + threadIdx.y; if (i >= tgtStride * 
tgtBlockNum || j >= tgtStrideNum) return; if (threadIdx.y == 0) { iBlock[threadIdx.x] = i / tgtStride; iOffset[threadIdx.x] = i % tgtStride; } __syncthreads(); T * s = (T*)source + (iBlock[threadIdx.x] * tgtStride + iOffset[threadIdx.x]) * srcColNum + j; T * t = (T*)target + (iBlock[threadIdx.x] * tgtStrideNum + j) * tgtStride + iOffset[threadIdx.x]; *t = *s; } /* set the data arrry with a default value >> data - data array >> value - default value >> size - size of the array */ template<class T> __global__ void KernelSetDataArray(T * data, T value, int size) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < size) data[i] = value; } /* sort the tensor along a given dimension >> a - input >> b - output >> indexA - input index tensor >> indexB - output index tensor >> dim - specified dimension >> k - top-k results are returned */ void _CudaSortBig(const XTensor * a, XTensor * b, XTensor * indexA, XTensor * indexB, int dim, int k) { CheckNTErrors((a && b), "Empty input tensor!"); CheckNTErrors((a->unitSize == b->unitSize), "Unmatched tensors!"); CheckNTErrors((a->order > dim && dim >= 0), "Incorrect dimension specified!"); CheckNTErrors((a->dataType == DEFAULT_DTYPE), "TODO!"); if (k < 0 || k > b->dimSize[dim]) k = b->dimSize[dim]; XMem * mem = a->mem; int stride = 1; int blockNum = 1; int strideNum = a->dimSize[dim]; for (int i = 0; i < dim; i++) blockNum *= a->dimSize[i]; for (int i = dim + 1; i < a->order; i++) stride *= a->dimSize[i]; int m = GetNextPower2(strideNum); int n = stride * blockNum; //void * buf = mem != NULL ? mem->AllocBuf(a->devID, n * m * a->unitSize) : XMemAlloc(a->devID, n * m * a->unitSize); void * buf; if (mem != NULL) { mem->LockBuf(); buf = mem->AllocBuf(a->devID, n * m * a->unitSize); } else { buf = XMemAlloc(a->devID, n * m * a->unitSize); } void * bufIndex = NULL; if (indexA != NULL && indexB != NULL) { bufIndex = mem != NULL ? 
mem->AllocBuf(a->devID, n * m * sizeof(int)) : XMemAlloc(a->devID, n * m * sizeof(int)); } int cudaGrids[3]; int cudaBlocks[3]; GDevs.GetCudaThread(a->devID, m * n, cudaGrids, cudaBlocks); int devIDBackup; ProtectCudaDev(a->devID, devIDBackup); /* set the buffer to the "min" value */ KernelSetDataArray<DTYPE> << <dim3(cudaGrids[0]), dim3(cudaBlocks[0]) >> > ((DTYPE*)buf, DTYPE_MIN, m * n); GDevs.GetCudaThread2D(a->devID, strideNum, n, MAX_INT, cudaGrids, cudaBlocks); /* reorganize the data into a matrix */ KernelReorganize<DTYPE> << <dim3(cudaGrids[1], cudaGrids[0]), dim3(cudaBlocks[1], cudaBlocks[0]) >> > (a->data, buf, stride, strideNum, blockNum, m, n); /* reorganize the index into a matrix */ if (indexA != NULL && indexB != NULL) KernelReorganize<int> << <dim3(cudaGrids[1], cudaGrids[0]), dim3(cudaBlocks[1], cudaBlocks[0]) >> > (indexA->data, bufIndex, stride, strideNum, blockNum, m, n); GDevs.GetCudaThread2D(a->devID, m, n, MAX_INT, cudaGrids, cudaBlocks); /* bitonic sorting */ for (int i = 2; i <= m; i <<= 1) { for (int j = i >> 1; j > 0; j = j >> 1) { if (indexA != NULL && indexB != NULL) { KernelBitonicSort2D<DTYPE> << <dim3(cudaGrids[0], cudaGrids[1]), dim3(cudaBlocks[0], cudaBlocks[1]) >> > (buf, (int*)bufIndex, j, i, m, n); } else { KernelBitonicSort2D<DTYPE> << <dim3(cudaGrids[0], cudaGrids[1]), dim3(cudaBlocks[0], cudaBlocks[1]) >> > (buf, j, i, m, n); } } } GDevs.GetCudaThread2D(a->devID, k, n, MAX_INT, cudaGrids, cudaBlocks); /* copy result to the output tensor */ KernelReorganizeBack<DTYPE> << <dim3(cudaGrids[1], cudaGrids[0]), dim3(cudaBlocks[1], cudaBlocks[0]) >> > (buf, b->data, m, n, stride, k, blockNum); if (indexA != NULL && indexB != NULL) KernelReorganizeBack<int> << <dim3(cudaGrids[1], cudaGrids[0]), dim3(cudaBlocks[1], cudaBlocks[0]) >> > (bufIndex, indexB->data, m, n, stride, k, blockNum); if (mem != NULL) { mem->ReleaseBuf(a->devID, n * m * a->unitSize); mem->UnlockBuf(); } else XMemFree(a->devID, buf); if (indexA != NULL && indexB != 
NULL) if (mem != NULL) mem->ReleaseBuf(a->devID, n * m * sizeof(int)); else XMemFree(a->devID, bufIndex); ProtectCudaDev(a->devID, devIDBackup); } #endif // USE_CUDA } // namespace nts(NiuTrans.Tensor)
bbe528a02fd685d02eb08c6a297ddb8992392be6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <thrust/transform.h> #include <thrust/device_ptr.h> #include <boost/log/trivial.hpp> #include "cudatools/deviceChunk.cuh" #include "phaseField.cuh" #include "evolve.cuh" #include "cudatools/function.cuh" #include "marchingCubes.cuh" using namespace commonns; using namespace std; using namespace thrust; __global__ void regularisationKernel(float* phase, float* Lphase, float* LLphase, Size3D gridDim, AlgParams aParams){ Point3D p = getThread3D(); FltFunc3D phasef(gridDim, phase); FltFunc3D Lphasef(gridDim, Lphase); FltFunc3D LLphasef(gridDim, LLphase); if(p >= 0 && p < Point3D(gridDim)){ float wsq = pow(aParams.w,2); float norm = aParams.regNormTerm; phasef[p] = phasef[p] + norm*(-1*(wsq/16) * LLphasef[p] - Lphasef[p] - (21/wsq)*(pow(phasef[p],3)-phasef[p])); } } // Parameters needed: // w, regNormTerm // gridSize, gridRes void regularisePhaseField(float* phase, GridParams gProps, AlgParams aParams){ Size3D gridDim = gProps.gridSize; int gridRes = gProps.gridRes; int arrLen = gridDim.vol(); GpuConf3D conf(gridDim, SQ_TPB_3D); DeviceChunk<float> Lphase(arrLen); DeviceChunk<float> LLphase(arrLen); hipLaunchKernelGGL(( computeL), dim3(conf.grid()), dim3(conf.block()), 0, 0, Lphase.getPtr(), phase, gridDim, gridRes); gpuErrchk(hipPeekAtLastError()); gpuErrchk(hipDeviceSynchronize()); hipLaunchKernelGGL(( computeL), dim3(conf.grid()), dim3(conf.block()), 0, 0, LLphase.getPtr(), Lphase.getPtr(), gridDim, gridRes); gpuErrchk(hipPeekAtLastError()); gpuErrchk(hipDeviceSynchronize()); hipLaunchKernelGGL(( regularisationKernel), dim3(conf.grid()), dim3(conf.block()), 0, 0, phase, Lphase.getPtr(), LLphase.getPtr(), gridDim, aParams); gpuErrchk(hipPeekAtLastError()); gpuErrchk(hipDeviceSynchronize()); } // Surface points scanning __global__ void markSurfacePoints(Func3D<float> levelSet, Func3D<uint32_t> result, GridParams gParams){ int3 p = getThread3D().getInt3(); 
int3 size = gParams.gridSize.geti3(); const uint32_t SURF_PT(_SURF_PT_MARK); const uint32_t NOT_SURF_PT(_NOT_SURF_PT_MARK); if(p < size){ result[p] = NOT_SURF_PT; Cubes marchingCubes(levelSet); if(p < size-IONES3 && marchingCubes.onIsoSurface(p)){ result[p] = SURF_PT; } } } __global__ void createIdFunc(Func3D<int3> func){ int3 p = getThread3D().getInt3(); int3 size = func.getSize().geti3(); if(p < size){ func[p] = p; } } dchunk<int3>::uptr_t getSurfacePointsList(dchunk_float& d_levelSet, GridParams gParams){ Size3D gridSize = gParams.gridSize; func3_f levelSet = d_levelSet.funcView(gridSize); GpuConf confGrid = WorkManager(gridSize, 4).conf(); dchunk_uint32 d_surfaceMask(gridSize.vol()); d_surfaceMask.fill(uint32_t(_NOT_SURF_PT_MARK)); func3<uint32_t> surfaceMask = d_surfaceMask.funcView(gridSize); hipLaunchKernelGGL(( markSurfacePoints), dim3(confGrid.grid()), dim3(confGrid.block()), 0, 0, levelSet, surfaceMask, gParams); gpuErrchk(hipPeekAtLastError()); gpuErrchk(hipDeviceSynchronize()); dchunk<int3> d_idFunc(gridSize.vol()); func3<int3> idFunc = d_idFunc.funcView(gridSize); hipLaunchKernelGGL(( createIdFunc), dim3(confGrid.grid()), dim3(confGrid.block()), 0, 0, idFunc); gpuErrchk(hipPeekAtLastError()); gpuErrchk(hipDeviceSynchronize()); int nSurfPts = count(device, d_surfaceMask.tbegin(), d_surfaceMask.tend(), _SURF_PT_MARK); dchunk<int3>::uptr_t d_surfPts = dchunk<int3>::make_uptr(nSurfPts); copy_if(device, d_idFunc.tbegin(), d_idFunc.tend(), d_surfaceMask.tbegin(), d_surfPts->tbegin(), gt(_NOT_SURF_PT_MARK)); return d_surfPts; } // Level set clipping struct compare_pts { int3 mask; __host__ __device__ compare_pts(int3 a_mask) : mask(a_mask){} __host__ __device__ bool operator()(int3 a, int3 b){ return a*mask < b*mask; } }; std::pair<int3, int3> getFieldExtrema(dchunk_float& d_levelSet, GridParams gridProps){ std::cout << "getFieldExtrema" << gridProps.gridSize << std::endl; dchunk<int3>::uptr_t surfPts = getSurfacePointsList(d_levelSet, gridProps); auto axis_X 
= thrust::minmax_element( thrust::device, surfPts->tbegin(), surfPts->tend(), compare_pts(EU.getInt3())); auto axis_Y = thrust::minmax_element( thrust::device, surfPts->tbegin(), surfPts->tend(), compare_pts(EV.getInt3())); auto axis_Z = thrust::minmax_element( thrust::device, surfPts->tbegin(), surfPts->tend(), compare_pts(EW.getInt3())); int3 minX = *axis_X.first; int3 maxX = *axis_X.second+1; int3 minY = *axis_Y.first; int3 maxY = *axis_Y.second+1; int3 minZ = *axis_Z.first; int3 maxZ = *axis_Z.second+1; int3 mins = make_int3(minX.x, minY.y, minZ.z); int3 maxs = make_int3(maxX.x, maxY.y, maxZ.z); return std::make_pair(mins, maxs); } // input < output+2! __global__ void copyIntoFrame(func3_f input, func3_f output, int frameSize){ int3 p = getThread3D().getInt3(); int3 sizei = input.getSize().geti3(); int3 sizeo = output.getSize().geti3(); if(p >= IZEROS3 && p < sizei){ output[p+frameSize*IONES3] = input[p]; } } dchunk_float::uptr_t makeFrame(dchunk_float& d_field, Size3D size, int frameSize){ func3_f f_field = d_field.funcView(size); int3 resultSize = size.geti3()+ 2*frameSize*IONES3; dchunk_float::uptr_t ud_result = dchunk_float::make_uptr(mul(resultSize)); ud_result->fill(0.0f); func3_f f_result = ud_result->funcView(Size3D(resultSize)); GpuConf conf = WorkManager(size, 4).conf(); hipLaunchKernelGGL(( copyIntoFrame), dim3(conf.grid()), dim3(conf.block()), 0, 0, f_field, f_result, frameSize); gpuErrchk(hipPeekAtLastError()); gpuErrchk(hipDeviceSynchronize()); return ud_result; }
bbe528a02fd685d02eb08c6a297ddb8992392be6.cu
#include <iostream> #include <thrust/transform.h> #include <thrust/device_ptr.h> #include <boost/log/trivial.hpp> #include "cudatools/deviceChunk.cuh" #include "phaseField.cuh" #include "evolve.cuh" #include "cudatools/function.cuh" #include "marchingCubes.cuh" using namespace commonns; using namespace std; using namespace thrust; __global__ void regularisationKernel(float* phase, float* Lphase, float* LLphase, Size3D gridDim, AlgParams aParams){ Point3D p = getThread3D(); FltFunc3D phasef(gridDim, phase); FltFunc3D Lphasef(gridDim, Lphase); FltFunc3D LLphasef(gridDim, LLphase); if(p >= 0 && p < Point3D(gridDim)){ float wsq = pow(aParams.w,2); float norm = aParams.regNormTerm; phasef[p] = phasef[p] + norm*(-1*(wsq/16) * LLphasef[p] - Lphasef[p] - (21/wsq)*(pow(phasef[p],3)-phasef[p])); } } // Parameters needed: // w, regNormTerm // gridSize, gridRes void regularisePhaseField(float* phase, GridParams gProps, AlgParams aParams){ Size3D gridDim = gProps.gridSize; int gridRes = gProps.gridRes; int arrLen = gridDim.vol(); GpuConf3D conf(gridDim, SQ_TPB_3D); DeviceChunk<float> Lphase(arrLen); DeviceChunk<float> LLphase(arrLen); computeL<<<conf.grid(), conf.block()>>>(Lphase.getPtr(), phase, gridDim, gridRes); gpuErrchk(cudaPeekAtLastError()); gpuErrchk(cudaDeviceSynchronize()); computeL<<<conf.grid(), conf.block()>>>(LLphase.getPtr(), Lphase.getPtr(), gridDim, gridRes); gpuErrchk(cudaPeekAtLastError()); gpuErrchk(cudaDeviceSynchronize()); regularisationKernel<<<conf.grid(), conf.block()>>>(phase, Lphase.getPtr(), LLphase.getPtr(), gridDim, aParams); gpuErrchk(cudaPeekAtLastError()); gpuErrchk(cudaDeviceSynchronize()); } // Surface points scanning __global__ void markSurfacePoints(Func3D<float> levelSet, Func3D<uint32_t> result, GridParams gParams){ int3 p = getThread3D().getInt3(); int3 size = gParams.gridSize.geti3(); const uint32_t SURF_PT(_SURF_PT_MARK); const uint32_t NOT_SURF_PT(_NOT_SURF_PT_MARK); if(p < size){ result[p] = NOT_SURF_PT; Cubes 
marchingCubes(levelSet); if(p < size-IONES3 && marchingCubes.onIsoSurface(p)){ result[p] = SURF_PT; } } } __global__ void createIdFunc(Func3D<int3> func){ int3 p = getThread3D().getInt3(); int3 size = func.getSize().geti3(); if(p < size){ func[p] = p; } } dchunk<int3>::uptr_t getSurfacePointsList(dchunk_float& d_levelSet, GridParams gParams){ Size3D gridSize = gParams.gridSize; func3_f levelSet = d_levelSet.funcView(gridSize); GpuConf confGrid = WorkManager(gridSize, 4).conf(); dchunk_uint32 d_surfaceMask(gridSize.vol()); d_surfaceMask.fill(uint32_t(_NOT_SURF_PT_MARK)); func3<uint32_t> surfaceMask = d_surfaceMask.funcView(gridSize); markSurfacePoints<<<confGrid.grid(), confGrid.block()>>>(levelSet, surfaceMask, gParams); gpuErrchk(cudaPeekAtLastError()); gpuErrchk(cudaDeviceSynchronize()); dchunk<int3> d_idFunc(gridSize.vol()); func3<int3> idFunc = d_idFunc.funcView(gridSize); createIdFunc<<<confGrid.grid(), confGrid.block()>>>(idFunc); gpuErrchk(cudaPeekAtLastError()); gpuErrchk(cudaDeviceSynchronize()); int nSurfPts = count(device, d_surfaceMask.tbegin(), d_surfaceMask.tend(), _SURF_PT_MARK); dchunk<int3>::uptr_t d_surfPts = dchunk<int3>::make_uptr(nSurfPts); copy_if(device, d_idFunc.tbegin(), d_idFunc.tend(), d_surfaceMask.tbegin(), d_surfPts->tbegin(), gt(_NOT_SURF_PT_MARK)); return d_surfPts; } // Level set clipping struct compare_pts { int3 mask; __host__ __device__ compare_pts(int3 a_mask) : mask(a_mask){} __host__ __device__ bool operator()(int3 a, int3 b){ return a*mask < b*mask; } }; std::pair<int3, int3> getFieldExtrema(dchunk_float& d_levelSet, GridParams gridProps){ std::cout << "getFieldExtrema" << gridProps.gridSize << std::endl; dchunk<int3>::uptr_t surfPts = getSurfacePointsList(d_levelSet, gridProps); auto axis_X = thrust::minmax_element( thrust::device, surfPts->tbegin(), surfPts->tend(), compare_pts(EU.getInt3())); auto axis_Y = thrust::minmax_element( thrust::device, surfPts->tbegin(), surfPts->tend(), compare_pts(EV.getInt3())); auto axis_Z = 
thrust::minmax_element( thrust::device, surfPts->tbegin(), surfPts->tend(), compare_pts(EW.getInt3())); int3 minX = *axis_X.first; int3 maxX = *axis_X.second+1; int3 minY = *axis_Y.first; int3 maxY = *axis_Y.second+1; int3 minZ = *axis_Z.first; int3 maxZ = *axis_Z.second+1; int3 mins = make_int3(minX.x, minY.y, minZ.z); int3 maxs = make_int3(maxX.x, maxY.y, maxZ.z); return std::make_pair(mins, maxs); } // input < output+2! __global__ void copyIntoFrame(func3_f input, func3_f output, int frameSize){ int3 p = getThread3D().getInt3(); int3 sizei = input.getSize().geti3(); int3 sizeo = output.getSize().geti3(); if(p >= IZEROS3 && p < sizei){ output[p+frameSize*IONES3] = input[p]; } } dchunk_float::uptr_t makeFrame(dchunk_float& d_field, Size3D size, int frameSize){ func3_f f_field = d_field.funcView(size); int3 resultSize = size.geti3()+ 2*frameSize*IONES3; dchunk_float::uptr_t ud_result = dchunk_float::make_uptr(mul(resultSize)); ud_result->fill(0.0f); func3_f f_result = ud_result->funcView(Size3D(resultSize)); GpuConf conf = WorkManager(size, 4).conf(); copyIntoFrame<<<conf.grid(), conf.block()>>>(f_field, f_result, frameSize); gpuErrchk(cudaPeekAtLastError()); gpuErrchk(cudaDeviceSynchronize()); return ud_result; }
40ee0a55da4e3b97aa2906262a1203884c1c7011.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "WaveEquationKernels.cuh" __global__ void WaveEquation_kernel(float3* slice1, float3* slice2, float3* slice3, unsigned int gridSize, float deltaTime) { unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < (gridSize * gridSize)) { unsigned int x = i / gridSize; unsigned int y = i % gridSize; unsigned int x_next = (x == (gridSize - 1)) ? 0 : x + 1; unsigned int x_prev = (x == 0) ? (gridSize - 1) : x - 1; unsigned int y_next = (y == (gridSize - 1)) ? 0 : y + 1; unsigned int y_prev = (y == 0) ? (gridSize - 1) : y - 1; // write output vertex slice3[y*gridSize+x].y = 0.01f * deltaTime*deltaTime * (slice1[y_next*gridSize+x].y + // 1.0f is the square of the wave speed slice1[y_prev*gridSize+x].y + slice1[y*gridSize+x_next].y + slice1[y*gridSize+x_prev].y - 4.0f * slice1[y*gridSize+x].y) + 2.0f * slice2[y*gridSize+x].y - slice1[y*gridSize+x].y; } } // __global__ void WaveSource_kernel(float3* target, float3* source, unsigned int gridSize, float amplitude, float deltaTime) // { // unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; // if (i < (gridSize * gridSize)) // { // // if (source[i].y > 0.0f) { target[i].y += (target[i].y < amplitude) ? min(source[i].y*deltaTime, amplitude-target[i].y) : 0.0f; } // // if (source[i].y < 0.0f) { target[i].y += (target[i].y > 0.0) ? max(source[i].y*deltaTime, 0-target[i].y) : 0.0f; } // // target[i].y += (source[i].y > 0.0f) ? ((target[i].y < amplitude) ? min(source[i].y * deltaTime, amplitude - target[i].y) : 0.0f) : ((target[i].y > 0.0) ? 
max(source[i].y * deltaTime, 0 - target[i].y) : 0.0f); // } // } __global__ void UpdateDisplacement_kernel(float3* displacement, float3* velocity, unsigned int gridSize, float deltaTime) { unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < (gridSize * gridSize)) { displacement[i].y += velocity[i].y * deltaTime; } } __global__ void UpdateVelocity_kernel(float3* velocity, float3* acceleration, unsigned int gridSize, float deltaTime) { unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < (gridSize * gridSize)) { velocity[i].y += acceleration[i].y * deltaTime; velocity[i].y *= 0.9999f; // put in some friction like force } } __global__ void UpdateAcceleration_kernel(float3* acceleration, float3* displacement, unsigned int gridSize, float deltaTime) { unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < (gridSize * gridSize)) { unsigned int x = i / gridSize; unsigned int y = i % gridSize; unsigned int x_next = (x == (gridSize - 1)) ? 0 : x + 1; unsigned int x_prev = (x == 0) ? (gridSize - 1) : x - 1; unsigned int y_next = (y == (gridSize - 1)) ? 0 : y + 1; unsigned int y_prev = (y == 0) ? (gridSize - 1) : y - 1; // acceleration is just the laplacian of the displacement field (times c**2, but make it to be 1) acceleration[y * gridSize + x].y = (displacement[y_next * gridSize + x].y + // 1.0f is the square of the wave speed displacement[y_prev * gridSize + x].y + displacement[y * gridSize + x_next].y + displacement[y * gridSize + x_prev].y - 4.0f * displacement[y * gridSize + x].y); } }
40ee0a55da4e3b97aa2906262a1203884c1c7011.cu
#include "WaveEquationKernels.cuh" __global__ void WaveEquation_kernel(float3* slice1, float3* slice2, float3* slice3, unsigned int gridSize, float deltaTime) { unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < (gridSize * gridSize)) { unsigned int x = i / gridSize; unsigned int y = i % gridSize; unsigned int x_next = (x == (gridSize - 1)) ? 0 : x + 1; unsigned int x_prev = (x == 0) ? (gridSize - 1) : x - 1; unsigned int y_next = (y == (gridSize - 1)) ? 0 : y + 1; unsigned int y_prev = (y == 0) ? (gridSize - 1) : y - 1; // write output vertex slice3[y*gridSize+x].y = 0.01f * deltaTime*deltaTime * (slice1[y_next*gridSize+x].y + // 1.0f is the square of the wave speed slice1[y_prev*gridSize+x].y + slice1[y*gridSize+x_next].y + slice1[y*gridSize+x_prev].y - 4.0f * slice1[y*gridSize+x].y) + 2.0f * slice2[y*gridSize+x].y - slice1[y*gridSize+x].y; } } // __global__ void WaveSource_kernel(float3* target, float3* source, unsigned int gridSize, float amplitude, float deltaTime) // { // unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; // if (i < (gridSize * gridSize)) // { // // if (source[i].y > 0.0f) { target[i].y += (target[i].y < amplitude) ? min(source[i].y*deltaTime, amplitude-target[i].y) : 0.0f; } // // if (source[i].y < 0.0f) { target[i].y += (target[i].y > 0.0) ? max(source[i].y*deltaTime, 0-target[i].y) : 0.0f; } // // target[i].y += (source[i].y > 0.0f) ? ((target[i].y < amplitude) ? min(source[i].y * deltaTime, amplitude - target[i].y) : 0.0f) : ((target[i].y > 0.0) ? 
max(source[i].y * deltaTime, 0 - target[i].y) : 0.0f); // } // } __global__ void UpdateDisplacement_kernel(float3* displacement, float3* velocity, unsigned int gridSize, float deltaTime) { unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < (gridSize * gridSize)) { displacement[i].y += velocity[i].y * deltaTime; } } __global__ void UpdateVelocity_kernel(float3* velocity, float3* acceleration, unsigned int gridSize, float deltaTime) { unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < (gridSize * gridSize)) { velocity[i].y += acceleration[i].y * deltaTime; velocity[i].y *= 0.9999f; // put in some friction like force } } __global__ void UpdateAcceleration_kernel(float3* acceleration, float3* displacement, unsigned int gridSize, float deltaTime) { unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < (gridSize * gridSize)) { unsigned int x = i / gridSize; unsigned int y = i % gridSize; unsigned int x_next = (x == (gridSize - 1)) ? 0 : x + 1; unsigned int x_prev = (x == 0) ? (gridSize - 1) : x - 1; unsigned int y_next = (y == (gridSize - 1)) ? 0 : y + 1; unsigned int y_prev = (y == 0) ? (gridSize - 1) : y - 1; // acceleration is just the laplacian of the displacement field (times c**2, but make it to be 1) acceleration[y * gridSize + x].y = (displacement[y_next * gridSize + x].y + // 1.0f is the square of the wave speed displacement[y_prev * gridSize + x].y + displacement[y * gridSize + x_next].y + displacement[y * gridSize + x_prev].y - 4.0f * displacement[y * gridSize + x].y); } }
4bc1b6b420978b8fc97b7d991ceda5849d70e302.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <vector> #include "caffe/filler.hpp" #include "caffe/layers/inner_product_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void MaskGenerator(const int n, const Dtype* weight, Dtype* weight_mask, const Dtype upperlimit, const Dtype lowerlimit) { CUDA_KERNEL_LOOP(index, n) { weight_mask[index] = weight[index] > lowerlimit ? (weight[index] < upperlimit ? (Dtype)0. : (Dtype)1.) : (Dtype)1.; } } template <typename Dtype> void InnerProductLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); const Dtype* weight; // DNS if (sparsity_term_ && this->phase_ == TRAIN) { const Dtype* const_weight_mask; Dtype* weight_mask; Dtype* weight_mask_diff; const Dtype* const_weight_mask_diff; const int weight_number = this->blobs_[0]->count(); if (bias_term_) { weight_mask = this->blobs_[2]->mutable_gpu_data(); const_weight_mask = this->blobs_[2]->gpu_data(); weight_mask_diff = this->blobs_[2]->mutable_gpu_diff(); const_weight_mask_diff = this->blobs_[2]->gpu_diff(); } else { weight_mask = this->blobs_[1]->mutable_gpu_data(); const_weight_mask = this->blobs_[1]->gpu_data(); weight_mask_diff = this->blobs_[1]->mutable_gpu_diff(); const_weight_mask_diff = this->blobs_[1]->gpu_diff(); } if (this->surgey_term_) { Dtype mean_value; Dtype std_value; caffe_gpu_set(weight_number, (Dtype)1., weight_mask_diff); caffe_gpu_dot(weight_number, const_weight_mask_diff, this->blobs_[0]->gpu_data(), &mean_value); mean_value /= Dtype(weight_number); caffe_gpu_scalar(weight_number, -mean_value, this->blobs_[0]->gpu_data(), weight_mask_diff); caffe_gpu_mul(weight_number, const_weight_mask_diff, const_weight_mask_diff, weight_mask_diff); caffe_gpu_asum(weight_number, const_weight_mask_diff, &std_value); std_value /= 
Dtype(weight_number); std_value = sqrt(std_value); // According to 68-95-99.7 rule, we prune the weights distributed between [-1.281*Delte, 1.281*Delte]. // More Details: https://en.wikipedia.org/wiki/Standard_deviation // We tried to make the selecion of hyperparameter more convencing. const Dtype upper_threshold_value = mean_value + threshold*std_value; const Dtype lower_threshold_value = mean_value - threshold*std_value; hipLaunchKernelGGL(( MaskGenerator<Dtype>), dim3(CAFFE_GET_BLOCKS(weight_number)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, weight_number, this->blobs_[0]->gpu_data(), weight_mask, upper_threshold_value, lower_threshold_value); Dtype s_connection_num; Dtype a_connection_num; caffe_gpu_asum(weight_number, const_weight_mask, &a_connection_num); s_connection_num = weight_number - a_connection_num; this->s_connection_num = s_connection_num; this->a_connection_num = a_connection_num; } caffe_gpu_mul(weight_number, this->blobs_[0]->gpu_data(), const_weight_mask, weight_mask_diff); weight = const_weight_mask_diff; } else { if (sparsity_term_) { const Dtype* const_weight_mask; if (bias_term_) { const_weight_mask = this->blobs_[2]->gpu_data(); } else { const_weight_mask = this->blobs_[1]->gpu_data(); } caffe_gpu_mul(this->blobs_[0]->count(), this->blobs_[0]->gpu_data(), const_weight_mask, this->blobs_[0]->mutable_gpu_data()); } weight = this->blobs_[0]->gpu_data(); } if (M_ == 1) { caffe_gpu_gemv<Dtype>(CblasNoTrans, N_, K_, (Dtype)1., weight, bottom_data, (Dtype)0., top_data); if (bias_term_) caffe_gpu_axpy<Dtype>(N_, bias_multiplier_.cpu_data()[0], this->blobs_[1]->gpu_data(), top_data); } else { caffe_gpu_gemm<Dtype>(CblasNoTrans, transpose_ ? 
CblasNoTrans : CblasTrans, M_, N_, K_, (Dtype)1., bottom_data, weight, (Dtype)0., top_data); if (bias_term_) caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, N_, 1, (Dtype)1., bias_multiplier_.gpu_data(), this->blobs_[1]->gpu_data(), (Dtype)1., top_data); } } template <typename Dtype> void InnerProductLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (this->param_propagate_down_[0]) { const Dtype* top_diff = top[0]->gpu_diff(); const Dtype* bottom_data = bottom[0]->gpu_data(); // Gradient with respect to weight if (transpose_) { caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans, K_, N_, M_, (Dtype)1., bottom_data, top_diff, (Dtype)1., this->blobs_[0]->mutable_gpu_diff()); } else { caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans, N_, K_, M_, (Dtype)1., top_diff, bottom_data, (Dtype)1., this->blobs_[0]->mutable_gpu_diff()); } } if (bias_term_ && this->param_propagate_down_[1]) { const Dtype* top_diff = top[0]->gpu_diff(); // Gradient with respect to bias caffe_gpu_gemv<Dtype>(CblasTrans, M_, N_, (Dtype)1., top_diff, bias_multiplier_.gpu_data(), (Dtype)1., this->blobs_[1]->mutable_gpu_diff()); } if (propagate_down[0]) { const Dtype* top_diff = top[0]->gpu_diff(); const Dtype* weight; if (sparsity_term_) { if (this->bias_term_) { weight = this->blobs_[2]->gpu_diff(); } else { weight = this->blobs_[1]->gpu_diff(); } } else { weight = this->blobs_[0]->gpu_data(); } // Gradient with respect to bottom data if (transpose_) { caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, M_, K_, N_, (Dtype)1., top_diff, weight, (Dtype)0., bottom[0]->mutable_gpu_diff()); } else { caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, K_, N_, (Dtype)1., top_diff, weight, (Dtype)0., bottom[0]->mutable_gpu_diff()); } } } INSTANTIATE_LAYER_GPU_FUNCS(InnerProductLayer); } // namespace caffe
4bc1b6b420978b8fc97b7d991ceda5849d70e302.cu
#include <vector> #include "caffe/filler.hpp" #include "caffe/layers/inner_product_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void MaskGenerator(const int n, const Dtype* weight, Dtype* weight_mask, const Dtype upperlimit, const Dtype lowerlimit) { CUDA_KERNEL_LOOP(index, n) { weight_mask[index] = weight[index] > lowerlimit ? (weight[index] < upperlimit ? (Dtype)0. : (Dtype)1.) : (Dtype)1.; } } template <typename Dtype> void InnerProductLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); const Dtype* weight; // DNS if (sparsity_term_ && this->phase_ == TRAIN) { const Dtype* const_weight_mask; Dtype* weight_mask; Dtype* weight_mask_diff; const Dtype* const_weight_mask_diff; const int weight_number = this->blobs_[0]->count(); if (bias_term_) { weight_mask = this->blobs_[2]->mutable_gpu_data(); const_weight_mask = this->blobs_[2]->gpu_data(); weight_mask_diff = this->blobs_[2]->mutable_gpu_diff(); const_weight_mask_diff = this->blobs_[2]->gpu_diff(); } else { weight_mask = this->blobs_[1]->mutable_gpu_data(); const_weight_mask = this->blobs_[1]->gpu_data(); weight_mask_diff = this->blobs_[1]->mutable_gpu_diff(); const_weight_mask_diff = this->blobs_[1]->gpu_diff(); } if (this->surgey_term_) { Dtype mean_value; Dtype std_value; caffe_gpu_set(weight_number, (Dtype)1., weight_mask_diff); caffe_gpu_dot(weight_number, const_weight_mask_diff, this->blobs_[0]->gpu_data(), &mean_value); mean_value /= Dtype(weight_number); caffe_gpu_scalar(weight_number, -mean_value, this->blobs_[0]->gpu_data(), weight_mask_diff); caffe_gpu_mul(weight_number, const_weight_mask_diff, const_weight_mask_diff, weight_mask_diff); caffe_gpu_asum(weight_number, const_weight_mask_diff, &std_value); std_value /= Dtype(weight_number); std_value = sqrt(std_value); // According to 68-95-99.7 rule, we 
prune the weights distributed between [-1.281*Delte, 1.281*Delte]. // More Details: https://en.wikipedia.org/wiki/Standard_deviation // We tried to make the selecion of hyperparameter more convencing. const Dtype upper_threshold_value = mean_value + threshold*std_value; const Dtype lower_threshold_value = mean_value - threshold*std_value; MaskGenerator<Dtype><<<CAFFE_GET_BLOCKS(weight_number), CAFFE_CUDA_NUM_THREADS>>>(weight_number, this->blobs_[0]->gpu_data(), weight_mask, upper_threshold_value, lower_threshold_value); Dtype s_connection_num; Dtype a_connection_num; caffe_gpu_asum(weight_number, const_weight_mask, &a_connection_num); s_connection_num = weight_number - a_connection_num; this->s_connection_num = s_connection_num; this->a_connection_num = a_connection_num; } caffe_gpu_mul(weight_number, this->blobs_[0]->gpu_data(), const_weight_mask, weight_mask_diff); weight = const_weight_mask_diff; } else { if (sparsity_term_) { const Dtype* const_weight_mask; if (bias_term_) { const_weight_mask = this->blobs_[2]->gpu_data(); } else { const_weight_mask = this->blobs_[1]->gpu_data(); } caffe_gpu_mul(this->blobs_[0]->count(), this->blobs_[0]->gpu_data(), const_weight_mask, this->blobs_[0]->mutable_gpu_data()); } weight = this->blobs_[0]->gpu_data(); } if (M_ == 1) { caffe_gpu_gemv<Dtype>(CblasNoTrans, N_, K_, (Dtype)1., weight, bottom_data, (Dtype)0., top_data); if (bias_term_) caffe_gpu_axpy<Dtype>(N_, bias_multiplier_.cpu_data()[0], this->blobs_[1]->gpu_data(), top_data); } else { caffe_gpu_gemm<Dtype>(CblasNoTrans, transpose_ ? 
CblasNoTrans : CblasTrans, M_, N_, K_, (Dtype)1., bottom_data, weight, (Dtype)0., top_data); if (bias_term_) caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, N_, 1, (Dtype)1., bias_multiplier_.gpu_data(), this->blobs_[1]->gpu_data(), (Dtype)1., top_data); } } template <typename Dtype> void InnerProductLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (this->param_propagate_down_[0]) { const Dtype* top_diff = top[0]->gpu_diff(); const Dtype* bottom_data = bottom[0]->gpu_data(); // Gradient with respect to weight if (transpose_) { caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans, K_, N_, M_, (Dtype)1., bottom_data, top_diff, (Dtype)1., this->blobs_[0]->mutable_gpu_diff()); } else { caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans, N_, K_, M_, (Dtype)1., top_diff, bottom_data, (Dtype)1., this->blobs_[0]->mutable_gpu_diff()); } } if (bias_term_ && this->param_propagate_down_[1]) { const Dtype* top_diff = top[0]->gpu_diff(); // Gradient with respect to bias caffe_gpu_gemv<Dtype>(CblasTrans, M_, N_, (Dtype)1., top_diff, bias_multiplier_.gpu_data(), (Dtype)1., this->blobs_[1]->mutable_gpu_diff()); } if (propagate_down[0]) { const Dtype* top_diff = top[0]->gpu_diff(); const Dtype* weight; if (sparsity_term_) { if (this->bias_term_) { weight = this->blobs_[2]->gpu_diff(); } else { weight = this->blobs_[1]->gpu_diff(); } } else { weight = this->blobs_[0]->gpu_data(); } // Gradient with respect to bottom data if (transpose_) { caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, M_, K_, N_, (Dtype)1., top_diff, weight, (Dtype)0., bottom[0]->mutable_gpu_diff()); } else { caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, K_, N_, (Dtype)1., top_diff, weight, (Dtype)0., bottom[0]->mutable_gpu_diff()); } } } INSTANTIATE_LAYER_GPU_FUNCS(InnerProductLayer); } // namespace caffe
09a5cfc17b1cdd1000be01df7f8206b62a6af97e.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hip/hip_runtime.h> #ifdef NVML #include "rocm_smi/rocm_smi.h" #endif #include "svm.h" #include "cuda_svm.h" typedef signed char schar; template <class T> __device__ static inline T min(T x,T y) { return (x<y)?x:y; } template <class T> __device__ static inline T max(T x,T y) { return (x>y)?x:y; } template <class T> __device__ static inline void swap(T& x, T& y) { T t=x; x=y; y=t; } template <class S, class T> __device__ static inline void clone(T*& dst, S* src, int n) { dst = (T *)malloc(sizeof(T) * n); memcpy((void *)dst, (void *)src, sizeof(T)*n); } __device__ static inline float powi(float base, int times) { float tmp = base, ret = 1.0; for(int t=times; t>0; t/=2) { if(t%2==1) ret*=tmp; tmp = tmp * tmp; } return ret; } #define INF HUGE_VAL #define TAU 1e-12 __device__ struct svm_model *cuda_device_svm_train_no_prob(const struct svm_problem *prob, const struct svm_parameter *param); __device__ float cuda_svm_predict_values(const svm_model *model, const svm_node *x, float* dec_values); __device__ void cuda_svm_cross_validation(const svm_problem *prob, const svm_parameter *param, int nr_fold, float *target); class CUDA_Rand { private: int seed; public: __device__ CUDA_Rand() { seed = 0; } __device__ int rand_int(const int max) { seed = ((seed * 1103515245) + 12345) & 0x7fffffff; return seed%max; } }; // // Kernel Cache // // l is the number of total data items // size is the cache size limit in bytes // class CUDA_Cache { public: __device__ CUDA_Cache(int l,long int size); __device__ ~CUDA_Cache(); // request data [0,len) // return some position p where [p,len) need to be filled // (p >= len if nothing needs to be filled) __device__ int get_data(const int index, float **data, int len); __device__ void swap_index(int i, int j); private: int l; long int size; struct head_t { head_t *prev, *next; // a circular list float *data; int len; // data[0,len) is cached in this entry }; 
head_t *head; head_t lru_head; __device__ void lru_delete(head_t *h); __device__ void lru_insert(head_t *h); }; __device__ CUDA_Cache::CUDA_Cache(int l_,long int size_):l(l_),size(size_) { head = (head_t *)malloc(sizeof(head_t) * l); // initialized to 0 memset(head, 0, sizeof(head_t) *l); size /= sizeof(float); size -= l * sizeof(head_t) / sizeof(float); size = max(size, 2 * (long int) l); // cache must be large enough for two columns lru_head.next = lru_head.prev = &lru_head; } __device__ CUDA_Cache::~CUDA_Cache() { for(head_t *h = lru_head.next; h != &lru_head; h=h->next) free(h->data); free(head); } __device__ void CUDA_Cache::lru_delete(head_t *h) { // delete from current location h->prev->next = h->next; h->next->prev = h->prev; } __device__ void CUDA_Cache::lru_insert(head_t *h) { // insert to last position h->next = &lru_head; h->prev = lru_head.prev; h->prev->next = h; h->next->prev = h; } __device__ int CUDA_Cache::get_data(const int index, float **data, int len) { head_t *h = &head[index]; if(h->len) lru_delete(h); int more = len - h->len; if (more > 0) { // free old space while(size < more) { head_t *old = lru_head.next; lru_delete(old); free(old->data); size += old->len; old->data = 0; old->len = 0; } // allocate new space float *tp = h->data; h->data = (float *)malloc(sizeof(float)*len); if (tp != NULL) { memcpy(h->data, tp, sizeof(float)*h->len); free(tp); } size -= more; swap(h->len,len); } lru_insert(h); *data = h->data; return len; } __device__ void CUDA_Cache::swap_index(int i, int j) { if(i==j) return; if(head[i].len) lru_delete(&head[i]); if(head[j].len) lru_delete(&head[j]); swap(head[i].data,head[j].data); swap(head[i].len,head[j].len); if(head[i].len) lru_insert(&head[i]); if(head[j].len) lru_insert(&head[j]); if(i>j) swap(i,j); for(head_t *h = lru_head.next; h!=&lru_head; h=h->next) { if(h->len > i) { if(h->len > j) swap(h->data[i],h->data[j]); else { // give up lru_delete(h); free(h->data); size += h->len; h->data = 0; h->len = 0; } } } } 
// // Kernel evaluation // // the static method k_function is for doing single kernel evaluation // the constructor of Kernel prepares to calculate the l*l kernel matrix // the member function get_Q is for getting one column from the Q Matrix // class CUDA_QMatrix { public: __device__ virtual float *get_Q(int column, int len) const = 0; __device__ virtual float *get_QD() const = 0; __device__ virtual void swap_index(int i, int j) const = 0; __device__ virtual ~CUDA_QMatrix() {} }; class CUDA_Kernel: public CUDA_QMatrix { public: __device__ CUDA_Kernel(int l, svm_node * const * x, const svm_parameter& param); __device__ virtual ~CUDA_Kernel(); __device__ static float k_function(const svm_node *x, const svm_node *y, const svm_parameter& param); __device__ virtual float *get_Q(int column, int len) const = 0; __device__ virtual float *get_QD() const = 0; __device__ virtual void swap_index(int i, int j) const // no so const... { swap(x[i],x[j]); if(x_square) swap(x_square[i],x_square[j]); } protected: float (CUDA_Kernel::*kernel_function)(int i, int j) const; private: const svm_node **x; float *x_square; // svm_parameter const int kernel_type; const int degree; const float gamma; const float coef0; __device__ static float dot(const svm_node *px, const svm_node *py); __device__ float kernel_linear(int i, int j) const { return dot(x[i],x[j]); } __device__ float kernel_poly(int i, int j) const { return powi(gamma*dot(x[i],x[j])+coef0,degree); } __device__ float kernel_rbf(int i, int j) const { return expf(-gamma*(x_square[i]+x_square[j]-2*dot(x[i],x[j]))); } __device__ float kernel_sigmoid(int i, int j) const { return tanh(gamma*dot(x[i],x[j])+coef0); } __device__ float kernel_precomputed(int i, int j) const { return x[i][(int)(x[j][0].value)].value; } }; __device__ CUDA_Kernel::CUDA_Kernel(int l, svm_node * const * x_, const svm_parameter& param) :kernel_type(param.kernel_type), degree(param.degree), gamma(param.gamma), coef0(param.coef0) { switch(kernel_type) { case 
LINEAR: kernel_function = &CUDA_Kernel::kernel_linear; break; case POLY: kernel_function = &CUDA_Kernel::kernel_poly; break; case RBF: kernel_function = &CUDA_Kernel::kernel_rbf; break; case SIGMOID: kernel_function = &CUDA_Kernel::kernel_sigmoid; break; case PRECOMPUTED: kernel_function = &CUDA_Kernel::kernel_precomputed; break; } clone(x,x_,l); if(kernel_type == RBF) { x_square = new float[l]; for(int i=0;i<l;i++) x_square[i] = dot(x[i],x[i]); } else x_square = 0; } __device__ CUDA_Kernel::~CUDA_Kernel() { delete[] x; delete[] x_square; } __device__ float CUDA_Kernel::dot(const svm_node *px, const svm_node *py) { float sum = 0; while(px->index != -1 && py->index != -1) { if(px->index == py->index) { sum += px->value * py->value; ++px; ++py; } else { if(px->index > py->index) ++py; else ++px; } } return sum; } __device__ float CUDA_Kernel::k_function(const svm_node *x, const svm_node *y, const svm_parameter& param) { switch(param.kernel_type) { case LINEAR: return dot(x,y); case POLY: return powi(param.gamma*dot(x,y)+param.coef0,param.degree); case RBF: { float sum = 0; while(x->index != -1 && y->index !=-1) { if(x->index == y->index) { float d = x->value - y->value; sum += d*d; ++x; ++y; } else { if(x->index > y->index) { sum += y->value * y->value; ++y; } else { sum += x->value * x->value; ++x; } } } while(x->index != -1) { sum += x->value * x->value; ++x; } while(y->index != -1) { sum += y->value * y->value; ++y; } return expf(-param.gamma*sum); } case SIGMOID: return tanhf(param.gamma*dot(x,y)+param.coef0); case PRECOMPUTED: //x: test (validation), y: SV return x[(int)(y->value)].value; default: return 0; // Unreachable } } // An SMO algorithm in Fan et al., JMLR 6(2005), p. 
1889--1918 // Solves: // // min 0.5(\alpha^T Q \alpha) + p^T \alpha // // y^T \alpha = \delta // y_i = +1 or -1 // 0 <= alpha_i <= Cp for y_i = 1 // 0 <= alpha_i <= Cn for y_i = -1 // // Given: // // Q, p, y, Cp, Cn, and an initial feasible point \alpha // l is the size of vectors and matrices // eps is the stopping tolerance // // solution will be put in \alpha, objective value will be put in obj // class CUDA_Solver { public: __device__ CUDA_Solver() {}; __device__ virtual ~CUDA_Solver() {}; struct SolutionInfo { float obj; float rho; float upper_bound_p; float upper_bound_n; float r; // for CUDA_Solver_NU }; __device__ void Solve(int l, const CUDA_QMatrix& Q, const float *p_, const schar *y_, float *alpha_, float Cp, float Cn, float eps, SolutionInfo* si, int shrinking); protected: int active_size; schar *y; float *G; // gradient of objective function enum { LOWER_BOUND, UPPER_BOUND, FREE }; char *alpha_status; // LOWER_BOUND, UPPER_BOUND, FREE float *alpha; const CUDA_QMatrix *Q; const float *QD; float eps; float Cp,Cn; float *p; int *active_set; float *G_bar; // gradient, if we treat free variables as 0 int l; bool unshrink; // XXX __device__ float get_C(int i) { return (y[i] > 0)? 
Cp : Cn; } __device__ void update_alpha_status(int i) { if(alpha[i] >= get_C(i)) alpha_status[i] = UPPER_BOUND; else if(alpha[i] <= 0) alpha_status[i] = LOWER_BOUND; else alpha_status[i] = FREE; } __device__ bool is_upper_bound(int i) { return alpha_status[i] == UPPER_BOUND; } __device__ bool is_lower_bound(int i) { return alpha_status[i] == LOWER_BOUND; } __device__ bool is_free(int i) { return alpha_status[i] == FREE; } __device__ void swap_index(int i, int j); __device__ void reconstruct_gradient(); __device__ virtual int select_working_set(int &i, int &j); __device__ virtual float calculate_rho(); __device__ virtual void do_shrinking(); private: __device__ bool be_shrunk(int i, float Gmax1, float Gmax2); }; __device__ void CUDA_Solver::swap_index(int i, int j) { Q->swap_index(i,j); swap(y[i],y[j]); swap(G[i],G[j]); swap(alpha_status[i],alpha_status[j]); swap(alpha[i],alpha[j]); swap(p[i],p[j]); swap(active_set[i],active_set[j]); swap(G_bar[i],G_bar[j]); } __device__ void CUDA_Solver::reconstruct_gradient() { // reconstruct inactive elements of G from G_bar and free variables if(active_size == l) return; int i,j; int nr_free = 0; for(j=active_size;j<l;j++) G[j] = G_bar[j] + p[j]; for(j=0;j<active_size;j++) if(is_free(j)) nr_free++; if(2*nr_free < active_size) printf("\nWARNING: using -h 0 may be faster\n"); if (nr_free*l > 2*active_size*(l-active_size)) { for(i=active_size;i<l;i++) { const float *Q_i = Q->get_Q(i,active_size); for(j=0;j<active_size;j++) if(is_free(j)) G[i] += alpha[j] * Q_i[j]; } } else { for(i=0;i<active_size;i++) if(is_free(i)) { const float *Q_i = Q->get_Q(i,l); float alpha_i = alpha[i]; for(j=active_size;j<l;j++) G[j] += alpha_i * Q_i[j]; } } } __device__ void CUDA_Solver::Solve(int l, const CUDA_QMatrix& Q, const float *p_, const schar *y_, float *alpha_, float Cp, float Cn, float eps, SolutionInfo* si, int shrinking) { this->l = l; this->Q = &Q; QD=Q.get_QD(); clone(p, p_,l); clone(y, y_,l); clone(alpha,alpha_,l); this->Cp = Cp; this->Cn = 
Cn; this->eps = eps; unshrink = false; // initialize alpha_status { alpha_status = (char *)malloc(sizeof(char) * l); for(int i=0;i<l;i++) update_alpha_status(i); } // initialize active set (for shrinking) { active_set = (int *)malloc(sizeof(int) * l);; for(int i=0;i<l;i++) active_set[i] = i; active_size = l; } // initialize gradient { G = (float *)malloc(sizeof(float) * l); G_bar = (float *)malloc(sizeof(float) * l); int i; for(i=0;i<l;i++) { G[i] = p[i]; G_bar[i] = 0; } for(i=0;i<l;i++) if(!is_lower_bound(i)) { const float *Q_i = Q.get_Q(i,l); float alpha_i = alpha[i]; int j; for(j=0;j<l;j++) G[j] += alpha_i*Q_i[j]; if(is_upper_bound(i)) for(j=0;j<l;j++) G_bar[j] += get_C(i) * Q_i[j]; } } // optimization step int iter = 0; int max_iter = max(10000000, l>INT_MAX/100 ? INT_MAX : 100*l); int counter = min(l,1000)+1; while(iter < max_iter) { // show progress and do shrinking if(--counter == 0) { counter = min(l,1000); if(shrinking) do_shrinking(); printf("."); } int i,j; if (select_working_set(i,j)!=0) { // reconstruct the whole gradient reconstruct_gradient(); // reset active set size and check active_size = l; printf("*"); if(select_working_set(i,j)!=0) break; else counter = 1; // do shrinking next iteration } ++iter; // update alpha[i] and alpha[j], handle bounds carefully const float *Q_i = Q.get_Q(i,active_size); const float *Q_j = Q.get_Q(j,active_size); float C_i = get_C(i); float C_j = get_C(j); float old_alpha_i = alpha[i]; float old_alpha_j = alpha[j]; if(y[i]!=y[j]) { float quad_coef = QD[i]+QD[j]+2*Q_i[j]; if (quad_coef <= 0) quad_coef = TAU; float delta = (-G[i]-G[j])/quad_coef; float diff = alpha[i] - alpha[j]; alpha[i] += delta; alpha[j] += delta; if(diff > 0) { if(alpha[j] < 0) { alpha[j] = 0; alpha[i] = diff; } } else { if(alpha[i] < 0) { alpha[i] = 0; alpha[j] = -diff; } } if(diff > C_i - C_j) { if(alpha[i] > C_i) { alpha[i] = C_i; alpha[j] = C_i - diff; } } else { if(alpha[j] > C_j) { alpha[j] = C_j; alpha[i] = C_j + diff; } } } else { float 
quad_coef = QD[i]+QD[j]-2*Q_i[j]; if (quad_coef <= 0) quad_coef = TAU; float delta = (G[i]-G[j])/quad_coef; float sum = alpha[i] + alpha[j]; alpha[i] -= delta; alpha[j] += delta; if(sum > C_i) { if(alpha[i] > C_i) { alpha[i] = C_i; alpha[j] = sum - C_i; } } else { if(alpha[j] < 0) { alpha[j] = 0; alpha[i] = sum; } } if(sum > C_j) { if(alpha[j] > C_j) { alpha[j] = C_j; alpha[i] = sum - C_j; } } else { if(alpha[i] < 0) { alpha[i] = 0; alpha[j] = sum; } } } // update G float delta_alpha_i = alpha[i] - old_alpha_i; float delta_alpha_j = alpha[j] - old_alpha_j; for(int k=0;k<active_size;k++) { G[k] += Q_i[k]*delta_alpha_i + Q_j[k]*delta_alpha_j; } // update alpha_status and G_bar { bool ui = is_upper_bound(i); bool uj = is_upper_bound(j); update_alpha_status(i); update_alpha_status(j); int k; if(ui != is_upper_bound(i)) { Q_i = Q.get_Q(i,l); if(ui) for(k=0;k<l;k++) G_bar[k] -= C_i * Q_i[k]; else for(k=0;k<l;k++) G_bar[k] += C_i * Q_i[k]; } if(uj != is_upper_bound(j)) { Q_j = Q.get_Q(j,l); if(uj) for(k=0;k<l;k++) G_bar[k] -= C_j * Q_j[k]; else for(k=0;k<l;k++) G_bar[k] += C_j * Q_j[k]; } } } if(iter >= max_iter) { if(active_size < l) { // reconstruct the whole gradient to calculate objective value reconstruct_gradient(); active_size = l; printf("*"); } printf("\nWARNING: reaching max number of iterations\n"); } // calculate rho si->rho = calculate_rho(); // calculate objective value { float v = 0; int i; for(i=0;i<l;i++) v += alpha[i] * (G[i] + p[i]); si->obj = v/2; } // put back the solution { for(int i=0;i<l;i++) alpha_[active_set[i]] = alpha[i]; } // juggle everything back /*{ for(int i=0;i<l;i++) while(active_set[i] != i) swap_index(i,active_set[i]); // or Q.swap_index(i,active_set[i]); }*/ si->upper_bound_p = Cp; si->upper_bound_n = Cn; printf("\noptimization finished, #iter = %d\n",iter); free(p); free(y); free(alpha); free(alpha_status); free(active_set); free(G); free(G_bar); } // return 1 if already optimal, return 0 otherwise __device__ int 
CUDA_Solver::select_working_set(int &out_i, int &out_j) { // return i,j such that // i: maximizes -y_i * grad(f)_i, i in I_up(\alpha) // j: minimizes the decrease of obj value // (if quadratic coefficeint <= 0, replace it with tau) // -y_j*grad(f)_j < -y_i*grad(f)_i, j in I_low(\alpha) float Gmax = -INF; float Gmax2 = -INF; int Gmax_idx = -1; int Gmin_idx = -1; float obj_diff_min = INF; for(int t=0;t<active_size;t++) if(y[t]==+1) { if(!is_upper_bound(t)) if(-G[t] >= Gmax) { Gmax = -G[t]; Gmax_idx = t; } } else { if(!is_lower_bound(t)) if(G[t] >= Gmax) { Gmax = G[t]; Gmax_idx = t; } } int i = Gmax_idx; const float *Q_i = NULL; if(i != -1) // NULL Q_i not accessed: Gmax=-INF if i=-1 Q_i = Q->get_Q(i,active_size); for(int j=0;j<active_size;j++) { if(y[j]==+1) { if (!is_lower_bound(j)) { float grad_diff=Gmax+G[j]; if (G[j] >= Gmax2) Gmax2 = G[j]; if (grad_diff > 0) { float obj_diff; float quad_coef = QD[i]+QD[j]-2.0*y[i]*Q_i[j]; if (quad_coef > 0) obj_diff = -(grad_diff*grad_diff)/quad_coef; else obj_diff = -(grad_diff*grad_diff)/TAU; if (obj_diff <= obj_diff_min) { Gmin_idx=j; obj_diff_min = obj_diff; } } } } else { if (!is_upper_bound(j)) { float grad_diff= Gmax-G[j]; if (-G[j] >= Gmax2) Gmax2 = -G[j]; if (grad_diff > 0) { float obj_diff; float quad_coef = QD[i]+QD[j]+2.0*y[i]*Q_i[j]; if (quad_coef > 0) obj_diff = -(grad_diff*grad_diff)/quad_coef; else obj_diff = -(grad_diff*grad_diff)/TAU; if (obj_diff <= obj_diff_min) { Gmin_idx=j; obj_diff_min = obj_diff; } } } } } if(Gmax+Gmax2 < eps) return 1; out_i = Gmax_idx; out_j = Gmin_idx; return 0; } __device__ bool CUDA_Solver::be_shrunk(int i, float Gmax1, float Gmax2) { if(is_upper_bound(i)) { if(y[i]==+1) return(-G[i] > Gmax1); else return(-G[i] > Gmax2); } else if(is_lower_bound(i)) { if(y[i]==+1) return(G[i] > Gmax2); else return(G[i] > Gmax1); } else return(false); } __device__ void CUDA_Solver::do_shrinking() { int i; float Gmax1 = -INF; // max { -y_i * grad(f)_i | i in I_up(\alpha) } float Gmax2 = -INF; // max { 
y_i * grad(f)_i | i in I_low(\alpha) } // find maximal violating pair first for(i=0;i<active_size;i++) { if(y[i]==+1) { if(!is_upper_bound(i)) { if(-G[i] >= Gmax1) Gmax1 = -G[i]; } if(!is_lower_bound(i)) { if(G[i] >= Gmax2) Gmax2 = G[i]; } } else { if(!is_upper_bound(i)) { if(-G[i] >= Gmax2) Gmax2 = -G[i]; } if(!is_lower_bound(i)) { if(G[i] >= Gmax1) Gmax1 = G[i]; } } } if(unshrink == false && Gmax1 + Gmax2 <= eps*10) { unshrink = true; reconstruct_gradient(); active_size = l; printf("*"); } for(i=0;i<active_size;i++) if (be_shrunk(i, Gmax1, Gmax2)) { active_size--; while (active_size > i) { if (!be_shrunk(active_size, Gmax1, Gmax2)) { swap_index(i,active_size); break; } active_size--; } } } __device__ float CUDA_Solver::calculate_rho() { float r; int nr_free = 0; float ub = INF, lb = -INF, sum_free = 0; for(int i=0;i<active_size;i++) { float yG = y[i]*G[i]; if(is_upper_bound(i)) { if(y[i]==-1) ub = min(ub,yG); else lb = max(lb,yG); } else if(is_lower_bound(i)) { if(y[i]==+1) ub = min(ub,yG); else lb = max(lb,yG); } else { ++nr_free; sum_free += yG; } } if(nr_free>0) r = sum_free/nr_free; else r = (ub+lb)/2; return r; } // // CUDA_Solver for nu-svm classification and regression // // additional constraint: e^T \alpha = constant // class CUDA_Solver_NU: public CUDA_Solver { public: __device__ CUDA_Solver_NU() {} __device__ void Solve(int l, const CUDA_QMatrix& Q, const float *p, const schar *y, float *alpha, float Cp, float Cn, float eps, SolutionInfo* si, int shrinking) { this->si = si; CUDA_Solver::Solve(l,Q,p,y,alpha,Cp,Cn,eps,si,shrinking); } private: SolutionInfo *si; __device__ int select_working_set(int &i, int &j); __device__ float calculate_rho(); __device__ bool be_shrunk(int i, float Gmax1, float Gmax2, float Gmax3, float Gmax4); __device__ void do_shrinking(); }; // return 1 if already optimal, return 0 otherwise __device__ int CUDA_Solver_NU::select_working_set(int &out_i, int &out_j) { // return i,j such that y_i = y_j and // i: maximizes -y_i * 
grad(f)_i, i in I_up(\alpha) // j: minimizes the decrease of obj value // (if quadratic coefficeint <= 0, replace it with tau) // -y_j*grad(f)_j < -y_i*grad(f)_i, j in I_low(\alpha) float Gmaxp = -INF; float Gmaxp2 = -INF; int Gmaxp_idx = -1; float Gmaxn = -INF; float Gmaxn2 = -INF; int Gmaxn_idx = -1; int Gmin_idx = -1; float obj_diff_min = INF; for(int t=0;t<active_size;t++) if(y[t]==+1) { if(!is_upper_bound(t)) if(-G[t] >= Gmaxp) { Gmaxp = -G[t]; Gmaxp_idx = t; } } else { if(!is_lower_bound(t)) if(G[t] >= Gmaxn) { Gmaxn = G[t]; Gmaxn_idx = t; } } int ip = Gmaxp_idx; int in = Gmaxn_idx; const float *Q_ip = NULL; const float *Q_in = NULL; if(ip != -1) // NULL Q_ip not accessed: Gmaxp=-INF if ip=-1 Q_ip = Q->get_Q(ip,active_size); if(in != -1) Q_in = Q->get_Q(in,active_size); for(int j=0;j<active_size;j++) { if(y[j]==+1) { if (!is_lower_bound(j)) { float grad_diff=Gmaxp+G[j]; if (G[j] >= Gmaxp2) Gmaxp2 = G[j]; if (grad_diff > 0) { float obj_diff; float quad_coef = QD[ip]+QD[j]-2*Q_ip[j]; if (quad_coef > 0) obj_diff = -(grad_diff*grad_diff)/quad_coef; else obj_diff = -(grad_diff*grad_diff)/TAU; if (obj_diff <= obj_diff_min) { Gmin_idx=j; obj_diff_min = obj_diff; } } } } else { if (!is_upper_bound(j)) { float grad_diff=Gmaxn-G[j]; if (-G[j] >= Gmaxn2) Gmaxn2 = -G[j]; if (grad_diff > 0) { float obj_diff; float quad_coef = QD[in]+QD[j]-2*Q_in[j]; if (quad_coef > 0) obj_diff = -(grad_diff*grad_diff)/quad_coef; else obj_diff = -(grad_diff*grad_diff)/TAU; if (obj_diff <= obj_diff_min) { Gmin_idx=j; obj_diff_min = obj_diff; } } } } } if(max(Gmaxp+Gmaxp2,Gmaxn+Gmaxn2) < eps) return 1; if (y[Gmin_idx] == +1) out_i = Gmaxp_idx; else out_i = Gmaxn_idx; out_j = Gmin_idx; return 0; } __device__ bool CUDA_Solver_NU::be_shrunk(int i, float Gmax1, float Gmax2, float Gmax3, float Gmax4) { if(is_upper_bound(i)) { if(y[i]==+1) return(-G[i] > Gmax1); else return(-G[i] > Gmax4); } else if(is_lower_bound(i)) { if(y[i]==+1) return(G[i] > Gmax2); else return(G[i] > Gmax3); } else 
return(false); } __device__ void CUDA_Solver_NU::do_shrinking() { float Gmax1 = -INF; // max { -y_i * grad(f)_i | y_i = +1, i in I_up(\alpha) } float Gmax2 = -INF; // max { y_i * grad(f)_i | y_i = +1, i in I_low(\alpha) } float Gmax3 = -INF; // max { -y_i * grad(f)_i | y_i = -1, i in I_up(\alpha) } float Gmax4 = -INF; // max { y_i * grad(f)_i | y_i = -1, i in I_low(\alpha) } // find maximal violating pair first int i; for(i=0;i<active_size;i++) { if(!is_upper_bound(i)) { if(y[i]==+1) { if(-G[i] > Gmax1) Gmax1 = -G[i]; } else if(-G[i] > Gmax4) Gmax4 = -G[i]; } if(!is_lower_bound(i)) { if(y[i]==+1) { if(G[i] > Gmax2) Gmax2 = G[i]; } else if(G[i] > Gmax3) Gmax3 = G[i]; } } if(unshrink == false && max(Gmax1+Gmax2,Gmax3+Gmax4) <= eps*10) { unshrink = true; reconstruct_gradient(); active_size = l; } for(i=0;i<active_size;i++) if (be_shrunk(i, Gmax1, Gmax2, Gmax3, Gmax4)) { active_size--; while (active_size > i) { if (!be_shrunk(active_size, Gmax1, Gmax2, Gmax3, Gmax4)) { swap_index(i,active_size); break; } active_size--; } } } __device__ float CUDA_Solver_NU::calculate_rho() { int nr_free1 = 0,nr_free2 = 0; float ub1 = INF, ub2 = INF; float lb1 = -INF, lb2 = -INF; float sum_free1 = 0, sum_free2 = 0; for(int i=0;i<active_size;i++) { if(y[i]==+1) { if(is_upper_bound(i)) lb1 = max(lb1,G[i]); else if(is_lower_bound(i)) ub1 = min(ub1,G[i]); else { ++nr_free1; sum_free1 += G[i]; } } else { if(is_upper_bound(i)) lb2 = max(lb2,G[i]); else if(is_lower_bound(i)) ub2 = min(ub2,G[i]); else { ++nr_free2; sum_free2 += G[i]; } } } float r1,r2; if(nr_free1 > 0) r1 = sum_free1/nr_free1; else r1 = (ub1+lb1)/2; if(nr_free2 > 0) r2 = sum_free2/nr_free2; else r2 = (ub2+lb2)/2; si->r = (r1+r2)/2; return (r1-r2)/2; } // // Q matrices for various formulations // class CUDA_SVC_Q: public CUDA_Kernel { public: __device__ CUDA_SVC_Q(const svm_problem& prob, const svm_parameter& param, const schar *y_) :CUDA_Kernel(prob.l, prob.x, param) { clone(y,y_,prob.l); cache = new CUDA_Cache(prob.l,(long 
int)(param.cache_size*(1<<20))); QD = (float *)malloc(sizeof(float) * prob.l); for(int i=0;i<prob.l;i++) QD[i] = (this->*kernel_function)(i,i); } __device__ float *get_Q(int i, int len) const { float *data; int start, j; if((start = cache->get_data(i,&data,len)) < len) { for(j=start;j<len;j++) data[j] = (float)(y[i]*y[j]*(this->*kernel_function)(i,j)); } return data; } __device__ float *get_QD() const { return QD; } __device__ void swap_index(int i, int j) const { cache->swap_index(i,j); CUDA_Kernel::swap_index(i,j); swap(y[i],y[j]); swap(QD[i],QD[j]); } __device__ ~CUDA_SVC_Q() { free(y); delete cache; free(QD); } private: schar *y; CUDA_Cache *cache; float *QD; }; class CUDA_ONE_CLASS_Q: public CUDA_Kernel { public: __device__ CUDA_ONE_CLASS_Q(const svm_problem& prob, const svm_parameter& param) :CUDA_Kernel(prob.l, prob.x, param) { cache = new CUDA_Cache(prob.l,(long int)(param.cache_size*(1<<20))); QD = (float *)malloc(sizeof(float) * prob.l); for(int i=0;i<prob.l;i++) QD[i] = (this->*kernel_function)(i,i); } __device__ float *get_Q(int i, int len) const { float *data; int start, j; if((start = cache->get_data(i,&data,len)) < len) { for(j=start;j<len;j++) data[j] = (float)(this->*kernel_function)(i,j); } return data; } __device__ float *get_QD() const { return QD; } __device__ void swap_index(int i, int j) const { cache->swap_index(i,j); CUDA_Kernel::swap_index(i,j); swap(QD[i],QD[j]); } __device__ ~CUDA_ONE_CLASS_Q() { delete cache; free(QD); } private: CUDA_Cache *cache; float *QD; }; class CUDA_SVR_Q: public CUDA_Kernel { public: __device__ CUDA_SVR_Q(const svm_problem& prob, const svm_parameter& param) :CUDA_Kernel(prob.l, prob.x, param) { l = prob.l; cache = new CUDA_Cache(l,(long int)(param.cache_size*(1<<20))); QD = new float[2*l]; sign = new schar[2*l]; index = new int[2*l]; for(int k=0;k<l;k++) { sign[k] = 1; sign[k+l] = -1; index[k] = k; index[k+l] = k; QD[k] = (this->*kernel_function)(k,k); QD[k+l] = QD[k]; } buffer[0] = new float[2*l]; buffer[1] = 
new float[2*l]; next_buffer = 0; } __device__ void swap_index(int i, int j) const { swap(sign[i],sign[j]); swap(index[i],index[j]); swap(QD[i],QD[j]); } __device__ float *get_Q(int i, int len) const { float *data; int j, real_i = index[i]; if(cache->get_data(real_i,&data,l) < l) { for(j=0;j<l;j++) data[j] = (float)(this->*kernel_function)(real_i,j); } // reorder and copy float *buf = buffer[next_buffer]; next_buffer = 1 - next_buffer; schar si = sign[i]; for(j=0;j<len;j++) buf[j] = (float) si * (float) sign[j] * data[index[j]]; return buf; } __device__ float *get_QD() const { return QD; } __device__ ~CUDA_SVR_Q() { delete cache; delete[] sign; delete[] index; delete[] buffer[0]; delete[] buffer[1]; delete[] QD; } private: int l; CUDA_Cache *cache; schar *sign; int *index; mutable int next_buffer; float *buffer[2]; float *QD; }; __device__ void cuda_svm_group_classes(const svm_problem *prob, int *nr_class_ret, int **label_ret, int **start_ret, int **count_ret, int *perm) { int l = prob->l; int max_nr_class = 16; int nr_class = 0; int *label = (int *)malloc(sizeof(int) * max_nr_class); int *count = (int *)malloc(sizeof(int) * max_nr_class); int *data_label = (int *)malloc(sizeof(int) * l); int i; for(i=0;i<l;i++) { int this_label = (int)prob->y[i]; int j; for(j=0;j<nr_class;j++) { if(this_label == label[j]) { ++count[j]; break; } } data_label[i] = j; if(j == nr_class) { if(nr_class == max_nr_class) { int *label_t = label; int *count_t = count; label = (int *)malloc(2 * max_nr_class * sizeof(int)); count = (int *)malloc(2 * max_nr_class * sizeof(int)); int k; for (k=0; k<max_nr_class; k++) { label[k] = label_t[k]; count[k] = count_t[k]; } free(label_t); free(count_t); max_nr_class *= 2; } label[nr_class] = this_label; count[nr_class] = 1; ++nr_class; } } // // Labels are ordered by their first occurrence in the training set. 
// However, for two-class sets with -1/+1 labels and -1 appears first,
// we swap labels to ensure that internally the binary SVM has positive data corresponding to the +1 instances.
//
if (nr_class == 2 && label[0] == -1 && label[1] == 1)
{
	swap(label[0],label[1]);
	swap(count[0],count[1]);
	for(i=0;i<l;i++)
	{
		if(data_label[i] == 0)
			data_label[i] = 1;
		else
			data_label[i] = 0;
	}
}

// start[c] = offset of the first sample of class c in the grouped ordering
int *start = (int *)malloc(sizeof(int) * nr_class);
start[0] = 0;
for(i=1;i<nr_class;i++)
	start[i] = start[i-1]+count[i-1];
// scatter sample indices so that perm lists each class contiguously
for(i=0;i<l;i++)
{
	perm[start[data_label[i]]] = i;
	++start[data_label[i]];
}
// the scatter advanced start[]; rebuild the per-class offsets for the caller
start[0] = 0;
for(i=1;i<nr_class;i++)
	start[i] = start[i-1]+count[i-1];

*nr_class_ret = nr_class;
*label_ret = label;
*start_ret = start;
*count_ret = count;
free(data_label);
}

// Host-side helper: count the number of distinct labels in prob.
// Labels are compared after truncation to int, mirroring cuda_svm_group_classes.
__host__ int svm_get_nr_classes(const svm_problem *prob)
{
	int l = prob->l;
	int max_nr_class = 16;
	int nr_class = 0;
	int *label = (int *)malloc(sizeof(int) * max_nr_class);
	int i;

	for(i=0;i<l;i++)
	{
		int this_label = (int)prob->y[i];
		int j;
		for(j=0;j<nr_class;j++)
			if(this_label == label[j])
				break;
		if(j == nr_class)
		{
			if(nr_class == max_nr_class)
			{
				// grow the scratch table geometrically
				max_nr_class *= 2;
				label = (int *)realloc(label,max_nr_class*sizeof(int));
			}
			label[nr_class] = this_label;
			++nr_class;
		}
	}
	free(label); // fix: the scratch label table was leaked on every call
	return nr_class;
}

// Release everything owned by the model (support vectors only when free_sv
// is set, since they may be views into caller-owned data otherwise).
// Each pointer is freed and then nulled so a second call is harmless.
__device__ void cuda_svm_free_model_content(svm_model* model_ptr)
{
	if(model_ptr->free_sv && model_ptr->l > 0 && model_ptr->SV != NULL)
		free((void *)(model_ptr->SV[0]));
	if(model_ptr->sv_coef)
	{
		for(int i=0;i<model_ptr->nr_class-1;i++)
			free(model_ptr->sv_coef[i]);
	}
	free(model_ptr->SV);
	model_ptr->SV = NULL;
	free(model_ptr->sv_coef);
	model_ptr->sv_coef = NULL;
	free(model_ptr->rho);
	model_ptr->rho = NULL;
	free(model_ptr->label);
	model_ptr->label= NULL;
	free(model_ptr->probA);
	model_ptr->probA = NULL;
	free(model_ptr->probB);
	model_ptr->probB= NULL;
	free(model_ptr->sv_indices);
	model_ptr->sv_indices = NULL;
	free(model_ptr->nSV);
	model_ptr->nSV = NULL;
}

// Free the model content and the model struct itself, then null the caller's handle.
__device__ void cuda_svm_free_and_destroy_model(svm_model** model_ptr_ptr)
{
// Guard against a null handle as well as an already-destroyed model.
if(model_ptr_ptr != NULL && *model_ptr_ptr != NULL)
{
	cuda_svm_free_model_content(*model_ptr_ptr);
	free(*model_ptr_ptr);
	*model_ptr_ptr = NULL;
}
}

// Free the arrays owned by a parameter set (per-class weight table).
__device__ void cuda_svm_destroy_param(svm_parameter* param)
{
	free(param->weight_label);
	free(param->weight);
}

// Platt's binary SVM Probabilistic Output: an improvement from Lin et al.
// Fits sigmoid parameters A,B by Newton's method with a backtracking line
// search on the regularized cross-entropy objective, so that the caller can
// later map a decision value f to a probability via 1/(1+exp(A*f+B)).
//   l          : number of decision values
//   dec_values : SVM decision values f_i
//   labels     : training labels; only the sign is used
//   A,B        : output sigmoid parameters
__device__ void cuda_sigmoid_train(int l, const float *dec_values, const float *labels, float& A, float& B)
{
	float prior1=0, prior0 = 0;
	int i;

	// count positives/negatives; they define Platt's regularized targets
	for (i=0;i<l;i++)
		if (labels[i] > 0) prior1+=1;
		else prior0+=1;

	int max_iter=100;	// Maximal number of iterations
	float min_step=1e-10;	// Minimal step taken in line search
	float sigma=1e-12;	// For numerically strict PD of Hessian
	float eps=1e-5;
	float hiTarget=(prior1+1.0)/(prior1+2.0);
	float loTarget=1.0/(prior0+2.0);
	float *t=(float *)malloc(sizeof(float) * l);
	float fApB,p,q,h11,h22,h21,g1,g2,det,dA,dB,gd,stepsize;
	float newA,newB,newf,d1,d2;
	int iter;

	// Initial Point and Initial Fun Value
	A=0.0; B=logf((prior0+1.0)/(prior1+1.0));
	float fval = 0.0;

	for (i=0;i<l;i++)
	{
		if (labels[i]>0) t[i]=hiTarget;
		else t[i]=loTarget;
		fApB = dec_values[i]*A+B;
		// the two branches are algebraically equal; chosen to avoid expf overflow
		if (fApB>=0)
			fval += t[i]*fApB + logf(1+expf(-fApB));
		else
			fval += (t[i] - 1)*fApB +logf(1+expf(fApB));
	}
	for (iter=0;iter<max_iter;iter++)
	{
		// Update Gradient and Hessian (use H' = H + sigma I)
		h11=sigma; // numerically ensures strict PD
		h22=sigma;
		h21=0.0;g1=0.0;g2=0.0;
		for (i=0;i<l;i++)
		{
			fApB = dec_values[i]*A+B;
			// p = predicted probability, q = 1-p, computed overflow-safely
			if (fApB >= 0)
			{
				q=1.0/(1.0+expf(-fApB));
				p=expf(-fApB)*q;
			}
			else
			{
				p=1.0/(1.0+expf(fApB));
				q=expf(fApB)*p;
			}
			d2=p*q;
			h11+=dec_values[i]*dec_values[i]*d2;
			h22+=d2;
			h21+=dec_values[i]*d2;
			d1=t[i]-p;
			g1+=dec_values[i]*d1;
			g2+=d1;
		}

		// Stopping Criteria
		if (fabs(g1)<eps && fabs(g2)<eps)
			break;

		// Finding Newton direction: -inv(H') * g
		det=h11*h22-h21*h21;
		dA=-(h22*g1 - h21 * g2) / det;
		dB=-(-h21*g1+ h11 * g2) / det;
		gd=g1*dA+g2*dB;

		stepsize = 1;	// Line Search
		while (stepsize >= min_step)
		{
			newA = A + stepsize * dA;
			newB = B + stepsize *
dB; // New function value newf = 0.0; for (i=0;i<l;i++) { fApB = dec_values[i]*newA+newB; if (fApB >= 0) newf += t[i]*fApB + logf(1+expf(-fApB)); else newf += (t[i] - 1)*fApB +logf(1+expf(fApB)); } // Check sufficient decrease if (newf<fval+0.0001*stepsize*gd) { A=newA;B=newB;fval=newf; break; } else stepsize = stepsize / 2.0; } if (stepsize < min_step) { printf("Line search fails in two-class probability estimates\n"); break; } } if (iter>=max_iter) printf("Reaching maximal iterations in two-class probability estimates\n"); free(t); } __device__ float cuda_sigmoid_predict(float decision_value, float A, float B) { float fApB = decision_value*A+B; // 1-p used later; avoid catastrophic cancellation if (fApB >= 0) return expf(-fApB)/(1.0+expf(-fApB)); else return 1.0/(1+expf(fApB)) ; } // Method 2 from the multiclass_prob paper by Wu, Lin, and Weng __device__ void cuda_multiclass_probability(int k, float **r, float *p) { int t,j; int iter = 0, max_iter=max(100,k); float **Q=(float **)malloc(sizeof(float *) * k); float *Qp=(float *)malloc(sizeof(float) * k); float pQp, eps=0.005/k; for (t=0;t<k;t++) { p[t]=1.0/k; // Valid if k = 1 Q[t]=(float *)malloc(sizeof(float) * k); Q[t][t]=0; for (j=0;j<t;j++) { Q[t][t]+=r[j][t]*r[j][t]; Q[t][j]=Q[j][t]; } for (j=t+1;j<k;j++) { Q[t][t]+=r[j][t]*r[j][t]; Q[t][j]=-r[j][t]*r[t][j]; } } for (iter=0;iter<max_iter;iter++) { // stopping condition, recalculate QP,pQP for numerical accuracy pQp=0; for (t=0;t<k;t++) { Qp[t]=0; for (j=0;j<k;j++) Qp[t]+=Q[t][j]*p[j]; pQp+=p[t]*Qp[t]; } float max_error=0; for (t=0;t<k;t++) { float error=fabs(Qp[t]-pQp); if (error>max_error) max_error=error; } if (max_error<eps) break; for (t=0;t<k;t++) { float diff=(-Qp[t]+pQp)/Q[t][t]; p[t]+=diff; pQp=(pQp+diff*(diff*Q[t][t]+2*Qp[t]))/(1+diff)/(1+diff); for (j=0;j<k;j++) { Qp[j]=(Qp[j]+diff*Q[t][j])/(1+diff); p[j]/=(1+diff); } } } if (iter>=max_iter) printf("Exceeds max_iter in multiclass_prob\n"); for(t=0;t<k;t++) free(Q[t]); free(Q); free(Qp); } // 
Cross-validation decision values for probability estimates __device__ void cuda_svm_binary_svc_probability(const svm_problem *prob, const svm_parameter *param, float Cp, float Cn, float& probA, float& probB) { int i; int nr_fold = 5; int *perm = (int *)malloc(sizeof(int) * prob->l); float *dec_values = (float *)malloc(sizeof(float) * prob->l); CUDA_Rand rand; // random shuffle for(i=0;i<prob->l;i++) perm[i]=i; for(i=0;i<prob->l;i++) { int j = i+rand.rand_int(prob->l-i); swap(perm[i],perm[j]); } for(i=0;i<nr_fold;i++) { int begin = i*prob->l/nr_fold; int end = (i+1)*prob->l/nr_fold; int j,k; struct svm_problem subprob; subprob.l = prob->l-(end-begin); subprob.x = (struct svm_node **)malloc(sizeof(struct svm_node*) * subprob.l); subprob.y = (float *)malloc(sizeof(float) * subprob.l); k=0; for(j=0;j<begin;j++) { subprob.x[k] = prob->x[perm[j]]; subprob.y[k] = prob->y[perm[j]]; ++k; } for(j=end;j<prob->l;j++) { subprob.x[k] = prob->x[perm[j]]; subprob.y[k] = prob->y[perm[j]]; ++k; } int p_count=0,n_count=0; for(j=0;j<k;j++) if(subprob.y[j]>0) p_count++; else n_count++; if(p_count==0 && n_count==0) for(j=begin;j<end;j++) dec_values[perm[j]] = 0; else if(p_count > 0 && n_count == 0) for(j=begin;j<end;j++) dec_values[perm[j]] = 1; else if(p_count == 0 && n_count > 0) for(j=begin;j<end;j++) dec_values[perm[j]] = -1; else { svm_parameter subparam = *param; subparam.probability=0; subparam.C=1.0; subparam.nr_weight=2; subparam.weight_label = (int *)malloc(sizeof(int) * 2); subparam.weight = (float *)malloc(sizeof(float) * 2); subparam.weight_label[0]=+1; subparam.weight_label[1]=-1; subparam.weight[0]=Cp; subparam.weight[1]=Cn; struct svm_model *submodel = cuda_device_svm_train_no_prob(&subprob, &subparam); for(j=begin;j<end;j++) { cuda_svm_predict_values(submodel, prob->x[perm[j]], &(dec_values[perm[j]])); // ensure +1 -1 order; reason not using CV subroutine dec_values[perm[j]] *= submodel->label[0]; } cuda_svm_free_and_destroy_model(&submodel); 
cuda_svm_destroy_param(&subparam);
		}
		free(subprob.x);
		free(subprob.y);
	}
	// fit the sigmoid on the cross-validated decision values
	cuda_sigmoid_train(prob->l,dec_values,prob->y,probA,probB);
	free(dec_values);
	free(perm);
}

// Estimate the Laplace scale parameter sigma of the SVR residual distribution
// via 5-fold cross validation; residuals beyond 5*std are treated as outliers
// and excluded from the mean absolute error.  Returns sigma.
__device__ float cuda_svm_svr_probability(const svm_problem *prob, const svm_parameter *param)
{
	int i;
	int nr_fold = 5;
	float *ymv = (float *)malloc(sizeof(float) * prob->l);
	float mae = 0;

	svm_parameter newparam = *param;
	newparam.probability = 0; // avoid recursive probability estimation
	cuda_svm_cross_validation(prob,&newparam,nr_fold,ymv);
	for(i=0;i<prob->l;i++)
	{
		ymv[i]=prob->y[i]-ymv[i]; // residual: target minus CV prediction
		mae += fabs(ymv[i]);
	}
	mae /= prob->l;
	float std=sqrtf(2*mae*mae);
	int count=0;
	mae=0;
	// recompute MAE without the outlier residuals
	for(i=0;i<prob->l;i++)
		if (fabs(ymv[i]) > 5*std)
			count=count+1;
		else
			mae+=fabs(ymv[i]);
	mae /= (prob->l-count);
	printf("Prob. model for test data: target value = predicted value + z,\nz: Laplace distribution e^(-|z|/sigma)/(2sigma),sigma= %g\n",mae);
	free(ymv);
	return mae;
}

// Compute decision values for x against the model.
// One-class/regression: writes the single decision value and returns the
// prediction (sign for one-class, the value itself for regression).
// Classification: fills dec_values with the nr_class*(nr_class-1)/2 pairwise
// decisions, then majority-votes and returns the winning label.
__device__ float cuda_svm_predict_values(const svm_model *model, const svm_node *x, float* dec_values)
{
	int i;
	if(model->param.svm_type == ONE_CLASS || model->param.svm_type == EPSILON_SVR || model->param.svm_type == NU_SVR)
	{
		float *sv_coef = model->sv_coef[0];
		float sum = 0;
		for(i=0;i<model->l;i++)
			sum += sv_coef[i] * CUDA_Kernel::k_function(x,model->SV[i],model->param);
		sum -= model->rho[0];
		*dec_values = sum;

		if(model->param.svm_type == ONE_CLASS)
			return (sum>0)?1:-1;
		else
			return sum;
	}
	else
	{
		int nr_class = model->nr_class;
		int l = model->l;

		// kernel of x against every support vector, computed once
		float *kvalue = (float *)malloc(sizeof(float) * l);
		for(i=0;i<l;i++)
			kvalue[i] = CUDA_Kernel::k_function(x,model->SV[i],model->param);

		// start[c] = offset of class c's SVs in the model's SV array
		int *start = (int *)malloc(sizeof(int) * nr_class);
		start[0] = 0;
		for(i=1;i<nr_class;i++)
			start[i] = start[i-1]+model->nSV[i-1];

		int *vote = (int *)malloc(sizeof(int) * nr_class);
		for(i=0;i<nr_class;i++)
			vote[i] = 0;

		int p=0;
		for(i=0;i<nr_class;i++)
			for(int j=i+1;j<nr_class;j++)
			{
				float sum = 0;
				int si = start[i];
				int sj = start[j];
				int ci = model->nSV[i];
				int cj = model->nSV[j];

				int k;
				// classifier (i,j): class-i coefs live in sv_coef[j-1]
				float *coef1 = model->sv_coef[j-1];
float *coef2 = model->sv_coef[i];
				// decision value of pair (i,j): contributions from both classes' SVs
				for(k=0;k<ci;k++)
					sum += coef1[si+k] * kvalue[si+k];
				for(k=0;k<cj;k++)
					sum += coef2[sj+k] * kvalue[sj+k];
				sum -= model->rho[p];
				dec_values[p] = sum;

				if(dec_values[p] > 0)
					++vote[i];
				else
					++vote[j];
				p++;
			}

		// majority vote over all pairwise classifiers; ties go to the lower index
		int vote_max_idx = 0;
		for(i=1;i<nr_class;i++)
			if(vote[i] > vote[vote_max_idx])
				vote_max_idx = i;

		free(kvalue);
		free(start);
		free(vote);
		return model->label[vote_max_idx];
	}
}

// Predict the label (or regression value) for x.
// Allocates the scratch dec_values buffer of the size cuda_svm_predict_values
// expects for this svm_type and forwards to it.
__device__ float cuda_svm_predict(const svm_model *model, const svm_node *x)
{
	int nr_class = model->nr_class;
	float *dec_values;
	if(model->param.svm_type == ONE_CLASS || model->param.svm_type == EPSILON_SVR || model->param.svm_type == NU_SVR)
		dec_values = (float *)malloc(sizeof(float));
	else
		dec_values = (float *)malloc(sizeof(float) * nr_class*(nr_class-1)/2);
	float pred_result = cuda_svm_predict_values(model, x, dec_values);
	free(dec_values);
	return pred_result;
}

// Predict with per-class probability estimates.  Requires a C-SVC/nu-SVC
// model trained with probability information (probA/probB present); writes
// nr_class probabilities into prob_estimates and returns the most probable
// label.  Otherwise falls through to plain cuda_svm_predict (tail on next line).
__device__ float cuda_svm_predict_probability(const svm_model *model, const svm_node *x, float *prob_estimates)
{
	if ((model->param.svm_type == C_SVC || model->param.svm_type == NU_SVC) && model->probA!=NULL && model->probB!=NULL)
	{
		int i;
		int nr_class = model->nr_class;
		float *dec_values = (float *)malloc(sizeof(float) * nr_class*(nr_class-1)/2);
		cuda_svm_predict_values(model, x, dec_values);

		// clamp pairwise probabilities away from exactly 0/1 for stability
		float min_prob=1e-7;
		float **pairwise_prob=(float **)malloc(sizeof(float *) * nr_class);
		for(i=0;i<nr_class;i++)
			pairwise_prob[i]=(float *)malloc(sizeof(float) * nr_class);
		int k=0;
		for(i=0;i<nr_class;i++)
			for(int j=i+1;j<nr_class;j++)
			{
				pairwise_prob[i][j]=min(max(cuda_sigmoid_predict(dec_values[k],model->probA[k],model->probB[k]),min_prob),1-min_prob);
				pairwise_prob[j][i]=1-pairwise_prob[i][j];
				k++;
			}
		// couple the pairwise estimates into a single distribution
		cuda_multiclass_probability(nr_class,pairwise_prob,prob_estimates);

		int prob_max_idx = 0;
		for(i=1;i<nr_class;i++)
			if(prob_estimates[i] > prob_estimates[prob_max_idx])
				prob_max_idx = i;
		for(i=0;i<nr_class;i++)
			free(pairwise_prob[i]);
		free(dec_values);
		free(pairwise_prob);
		return model->label[prob_max_idx];
	}
else return cuda_svm_predict(model, x); } // Stratified cross validation __device__ void cuda_svm_cross_validation(const svm_problem *prob, const svm_parameter *param, int nr_fold, float *target) { int i; int *fold_start; int l = prob->l; int *perm = (int *)malloc(sizeof(int) * l); int nr_class; CUDA_Rand rand; if (nr_fold > l) { nr_fold = l; printf("WARNING: # folds > # data. Will use # folds = # data instead (i.e., leave-one-out cross validation)\n"); } fold_start = (int *)malloc(sizeof(int) * (nr_fold+1)); // stratified cv may not give leave-one-out rate // Each class to l folds -> some folds may have zero elements if((param->svm_type == C_SVC || param->svm_type == NU_SVC) && nr_fold < l) { int *start = NULL; int *label = NULL; int *count = NULL; cuda_svm_group_classes(prob,&nr_class,&label,&start,&count,perm); // random shuffle and then data grouped by fold using the array perm int *fold_count = (int *)malloc(sizeof(int) * nr_fold); int c; int *index = (int *)malloc(sizeof(int) * l); for(i=0;i<l;i++) index[i]=perm[i]; for (c=0; c<nr_class; c++) for(i=0;i<count[c];i++) { int j = i+rand.rand_int(count[c]-i); swap(index[start[c]+j],index[start[c]+i]); } for(i=0;i<nr_fold;i++) { fold_count[i] = 0; for (c=0; c<nr_class;c++) fold_count[i]+=(i+1)*count[c]/nr_fold-i*count[c]/nr_fold; } fold_start[0]=0; for (i=1;i<=nr_fold;i++) fold_start[i] = fold_start[i-1]+fold_count[i-1]; for (c=0; c<nr_class;c++) for(i=0;i<nr_fold;i++) { int begin = start[c]+i*count[c]/nr_fold; int end = start[c]+(i+1)*count[c]/nr_fold; for(int j=begin;j<end;j++) { perm[fold_start[i]] = index[j]; fold_start[i]++; } } fold_start[0]=0; for (i=1;i<=nr_fold;i++) fold_start[i] = fold_start[i-1]+fold_count[i-1]; free(start); free(label); free(count); free(index); free(fold_count); } else { for(i=0;i<l;i++) perm[i]=i; for(i=0;i<l;i++) { int j = i+rand.rand_int(l-i); swap(perm[i],perm[j]); } for(i=0;i<=nr_fold;i++) fold_start[i]=i*l/nr_fold; } for(i=0;i<nr_fold;i++) { int begin = fold_start[i]; int end = 
fold_start[i+1]; int j,k; struct svm_problem subprob; subprob.l = l-(end-begin); subprob.x = (svm_node **)malloc(sizeof(svm_node *) * subprob.l); subprob.y = (float *)malloc(sizeof(float) * subprob.l); k=0; for(j=0;j<begin;j++) { subprob.x[k] = prob->x[perm[j]]; subprob.y[k] = prob->y[perm[j]]; ++k; } for(j=end;j<l;j++) { subprob.x[k] = prob->x[perm[j]]; subprob.y[k] = prob->y[perm[j]]; ++k; } struct svm_model *submodel = cuda_device_svm_train_no_prob(&subprob, param); //if(param->probability && // (param->svm_type == C_SVC || param->svm_type == NU_SVC)) //{ // float *prob_estimates = (float *)malloc(sizeof(float) * submodel->nr_class); // for(j=begin;j<end;j++) // target[perm[j]] = cuda_svm_predict_probability(submodel,prob->x[perm[j]],prob_estimates); // free(prob_estimates); //} //else for(j=begin;j<end;j++) target[perm[j]] = cuda_svm_predict(submodel,prob->x[perm[j]]); cuda_svm_free_and_destroy_model(&submodel); free(subprob.x); free(subprob.y); } free(fold_start); free(perm); } // // construct and solve various formulations // __device__ void solve_c_svc(const svm_problem *prob, const svm_parameter *param, float *alpha, CUDA_Solver::SolutionInfo* si, float Cp, float Cn) { int l = prob->l; float *minus_ones = (float *)malloc(sizeof(float) * l); schar *y = (schar *)malloc(sizeof(schar) * l); int i; for(i=0;i<l;i++) { alpha[i] = 0; minus_ones[i] = -1; if(prob->y[i] > 0) y[i] = +1; else y[i] = -1; } CUDA_Solver s; s.Solve(l, CUDA_SVC_Q(*prob,*param,y), minus_ones, y, alpha, Cp, Cn, param->eps, si, param->shrinking); float sum_alpha=0; for(i=0;i<l;i++) sum_alpha += alpha[i]; //if (Cp==Cn) // printf("nu = %f\n", sum_alpha/(Cp*prob->l)); for(i=0;i<l;i++) alpha[i] *= y[i]; free(minus_ones); free(y); } __device__ void solve_nu_svc(const svm_problem *prob, const svm_parameter *param, float *alpha, CUDA_Solver::SolutionInfo* si) { int i; int l = prob->l; float nu = param->nu; schar *y = (schar *)malloc(sizeof(schar) * l); for(i=0;i<l;i++) if(prob->y[i]>0) y[i] = +1; else 
y[i] = -1; float sum_pos = nu*l/2; float sum_neg = nu*l/2; for(i=0;i<l;i++) if(y[i] == +1) { alpha[i] = min(1.0,sum_pos); sum_pos -= alpha[i]; } else { alpha[i] = min(1.0,sum_neg); sum_neg -= alpha[i]; } float *zeros = (float *)malloc(sizeof(float) * l); for(i=0;i<l;i++) zeros[i] = 0; CUDA_Solver_NU s; s.Solve(l, CUDA_SVC_Q(*prob,*param,y), zeros, y, alpha, 1.0, 1.0, param->eps, si, param->shrinking); float r = si->r; //printf("C = %f\n",1/r); for(i=0;i<l;i++) alpha[i] *= y[i]/r; si->rho /= r; si->obj /= (r*r); si->upper_bound_p = 1/r; si->upper_bound_n = 1/r; free(y); free(zeros); } __device__ void solve_one_class(const svm_problem *prob, const svm_parameter *param, float *alpha, CUDA_Solver::SolutionInfo* si) { int l = prob->l; float *zeros = (float *)malloc(sizeof(float) * l); schar *ones = (schar *)malloc(sizeof(schar) * l); int i; int n = (int)(param->nu*prob->l); // # of alpha's at upper bound for(i=0;i<n;i++) alpha[i] = 1; if(n<prob->l) alpha[n] = param->nu * prob->l - n; for(i=n+1;i<l;i++) alpha[i] = 0; for(i=0;i<l;i++) { zeros[i] = 0; ones[i] = 1; } CUDA_Solver s; s.Solve(l, CUDA_ONE_CLASS_Q(*prob,*param), zeros, ones, alpha, 1.0, 1.0, param->eps, si, param->shrinking); free(zeros); free(ones); } __device__ void solve_epsilon_svr(const svm_problem *prob, const svm_parameter *param, float *alpha, CUDA_Solver::SolutionInfo* si) { int l = prob->l; float *alpha2 = (float *)malloc(sizeof(float) * l * 2); float *linear_term = (float *)malloc(sizeof(float) * l * 2); schar *y = (schar *)malloc(sizeof(schar) * l * 2); int i; for(i=0;i<l;i++) { alpha2[i] = 0; linear_term[i] = param->p - prob->y[i]; y[i] = 1; alpha2[i+l] = 0; linear_term[i+l] = param->p + prob->y[i]; y[i+l] = -1; } CUDA_Solver s; s.Solve(2*l, CUDA_SVR_Q(*prob,*param), linear_term, y, alpha2, param->C, param->C, param->eps, si, param->shrinking); float sum_alpha = 0; for(i=0;i<l;i++) { alpha[i] = alpha2[i] - alpha2[i+l]; sum_alpha += fabs(alpha[i]); } //printf("nu = %f\n",sum_alpha/(param->C*l)); 
free(alpha2); free(linear_term); free(y); } __device__ void solve_nu_svr(const svm_problem *prob, const svm_parameter *param, float *alpha, CUDA_Solver::SolutionInfo* si) { int l = prob->l; float C = param->C; float *alpha2 = (float *)malloc(sizeof(float) * l * 2); float *linear_term = (float *)malloc(sizeof(float) * l * 2); schar *y = (schar *)malloc(sizeof(schar) * l * 2); int i; float sum = C * param->nu * l / 2; for(i=0;i<l;i++) { alpha2[i] = alpha2[i+l] = min(sum,C); sum -= alpha2[i]; linear_term[i] = - prob->y[i]; y[i] = 1; linear_term[i+l] = prob->y[i]; y[i+l] = -1; } CUDA_Solver_NU s; s.Solve(2*l, CUDA_SVR_Q(*prob,*param), linear_term, y, alpha2, C, C, param->eps, si, param->shrinking); //printf("epsilon = %f\n",-si->r); for(i=0;i<l;i++) alpha[i] = alpha2[i] - alpha2[i+l]; free(alpha2); free(linear_term); free(y); } __device__ struct decision_function cuda_svm_train_one(const svm_problem *prob, const svm_parameter *param, float Cp, float Cn) { float *alpha = (float *)malloc(sizeof(float) * prob->l); CUDA_Solver::SolutionInfo si; switch(param->svm_type) { case C_SVC: solve_c_svc(prob,param,alpha,&si,Cp,Cn); break; case NU_SVC: solve_nu_svc(prob,param,alpha,&si); break; case ONE_CLASS: solve_one_class(prob,param,alpha,&si); break; case EPSILON_SVR: solve_epsilon_svr(prob,param,alpha,&si); break; case NU_SVR: solve_nu_svr(prob,param,alpha,&si); break; } printf("obj = %f, rho = %f\n",si.obj,si.rho); // output SVs /* int nSV = 0; int nBSV = 0; for(int i=0;i<prob->l;i++) { if(fabs(alpha[i]) > 0) { ++nSV; if(prob->y[i] > 0) { if(fabs(alpha[i]) >= si.upper_bound_p) ++nBSV; } else { if(fabs(alpha[i]) >= si.upper_bound_n) ++nBSV; } } } printf("nSV = %d, nBSV = %d\n",nSV,nBSV); */ decision_function f; f.alpha = alpha; f.rho = si.rho; return f; } __device__ struct svm_model *cuda_device_svm_train_no_prob(const struct svm_problem *prob, const struct svm_parameter *param) { svm_model *model = (svm_model *)malloc(sizeof(svm_model)); model->param = *param; model->free_sv = 
0; // XXX if(param->svm_type == ONE_CLASS || param->svm_type == EPSILON_SVR || param->svm_type == NU_SVR) { // regression or one-class-svm model->nr_class = 2; model->label = NULL; model->nSV = NULL; model->probA = NULL; model->probB = NULL; model->sv_coef = (float **)malloc(sizeof(float*)); //if(param->probability && // (param->svm_type == EPSILON_SVR || // param->svm_type == NU_SVR)) //{ // model->probA = (float *)malloc(sizeof(float)); // model->probA[0] = cuda_svm_svr_probability(prob,param); //} decision_function f = cuda_svm_train_one(prob,param,0,0); model->rho = (float *)malloc(sizeof(float)); model->rho[0] = f.rho; int nSV = 0; int i; for(i=0;i<prob->l;i++) if(fabs(f.alpha[i]) > 0) ++nSV; model->l = nSV; model->SV = (svm_node **)malloc(sizeof(svm_node *) * nSV); model->sv_coef[0] = (float *)malloc(sizeof(float) * nSV); model->sv_indices = (int *)malloc(sizeof(int) * nSV); int j = 0; for(i=0;i<prob->l;i++) if(fabs(f.alpha[i]) > 0) { model->SV[j] = prob->x[i]; model->sv_coef[0][j] = f.alpha[i]; model->sv_indices[j] = i+1; ++j; } free(f.alpha); } else { // classification int l = prob->l; int nr_class; int *label = NULL; int *start = NULL; int *count = NULL; int *perm = (int *)malloc(sizeof(int) * l); // group training data of the same class cuda_svm_group_classes(prob,&nr_class,&label,&start,&count,perm); if(nr_class == 1) printf("WARNING: training data in only one class. 
See README for details.\n"); svm_node **x = (svm_node **)malloc(sizeof(svm_node *) * l); int i; for(i=0;i<l;i++) x[i] = prob->x[perm[i]]; // calculate weighted C float *weighted_C = (float *)malloc(sizeof(float) * nr_class); for(i=0;i<nr_class;i++) weighted_C[i] = param->C; for(i=0;i<param->nr_weight;i++) { int j; for(j=0;j<nr_class;j++) if(param->weight_label[i] == label[j]) break; if(j == nr_class) printf("WARNING: class label %d specified in weight is not found\n", param->weight_label[i]); else weighted_C[j] *= param->weight[i]; } // train k*(k-1)/2 models bool *nonzero = (bool *)malloc(sizeof(bool) * l); for(i=0;i<l;i++) nonzero[i] = false; decision_function *f = (decision_function *)malloc(sizeof(decision_function) * nr_class*(nr_class-1)/2); //float *probA=NULL,*probB=NULL; //if (param->probability) //{ // probA=(float *)malloc(sizeof(float) * nr_class*(nr_class-1)/2); // probB=(float *)malloc(sizeof(float) * nr_class*(nr_class-1)/2); //} int p = 0; for(i=0;i<nr_class;i++) for(int j=i+1;j<nr_class;j++) { svm_problem sub_prob; int si = start[i], sj = start[j]; int ci = count[i], cj = count[j]; sub_prob.l = ci+cj; sub_prob.x = (svm_node **)malloc(sizeof(svm_node *) * sub_prob.l); sub_prob.y = (float *)malloc(sizeof(float) * sub_prob.l); int k; for(k=0;k<ci;k++) { sub_prob.x[k] = x[si+k]; sub_prob.y[k] = +1; } for(k=0;k<cj;k++) { sub_prob.x[ci+k] = x[sj+k]; sub_prob.y[ci+k] = -1; } //if(param->probability) // cuda_svm_binary_svc_probability(&sub_prob,param,weighted_C[i],weighted_C[j],probA[p],probB[p]); f[p] = cuda_svm_train_one(&sub_prob,param,weighted_C[i],weighted_C[j]); for(k=0;k<ci;k++) if(!nonzero[si+k] && fabs(f[p].alpha[k]) > 0) nonzero[si+k] = true; for(k=0;k<cj;k++) if(!nonzero[sj+k] && fabs(f[p].alpha[ci+k]) > 0) nonzero[sj+k] = true; free(sub_prob.x); free(sub_prob.y); ++p; } // build output model->nr_class = nr_class; model->label = (int *)malloc(sizeof(int) * nr_class); for(i=0;i<nr_class;i++) model->label[i] = label[i]; model->rho = (float 
*)malloc(sizeof(float) * nr_class*(nr_class-1)/2); for(i=0;i<nr_class*(nr_class-1)/2;i++) model->rho[i] = f[i].rho; //if(param->probability) //{ // model->probA = (float *)malloc(sizeof(float) * nr_class*(nr_class-1)/2); // model->probB = (float *)malloc(sizeof(float) * nr_class*(nr_class-1)/2); // for(i=0;i<nr_class*(nr_class-1)/2;i++) // { // model->probA[i] = probA[i]; // model->probB[i] = probB[i]; // } //} //else //{ model->probA=NULL; model->probB=NULL; //} int total_sv = 0; int *nz_count = (int *)malloc(sizeof(int) * nr_class); model->nSV = (int *)malloc(sizeof(int) * nr_class); for(i=0;i<nr_class;i++) { int nSV = 0; for(int j=0;j<count[i];j++) if(nonzero[start[i]+j]) { ++nSV; ++total_sv; } model->nSV[i] = nSV; nz_count[i] = nSV; } printf("Total nSV = %d\n",total_sv); model->l = total_sv; model->SV = (svm_node **)malloc(sizeof(svm_node *) * total_sv); model->sv_indices = (int *)malloc(sizeof(int) * total_sv); p = 0; for(i=0;i<l;i++) if(nonzero[i]) { model->SV[p] = x[i]; model->sv_indices[p++] = perm[i] + 1; } int *nz_start = (int *)malloc(sizeof(int) * nr_class); nz_start[0] = 0; for(i=1;i<nr_class;i++) nz_start[i] = nz_start[i-1]+nz_count[i-1]; model->sv_coef = (float **)malloc(sizeof(float *) * (nr_class-1)); for(i=0;i<nr_class-1;i++) model->sv_coef[i] = (float *)malloc(sizeof(float) * total_sv); p = 0; for(i=0;i<nr_class;i++) for(int j=i+1;j<nr_class;j++) { // classifier (i,j): coefficients with // i are in sv_coef[j-1][nz_start[i]...], // j are in sv_coef[i][nz_start[j]...] 
int si = start[i]; int sj = start[j]; int ci = count[i]; int cj = count[j]; int q = nz_start[i]; int k; for(k=0;k<ci;k++) if(nonzero[si+k]) model->sv_coef[j-1][q++] = f[p].alpha[k]; q = nz_start[j]; for(k=0;k<cj;k++) if(nonzero[sj+k]) model->sv_coef[i][q++] = f[p].alpha[ci+k]; ++p; } free(label); //free(probA); //free(probB); free(count); free(perm); free(start); free(x); free(weighted_C); free(nonzero); for(i=0;i<nr_class*(nr_class-1)/2;i++) free(f[i].alpha); free(f); free(nz_count); free(nz_start); } return model; } __device__ void cuda_perform_svm_train(const struct svm_problem *prob, const struct svm_parameter *param, struct svm_model *model) { model->param = *param; model->free_sv = 0; // XXX if(param->svm_type == ONE_CLASS || param->svm_type == EPSILON_SVR || param->svm_type == NU_SVR) { // regression or one-class-svm model->nr_class = 2; if(param->probability && (param->svm_type == EPSILON_SVR || param->svm_type == NU_SVR)) { model->probA[0] = cuda_svm_svr_probability(prob, param); } decision_function f = cuda_svm_train_one(prob,param,0,0); model->rho[0] = f.rho; int nSV = 0; int i; for(i=0;i<prob->l;i++) if(fabs(f.alpha[i]) > 0) ++nSV; model->l = nSV; int j = 0; for(i=0;i<prob->l;i++) if(fabs(f.alpha[i]) > 0) { model->SV[j] = (struct svm_node *)i; model->sv_coef[0][j] = f.alpha[i]; model->sv_indices[j] = i+1; ++j; } free(f.alpha); } else { // classification int l = prob->l; int nr_class; int *label = NULL; int *start = NULL; int *count = NULL; int *perm = (int *)malloc(sizeof(int) * l); // group training data of the same class cuda_svm_group_classes(prob,&nr_class,&label,&start,&count,perm); if(nr_class == 1) printf("WARNING: training data in only one class. 
See README for details.\n"); svm_node **x = (svm_node **)malloc(sizeof(svm_node *) * l); int i; for(i=0;i<l;i++) x[i] = prob->x[perm[i]]; // calculate weighted C float *weighted_C = (float *)malloc(sizeof(float) * nr_class); for(i=0;i<nr_class;i++) weighted_C[i] = param->C; for(i=0;i<param->nr_weight;i++) { int j; for(j=0;j<nr_class;j++) if(param->weight_label[i] == label[j]) break; if(j == nr_class) printf("WARNING: class label %d specified in weight is not found\n", param->weight_label[i]); else weighted_C[j] *= param->weight[i]; } // train k*(k-1)/2 models bool *nonzero = (bool *)malloc(sizeof(bool) * l); for(i=0;i<l;i++) nonzero[i] = false; decision_function *f = (decision_function *)malloc(sizeof(decision_function) * nr_class*(nr_class-1)/2); float *probA=NULL,*probB=NULL; if (param->probability) { probA=(float *)malloc(sizeof(float) * nr_class*(nr_class-1)/2); probB=(float *)malloc(sizeof(float) * nr_class*(nr_class-1)/2); } int p = 0; for(i=0;i<nr_class;i++) for(int j=i+1;j<nr_class;j++) { svm_problem sub_prob; int si = start[i], sj = start[j]; int ci = count[i], cj = count[j]; sub_prob.l = ci+cj; sub_prob.x = (svm_node **)malloc(sizeof(svm_node *) * sub_prob.l); sub_prob.y = (float *)malloc(sizeof(float) * sub_prob.l); int k; for(k=0;k<ci;k++) { sub_prob.x[k] = x[si+k]; sub_prob.y[k] = +1; } for(k=0;k<cj;k++) { sub_prob.x[ci+k] = x[sj+k]; sub_prob.y[ci+k] = -1; } if (param->probability) cuda_svm_binary_svc_probability(&sub_prob,param,weighted_C[i],weighted_C[j],probA[p],probB[p]); f[p] = cuda_svm_train_one(&sub_prob,param,weighted_C[i],weighted_C[j]); for(k=0;k<ci;k++) if(!nonzero[si+k] && fabs(f[p].alpha[k]) > 0) nonzero[si+k] = true; for(k=0;k<cj;k++) if(!nonzero[sj+k] && fabs(f[p].alpha[ci+k]) > 0) nonzero[sj+k] = true; free(sub_prob.x); free(sub_prob.y); ++p; } // build output model->nr_class = nr_class; for(i=0;i<nr_class;i++) model->label[i] = label[i]; for(i=0;i<nr_class*(nr_class-1)/2;i++) model->rho[i] = f[i].rho; if(param->probability) { 
for(i=0;i<nr_class*(nr_class-1)/2;i++) { model->probA[i] = probA[i]; model->probB[i] = probB[i]; } } else { model->probA=NULL; model->probB=NULL; } int total_sv = 0; int *nz_count = (int *)malloc(sizeof(int) * nr_class); for(i=0;i<nr_class;i++) { int nSV = 0; for(int j=0;j<count[i];j++) if(nonzero[start[i]+j]) { ++nSV; ++total_sv; } model->nSV[i] = nSV; nz_count[i] = nSV; } printf("Total nSV = %d\n",total_sv); model->l = total_sv; p = 0; for(i=0;i<l;i++) if(nonzero[i]) { model->SV[p] = (struct svm_node *)(perm[i]); model->sv_indices[p++] = perm[i] + 1; } int *nz_start = (int *)malloc(sizeof(int) * nr_class); nz_start[0] = 0; for(i=1;i<nr_class;i++) nz_start[i] = nz_start[i-1]+nz_count[i-1]; p = 0; for(i=0;i<nr_class;i++) for(int j=i+1;j<nr_class;j++) { // classifier (i,j): coefficients with // i are in sv_coef[j-1][nz_start[i]...], // j are in sv_coef[i][nz_start[j]...] int si = start[i]; int sj = start[j]; int ci = count[i]; int cj = count[j]; int q = nz_start[i]; int k; for(k=0;k<ci;k++) if(nonzero[si+k]) model->sv_coef[j-1][q++] = f[p].alpha[k]; q = nz_start[j]; for(k=0;k<cj;k++) if(nonzero[sj+k]) model->sv_coef[i][q++] = f[p].alpha[ci+k]; ++p; } free(label); free(probA); free(probB); free(count); free(perm); free(start); free(x); free(weighted_C); free(nonzero); for(i=0;i<nr_class*(nr_class-1)/2;i++) free(f[i].alpha); free(f); free(nz_count); free(nz_start); } } __global__ void cuda_svm_train_kernel(const struct svm_problem *subprobs, const struct svm_parameter *params, struct svm_model *submodels, size_t pitch, int nr_grid, int nr_fold) { int x = blockIdx.x; int y = threadIdx.x; if (y%WARP_SIZE == 0) { y = y / WARP_SIZE; if (x<nr_grid && y<nr_fold) { struct svm_model *row = (struct svm_model *)((char*)submodels + x * pitch); cuda_perform_svm_train(&(subprobs[y]), &(params[x]), &(row[y])); } } } int cuda_svm_train(const struct svm_problem *h_prob, struct svm_problem *h_subprobs, struct svm_parameter *h_params, int nr_grid, int nr_fold, struct svm_model 
*h_submodels) { int i, j, k; int dev_cnt; int res = 0; // // Initialize // if (hipGetLastError() != hipSuccess) { fprintf(stderr, "Error when initialize CUDA device\n"); return 1; } hipGetDeviceCount(&dev_cnt); if (dev_cnt == 0) { fprintf(stderr, "No CUDA device\n"); return 1; } #ifdef NVML if (dev_cnt > 1) { // // Choose device that has maximum device memory left // int max_dev; size_t max_avail = 0; nvmlInit(); for (i=0; i<dev_cnt; i++) { uint32_t device; nvmlMemory_t mem; nvmlDeviceGetHandleByIndex(i, &device); nvmlDeviceGetMemoryInfo(device, &mem); if (mem.free > max_avail) { max_dev = i; max_avail = mem.free; } } nvmlShutdown(); hipSetDevice(max_dev); } #endif struct svm_node **x_space = (struct svm_node **)malloc(sizeof(struct svm_node *) * h_prob->l); struct svm_problem *subprobs; struct svm_parameter *params; struct svm_model *submodels; // Send original prob.x to device for (i=0; i<h_prob->l; i++) { j=0; while(h_prob->x[i][j++].index != -1); hipMalloc(&(x_space[i]), sizeof(struct svm_node) * j); hipMemcpy(x_space[i], h_prob->x[i], sizeof(struct svm_node) * j, hipMemcpyHostToDevice); } // Build subprobs in device hipMalloc(&subprobs, sizeof(struct svm_problem) * nr_fold); float **y = (float **)malloc(sizeof(float *) * nr_fold); struct svm_node ***x = (struct svm_node ***)malloc(sizeof(struct svm_node **) * nr_fold); for (i=0; i<nr_fold; i++) { hipMemcpy(&(subprobs[i].l), &(h_subprobs[i].l), sizeof(int), hipMemcpyHostToDevice); hipMalloc(&(y[i]), sizeof(float) * h_subprobs[i].l); hipMemcpy(y[i], h_subprobs[i].y, sizeof(float) * h_subprobs[i].l, hipMemcpyHostToDevice); hipMemcpy(&(subprobs[i].y), &(y[i]), sizeof(float *), hipMemcpyHostToDevice); hipMalloc(&(x[i]), sizeof(struct svm_node *) * h_subprobs[i].l); // The h_subprobs[i].x[j] stores the index in the original prob.x, instead of a pointer to that for (j=0; j<h_subprobs[i].l; j++) hipMemcpy(&(x[i][j]), &(x_space[int(h_subprobs[i].x[j])]), sizeof(struct svm_node *), hipMemcpyHostToDevice); 
hipMemcpy(&(subprobs[i].x), &(x[i]), sizeof(struct svm_node **), hipMemcpyHostToDevice); } // Send params to device hipMalloc(&params, sizeof(struct svm_parameter) * nr_grid); hipMemcpy(params, h_params, sizeof(struct svm_parameter) * nr_grid, hipMemcpyHostToDevice); // Build results (submodels) cache in device size_t pitch; hipMallocPitch(&submodels, &pitch, sizeof(struct svm_model) * nr_fold, nr_grid); int nr_class = svm_get_nr_classes(h_prob); struct svm_node ***SV = (struct svm_node ***)malloc(sizeof(struct svm_node **) * nr_grid * nr_fold); // The model->sv_coef is a two-dimension linked list float ***sv_coef_p = (float ***)malloc(sizeof(float **) * nr_grid * nr_fold); float ***sv_coef = (float ***)malloc(sizeof(float **) * nr_grid * nr_fold); for (i=0; i<nr_grid; i++) for (j=0; j<nr_fold; j++) sv_coef[i*nr_fold+j] = (float **)malloc(sizeof(float *) * (nr_class-1)); float **rho = (float **)malloc(sizeof(float *) * nr_grid * nr_fold); float **probA = (float **)malloc(sizeof(float *) * nr_grid * nr_fold); float **probB = (float **)malloc(sizeof(float *) * nr_grid * nr_fold); int **sv_indices = (int **)malloc(sizeof(int *) * nr_grid * nr_fold); int **label = (int **)malloc(sizeof(int *) * nr_grid * nr_fold); int **nSV = (int **)malloc(sizeof(int *) * nr_grid * nr_fold); // Build the storage structure for results for (i=0; i<nr_grid; i++) for (j=0; j<nr_fold; j++) { hipMalloc(&(SV[i*nr_fold+j]), sizeof(struct svm_node *) * h_subprobs[j].l); hipMalloc(&(sv_coef_p[i*nr_fold+j]), sizeof(float *) * (nr_class-1)); for (k=0; k<nr_class-1; k++) hipMalloc(&(sv_coef[i*nr_fold+j][k]), sizeof(float) * h_subprobs[j].l); hipMalloc(&(rho[i*nr_fold+j]), sizeof(float) * nr_class*(nr_class-1)/2); hipMalloc(&(probA[i*nr_fold+j]), sizeof(float) * nr_class*(nr_class-1)/2); hipMalloc(&(probB[i*nr_fold+j]), sizeof(float) * nr_class*(nr_class-1)/2); hipMalloc(&(sv_indices[i*nr_fold+j]), sizeof(int) * h_subprobs[j].l); hipMalloc(&(label[i*nr_fold+j]), sizeof(int) * nr_class); 
hipMalloc(&(nSV[i*nr_fold+j]), sizeof(int) * nr_class); struct svm_model *models = (struct svm_model *)((char*)submodels + i*pitch); hipMemcpy(&(models[j].SV), &(SV[i*nr_fold+j]), sizeof(struct svm_node **), hipMemcpyHostToDevice); hipMemcpy(&(models[j].sv_coef), &(sv_coef_p[i*nr_fold+j]), sizeof(float **), hipMemcpyHostToDevice); hipMemcpy(sv_coef_p[i*nr_fold+j], sv_coef[i*nr_fold+j], sizeof(float *) * (nr_class-1), hipMemcpyHostToDevice); hipMemcpy(&(models[j].rho), &(rho[i*nr_fold+j]), sizeof(float *), hipMemcpyHostToDevice); hipMemcpy(&(models[j].probA), &(probA[i*nr_fold+j]), sizeof(float *), hipMemcpyHostToDevice); hipMemcpy(&(models[j].probB), &(probB[i*nr_fold+j]), sizeof(float *), hipMemcpyHostToDevice); hipMemcpy(&(models[j].sv_indices), &(sv_indices[i*nr_fold+j]), sizeof(int *), hipMemcpyHostToDevice); hipMemcpy(&(models[j].label), &(label[i*nr_fold+j]), sizeof(int *), hipMemcpyHostToDevice); hipMemcpy(&(models[j].nSV), &(nSV[i*nr_fold+j]), sizeof(int *), hipMemcpyHostToDevice); } // // Run the kernel // hipDeviceSetLimit(hipLimitMallocHeapSize, DEVICE_HEAP_SIZE); hipLaunchKernelGGL(( cuda_svm_train_kernel), dim3(nr_grid), dim3(nr_fold*WARP_SIZE), 0, 0, subprobs, params, submodels, pitch, nr_grid, nr_fold); if (hipGetLastError() == hipSuccess) { if (hipDeviceSynchronize() == hipSuccess) { // Copy results (submodels) from device to host hipMemcpy2D(h_submodels, sizeof(struct svm_model) * nr_fold, submodels, pitch, sizeof(struct svm_model) * nr_fold, nr_grid, hipMemcpyDeviceToHost); for (i=0; i<nr_grid; i++) for (j=0; j<nr_fold; j++) { struct svm_model *model = h_submodels + i*nr_fold + j; model->SV = (struct svm_node **)malloc(sizeof(struct svm_node *) * h_subprobs[j].l); hipMemcpy(model->SV, SV[i*nr_fold+j], sizeof(struct svm_node *) * h_subprobs[j].l, hipMemcpyDeviceToHost); // The returned model->SV is a index to the subprob, instead of a pointer for(k=0; k<model->l; k++) model->SV[k] = (struct svm_node 
*)(h_prob->x[int(h_subprobs[j].x[int(model->SV[k])])]); model->sv_coef = (float **)malloc(sizeof(float *) * (nr_class-1)); for (k=0; k<nr_class-1; k++) { model->sv_coef[k] = (float *)malloc(sizeof(float) * h_subprobs[j].l); hipMemcpy(model->sv_coef[k], sv_coef[i*nr_fold+j][k], sizeof(float) * h_subprobs[j].l, hipMemcpyDeviceToHost); } model->rho = (float *)malloc(sizeof(float) * nr_class*(nr_class-1)/2); model->probA = (float *)malloc(sizeof(float) * nr_class*(nr_class-1)/2); model->probB = (float *)malloc(sizeof(float) * nr_class*(nr_class-1)/2); model->sv_indices = (int *)malloc(sizeof(int) * h_subprobs[j].l); model->label = (int *)malloc(sizeof(int) * nr_class); model->nSV = (int *)malloc(sizeof(int) * nr_class); hipMemcpy(model->rho, rho[i*nr_fold+j], sizeof(float) * nr_class*(nr_class-1)/2, hipMemcpyDeviceToHost); hipMemcpy(model->probA, probA[i*nr_fold+j], sizeof(float) * nr_class*(nr_class-1)/2, hipMemcpyDeviceToHost); hipMemcpy(model->probB, probB[i*nr_fold+j], sizeof(float) * nr_class*(nr_class-1)/2, hipMemcpyDeviceToHost); hipMemcpy(model->sv_indices, sv_indices[i*nr_fold+j], sizeof(int) * h_subprobs[j].l, hipMemcpyDeviceToHost); hipMemcpy(model->label, label[i*nr_fold+j], sizeof(int) * nr_class, hipMemcpyDeviceToHost); hipMemcpy(model->nSV, nSV[i*nr_fold+j], sizeof(int) * nr_class, hipMemcpyDeviceToHost); } } else { fprintf(stderr, "Error when running CUDA svm train: %s\n", hipGetErrorString(hipGetLastError())); res = 1; } } else { fprintf(stderr, "Error when launching CUDA svm train\n"); res = 1; } // // Free all the memory allocated in device and host // for (i=0; i<h_prob->l; i++) hipFree(x_space[i]); free(x_space); for (i=0; i<nr_fold; i++) { hipFree(y[i]); hipFree(x[i]); } free(y); free(x); hipFree(subprobs); hipFree(params); hipFree(submodels); for (i=0; i<nr_grid; i++) for (j=0; j<nr_fold; j++) { hipFree(SV[i*nr_fold+j]); for (k=0; k<nr_class-1; k++) hipFree(sv_coef[i*nr_fold+j][k]); free(sv_coef[i*nr_fold+j]); hipFree(sv_coef_p[i*nr_fold+j]); 
hipFree(rho[i*nr_fold+j]); hipFree(probA[i*nr_fold+j]); hipFree(probB[i*nr_fold+j]); hipFree(sv_indices[i*nr_fold+j]); hipFree(label[i*nr_fold+j]); hipFree(nSV[i*nr_fold+j]); } free(SV); free(sv_coef_p); free(sv_coef); free(rho); free(probA); free(probB); free(sv_indices); free(label); free(nSV); if (hipGetLastError() != hipSuccess) { fprintf(stderr, "Error when cleaning CUDA svm train\n"); res = 1; } return res; }
09a5cfc17b1cdd1000be01df7f8206b62a6af97e.cu
#include <stdio.h> #include <cuda_runtime.h> #ifdef NVML #include "nvml.h" #endif #include "svm.h" #include "cuda_svm.h" typedef signed char schar; template <class T> __device__ static inline T min(T x,T y) { return (x<y)?x:y; } template <class T> __device__ static inline T max(T x,T y) { return (x>y)?x:y; } template <class T> __device__ static inline void swap(T& x, T& y) { T t=x; x=y; y=t; } template <class S, class T> __device__ static inline void clone(T*& dst, S* src, int n) { dst = (T *)malloc(sizeof(T) * n); memcpy((void *)dst, (void *)src, sizeof(T)*n); } __device__ static inline float powi(float base, int times) { float tmp = base, ret = 1.0; for(int t=times; t>0; t/=2) { if(t%2==1) ret*=tmp; tmp = tmp * tmp; } return ret; } #define INF HUGE_VAL #define TAU 1e-12 __device__ struct svm_model *cuda_device_svm_train_no_prob(const struct svm_problem *prob, const struct svm_parameter *param); __device__ float cuda_svm_predict_values(const svm_model *model, const svm_node *x, float* dec_values); __device__ void cuda_svm_cross_validation(const svm_problem *prob, const svm_parameter *param, int nr_fold, float *target); class CUDA_Rand { private: int seed; public: __device__ CUDA_Rand() { seed = 0; } __device__ int rand_int(const int max) { seed = ((seed * 1103515245) + 12345) & 0x7fffffff; return seed%max; } }; // // Kernel Cache // // l is the number of total data items // size is the cache size limit in bytes // class CUDA_Cache { public: __device__ CUDA_Cache(int l,long int size); __device__ ~CUDA_Cache(); // request data [0,len) // return some position p where [p,len) need to be filled // (p >= len if nothing needs to be filled) __device__ int get_data(const int index, float **data, int len); __device__ void swap_index(int i, int j); private: int l; long int size; struct head_t { head_t *prev, *next; // a circular list float *data; int len; // data[0,len) is cached in this entry }; head_t *head; head_t lru_head; __device__ void lru_delete(head_t *h); 
__device__ void lru_insert(head_t *h); }; __device__ CUDA_Cache::CUDA_Cache(int l_,long int size_):l(l_),size(size_) { head = (head_t *)malloc(sizeof(head_t) * l); // initialized to 0 memset(head, 0, sizeof(head_t) *l); size /= sizeof(float); size -= l * sizeof(head_t) / sizeof(float); size = max(size, 2 * (long int) l); // cache must be large enough for two columns lru_head.next = lru_head.prev = &lru_head; } __device__ CUDA_Cache::~CUDA_Cache() { for(head_t *h = lru_head.next; h != &lru_head; h=h->next) free(h->data); free(head); } __device__ void CUDA_Cache::lru_delete(head_t *h) { // delete from current location h->prev->next = h->next; h->next->prev = h->prev; } __device__ void CUDA_Cache::lru_insert(head_t *h) { // insert to last position h->next = &lru_head; h->prev = lru_head.prev; h->prev->next = h; h->next->prev = h; } __device__ int CUDA_Cache::get_data(const int index, float **data, int len) { head_t *h = &head[index]; if(h->len) lru_delete(h); int more = len - h->len; if (more > 0) { // free old space while(size < more) { head_t *old = lru_head.next; lru_delete(old); free(old->data); size += old->len; old->data = 0; old->len = 0; } // allocate new space float *tp = h->data; h->data = (float *)malloc(sizeof(float)*len); if (tp != NULL) { memcpy(h->data, tp, sizeof(float)*h->len); free(tp); } size -= more; swap(h->len,len); } lru_insert(h); *data = h->data; return len; } __device__ void CUDA_Cache::swap_index(int i, int j) { if(i==j) return; if(head[i].len) lru_delete(&head[i]); if(head[j].len) lru_delete(&head[j]); swap(head[i].data,head[j].data); swap(head[i].len,head[j].len); if(head[i].len) lru_insert(&head[i]); if(head[j].len) lru_insert(&head[j]); if(i>j) swap(i,j); for(head_t *h = lru_head.next; h!=&lru_head; h=h->next) { if(h->len > i) { if(h->len > j) swap(h->data[i],h->data[j]); else { // give up lru_delete(h); free(h->data); size += h->len; h->data = 0; h->len = 0; } } } } // // Kernel evaluation // // the static method k_function is for doing 
single kernel evaluation // the constructor of Kernel prepares to calculate the l*l kernel matrix // the member function get_Q is for getting one column from the Q Matrix // class CUDA_QMatrix { public: __device__ virtual float *get_Q(int column, int len) const = 0; __device__ virtual float *get_QD() const = 0; __device__ virtual void swap_index(int i, int j) const = 0; __device__ virtual ~CUDA_QMatrix() {} }; class CUDA_Kernel: public CUDA_QMatrix { public: __device__ CUDA_Kernel(int l, svm_node * const * x, const svm_parameter& param); __device__ virtual ~CUDA_Kernel(); __device__ static float k_function(const svm_node *x, const svm_node *y, const svm_parameter& param); __device__ virtual float *get_Q(int column, int len) const = 0; __device__ virtual float *get_QD() const = 0; __device__ virtual void swap_index(int i, int j) const // no so const... { swap(x[i],x[j]); if(x_square) swap(x_square[i],x_square[j]); } protected: float (CUDA_Kernel::*kernel_function)(int i, int j) const; private: const svm_node **x; float *x_square; // svm_parameter const int kernel_type; const int degree; const float gamma; const float coef0; __device__ static float dot(const svm_node *px, const svm_node *py); __device__ float kernel_linear(int i, int j) const { return dot(x[i],x[j]); } __device__ float kernel_poly(int i, int j) const { return powi(gamma*dot(x[i],x[j])+coef0,degree); } __device__ float kernel_rbf(int i, int j) const { return expf(-gamma*(x_square[i]+x_square[j]-2*dot(x[i],x[j]))); } __device__ float kernel_sigmoid(int i, int j) const { return tanh(gamma*dot(x[i],x[j])+coef0); } __device__ float kernel_precomputed(int i, int j) const { return x[i][(int)(x[j][0].value)].value; } }; __device__ CUDA_Kernel::CUDA_Kernel(int l, svm_node * const * x_, const svm_parameter& param) :kernel_type(param.kernel_type), degree(param.degree), gamma(param.gamma), coef0(param.coef0) { switch(kernel_type) { case LINEAR: kernel_function = &CUDA_Kernel::kernel_linear; break; case POLY: 
kernel_function = &CUDA_Kernel::kernel_poly; break; case RBF: kernel_function = &CUDA_Kernel::kernel_rbf; break; case SIGMOID: kernel_function = &CUDA_Kernel::kernel_sigmoid; break; case PRECOMPUTED: kernel_function = &CUDA_Kernel::kernel_precomputed; break; } clone(x,x_,l); if(kernel_type == RBF) { x_square = new float[l]; for(int i=0;i<l;i++) x_square[i] = dot(x[i],x[i]); } else x_square = 0; } __device__ CUDA_Kernel::~CUDA_Kernel() { delete[] x; delete[] x_square; } __device__ float CUDA_Kernel::dot(const svm_node *px, const svm_node *py) { float sum = 0; while(px->index != -1 && py->index != -1) { if(px->index == py->index) { sum += px->value * py->value; ++px; ++py; } else { if(px->index > py->index) ++py; else ++px; } } return sum; } __device__ float CUDA_Kernel::k_function(const svm_node *x, const svm_node *y, const svm_parameter& param) { switch(param.kernel_type) { case LINEAR: return dot(x,y); case POLY: return powi(param.gamma*dot(x,y)+param.coef0,param.degree); case RBF: { float sum = 0; while(x->index != -1 && y->index !=-1) { if(x->index == y->index) { float d = x->value - y->value; sum += d*d; ++x; ++y; } else { if(x->index > y->index) { sum += y->value * y->value; ++y; } else { sum += x->value * x->value; ++x; } } } while(x->index != -1) { sum += x->value * x->value; ++x; } while(y->index != -1) { sum += y->value * y->value; ++y; } return expf(-param.gamma*sum); } case SIGMOID: return tanhf(param.gamma*dot(x,y)+param.coef0); case PRECOMPUTED: //x: test (validation), y: SV return x[(int)(y->value)].value; default: return 0; // Unreachable } } // An SMO algorithm in Fan et al., JMLR 6(2005), p. 
1889--1918 // Solves: // // min 0.5(\alpha^T Q \alpha) + p^T \alpha // // y^T \alpha = \delta // y_i = +1 or -1 // 0 <= alpha_i <= Cp for y_i = 1 // 0 <= alpha_i <= Cn for y_i = -1 // // Given: // // Q, p, y, Cp, Cn, and an initial feasible point \alpha // l is the size of vectors and matrices // eps is the stopping tolerance // // solution will be put in \alpha, objective value will be put in obj // class CUDA_Solver { public: __device__ CUDA_Solver() {}; __device__ virtual ~CUDA_Solver() {}; struct SolutionInfo { float obj; float rho; float upper_bound_p; float upper_bound_n; float r; // for CUDA_Solver_NU }; __device__ void Solve(int l, const CUDA_QMatrix& Q, const float *p_, const schar *y_, float *alpha_, float Cp, float Cn, float eps, SolutionInfo* si, int shrinking); protected: int active_size; schar *y; float *G; // gradient of objective function enum { LOWER_BOUND, UPPER_BOUND, FREE }; char *alpha_status; // LOWER_BOUND, UPPER_BOUND, FREE float *alpha; const CUDA_QMatrix *Q; const float *QD; float eps; float Cp,Cn; float *p; int *active_set; float *G_bar; // gradient, if we treat free variables as 0 int l; bool unshrink; // XXX __device__ float get_C(int i) { return (y[i] > 0)? 
Cp : Cn; } __device__ void update_alpha_status(int i) { if(alpha[i] >= get_C(i)) alpha_status[i] = UPPER_BOUND; else if(alpha[i] <= 0) alpha_status[i] = LOWER_BOUND; else alpha_status[i] = FREE; } __device__ bool is_upper_bound(int i) { return alpha_status[i] == UPPER_BOUND; } __device__ bool is_lower_bound(int i) { return alpha_status[i] == LOWER_BOUND; } __device__ bool is_free(int i) { return alpha_status[i] == FREE; } __device__ void swap_index(int i, int j); __device__ void reconstruct_gradient(); __device__ virtual int select_working_set(int &i, int &j); __device__ virtual float calculate_rho(); __device__ virtual void do_shrinking(); private: __device__ bool be_shrunk(int i, float Gmax1, float Gmax2); }; __device__ void CUDA_Solver::swap_index(int i, int j) { Q->swap_index(i,j); swap(y[i],y[j]); swap(G[i],G[j]); swap(alpha_status[i],alpha_status[j]); swap(alpha[i],alpha[j]); swap(p[i],p[j]); swap(active_set[i],active_set[j]); swap(G_bar[i],G_bar[j]); } __device__ void CUDA_Solver::reconstruct_gradient() { // reconstruct inactive elements of G from G_bar and free variables if(active_size == l) return; int i,j; int nr_free = 0; for(j=active_size;j<l;j++) G[j] = G_bar[j] + p[j]; for(j=0;j<active_size;j++) if(is_free(j)) nr_free++; if(2*nr_free < active_size) printf("\nWARNING: using -h 0 may be faster\n"); if (nr_free*l > 2*active_size*(l-active_size)) { for(i=active_size;i<l;i++) { const float *Q_i = Q->get_Q(i,active_size); for(j=0;j<active_size;j++) if(is_free(j)) G[i] += alpha[j] * Q_i[j]; } } else { for(i=0;i<active_size;i++) if(is_free(i)) { const float *Q_i = Q->get_Q(i,l); float alpha_i = alpha[i]; for(j=active_size;j<l;j++) G[j] += alpha_i * Q_i[j]; } } } __device__ void CUDA_Solver::Solve(int l, const CUDA_QMatrix& Q, const float *p_, const schar *y_, float *alpha_, float Cp, float Cn, float eps, SolutionInfo* si, int shrinking) { this->l = l; this->Q = &Q; QD=Q.get_QD(); clone(p, p_,l); clone(y, y_,l); clone(alpha,alpha_,l); this->Cp = Cp; this->Cn = 
Cn; this->eps = eps; unshrink = false; // initialize alpha_status { alpha_status = (char *)malloc(sizeof(char) * l); for(int i=0;i<l;i++) update_alpha_status(i); } // initialize active set (for shrinking) { active_set = (int *)malloc(sizeof(int) * l);; for(int i=0;i<l;i++) active_set[i] = i; active_size = l; } // initialize gradient { G = (float *)malloc(sizeof(float) * l); G_bar = (float *)malloc(sizeof(float) * l); int i; for(i=0;i<l;i++) { G[i] = p[i]; G_bar[i] = 0; } for(i=0;i<l;i++) if(!is_lower_bound(i)) { const float *Q_i = Q.get_Q(i,l); float alpha_i = alpha[i]; int j; for(j=0;j<l;j++) G[j] += alpha_i*Q_i[j]; if(is_upper_bound(i)) for(j=0;j<l;j++) G_bar[j] += get_C(i) * Q_i[j]; } } // optimization step int iter = 0; int max_iter = max(10000000, l>INT_MAX/100 ? INT_MAX : 100*l); int counter = min(l,1000)+1; while(iter < max_iter) { // show progress and do shrinking if(--counter == 0) { counter = min(l,1000); if(shrinking) do_shrinking(); printf("."); } int i,j; if (select_working_set(i,j)!=0) { // reconstruct the whole gradient reconstruct_gradient(); // reset active set size and check active_size = l; printf("*"); if(select_working_set(i,j)!=0) break; else counter = 1; // do shrinking next iteration } ++iter; // update alpha[i] and alpha[j], handle bounds carefully const float *Q_i = Q.get_Q(i,active_size); const float *Q_j = Q.get_Q(j,active_size); float C_i = get_C(i); float C_j = get_C(j); float old_alpha_i = alpha[i]; float old_alpha_j = alpha[j]; if(y[i]!=y[j]) { float quad_coef = QD[i]+QD[j]+2*Q_i[j]; if (quad_coef <= 0) quad_coef = TAU; float delta = (-G[i]-G[j])/quad_coef; float diff = alpha[i] - alpha[j]; alpha[i] += delta; alpha[j] += delta; if(diff > 0) { if(alpha[j] < 0) { alpha[j] = 0; alpha[i] = diff; } } else { if(alpha[i] < 0) { alpha[i] = 0; alpha[j] = -diff; } } if(diff > C_i - C_j) { if(alpha[i] > C_i) { alpha[i] = C_i; alpha[j] = C_i - diff; } } else { if(alpha[j] > C_j) { alpha[j] = C_j; alpha[i] = C_j + diff; } } } else { float 
quad_coef = QD[i]+QD[j]-2*Q_i[j]; if (quad_coef <= 0) quad_coef = TAU; float delta = (G[i]-G[j])/quad_coef; float sum = alpha[i] + alpha[j]; alpha[i] -= delta; alpha[j] += delta; if(sum > C_i) { if(alpha[i] > C_i) { alpha[i] = C_i; alpha[j] = sum - C_i; } } else { if(alpha[j] < 0) { alpha[j] = 0; alpha[i] = sum; } } if(sum > C_j) { if(alpha[j] > C_j) { alpha[j] = C_j; alpha[i] = sum - C_j; } } else { if(alpha[i] < 0) { alpha[i] = 0; alpha[j] = sum; } } } // update G float delta_alpha_i = alpha[i] - old_alpha_i; float delta_alpha_j = alpha[j] - old_alpha_j; for(int k=0;k<active_size;k++) { G[k] += Q_i[k]*delta_alpha_i + Q_j[k]*delta_alpha_j; } // update alpha_status and G_bar { bool ui = is_upper_bound(i); bool uj = is_upper_bound(j); update_alpha_status(i); update_alpha_status(j); int k; if(ui != is_upper_bound(i)) { Q_i = Q.get_Q(i,l); if(ui) for(k=0;k<l;k++) G_bar[k] -= C_i * Q_i[k]; else for(k=0;k<l;k++) G_bar[k] += C_i * Q_i[k]; } if(uj != is_upper_bound(j)) { Q_j = Q.get_Q(j,l); if(uj) for(k=0;k<l;k++) G_bar[k] -= C_j * Q_j[k]; else for(k=0;k<l;k++) G_bar[k] += C_j * Q_j[k]; } } } if(iter >= max_iter) { if(active_size < l) { // reconstruct the whole gradient to calculate objective value reconstruct_gradient(); active_size = l; printf("*"); } printf("\nWARNING: reaching max number of iterations\n"); } // calculate rho si->rho = calculate_rho(); // calculate objective value { float v = 0; int i; for(i=0;i<l;i++) v += alpha[i] * (G[i] + p[i]); si->obj = v/2; } // put back the solution { for(int i=0;i<l;i++) alpha_[active_set[i]] = alpha[i]; } // juggle everything back /*{ for(int i=0;i<l;i++) while(active_set[i] != i) swap_index(i,active_set[i]); // or Q.swap_index(i,active_set[i]); }*/ si->upper_bound_p = Cp; si->upper_bound_n = Cn; printf("\noptimization finished, #iter = %d\n",iter); free(p); free(y); free(alpha); free(alpha_status); free(active_set); free(G); free(G_bar); } // return 1 if already optimal, return 0 otherwise __device__ int 
CUDA_Solver::select_working_set(int &out_i, int &out_j)
{
	// Working-set selection (maximal violating pair + second-order info):
	// return i,j such that
	//   i: maximizes -y_i * grad(f)_i, i in I_up(\alpha)
	//   j: minimizes the decrease of obj value
	//      (if quadratic coefficient <= 0, replace it with tau)
	//      -y_j*grad(f)_j < -y_i*grad(f)_i, j in I_low(\alpha)
	float Gmax = -INF;
	float Gmax2 = -INF;
	int Gmax_idx = -1;
	int Gmin_idx = -1;
	float obj_diff_min = INF;

	// first pass: pick i = argmax over I_up of -y_t * grad(f)_t
	for(int t=0;t<active_size;t++)
		if(y[t]==+1)
		{
			if(!is_upper_bound(t))
				if(-G[t] >= Gmax)
				{
					Gmax = -G[t];
					Gmax_idx = t;
				}
		}
		else
		{
			if(!is_lower_bound(t))
				if(G[t] >= Gmax)
				{
					Gmax = G[t];
					Gmax_idx = t;
				}
		}

	int i = Gmax_idx;
	const float *Q_i = NULL;
	if(i != -1) // NULL Q_i not accessed: Gmax=-INF if i=-1
		Q_i = Q->get_Q(i,active_size);

	// second pass: pick j in I_low giving the largest objective decrease
	for(int j=0;j<active_size;j++)
	{
		if(y[j]==+1)
		{
			if (!is_lower_bound(j))
			{
				float grad_diff=Gmax+G[j];
				if (G[j] >= Gmax2)
					Gmax2 = G[j];
				if (grad_diff > 0)
				{
					float obj_diff;
					float quad_coef = QD[i]+QD[j]-2.0*y[i]*Q_i[j];
					if (quad_coef > 0)
						obj_diff = -(grad_diff*grad_diff)/quad_coef;
					else
						obj_diff = -(grad_diff*grad_diff)/TAU;
					if (obj_diff <= obj_diff_min)
					{
						Gmin_idx=j;
						obj_diff_min = obj_diff;
					}
				}
			}
		}
		else
		{
			if (!is_upper_bound(j))
			{
				float grad_diff= Gmax-G[j];
				if (-G[j] >= Gmax2)
					Gmax2 = -G[j];
				if (grad_diff > 0)
				{
					float obj_diff;
					float quad_coef = QD[i]+QD[j]+2.0*y[i]*Q_i[j];
					if (quad_coef > 0)
						obj_diff = -(grad_diff*grad_diff)/quad_coef;
					else
						obj_diff = -(grad_diff*grad_diff)/TAU;
					if (obj_diff <= obj_diff_min)
					{
						Gmin_idx=j;
						obj_diff_min = obj_diff;
					}
				}
			}
		}
	}

	// optimal within tolerance eps
	if(Gmax+Gmax2 < eps)
		return 1;

	out_i = Gmax_idx;
	out_j = Gmin_idx;
	return 0;
}

// True if variable i can be shrunk out of the active set given the
// current maximal-violating-pair bounds Gmax1/Gmax2.
__device__ bool CUDA_Solver::be_shrunk(int i, float Gmax1, float Gmax2)
{
	if(is_upper_bound(i))
	{
		if(y[i]==+1)
			return(-G[i] > Gmax1);
		else
			return(-G[i] > Gmax2);
	}
	else if(is_lower_bound(i))
	{
		if(y[i]==+1)
			return(G[i] > Gmax2);
		else
			return(G[i] > Gmax1);
	}
	else
		return(false);
}

// Shrink the active set: variables unlikely to change are swapped to the
// end and active_size is reduced; un-shrinks once near optimality.
__device__ void CUDA_Solver::do_shrinking()
{
	int i;
	float Gmax1 = -INF;	// max { -y_i * grad(f)_i | i in I_up(\alpha) }
	float Gmax2 = -INF;	// max { y_i * grad(f)_i | i in I_low(\alpha) }

	// find maximal violating pair first
	for(i=0;i<active_size;i++)
	{
		if(y[i]==+1)
		{
			if(!is_upper_bound(i))
			{
				if(-G[i] >= Gmax1)
					Gmax1 = -G[i];
			}
			if(!is_lower_bound(i))
			{
				if(G[i] >= Gmax2)
					Gmax2 = G[i];
			}
		}
		else
		{
			if(!is_upper_bound(i))
			{
				if(-G[i] >= Gmax2)
					Gmax2 = -G[i];
			}
			if(!is_lower_bound(i))
			{
				if(G[i] >= Gmax1)
					Gmax1 = G[i];
			}
		}
	}

	// near-optimal: reconstruct the full gradient and reactivate everything
	if(unshrink == false && Gmax1 + Gmax2 <= eps*10)
	{
		unshrink = true;
		reconstruct_gradient();
		active_size = l;
		printf("*");
	}

	for(i=0;i<active_size;i++)
		if (be_shrunk(i, Gmax1, Gmax2))
		{
			active_size--;
			while (active_size > i)
			{
				if (!be_shrunk(active_size, Gmax1, Gmax2))
				{
					swap_index(i,active_size);
					break;
				}
				active_size--;
			}
		}
}

// rho (bias term): average gradient over free SVs, or midpoint of bounds.
__device__ float CUDA_Solver::calculate_rho()
{
	float r;
	int nr_free = 0;
	float ub = INF, lb = -INF, sum_free = 0;
	for(int i=0;i<active_size;i++)
	{
		float yG = y[i]*G[i];

		if(is_upper_bound(i))
		{
			if(y[i]==-1)
				ub = min(ub,yG);
			else
				lb = max(lb,yG);
		}
		else if(is_lower_bound(i))
		{
			if(y[i]==+1)
				ub = min(ub,yG);
			else
				lb = max(lb,yG);
		}
		else
		{
			++nr_free;
			sum_free += yG;
		}
	}

	if(nr_free>0)
		r = sum_free/nr_free;
	else
		r = (ub+lb)/2;

	return r;
}

//
// CUDA_Solver for nu-svm classification and regression
//
// additional constraint: e^T \alpha = constant
//
class CUDA_Solver_NU: public CUDA_Solver
{
public:
	__device__ CUDA_Solver_NU() {}
	__device__ void Solve(int l, const CUDA_QMatrix& Q, const float *p, const schar *y,
		   float *alpha, float Cp, float Cn, float eps,
		   SolutionInfo* si, int shrinking)
	{
		this->si = si;
		CUDA_Solver::Solve(l,Q,p,y,alpha,Cp,Cn,eps,si,shrinking);
	}
private:
	SolutionInfo *si;
	__device__ int select_working_set(int &i, int &j);
	__device__ float calculate_rho();
	__device__ bool be_shrunk(int i, float Gmax1, float Gmax2, float Gmax3, float Gmax4);
	__device__ void do_shrinking();
};

// return 1 if already optimal, return 0 otherwise
__device__ int CUDA_Solver_NU::select_working_set(int &out_i, int &out_j)
{
	// return i,j such that y_i = y_j and
	//   i: maximizes -y_i * grad(f)_i, i in I_up(\alpha)
	//   j: minimizes the decrease of obj value
	//      (if quadratic coefficient <= 0, replace it with tau)
	//      -y_j*grad(f)_j < -y_i*grad(f)_i, j in I_low(\alpha)
	float Gmaxp = -INF;
	float Gmaxp2 = -INF;
	int Gmaxp_idx = -1;

	float Gmaxn = -INF;
	float Gmaxn2 = -INF;
	int Gmaxn_idx = -1;

	int Gmin_idx = -1;
	float obj_diff_min = INF;

	for(int t=0;t<active_size;t++)
		if(y[t]==+1)
		{
			if(!is_upper_bound(t))
				if(-G[t] >= Gmaxp)
				{
					Gmaxp = -G[t];
					Gmaxp_idx = t;
				}
		}
		else
		{
			if(!is_lower_bound(t))
				if(G[t] >= Gmaxn)
				{
					Gmaxn = G[t];
					Gmaxn_idx = t;
				}
		}

	int ip = Gmaxp_idx;
	int in = Gmaxn_idx;
	const float *Q_ip = NULL;
	const float *Q_in = NULL;
	if(ip != -1) // NULL Q_ip not accessed: Gmaxp=-INF if ip=-1
		Q_ip = Q->get_Q(ip,active_size);
	if(in != -1)
		Q_in = Q->get_Q(in,active_size);

	for(int j=0;j<active_size;j++)
	{
		if(y[j]==+1)
		{
			if (!is_lower_bound(j))
			{
				float grad_diff=Gmaxp+G[j];
				if (G[j] >= Gmaxp2)
					Gmaxp2 = G[j];
				if (grad_diff > 0)
				{
					float obj_diff;
					float quad_coef = QD[ip]+QD[j]-2*Q_ip[j];
					if (quad_coef > 0)
						obj_diff = -(grad_diff*grad_diff)/quad_coef;
					else
						obj_diff = -(grad_diff*grad_diff)/TAU;
					if (obj_diff <= obj_diff_min)
					{
						Gmin_idx=j;
						obj_diff_min = obj_diff;
					}
				}
			}
		}
		else
		{
			if (!is_upper_bound(j))
			{
				float grad_diff=Gmaxn-G[j];
				if (-G[j] >= Gmaxn2)
					Gmaxn2 = -G[j];
				if (grad_diff > 0)
				{
					float obj_diff;
					float quad_coef = QD[in]+QD[j]-2*Q_in[j];
					if (quad_coef > 0)
						obj_diff = -(grad_diff*grad_diff)/quad_coef;
					else
						obj_diff = -(grad_diff*grad_diff)/TAU;
					if (obj_diff <= obj_diff_min)
					{
						Gmin_idx=j;
						obj_diff_min = obj_diff;
					}
				}
			}
		}
	}

	if(max(Gmaxp+Gmaxp2,Gmaxn+Gmaxn2) < eps)
		return 1;

	if (y[Gmin_idx] == +1)
		out_i = Gmaxp_idx;
	else
		out_i = Gmaxn_idx;
	out_j = Gmin_idx;

	return 0;
}

// Shrinking test for the nu formulation: four bounds, split by label sign.
__device__ bool CUDA_Solver_NU::be_shrunk(int i, float Gmax1, float Gmax2, float Gmax3, float Gmax4)
{
	if(is_upper_bound(i))
	{
		if(y[i]==+1)
			return(-G[i] > Gmax1);
		else
			return(-G[i] > Gmax4);
	}
	else if(is_lower_bound(i))
	{
		if(y[i]==+1)
			return(G[i] > Gmax2);
		else
			return(G[i] > Gmax3);
	}
	else
		return(false);
}

__device__ void CUDA_Solver_NU::do_shrinking()
{
	float Gmax1 = -INF;	// max { -y_i * grad(f)_i | y_i = +1, i in I_up(\alpha) }
	float Gmax2 = -INF;	// max { y_i * grad(f)_i | y_i = +1, i in I_low(\alpha) }
	float Gmax3 = -INF;	// max { -y_i * grad(f)_i | y_i = -1, i in I_up(\alpha) }
	float Gmax4 = -INF;	// max { y_i * grad(f)_i | y_i = -1, i in I_low(\alpha) }

	// find maximal violating pair first
	int i;
	for(i=0;i<active_size;i++)
	{
		if(!is_upper_bound(i))
		{
			if(y[i]==+1)
			{
				if(-G[i] > Gmax1) Gmax1 = -G[i];
			}
			else	if(-G[i] > Gmax4) Gmax4 = -G[i];
		}
		if(!is_lower_bound(i))
		{
			if(y[i]==+1)
			{
				if(G[i] > Gmax2) Gmax2 = G[i];
			}
			else	if(G[i] > Gmax3) Gmax3 = G[i];
		}
	}

	if(unshrink == false && max(Gmax1+Gmax2,Gmax3+Gmax4) <= eps*10)
	{
		unshrink = true;
		reconstruct_gradient();
		active_size = l;
	}

	for(i=0;i<active_size;i++)
		if (be_shrunk(i, Gmax1, Gmax2, Gmax3, Gmax4))
		{
			active_size--;
			while (active_size > i)
			{
				if (!be_shrunk(active_size, Gmax1, Gmax2, Gmax3, Gmax4))
				{
					swap_index(i,active_size);
					break;
				}
				active_size--;
			}
		}
}

// rho for the nu formulation: averages computed per label sign;
// stores r into si->r and returns the decision-function offset.
__device__ float CUDA_Solver_NU::calculate_rho()
{
	int nr_free1 = 0,nr_free2 = 0;
	float ub1 = INF, ub2 = INF;
	float lb1 = -INF, lb2 = -INF;
	float sum_free1 = 0, sum_free2 = 0;

	for(int i=0;i<active_size;i++)
	{
		if(y[i]==+1)
		{
			if(is_upper_bound(i))
				lb1 = max(lb1,G[i]);
			else if(is_lower_bound(i))
				ub1 = min(ub1,G[i]);
			else
			{
				++nr_free1;
				sum_free1 += G[i];
			}
		}
		else
		{
			if(is_upper_bound(i))
				lb2 = max(lb2,G[i]);
			else if(is_lower_bound(i))
				ub2 = min(ub2,G[i]);
			else
			{
				++nr_free2;
				sum_free2 += G[i];
			}
		}
	}

	float r1,r2;
	if(nr_free1 > 0)
		r1 = sum_free1/nr_free1;
	else
		r1 = (ub1+lb1)/2;
	if(nr_free2 > 0)
		r2 = sum_free2/nr_free2;
	else
		r2 = (ub2+lb2)/2;

	si->r = (r1+r2)/2;
	return (r1-r2)/2;
}

//
// Q matrices for various formulations
//
// Q matrix for C-SVC: Q[i][j] = y_i * y_j * K(i,j), rows served via a cache.
class CUDA_SVC_Q: public CUDA_Kernel
{
public:
	__device__ CUDA_SVC_Q(const svm_problem& prob, const svm_parameter& param, const schar *y_)
	:CUDA_Kernel(prob.l, prob.x, param)
	{
		clone(y,y_,prob.l);
		cache = new CUDA_Cache(prob.l,(long
int)(param.cache_size*(1<<20)));
		QD = (float *)malloc(sizeof(float) * prob.l);
		for(int i=0;i<prob.l;i++)
			QD[i] = (this->*kernel_function)(i,i);
	}

	// Return row i of Q (first `len` columns), filling the cache miss region.
	__device__ float *get_Q(int i, int len) const
	{
		float *data;
		int start, j;
		if((start = cache->get_data(i,&data,len)) < len)
		{
			for(j=start;j<len;j++)
				data[j] = (float)(y[i]*y[j]*(this->*kernel_function)(i,j));
		}
		return data;
	}

	__device__ float *get_QD() const
	{
		return QD;
	}

	__device__ void swap_index(int i, int j) const
	{
		cache->swap_index(i,j);
		CUDA_Kernel::swap_index(i,j);
		swap(y[i],y[j]);
		swap(QD[i],QD[j]);
	}

	__device__ ~CUDA_SVC_Q()
	{
		free(y);
		delete cache;
		free(QD);
	}
private:
	schar *y;
	CUDA_Cache *cache;
	float *QD;	// diagonal of Q
};

// Q matrix for one-class SVM: Q[i][j] = K(i,j) (no labels).
class CUDA_ONE_CLASS_Q: public CUDA_Kernel
{
public:
	__device__ CUDA_ONE_CLASS_Q(const svm_problem& prob, const svm_parameter& param)
	:CUDA_Kernel(prob.l, prob.x, param)
	{
		cache = new CUDA_Cache(prob.l,(long int)(param.cache_size*(1<<20)));
		QD = (float *)malloc(sizeof(float) * prob.l);
		for(int i=0;i<prob.l;i++)
			QD[i] = (this->*kernel_function)(i,i);
	}

	__device__ float *get_Q(int i, int len) const
	{
		float *data;
		int start, j;
		if((start = cache->get_data(i,&data,len)) < len)
		{
			for(j=start;j<len;j++)
				data[j] = (float)(this->*kernel_function)(i,j);
		}
		return data;
	}

	__device__ float *get_QD() const
	{
		return QD;
	}

	__device__ void swap_index(int i, int j) const
	{
		cache->swap_index(i,j);
		CUDA_Kernel::swap_index(i,j);
		swap(QD[i],QD[j]);
	}

	__device__ ~CUDA_ONE_CLASS_Q()
	{
		delete cache;
		free(QD);
	}
private:
	CUDA_Cache *cache;
	float *QD;	// diagonal of Q
};

// Q matrix for regression (SVR): problem is doubled to 2*l variables;
// sign[]/index[] map the doubled index space back onto the l real points.
class CUDA_SVR_Q: public CUDA_Kernel
{
public:
	__device__ CUDA_SVR_Q(const svm_problem& prob, const svm_parameter& param)
	:CUDA_Kernel(prob.l, prob.x, param)
	{
		l = prob.l;
		cache = new CUDA_Cache(l,(long int)(param.cache_size*(1<<20)));
		QD = new float[2*l];
		sign = new schar[2*l];
		index = new int[2*l];
		for(int k=0;k<l;k++)
		{
			sign[k] = 1;
			sign[k+l] = -1;
			index[k] = k;
			index[k+l] = k;
			QD[k] = (this->*kernel_function)(k,k);
			QD[k+l] = QD[k];
		}
		buffer[0] = new float[2*l];
		buffer[1] = new float[2*l];
		next_buffer = 0;
	}

	__device__ void swap_index(int i, int j) const
	{
		swap(sign[i],sign[j]);
		swap(index[i],index[j]);
		swap(QD[i],QD[j]);
	}

	__device__ float *get_Q(int i, int len) const
	{
		float *data;
		int j, real_i = index[i];
		if(cache->get_data(real_i,&data,l) < l)
		{
			for(j=0;j<l;j++)
				data[j] = (float)(this->*kernel_function)(real_i,j);
		}

		// reorder and copy (two buffers are alternated so the row returned
		// by the previous call stays valid for one more call)
		float *buf = buffer[next_buffer];
		next_buffer = 1 - next_buffer;
		schar si = sign[i];
		for(j=0;j<len;j++)
			buf[j] = (float) si * (float) sign[j] * data[index[j]];
		return buf;
	}

	__device__ float *get_QD() const
	{
		return QD;
	}

	__device__ ~CUDA_SVR_Q()
	{
		delete cache;
		delete[] sign;
		delete[] index;
		delete[] buffer[0];
		delete[] buffer[1];
		delete[] QD;
	}
private:
	int l;
	CUDA_Cache *cache;
	schar *sign;
	int *index;
	mutable int next_buffer;
	float *buffer[2];
	float *QD;	// diagonal of the doubled Q
};

// Group training instances by class label; outputs the number of classes,
// per-class labels/starts/counts (malloc'd, caller frees) and fills perm
// with indices so that instances of the same class are contiguous.
__device__ void cuda_svm_group_classes(const svm_problem *prob, int *nr_class_ret, int **label_ret, int **start_ret, int **count_ret, int *perm)
{
	int l = prob->l;
	int max_nr_class = 16;
	int nr_class = 0;
	int *label = (int *)malloc(sizeof(int) * max_nr_class);
	int *count = (int *)malloc(sizeof(int) * max_nr_class);
	int *data_label = (int *)malloc(sizeof(int) * l);
	int i;

	for(i=0;i<l;i++)
	{
		int this_label = (int)prob->y[i];
		int j;
		for(j=0;j<nr_class;j++)
		{
			if(this_label == label[j])
			{
				++count[j];
				break;
			}
		}
		data_label[i] = j;
		if(j == nr_class)
		{
			// grow label/count arrays (manual realloc: device malloc+copy)
			if(nr_class == max_nr_class)
			{
				int *label_t = label;
				int *count_t = count;
				label = (int *)malloc(2 * max_nr_class * sizeof(int));
				count = (int *)malloc(2 * max_nr_class * sizeof(int));
				int k;
				for (k=0; k<max_nr_class; k++)
				{
					label[k] = label_t[k];
					count[k] = count_t[k];
				}
				free(label_t);
				free(count_t);
				max_nr_class *= 2;
			}
			label[nr_class] = this_label;
			count[nr_class] = 1;
			++nr_class;
		}
	}

	//
	// Labels are ordered by their first occurrence in the training set.
// However, for two-class sets with -1/+1 labels and -1 appears first, // we swap labels to ensure that internally the binary SVM has positive data corresponding to the +1 instances. // if (nr_class == 2 && label[0] == -1 && label[1] == 1) { swap(label[0],label[1]); swap(count[0],count[1]); for(i=0;i<l;i++) { if(data_label[i] == 0) data_label[i] = 1; else data_label[i] = 0; } } int *start = (int *)malloc(sizeof(int) * nr_class); start[0] = 0; for(i=1;i<nr_class;i++) start[i] = start[i-1]+count[i-1]; for(i=0;i<l;i++) { perm[start[data_label[i]]] = i; ++start[data_label[i]]; } start[0] = 0; for(i=1;i<nr_class;i++) start[i] = start[i-1]+count[i-1]; *nr_class_ret = nr_class; *label_ret = label; *start_ret = start; *count_ret = count; free(data_label); } __host__ int svm_get_nr_classes(const svm_problem *prob) { int l = prob->l; int max_nr_class = 16; int nr_class = 0; int *label = (int *)malloc(sizeof(int) * max_nr_class); int i; for(i=0;i<l;i++) { int this_label = (int)prob->y[i]; int j; for(j=0;j<nr_class;j++) if(this_label == label[j]) break; if(j == nr_class) { if(nr_class == max_nr_class) { max_nr_class *= 2; label = (int *)realloc(label,max_nr_class*sizeof(int)); } label[nr_class] = this_label; ++nr_class; } } return nr_class; } __device__ void cuda_svm_free_model_content(svm_model* model_ptr) { if(model_ptr->free_sv && model_ptr->l > 0 && model_ptr->SV != NULL) free((void *)(model_ptr->SV[0])); if(model_ptr->sv_coef) { for(int i=0;i<model_ptr->nr_class-1;i++) free(model_ptr->sv_coef[i]); } free(model_ptr->SV); model_ptr->SV = NULL; free(model_ptr->sv_coef); model_ptr->sv_coef = NULL; free(model_ptr->rho); model_ptr->rho = NULL; free(model_ptr->label); model_ptr->label= NULL; free(model_ptr->probA); model_ptr->probA = NULL; free(model_ptr->probB); model_ptr->probB= NULL; free(model_ptr->sv_indices); model_ptr->sv_indices = NULL; free(model_ptr->nSV); model_ptr->nSV = NULL; } __device__ void cuda_svm_free_and_destroy_model(svm_model** model_ptr_ptr) { 
	if(model_ptr_ptr != NULL && *model_ptr_ptr != NULL)
	{
		cuda_svm_free_model_content(*model_ptr_ptr);
		free(*model_ptr_ptr);
		*model_ptr_ptr = NULL;
	}
}

__device__ void cuda_svm_destroy_param(svm_parameter* param)
{
	free(param->weight_label);
	free(param->weight);
}

// Platt's binary SVM Probablistic Output: an improvement from Lin et al.
// Fits sigmoid parameters A,B to the decision values by Newton's method
// with backtracking line search.
__device__ void cuda_sigmoid_train(int l, const float *dec_values, const float *labels, float& A, float& B)
{
	float prior1=0, prior0 = 0;
	int i;

	for (i=0;i<l;i++)
		if (labels[i] > 0) prior1+=1;
		else prior0+=1;

	int max_iter=100;	// Maximal number of iterations
	float min_step=1e-10;	// Minimal step taken in line search
	float sigma=1e-12;	// For numerically strict PD of Hessian
	float eps=1e-5;
	float hiTarget=(prior1+1.0)/(prior1+2.0);
	float loTarget=1.0/(prior0+2.0);
	float *t=(float *)malloc(sizeof(float) * l);
	float fApB,p,q,h11,h22,h21,g1,g2,det,dA,dB,gd,stepsize;
	float newA,newB,newf,d1,d2;
	int iter;

	// Initial Point and Initial Fun Value
	A=0.0; B=logf((prior0+1.0)/(prior1+1.0));
	float fval = 0.0;

	for (i=0;i<l;i++)
	{
		if (labels[i]>0) t[i]=hiTarget;
		else t[i]=loTarget;
		fApB = dec_values[i]*A+B;
		if (fApB>=0)
			fval += t[i]*fApB + logf(1+expf(-fApB));
		else
			fval += (t[i] - 1)*fApB +logf(1+expf(fApB));
	}
	for (iter=0;iter<max_iter;iter++)
	{
		// Update Gradient and Hessian (use H' = H + sigma I)
		h11=sigma; // numerically ensures strict PD
		h22=sigma;
		h21=0.0;g1=0.0;g2=0.0;
		for (i=0;i<l;i++)
		{
			fApB = dec_values[i]*A+B;
			if (fApB >= 0)
			{
				q=1.0/(1.0+expf(-fApB));
				p=expf(-fApB)*q;
			}
			else
			{
				p=1.0/(1.0+expf(fApB));
				q=expf(fApB)*p;
			}
			d2=p*q;
			h11+=dec_values[i]*dec_values[i]*d2;
			h22+=d2;
			h21+=dec_values[i]*d2;
			d1=t[i]-p;
			g1+=dec_values[i]*d1;
			g2+=d1;
		}

		// Stopping Criteria
		if (fabs(g1)<eps && fabs(g2)<eps)
			break;

		// Finding Newton direction: -inv(H') * g
		det=h11*h22-h21*h21;
		dA=-(h22*g1 - h21 * g2) / det;
		dB=-(-h21*g1+ h11 * g2) / det;
		gd=g1*dA+g2*dB;

		stepsize = 1;		// Line Search
		while (stepsize >= min_step)
		{
			newA = A + stepsize * dA;
			newB = B + stepsize * dB;

			// New function value
			newf = 0.0;
			for (i=0;i<l;i++)
			{
				fApB = dec_values[i]*newA+newB;
				if (fApB >= 0)
					newf += t[i]*fApB + logf(1+expf(-fApB));
				else
					newf += (t[i] - 1)*fApB +logf(1+expf(fApB));
			}
			// Check sufficient decrease
			if (newf<fval+0.0001*stepsize*gd)
			{
				A=newA;B=newB;fval=newf;
				break;
			}
			else
				stepsize = stepsize / 2.0;
		}

		if (stepsize < min_step)
		{
			printf("Line search fails in two-class probability estimates\n");
			break;
		}
	}

	if (iter>=max_iter)
		printf("Reaching maximal iterations in two-class probability estimates\n");
	free(t);
}

// Evaluate the fitted sigmoid 1/(1+exp(A*f+B)) in a numerically stable way.
__device__ float cuda_sigmoid_predict(float decision_value, float A, float B)
{
	float fApB = decision_value*A+B;
	// 1-p used later; avoid catastrophic cancellation
	if (fApB >= 0)
		return expf(-fApB)/(1.0+expf(-fApB));
	else
		return 1.0/(1+expf(fApB)) ;
}

// Method 2 from the multiclass_prob paper by Wu, Lin, and Weng:
// turns the k*(k-1)/2 pairwise probabilities r into class probabilities p.
__device__ void cuda_multiclass_probability(int k, float **r, float *p)
{
	int t,j;
	int iter = 0, max_iter=max(100,k);
	float **Q=(float **)malloc(sizeof(float *) * k);
	float *Qp=(float *)malloc(sizeof(float) * k);
	float pQp, eps=0.005/k;

	for (t=0;t<k;t++)
	{
		p[t]=1.0/k;  // Valid if k = 1
		Q[t]=(float *)malloc(sizeof(float) * k);
		Q[t][t]=0;
		for (j=0;j<t;j++)
		{
			Q[t][t]+=r[j][t]*r[j][t];
			Q[t][j]=Q[j][t];
		}
		for (j=t+1;j<k;j++)
		{
			Q[t][t]+=r[j][t]*r[j][t];
			Q[t][j]=-r[j][t]*r[t][j];
		}
	}
	for (iter=0;iter<max_iter;iter++)
	{
		// stopping condition, recalculate QP,pQP for numerical accuracy
		pQp=0;
		for (t=0;t<k;t++)
		{
			Qp[t]=0;
			for (j=0;j<k;j++)
				Qp[t]+=Q[t][j]*p[j];
			pQp+=p[t]*Qp[t];
		}
		float max_error=0;
		for (t=0;t<k;t++)
		{
			float error=fabs(Qp[t]-pQp);
			if (error>max_error)
				max_error=error;
		}
		if (max_error<eps) break;

		for (t=0;t<k;t++)
		{
			float diff=(-Qp[t]+pQp)/Q[t][t];
			p[t]+=diff;
			pQp=(pQp+diff*(diff*Q[t][t]+2*Qp[t]))/(1+diff)/(1+diff);
			for (j=0;j<k;j++)
			{
				Qp[j]=(Qp[j]+diff*Q[t][j])/(1+diff);
				p[j]/=(1+diff);
			}
		}
	}
	if (iter>=max_iter)
		printf("Exceeds max_iter in multiclass_prob\n");
	for(t=0;t<k;t++) free(Q[t]);
	free(Q);
	free(Qp);
}

//
// Cross-validation decision values for probability estimates
__device__ void cuda_svm_binary_svc_probability(const svm_problem *prob, const svm_parameter *param, float Cp, float Cn, float& probA, float& probB)
{
	int i;
	int nr_fold = 5;
	int *perm = (int *)malloc(sizeof(int) * prob->l);
	float *dec_values = (float *)malloc(sizeof(float) * prob->l);
	CUDA_Rand rand;

	// random shuffle
	for(i=0;i<prob->l;i++) perm[i]=i;
	for(i=0;i<prob->l;i++)
	{
		int j = i+rand.rand_int(prob->l-i);
		swap(perm[i],perm[j]);
	}
	for(i=0;i<nr_fold;i++)
	{
		int begin = i*prob->l/nr_fold;
		int end = (i+1)*prob->l/nr_fold;
		int j,k;
		struct svm_problem subprob;

		// subproblem = everything outside fold [begin,end)
		subprob.l = prob->l-(end-begin);
		subprob.x = (struct svm_node **)malloc(sizeof(struct svm_node*) * subprob.l);
		subprob.y = (float *)malloc(sizeof(float) * subprob.l);

		k=0;
		for(j=0;j<begin;j++)
		{
			subprob.x[k] = prob->x[perm[j]];
			subprob.y[k] = prob->y[perm[j]];
			++k;
		}
		for(j=end;j<prob->l;j++)
		{
			subprob.x[k] = prob->x[perm[j]];
			subprob.y[k] = prob->y[perm[j]];
			++k;
		}
		int p_count=0,n_count=0;
		for(j=0;j<k;j++)
			if(subprob.y[j]>0)
				p_count++;
			else
				n_count++;

		// degenerate folds: no training data, or single-class training data
		if(p_count==0 && n_count==0)
			for(j=begin;j<end;j++)
				dec_values[perm[j]] = 0;
		else if(p_count > 0 && n_count == 0)
			for(j=begin;j<end;j++)
				dec_values[perm[j]] = 1;
		else if(p_count == 0 && n_count > 0)
			for(j=begin;j<end;j++)
				dec_values[perm[j]] = -1;
		else
		{
			svm_parameter subparam = *param;
			subparam.probability=0;
			subparam.C=1.0;
			subparam.nr_weight=2;
			subparam.weight_label = (int *)malloc(sizeof(int) * 2);
			subparam.weight = (float *)malloc(sizeof(float) * 2);
			subparam.weight_label[0]=+1;
			subparam.weight_label[1]=-1;
			subparam.weight[0]=Cp;
			subparam.weight[1]=Cn;
			struct svm_model *submodel = cuda_device_svm_train_no_prob(&subprob, &subparam);
			for(j=begin;j<end;j++)
			{
				cuda_svm_predict_values(submodel, prob->x[perm[j]], &(dec_values[perm[j]]));
				// ensure +1 -1 order; reason not using CV subroutine
				dec_values[perm[j]] *= submodel->label[0];
			}
			cuda_svm_free_and_destroy_model(&submodel);
			cuda_svm_destroy_param(&subparam);
		}
		free(subprob.x);
		free(subprob.y);
	}
	cuda_sigmoid_train(prob->l,dec_values,prob->y,probA,probB);
	free(dec_values);
	free(perm);
}

// Estimate the Laplace scale parameter of SVR residuals via 5-fold CV;
// outliers beyond 5*std are excluded from the mean absolute error.
__device__ float cuda_svm_svr_probability(const svm_problem *prob, const svm_parameter *param)
{
	int i;
	int nr_fold = 5;
	float *ymv = (float *)malloc(sizeof(float) * prob->l);
	float mae = 0;

	svm_parameter newparam = *param;
	newparam.probability = 0;
	cuda_svm_cross_validation(prob,&newparam,nr_fold,ymv);
	for(i=0;i<prob->l;i++)
	{
		ymv[i]=prob->y[i]-ymv[i];
		mae += fabs(ymv[i]);
	}
	mae /= prob->l;
	float std=sqrtf(2*mae*mae);
	int count=0;
	mae=0;
	for(i=0;i<prob->l;i++)
		if (fabs(ymv[i]) > 5*std)
			count=count+1;
		else
			mae+=fabs(ymv[i]);
	mae /= (prob->l-count);
	printf("Prob. model for test data: target value = predicted value + z,\nz: Laplace distribution e^(-|z|/sigma)/(2sigma),sigma= %g\n",mae);
	free(ymv);
	return mae;
}

// Fill dec_values with the decision values for x and return the prediction
// (label for classification, decision value for regression, +/-1 for one-class).
__device__ float cuda_svm_predict_values(const svm_model *model, const svm_node *x, float* dec_values)
{
	int i;
	if(model->param.svm_type == ONE_CLASS ||
	   model->param.svm_type == EPSILON_SVR ||
	   model->param.svm_type == NU_SVR)
	{
		float *sv_coef = model->sv_coef[0];
		float sum = 0;
		for(i=0;i<model->l;i++)
			sum += sv_coef[i] * CUDA_Kernel::k_function(x,model->SV[i],model->param);
		sum -= model->rho[0];
		*dec_values = sum;

		if(model->param.svm_type == ONE_CLASS)
			return (sum>0)?1:-1;
		else
			return sum;
	}
	else
	{
		// multi-class: one-vs-one voting over k*(k-1)/2 binary classifiers
		int nr_class = model->nr_class;
		int l = model->l;

		float *kvalue = (float *)malloc(sizeof(float) * l);
		for(i=0;i<l;i++)
			kvalue[i] = CUDA_Kernel::k_function(x,model->SV[i],model->param);

		int *start = (int *)malloc(sizeof(int) * nr_class);
		start[0] = 0;
		for(i=1;i<nr_class;i++)
			start[i] = start[i-1]+model->nSV[i-1];

		int *vote = (int *)malloc(sizeof(int) * nr_class);
		for(i=0;i<nr_class;i++)
			vote[i] = 0;

		int p=0;
		for(i=0;i<nr_class;i++)
			for(int j=i+1;j<nr_class;j++)
			{
				float sum = 0;
				int si = start[i];
				int sj = start[j];
				int ci = model->nSV[i];
				int cj = model->nSV[j];

				int k;
				float *coef1 = model->sv_coef[j-1];
				float *coef2 = model->sv_coef[i];
				for(k=0;k<ci;k++)
					sum += coef1[si+k] * kvalue[si+k];
				for(k=0;k<cj;k++)
					sum += coef2[sj+k] * kvalue[sj+k];
				sum -= model->rho[p];
				dec_values[p] = sum;

				if(dec_values[p] > 0)
					++vote[i];
				else
					++vote[j];
				p++;
			}

		int vote_max_idx = 0;
		for(i=1;i<nr_class;i++)
			if(vote[i] > vote[vote_max_idx])
				vote_max_idx = i;

		free(kvalue);
		free(start);
		free(vote);
		return model->label[vote_max_idx];
	}
}

// Convenience wrapper: allocate a scratch dec_values buffer and predict.
__device__ float cuda_svm_predict(const svm_model *model, const svm_node *x)
{
	int nr_class = model->nr_class;
	float *dec_values;
	if(model->param.svm_type == ONE_CLASS ||
	   model->param.svm_type == EPSILON_SVR ||
	   model->param.svm_type == NU_SVR)
		dec_values = (float *)malloc(sizeof(float));
	else
		dec_values = (float *)malloc(sizeof(float) * nr_class*(nr_class-1)/2);
	float pred_result = cuda_svm_predict_values(model, x, dec_values);
	free(dec_values);
	return pred_result;
}

// Predict with class probability estimates (C-SVC/nu-SVC with fitted
// sigmoids only); otherwise falls back to a plain prediction.
__device__ float cuda_svm_predict_probability(const svm_model *model, const svm_node *x, float *prob_estimates)
{
	if ((model->param.svm_type == C_SVC || model->param.svm_type == NU_SVC) &&
	    model->probA!=NULL && model->probB!=NULL)
	{
		int i;
		int nr_class = model->nr_class;
		float *dec_values = (float *)malloc(sizeof(float) * nr_class*(nr_class-1)/2);
		cuda_svm_predict_values(model, x, dec_values);

		float min_prob=1e-7;
		float **pairwise_prob=(float **)malloc(sizeof(float *) * nr_class);
		for(i=0;i<nr_class;i++)
			pairwise_prob[i]=(float *)malloc(sizeof(float) * nr_class);
		int k=0;
		for(i=0;i<nr_class;i++)
			for(int j=i+1;j<nr_class;j++)
			{
				pairwise_prob[i][j]=min(max(cuda_sigmoid_predict(dec_values[k],model->probA[k],model->probB[k]),min_prob),1-min_prob);
				pairwise_prob[j][i]=1-pairwise_prob[i][j];
				k++;
			}
		cuda_multiclass_probability(nr_class,pairwise_prob,prob_estimates);

		int prob_max_idx = 0;
		for(i=1;i<nr_class;i++)
			if(prob_estimates[i] > prob_estimates[prob_max_idx])
				prob_max_idx = i;
		for(i=0;i<nr_class;i++)
			free(pairwise_prob[i]);
		free(dec_values);
		free(pairwise_prob);
		return model->label[prob_max_idx];
	}
	else
		return cuda_svm_predict(model, x);
}

// Stratified cross validation
__device__ void cuda_svm_cross_validation(const svm_problem *prob, const svm_parameter *param, int nr_fold, float *target)
{
	int i;
	int *fold_start;
	int l = prob->l;
	int *perm = (int *)malloc(sizeof(int) * l);
	int nr_class;
	CUDA_Rand rand;
	if (nr_fold > l)
	{
		nr_fold = l;
		printf("WARNING: # folds > # data. Will use # folds = # data instead (i.e., leave-one-out cross validation)\n");
	}
	fold_start = (int *)malloc(sizeof(int) * (nr_fold+1));
	// stratified cv may not give leave-one-out rate
	// Each class to l folds -> some folds may have zero elements
	if((param->svm_type == C_SVC ||
	    param->svm_type == NU_SVC) && nr_fold < l)
	{
		int *start = NULL;
		int *label = NULL;
		int *count = NULL;
		cuda_svm_group_classes(prob,&nr_class,&label,&start,&count,perm);

		// random shuffle and then data grouped by fold using the array perm
		int *fold_count = (int *)malloc(sizeof(int) * nr_fold);
		int c;
		int *index = (int *)malloc(sizeof(int) * l);
		for(i=0;i<l;i++)
			index[i]=perm[i];
		for (c=0; c<nr_class; c++)
			for(i=0;i<count[c];i++)
			{
				int j = i+rand.rand_int(count[c]-i);
				swap(index[start[c]+j],index[start[c]+i]);
			}
		for(i=0;i<nr_fold;i++)
		{
			fold_count[i] = 0;
			for (c=0; c<nr_class;c++)
				fold_count[i]+=(i+1)*count[c]/nr_fold-i*count[c]/nr_fold;
		}
		fold_start[0]=0;
		for (i=1;i<=nr_fold;i++)
			fold_start[i] = fold_start[i-1]+fold_count[i-1];
		for (c=0; c<nr_class;c++)
			for(i=0;i<nr_fold;i++)
			{
				int begin = start[c]+i*count[c]/nr_fold;
				int end = start[c]+(i+1)*count[c]/nr_fold;
				for(int j=begin;j<end;j++)
				{
					perm[fold_start[i]] = index[j];
					fold_start[i]++;
				}
			}
		// the loop above advanced fold_start; rebuild it
		fold_start[0]=0;
		for (i=1;i<=nr_fold;i++)
			fold_start[i] = fold_start[i-1]+fold_count[i-1];
		free(start);
		free(label);
		free(count);
		free(index);
		free(fold_count);
	}
	else
	{
		// non-stratified: plain random permutation split into folds
		for(i=0;i<l;i++) perm[i]=i;
		for(i=0;i<l;i++)
		{
			int j = i+rand.rand_int(l-i);
			swap(perm[i],perm[j]);
		}
		for(i=0;i<=nr_fold;i++)
			fold_start[i]=i*l/nr_fold;
	}

	for(i=0;i<nr_fold;i++)
	{
		int begin = fold_start[i];
		int end = fold_start[i+1];
		int j,k;
		struct svm_problem subprob;

		subprob.l = l-(end-begin);
		subprob.x = (svm_node **)malloc(sizeof(svm_node *) * subprob.l);
		subprob.y = (float *)malloc(sizeof(float) * subprob.l);

		k=0;
		for(j=0;j<begin;j++)
		{
			subprob.x[k] = prob->x[perm[j]];
			subprob.y[k] = prob->y[perm[j]];
			++k;
		}
		for(j=end;j<l;j++)
		{
			subprob.x[k] = prob->x[perm[j]];
			subprob.y[k] = prob->y[perm[j]];
			++k;
		}
		struct svm_model *submodel = cuda_device_svm_train_no_prob(&subprob, param);
		//if(param->probability &&
		//   (param->svm_type == C_SVC || param->svm_type == NU_SVC))
		//{
		//	float *prob_estimates = (float *)malloc(sizeof(float) * submodel->nr_class);
		//	for(j=begin;j<end;j++)
		//		target[perm[j]] = cuda_svm_predict_probability(submodel,prob->x[perm[j]],prob_estimates);
		//	free(prob_estimates);
		//}
		//else
			for(j=begin;j<end;j++)
				target[perm[j]] = cuda_svm_predict(submodel,prob->x[perm[j]]);
		cuda_svm_free_and_destroy_model(&submodel);
		free(subprob.x);
		free(subprob.y);
	}
	free(fold_start);
	free(perm);
}

//
// construct and solve various formulations
//
__device__ void solve_c_svc(const svm_problem *prob, const svm_parameter *param, float *alpha, CUDA_Solver::SolutionInfo* si, float Cp, float Cn)
{
	int l = prob->l;
	float *minus_ones = (float *)malloc(sizeof(float) * l);
	schar *y = (schar *)malloc(sizeof(schar) * l);
	int i;

	for(i=0;i<l;i++)
	{
		alpha[i] = 0;
		minus_ones[i] = -1;
		if(prob->y[i] > 0) y[i] = +1; else y[i] = -1;
	}

	CUDA_Solver s;
	s.Solve(l, CUDA_SVC_Q(*prob,*param,y), minus_ones, y,
		alpha, Cp, Cn, param->eps, si, param->shrinking);

	float sum_alpha=0;
	for(i=0;i<l;i++)
		sum_alpha += alpha[i];

	//if (Cp==Cn)
	//	printf("nu = %f\n", sum_alpha/(Cp*prob->l));

	// fold the labels back into alpha for the decision function
	for(i=0;i<l;i++)
		alpha[i] *= y[i];

	free(minus_ones);
	free(y);
}

__device__ void solve_nu_svc(const svm_problem *prob, const svm_parameter *param, float *alpha, CUDA_Solver::SolutionInfo* si)
{
	int i;
	int l = prob->l;
	float nu = param->nu;

	schar *y = (schar *)malloc(sizeof(schar) * l);

	for(i=0;i<l;i++)
		if(prob->y[i]>0)
			y[i] = +1;
		else
			y[i] = -1;

	// feasible starting point: distribute nu*l/2 over each class
	float sum_pos = nu*l/2;
	float sum_neg = nu*l/2;

	for(i=0;i<l;i++)
		if(y[i] == +1)
		{
			alpha[i] = min(1.0,sum_pos);
			sum_pos -= alpha[i];
		}
		else
		{
			alpha[i] = min(1.0,sum_neg);
			sum_neg -= alpha[i];
		}

	float *zeros = (float *)malloc(sizeof(float) * l);

	for(i=0;i<l;i++)
		zeros[i] = 0;

	CUDA_Solver_NU s;
	s.Solve(l, CUDA_SVC_Q(*prob,*param,y), zeros, y,
		alpha, 1.0, 1.0, param->eps, si, param->shrinking);
	float r = si->r;

	//printf("C = %f\n",1/r);

	// rescale the nu-SVC solution into C-SVC form
	for(i=0;i<l;i++)
		alpha[i] *= y[i]/r;

	si->rho /= r;
	si->obj /= (r*r);
	si->upper_bound_p = 1/r;
	si->upper_bound_n = 1/r;

	free(y);
	free(zeros);
}

__device__ void solve_one_class(const svm_problem *prob, const svm_parameter *param, float *alpha, CUDA_Solver::SolutionInfo* si)
{
	int l = prob->l;
	float *zeros = (float *)malloc(sizeof(float) * l);
	schar *ones = (schar *)malloc(sizeof(schar) * l);
	int i;

	int n = (int)(param->nu*prob->l);	// # of alpha's at upper bound

	for(i=0;i<n;i++)
		alpha[i] = 1;
	if(n<prob->l)
		alpha[n] = param->nu * prob->l - n;
	for(i=n+1;i<l;i++)
		alpha[i] = 0;

	for(i=0;i<l;i++)
	{
		zeros[i] = 0;
		ones[i] = 1;
	}

	CUDA_Solver s;
	s.Solve(l, CUDA_ONE_CLASS_Q(*prob,*param), zeros, ones,
		alpha, 1.0, 1.0, param->eps, si, param->shrinking);

	free(zeros);
	free(ones);
}

__device__ void solve_epsilon_svr(const svm_problem *prob, const svm_parameter *param, float *alpha, CUDA_Solver::SolutionInfo* si)
{
	int l = prob->l;
	// doubled problem: alpha2[0..l) and alpha2[l..2l) are the +/- halves
	float *alpha2 = (float *)malloc(sizeof(float) * l * 2);
	float *linear_term = (float *)malloc(sizeof(float) * l * 2);
	schar *y = (schar *)malloc(sizeof(schar) * l * 2);
	int i;

	for(i=0;i<l;i++)
	{
		alpha2[i] = 0;
		linear_term[i] = param->p - prob->y[i];
		y[i] = 1;

		alpha2[i+l] = 0;
		linear_term[i+l] = param->p + prob->y[i];
		y[i+l] = -1;
	}

	CUDA_Solver s;
	s.Solve(2*l, CUDA_SVR_Q(*prob,*param), linear_term, y,
		alpha2, param->C, param->C, param->eps, si, param->shrinking);

	float sum_alpha = 0;
	for(i=0;i<l;i++)
	{
		alpha[i] = alpha2[i] - alpha2[i+l];
		sum_alpha += fabs(alpha[i]);
	}
	//printf("nu = %f\n",sum_alpha/(param->C*l));
	free(alpha2);
	free(linear_term);
	free(y);
}

__device__ void solve_nu_svr(const svm_problem *prob, const svm_parameter *param, float *alpha, CUDA_Solver::SolutionInfo* si)
{
	int l = prob->l;
	float C = param->C;
	// doubled problem, as in epsilon-SVR
	float *alpha2 = (float *)malloc(sizeof(float) * l * 2);
	float *linear_term = (float *)malloc(sizeof(float) * l * 2);
	schar *y = (schar *)malloc(sizeof(schar) * l * 2);
	int i;

	float sum = C * param->nu * l / 2;
	for(i=0;i<l;i++)
	{
		alpha2[i] = alpha2[i+l] = min(sum,C);
		sum -= alpha2[i];

		linear_term[i] = - prob->y[i];
		y[i] = 1;

		linear_term[i+l] = prob->y[i];
		y[i+l] = -1;
	}

	CUDA_Solver_NU s;
	s.Solve(2*l, CUDA_SVR_Q(*prob,*param), linear_term, y,
		alpha2, C, C, param->eps, si, param->shrinking);

	//printf("epsilon = %f\n",-si->r);

	for(i=0;i<l;i++)
		alpha[i] = alpha2[i] - alpha2[i+l];

	free(alpha2);
	free(linear_term);
	free(y);
}

// Train a single binary/one-class/regression machine; returns alpha
// (malloc'd, ownership passes to caller) and rho.
__device__ struct decision_function cuda_svm_train_one(const svm_problem *prob, const svm_parameter *param, float Cp, float Cn)
{
	float *alpha = (float *)malloc(sizeof(float) * prob->l);
	CUDA_Solver::SolutionInfo si;
	switch(param->svm_type)
	{
		case C_SVC:
			solve_c_svc(prob,param,alpha,&si,Cp,Cn);
			break;
		case NU_SVC:
			solve_nu_svc(prob,param,alpha,&si);
			break;
		case ONE_CLASS:
			solve_one_class(prob,param,alpha,&si);
			break;
		case EPSILON_SVR:
			solve_epsilon_svr(prob,param,alpha,&si);
			break;
		case NU_SVR:
			solve_nu_svr(prob,param,alpha,&si);
			break;
	}

	printf("obj = %f, rho = %f\n",si.obj,si.rho);

	// output SVs
	/*
	int nSV = 0;
	int nBSV = 0;
	for(int i=0;i<prob->l;i++)
	{
		if(fabs(alpha[i]) > 0)
		{
			++nSV;
			if(prob->y[i] > 0)
			{
				if(fabs(alpha[i]) >= si.upper_bound_p)
					++nBSV;
			}
			else
			{
				if(fabs(alpha[i]) >= si.upper_bound_n)
					++nBSV;
			}
		}
	}
	printf("nSV = %d, nBSV = %d\n",nSV,nBSV);
	*/

	decision_function f;
	f.alpha = alpha;
	f.rho = si.rho;
	return f;
}

// Full training entry point (probability estimation disabled); the model
// and all of its arrays are allocated here with device malloc.
__device__ struct svm_model *cuda_device_svm_train_no_prob(const struct svm_problem *prob, const struct svm_parameter *param)
{
	svm_model *model = (svm_model *)malloc(sizeof(svm_model));
	model->param = *param;
	model->free_sv = 0;	// XXX

	if(param->svm_type == ONE_CLASS ||
	   param->svm_type == EPSILON_SVR ||
	   param->svm_type == NU_SVR)
	{
		// regression or one-class-svm
		model->nr_class = 2;
		model->label = NULL;
		model->nSV = NULL;
		model->probA = NULL; model->probB = NULL;
		model->sv_coef = (float **)malloc(sizeof(float*));

		//if(param->probability &&
		//   (param->svm_type == EPSILON_SVR ||
		//    param->svm_type == NU_SVR))
		//{
		//	model->probA = (float *)malloc(sizeof(float));
		//	model->probA[0] = cuda_svm_svr_probability(prob,param);
		//}

		decision_function f = cuda_svm_train_one(prob,param,0,0);
		model->rho = (float *)malloc(sizeof(float));
		model->rho[0] = f.rho;

		// keep only the support vectors (nonzero alpha)
		int nSV = 0;
		int i;
		for(i=0;i<prob->l;i++)
			if(fabs(f.alpha[i]) > 0) ++nSV;
		model->l = nSV;
		model->SV = (svm_node **)malloc(sizeof(svm_node *) * nSV);
		model->sv_coef[0] = (float *)malloc(sizeof(float) * nSV);
		model->sv_indices = (int *)malloc(sizeof(int) * nSV);
		int j = 0;
		for(i=0;i<prob->l;i++)
			if(fabs(f.alpha[i]) > 0)
			{
				model->SV[j] = prob->x[i];
				model->sv_coef[0][j] = f.alpha[i];
				model->sv_indices[j] = i+1;
				++j;
			}

		free(f.alpha);
	}
	else
	{
		// classification
		int l = prob->l;
		int nr_class;
		int *label = NULL;
		int *start = NULL;
		int *count = NULL;
		int *perm = (int *)malloc(sizeof(int) * l);

		// group training data of the same class
		cuda_svm_group_classes(prob,&nr_class,&label,&start,&count,perm);
		if(nr_class == 1)
			printf("WARNING: training data in only one class. See README for details.\n");

		svm_node **x = (svm_node **)malloc(sizeof(svm_node *) * l);
		int i;
		for(i=0;i<l;i++)
			x[i] = prob->x[perm[i]];

		// calculate weighted C
		float *weighted_C = (float *)malloc(sizeof(float) * nr_class);
		for(i=0;i<nr_class;i++)
			weighted_C[i] = param->C;
		for(i=0;i<param->nr_weight;i++)
		{
			int j;
			for(j=0;j<nr_class;j++)
				if(param->weight_label[i] == label[j])
					break;
			if(j == nr_class)
				printf("WARNING: class label %d specified in weight is not found\n", param->weight_label[i]);
			else
				weighted_C[j] *= param->weight[i];
		}

		// train k*(k-1)/2 models
		bool *nonzero = (bool *)malloc(sizeof(bool) * l);
		for(i=0;i<l;i++)
			nonzero[i] = false;
		decision_function *f = (decision_function *)malloc(sizeof(decision_function) * nr_class*(nr_class-1)/2);

		//float *probA=NULL,*probB=NULL;
		//if (param->probability)
		//{
		//	probA=(float *)malloc(sizeof(float) * nr_class*(nr_class-1)/2);
		//	probB=(float *)malloc(sizeof(float) * nr_class*(nr_class-1)/2);
		//}

		int p = 0;
		for(i=0;i<nr_class;i++)
			for(int j=i+1;j<nr_class;j++)
			{
				// one-vs-one subproblem: class i labeled +1, class j labeled -1
				svm_problem sub_prob;
				int si = start[i], sj = start[j];
				int ci = count[i], cj = count[j];
				sub_prob.l = ci+cj;
				sub_prob.x = (svm_node **)malloc(sizeof(svm_node *) * sub_prob.l);
				sub_prob.y = (float *)malloc(sizeof(float) * sub_prob.l);
				int k;
				for(k=0;k<ci;k++)
				{
					sub_prob.x[k] = x[si+k];
					sub_prob.y[k] = +1;
				}
				for(k=0;k<cj;k++)
				{
					sub_prob.x[ci+k] = x[sj+k];
					sub_prob.y[ci+k] = -1;
				}

				//if(param->probability)
				//	cuda_svm_binary_svc_probability(&sub_prob,param,weighted_C[i],weighted_C[j],probA[p],probB[p]);

				f[p] = cuda_svm_train_one(&sub_prob,param,weighted_C[i],weighted_C[j]);
				for(k=0;k<ci;k++)
					if(!nonzero[si+k] && fabs(f[p].alpha[k]) > 0)
						nonzero[si+k] = true;
				for(k=0;k<cj;k++)
					if(!nonzero[sj+k] && fabs(f[p].alpha[ci+k]) > 0)
						nonzero[sj+k] = true;
				free(sub_prob.x);
				free(sub_prob.y);
				++p;
			}

		// build output
		model->nr_class = nr_class;

		model->label = (int *)malloc(sizeof(int) * nr_class);
		for(i=0;i<nr_class;i++)
			model->label[i] = label[i];

		model->rho = (float *)malloc(sizeof(float) * nr_class*(nr_class-1)/2);
		for(i=0;i<nr_class*(nr_class-1)/2;i++)
			model->rho[i] = f[i].rho;

		//if(param->probability)
		//{
		//	model->probA = (float *)malloc(sizeof(float) * nr_class*(nr_class-1)/2);
		//	model->probB = (float *)malloc(sizeof(float) * nr_class*(nr_class-1)/2);
		//	for(i=0;i<nr_class*(nr_class-1)/2;i++)
		//	{
		//		model->probA[i] = probA[i];
		//		model->probB[i] = probB[i];
		//	}
		//}
		//else
		//{
			model->probA=NULL;
			model->probB=NULL;
		//}

		int total_sv = 0;
		int *nz_count = (int *)malloc(sizeof(int) * nr_class);
		model->nSV = (int *)malloc(sizeof(int) * nr_class);
		for(i=0;i<nr_class;i++)
		{
			int nSV = 0;
			for(int j=0;j<count[i];j++)
				if(nonzero[start[i]+j])
				{
					++nSV;
					++total_sv;
				}
			model->nSV[i] = nSV;
			nz_count[i] = nSV;
		}

		printf("Total nSV = %d\n",total_sv);

		model->l = total_sv;
		model->SV = (svm_node **)malloc(sizeof(svm_node *) * total_sv);
		model->sv_indices = (int *)malloc(sizeof(int) * total_sv);
		p = 0;
		for(i=0;i<l;i++)
			if(nonzero[i])
			{
				model->SV[p] = x[i];
				model->sv_indices[p++] = perm[i] + 1;
			}

		int *nz_start = (int *)malloc(sizeof(int) * nr_class);
		nz_start[0] = 0;
		for(i=1;i<nr_class;i++)
			nz_start[i] = nz_start[i-1]+nz_count[i-1];

		model->sv_coef = (float **)malloc(sizeof(float *) * (nr_class-1));
		for(i=0;i<nr_class-1;i++)
			model->sv_coef[i] = (float *)malloc(sizeof(float) * total_sv);

		p = 0;
		for(i=0;i<nr_class;i++)
			for(int j=i+1;j<nr_class;j++)
			{
				// classifier (i,j): coefficients with
				// i are in sv_coef[j-1][nz_start[i]...],
				// j are in sv_coef[i][nz_start[j]...]
int si = start[i]; int sj = start[j]; int ci = count[i]; int cj = count[j]; int q = nz_start[i]; int k; for(k=0;k<ci;k++) if(nonzero[si+k]) model->sv_coef[j-1][q++] = f[p].alpha[k]; q = nz_start[j]; for(k=0;k<cj;k++) if(nonzero[sj+k]) model->sv_coef[i][q++] = f[p].alpha[ci+k]; ++p; } free(label); //free(probA); //free(probB); free(count); free(perm); free(start); free(x); free(weighted_C); free(nonzero); for(i=0;i<nr_class*(nr_class-1)/2;i++) free(f[i].alpha); free(f); free(nz_count); free(nz_start); } return model; } __device__ void cuda_perform_svm_train(const struct svm_problem *prob, const struct svm_parameter *param, struct svm_model *model) { model->param = *param; model->free_sv = 0; // XXX if(param->svm_type == ONE_CLASS || param->svm_type == EPSILON_SVR || param->svm_type == NU_SVR) { // regression or one-class-svm model->nr_class = 2; if(param->probability && (param->svm_type == EPSILON_SVR || param->svm_type == NU_SVR)) { model->probA[0] = cuda_svm_svr_probability(prob, param); } decision_function f = cuda_svm_train_one(prob,param,0,0); model->rho[0] = f.rho; int nSV = 0; int i; for(i=0;i<prob->l;i++) if(fabs(f.alpha[i]) > 0) ++nSV; model->l = nSV; int j = 0; for(i=0;i<prob->l;i++) if(fabs(f.alpha[i]) > 0) { model->SV[j] = (struct svm_node *)i; model->sv_coef[0][j] = f.alpha[i]; model->sv_indices[j] = i+1; ++j; } free(f.alpha); } else { // classification int l = prob->l; int nr_class; int *label = NULL; int *start = NULL; int *count = NULL; int *perm = (int *)malloc(sizeof(int) * l); // group training data of the same class cuda_svm_group_classes(prob,&nr_class,&label,&start,&count,perm); if(nr_class == 1) printf("WARNING: training data in only one class. 
See README for details.\n"); svm_node **x = (svm_node **)malloc(sizeof(svm_node *) * l); int i; for(i=0;i<l;i++) x[i] = prob->x[perm[i]]; // calculate weighted C float *weighted_C = (float *)malloc(sizeof(float) * nr_class); for(i=0;i<nr_class;i++) weighted_C[i] = param->C; for(i=0;i<param->nr_weight;i++) { int j; for(j=0;j<nr_class;j++) if(param->weight_label[i] == label[j]) break; if(j == nr_class) printf("WARNING: class label %d specified in weight is not found\n", param->weight_label[i]); else weighted_C[j] *= param->weight[i]; } // train k*(k-1)/2 models bool *nonzero = (bool *)malloc(sizeof(bool) * l); for(i=0;i<l;i++) nonzero[i] = false; decision_function *f = (decision_function *)malloc(sizeof(decision_function) * nr_class*(nr_class-1)/2); float *probA=NULL,*probB=NULL; if (param->probability) { probA=(float *)malloc(sizeof(float) * nr_class*(nr_class-1)/2); probB=(float *)malloc(sizeof(float) * nr_class*(nr_class-1)/2); } int p = 0; for(i=0;i<nr_class;i++) for(int j=i+1;j<nr_class;j++) { svm_problem sub_prob; int si = start[i], sj = start[j]; int ci = count[i], cj = count[j]; sub_prob.l = ci+cj; sub_prob.x = (svm_node **)malloc(sizeof(svm_node *) * sub_prob.l); sub_prob.y = (float *)malloc(sizeof(float) * sub_prob.l); int k; for(k=0;k<ci;k++) { sub_prob.x[k] = x[si+k]; sub_prob.y[k] = +1; } for(k=0;k<cj;k++) { sub_prob.x[ci+k] = x[sj+k]; sub_prob.y[ci+k] = -1; } if (param->probability) cuda_svm_binary_svc_probability(&sub_prob,param,weighted_C[i],weighted_C[j],probA[p],probB[p]); f[p] = cuda_svm_train_one(&sub_prob,param,weighted_C[i],weighted_C[j]); for(k=0;k<ci;k++) if(!nonzero[si+k] && fabs(f[p].alpha[k]) > 0) nonzero[si+k] = true; for(k=0;k<cj;k++) if(!nonzero[sj+k] && fabs(f[p].alpha[ci+k]) > 0) nonzero[sj+k] = true; free(sub_prob.x); free(sub_prob.y); ++p; } // build output model->nr_class = nr_class; for(i=0;i<nr_class;i++) model->label[i] = label[i]; for(i=0;i<nr_class*(nr_class-1)/2;i++) model->rho[i] = f[i].rho; if(param->probability) { 
for(i=0;i<nr_class*(nr_class-1)/2;i++) { model->probA[i] = probA[i]; model->probB[i] = probB[i]; } } else { model->probA=NULL; model->probB=NULL; } int total_sv = 0; int *nz_count = (int *)malloc(sizeof(int) * nr_class); for(i=0;i<nr_class;i++) { int nSV = 0; for(int j=0;j<count[i];j++) if(nonzero[start[i]+j]) { ++nSV; ++total_sv; } model->nSV[i] = nSV; nz_count[i] = nSV; } printf("Total nSV = %d\n",total_sv); model->l = total_sv; p = 0; for(i=0;i<l;i++) if(nonzero[i]) { model->SV[p] = (struct svm_node *)(perm[i]); model->sv_indices[p++] = perm[i] + 1; } int *nz_start = (int *)malloc(sizeof(int) * nr_class); nz_start[0] = 0; for(i=1;i<nr_class;i++) nz_start[i] = nz_start[i-1]+nz_count[i-1]; p = 0; for(i=0;i<nr_class;i++) for(int j=i+1;j<nr_class;j++) { // classifier (i,j): coefficients with // i are in sv_coef[j-1][nz_start[i]...], // j are in sv_coef[i][nz_start[j]...] int si = start[i]; int sj = start[j]; int ci = count[i]; int cj = count[j]; int q = nz_start[i]; int k; for(k=0;k<ci;k++) if(nonzero[si+k]) model->sv_coef[j-1][q++] = f[p].alpha[k]; q = nz_start[j]; for(k=0;k<cj;k++) if(nonzero[sj+k]) model->sv_coef[i][q++] = f[p].alpha[ci+k]; ++p; } free(label); free(probA); free(probB); free(count); free(perm); free(start); free(x); free(weighted_C); free(nonzero); for(i=0;i<nr_class*(nr_class-1)/2;i++) free(f[i].alpha); free(f); free(nz_count); free(nz_start); } } __global__ void cuda_svm_train_kernel(const struct svm_problem *subprobs, const struct svm_parameter *params, struct svm_model *submodels, size_t pitch, int nr_grid, int nr_fold) { int x = blockIdx.x; int y = threadIdx.x; if (y%WARP_SIZE == 0) { y = y / WARP_SIZE; if (x<nr_grid && y<nr_fold) { struct svm_model *row = (struct svm_model *)((char*)submodels + x * pitch); cuda_perform_svm_train(&(subprobs[y]), &(params[x]), &(row[y])); } } } int cuda_svm_train(const struct svm_problem *h_prob, struct svm_problem *h_subprobs, struct svm_parameter *h_params, int nr_grid, int nr_fold, struct svm_model 
*h_submodels) { int i, j, k; int dev_cnt; int res = 0; // // Initialize // if (cudaGetLastError() != cudaSuccess) { fprintf(stderr, "Error when initialize CUDA device\n"); return 1; } cudaGetDeviceCount(&dev_cnt); if (dev_cnt == 0) { fprintf(stderr, "No CUDA device\n"); return 1; } #ifdef NVML if (dev_cnt > 1) { // // Choose device that has maximum device memory left // int max_dev; size_t max_avail = 0; nvmlInit(); for (i=0; i<dev_cnt; i++) { nvmlDevice_t device; nvmlMemory_t mem; nvmlDeviceGetHandleByIndex(i, &device); nvmlDeviceGetMemoryInfo(device, &mem); if (mem.free > max_avail) { max_dev = i; max_avail = mem.free; } } nvmlShutdown(); cudaSetDevice(max_dev); } #endif struct svm_node **x_space = (struct svm_node **)malloc(sizeof(struct svm_node *) * h_prob->l); struct svm_problem *subprobs; struct svm_parameter *params; struct svm_model *submodels; // Send original prob.x to device for (i=0; i<h_prob->l; i++) { j=0; while(h_prob->x[i][j++].index != -1); cudaMalloc(&(x_space[i]), sizeof(struct svm_node) * j); cudaMemcpy(x_space[i], h_prob->x[i], sizeof(struct svm_node) * j, cudaMemcpyHostToDevice); } // Build subprobs in device cudaMalloc(&subprobs, sizeof(struct svm_problem) * nr_fold); float **y = (float **)malloc(sizeof(float *) * nr_fold); struct svm_node ***x = (struct svm_node ***)malloc(sizeof(struct svm_node **) * nr_fold); for (i=0; i<nr_fold; i++) { cudaMemcpy(&(subprobs[i].l), &(h_subprobs[i].l), sizeof(int), cudaMemcpyHostToDevice); cudaMalloc(&(y[i]), sizeof(float) * h_subprobs[i].l); cudaMemcpy(y[i], h_subprobs[i].y, sizeof(float) * h_subprobs[i].l, cudaMemcpyHostToDevice); cudaMemcpy(&(subprobs[i].y), &(y[i]), sizeof(float *), cudaMemcpyHostToDevice); cudaMalloc(&(x[i]), sizeof(struct svm_node *) * h_subprobs[i].l); // The h_subprobs[i].x[j] stores the index in the original prob.x, instead of a pointer to that for (j=0; j<h_subprobs[i].l; j++) cudaMemcpy(&(x[i][j]), &(x_space[int(h_subprobs[i].x[j])]), sizeof(struct svm_node *), 
cudaMemcpyHostToDevice); cudaMemcpy(&(subprobs[i].x), &(x[i]), sizeof(struct svm_node **), cudaMemcpyHostToDevice); } // Send params to device cudaMalloc(&params, sizeof(struct svm_parameter) * nr_grid); cudaMemcpy(params, h_params, sizeof(struct svm_parameter) * nr_grid, cudaMemcpyHostToDevice); // Build results (submodels) cache in device size_t pitch; cudaMallocPitch(&submodels, &pitch, sizeof(struct svm_model) * nr_fold, nr_grid); int nr_class = svm_get_nr_classes(h_prob); struct svm_node ***SV = (struct svm_node ***)malloc(sizeof(struct svm_node **) * nr_grid * nr_fold); // The model->sv_coef is a two-dimension linked list float ***sv_coef_p = (float ***)malloc(sizeof(float **) * nr_grid * nr_fold); float ***sv_coef = (float ***)malloc(sizeof(float **) * nr_grid * nr_fold); for (i=0; i<nr_grid; i++) for (j=0; j<nr_fold; j++) sv_coef[i*nr_fold+j] = (float **)malloc(sizeof(float *) * (nr_class-1)); float **rho = (float **)malloc(sizeof(float *) * nr_grid * nr_fold); float **probA = (float **)malloc(sizeof(float *) * nr_grid * nr_fold); float **probB = (float **)malloc(sizeof(float *) * nr_grid * nr_fold); int **sv_indices = (int **)malloc(sizeof(int *) * nr_grid * nr_fold); int **label = (int **)malloc(sizeof(int *) * nr_grid * nr_fold); int **nSV = (int **)malloc(sizeof(int *) * nr_grid * nr_fold); // Build the storage structure for results for (i=0; i<nr_grid; i++) for (j=0; j<nr_fold; j++) { cudaMalloc(&(SV[i*nr_fold+j]), sizeof(struct svm_node *) * h_subprobs[j].l); cudaMalloc(&(sv_coef_p[i*nr_fold+j]), sizeof(float *) * (nr_class-1)); for (k=0; k<nr_class-1; k++) cudaMalloc(&(sv_coef[i*nr_fold+j][k]), sizeof(float) * h_subprobs[j].l); cudaMalloc(&(rho[i*nr_fold+j]), sizeof(float) * nr_class*(nr_class-1)/2); cudaMalloc(&(probA[i*nr_fold+j]), sizeof(float) * nr_class*(nr_class-1)/2); cudaMalloc(&(probB[i*nr_fold+j]), sizeof(float) * nr_class*(nr_class-1)/2); cudaMalloc(&(sv_indices[i*nr_fold+j]), sizeof(int) * h_subprobs[j].l); 
cudaMalloc(&(label[i*nr_fold+j]), sizeof(int) * nr_class); cudaMalloc(&(nSV[i*nr_fold+j]), sizeof(int) * nr_class); struct svm_model *models = (struct svm_model *)((char*)submodels + i*pitch); cudaMemcpy(&(models[j].SV), &(SV[i*nr_fold+j]), sizeof(struct svm_node **), cudaMemcpyHostToDevice); cudaMemcpy(&(models[j].sv_coef), &(sv_coef_p[i*nr_fold+j]), sizeof(float **), cudaMemcpyHostToDevice); cudaMemcpy(sv_coef_p[i*nr_fold+j], sv_coef[i*nr_fold+j], sizeof(float *) * (nr_class-1), cudaMemcpyHostToDevice); cudaMemcpy(&(models[j].rho), &(rho[i*nr_fold+j]), sizeof(float *), cudaMemcpyHostToDevice); cudaMemcpy(&(models[j].probA), &(probA[i*nr_fold+j]), sizeof(float *), cudaMemcpyHostToDevice); cudaMemcpy(&(models[j].probB), &(probB[i*nr_fold+j]), sizeof(float *), cudaMemcpyHostToDevice); cudaMemcpy(&(models[j].sv_indices), &(sv_indices[i*nr_fold+j]), sizeof(int *), cudaMemcpyHostToDevice); cudaMemcpy(&(models[j].label), &(label[i*nr_fold+j]), sizeof(int *), cudaMemcpyHostToDevice); cudaMemcpy(&(models[j].nSV), &(nSV[i*nr_fold+j]), sizeof(int *), cudaMemcpyHostToDevice); } // // Run the kernel // cudaDeviceSetLimit(cudaLimitMallocHeapSize, DEVICE_HEAP_SIZE); cuda_svm_train_kernel<<<nr_grid, nr_fold*WARP_SIZE>>>(subprobs, params, submodels, pitch, nr_grid, nr_fold); if (cudaGetLastError() == cudaSuccess) { if (cudaDeviceSynchronize() == cudaSuccess) { // Copy results (submodels) from device to host cudaMemcpy2D(h_submodels, sizeof(struct svm_model) * nr_fold, submodels, pitch, sizeof(struct svm_model) * nr_fold, nr_grid, cudaMemcpyDeviceToHost); for (i=0; i<nr_grid; i++) for (j=0; j<nr_fold; j++) { struct svm_model *model = h_submodels + i*nr_fold + j; model->SV = (struct svm_node **)malloc(sizeof(struct svm_node *) * h_subprobs[j].l); cudaMemcpy(model->SV, SV[i*nr_fold+j], sizeof(struct svm_node *) * h_subprobs[j].l, cudaMemcpyDeviceToHost); // The returned model->SV is a index to the subprob, instead of a pointer for(k=0; k<model->l; k++) model->SV[k] = (struct 
svm_node *)(h_prob->x[int(h_subprobs[j].x[int(model->SV[k])])]); model->sv_coef = (float **)malloc(sizeof(float *) * (nr_class-1)); for (k=0; k<nr_class-1; k++) { model->sv_coef[k] = (float *)malloc(sizeof(float) * h_subprobs[j].l); cudaMemcpy(model->sv_coef[k], sv_coef[i*nr_fold+j][k], sizeof(float) * h_subprobs[j].l, cudaMemcpyDeviceToHost); } model->rho = (float *)malloc(sizeof(float) * nr_class*(nr_class-1)/2); model->probA = (float *)malloc(sizeof(float) * nr_class*(nr_class-1)/2); model->probB = (float *)malloc(sizeof(float) * nr_class*(nr_class-1)/2); model->sv_indices = (int *)malloc(sizeof(int) * h_subprobs[j].l); model->label = (int *)malloc(sizeof(int) * nr_class); model->nSV = (int *)malloc(sizeof(int) * nr_class); cudaMemcpy(model->rho, rho[i*nr_fold+j], sizeof(float) * nr_class*(nr_class-1)/2, cudaMemcpyDeviceToHost); cudaMemcpy(model->probA, probA[i*nr_fold+j], sizeof(float) * nr_class*(nr_class-1)/2, cudaMemcpyDeviceToHost); cudaMemcpy(model->probB, probB[i*nr_fold+j], sizeof(float) * nr_class*(nr_class-1)/2, cudaMemcpyDeviceToHost); cudaMemcpy(model->sv_indices, sv_indices[i*nr_fold+j], sizeof(int) * h_subprobs[j].l, cudaMemcpyDeviceToHost); cudaMemcpy(model->label, label[i*nr_fold+j], sizeof(int) * nr_class, cudaMemcpyDeviceToHost); cudaMemcpy(model->nSV, nSV[i*nr_fold+j], sizeof(int) * nr_class, cudaMemcpyDeviceToHost); } } else { fprintf(stderr, "Error when running CUDA svm train: %s\n", cudaGetErrorString(cudaGetLastError())); res = 1; } } else { fprintf(stderr, "Error when launching CUDA svm train\n"); res = 1; } // // Free all the memory allocated in device and host // for (i=0; i<h_prob->l; i++) cudaFree(x_space[i]); free(x_space); for (i=0; i<nr_fold; i++) { cudaFree(y[i]); cudaFree(x[i]); } free(y); free(x); cudaFree(subprobs); cudaFree(params); cudaFree(submodels); for (i=0; i<nr_grid; i++) for (j=0; j<nr_fold; j++) { cudaFree(SV[i*nr_fold+j]); for (k=0; k<nr_class-1; k++) cudaFree(sv_coef[i*nr_fold+j][k]); free(sv_coef[i*nr_fold+j]); 
cudaFree(sv_coef_p[i*nr_fold+j]); cudaFree(rho[i*nr_fold+j]); cudaFree(probA[i*nr_fold+j]); cudaFree(probB[i*nr_fold+j]); cudaFree(sv_indices[i*nr_fold+j]); cudaFree(label[i*nr_fold+j]); cudaFree(nSV[i*nr_fold+j]); } free(SV); free(sv_coef_p); free(sv_coef); free(rho); free(probA); free(probB); free(sv_indices); free(label); free(nSV); if (cudaGetLastError() != cudaSuccess) { fprintf(stderr, "Error when cleaning CUDA svm train\n"); res = 1; } return res; }
5610dca276888b8f34e376b1510ed4de5c04234b.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "pointGenKernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *points = NULL; hipMalloc(&points, XSIZE*YSIZE); float *dirs = NULL; hipMalloc(&dirs, XSIZE*YSIZE); int nelems = 1; float minimum = 1; float step = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( pointGenKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, points,dirs,nelems,minimum,step); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( pointGenKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, points,dirs,nelems,minimum,step); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( pointGenKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, points,dirs,nelems,minimum,step); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end 
- start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
5610dca276888b8f34e376b1510ed4de5c04234b.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "pointGenKernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *points = NULL; cudaMalloc(&points, XSIZE*YSIZE); float *dirs = NULL; cudaMalloc(&dirs, XSIZE*YSIZE); int nelems = 1; float minimum = 1; float step = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); pointGenKernel<<<gridBlock,threadBlock>>>(points,dirs,nelems,minimum,step); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { pointGenKernel<<<gridBlock,threadBlock>>>(points,dirs,nelems,minimum,step); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { pointGenKernel<<<gridBlock,threadBlock>>>(points,dirs,nelems,minimum,step); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
985b892ea4f1a11811f7eb74ea379b804867e386.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_advec_cell_kernel3_ydir; int xdim0_advec_cell_kernel3_ydir_h = -1; __constant__ int ydim0_advec_cell_kernel3_ydir; int ydim0_advec_cell_kernel3_ydir_h = -1; __constant__ int xdim1_advec_cell_kernel3_ydir; int xdim1_advec_cell_kernel3_ydir_h = -1; __constant__ int ydim1_advec_cell_kernel3_ydir; int ydim1_advec_cell_kernel3_ydir_h = -1; __constant__ int xdim2_advec_cell_kernel3_ydir; int xdim2_advec_cell_kernel3_ydir_h = -1; __constant__ int ydim2_advec_cell_kernel3_ydir; int ydim2_advec_cell_kernel3_ydir_h = -1; __constant__ int xdim3_advec_cell_kernel3_ydir; int xdim3_advec_cell_kernel3_ydir_h = -1; __constant__ int ydim3_advec_cell_kernel3_ydir; int ydim3_advec_cell_kernel3_ydir_h = -1; __constant__ int xdim4_advec_cell_kernel3_ydir; int xdim4_advec_cell_kernel3_ydir_h = -1; __constant__ int ydim4_advec_cell_kernel3_ydir; int ydim4_advec_cell_kernel3_ydir_h = -1; __constant__ int xdim5_advec_cell_kernel3_ydir; int xdim5_advec_cell_kernel3_ydir_h = -1; __constant__ int ydim5_advec_cell_kernel3_ydir; int ydim5_advec_cell_kernel3_ydir_h = -1; __constant__ int xdim6_advec_cell_kernel3_ydir; int xdim6_advec_cell_kernel3_ydir_h = -1; __constant__ int ydim6_advec_cell_kernel3_ydir; int ydim6_advec_cell_kernel3_ydir_h = -1; __constant__ int xdim7_advec_cell_kernel3_ydir; int xdim7_advec_cell_kernel3_ydir_h = -1; __constant__ int ydim7_advec_cell_kernel3_ydir; int ydim7_advec_cell_kernel3_ydir_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 #undef OPS_ACC5 #undef OPS_ACC6 #undef OPS_ACC7 #define OPS_ACC0(x, y, z) \ (x + xdim0_advec_cell_kernel3_ydir * (y) + \ xdim0_advec_cell_kernel3_ydir * ydim0_advec_cell_kernel3_ydir * (z)) #define OPS_ACC1(x, y, z) \ (x + xdim1_advec_cell_kernel3_ydir * (y) + \ xdim1_advec_cell_kernel3_ydir * ydim1_advec_cell_kernel3_ydir * (z)) #define OPS_ACC2(x, y, z) 
\ (x + xdim2_advec_cell_kernel3_ydir * (y) + \ xdim2_advec_cell_kernel3_ydir * ydim2_advec_cell_kernel3_ydir * (z)) #define OPS_ACC3(x, y, z) \ (x + xdim3_advec_cell_kernel3_ydir * (y) + \ xdim3_advec_cell_kernel3_ydir * ydim3_advec_cell_kernel3_ydir * (z)) #define OPS_ACC4(x, y, z) \ (x + xdim4_advec_cell_kernel3_ydir * (y) + \ xdim4_advec_cell_kernel3_ydir * ydim4_advec_cell_kernel3_ydir * (z)) #define OPS_ACC5(x, y, z) \ (x + xdim5_advec_cell_kernel3_ydir * (y) + \ xdim5_advec_cell_kernel3_ydir * ydim5_advec_cell_kernel3_ydir * (z)) #define OPS_ACC6(x, y, z) \ (x + xdim6_advec_cell_kernel3_ydir * (y) + \ xdim6_advec_cell_kernel3_ydir * ydim6_advec_cell_kernel3_ydir * (z)) #define OPS_ACC7(x, y, z) \ (x + xdim7_advec_cell_kernel3_ydir * (y) + \ xdim7_advec_cell_kernel3_ydir * ydim7_advec_cell_kernel3_ydir * (z)) // user function __device__ inline void advec_cell_kernel3_ydir(const double *vol_flux_y, const double *pre_vol, const int *yy, const double *vertexdy, const double *density1, const double *energy1, double *mass_flux_y, double *ener_flux) { double sigmat, sigmav, sigmam, sigma3, sigma4; double diffuw, diffdw, limiter; double one_by_six = 1.0 / 6.0; int y_max = field.y_max; int upwind, donor, downwind, dif; if (vol_flux_y[OPS_ACC0(0, 0, 0)] > 0.0) { upwind = -2; donor = -1; downwind = 0; dif = donor; } else if (yy[OPS_ACC2(0, 1, 0)] < y_max + 2 - 2) { upwind = 1; donor = 0; downwind = -1; dif = upwind; } else { upwind = 0; donor = 0; downwind = -1; dif = upwind; } sigmat = fabs(vol_flux_y[OPS_ACC0(0, 0, 0)]) / pre_vol[OPS_ACC1(0, donor, 0)]; sigma3 = (1.0 + sigmat) * (vertexdy[OPS_ACC3(0, 0, 0)] / vertexdy[OPS_ACC3(0, dif, 0)]); sigma4 = 2.0 - sigmat; sigmav = sigmat; diffuw = density1[OPS_ACC4(0, donor, 0)] - density1[OPS_ACC4(0, upwind, 0)]; diffdw = density1[OPS_ACC4(0, downwind, 0)] - density1[OPS_ACC4(0, donor, 0)]; if ((diffuw * diffdw) > 0.0) limiter = (1.0 - sigmav) * SIGN(1.0, diffdw) * MIN(MIN(fabs(diffuw), fabs(diffdw)), one_by_six * (sigma3 * 
fabs(diffuw) + sigma4 * fabs(diffdw))); else limiter = 0.0; mass_flux_y[OPS_ACC6(0, 0, 0)] = (vol_flux_y[OPS_ACC0(0, 0, 0)]) * (density1[OPS_ACC4(0, donor, 0)] + limiter); sigmam = fabs(mass_flux_y[OPS_ACC6(0, 0, 0)]) / (density1[OPS_ACC4(0, donor, 0)] * pre_vol[OPS_ACC1(0, donor, 0)]); diffuw = energy1[OPS_ACC5(0, donor, 0)] - energy1[OPS_ACC5(0, upwind, 0)]; diffdw = energy1[OPS_ACC5(0, downwind, 0)] - energy1[OPS_ACC5(0, donor, 0)]; if ((diffuw * diffdw) > 0.0) limiter = (1.0 - sigmam) * SIGN(1.0, diffdw) * MIN(MIN(fabs(diffuw), fabs(diffdw)), one_by_six * (sigma3 * fabs(diffuw) + sigma4 * fabs(diffdw))); else limiter = 0.0; ener_flux[OPS_ACC7(0, 0, 0)] = mass_flux_y[OPS_ACC6(0, 0, 0)] * (energy1[OPS_ACC5(0, donor, 0)] + limiter); } #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 #undef OPS_ACC5 #undef OPS_ACC6 #undef OPS_ACC7 __global__ void ops_advec_cell_kernel3_ydir( const double *__restrict arg0, const double *__restrict arg1, const int *__restrict arg2, const double *__restrict arg3, const double *__restrict arg4, const double *__restrict arg5, double *__restrict arg6, double *__restrict arg7, int size0, int size1, int size2) { int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_advec_cell_kernel3_ydir + idx_z * 1 * 1 * xdim0_advec_cell_kernel3_ydir * ydim0_advec_cell_kernel3_ydir; arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_advec_cell_kernel3_ydir + idx_z * 1 * 1 * xdim1_advec_cell_kernel3_ydir * ydim1_advec_cell_kernel3_ydir; arg2 += idx_x * 0 * 1 + idx_y * 1 * 1 * xdim2_advec_cell_kernel3_ydir + idx_z * 0 * 1 * xdim2_advec_cell_kernel3_ydir * ydim2_advec_cell_kernel3_ydir; arg3 += idx_x * 0 * 1 + idx_y * 1 * 1 * xdim3_advec_cell_kernel3_ydir + idx_z * 0 * 1 * xdim3_advec_cell_kernel3_ydir * ydim3_advec_cell_kernel3_ydir; arg4 += idx_x * 1 * 1 + idx_y * 1 * 1 * 
xdim4_advec_cell_kernel3_ydir + idx_z * 1 * 1 * xdim4_advec_cell_kernel3_ydir * ydim4_advec_cell_kernel3_ydir; arg5 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim5_advec_cell_kernel3_ydir + idx_z * 1 * 1 * xdim5_advec_cell_kernel3_ydir * ydim5_advec_cell_kernel3_ydir; arg6 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim6_advec_cell_kernel3_ydir + idx_z * 1 * 1 * xdim6_advec_cell_kernel3_ydir * ydim6_advec_cell_kernel3_ydir; arg7 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim7_advec_cell_kernel3_ydir + idx_z * 1 * 1 * xdim7_advec_cell_kernel3_ydir * ydim7_advec_cell_kernel3_ydir; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { advec_cell_kernel3_ydir(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7); } } // host stub function void ops_par_loop_advec_cell_kernel3_ydir(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7) { // Timing double t1, t2, c1, c2; ops_arg args[8] = {arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7}; #ifdef CHECKPOINTING if (!ops_checkpointing_before(args, 8, range, 13)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(13, "advec_cell_kernel3_ydir"); OPS_kernels[13].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[3]; int end[3]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 3; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else 
for (int n = 0; n < 3; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int z_size = MAX(0, end[2] - start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; int xdim2 = args[2].dat->size[0]; int ydim2 = args[2].dat->size[1]; int xdim3 = args[3].dat->size[0]; int ydim3 = args[3].dat->size[1]; int xdim4 = args[4].dat->size[0]; int ydim4 = args[4].dat->size[1]; int xdim5 = args[5].dat->size[0]; int ydim5 = args[5].dat->size[1]; int xdim6 = args[6].dat->size[0]; int ydim6 = args[6].dat->size[1]; int xdim7 = args[7].dat->size[0]; int ydim7 = args[7].dat->size[1]; if (xdim0 != xdim0_advec_cell_kernel3_ydir_h || ydim0 != ydim0_advec_cell_kernel3_ydir_h || xdim1 != xdim1_advec_cell_kernel3_ydir_h || ydim1 != ydim1_advec_cell_kernel3_ydir_h || xdim2 != xdim2_advec_cell_kernel3_ydir_h || ydim2 != ydim2_advec_cell_kernel3_ydir_h || xdim3 != xdim3_advec_cell_kernel3_ydir_h || ydim3 != ydim3_advec_cell_kernel3_ydir_h || xdim4 != xdim4_advec_cell_kernel3_ydir_h || ydim4 != ydim4_advec_cell_kernel3_ydir_h || xdim5 != xdim5_advec_cell_kernel3_ydir_h || ydim5 != ydim5_advec_cell_kernel3_ydir_h || xdim6 != xdim6_advec_cell_kernel3_ydir_h || ydim6 != ydim6_advec_cell_kernel3_ydir_h || xdim7 != xdim7_advec_cell_kernel3_ydir_h || ydim7 != ydim7_advec_cell_kernel3_ydir_h) { hipMemcpyToSymbol(xdim0_advec_cell_kernel3_ydir, &xdim0, sizeof(int)); xdim0_advec_cell_kernel3_ydir_h = xdim0; hipMemcpyToSymbol(ydim0_advec_cell_kernel3_ydir, &ydim0, sizeof(int)); ydim0_advec_cell_kernel3_ydir_h = ydim0; hipMemcpyToSymbol(xdim1_advec_cell_kernel3_ydir, &xdim1, sizeof(int)); xdim1_advec_cell_kernel3_ydir_h = xdim1; hipMemcpyToSymbol(ydim1_advec_cell_kernel3_ydir, &ydim1, sizeof(int)); ydim1_advec_cell_kernel3_ydir_h = ydim1; hipMemcpyToSymbol(xdim2_advec_cell_kernel3_ydir, &xdim2, sizeof(int)); 
xdim2_advec_cell_kernel3_ydir_h = xdim2; hipMemcpyToSymbol(ydim2_advec_cell_kernel3_ydir, &ydim2, sizeof(int)); ydim2_advec_cell_kernel3_ydir_h = ydim2; hipMemcpyToSymbol(xdim3_advec_cell_kernel3_ydir, &xdim3, sizeof(int)); xdim3_advec_cell_kernel3_ydir_h = xdim3; hipMemcpyToSymbol(ydim3_advec_cell_kernel3_ydir, &ydim3, sizeof(int)); ydim3_advec_cell_kernel3_ydir_h = ydim3; hipMemcpyToSymbol(xdim4_advec_cell_kernel3_ydir, &xdim4, sizeof(int)); xdim4_advec_cell_kernel3_ydir_h = xdim4; hipMemcpyToSymbol(ydim4_advec_cell_kernel3_ydir, &ydim4, sizeof(int)); ydim4_advec_cell_kernel3_ydir_h = ydim4; hipMemcpyToSymbol(xdim5_advec_cell_kernel3_ydir, &xdim5, sizeof(int)); xdim5_advec_cell_kernel3_ydir_h = xdim5; hipMemcpyToSymbol(ydim5_advec_cell_kernel3_ydir, &ydim5, sizeof(int)); ydim5_advec_cell_kernel3_ydir_h = ydim5; hipMemcpyToSymbol(xdim6_advec_cell_kernel3_ydir, &xdim6, sizeof(int)); xdim6_advec_cell_kernel3_ydir_h = xdim6; hipMemcpyToSymbol(ydim6_advec_cell_kernel3_ydir, &ydim6, sizeof(int)); ydim6_advec_cell_kernel3_ydir_h = ydim6; hipMemcpyToSymbol(xdim7_advec_cell_kernel3_ydir, &xdim7, sizeof(int)); xdim7_advec_cell_kernel3_ydir_h = xdim7; hipMemcpyToSymbol(ydim7_advec_cell_kernel3_ydir, &ydim7, sizeof(int)); ydim7_advec_cell_kernel3_ydir_h = ydim7; } dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; int dat2 = args[2].dat->elem_size; int dat3 = args[3].dat->elem_size; int dat4 = args[4].dat->elem_size; int dat5 = args[5].dat->elem_size; int dat6 = args[6].dat->elem_size; int dat7 = args[7].dat->elem_size; char *p_a[8]; // set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif int base0 = dat0 * 1 * (start[0] * 
args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); base0 = base0 + dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); base1 = base1 + dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]); p_a[1] = (char *)args[1].data_d + base1; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d]; #endif int base2 = dat2 * 1 * (start[0] * args[2].stencil->stride[0] - args[2].dat->base[0] - d_m[0]); base2 = base2 + dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1] - args[2].dat->base[1] - d_m[1]); base2 = base2 + dat2 * args[2].dat->size[0] * args[2].dat->size[1] * (start[2] * args[2].stencil->stride[2] - args[2].dat->base[2] - d_m[2]); p_a[2] = (char *)args[2].data_d + base2; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d] + OPS_sub_dat_list[args[3].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d]; #endif int base3 = dat3 * 1 * (start[0] * args[3].stencil->stride[0] - args[3].dat->base[0] - d_m[0]); base3 = base3 + dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1] - args[3].dat->base[1] - d_m[1]); base3 = base3 + dat3 * 
args[3].dat->size[0] * args[3].dat->size[1] * (start[2] * args[3].stencil->stride[2] - args[3].dat->base[2] - d_m[2]); p_a[3] = (char *)args[3].data_d + base3; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d] + OPS_sub_dat_list[args[4].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d]; #endif int base4 = dat4 * 1 * (start[0] * args[4].stencil->stride[0] - args[4].dat->base[0] - d_m[0]); base4 = base4 + dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1] - args[4].dat->base[1] - d_m[1]); base4 = base4 + dat4 * args[4].dat->size[0] * args[4].dat->size[1] * (start[2] * args[4].stencil->stride[2] - args[4].dat->base[2] - d_m[2]); p_a[4] = (char *)args[4].data_d + base4; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[5].dat->d_m[d] + OPS_sub_dat_list[args[5].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[5].dat->d_m[d]; #endif int base5 = dat5 * 1 * (start[0] * args[5].stencil->stride[0] - args[5].dat->base[0] - d_m[0]); base5 = base5 + dat5 * args[5].dat->size[0] * (start[1] * args[5].stencil->stride[1] - args[5].dat->base[1] - d_m[1]); base5 = base5 + dat5 * args[5].dat->size[0] * args[5].dat->size[1] * (start[2] * args[5].stencil->stride[2] - args[5].dat->base[2] - d_m[2]); p_a[5] = (char *)args[5].data_d + base5; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[6].dat->d_m[d] + OPS_sub_dat_list[args[6].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[6].dat->d_m[d]; #endif int base6 = dat6 * 1 * (start[0] * args[6].stencil->stride[0] - args[6].dat->base[0] - d_m[0]); base6 = base6 + dat6 * args[6].dat->size[0] * (start[1] * args[6].stencil->stride[1] - args[6].dat->base[1] - d_m[1]); base6 = base6 + dat6 * args[6].dat->size[0] * args[6].dat->size[1] * (start[2] * args[6].stencil->stride[2] - args[6].dat->base[2] - d_m[2]); p_a[6] = (char *)args[6].data_d + base6; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = 
args[7].dat->d_m[d] + OPS_sub_dat_list[args[7].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[7].dat->d_m[d]; #endif int base7 = dat7 * 1 * (start[0] * args[7].stencil->stride[0] - args[7].dat->base[0] - d_m[0]); base7 = base7 + dat7 * args[7].dat->size[0] * (start[1] * args[7].stencil->stride[1] - args[7].dat->base[1] - d_m[1]); base7 = base7 + dat7 * args[7].dat->size[0] * args[7].dat->size[1] * (start[2] * args[7].stencil->stride[2] - args[7].dat->base[2] - d_m[2]); p_a[7] = (char *)args[7].data_d + base7; ops_H_D_exchanges_device(args, 8); ops_halo_exchanges(args, 8, range); if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[13].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data hipLaunchKernelGGL(( ops_advec_cell_kernel3_ydir), dim3(grid), dim3(tblock), 0, 0, (double *)p_a[0], (double *)p_a[1], (int *)p_a[2], (double *)p_a[3], (double *)p_a[4], (double *)p_a[5], (double *)p_a[6], (double *)p_a[7], x_size, y_size, z_size); if (OPS_diags > 1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[13].time += t1 - t2; } ops_set_dirtybit_device(args, 8); ops_set_halo_dirtybit3(&args[6], range); ops_set_halo_dirtybit3(&args[7], range); if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[13].mpi_time += t2 - t1; OPS_kernels[13].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[13].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[13].transfer += ops_compute_transfer(dim, start, end, &arg2); OPS_kernels[13].transfer += ops_compute_transfer(dim, start, end, &arg3); OPS_kernels[13].transfer += ops_compute_transfer(dim, start, end, &arg4); OPS_kernels[13].transfer += ops_compute_transfer(dim, start, end, &arg5); OPS_kernels[13].transfer += ops_compute_transfer(dim, start, end, &arg6); OPS_kernels[13].transfer += ops_compute_transfer(dim, start, end, &arg7); } }
985b892ea4f1a11811f7eb74ea379b804867e386.cu
// // auto-generated by ops.py // __constant__ int xdim0_advec_cell_kernel3_ydir; int xdim0_advec_cell_kernel3_ydir_h = -1; __constant__ int ydim0_advec_cell_kernel3_ydir; int ydim0_advec_cell_kernel3_ydir_h = -1; __constant__ int xdim1_advec_cell_kernel3_ydir; int xdim1_advec_cell_kernel3_ydir_h = -1; __constant__ int ydim1_advec_cell_kernel3_ydir; int ydim1_advec_cell_kernel3_ydir_h = -1; __constant__ int xdim2_advec_cell_kernel3_ydir; int xdim2_advec_cell_kernel3_ydir_h = -1; __constant__ int ydim2_advec_cell_kernel3_ydir; int ydim2_advec_cell_kernel3_ydir_h = -1; __constant__ int xdim3_advec_cell_kernel3_ydir; int xdim3_advec_cell_kernel3_ydir_h = -1; __constant__ int ydim3_advec_cell_kernel3_ydir; int ydim3_advec_cell_kernel3_ydir_h = -1; __constant__ int xdim4_advec_cell_kernel3_ydir; int xdim4_advec_cell_kernel3_ydir_h = -1; __constant__ int ydim4_advec_cell_kernel3_ydir; int ydim4_advec_cell_kernel3_ydir_h = -1; __constant__ int xdim5_advec_cell_kernel3_ydir; int xdim5_advec_cell_kernel3_ydir_h = -1; __constant__ int ydim5_advec_cell_kernel3_ydir; int ydim5_advec_cell_kernel3_ydir_h = -1; __constant__ int xdim6_advec_cell_kernel3_ydir; int xdim6_advec_cell_kernel3_ydir_h = -1; __constant__ int ydim6_advec_cell_kernel3_ydir; int ydim6_advec_cell_kernel3_ydir_h = -1; __constant__ int xdim7_advec_cell_kernel3_ydir; int xdim7_advec_cell_kernel3_ydir_h = -1; __constant__ int ydim7_advec_cell_kernel3_ydir; int ydim7_advec_cell_kernel3_ydir_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 #undef OPS_ACC5 #undef OPS_ACC6 #undef OPS_ACC7 #define OPS_ACC0(x, y, z) \ (x + xdim0_advec_cell_kernel3_ydir * (y) + \ xdim0_advec_cell_kernel3_ydir * ydim0_advec_cell_kernel3_ydir * (z)) #define OPS_ACC1(x, y, z) \ (x + xdim1_advec_cell_kernel3_ydir * (y) + \ xdim1_advec_cell_kernel3_ydir * ydim1_advec_cell_kernel3_ydir * (z)) #define OPS_ACC2(x, y, z) \ (x + xdim2_advec_cell_kernel3_ydir * (y) + \ xdim2_advec_cell_kernel3_ydir * 
ydim2_advec_cell_kernel3_ydir * (z)) #define OPS_ACC3(x, y, z) \ (x + xdim3_advec_cell_kernel3_ydir * (y) + \ xdim3_advec_cell_kernel3_ydir * ydim3_advec_cell_kernel3_ydir * (z)) #define OPS_ACC4(x, y, z) \ (x + xdim4_advec_cell_kernel3_ydir * (y) + \ xdim4_advec_cell_kernel3_ydir * ydim4_advec_cell_kernel3_ydir * (z)) #define OPS_ACC5(x, y, z) \ (x + xdim5_advec_cell_kernel3_ydir * (y) + \ xdim5_advec_cell_kernel3_ydir * ydim5_advec_cell_kernel3_ydir * (z)) #define OPS_ACC6(x, y, z) \ (x + xdim6_advec_cell_kernel3_ydir * (y) + \ xdim6_advec_cell_kernel3_ydir * ydim6_advec_cell_kernel3_ydir * (z)) #define OPS_ACC7(x, y, z) \ (x + xdim7_advec_cell_kernel3_ydir * (y) + \ xdim7_advec_cell_kernel3_ydir * ydim7_advec_cell_kernel3_ydir * (z)) // user function __device__ inline void advec_cell_kernel3_ydir(const double *vol_flux_y, const double *pre_vol, const int *yy, const double *vertexdy, const double *density1, const double *energy1, double *mass_flux_y, double *ener_flux) { double sigmat, sigmav, sigmam, sigma3, sigma4; double diffuw, diffdw, limiter; double one_by_six = 1.0 / 6.0; int y_max = field.y_max; int upwind, donor, downwind, dif; if (vol_flux_y[OPS_ACC0(0, 0, 0)] > 0.0) { upwind = -2; donor = -1; downwind = 0; dif = donor; } else if (yy[OPS_ACC2(0, 1, 0)] < y_max + 2 - 2) { upwind = 1; donor = 0; downwind = -1; dif = upwind; } else { upwind = 0; donor = 0; downwind = -1; dif = upwind; } sigmat = fabs(vol_flux_y[OPS_ACC0(0, 0, 0)]) / pre_vol[OPS_ACC1(0, donor, 0)]; sigma3 = (1.0 + sigmat) * (vertexdy[OPS_ACC3(0, 0, 0)] / vertexdy[OPS_ACC3(0, dif, 0)]); sigma4 = 2.0 - sigmat; sigmav = sigmat; diffuw = density1[OPS_ACC4(0, donor, 0)] - density1[OPS_ACC4(0, upwind, 0)]; diffdw = density1[OPS_ACC4(0, downwind, 0)] - density1[OPS_ACC4(0, donor, 0)]; if ((diffuw * diffdw) > 0.0) limiter = (1.0 - sigmav) * SIGN(1.0, diffdw) * MIN(MIN(fabs(diffuw), fabs(diffdw)), one_by_six * (sigma3 * fabs(diffuw) + sigma4 * fabs(diffdw))); else limiter = 0.0; 
mass_flux_y[OPS_ACC6(0, 0, 0)] = (vol_flux_y[OPS_ACC0(0, 0, 0)]) * (density1[OPS_ACC4(0, donor, 0)] + limiter); sigmam = fabs(mass_flux_y[OPS_ACC6(0, 0, 0)]) / (density1[OPS_ACC4(0, donor, 0)] * pre_vol[OPS_ACC1(0, donor, 0)]); diffuw = energy1[OPS_ACC5(0, donor, 0)] - energy1[OPS_ACC5(0, upwind, 0)]; diffdw = energy1[OPS_ACC5(0, downwind, 0)] - energy1[OPS_ACC5(0, donor, 0)]; if ((diffuw * diffdw) > 0.0) limiter = (1.0 - sigmam) * SIGN(1.0, diffdw) * MIN(MIN(fabs(diffuw), fabs(diffdw)), one_by_six * (sigma3 * fabs(diffuw) + sigma4 * fabs(diffdw))); else limiter = 0.0; ener_flux[OPS_ACC7(0, 0, 0)] = mass_flux_y[OPS_ACC6(0, 0, 0)] * (energy1[OPS_ACC5(0, donor, 0)] + limiter); } #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 #undef OPS_ACC5 #undef OPS_ACC6 #undef OPS_ACC7 __global__ void ops_advec_cell_kernel3_ydir( const double *__restrict arg0, const double *__restrict arg1, const int *__restrict arg2, const double *__restrict arg3, const double *__restrict arg4, const double *__restrict arg5, double *__restrict arg6, double *__restrict arg7, int size0, int size1, int size2) { int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_advec_cell_kernel3_ydir + idx_z * 1 * 1 * xdim0_advec_cell_kernel3_ydir * ydim0_advec_cell_kernel3_ydir; arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_advec_cell_kernel3_ydir + idx_z * 1 * 1 * xdim1_advec_cell_kernel3_ydir * ydim1_advec_cell_kernel3_ydir; arg2 += idx_x * 0 * 1 + idx_y * 1 * 1 * xdim2_advec_cell_kernel3_ydir + idx_z * 0 * 1 * xdim2_advec_cell_kernel3_ydir * ydim2_advec_cell_kernel3_ydir; arg3 += idx_x * 0 * 1 + idx_y * 1 * 1 * xdim3_advec_cell_kernel3_ydir + idx_z * 0 * 1 * xdim3_advec_cell_kernel3_ydir * ydim3_advec_cell_kernel3_ydir; arg4 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim4_advec_cell_kernel3_ydir + idx_z * 1 * 1 * 
xdim4_advec_cell_kernel3_ydir * ydim4_advec_cell_kernel3_ydir; arg5 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim5_advec_cell_kernel3_ydir + idx_z * 1 * 1 * xdim5_advec_cell_kernel3_ydir * ydim5_advec_cell_kernel3_ydir; arg6 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim6_advec_cell_kernel3_ydir + idx_z * 1 * 1 * xdim6_advec_cell_kernel3_ydir * ydim6_advec_cell_kernel3_ydir; arg7 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim7_advec_cell_kernel3_ydir + idx_z * 1 * 1 * xdim7_advec_cell_kernel3_ydir * ydim7_advec_cell_kernel3_ydir; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { advec_cell_kernel3_ydir(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7); } } // host stub function void ops_par_loop_advec_cell_kernel3_ydir(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7) { // Timing double t1, t2, c1, c2; ops_arg args[8] = {arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7}; #ifdef CHECKPOINTING if (!ops_checkpointing_before(args, 8, range, 13)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(13, "advec_cell_kernel3_ydir"); OPS_kernels[13].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[3]; int end[3]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 3; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 3; n++) { start[n] = range[2 
* n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int z_size = MAX(0, end[2] - start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; int xdim2 = args[2].dat->size[0]; int ydim2 = args[2].dat->size[1]; int xdim3 = args[3].dat->size[0]; int ydim3 = args[3].dat->size[1]; int xdim4 = args[4].dat->size[0]; int ydim4 = args[4].dat->size[1]; int xdim5 = args[5].dat->size[0]; int ydim5 = args[5].dat->size[1]; int xdim6 = args[6].dat->size[0]; int ydim6 = args[6].dat->size[1]; int xdim7 = args[7].dat->size[0]; int ydim7 = args[7].dat->size[1]; if (xdim0 != xdim0_advec_cell_kernel3_ydir_h || ydim0 != ydim0_advec_cell_kernel3_ydir_h || xdim1 != xdim1_advec_cell_kernel3_ydir_h || ydim1 != ydim1_advec_cell_kernel3_ydir_h || xdim2 != xdim2_advec_cell_kernel3_ydir_h || ydim2 != ydim2_advec_cell_kernel3_ydir_h || xdim3 != xdim3_advec_cell_kernel3_ydir_h || ydim3 != ydim3_advec_cell_kernel3_ydir_h || xdim4 != xdim4_advec_cell_kernel3_ydir_h || ydim4 != ydim4_advec_cell_kernel3_ydir_h || xdim5 != xdim5_advec_cell_kernel3_ydir_h || ydim5 != ydim5_advec_cell_kernel3_ydir_h || xdim6 != xdim6_advec_cell_kernel3_ydir_h || ydim6 != ydim6_advec_cell_kernel3_ydir_h || xdim7 != xdim7_advec_cell_kernel3_ydir_h || ydim7 != ydim7_advec_cell_kernel3_ydir_h) { cudaMemcpyToSymbol(xdim0_advec_cell_kernel3_ydir, &xdim0, sizeof(int)); xdim0_advec_cell_kernel3_ydir_h = xdim0; cudaMemcpyToSymbol(ydim0_advec_cell_kernel3_ydir, &ydim0, sizeof(int)); ydim0_advec_cell_kernel3_ydir_h = ydim0; cudaMemcpyToSymbol(xdim1_advec_cell_kernel3_ydir, &xdim1, sizeof(int)); xdim1_advec_cell_kernel3_ydir_h = xdim1; cudaMemcpyToSymbol(ydim1_advec_cell_kernel3_ydir, &ydim1, sizeof(int)); ydim1_advec_cell_kernel3_ydir_h = ydim1; cudaMemcpyToSymbol(xdim2_advec_cell_kernel3_ydir, &xdim2, sizeof(int)); xdim2_advec_cell_kernel3_ydir_h = xdim2; 
cudaMemcpyToSymbol(ydim2_advec_cell_kernel3_ydir, &ydim2, sizeof(int)); ydim2_advec_cell_kernel3_ydir_h = ydim2; cudaMemcpyToSymbol(xdim3_advec_cell_kernel3_ydir, &xdim3, sizeof(int)); xdim3_advec_cell_kernel3_ydir_h = xdim3; cudaMemcpyToSymbol(ydim3_advec_cell_kernel3_ydir, &ydim3, sizeof(int)); ydim3_advec_cell_kernel3_ydir_h = ydim3; cudaMemcpyToSymbol(xdim4_advec_cell_kernel3_ydir, &xdim4, sizeof(int)); xdim4_advec_cell_kernel3_ydir_h = xdim4; cudaMemcpyToSymbol(ydim4_advec_cell_kernel3_ydir, &ydim4, sizeof(int)); ydim4_advec_cell_kernel3_ydir_h = ydim4; cudaMemcpyToSymbol(xdim5_advec_cell_kernel3_ydir, &xdim5, sizeof(int)); xdim5_advec_cell_kernel3_ydir_h = xdim5; cudaMemcpyToSymbol(ydim5_advec_cell_kernel3_ydir, &ydim5, sizeof(int)); ydim5_advec_cell_kernel3_ydir_h = ydim5; cudaMemcpyToSymbol(xdim6_advec_cell_kernel3_ydir, &xdim6, sizeof(int)); xdim6_advec_cell_kernel3_ydir_h = xdim6; cudaMemcpyToSymbol(ydim6_advec_cell_kernel3_ydir, &ydim6, sizeof(int)); ydim6_advec_cell_kernel3_ydir_h = ydim6; cudaMemcpyToSymbol(xdim7_advec_cell_kernel3_ydir, &xdim7, sizeof(int)); xdim7_advec_cell_kernel3_ydir_h = xdim7; cudaMemcpyToSymbol(ydim7_advec_cell_kernel3_ydir, &ydim7, sizeof(int)); ydim7_advec_cell_kernel3_ydir_h = ydim7; } dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; int dat2 = args[2].dat->elem_size; int dat3 = args[3].dat->elem_size; int dat4 = args[4].dat->elem_size; int dat5 = args[5].dat->elem_size; int dat6 = args[6].dat->elem_size; int dat7 = args[7].dat->elem_size; char *p_a[8]; // set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - 
args[0].dat->base[0] - d_m[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); base0 = base0 + dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); base1 = base1 + dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]); p_a[1] = (char *)args[1].data_d + base1; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d]; #endif int base2 = dat2 * 1 * (start[0] * args[2].stencil->stride[0] - args[2].dat->base[0] - d_m[0]); base2 = base2 + dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1] - args[2].dat->base[1] - d_m[1]); base2 = base2 + dat2 * args[2].dat->size[0] * args[2].dat->size[1] * (start[2] * args[2].stencil->stride[2] - args[2].dat->base[2] - d_m[2]); p_a[2] = (char *)args[2].data_d + base2; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d] + OPS_sub_dat_list[args[3].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d]; #endif int base3 = dat3 * 1 * (start[0] * args[3].stencil->stride[0] - args[3].dat->base[0] - d_m[0]); base3 = base3 + dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1] - args[3].dat->base[1] - d_m[1]); base3 = base3 + dat3 * args[3].dat->size[0] * args[3].dat->size[1] 
* (start[2] * args[3].stencil->stride[2] - args[3].dat->base[2] - d_m[2]); p_a[3] = (char *)args[3].data_d + base3; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d] + OPS_sub_dat_list[args[4].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d]; #endif int base4 = dat4 * 1 * (start[0] * args[4].stencil->stride[0] - args[4].dat->base[0] - d_m[0]); base4 = base4 + dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1] - args[4].dat->base[1] - d_m[1]); base4 = base4 + dat4 * args[4].dat->size[0] * args[4].dat->size[1] * (start[2] * args[4].stencil->stride[2] - args[4].dat->base[2] - d_m[2]); p_a[4] = (char *)args[4].data_d + base4; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[5].dat->d_m[d] + OPS_sub_dat_list[args[5].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[5].dat->d_m[d]; #endif int base5 = dat5 * 1 * (start[0] * args[5].stencil->stride[0] - args[5].dat->base[0] - d_m[0]); base5 = base5 + dat5 * args[5].dat->size[0] * (start[1] * args[5].stencil->stride[1] - args[5].dat->base[1] - d_m[1]); base5 = base5 + dat5 * args[5].dat->size[0] * args[5].dat->size[1] * (start[2] * args[5].stencil->stride[2] - args[5].dat->base[2] - d_m[2]); p_a[5] = (char *)args[5].data_d + base5; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[6].dat->d_m[d] + OPS_sub_dat_list[args[6].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[6].dat->d_m[d]; #endif int base6 = dat6 * 1 * (start[0] * args[6].stencil->stride[0] - args[6].dat->base[0] - d_m[0]); base6 = base6 + dat6 * args[6].dat->size[0] * (start[1] * args[6].stencil->stride[1] - args[6].dat->base[1] - d_m[1]); base6 = base6 + dat6 * args[6].dat->size[0] * args[6].dat->size[1] * (start[2] * args[6].stencil->stride[2] - args[6].dat->base[2] - d_m[2]); p_a[6] = (char *)args[6].data_d + base6; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[7].dat->d_m[d] + 
OPS_sub_dat_list[args[7].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[7].dat->d_m[d]; #endif int base7 = dat7 * 1 * (start[0] * args[7].stencil->stride[0] - args[7].dat->base[0] - d_m[0]); base7 = base7 + dat7 * args[7].dat->size[0] * (start[1] * args[7].stencil->stride[1] - args[7].dat->base[1] - d_m[1]); base7 = base7 + dat7 * args[7].dat->size[0] * args[7].dat->size[1] * (start[2] * args[7].stencil->stride[2] - args[7].dat->base[2] - d_m[2]); p_a[7] = (char *)args[7].data_d + base7; ops_H_D_exchanges_device(args, 8); ops_halo_exchanges(args, 8, range); if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[13].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data ops_advec_cell_kernel3_ydir<<<grid, tblock>>>( (double *)p_a[0], (double *)p_a[1], (int *)p_a[2], (double *)p_a[3], (double *)p_a[4], (double *)p_a[5], (double *)p_a[6], (double *)p_a[7], x_size, y_size, z_size); if (OPS_diags > 1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[13].time += t1 - t2; } ops_set_dirtybit_device(args, 8); ops_set_halo_dirtybit3(&args[6], range); ops_set_halo_dirtybit3(&args[7], range); if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[13].mpi_time += t2 - t1; OPS_kernels[13].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[13].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[13].transfer += ops_compute_transfer(dim, start, end, &arg2); OPS_kernels[13].transfer += ops_compute_transfer(dim, start, end, &arg3); OPS_kernels[13].transfer += ops_compute_transfer(dim, start, end, &arg4); OPS_kernels[13].transfer += ops_compute_transfer(dim, start, end, &arg5); OPS_kernels[13].transfer += ops_compute_transfer(dim, start, end, &arg6); OPS_kernels[13].transfer += ops_compute_transfer(dim, start, end, &arg7); } }
563d409791cd4b3611e47f8138b50d3aac763b86.hip
// !!! This is a file automatically generated by hipify!!! /*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. 
// In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. // //M*/ #if !defined CUDA_DISABLER #include "opencv2/core/cuda/common.hpp" #include "opencv2/core/cuda/functional.hpp" #include "opencv2/core/cuda/transform.hpp" #include "opencv2/core/cuda/saturate_cast.hpp" #include "opencv2/core/cuda/simd_functions.hpp" #include "arithm_func_traits.hpp" using namespace cv::gpu; using namespace cv::gpu::cudev; namespace arithm { struct VSub4 : binary_function<uint, uint, uint> { __device__ __forceinline__ uint operator ()(uint a, uint b) const { return vsub4(a, b); } __device__ __forceinline__ VSub4() {} __device__ __forceinline__ VSub4(const VSub4& other) {} }; struct VSub2 : binary_function<uint, uint, uint> { __device__ __forceinline__ uint operator ()(uint a, uint b) const { return vsub2(a, b); } __device__ __forceinline__ VSub2() {} __device__ __forceinline__ VSub2(const VSub2& other) {} }; template <typename T, typename D> struct SubMat : binary_function<T, T, D> { __device__ __forceinline__ D operator ()(T a, T b) const { return saturate_cast<D>(a - b); } __device__ __forceinline__ SubMat() {} __device__ __forceinline__ SubMat(const SubMat& other) {} }; } namespace cv { namespace gpu { namespace cudev { template <> struct TransformFunctorTraits< arithm::VSub4 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)> { }; template <> struct TransformFunctorTraits< arithm::VSub2 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)> { }; template <typename T, typename D> struct 
TransformFunctorTraits< arithm::SubMat<T, D> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(D)> { }; }}} namespace arithm { void subMat_v4(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, hipStream_t stream) { cudev::transform(src1, src2, dst, VSub4(), WithOutMask(), stream); } void subMat_v2(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, hipStream_t stream) { cudev::transform(src1, src2, dst, VSub2(), WithOutMask(), stream); } template <typename T, typename D> void subMat(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream) { if (mask.data) cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<D>) dst, SubMat<T, D>(), mask, stream); else cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<D>) dst, SubMat<T, D>(), WithOutMask(), stream); } template void subMat<uchar, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream); template void subMat<uchar, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream); template void subMat<uchar, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream); template void subMat<uchar, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream); template void subMat<uchar, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream); template void subMat<uchar, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream); template void subMat<uchar, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream); template void subMat<schar, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream); template void subMat<schar, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream); template void subMat<schar, ushort>(PtrStepSzb src1, 
PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream); template void subMat<schar, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream); template void subMat<schar, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream); template void subMat<schar, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream); template void subMat<schar, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream); //template void subMat<ushort, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream); //template void subMat<ushort, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream); template void subMat<ushort, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream); template void subMat<ushort, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream); template void subMat<ushort, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream); template void subMat<ushort, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream); template void subMat<ushort, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream); //template void subMat<short, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream); //template void subMat<short, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream); template void subMat<short, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream); template void subMat<short, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream); template void subMat<short, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, 
hipStream_t stream); template void subMat<short, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream); template void subMat<short, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream); //template void subMat<int, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream); //template void subMat<int, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream); //template void subMat<int, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream); //template void subMat<int, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream); template void subMat<int, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream); template void subMat<int, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream); template void subMat<int, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream); //template void subMat<float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream); //template void subMat<float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream); //template void subMat<float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream); //template void subMat<float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream); //template void subMat<float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream); template void subMat<float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream); template void subMat<float, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream); //template void subMat<double, 
uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream); //template void subMat<double, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream); //template void subMat<double, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream); //template void subMat<double, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream); //template void subMat<double, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream); //template void subMat<double, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream); template void subMat<double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream); } #endif // CUDA_DISABLER
563d409791cd4b3611e47f8138b50d3aac763b86.cu
/*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. 
// In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. // //M*/ #if !defined CUDA_DISABLER #include "opencv2/core/cuda/common.hpp" #include "opencv2/core/cuda/functional.hpp" #include "opencv2/core/cuda/transform.hpp" #include "opencv2/core/cuda/saturate_cast.hpp" #include "opencv2/core/cuda/simd_functions.hpp" #include "arithm_func_traits.hpp" using namespace cv::gpu; using namespace cv::gpu::cudev; namespace arithm { struct VSub4 : binary_function<uint, uint, uint> { __device__ __forceinline__ uint operator ()(uint a, uint b) const { return vsub4(a, b); } __device__ __forceinline__ VSub4() {} __device__ __forceinline__ VSub4(const VSub4& other) {} }; struct VSub2 : binary_function<uint, uint, uint> { __device__ __forceinline__ uint operator ()(uint a, uint b) const { return vsub2(a, b); } __device__ __forceinline__ VSub2() {} __device__ __forceinline__ VSub2(const VSub2& other) {} }; template <typename T, typename D> struct SubMat : binary_function<T, T, D> { __device__ __forceinline__ D operator ()(T a, T b) const { return saturate_cast<D>(a - b); } __device__ __forceinline__ SubMat() {} __device__ __forceinline__ SubMat(const SubMat& other) {} }; } namespace cv { namespace gpu { namespace cudev { template <> struct TransformFunctorTraits< arithm::VSub4 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)> { }; template <> struct TransformFunctorTraits< arithm::VSub2 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)> { }; template <typename T, typename D> struct 
TransformFunctorTraits< arithm::SubMat<T, D> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(D)> { }; }}} namespace arithm { void subMat_v4(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, cudaStream_t stream) { cudev::transform(src1, src2, dst, VSub4(), WithOutMask(), stream); } void subMat_v2(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, cudaStream_t stream) { cudev::transform(src1, src2, dst, VSub2(), WithOutMask(), stream); } template <typename T, typename D> void subMat(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream) { if (mask.data) cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<D>) dst, SubMat<T, D>(), mask, stream); else cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<D>) dst, SubMat<T, D>(), WithOutMask(), stream); } template void subMat<uchar, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream); template void subMat<uchar, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream); template void subMat<uchar, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream); template void subMat<uchar, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream); template void subMat<uchar, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream); template void subMat<uchar, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream); template void subMat<uchar, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream); template void subMat<schar, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream); template void subMat<schar, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream); template void subMat<schar, ushort>(PtrStepSzb 
src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream); template void subMat<schar, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream); template void subMat<schar, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream); template void subMat<schar, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream); template void subMat<schar, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream); //template void subMat<ushort, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream); //template void subMat<ushort, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream); template void subMat<ushort, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream); template void subMat<ushort, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream); template void subMat<ushort, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream); template void subMat<ushort, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream); template void subMat<ushort, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream); //template void subMat<short, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream); //template void subMat<short, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream); template void subMat<short, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream); template void subMat<short, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream); template void subMat<short, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb 
dst, PtrStepb mask, cudaStream_t stream); template void subMat<short, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream); template void subMat<short, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream); //template void subMat<int, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream); //template void subMat<int, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream); //template void subMat<int, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream); //template void subMat<int, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream); template void subMat<int, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream); template void subMat<int, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream); template void subMat<int, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream); //template void subMat<float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream); //template void subMat<float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream); //template void subMat<float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream); //template void subMat<float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream); //template void subMat<float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream); template void subMat<float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream); template void subMat<float, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream); 
//template void subMat<double, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream); //template void subMat<double, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream); //template void subMat<double, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream); //template void subMat<double, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream); //template void subMat<double, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream); //template void subMat<double, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream); template void subMat<double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream); } #endif // CUDA_DISABLER
1343310b08f027295582d34935c80a4bacd031dc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void Matrix_PermuteRows(const float * A , int Acount, int Acols, const float * B , int Bcount, int Bcols, float * out0 , int out0count, int out0cols) { int id = blockDim.x*blockIdx.y*gridDim.x + blockDim.x*blockIdx.x + threadIdx.x; int id_row, id_col, id_rowNew; if (id<Acount) { id_row = id/Acols; id_col = id%Acols; id_rowNew = B[id_row]*Acols; out0[id] = A[id_col + id_rowNew]; } }
1343310b08f027295582d34935c80a4bacd031dc.cu
#include "includes.h" __global__ void Matrix_PermuteRows(const float * A , int Acount, int Acols, const float * B , int Bcount, int Bcols, float * out0 , int out0count, int out0cols) { int id = blockDim.x*blockIdx.y*gridDim.x + blockDim.x*blockIdx.x + threadIdx.x; int id_row, id_col, id_rowNew; if (id<Acount) { id_row = id/Acols; id_col = id%Acols; id_rowNew = B[id_row]*Acols; out0[id] = A[id_col + id_rowNew]; } }
c3a7907696a750e7ca36adf1c4954ea2d73fe663.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <stdlib.h> #include <hip/hip_runtime.h> #include <hip/hip_complex.h> #include <fftw3.h> #include <hipfft.h> #include <sys/time.h> #include <assert.h> using namespace std; #define k_rangeres 30 #define k_calib 1941.05 #define RESULT_SIZE 2 #define DEBUG inline hipError_t checkCuda(hipError_t result) { #if defined(DEBUG) || defined(_DEBUG) if (result != hipSuccess) { fprintf(stderr, "CUDA Runtime Error: %s\n", hipGetErrorString(result)); assert(result == hipSuccess); } #endif return result; } float *generate_hamming_coef(int m, int n) { // Calculate normalization power on range cell float p_range=0; for(int i=0; i < m; i++) { p_range=p_range+pow(0.53836-0.46164*cos(2*M_PI*(i)/(m-1)), 2.0); } p_range=p_range/m; // Calculate normalization power on Doppler cell float p_doppler=0; for(int j=0; j < n; j++) { p_doppler=p_doppler+pow(0.53836-0.46164*cos(2*M_PI*(j)/(n-1)), 2.0); } p_doppler=p_doppler/n; // Constant since FFT is not normalized and the power is computed w.r.t. 
50ohm const float K_wind = -1/(16383.5*m*n*sqrt(50)); const float c = K_wind/sqrt(p_range*p_doppler); // Generate elements float *_hamming_coef= new float[m*n]; for(int i=0; i < m; i++) { for(int j=0; j < n; j++) { _hamming_coef[i*n+j] = (0.53836-0.46164*cos(2*M_PI*(i)/(m-1))) * (0.53836-0.46164*cos(2*M_PI*(j)/(n-1))) * c; } } return _hamming_coef; } float *generate_ma_coef(int n){ float *_ma_coef = new float[n]; float _sum = 0.0; for(int i=0; i < n; i++) { _ma_coef[i]=exp(-(pow(i-((n-1)/2), 2.0))/2); _sum += _ma_coef[i]; } for(int i=0; i < n; i++){ _ma_coef[i] = _ma_coef[i]/_sum; } return _ma_coef; } __global__ void __apply_hamming(cuFloatComplex *a, float *b) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; a[idx] = make_cuFloatComplex(b[idx]*cuCrealf(a[idx]), b[idx]*cuCimagf(a[idx])); } __global__ void __apply_ma(cuFloatComplex *inout, cuFloatComplex *macoef) { const unsigned int i = blockIdx.x, j = threadIdx.x, n = blockDim.x; inout[i*n+j] = cuCmulf(inout[i*n+j], macoef[j]); } __global__ void __conjugate(cuFloatComplex *a) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; a[idx].y *= -1; } __global__ void __shift(cuFloatComplex *inout, int n) { const unsigned int i = blockIdx.x, j = threadIdx.x; cuFloatComplex temp = inout[i*n+j]; inout[i*n+j] = inout[i*n+(j+n/2)]; inout[i*n+(j+n/2)] = temp; } __global__ void __clip(cuFloatComplex *inout, int n) { const unsigned int i = blockIdx.x, j = n-threadIdx.x-1; inout[i*n+j] = make_cuFloatComplex(0, 0); } __global__ void __abssqr(cuFloatComplex *inout, int n) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; float real, imag; real = cuCrealf(inout[idx]); imag = cuCimagf(inout[idx]); inout[idx] = make_cuFloatComplex(real*real + imag*imag, 0); } __global__ void __sum(cuFloatComplex *in, cuFloatComplex *out) { const unsigned int i = blockIdx.x, j = threadIdx.x, n = blockDim.x; out[i*n+j] = make_cuFloatComplex(in[i*n+j].x, in[i*n+j].y); __syncthreads(); for (unsigned int 
s=blockDim.x/2; s>0; s>>=1) { if (j < s) { out[i*n+j] = cuCaddf(out[i*n+j], out[i*n+j+s]); } __syncthreads(); } } __global__ void __sum_v2(cuFloatComplex *in, cuFloatComplex *out) { const unsigned int i = 2*blockIdx.x, j = threadIdx.x, n = blockDim.x; #pragma unroll for (unsigned int d=0; d<2; d++) { out[i*n+j+n*d] = make_cuFloatComplex(in[i*n+j+n*d].x, in[i*n+j+n*d].y); } __syncthreads(); for (unsigned int s=blockDim.x/2; s>0; s>>=1) { if (j < s) { #pragma unroll for (unsigned int d=0; d<2; d++) { out[i*n+j+n*d] = cuCaddf(out[i*n+j+n*d], out[i*n+j+n*d+s]); } } __syncthreads(); } } __global__ void __sum_v3(cuFloatComplex *in, cuFloatComplex *out) { const unsigned int i = blockIdx.x, j = threadIdx.x, n = blockDim.x; extern __shared__ cuFloatComplex sdata[]; sdata[j] = make_cuFloatComplex(in[i*n+j].x, in[i*n+j].y); __syncthreads(); for (unsigned int s=blockDim.x/2; s>0; s>>=1) { if (j < s) { sdata[j] = cuCaddf(sdata[j], sdata[j+s]); } __syncthreads(); } if(j==0) { out[i*n] = sdata[j]; } } __global__ void __sum_v4(cuFloatComplex *in, cuFloatComplex *out) { const unsigned int i = 2*blockIdx.x, j = threadIdx.x, n = blockDim.x; extern __shared__ cuFloatComplex sdata[]; #pragma unroll for (unsigned int d=0; d<2; d++) { sdata[j+n*d] = make_cuFloatComplex(in[i*n+j+n*d].x, in[i*n+j+n*d].y); } __syncthreads(); for (unsigned int s=blockDim.x/2; s>0; s>>=1) { if (j < s) { #pragma unroll for (unsigned int d=0; d<2; d++) { sdata[j+n*d] = cuCaddf(sdata[j+n*d], sdata[j+n*d+s]); } } __syncthreads(); } if(j==0) { #pragma unroll for (unsigned int d=0; d<2; d++) { out[i*n+n*d] = sdata[j+n*d]; } } } __global__ void __sum_inplace(cuFloatComplex *g_idata) { const unsigned int i = blockIdx.x, j = threadIdx.x, n = blockDim.x; // __syncthreads(); for (unsigned int s=blockDim.x/2; s>0; s>>=1) { if (j < s) { // g_idata[i] = make_cuFloatComplex(g_idata[i].x+g_idata[i + s].x, 0); g_idata[i*n+j] = cuCaddf(g_idata[i*n+j], g_idata[i*n+j+s]); } __syncthreads(); } } __global__ void 
__sum_inplace_v2(cuFloatComplex *g_idata) { const unsigned int i = 2*blockIdx.x, j = threadIdx.x, n = blockDim.x; // __syncthreads(); for (unsigned int s=blockDim.x/2; s>0; s>>=1) { if (j < s) { // g_idata[i] = make_cuFloatComplex(g_idata[i].x+g_idata[i + s].x, 0); #pragma unroll for (unsigned int d=0; d<2; d++) { g_idata[i*n+j+n*d] = cuCaddf(g_idata[i*n+j+n*d], g_idata[i*n+j+n*d+s]); } } __syncthreads(); } } __global__ void __sum_inplace_v3(cuFloatComplex *in) { const unsigned int i = blockIdx.x, j = threadIdx.x, n = blockDim.x; extern __shared__ cuFloatComplex sdata[]; sdata[j] = make_cuFloatComplex(in[i*n+j].x, in[i*n+j].y); __syncthreads(); for (unsigned int s=blockDim.x/2; s>0; s>>=1) { if (j < s) { sdata[j] = cuCaddf(sdata[j], sdata[j+s]); } __syncthreads(); } if(j==0) { in[i*n] = sdata[j]; } } __global__ void __sum_inplace_v4(cuFloatComplex *in) { const unsigned int i = 2*blockIdx.x, j = threadIdx.x, n = blockDim.x; extern __shared__ cuFloatComplex sdata[]; #pragma unroll for (unsigned int d=0; d<2; d++) { sdata[j+n*d] = make_cuFloatComplex(in[i*n+j+n*d].x, in[i*n+j+n*d].y); } __syncthreads(); for (unsigned int s=blockDim.x/2; s>0; s>>=1) { if (j < s) { #pragma unroll for (unsigned int d=0; d<2; d++) { sdata[j+n*d] = cuCaddf(sdata[j+n*d], sdata[j+n*d+s]); } } __syncthreads(); } if(j==0) { #pragma unroll for (unsigned int d=0; d<2; d++) { in[i*n+n*d] = sdata[j+n*d]; } } } __global__ void __avgconj(cuFloatComplex *inout, cuFloatComplex *sum) { const unsigned int i = blockIdx.x, j = threadIdx.x, n = blockDim.x; float avgx = sum[i*n].x/n; float avgy = sum[i*n].y/n; inout[i*n+j] = make_cuFloatComplex(inout[i*n+j].x-avgx, (inout[i*n+j].y-avgy)*-1); } __global__ void __scale_real(cuFloatComplex *inout) { const unsigned int i = blockIdx.x, j = threadIdx.x, n = blockDim.x; inout[i*n+j] = make_cuFloatComplex(inout[i*n+j].x/n, 0); } __global__ void __calcresult(cuFloatComplex *hh, cuFloatComplex *vv, cuFloatComplex *hv, float *out, int n) { const unsigned int i = 
blockIdx.x; float z = pow(i*k_rangeres, 2.0) * k_calib * hh[i*n].x; float zdb = 10 * log10(z); float zdr = 10 * (log10(hh[i*n].x)-log10(vv[i*n].x)); out[i*RESULT_SIZE+0] = zdb; out[i*RESULT_SIZE+1] = zdr; } __global__ void __calcresult_v2(cuFloatComplex *hh, cuFloatComplex *vv, cuFloatComplex *hv, float *out, int n) { const unsigned int i = threadIdx.x; float z = pow(i*k_rangeres, 2.0) * k_calib * hh[i*n].x; float zdb = 10 * log10(z); float zdr = 10 * (log10(hh[i*n].x)-log10(vv[i*n].x)); out[i*RESULT_SIZE+0] = zdb; out[i*RESULT_SIZE+1] = zdr; } void tick(timeval *begin) { gettimeofday(begin, NULL); } void tock(timeval *begin, timeval *end, string caption) { unsigned long long bb, e; gettimeofday(end, NULL); bb = (unsigned long long)(begin->tv_sec) * 1000000 + (unsigned long long)(begin->tv_usec) / 1; e = (unsigned long long)(end->tv_sec) * 1000000 + (unsigned long long)(end->tv_usec) / 1; cout << caption << ": " << e-bb << endl; } int main(int argc, char **argv) { ios_base::sync_with_stdio(false); struct timeval tb, te; tick(&tb); cuFloatComplex *iqhh, *iqvv, *iqhv; float *result; int sector_id; const int m = 1024; // cell const int n = 512; // sweep const int ma_count = 7; iqhh = new cuFloatComplex[m*n]; iqvv = new cuFloatComplex[m*n]; iqhv = new cuFloatComplex[m*n]; result = new float[(m/2)*RESULT_SIZE]; float a, b; // Generate Hamming coefficients const float *hamming_coef = generate_hamming_coef(m, n); // Generate MA coefficients float *ma_coef = generate_ma_coef(ma_count); fftwf_complex *_fft_ma = (fftwf_complex*) fftwf_malloc(sizeof(fftwf_complex) * n); fftwf_plan fft_ma_plan = fftwf_plan_dft_1d(n, _fft_ma, _fft_ma, FFTW_FORWARD, FFTW_ESTIMATE); for (int j=0; j<ma_count; j++) { _fft_ma[j][0] = ma_coef[j]; _fft_ma[j][1] = 0; } for (int j=ma_count; j<n; j++) { _fft_ma[j][0] = 0; _fft_ma[j][1] = 0; } fftwf_execute(fft_ma_plan); fftwf_destroy_plan(fft_ma_plan); cuFloatComplex *fft_ma; fft_ma = new cuFloatComplex[n]; for (int j=0; j<n; j++) { fft_ma[j] = 
make_cuFloatComplex(_fft_ma[j][0], _fft_ma[j][1]); } fftwf_free(_fft_ma); // Device buffers /*__constant__*/ float *d_hamming; /*__constant__*/ cuFloatComplex *d_ma; cuFloatComplex *d_iqhh, *d_iqvv, *d_iqhv; cuFloatComplex *d_sum; float *d_result; //float *d_powhh, *d_powvv; hipMalloc(&d_hamming, m*n*sizeof(float)); hipMalloc(&d_ma, n*sizeof(cuFloatComplex)); hipMalloc(&d_iqhh, m*n*sizeof(cuFloatComplex)); hipMalloc(&d_iqvv, m*n*sizeof(cuFloatComplex)); hipMalloc(&d_iqhv, m*n*sizeof(cuFloatComplex)); hipMalloc(&d_sum, m*n*sizeof(cuFloatComplex)); hipMalloc(&d_result, (m/2)*RESULT_SIZE*sizeof(float)); hipMemcpy(d_hamming, hamming_coef, m*n*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_ma, fft_ma, n*sizeof(cuFloatComplex), hipMemcpyHostToDevice); // CUFFT initialization hipfftHandle fft_range_handle; hipfftHandle fft_doppler_handle; hipfftHandle fft_pdop_handle; int rank = 1; // --- 1D FFTs int nn[] = { m }; // --- Size of the Fourier transform int istride = n, ostride = n; // --- Distance between two successive input/output elements int idist = 1, odist = 1; // --- Distance between batches int inembed[] = { 0 }; // --- Input size with pitch (ignored for 1D transforms) int onembed[] = { 0 }; // --- Output size with pitch (ignored for 1D transforms) int batch = n; // --- Number of batched executions hipfftPlanMany(&fft_range_handle, rank, nn, inembed, istride, idist, onembed, ostride, odist, HIPFFT_C2C, batch); hipfftPlan1d(&fft_doppler_handle, n, HIPFFT_C2C, m); hipfftPlan1d(&fft_pdop_handle, n, HIPFFT_C2C, m/2); tock(&tb, &te, "initialization"); float ms; // elapsed time in milliseconds sector_id = -1; // create events and streams hipEvent_t startEvent, stopEvent; hipEventCreate(&startEvent); hipEventCreate(&stopEvent); // hipEventCreate(&dummyEvent); hipEventRecord(startEvent,0); tick(&tb); while(sector_id < 126) { // tick(&tb); // Read 1 sector data // cin >> sector_id; sector_id++; for (int i=0; i<m; i++) { for (int j=0; j<n; j++) { // cin >> a >> b; 
iqhh[i*n+j] = make_cuFloatComplex(i, j); } } for (int i=0; i<m; i++) { for (int j=0; j<n; j++) { // cin >> a >> b; iqvv[i*n+j] = make_cuFloatComplex(j, i); } } for (int i=0; i<m; i++) { for (int j=0; j<n; j++) { // cin >> a >> b; iqhv[i*n+j] = make_cuFloatComplex(j, i); } } hipMemcpy(d_iqhh, iqhh, m*n*sizeof(cuFloatComplex), hipMemcpyHostToDevice); hipMemcpy(d_iqvv, iqvv, m*n*sizeof(cuFloatComplex), hipMemcpyHostToDevice); hipMemcpy(d_iqhv, iqhv, m*n*sizeof(cuFloatComplex), hipMemcpyHostToDevice); // apply Hamming coefficients hipLaunchKernelGGL(( __apply_hamming), dim3(m),dim3(n), 0, 0, d_iqhh, d_hamming); hipLaunchKernelGGL(( __apply_hamming), dim3(m),dim3(n), 0, 0, d_iqvv, d_hamming); hipLaunchKernelGGL(( __apply_hamming), dim3(m),dim3(n), 0, 0, d_iqhv, d_hamming); // FFT range profile hipfftExecC2C(fft_range_handle, d_iqhh, d_iqhh, HIPFFT_FORWARD); hipfftExecC2C(fft_range_handle, d_iqvv, d_iqvv, HIPFFT_FORWARD); hipfftExecC2C(fft_range_handle, d_iqhv, d_iqhv, HIPFFT_FORWARD); // FFT+shift Doppler profile hipLaunchKernelGGL(( __sum_v4), dim3(m/2),dim3(n),2*n*sizeof(cuFloatComplex), 0, d_iqhh, d_sum); hipLaunchKernelGGL(( __avgconj), dim3(m),dim3(n), 0, 0, d_iqhh, d_sum); hipLaunchKernelGGL(( __sum_v4), dim3(m/2),dim3(n),2*n*sizeof(cuFloatComplex), 0, d_iqvv, d_sum); hipLaunchKernelGGL(( __avgconj), dim3(m),dim3(n), 0, 0, d_iqvv, d_sum); hipLaunchKernelGGL(( __sum_v4), dim3(m/2),dim3(n),2*n*sizeof(cuFloatComplex), 0, d_iqhv, d_sum); hipLaunchKernelGGL(( __avgconj), dim3(m),dim3(n), 0, 0, d_iqhv, d_sum); hipfftExecC2C(fft_doppler_handle, d_iqhh, d_iqhh, HIPFFT_FORWARD); hipfftExecC2C(fft_doppler_handle, d_iqvv, d_iqvv, HIPFFT_FORWARD); hipfftExecC2C(fft_doppler_handle, d_iqhv, d_iqhv, HIPFFT_FORWARD); hipLaunchKernelGGL(( __conjugate), dim3(m),dim3(n), 0, 0, d_iqhh); hipLaunchKernelGGL(( __conjugate), dim3(m),dim3(n), 0, 0, d_iqvv); hipLaunchKernelGGL(( __conjugate), dim3(m),dim3(n), 0, 0, d_iqhv); hipLaunchKernelGGL(( __shift), dim3(m),dim3(n/2), 0, 0, d_iqhh, 
n); hipLaunchKernelGGL(( __shift), dim3(m),dim3(n/2), 0, 0, d_iqvv, n); hipLaunchKernelGGL(( __shift), dim3(m),dim3(n/2), 0, 0, d_iqhv, n); hipLaunchKernelGGL(( __clip), dim3(m),dim3(2), 0, 0, d_iqhh, n); hipLaunchKernelGGL(( __clip), dim3(m),dim3(2), 0, 0, d_iqvv, n); hipLaunchKernelGGL(( __clip), dim3(m),dim3(2), 0, 0, d_iqhv, n); // Get absolute value hipLaunchKernelGGL(( __abssqr), dim3(m/2),dim3(n), 0, 0, d_iqhh, n); hipLaunchKernelGGL(( __abssqr), dim3(m/2),dim3(n), 0, 0, d_iqvv, n); hipLaunchKernelGGL(( __abssqr), dim3(m/2),dim3(n), 0, 0, d_iqhv, n); // FFT PDOP hipfftExecC2C(fft_pdop_handle, d_iqhh, d_iqhh, HIPFFT_FORWARD); hipfftExecC2C(fft_pdop_handle, d_iqvv, d_iqvv, HIPFFT_FORWARD); hipfftExecC2C(fft_pdop_handle, d_iqhv, d_iqhv, HIPFFT_FORWARD); // Apply MA coefficients hipLaunchKernelGGL(( __apply_ma), dim3(m/2),dim3(n), 0, 0, d_iqhh, d_ma); hipLaunchKernelGGL(( __apply_ma), dim3(m/2),dim3(n), 0, 0, d_iqvv, d_ma); hipLaunchKernelGGL(( __apply_ma), dim3(m/2),dim3(n), 0, 0, d_iqhv, d_ma); // Inverse FFT hipfftExecC2C(fft_pdop_handle, d_iqhh, d_iqhh, HIPFFT_BACKWARD); hipfftExecC2C(fft_pdop_handle, d_iqvv, d_iqvv, HIPFFT_BACKWARD); hipfftExecC2C(fft_pdop_handle, d_iqhv, d_iqhv, HIPFFT_BACKWARD); hipLaunchKernelGGL(( __scale_real), dim3(m/2),dim3(n), 0, 0, d_iqhh); hipLaunchKernelGGL(( __scale_real), dim3(m/2),dim3(n), 0, 0, d_iqvv); hipLaunchKernelGGL(( __scale_real), dim3(m/2),dim3(n), 0, 0, d_iqhv); // Sum hipLaunchKernelGGL(( __sum_inplace_v4), dim3(m/4),dim3(n),2*n*sizeof(cuFloatComplex), 0, d_iqhh); hipLaunchKernelGGL(( __sum_inplace_v4), dim3(m/4),dim3(n),2*n*sizeof(cuFloatComplex), 0, d_iqvv); hipLaunchKernelGGL(( __sum_inplace_v4), dim3(m/4),dim3(n),2*n*sizeof(cuFloatComplex), 0, d_iqhv); // hipMemcpy(iqhh, d_iqhh, m*n*sizeof(cuFloatComplex), hipMemcpyDeviceToHost); // hipMemcpy(iqvv, d_iqvv, m*n*sizeof(cuFloatComplex), hipMemcpyDeviceToHost); // for (int i=0; i<m/2; i++) { // float z = pow(i*k_rangeres, 2.0) * k_calib * iqhh[i*n].x; // float zdb 
= 10 * log10(z); // float zdr = 10 * (log10(iqhh[i*n].x)-log10(iqvv[i*n].x)); // cout << zdb << " " << zdr << endl; // } // exit(0); // Calculate ZdB, Zdr hipLaunchKernelGGL(( __calcresult_v2), dim3(1),dim3(m/2), 0, 0, d_iqhh, d_iqvv, d_iqhv, d_result, n); hipMemcpy(result, d_result, (m/2)*RESULT_SIZE*sizeof(float), hipMemcpyDeviceToHost); // for (int i=0; i<m/2; i++) { // for (int j=0; j<RESULT_SIZE; j++) { // cout << result[i*RESULT_SIZE+j] << " "; // } // cout << endl; // } // exit(0); } tock(&tb, &te, "All (us)"); hipEventRecord(stopEvent, 0); hipEventSynchronize(stopEvent); hipEventElapsedTime(&ms, startEvent, stopEvent); printf("Time for sequential transfer and execute (ms): %f\n", ms); hipEventDestroy(startEvent); hipEventDestroy(stopEvent); hipFree(d_hamming); hipFree(d_ma); hipFree(d_iqhh); hipFree(d_iqvv); hipFree(d_iqhv); delete[] iqhh; delete[] iqvv; delete[] iqhv; return 0; } // hipMemcpy(iqhh, d_iqhh, m*n*sizeof(cuFloatComplex), hipMemcpyDeviceToHost); // hipMemcpy(iqvv, d_iqvv, m*n*sizeof(cuFloatComplex), hipMemcpyDeviceToHost); // for (int i=0; i<m; i++) { // for (int j=0; j<n; j++) { // cout << "(" << iqhh[i*n+j].x << "," << iqhh[i*n+j].y << ") "; // } // cout << endl; // } // // for (int i=0; i<m; i++) { // // for (int j=0; j<n; j++) { // // cout << iqvv[i*n+j].x << " "; // // } // // cout << endl; // // } // exit(0);
c3a7907696a750e7ca36adf1c4954ea2d73fe663.cu
#include <iostream> #include <stdlib.h> #include <cuda.h> #include <cuComplex.h> #include <fftw3.h> #include <cufft.h> #include <sys/time.h> #include <assert.h> using namespace std; #define k_rangeres 30 #define k_calib 1941.05 #define RESULT_SIZE 2 #define DEBUG inline cudaError_t checkCuda(cudaError_t result) { #if defined(DEBUG) || defined(_DEBUG) if (result != cudaSuccess) { fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result)); assert(result == cudaSuccess); } #endif return result; } float *generate_hamming_coef(int m, int n) { // Calculate normalization power on range cell float p_range=0; for(int i=0; i < m; i++) { p_range=p_range+pow(0.53836-0.46164*cos(2*M_PI*(i)/(m-1)), 2.0); } p_range=p_range/m; // Calculate normalization power on Doppler cell float p_doppler=0; for(int j=0; j < n; j++) { p_doppler=p_doppler+pow(0.53836-0.46164*cos(2*M_PI*(j)/(n-1)), 2.0); } p_doppler=p_doppler/n; // Constant since FFT is not normalized and the power is computed w.r.t. 50ohm const float K_wind = -1/(16383.5*m*n*sqrt(50)); const float c = K_wind/sqrt(p_range*p_doppler); // Generate elements float *_hamming_coef= new float[m*n]; for(int i=0; i < m; i++) { for(int j=0; j < n; j++) { _hamming_coef[i*n+j] = (0.53836-0.46164*cos(2*M_PI*(i)/(m-1))) * (0.53836-0.46164*cos(2*M_PI*(j)/(n-1))) * c; } } return _hamming_coef; } float *generate_ma_coef(int n){ float *_ma_coef = new float[n]; float _sum = 0.0; for(int i=0; i < n; i++) { _ma_coef[i]=exp(-(pow(i-((n-1)/2), 2.0))/2); _sum += _ma_coef[i]; } for(int i=0; i < n; i++){ _ma_coef[i] = _ma_coef[i]/_sum; } return _ma_coef; } __global__ void __apply_hamming(cuFloatComplex *a, float *b) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; a[idx] = make_cuFloatComplex(b[idx]*cuCrealf(a[idx]), b[idx]*cuCimagf(a[idx])); } __global__ void __apply_ma(cuFloatComplex *inout, cuFloatComplex *macoef) { const unsigned int i = blockIdx.x, j = threadIdx.x, n = blockDim.x; inout[i*n+j] = cuCmulf(inout[i*n+j], 
macoef[j]); } __global__ void __conjugate(cuFloatComplex *a) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; a[idx].y *= -1; } __global__ void __shift(cuFloatComplex *inout, int n) { const unsigned int i = blockIdx.x, j = threadIdx.x; cuFloatComplex temp = inout[i*n+j]; inout[i*n+j] = inout[i*n+(j+n/2)]; inout[i*n+(j+n/2)] = temp; } __global__ void __clip(cuFloatComplex *inout, int n) { const unsigned int i = blockIdx.x, j = n-threadIdx.x-1; inout[i*n+j] = make_cuFloatComplex(0, 0); } __global__ void __abssqr(cuFloatComplex *inout, int n) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; float real, imag; real = cuCrealf(inout[idx]); imag = cuCimagf(inout[idx]); inout[idx] = make_cuFloatComplex(real*real + imag*imag, 0); } __global__ void __sum(cuFloatComplex *in, cuFloatComplex *out) { const unsigned int i = blockIdx.x, j = threadIdx.x, n = blockDim.x; out[i*n+j] = make_cuFloatComplex(in[i*n+j].x, in[i*n+j].y); __syncthreads(); for (unsigned int s=blockDim.x/2; s>0; s>>=1) { if (j < s) { out[i*n+j] = cuCaddf(out[i*n+j], out[i*n+j+s]); } __syncthreads(); } } __global__ void __sum_v2(cuFloatComplex *in, cuFloatComplex *out) { const unsigned int i = 2*blockIdx.x, j = threadIdx.x, n = blockDim.x; #pragma unroll for (unsigned int d=0; d<2; d++) { out[i*n+j+n*d] = make_cuFloatComplex(in[i*n+j+n*d].x, in[i*n+j+n*d].y); } __syncthreads(); for (unsigned int s=blockDim.x/2; s>0; s>>=1) { if (j < s) { #pragma unroll for (unsigned int d=0; d<2; d++) { out[i*n+j+n*d] = cuCaddf(out[i*n+j+n*d], out[i*n+j+n*d+s]); } } __syncthreads(); } } __global__ void __sum_v3(cuFloatComplex *in, cuFloatComplex *out) { const unsigned int i = blockIdx.x, j = threadIdx.x, n = blockDim.x; extern __shared__ cuFloatComplex sdata[]; sdata[j] = make_cuFloatComplex(in[i*n+j].x, in[i*n+j].y); __syncthreads(); for (unsigned int s=blockDim.x/2; s>0; s>>=1) { if (j < s) { sdata[j] = cuCaddf(sdata[j], sdata[j+s]); } __syncthreads(); } if(j==0) { out[i*n] = sdata[j]; } } 
__global__ void __sum_v4(cuFloatComplex *in, cuFloatComplex *out) { const unsigned int i = 2*blockIdx.x, j = threadIdx.x, n = blockDim.x; extern __shared__ cuFloatComplex sdata[]; #pragma unroll for (unsigned int d=0; d<2; d++) { sdata[j+n*d] = make_cuFloatComplex(in[i*n+j+n*d].x, in[i*n+j+n*d].y); } __syncthreads(); for (unsigned int s=blockDim.x/2; s>0; s>>=1) { if (j < s) { #pragma unroll for (unsigned int d=0; d<2; d++) { sdata[j+n*d] = cuCaddf(sdata[j+n*d], sdata[j+n*d+s]); } } __syncthreads(); } if(j==0) { #pragma unroll for (unsigned int d=0; d<2; d++) { out[i*n+n*d] = sdata[j+n*d]; } } } __global__ void __sum_inplace(cuFloatComplex *g_idata) { const unsigned int i = blockIdx.x, j = threadIdx.x, n = blockDim.x; // __syncthreads(); for (unsigned int s=blockDim.x/2; s>0; s>>=1) { if (j < s) { // g_idata[i] = make_cuFloatComplex(g_idata[i].x+g_idata[i + s].x, 0); g_idata[i*n+j] = cuCaddf(g_idata[i*n+j], g_idata[i*n+j+s]); } __syncthreads(); } } __global__ void __sum_inplace_v2(cuFloatComplex *g_idata) { const unsigned int i = 2*blockIdx.x, j = threadIdx.x, n = blockDim.x; // __syncthreads(); for (unsigned int s=blockDim.x/2; s>0; s>>=1) { if (j < s) { // g_idata[i] = make_cuFloatComplex(g_idata[i].x+g_idata[i + s].x, 0); #pragma unroll for (unsigned int d=0; d<2; d++) { g_idata[i*n+j+n*d] = cuCaddf(g_idata[i*n+j+n*d], g_idata[i*n+j+n*d+s]); } } __syncthreads(); } } __global__ void __sum_inplace_v3(cuFloatComplex *in) { const unsigned int i = blockIdx.x, j = threadIdx.x, n = blockDim.x; extern __shared__ cuFloatComplex sdata[]; sdata[j] = make_cuFloatComplex(in[i*n+j].x, in[i*n+j].y); __syncthreads(); for (unsigned int s=blockDim.x/2; s>0; s>>=1) { if (j < s) { sdata[j] = cuCaddf(sdata[j], sdata[j+s]); } __syncthreads(); } if(j==0) { in[i*n] = sdata[j]; } } __global__ void __sum_inplace_v4(cuFloatComplex *in) { const unsigned int i = 2*blockIdx.x, j = threadIdx.x, n = blockDim.x; extern __shared__ cuFloatComplex sdata[]; #pragma unroll for (unsigned int d=0; 
d<2; d++) { sdata[j+n*d] = make_cuFloatComplex(in[i*n+j+n*d].x, in[i*n+j+n*d].y); } __syncthreads(); for (unsigned int s=blockDim.x/2; s>0; s>>=1) { if (j < s) { #pragma unroll for (unsigned int d=0; d<2; d++) { sdata[j+n*d] = cuCaddf(sdata[j+n*d], sdata[j+n*d+s]); } } __syncthreads(); } if(j==0) { #pragma unroll for (unsigned int d=0; d<2; d++) { in[i*n+n*d] = sdata[j+n*d]; } } } __global__ void __avgconj(cuFloatComplex *inout, cuFloatComplex *sum) { const unsigned int i = blockIdx.x, j = threadIdx.x, n = blockDim.x; float avgx = sum[i*n].x/n; float avgy = sum[i*n].y/n; inout[i*n+j] = make_cuFloatComplex(inout[i*n+j].x-avgx, (inout[i*n+j].y-avgy)*-1); } __global__ void __scale_real(cuFloatComplex *inout) { const unsigned int i = blockIdx.x, j = threadIdx.x, n = blockDim.x; inout[i*n+j] = make_cuFloatComplex(inout[i*n+j].x/n, 0); } __global__ void __calcresult(cuFloatComplex *hh, cuFloatComplex *vv, cuFloatComplex *hv, float *out, int n) { const unsigned int i = blockIdx.x; float z = pow(i*k_rangeres, 2.0) * k_calib * hh[i*n].x; float zdb = 10 * log10(z); float zdr = 10 * (log10(hh[i*n].x)-log10(vv[i*n].x)); out[i*RESULT_SIZE+0] = zdb; out[i*RESULT_SIZE+1] = zdr; } __global__ void __calcresult_v2(cuFloatComplex *hh, cuFloatComplex *vv, cuFloatComplex *hv, float *out, int n) { const unsigned int i = threadIdx.x; float z = pow(i*k_rangeres, 2.0) * k_calib * hh[i*n].x; float zdb = 10 * log10(z); float zdr = 10 * (log10(hh[i*n].x)-log10(vv[i*n].x)); out[i*RESULT_SIZE+0] = zdb; out[i*RESULT_SIZE+1] = zdr; } void tick(timeval *begin) { gettimeofday(begin, NULL); } void tock(timeval *begin, timeval *end, string caption) { unsigned long long bb, e; gettimeofday(end, NULL); bb = (unsigned long long)(begin->tv_sec) * 1000000 + (unsigned long long)(begin->tv_usec) / 1; e = (unsigned long long)(end->tv_sec) * 1000000 + (unsigned long long)(end->tv_usec) / 1; cout << caption << ": " << e-bb << endl; } int main(int argc, char **argv) { ios_base::sync_with_stdio(false); struct 
timeval tb, te; tick(&tb); cuFloatComplex *iqhh, *iqvv, *iqhv; float *result; int sector_id; const int m = 1024; // cell const int n = 512; // sweep const int ma_count = 7; iqhh = new cuFloatComplex[m*n]; iqvv = new cuFloatComplex[m*n]; iqhv = new cuFloatComplex[m*n]; result = new float[(m/2)*RESULT_SIZE]; float a, b; // Generate Hamming coefficients const float *hamming_coef = generate_hamming_coef(m, n); // Generate MA coefficients float *ma_coef = generate_ma_coef(ma_count); fftwf_complex *_fft_ma = (fftwf_complex*) fftwf_malloc(sizeof(fftwf_complex) * n); fftwf_plan fft_ma_plan = fftwf_plan_dft_1d(n, _fft_ma, _fft_ma, FFTW_FORWARD, FFTW_ESTIMATE); for (int j=0; j<ma_count; j++) { _fft_ma[j][0] = ma_coef[j]; _fft_ma[j][1] = 0; } for (int j=ma_count; j<n; j++) { _fft_ma[j][0] = 0; _fft_ma[j][1] = 0; } fftwf_execute(fft_ma_plan); fftwf_destroy_plan(fft_ma_plan); cuFloatComplex *fft_ma; fft_ma = new cuFloatComplex[n]; for (int j=0; j<n; j++) { fft_ma[j] = make_cuFloatComplex(_fft_ma[j][0], _fft_ma[j][1]); } fftwf_free(_fft_ma); // Device buffers /*__constant__*/ float *d_hamming; /*__constant__*/ cuFloatComplex *d_ma; cuFloatComplex *d_iqhh, *d_iqvv, *d_iqhv; cuFloatComplex *d_sum; float *d_result; //float *d_powhh, *d_powvv; cudaMalloc(&d_hamming, m*n*sizeof(float)); cudaMalloc(&d_ma, n*sizeof(cuFloatComplex)); cudaMalloc(&d_iqhh, m*n*sizeof(cuFloatComplex)); cudaMalloc(&d_iqvv, m*n*sizeof(cuFloatComplex)); cudaMalloc(&d_iqhv, m*n*sizeof(cuFloatComplex)); cudaMalloc(&d_sum, m*n*sizeof(cuFloatComplex)); cudaMalloc(&d_result, (m/2)*RESULT_SIZE*sizeof(float)); cudaMemcpy(d_hamming, hamming_coef, m*n*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_ma, fft_ma, n*sizeof(cuFloatComplex), cudaMemcpyHostToDevice); // CUFFT initialization cufftHandle fft_range_handle; cufftHandle fft_doppler_handle; cufftHandle fft_pdop_handle; int rank = 1; // --- 1D FFTs int nn[] = { m }; // --- Size of the Fourier transform int istride = n, ostride = n; // --- Distance between two 
successive input/output elements int idist = 1, odist = 1; // --- Distance between batches int inembed[] = { 0 }; // --- Input size with pitch (ignored for 1D transforms) int onembed[] = { 0 }; // --- Output size with pitch (ignored for 1D transforms) int batch = n; // --- Number of batched executions cufftPlanMany(&fft_range_handle, rank, nn, inembed, istride, idist, onembed, ostride, odist, CUFFT_C2C, batch); cufftPlan1d(&fft_doppler_handle, n, CUFFT_C2C, m); cufftPlan1d(&fft_pdop_handle, n, CUFFT_C2C, m/2); tock(&tb, &te, "initialization"); float ms; // elapsed time in milliseconds sector_id = -1; // create events and streams cudaEvent_t startEvent, stopEvent; cudaEventCreate(&startEvent); cudaEventCreate(&stopEvent); // cudaEventCreate(&dummyEvent); cudaEventRecord(startEvent,0); tick(&tb); while(sector_id < 126) { // tick(&tb); // Read 1 sector data // cin >> sector_id; sector_id++; for (int i=0; i<m; i++) { for (int j=0; j<n; j++) { // cin >> a >> b; iqhh[i*n+j] = make_cuFloatComplex(i, j); } } for (int i=0; i<m; i++) { for (int j=0; j<n; j++) { // cin >> a >> b; iqvv[i*n+j] = make_cuFloatComplex(j, i); } } for (int i=0; i<m; i++) { for (int j=0; j<n; j++) { // cin >> a >> b; iqhv[i*n+j] = make_cuFloatComplex(j, i); } } cudaMemcpy(d_iqhh, iqhh, m*n*sizeof(cuFloatComplex), cudaMemcpyHostToDevice); cudaMemcpy(d_iqvv, iqvv, m*n*sizeof(cuFloatComplex), cudaMemcpyHostToDevice); cudaMemcpy(d_iqhv, iqhv, m*n*sizeof(cuFloatComplex), cudaMemcpyHostToDevice); // apply Hamming coefficients __apply_hamming<<<m,n>>>(d_iqhh, d_hamming); __apply_hamming<<<m,n>>>(d_iqvv, d_hamming); __apply_hamming<<<m,n>>>(d_iqhv, d_hamming); // FFT range profile cufftExecC2C(fft_range_handle, d_iqhh, d_iqhh, CUFFT_FORWARD); cufftExecC2C(fft_range_handle, d_iqvv, d_iqvv, CUFFT_FORWARD); cufftExecC2C(fft_range_handle, d_iqhv, d_iqhv, CUFFT_FORWARD); // FFT+shift Doppler profile __sum_v4<<<m/2,n,2*n*sizeof(cuFloatComplex)>>>(d_iqhh, d_sum); __avgconj<<<m,n>>>(d_iqhh, d_sum); 
__sum_v4<<<m/2,n,2*n*sizeof(cuFloatComplex)>>>(d_iqvv, d_sum); __avgconj<<<m,n>>>(d_iqvv, d_sum); __sum_v4<<<m/2,n,2*n*sizeof(cuFloatComplex)>>>(d_iqhv, d_sum); __avgconj<<<m,n>>>(d_iqhv, d_sum); cufftExecC2C(fft_doppler_handle, d_iqhh, d_iqhh, CUFFT_FORWARD); cufftExecC2C(fft_doppler_handle, d_iqvv, d_iqvv, CUFFT_FORWARD); cufftExecC2C(fft_doppler_handle, d_iqhv, d_iqhv, CUFFT_FORWARD); __conjugate<<<m,n>>>(d_iqhh); __conjugate<<<m,n>>>(d_iqvv); __conjugate<<<m,n>>>(d_iqhv); __shift<<<m,n/2>>>(d_iqhh, n); __shift<<<m,n/2>>>(d_iqvv, n); __shift<<<m,n/2>>>(d_iqhv, n); __clip<<<m,2>>>(d_iqhh, n); __clip<<<m,2>>>(d_iqvv, n); __clip<<<m,2>>>(d_iqhv, n); // Get absolute value __abssqr<<<m/2,n>>>(d_iqhh, n); __abssqr<<<m/2,n>>>(d_iqvv, n); __abssqr<<<m/2,n>>>(d_iqhv, n); // FFT PDOP cufftExecC2C(fft_pdop_handle, d_iqhh, d_iqhh, CUFFT_FORWARD); cufftExecC2C(fft_pdop_handle, d_iqvv, d_iqvv, CUFFT_FORWARD); cufftExecC2C(fft_pdop_handle, d_iqhv, d_iqhv, CUFFT_FORWARD); // Apply MA coefficients __apply_ma<<<m/2,n>>>(d_iqhh, d_ma); __apply_ma<<<m/2,n>>>(d_iqvv, d_ma); __apply_ma<<<m/2,n>>>(d_iqhv, d_ma); // Inverse FFT cufftExecC2C(fft_pdop_handle, d_iqhh, d_iqhh, CUFFT_INVERSE); cufftExecC2C(fft_pdop_handle, d_iqvv, d_iqvv, CUFFT_INVERSE); cufftExecC2C(fft_pdop_handle, d_iqhv, d_iqhv, CUFFT_INVERSE); __scale_real<<<m/2,n>>>(d_iqhh); __scale_real<<<m/2,n>>>(d_iqvv); __scale_real<<<m/2,n>>>(d_iqhv); // Sum __sum_inplace_v4<<<m/4,n,2*n*sizeof(cuFloatComplex)>>>(d_iqhh); __sum_inplace_v4<<<m/4,n,2*n*sizeof(cuFloatComplex)>>>(d_iqvv); __sum_inplace_v4<<<m/4,n,2*n*sizeof(cuFloatComplex)>>>(d_iqhv); // cudaMemcpy(iqhh, d_iqhh, m*n*sizeof(cuFloatComplex), cudaMemcpyDeviceToHost); // cudaMemcpy(iqvv, d_iqvv, m*n*sizeof(cuFloatComplex), cudaMemcpyDeviceToHost); // for (int i=0; i<m/2; i++) { // float z = pow(i*k_rangeres, 2.0) * k_calib * iqhh[i*n].x; // float zdb = 10 * log10(z); // float zdr = 10 * (log10(iqhh[i*n].x)-log10(iqvv[i*n].x)); // cout << zdb << " " << zdr << endl; // } // 
exit(0); // Calculate ZdB, Zdr __calcresult_v2<<<1,m/2>>>(d_iqhh, d_iqvv, d_iqhv, d_result, n); cudaMemcpy(result, d_result, (m/2)*RESULT_SIZE*sizeof(float), cudaMemcpyDeviceToHost); // for (int i=0; i<m/2; i++) { // for (int j=0; j<RESULT_SIZE; j++) { // cout << result[i*RESULT_SIZE+j] << " "; // } // cout << endl; // } // exit(0); } tock(&tb, &te, "All (us)"); cudaEventRecord(stopEvent, 0); cudaEventSynchronize(stopEvent); cudaEventElapsedTime(&ms, startEvent, stopEvent); printf("Time for sequential transfer and execute (ms): %f\n", ms); cudaEventDestroy(startEvent); cudaEventDestroy(stopEvent); cudaFree(d_hamming); cudaFree(d_ma); cudaFree(d_iqhh); cudaFree(d_iqvv); cudaFree(d_iqhv); delete[] iqhh; delete[] iqvv; delete[] iqhv; return 0; } // cudaMemcpy(iqhh, d_iqhh, m*n*sizeof(cuFloatComplex), cudaMemcpyDeviceToHost); // cudaMemcpy(iqvv, d_iqvv, m*n*sizeof(cuFloatComplex), cudaMemcpyDeviceToHost); // for (int i=0; i<m; i++) { // for (int j=0; j<n; j++) { // cout << "(" << iqhh[i*n+j].x << "," << iqhh[i*n+j].y << ") "; // } // cout << endl; // } // // for (int i=0; i<m; i++) { // // for (int j=0; j<n; j++) { // // cout << iqvv[i*n+j].x << " "; // // } // // cout << endl; // // } // exit(0);
8d1ecbe6c71fd4effca91fe24d0705d4021de672.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "importer.cuh" #include "debug.h" #include "scene.cuh" #include "sphere_hip.cuh" #include "box.cuh" #include "ellipsoid.cuh" #include "polygon.cuh" #define GLM_FORCE_CUDA #include <glm/glm.hpp> #include <fstream> #include <string> namespace importer { __global__ void add_sphere_to_scene(Scene * scene, Material material, glm::vec3 position, float radius) { scene->add(new Sphere{ material, position, radius }); } __global__ void add_box_to_scene(Scene * scene, Material material, glm::vec3 position, glm::vec3 width, glm::vec3 height, glm::vec3 depth) { scene->add(new Box{ material, position, width, height, depth }); } __global__ void add_ellipsoid_to_scene(Scene * scene, Material material, glm::vec3 position, glm::vec3 width, glm::vec3 height, glm::vec3 depth) { scene->add(new Ellipsoid{ material, position, width, height, depth }); } __global__ void add_polygon_to_scene(Scene * scene, Material material, glm::vec3 * data, int count) { scene->add(new Polygon{ material, data, count }); } __global__ void add_light_to_scene(Scene * scene, glm::vec3 position, glm::vec3 intensity, float radius) { scene->add(PointLight{ position, intensity, radius }); } __global__ void set_scene_ambient(Scene * scene, glm::vec3 ambient) { scene->set_ambient(ambient); } __global__ void set_scene_camera(Scene * scene, glm::vec3 position, glm::vec3 right, glm::vec3 up, glm::vec3 center) { scene->set_camera(Camera{ position, right, up, center }); } void import_scene(const char * path, Scene * scene) { std::ifstream file{ path }; if (file.is_open()) { // Parse std::string line; while (std::getline(file, line)) { switch (line[0]) { case 'A': { // Parse glm::vec3 ambient; sscanf_s(line.c_str(), "A (%f,%f,%f)", &ambient.x, &ambient.y, &ambient.z); // Upload to GPU hipLaunchKernelGGL(( set_scene_ambient), dim3(1),dim3(1), 0, 0, scene, ambient); CheckCUDAError(hipGetLastError()); 
CheckCUDAError(hipDeviceSynchronize()); break; } case 'B': { // Parse position, width, height and depth glm::vec3 position, width, height, depth; sscanf_s(line.c_str(), "B (%f,%f,%f) (%f,%f,%f) (%f,%f,%f) (%f,%f,%f)", &position.x, &position.y, &position.z, &width.x, &width.y, &width.z, &height.x, &height.y, &height.z, &depth.x, &depth.y, &depth.z); // Parse material std::getline(file, line); Material material = import_material(line); // Upload to GPU hipLaunchKernelGGL(( add_box_to_scene), dim3(1),dim3(1), 0, 0, scene, material, position, width, height, depth); CheckCUDAError(hipGetLastError()); CheckCUDAError(hipDeviceSynchronize()); break; } case 'C': { // Parse projection center, right, up, distance to projection center glm::vec3 position, right, up, center; sscanf_s(line.c_str(), "C (%f,%f,%f) (%f,%f,%f) (%f,%f,%f) %f", &center.x, &center.y, &center.z, &right.x, &right.y, &right.z, &up.x, &up.y, &up.z, &position.x); // Compute position position = center + glm::normalize(glm::cross(right, up)) * position.x; // Upload to GPU hipLaunchKernelGGL(( set_scene_camera), dim3(1),dim3(1), 0, 0, scene, position, right, up, center); CheckCUDAError(hipGetLastError()); CheckCUDAError(hipDeviceSynchronize()); break; } case 'E': { // Parse position, width, height and depth glm::vec3 position, width, height, depth; sscanf_s(line.c_str(), "E (%f,%f,%f) (%f,%f,%f) (%f,%f,%f) (%f,%f,%f)", &position.x, &position.y, &position.z, &width.x, &width.y, &width.z, &height.x, &height.y, &height.z, &depth.x, &depth.y, &depth.z); // Parse material std::getline(file, line); Material material = import_material(line); // Upload to GPU hipLaunchKernelGGL(( add_ellipsoid_to_scene), dim3(1),dim3(1), 0, 0, scene, material, position, width, height, depth); CheckCUDAError(hipGetLastError()); CheckCUDAError(hipDeviceSynchronize()); break; } case 'L': { // Parse position and intensity glm::vec3 position, intensity; float radius; sscanf_s(line.c_str(), "L (%f,%f,%f) (%f,%f,%f) %f", &position.x, 
&position.y, &position.z, &intensity.x, &intensity.y, &intensity.z, &radius); // Upload to GPU hipLaunchKernelGGL(( add_light_to_scene), dim3(1),dim3(1), 0, 0, scene, position, intensity, radius); CheckCUDAError(hipGetLastError()); CheckCUDAError(hipDeviceSynchronize()); break; } case 'P': { // Parse vertex count int count = 0; sscanf_s(line.c_str(), "P %d", &count); // Shared memory to pass vertices to gpu glm::vec3 * vertices = nullptr; CheckCUDAError(hipMallocManaged((void **)&vertices, sizeof(glm::vec2) * count)); // Parse vertices size_t start = line.find_first_of("("); size_t end = line.find_first_of(")", start) + 1; for (int i = 0; i < count; ++i) { std::string vertex = line.substr(start, end - start); sscanf_s(vertex.c_str(), "(%f,%f,%f)", &vertices[i].x, &vertices[i].y, &vertices[i].z); start = line.find_first_of("(", end); end = line.find_first_of(")", start) + 1; } // Parse material std::getline(file, line); Material material = import_material(line); // Upload to GPU hipLaunchKernelGGL(( add_polygon_to_scene), dim3(1),dim3(1), 0, 0, scene, material, vertices, count); CheckCUDAError(hipGetLastError()); CheckCUDAError(hipDeviceSynchronize()); // Free memory CheckCUDAError(hipFree(vertices)); break; } case 'S': { glm::vec3 position; float radius; // Parse position and radius sscanf_s(line.c_str(), "S (%f,%f,%f) %f", &position.x, &position.y, &position.z, &radius); // Parse material std::getline(file, line); Material material = import_material(line); // Upload to GPU hipLaunchKernelGGL(( add_sphere_to_scene), dim3(1),dim3(1), 0, 0, scene, material, position, radius); CheckCUDAError(hipGetLastError()); CheckCUDAError(hipDeviceSynchronize()); break; } default: break; } } } } Material import_material(const std::string & line) { glm::vec3 color, attenuation; float specular_coefficient, shininess, permittivity, permeability; sscanf_s(line.c_str(), "(%f,%f,%f) %f %f (%f,%f,%f) %f %f", &color.r, &color.g, &color.b, &specular_coefficient, &shininess, &attenuation.x, 
&attenuation.y, &attenuation.z, &permittivity, &permeability); float refraction_index = std::sqrt(permittivity * permeability); return Material{ color, attenuation, specular_coefficient, shininess, permeability, refraction_index }; } }
8d1ecbe6c71fd4effca91fe24d0705d4021de672.cu
#include "importer.cuh" #include "debug.h" #include "scene.cuh" #include "sphere.cuh" #include "box.cuh" #include "ellipsoid.cuh" #include "polygon.cuh" #define GLM_FORCE_CUDA #include <glm/glm.hpp> #include <fstream> #include <string> namespace importer { __global__ void add_sphere_to_scene(Scene * scene, Material material, glm::vec3 position, float radius) { scene->add(new Sphere{ material, position, radius }); } __global__ void add_box_to_scene(Scene * scene, Material material, glm::vec3 position, glm::vec3 width, glm::vec3 height, glm::vec3 depth) { scene->add(new Box{ material, position, width, height, depth }); } __global__ void add_ellipsoid_to_scene(Scene * scene, Material material, glm::vec3 position, glm::vec3 width, glm::vec3 height, glm::vec3 depth) { scene->add(new Ellipsoid{ material, position, width, height, depth }); } __global__ void add_polygon_to_scene(Scene * scene, Material material, glm::vec3 * data, int count) { scene->add(new Polygon{ material, data, count }); } __global__ void add_light_to_scene(Scene * scene, glm::vec3 position, glm::vec3 intensity, float radius) { scene->add(PointLight{ position, intensity, radius }); } __global__ void set_scene_ambient(Scene * scene, glm::vec3 ambient) { scene->set_ambient(ambient); } __global__ void set_scene_camera(Scene * scene, glm::vec3 position, glm::vec3 right, glm::vec3 up, glm::vec3 center) { scene->set_camera(Camera{ position, right, up, center }); } void import_scene(const char * path, Scene * scene) { std::ifstream file{ path }; if (file.is_open()) { // Parse std::string line; while (std::getline(file, line)) { switch (line[0]) { case 'A': { // Parse glm::vec3 ambient; sscanf_s(line.c_str(), "A (%f,%f,%f)", &ambient.x, &ambient.y, &ambient.z); // Upload to GPU set_scene_ambient<<<1,1>>>(scene, ambient); CheckCUDAError(cudaGetLastError()); CheckCUDAError(cudaDeviceSynchronize()); break; } case 'B': { // Parse position, width, height and depth glm::vec3 position, width, height, depth; 
sscanf_s(line.c_str(), "B (%f,%f,%f) (%f,%f,%f) (%f,%f,%f) (%f,%f,%f)", &position.x, &position.y, &position.z, &width.x, &width.y, &width.z, &height.x, &height.y, &height.z, &depth.x, &depth.y, &depth.z); // Parse material std::getline(file, line); Material material = import_material(line); // Upload to GPU add_box_to_scene<<<1,1>>>(scene, material, position, width, height, depth); CheckCUDAError(cudaGetLastError()); CheckCUDAError(cudaDeviceSynchronize()); break; } case 'C': { // Parse projection center, right, up, distance to projection center glm::vec3 position, right, up, center; sscanf_s(line.c_str(), "C (%f,%f,%f) (%f,%f,%f) (%f,%f,%f) %f", &center.x, &center.y, &center.z, &right.x, &right.y, &right.z, &up.x, &up.y, &up.z, &position.x); // Compute position position = center + glm::normalize(glm::cross(right, up)) * position.x; // Upload to GPU set_scene_camera<<<1,1>>>(scene, position, right, up, center); CheckCUDAError(cudaGetLastError()); CheckCUDAError(cudaDeviceSynchronize()); break; } case 'E': { // Parse position, width, height and depth glm::vec3 position, width, height, depth; sscanf_s(line.c_str(), "E (%f,%f,%f) (%f,%f,%f) (%f,%f,%f) (%f,%f,%f)", &position.x, &position.y, &position.z, &width.x, &width.y, &width.z, &height.x, &height.y, &height.z, &depth.x, &depth.y, &depth.z); // Parse material std::getline(file, line); Material material = import_material(line); // Upload to GPU add_ellipsoid_to_scene<<<1,1>>>(scene, material, position, width, height, depth); CheckCUDAError(cudaGetLastError()); CheckCUDAError(cudaDeviceSynchronize()); break; } case 'L': { // Parse position and intensity glm::vec3 position, intensity; float radius; sscanf_s(line.c_str(), "L (%f,%f,%f) (%f,%f,%f) %f", &position.x, &position.y, &position.z, &intensity.x, &intensity.y, &intensity.z, &radius); // Upload to GPU add_light_to_scene<<<1,1>>>(scene, position, intensity, radius); CheckCUDAError(cudaGetLastError()); CheckCUDAError(cudaDeviceSynchronize()); break; } case 'P': { 
// Parse vertex count int count = 0; sscanf_s(line.c_str(), "P %d", &count); // Shared memory to pass vertices to gpu glm::vec3 * vertices = nullptr; CheckCUDAError(cudaMallocManaged((void **)&vertices, sizeof(glm::vec2) * count)); // Parse vertices size_t start = line.find_first_of("("); size_t end = line.find_first_of(")", start) + 1; for (int i = 0; i < count; ++i) { std::string vertex = line.substr(start, end - start); sscanf_s(vertex.c_str(), "(%f,%f,%f)", &vertices[i].x, &vertices[i].y, &vertices[i].z); start = line.find_first_of("(", end); end = line.find_first_of(")", start) + 1; } // Parse material std::getline(file, line); Material material = import_material(line); // Upload to GPU add_polygon_to_scene<<<1,1>>>(scene, material, vertices, count); CheckCUDAError(cudaGetLastError()); CheckCUDAError(cudaDeviceSynchronize()); // Free memory CheckCUDAError(cudaFree(vertices)); break; } case 'S': { glm::vec3 position; float radius; // Parse position and radius sscanf_s(line.c_str(), "S (%f,%f,%f) %f", &position.x, &position.y, &position.z, &radius); // Parse material std::getline(file, line); Material material = import_material(line); // Upload to GPU add_sphere_to_scene<<<1,1>>>(scene, material, position, radius); CheckCUDAError(cudaGetLastError()); CheckCUDAError(cudaDeviceSynchronize()); break; } default: break; } } } } Material import_material(const std::string & line) { glm::vec3 color, attenuation; float specular_coefficient, shininess, permittivity, permeability; sscanf_s(line.c_str(), "(%f,%f,%f) %f %f (%f,%f,%f) %f %f", &color.r, &color.g, &color.b, &specular_coefficient, &shininess, &attenuation.x, &attenuation.y, &attenuation.z, &permittivity, &permeability); float refraction_index = std::sqrt(permittivity * permeability); return Material{ color, attenuation, specular_coefficient, shininess, permeability, refraction_index }; } }
3ab879c2c888140b4d796c7968feea359fc6df3a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define TORCH_ASSERT_ONLY_METHOD_OPERATORS #include <ATen/native/SegmentReduce.h> #include <ATen/core/Tensor.h> #include <ATen/Dispatch.h> #include <ATen/NumericUtils.h> #include <ATen/hip/HIPContext.h> #include <ATen/hip/detail/KernelUtils.h> #include <ATen/hip/cub.cuh> #ifndef AT_PER_OPERATOR_HEADERS #include <ATen/Functions.h> #else #include <ATen/ops/empty.h> #include <ATen/ops/zeros.h> #include <ATen/ops/cat.h> #include <ATen/ops/cumsum.h> #endif namespace at::native { namespace { struct CustomMax { template <typename OutputT> __host__ __device__ __forceinline__ OutputT operator()(const OutputT& a, const OutputT& b) const { if (at::_isnan(a)) { return a; } else if (at::_isnan(b)) { return b; } return std::max<OutputT>(a, b); } }; struct CustomSum { template <typename OutputT> __host__ __device__ __forceinline__ OutputT operator()(const OutputT& a, const OutputT& b) const { return a + b; } }; struct CustomProd { template <typename OutputT> __host__ __device__ __forceinline__ OutputT operator()(const OutputT& a, const OutputT& b) const { return a * b; } }; struct CustomMin { template <typename OutputT> __host__ __device__ __forceinline__ OutputT operator()(const OutputT& a, const OutputT& b) const { if (at::_isnan(a)) { return a; } else if (at::_isnan(b)) { return b; } return std::min<OutputT>(a, b); } }; template <typename scalar_t, typename index_t> __global__ static void post_sum_div_kernel( scalar_t* output_data, const index_t* lengths_data, const int64_t segment_count, bool is_initial_set, scalar_t initial) { CUDA_KERNEL_LOOP(index, segment_count) { CUDA_KERNEL_ASSERT(lengths_data[index] >= 0); if (lengths_data[index] == 0) { if (is_initial_set) { output_data[index] = initial; } else { output_data[index] = NAN; } } else if (!at::_isnan(output_data[index])) { output_data[index] = output_data[index] / lengths_data[index]; } } } template <typename scalar_t, typename 
index_t> __global__ void segment_reduce_forward_kernel( ReductionType reduction, scalar_t* output_data, const scalar_t* values_data, const index_t* lengths_data, const index_t* lengths_cumsum_data, const int64_t segment_count, const int64_t lengths_stride_axis, bool is_initial_set, scalar_t initial_value, const int64_t outer_offset, const int64_t inner_offset, const int64_t data_stride_axis, const int64_t data_size_axis, const int64_t output_stride_axis, const int64_t output_size_axis, const int64_t lengths_cumsum_stride_axis) { int64_t idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= (outer_offset * segment_count * inner_offset)) { return; } int64_t row_id = idx / inner_offset; int64_t lane_id = idx % inner_offset; // lane_id is the inner_idx int64_t outer_idx = row_id / segment_count; int64_t dim_idx = row_id % segment_count; int64_t offset_idx = outer_idx * lengths_cumsum_stride_axis * (segment_count + 1) + dim_idx; index_t offset_start = lengths_cumsum_data[offset_idx]; index_t offset_end = lengths_cumsum_data[offset_idx + 1]; // ===== step2: apply reduction for (index_t j = offset_start; j < offset_end; ++j) { int64_t data_index = outer_idx * data_stride_axis * data_size_axis + j * data_stride_axis + lane_id; const auto data = values_data[data_index]; // TODO: There is no need to branch with every element if (reduction == ReductionType::MAX) { initial_value = at::_isnan(data) ? data : std::max<scalar_t>(initial_value, data); } else if ( reduction == ReductionType::MEAN || reduction == ReductionType::SUM) { initial_value = initial_value + data; } else if (reduction == ReductionType::MIN) { initial_value = at::_isnan(data) ? 
data : std::min<scalar_t>(initial_value, data); } else if ( reduction == ReductionType::PROD) { initial_value = initial_value * data; } } // ===== step3: finalize reduction int64_t lengths_idx = outer_idx * lengths_stride_axis * segment_count + dim_idx; CUDA_KERNEL_ASSERT(lengths_data[lengths_idx] >= 0); if (lengths_data[lengths_idx] == 0 && !is_initial_set && reduction == ReductionType::MEAN) { initial_value = static_cast<scalar_t>(NAN); } else if ( reduction == ReductionType::MEAN && lengths_data[lengths_idx] > 0 && !at::_isnan(initial_value)) { initial_value = initial_value / lengths_data[lengths_idx]; } int64_t output_index = outer_idx * output_stride_axis * output_size_axis + dim_idx * output_stride_axis + lane_id; output_data[output_index] = initial_value; } template <typename scalar_t, typename index_t> __global__ void segment_reduce_backward_kernel( ReductionType reduction, scalar_t* grad_input_data, const scalar_t* grad_data, const scalar_t* output_data, const scalar_t* values_data, const index_t* lengths_data, const index_t* lengths_cumsum_data, const int64_t segment_count, const int64_t lengths_stride_axis, scalar_t initial_prod_value, const int64_t outer_offset, const int64_t inner_offset, const int64_t data_stride_axis, const int64_t data_size_axis, const int64_t output_stride_axis, const int64_t output_size_axis, const int64_t lengths_cumsum_stride_axis) { int64_t idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= (outer_offset * segment_count * inner_offset)) { return; } int64_t row_id = idx / inner_offset; int64_t lane_id = idx % inner_offset; // lane_id is the inner_idx int64_t outer_idx = row_id / segment_count; int64_t dim_idx = row_id % segment_count; int64_t lengths_idx = outer_idx * lengths_stride_axis * segment_count + dim_idx; auto segment_length = lengths_data[lengths_idx]; if (segment_length == 0) { return; } int64_t offset_idx = outer_idx * lengths_cumsum_stride_axis * (segment_count + 1) + dim_idx; index_t offset_start = 
lengths_cumsum_data[offset_idx]; index_t offset_end = lengths_cumsum_data[offset_idx + 1]; int64_t output_index = outer_idx * output_stride_axis * output_size_axis + dim_idx * output_stride_axis + lane_id; if (reduction == ReductionType::MAX || reduction == ReductionType::MIN) { int64_t counter = 0; for (int64_t j = offset_start; j < offset_end; ++j) { int64_t data_index = outer_idx * data_stride_axis * data_size_axis + j * data_stride_axis + lane_id; if (at::_isnan(values_data[data_index]) || values_data[data_index] == output_data[output_index]) { grad_input_data[data_index] = grad_data[output_index]; counter++; } } // Average gradient based on number of maximum elements in the // segment if (counter < 2) { return; } for (int64_t j = offset_start; j < offset_end; ++j) { int64_t data_index = outer_idx * data_stride_axis * data_size_axis + j * data_stride_axis + lane_id; if (grad_input_data[data_index] > 0) { grad_input_data[data_index] = grad_input_data[data_index] / counter; } } } else if (reduction == ReductionType::MEAN) { auto grad_val = grad_data[output_index] / segment_length; for (int64_t j = offset_start; j < offset_end; ++j) { int64_t data_index = outer_idx * data_stride_axis * data_size_axis + j * data_stride_axis + lane_id; grad_input_data[data_index] = grad_val; } } else if (reduction == ReductionType::SUM) { const auto& grad_val = grad_data[output_index]; for (int64_t j = offset_start; j < offset_end; ++j) { int64_t data_index = outer_idx * data_stride_axis * data_size_axis + j * data_stride_axis + lane_id; grad_input_data[data_index] = grad_val; } } else if (reduction == ReductionType::PROD) { const auto& grad_val = grad_data[output_index] * output_data[output_index]; for (int64_t j = offset_start; j < offset_end; ++j) { int64_t data_index = outer_idx * data_stride_axis * data_size_axis + j * data_stride_axis + lane_id; if (at::_isnan(values_data[data_index]) || values_data[data_index] == 0) { // explicitly compute exclusive prod scalar_t 
exclusive_prod = initial_prod_value; int64_t prod_idx; for (int64_t k = offset_start; k < offset_end; ++k) { if (k != j) { prod_idx = outer_idx * data_stride_axis * data_size_axis + k * data_stride_axis + lane_id; exclusive_prod *= values_data[prod_idx]; } } grad_input_data[data_index] = grad_data[output_index] * exclusive_prod; } else { grad_input_data[data_index] = grad_val / values_data[data_index]; } } } } } // namespace Tensor _segment_reduce_lengths_offsets_backward_cuda_kernel( const Tensor& grad_contig, const Tensor& output_contig, const Tensor& data_contig, ReductionType reduction, const Tensor& lengths_or_offsets_contig, int64_t axis, const c10::optional<Scalar>& initial, bool is_offsets_like) { axis = lengths_or_offsets_contig.dim() - 1; int64_t segment_count = is_offsets_like ? lengths_or_offsets_contig.size(axis) - 1 : lengths_or_offsets_contig.size(axis); int64_t lengths_stride_axis = lengths_or_offsets_contig.stride(axis); auto grad_input = at::zeros({data_contig.sizes()}, grad_contig.options()); auto offsets = lengths_or_offsets_contig; auto lengths = lengths_or_offsets_contig; if (is_offsets_like) { lengths = lengths.diff(); } else { auto zeros_shape = offsets.sizes().vec(); zeros_shape[axis] = 1; offsets = at::cat({at::zeros(zeros_shape, offsets.options()), offsets}, axis); offsets.cumsum_(axis); } // outer_offset is the size of the outer dimensions of output (before axis) // inner_offset is the size of the inner dimensions of output (after axis) int64_t outer_offset = 1, inner_offset = 1; for (int64_t d = 0; d < axis; d++) { outer_offset *= output_contig.size(d); } for (int64_t d = axis + 1; d < output_contig.dim(); d++) { inner_offset *= output_contig.size(d); } constexpr int threads_per_block = 256; int64_t num_blocks = (outer_offset * inner_offset * segment_count + threads_per_block - 1) / threads_per_block; num_blocks = ::max(num_blocks, (int64_t)1); auto data_stride_axis = data_contig.stride(axis); auto data_size_axis = 
data_contig.size(axis); auto output_stride_axis = output_contig.stride(axis); auto output_size_axis = output_contig.size(axis); auto offsets_stride_axis = offsets.stride(axis); AT_DISPATCH_INDEX_TYPES( lengths_or_offsets_contig.scalar_type(), "_segment_reduce_cuda_lengths_offsets_backward_kernel1", ([&] { const auto* lengths_data = lengths.const_data_ptr<index_t>(); auto* offsets_data = offsets.const_data_ptr<index_t>(); // TODO: Switch to TensorIterator for better maintainablility and // readability AT_DISPATCH_FLOATING_TYPES_AND2( kBFloat16, kHalf, data_contig.scalar_type(), "_segment_reduce_cpu", ([&]() { auto* output_data = output_contig.const_data_ptr<scalar_t>(); auto* grad_data = grad_contig.const_data_ptr<scalar_t>(); auto* grad_input_data = grad_input.mutable_data_ptr<scalar_t>(); const auto* values_data = data_contig.const_data_ptr<scalar_t>(); scalar_t initial_prod_value; if (initial.has_value()) { initial_prod_value = initial.value().to<scalar_t>(); } else { initial_prod_value = 1; } hipLaunchKernelGGL(( segment_reduce_backward_kernel<scalar_t>) , dim3(num_blocks), dim3(threads_per_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), reduction, grad_input_data, grad_data, output_data, values_data, lengths_data, offsets_data, segment_count, lengths_stride_axis, initial_prod_value, outer_offset, inner_offset, data_stride_axis, data_size_axis, output_stride_axis, output_size_axis, offsets_stride_axis ); C10_HIP_KERNEL_LAUNCH_CHECK(); })); })); return grad_input; } Tensor _segment_reduce_lengths_backward_cuda_kernel( const Tensor& grad_contig, const Tensor& output_contig, const Tensor& data_contig, ReductionType reduction, const Tensor& lengths_contig, int64_t axis, const c10::optional<Scalar>& initial) { return _segment_reduce_lengths_offsets_backward_cuda_kernel( grad_contig, output_contig, data_contig, reduction, lengths_contig, axis, initial, /*is_offsets_like=*/false); } Tensor _segment_reduce_offsets_backward_cuda_kernel( const Tensor& 
grad_contig, const Tensor& output_contig, const Tensor& data_contig, ReductionType reduction, const Tensor& offsets_contig, int64_t axis, const c10::optional<Scalar>& initial) { return _segment_reduce_lengths_offsets_backward_cuda_kernel( grad_contig, output_contig, data_contig, reduction, offsets_contig, axis, initial, /*is_offsets_like=*/true); } Tensor _segment_reduce_lengths_offsets_cuda_kernel( ReductionType reduction, const Tensor& data, const Tensor& lengths_or_offsets, int64_t axis, const c10::optional<Scalar>& initial, bool is_offsets_like) { // data and lengths_or_offsets should be contiguous from the call to .contiguous in segment_reduce_kernel TORCH_CHECK(data.is_contiguous()); TORCH_CHECK(lengths_or_offsets.is_contiguous()); axis = lengths_or_offsets.dim() - 1; int64_t segment_count = is_offsets_like ? lengths_or_offsets.size(axis) - 1 : lengths_or_offsets.size(axis); int64_t lengths_stride_axis = lengths_or_offsets.stride(axis); auto output_shape = data.sizes().vec(); output_shape[axis] = segment_count; auto output = at::empty(output_shape, data.options()); auto offsets = lengths_or_offsets; auto lengths = lengths_or_offsets; if (is_offsets_like) { lengths = lengths.diff(); } else { auto zeros_shape = offsets.sizes().vec(); zeros_shape[axis] = 1; offsets = at::cat({at::zeros(zeros_shape, offsets.options()), offsets}, axis); offsets.cumsum_(axis); } // outer_offset is the size of the outer dimensions of output (before axis) // inner_offset is the size of the inner dimensions of output (after axis) int64_t outer_offset = 1, inner_offset = 1; for (int64_t d = 0; d < axis; d++) { outer_offset *= output.size(d); } for (int64_t d = axis + 1; d < output.dim(); d++) { inner_offset *= output.size(d); } constexpr int threads_per_block = 256; // segment_count * stride_count is just output.numel() ? 
int64_t num_blocks = (output.numel() + threads_per_block - 1) / threads_per_block; num_blocks = ::max(num_blocks, (int64_t)1); auto data_stride_axis = data.stride(axis); auto data_size_axis = data.size(axis); auto output_stride_axis = output.stride(axis); auto output_size_axis = output.size(axis); auto offsets_stride_axis = offsets.stride(axis); AT_DISPATCH_INDEX_TYPES( lengths_or_offsets.scalar_type(), "_segment_reduce_cuda_kernel1", ([&] { auto* offsets_data_ptr = offsets.const_data_ptr<index_t>(); auto* lengths_data_ptr = lengths.const_data_ptr<index_t>(); AT_DISPATCH_FLOATING_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, data.scalar_type(), "segment_reduce_cuda", [&]() { auto* data_data_ptr = data.const_data_ptr<scalar_t>(); auto* output_data_ptr = output.mutable_data_ptr<scalar_t>(); // initialize starting value scalar_t initial_value = 0; if (initial.has_value()) { initial_value = initial.value().to<scalar_t>(); } else if (reduction == ReductionType::MAX) { initial_value = -std::numeric_limits<scalar_t>::infinity(); } else if ( reduction == ReductionType::MEAN || reduction == ReductionType::SUM) { initial_value = 0; } else if (reduction == ReductionType::MIN) { initial_value = std::numeric_limits<scalar_t>::infinity(); } else if (reduction == ReductionType::PROD) { initial_value = 1; } if (output_shape.size() > 1) { hipLaunchKernelGGL(( segment_reduce_forward_kernel<scalar_t>) , dim3(num_blocks), dim3(threads_per_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), reduction, output_data_ptr, data_data_ptr, lengths_data_ptr, offsets_data_ptr, segment_count, lengths_stride_axis, initial.has_value(), initial_value, outer_offset, inner_offset, data_stride_axis, data_size_axis, output_stride_axis, output_size_axis, offsets_stride_axis ); C10_HIP_KERNEL_LAUNCH_CHECK(); } else { if (reduction == ReductionType::MAX) { CustomMax max_op{}; CUB_WRAPPER( hipcub::DeviceSegmentedReduce::Reduce, data_data_ptr, output_data_ptr, segment_count, 
offsets_data_ptr, offsets_data_ptr + 1, max_op, initial_value, at::hip::getCurrentHIPStreamMasqueradingAsCUDA()); } else if (reduction == ReductionType::MEAN) { CustomSum sum_op{}; CUB_WRAPPER( hipcub::DeviceSegmentedReduce::Reduce, data_data_ptr, output_data_ptr, segment_count, offsets_data_ptr, offsets_data_ptr + 1, sum_op, initial_value, at::hip::getCurrentHIPStreamMasqueradingAsCUDA()); hipLaunchKernelGGL(( post_sum_div_kernel<scalar_t>) , dim3(num_blocks), dim3(threads_per_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), output_data_ptr, lengths_data_ptr, segment_count, initial.has_value(), initial_value); C10_HIP_KERNEL_LAUNCH_CHECK(); } else if (reduction == ReductionType::MIN) { CustomMin min_op{}; CUB_WRAPPER( hipcub::DeviceSegmentedReduce::Reduce, data_data_ptr, output_data_ptr, segment_count, offsets_data_ptr, offsets_data_ptr + 1, min_op, initial_value, at::hip::getCurrentHIPStreamMasqueradingAsCUDA()); } else if (reduction == ReductionType::SUM) { CustomSum sum_op{}; CUB_WRAPPER( hipcub::DeviceSegmentedReduce::Reduce, data_data_ptr, output_data_ptr, segment_count, offsets_data_ptr, offsets_data_ptr + 1, sum_op, initial_value, at::hip::getCurrentHIPStreamMasqueradingAsCUDA()); } else if (reduction == ReductionType::PROD) { CustomProd prod_op{}; CUB_WRAPPER( hipcub::DeviceSegmentedReduce::Reduce, data_data_ptr, output_data_ptr, segment_count, offsets_data_ptr, offsets_data_ptr + 1, prod_op, initial_value, at::hip::getCurrentHIPStreamMasqueradingAsCUDA()); } } }); })); return output; } Tensor _segment_reduce_lengths_cuda_kernel( ReductionType reduction, const Tensor& data, const Tensor& lengths, int64_t axis, const c10::optional<Scalar>& initial) { return _segment_reduce_lengths_offsets_cuda_kernel( reduction, data, lengths, axis, initial, /*is_offsets_like=*/false); } Tensor _segment_reduce_offsets_cuda_kernel( ReductionType reduction, const Tensor& data, const Tensor& offsets, int64_t axis, const c10::optional<Scalar>& initial) { return 
_segment_reduce_lengths_offsets_cuda_kernel( reduction, data, offsets, axis, initial, /*is_offsets_like=*/true); } REGISTER_DISPATCH(_segment_reduce_lengths_stub, &_segment_reduce_lengths_cuda_kernel); REGISTER_DISPATCH(_segment_reduce_offsets_stub, &_segment_reduce_offsets_cuda_kernel); REGISTER_DISPATCH( _segment_reduce_lengths_backward_stub, &_segment_reduce_lengths_backward_cuda_kernel); REGISTER_DISPATCH( _segment_reduce_offsets_backward_stub, &_segment_reduce_offsets_backward_cuda_kernel); } // namespace at::native
3ab879c2c888140b4d796c7968feea359fc6df3a.cu
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS #include <ATen/native/SegmentReduce.h> #include <ATen/core/Tensor.h> #include <ATen/Dispatch.h> #include <ATen/NumericUtils.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/detail/KernelUtils.h> #include <ATen/cuda/cub.cuh> #ifndef AT_PER_OPERATOR_HEADERS #include <ATen/Functions.h> #else #include <ATen/ops/empty.h> #include <ATen/ops/zeros.h> #include <ATen/ops/cat.h> #include <ATen/ops/cumsum.h> #endif namespace at::native { namespace { struct CustomMax { template <typename OutputT> __host__ __device__ __forceinline__ OutputT operator()(const OutputT& a, const OutputT& b) const { if (at::_isnan(a)) { return a; } else if (at::_isnan(b)) { return b; } return std::max<OutputT>(a, b); } }; struct CustomSum { template <typename OutputT> __host__ __device__ __forceinline__ OutputT operator()(const OutputT& a, const OutputT& b) const { return a + b; } }; struct CustomProd { template <typename OutputT> __host__ __device__ __forceinline__ OutputT operator()(const OutputT& a, const OutputT& b) const { return a * b; } }; struct CustomMin { template <typename OutputT> __host__ __device__ __forceinline__ OutputT operator()(const OutputT& a, const OutputT& b) const { if (at::_isnan(a)) { return a; } else if (at::_isnan(b)) { return b; } return std::min<OutputT>(a, b); } }; template <typename scalar_t, typename index_t> __global__ static void post_sum_div_kernel( scalar_t* output_data, const index_t* lengths_data, const int64_t segment_count, bool is_initial_set, scalar_t initial) { CUDA_KERNEL_LOOP(index, segment_count) { CUDA_KERNEL_ASSERT(lengths_data[index] >= 0); if (lengths_data[index] == 0) { if (is_initial_set) { output_data[index] = initial; } else { output_data[index] = NAN; } } else if (!at::_isnan(output_data[index])) { output_data[index] = output_data[index] / lengths_data[index]; } } } template <typename scalar_t, typename index_t> __global__ void segment_reduce_forward_kernel( ReductionType reduction, scalar_t* 
output_data, const scalar_t* values_data, const index_t* lengths_data, const index_t* lengths_cumsum_data, const int64_t segment_count, const int64_t lengths_stride_axis, bool is_initial_set, scalar_t initial_value, const int64_t outer_offset, const int64_t inner_offset, const int64_t data_stride_axis, const int64_t data_size_axis, const int64_t output_stride_axis, const int64_t output_size_axis, const int64_t lengths_cumsum_stride_axis) { int64_t idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= (outer_offset * segment_count * inner_offset)) { return; } int64_t row_id = idx / inner_offset; int64_t lane_id = idx % inner_offset; // lane_id is the inner_idx int64_t outer_idx = row_id / segment_count; int64_t dim_idx = row_id % segment_count; int64_t offset_idx = outer_idx * lengths_cumsum_stride_axis * (segment_count + 1) + dim_idx; index_t offset_start = lengths_cumsum_data[offset_idx]; index_t offset_end = lengths_cumsum_data[offset_idx + 1]; // ===== step2: apply reduction for (index_t j = offset_start; j < offset_end; ++j) { int64_t data_index = outer_idx * data_stride_axis * data_size_axis + j * data_stride_axis + lane_id; const auto data = values_data[data_index]; // TODO: There is no need to branch with every element if (reduction == ReductionType::MAX) { initial_value = at::_isnan(data) ? data : std::max<scalar_t>(initial_value, data); } else if ( reduction == ReductionType::MEAN || reduction == ReductionType::SUM) { initial_value = initial_value + data; } else if (reduction == ReductionType::MIN) { initial_value = at::_isnan(data) ? 
data : std::min<scalar_t>(initial_value, data); } else if ( reduction == ReductionType::PROD) { initial_value = initial_value * data; } } // ===== step3: finalize reduction int64_t lengths_idx = outer_idx * lengths_stride_axis * segment_count + dim_idx; CUDA_KERNEL_ASSERT(lengths_data[lengths_idx] >= 0); if (lengths_data[lengths_idx] == 0 && !is_initial_set && reduction == ReductionType::MEAN) { initial_value = static_cast<scalar_t>(NAN); } else if ( reduction == ReductionType::MEAN && lengths_data[lengths_idx] > 0 && !at::_isnan(initial_value)) { initial_value = initial_value / lengths_data[lengths_idx]; } int64_t output_index = outer_idx * output_stride_axis * output_size_axis + dim_idx * output_stride_axis + lane_id; output_data[output_index] = initial_value; } template <typename scalar_t, typename index_t> __global__ void segment_reduce_backward_kernel( ReductionType reduction, scalar_t* grad_input_data, const scalar_t* grad_data, const scalar_t* output_data, const scalar_t* values_data, const index_t* lengths_data, const index_t* lengths_cumsum_data, const int64_t segment_count, const int64_t lengths_stride_axis, scalar_t initial_prod_value, const int64_t outer_offset, const int64_t inner_offset, const int64_t data_stride_axis, const int64_t data_size_axis, const int64_t output_stride_axis, const int64_t output_size_axis, const int64_t lengths_cumsum_stride_axis) { int64_t idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= (outer_offset * segment_count * inner_offset)) { return; } int64_t row_id = idx / inner_offset; int64_t lane_id = idx % inner_offset; // lane_id is the inner_idx int64_t outer_idx = row_id / segment_count; int64_t dim_idx = row_id % segment_count; int64_t lengths_idx = outer_idx * lengths_stride_axis * segment_count + dim_idx; auto segment_length = lengths_data[lengths_idx]; if (segment_length == 0) { return; } int64_t offset_idx = outer_idx * lengths_cumsum_stride_axis * (segment_count + 1) + dim_idx; index_t offset_start = 
lengths_cumsum_data[offset_idx]; index_t offset_end = lengths_cumsum_data[offset_idx + 1]; int64_t output_index = outer_idx * output_stride_axis * output_size_axis + dim_idx * output_stride_axis + lane_id; if (reduction == ReductionType::MAX || reduction == ReductionType::MIN) { int64_t counter = 0; for (int64_t j = offset_start; j < offset_end; ++j) { int64_t data_index = outer_idx * data_stride_axis * data_size_axis + j * data_stride_axis + lane_id; if (at::_isnan(values_data[data_index]) || values_data[data_index] == output_data[output_index]) { grad_input_data[data_index] = grad_data[output_index]; counter++; } } // Average gradient based on number of maximum elements in the // segment if (counter < 2) { return; } for (int64_t j = offset_start; j < offset_end; ++j) { int64_t data_index = outer_idx * data_stride_axis * data_size_axis + j * data_stride_axis + lane_id; if (grad_input_data[data_index] > 0) { grad_input_data[data_index] = grad_input_data[data_index] / counter; } } } else if (reduction == ReductionType::MEAN) { auto grad_val = grad_data[output_index] / segment_length; for (int64_t j = offset_start; j < offset_end; ++j) { int64_t data_index = outer_idx * data_stride_axis * data_size_axis + j * data_stride_axis + lane_id; grad_input_data[data_index] = grad_val; } } else if (reduction == ReductionType::SUM) { const auto& grad_val = grad_data[output_index]; for (int64_t j = offset_start; j < offset_end; ++j) { int64_t data_index = outer_idx * data_stride_axis * data_size_axis + j * data_stride_axis + lane_id; grad_input_data[data_index] = grad_val; } } else if (reduction == ReductionType::PROD) { const auto& grad_val = grad_data[output_index] * output_data[output_index]; for (int64_t j = offset_start; j < offset_end; ++j) { int64_t data_index = outer_idx * data_stride_axis * data_size_axis + j * data_stride_axis + lane_id; if (at::_isnan(values_data[data_index]) || values_data[data_index] == 0) { // explicitly compute exclusive prod scalar_t 
exclusive_prod = initial_prod_value; int64_t prod_idx; for (int64_t k = offset_start; k < offset_end; ++k) { if (k != j) { prod_idx = outer_idx * data_stride_axis * data_size_axis + k * data_stride_axis + lane_id; exclusive_prod *= values_data[prod_idx]; } } grad_input_data[data_index] = grad_data[output_index] * exclusive_prod; } else { grad_input_data[data_index] = grad_val / values_data[data_index]; } } } } } // namespace Tensor _segment_reduce_lengths_offsets_backward_cuda_kernel( const Tensor& grad_contig, const Tensor& output_contig, const Tensor& data_contig, ReductionType reduction, const Tensor& lengths_or_offsets_contig, int64_t axis, const c10::optional<Scalar>& initial, bool is_offsets_like) { axis = lengths_or_offsets_contig.dim() - 1; int64_t segment_count = is_offsets_like ? lengths_or_offsets_contig.size(axis) - 1 : lengths_or_offsets_contig.size(axis); int64_t lengths_stride_axis = lengths_or_offsets_contig.stride(axis); auto grad_input = at::zeros({data_contig.sizes()}, grad_contig.options()); auto offsets = lengths_or_offsets_contig; auto lengths = lengths_or_offsets_contig; if (is_offsets_like) { lengths = lengths.diff(); } else { auto zeros_shape = offsets.sizes().vec(); zeros_shape[axis] = 1; offsets = at::cat({at::zeros(zeros_shape, offsets.options()), offsets}, axis); offsets.cumsum_(axis); } // outer_offset is the size of the outer dimensions of output (before axis) // inner_offset is the size of the inner dimensions of output (after axis) int64_t outer_offset = 1, inner_offset = 1; for (int64_t d = 0; d < axis; d++) { outer_offset *= output_contig.size(d); } for (int64_t d = axis + 1; d < output_contig.dim(); d++) { inner_offset *= output_contig.size(d); } constexpr int threads_per_block = 256; int64_t num_blocks = (outer_offset * inner_offset * segment_count + threads_per_block - 1) / threads_per_block; num_blocks = std::max(num_blocks, (int64_t)1); auto data_stride_axis = data_contig.stride(axis); auto data_size_axis = 
data_contig.size(axis); auto output_stride_axis = output_contig.stride(axis); auto output_size_axis = output_contig.size(axis); auto offsets_stride_axis = offsets.stride(axis); AT_DISPATCH_INDEX_TYPES( lengths_or_offsets_contig.scalar_type(), "_segment_reduce_cuda_lengths_offsets_backward_kernel1", ([&] { const auto* lengths_data = lengths.const_data_ptr<index_t>(); auto* offsets_data = offsets.const_data_ptr<index_t>(); // TODO: Switch to TensorIterator for better maintainablility and // readability AT_DISPATCH_FLOATING_TYPES_AND2( kBFloat16, kHalf, data_contig.scalar_type(), "_segment_reduce_cpu", ([&]() { auto* output_data = output_contig.const_data_ptr<scalar_t>(); auto* grad_data = grad_contig.const_data_ptr<scalar_t>(); auto* grad_input_data = grad_input.mutable_data_ptr<scalar_t>(); const auto* values_data = data_contig.const_data_ptr<scalar_t>(); scalar_t initial_prod_value; if (initial.has_value()) { initial_prod_value = initial.value().to<scalar_t>(); } else { initial_prod_value = 1; } segment_reduce_backward_kernel<scalar_t> <<<num_blocks, threads_per_block, 0, at::cuda::getCurrentCUDAStream()>>>( reduction, grad_input_data, grad_data, output_data, values_data, lengths_data, offsets_data, segment_count, lengths_stride_axis, initial_prod_value, outer_offset, inner_offset, data_stride_axis, data_size_axis, output_stride_axis, output_size_axis, offsets_stride_axis ); C10_CUDA_KERNEL_LAUNCH_CHECK(); })); })); return grad_input; } Tensor _segment_reduce_lengths_backward_cuda_kernel( const Tensor& grad_contig, const Tensor& output_contig, const Tensor& data_contig, ReductionType reduction, const Tensor& lengths_contig, int64_t axis, const c10::optional<Scalar>& initial) { return _segment_reduce_lengths_offsets_backward_cuda_kernel( grad_contig, output_contig, data_contig, reduction, lengths_contig, axis, initial, /*is_offsets_like=*/false); } Tensor _segment_reduce_offsets_backward_cuda_kernel( const Tensor& grad_contig, const Tensor& output_contig, const 
Tensor& data_contig, ReductionType reduction, const Tensor& offsets_contig, int64_t axis, const c10::optional<Scalar>& initial) { return _segment_reduce_lengths_offsets_backward_cuda_kernel( grad_contig, output_contig, data_contig, reduction, offsets_contig, axis, initial, /*is_offsets_like=*/true); } Tensor _segment_reduce_lengths_offsets_cuda_kernel( ReductionType reduction, const Tensor& data, const Tensor& lengths_or_offsets, int64_t axis, const c10::optional<Scalar>& initial, bool is_offsets_like) { // data and lengths_or_offsets should be contiguous from the call to .contiguous in segment_reduce_kernel TORCH_CHECK(data.is_contiguous()); TORCH_CHECK(lengths_or_offsets.is_contiguous()); axis = lengths_or_offsets.dim() - 1; int64_t segment_count = is_offsets_like ? lengths_or_offsets.size(axis) - 1 : lengths_or_offsets.size(axis); int64_t lengths_stride_axis = lengths_or_offsets.stride(axis); auto output_shape = data.sizes().vec(); output_shape[axis] = segment_count; auto output = at::empty(output_shape, data.options()); auto offsets = lengths_or_offsets; auto lengths = lengths_or_offsets; if (is_offsets_like) { lengths = lengths.diff(); } else { auto zeros_shape = offsets.sizes().vec(); zeros_shape[axis] = 1; offsets = at::cat({at::zeros(zeros_shape, offsets.options()), offsets}, axis); offsets.cumsum_(axis); } // outer_offset is the size of the outer dimensions of output (before axis) // inner_offset is the size of the inner dimensions of output (after axis) int64_t outer_offset = 1, inner_offset = 1; for (int64_t d = 0; d < axis; d++) { outer_offset *= output.size(d); } for (int64_t d = axis + 1; d < output.dim(); d++) { inner_offset *= output.size(d); } constexpr int threads_per_block = 256; // segment_count * stride_count is just output.numel() ? 
int64_t num_blocks = (output.numel() + threads_per_block - 1) / threads_per_block; num_blocks = std::max(num_blocks, (int64_t)1); auto data_stride_axis = data.stride(axis); auto data_size_axis = data.size(axis); auto output_stride_axis = output.stride(axis); auto output_size_axis = output.size(axis); auto offsets_stride_axis = offsets.stride(axis); AT_DISPATCH_INDEX_TYPES( lengths_or_offsets.scalar_type(), "_segment_reduce_cuda_kernel1", ([&] { auto* offsets_data_ptr = offsets.const_data_ptr<index_t>(); auto* lengths_data_ptr = lengths.const_data_ptr<index_t>(); AT_DISPATCH_FLOATING_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, data.scalar_type(), "segment_reduce_cuda", [&]() { auto* data_data_ptr = data.const_data_ptr<scalar_t>(); auto* output_data_ptr = output.mutable_data_ptr<scalar_t>(); // initialize starting value scalar_t initial_value = 0; if (initial.has_value()) { initial_value = initial.value().to<scalar_t>(); } else if (reduction == ReductionType::MAX) { initial_value = -std::numeric_limits<scalar_t>::infinity(); } else if ( reduction == ReductionType::MEAN || reduction == ReductionType::SUM) { initial_value = 0; } else if (reduction == ReductionType::MIN) { initial_value = std::numeric_limits<scalar_t>::infinity(); } else if (reduction == ReductionType::PROD) { initial_value = 1; } if (output_shape.size() > 1) { segment_reduce_forward_kernel<scalar_t> <<<num_blocks, threads_per_block, 0, at::cuda::getCurrentCUDAStream()>>>( reduction, output_data_ptr, data_data_ptr, lengths_data_ptr, offsets_data_ptr, segment_count, lengths_stride_axis, initial.has_value(), initial_value, outer_offset, inner_offset, data_stride_axis, data_size_axis, output_stride_axis, output_size_axis, offsets_stride_axis ); C10_CUDA_KERNEL_LAUNCH_CHECK(); } else { if (reduction == ReductionType::MAX) { CustomMax max_op{}; CUB_WRAPPER( cub::DeviceSegmentedReduce::Reduce, data_data_ptr, output_data_ptr, segment_count, offsets_data_ptr, offsets_data_ptr + 1, max_op, 
initial_value, at::cuda::getCurrentCUDAStream()); } else if (reduction == ReductionType::MEAN) { CustomSum sum_op{}; CUB_WRAPPER( cub::DeviceSegmentedReduce::Reduce, data_data_ptr, output_data_ptr, segment_count, offsets_data_ptr, offsets_data_ptr + 1, sum_op, initial_value, at::cuda::getCurrentCUDAStream()); post_sum_div_kernel<scalar_t> <<<num_blocks, threads_per_block, 0, at::cuda::getCurrentCUDAStream()>>>( output_data_ptr, lengths_data_ptr, segment_count, initial.has_value(), initial_value); C10_CUDA_KERNEL_LAUNCH_CHECK(); } else if (reduction == ReductionType::MIN) { CustomMin min_op{}; CUB_WRAPPER( cub::DeviceSegmentedReduce::Reduce, data_data_ptr, output_data_ptr, segment_count, offsets_data_ptr, offsets_data_ptr + 1, min_op, initial_value, at::cuda::getCurrentCUDAStream()); } else if (reduction == ReductionType::SUM) { CustomSum sum_op{}; CUB_WRAPPER( cub::DeviceSegmentedReduce::Reduce, data_data_ptr, output_data_ptr, segment_count, offsets_data_ptr, offsets_data_ptr + 1, sum_op, initial_value, at::cuda::getCurrentCUDAStream()); } else if (reduction == ReductionType::PROD) { CustomProd prod_op{}; CUB_WRAPPER( cub::DeviceSegmentedReduce::Reduce, data_data_ptr, output_data_ptr, segment_count, offsets_data_ptr, offsets_data_ptr + 1, prod_op, initial_value, at::cuda::getCurrentCUDAStream()); } } }); })); return output; } Tensor _segment_reduce_lengths_cuda_kernel( ReductionType reduction, const Tensor& data, const Tensor& lengths, int64_t axis, const c10::optional<Scalar>& initial) { return _segment_reduce_lengths_offsets_cuda_kernel( reduction, data, lengths, axis, initial, /*is_offsets_like=*/false); } Tensor _segment_reduce_offsets_cuda_kernel( ReductionType reduction, const Tensor& data, const Tensor& offsets, int64_t axis, const c10::optional<Scalar>& initial) { return _segment_reduce_lengths_offsets_cuda_kernel( reduction, data, offsets, axis, initial, /*is_offsets_like=*/true); } REGISTER_DISPATCH(_segment_reduce_lengths_stub, 
&_segment_reduce_lengths_cuda_kernel); REGISTER_DISPATCH(_segment_reduce_offsets_stub, &_segment_reduce_offsets_cuda_kernel); REGISTER_DISPATCH( _segment_reduce_lengths_backward_stub, &_segment_reduce_lengths_backward_cuda_kernel); REGISTER_DISPATCH( _segment_reduce_offsets_backward_stub, &_segment_reduce_offsets_backward_cuda_kernel); } // namespace at::native
c0eba572f88aac00ec1aa6f62a099488e51dc3d6.hip
// !!! This is a file automatically generated by hipify!!! /* kernels/compress.cuh -- Supplemental CUDA kernels used by Enoki-JIT Copyright (c) 2020 Wenzel Jakob <wenzel.jakob@epfl.ch> All rights reserved. Use of this source code is governed by a BSD-style license that can be found in the LICENSE file. */ #include "reduce_hip.cuh" #include "scan.cuh" #include "compress.cuh" #include "mkperm.cuh" #include "misc.cuh"
c0eba572f88aac00ec1aa6f62a099488e51dc3d6.cu
/* kernels/compress.cuh -- Supplemental CUDA kernels used by Enoki-JIT Copyright (c) 2020 Wenzel Jakob <wenzel.jakob@epfl.ch> All rights reserved. Use of this source code is governed by a BSD-style license that can be found in the LICENSE file. */ #include "reduce.cuh" #include "scan.cuh" #include "compress.cuh" #include "mkperm.cuh" #include "misc.cuh"
f7f0ec1804216b75123694716c7600efae16b24c.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2022-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "../test_utils.cuh" #include <gtest/gtest.h> #include <optional> #include <raft/core/interruptible.hpp> #include <raft/core/resource/cuda_stream.hpp> #include <raft/random/rng.cuh> #include <raft/stats/r2_score.cuh> #include <raft/util/cuda_utils.cuh> #include <rmm/device_uvector.hpp> #include <stdio.h> #include <stdlib.h> #include <vector> namespace raft { namespace stats { template <typename T> struct R2_scoreInputs { T tolerance; int nrows; unsigned long long int seed; }; template <typename T> ::std::ostream& operator<<(::std::ostream& os, const R2_scoreInputs<T>& dims) { return os; } template <typename T> class R2_scoreTest : public ::testing::TestWithParam<R2_scoreInputs<T>> { protected: R2_scoreTest() : stream(resource::get_cuda_stream(handle)) {} void SetUp() override { params = ::testing::TestWithParam<R2_scoreInputs<T>>::GetParam(); raft::random::RngState r(params.seed); rmm::device_uvector<T> y(params.nrows, stream); rmm::device_uvector<T> y_hat(params.nrows, stream); uniform(handle, r, y.data(), params.nrows, (T)-1.0, (T)1.0); uniform(handle, r, y_hat.data(), params.nrows, (T)-1.0, (T)1.0); actualVal = r2_score(handle, raft::make_device_vector_view<const T>(y.data(), params.nrows), raft::make_device_vector_view<const T>(y_hat.data(), params.nrows)); expectedVal = T(0); std::vector<T> h_y(params.nrows, 
0); std::vector<T> h_y_hat(params.nrows, 0); raft::update_host(h_y.data(), y.data(), params.nrows, stream); raft::update_host(h_y_hat.data(), y_hat.data(), params.nrows, stream); T mean = T(0); for (int i = 0; i < params.nrows; ++i) { mean += h_y[i]; } mean /= params.nrows; std::vector<T> sse_arr(params.nrows, 0); std::vector<T> ssto_arr(params.nrows, 0); T sse = T(0); T ssto = T(0); for (int i = 0; i < params.nrows; ++i) { sse += (h_y[i] - h_y_hat[i]) * (h_y[i] - h_y_hat[i]); ssto += (h_y[i] - mean) * (h_y[i] - mean); } expectedVal = 1.0 - sse / ssto; raft::interruptible::synchronize(stream); } protected: R2_scoreInputs<T> params; raft::resources handle; hipStream_t stream = 0; T expectedVal, actualVal; }; const std::vector<R2_scoreInputs<float>> inputsf = { {0.001f, 30, 1234ULL}, {0.001f, 100, 1234ULL}, {0.001f, 1000, 1234ULL}}; typedef R2_scoreTest<float> R2_scoreTestF; TEST_P(R2_scoreTestF, Result) { auto eq = raft::CompareApprox<float>(params.tolerance); ASSERT_TRUE(match(expectedVal, actualVal, eq)); } INSTANTIATE_TEST_CASE_P(R2_scoreTests, R2_scoreTestF, ::testing::ValuesIn(inputsf)); const std::vector<R2_scoreInputs<double>> inputsd = { {0.001, 30, 1234ULL}, {0.001, 100, 1234ULL}, {0.001, 1000, 1234ULL}}; typedef R2_scoreTest<double> R2_scoreTestD; TEST_P(R2_scoreTestD, Result) { auto eq = raft::CompareApprox<double>(params.tolerance); ASSERT_TRUE(match(expectedVal, actualVal, eq)); } INSTANTIATE_TEST_CASE_P(R2_scoreTests, R2_scoreTestD, ::testing::ValuesIn(inputsd)); } // end namespace stats } // end namespace raft
f7f0ec1804216b75123694716c7600efae16b24c.cu
/* * Copyright (c) 2022-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "../test_utils.cuh" #include <gtest/gtest.h> #include <optional> #include <raft/core/interruptible.hpp> #include <raft/core/resource/cuda_stream.hpp> #include <raft/random/rng.cuh> #include <raft/stats/r2_score.cuh> #include <raft/util/cuda_utils.cuh> #include <rmm/device_uvector.hpp> #include <stdio.h> #include <stdlib.h> #include <vector> namespace raft { namespace stats { template <typename T> struct R2_scoreInputs { T tolerance; int nrows; unsigned long long int seed; }; template <typename T> ::std::ostream& operator<<(::std::ostream& os, const R2_scoreInputs<T>& dims) { return os; } template <typename T> class R2_scoreTest : public ::testing::TestWithParam<R2_scoreInputs<T>> { protected: R2_scoreTest() : stream(resource::get_cuda_stream(handle)) {} void SetUp() override { params = ::testing::TestWithParam<R2_scoreInputs<T>>::GetParam(); raft::random::RngState r(params.seed); rmm::device_uvector<T> y(params.nrows, stream); rmm::device_uvector<T> y_hat(params.nrows, stream); uniform(handle, r, y.data(), params.nrows, (T)-1.0, (T)1.0); uniform(handle, r, y_hat.data(), params.nrows, (T)-1.0, (T)1.0); actualVal = r2_score(handle, raft::make_device_vector_view<const T>(y.data(), params.nrows), raft::make_device_vector_view<const T>(y_hat.data(), params.nrows)); expectedVal = T(0); std::vector<T> h_y(params.nrows, 0); std::vector<T> h_y_hat(params.nrows, 0); 
raft::update_host(h_y.data(), y.data(), params.nrows, stream); raft::update_host(h_y_hat.data(), y_hat.data(), params.nrows, stream); T mean = T(0); for (int i = 0; i < params.nrows; ++i) { mean += h_y[i]; } mean /= params.nrows; std::vector<T> sse_arr(params.nrows, 0); std::vector<T> ssto_arr(params.nrows, 0); T sse = T(0); T ssto = T(0); for (int i = 0; i < params.nrows; ++i) { sse += (h_y[i] - h_y_hat[i]) * (h_y[i] - h_y_hat[i]); ssto += (h_y[i] - mean) * (h_y[i] - mean); } expectedVal = 1.0 - sse / ssto; raft::interruptible::synchronize(stream); } protected: R2_scoreInputs<T> params; raft::resources handle; cudaStream_t stream = 0; T expectedVal, actualVal; }; const std::vector<R2_scoreInputs<float>> inputsf = { {0.001f, 30, 1234ULL}, {0.001f, 100, 1234ULL}, {0.001f, 1000, 1234ULL}}; typedef R2_scoreTest<float> R2_scoreTestF; TEST_P(R2_scoreTestF, Result) { auto eq = raft::CompareApprox<float>(params.tolerance); ASSERT_TRUE(match(expectedVal, actualVal, eq)); } INSTANTIATE_TEST_CASE_P(R2_scoreTests, R2_scoreTestF, ::testing::ValuesIn(inputsf)); const std::vector<R2_scoreInputs<double>> inputsd = { {0.001, 30, 1234ULL}, {0.001, 100, 1234ULL}, {0.001, 1000, 1234ULL}}; typedef R2_scoreTest<double> R2_scoreTestD; TEST_P(R2_scoreTestD, Result) { auto eq = raft::CompareApprox<double>(params.tolerance); ASSERT_TRUE(match(expectedVal, actualVal, eq)); } INSTANTIATE_TEST_CASE_P(R2_scoreTests, R2_scoreTestD, ::testing::ValuesIn(inputsd)); } // end namespace stats } // end namespace raft
7ae3d03dff09a599be31a8a4415b56e01d5f178b.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "compress_write_permutation.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *d_write_permutation = NULL; hipMalloc(&d_write_permutation, XSIZE*YSIZE); int *d_full_write_permutation = NULL; hipMalloc(&d_full_write_permutation, XSIZE*YSIZE); int *d_gcs = NULL; hipMalloc(&d_gcs, XSIZE*YSIZE); int total_pad_row_num = 1; int chunk = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( compress_write_permutation), dim3(gridBlock),dim3(threadBlock), 0, 0, d_write_permutation,d_full_write_permutation,d_gcs,total_pad_row_num,chunk); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( compress_write_permutation), dim3(gridBlock),dim3(threadBlock), 0, 0, d_write_permutation,d_full_write_permutation,d_gcs,total_pad_row_num,chunk); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; 
loop_counter++) {hipLaunchKernelGGL(( compress_write_permutation), dim3(gridBlock),dim3(threadBlock), 0, 0, d_write_permutation,d_full_write_permutation,d_gcs,total_pad_row_num,chunk); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
7ae3d03dff09a599be31a8a4415b56e01d5f178b.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "compress_write_permutation.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *d_write_permutation = NULL; cudaMalloc(&d_write_permutation, XSIZE*YSIZE); int *d_full_write_permutation = NULL; cudaMalloc(&d_full_write_permutation, XSIZE*YSIZE); int *d_gcs = NULL; cudaMalloc(&d_gcs, XSIZE*YSIZE); int total_pad_row_num = 1; int chunk = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); compress_write_permutation<<<gridBlock,threadBlock>>>(d_write_permutation,d_full_write_permutation,d_gcs,total_pad_row_num,chunk); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { compress_write_permutation<<<gridBlock,threadBlock>>>(d_write_permutation,d_full_write_permutation,d_gcs,total_pad_row_num,chunk); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { compress_write_permutation<<<gridBlock,threadBlock>>>(d_write_permutation,d_full_write_permutation,d_gcs,total_pad_row_num,chunk); } 
auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
b011b08d598c7afa26cee7309db8c66df7288ad7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <cstdlib> #include <iostream> #include <numeric> #include <time.h> #include <math.h> #include <stdio.h> using namespace std; __global__ void sum(double* input) { const int tid = threadIdx.x; int step_size = 1; int number_of_threads = blockDim.x; while (number_of_threads > 0) { if (tid < number_of_threads) { const int fst = tid * step_size * 2; const int snd = fst + step_size; input[fst] += input[snd]; } step_size <<= 1; number_of_threads >>= 1; } } __global__ void variance(int* n, double *x, double *mean) { int index = threadIdx.x; int stride = blockDim.x; for (int i = index; i < *n; i+= stride) { x[i] = (x[i] - *mean) ; x[i] = x[i] * x[i]; } } double calculateMean(int count , double *h) { const int size = count * sizeof(double); double* d; //mean hipMalloc(&d, size); hipMemcpy(d, h, size, hipMemcpyHostToDevice); hipLaunchKernelGGL(( sum), dim3(1), dim3(count / 2) , 0, 0, d); double result; hipMemcpy(&result, d, sizeof(double), hipMemcpyDeviceToHost); result /= count; // cout << "\nAverage is " << result << endl; hipFree(d); return result; } double* calculateVariance(int *count , double *h , double *mean) { const int size = (*count) * sizeof(double); double* d ; int* n; double *tempMean; hipMalloc(&d, size); hipMalloc(&n, sizeof(int)); hipMalloc(&tempMean, sizeof(double)); hipMemcpy(d, h, size, hipMemcpyHostToDevice); hipMemcpy(n, count, sizeof(int), hipMemcpyHostToDevice); hipMemcpy(tempMean, mean, sizeof(double), hipMemcpyHostToDevice); hipLaunchKernelGGL(( variance), dim3(1), dim3((*count)/2) , 0, 0, n , d , tempMean); hipMemcpy(h, d, size, hipMemcpyDeviceToHost); hipFree(d); hipFree(n); hipFree(tempMean); return h; } int main() { const int count = 8; // size of array srand(2); double h[count]; cout << "Elements of array are : " << endl; for ( int i = 0 ; i < count ; i ++ ) { h[i] = rand()%10; cout << h[i] << "\t" ; } int 
tempCount = count ; double mean = calculateMean(count , h); cout << "\nMean is : " << mean << endl; double *res = calculateVariance(&tempCount , h , &mean); // for ( int i = 0 ; i < count ; i ++ ) { // // cout << res[i] << "\t" ; // // } double variance = calculateMean(count , res); cout << "Variance is : " << variance << endl; cout << "Standard Deviation is : " << sqrt(variance) << endl; delete[] &h; return 0; }
b011b08d598c7afa26cee7309db8c66df7288ad7.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <cstdlib> #include <iostream> #include <numeric> #include <time.h> #include <math.h> #include <stdio.h> using namespace std; __global__ void sum(double* input) { const int tid = threadIdx.x; int step_size = 1; int number_of_threads = blockDim.x; while (number_of_threads > 0) { if (tid < number_of_threads) { const int fst = tid * step_size * 2; const int snd = fst + step_size; input[fst] += input[snd]; } step_size <<= 1; number_of_threads >>= 1; } } __global__ void variance(int* n, double *x, double *mean) { int index = threadIdx.x; int stride = blockDim.x; for (int i = index; i < *n; i+= stride) { x[i] = (x[i] - *mean) ; x[i] = x[i] * x[i]; } } double calculateMean(int count , double *h) { const int size = count * sizeof(double); double* d; //mean cudaMalloc(&d, size); cudaMemcpy(d, h, size, cudaMemcpyHostToDevice); sum<<<1, count / 2 >>> (d); double result; cudaMemcpy(&result, d, sizeof(double), cudaMemcpyDeviceToHost); result /= count; // cout << "\nAverage is " << result << endl; cudaFree(d); return result; } double* calculateVariance(int *count , double *h , double *mean) { const int size = (*count) * sizeof(double); double* d ; int* n; double *tempMean; cudaMalloc(&d, size); cudaMalloc(&n, sizeof(int)); cudaMalloc(&tempMean, sizeof(double)); cudaMemcpy(d, h, size, cudaMemcpyHostToDevice); cudaMemcpy(n, count, sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(tempMean, mean, sizeof(double), cudaMemcpyHostToDevice); variance<<<1, (*count)/2 >>> (n , d , tempMean); cudaMemcpy(h, d, size, cudaMemcpyDeviceToHost); cudaFree(d); cudaFree(n); cudaFree(tempMean); return h; } int main() { const int count = 8; // size of array srand(2); double h[count]; cout << "Elements of array are : " << endl; for ( int i = 0 ; i < count ; i ++ ) { h[i] = rand()%10; cout << h[i] << "\t" ; } int tempCount = count ; double mean = calculateMean(count , h); cout << "\nMean is : " << mean << endl; double *res = 
calculateVariance(&tempCount , h , &mean); // for ( int i = 0 ; i < count ; i ++ ) { // // cout << res[i] << "\t" ; // // } double variance = calculateMean(count , res); cout << "Variance is : " << variance << endl; cout << "Standard Deviation is : " << sqrt(variance) << endl; delete[] &h; return 0; }
b2bd8d1b50f546149f2322cc7fd099dbf75e33ba.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "skeleton.hpp" #include "animesh_kers.hpp" void compute_pot_launch_ker(const Cuda_utils::DA_Vec3& d_verts, Cuda_utils::DA_float& d_pot) { const int nb_verts = d_verts.size(); const int block_size = 256; const int grid_size = (nb_verts + block_size - 1) / block_size; hipLaunchKernelGGL(( Animesh_kers::compute_base_potential), dim3(grid_size), dim3(block_size), 0, 0, 0, // HACK: always skeleton id zero (Point3*)d_verts.ptr(), d_verts.size(), d_pot.ptr(), 0); }
b2bd8d1b50f546149f2322cc7fd099dbf75e33ba.cu
#include "skeleton.hpp" #include "animesh_kers.hpp" void compute_pot_launch_ker(const Cuda_utils::DA_Vec3& d_verts, Cuda_utils::DA_float& d_pot) { const int nb_verts = d_verts.size(); const int block_size = 256; const int grid_size = (nb_verts + block_size - 1) / block_size; Animesh_kers::compute_base_potential<<<grid_size, block_size>>> (0, // HACK: always skeleton id zero (Point3*)d_verts.ptr(), d_verts.size(), d_pot.ptr(), 0); }
32363c8dd08092c3a6fd437438582f9f19238fab.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <math.h> #include <chrono> #include <hip/hip_runtime.h> __global__ void k0 (const float *__restrict__ a, float *__restrict__ o) { int t = blockIdx.x * blockDim.x + threadIdx.x; float x = a[t]; o[t] = coshf(x)/sinhf(x) - 1.f/x; } __global__ void k1 (const float *__restrict__ a, float *__restrict__ o) { int t = blockIdx.x * blockDim.x + threadIdx.x; float x = a[t]; o[t] = 1.f / tanhf(x) - 1.f/x; } /* Copyright (c) 2018-2021, Norbert Juffa All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ __global__ void k2 (const float *__restrict__ a, float *__restrict__ o) { int t = blockIdx.x * blockDim.x + threadIdx.x; float x = a[t]; float s, r; s = x * x; r = 7.70960469e-8f; r = fmaf (r, s, -1.65101926e-6f); r = fmaf (r, s, 2.03457112e-5f); r = fmaf (r, s, -2.10521728e-4f); r = fmaf (r, s, 2.11580913e-3f); r = fmaf (r, s, -2.22220998e-2f); r = fmaf (r, s, 8.33333284e-2f); r = fmaf (r, x, 0.25f * x); o[t] = r; } int main(int argc, char* argv[]) { if (argc != 3) { printf("Usage %s <n> <repeat>\n", argv[0]); return 1; } const int n = atoi(argv[1]); const int repeat = atoi(argv[2]); const size_t size = sizeof(float) * n; float *a, *o, *o0, *o1, *o2; a = (float*) malloc (size); o = (float*) malloc (size); // the range [-1.8, -0.00001) for (int i = 0; i < n; i++) { a[i] = -1.8f + i * (1.79999f / n); } o0 = (float*) malloc (size); o1 = (float*) malloc (size); o2 = (float*) malloc (size); float *d_a, *d_o0, *d_o1, *d_o2; hipMalloc((void**)&d_a, size); hipMalloc((void**)&d_o0, size); hipMalloc((void**)&d_o1, size); hipMalloc((void**)&d_o2, size); hipMemcpy(d_a, a, size, hipMemcpyHostToDevice); hipDeviceSynchronize(); auto start = std::chrono::steady_clock::now(); for (int i = 0; i < repeat; i++) { hipLaunchKernelGGL(( k0), dim3(n/256), dim3(256), 0, 0, d_a, d_o0); } hipDeviceSynchronize(); auto end = std::chrono::steady_clock::now(); auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); printf("Average execution time of k0: %f (s)\n", (time * 1e-9f) / repeat); start = std::chrono::steady_clock::now(); for (int i = 0; i < repeat; i++) { hipLaunchKernelGGL(( k1), dim3(n/256), dim3(256), 0, 0, d_a, d_o1); } hipDeviceSynchronize(); end = std::chrono::steady_clock::now(); time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); printf("Average execution time of k1: %f (s)\n", (time * 1e-9f) / repeat); start = std::chrono::steady_clock::now(); for (int i = 0; i < repeat; i++) { hipLaunchKernelGGL(( k2), 
dim3(n/256), dim3(256), 0, 0, d_a, d_o2); } hipDeviceSynchronize(); end = std::chrono::steady_clock::now(); time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); printf("Average execution time of k2: %f (s)\n", (time * 1e-9f) / repeat); hipMemcpy(o0, d_o0, size, hipMemcpyDeviceToHost); hipMemcpy(o1, d_o1, size, hipMemcpyDeviceToHost); hipMemcpy(o2, d_o2, size, hipMemcpyDeviceToHost); // https://en.wikipedia.org/wiki/Brillouin_and_Langevin_functions for (int i = 0; i < n; i++) { float x = a[i]; float x2 = x * x; float x4 = x2 * x2; float x6 = x4 * x2; o[i] = x * (1.f/3.f - 1.f/45.f * x2 + 2.f/945.f * x4 - 1.f/4725.f * x6); } float e[3] = {0,0,0}; for (int i = 0; i < n; i++) { e[0] += (o[i] - o0[i]) * (o[i] - o0[i]); e[1] += (o[i] - o1[i]) * (o[i] - o1[i]); e[2] += (o[i] - o2[i]) * (o[i] - o2[i]); } printf("\nError statistics for the kernels:\n"); for (int i = 0; i < 3; i++) { printf("%f ", sqrt(e[i])); } printf("\n"); free(a); free(o); free(o0); free(o1); free(o2); hipFree(d_a); hipFree(d_o0); hipFree(d_o1); hipFree(d_o2); return 0; }
32363c8dd08092c3a6fd437438582f9f19238fab.cu
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <chrono> #include <cuda.h> __global__ void k0 (const float *__restrict__ a, float *__restrict__ o) { int t = blockIdx.x * blockDim.x + threadIdx.x; float x = a[t]; o[t] = coshf(x)/sinhf(x) - 1.f/x; } __global__ void k1 (const float *__restrict__ a, float *__restrict__ o) { int t = blockIdx.x * blockDim.x + threadIdx.x; float x = a[t]; o[t] = 1.f / tanhf(x) - 1.f/x; } /* Copyright (c) 2018-2021, Norbert Juffa All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ __global__ void k2 (const float *__restrict__ a, float *__restrict__ o) { int t = blockIdx.x * blockDim.x + threadIdx.x; float x = a[t]; float s, r; s = x * x; r = 7.70960469e-8f; r = fmaf (r, s, -1.65101926e-6f); r = fmaf (r, s, 2.03457112e-5f); r = fmaf (r, s, -2.10521728e-4f); r = fmaf (r, s, 2.11580913e-3f); r = fmaf (r, s, -2.22220998e-2f); r = fmaf (r, s, 8.33333284e-2f); r = fmaf (r, x, 0.25f * x); o[t] = r; } int main(int argc, char* argv[]) { if (argc != 3) { printf("Usage %s <n> <repeat>\n", argv[0]); return 1; } const int n = atoi(argv[1]); const int repeat = atoi(argv[2]); const size_t size = sizeof(float) * n; float *a, *o, *o0, *o1, *o2; a = (float*) malloc (size); o = (float*) malloc (size); // the range [-1.8, -0.00001) for (int i = 0; i < n; i++) { a[i] = -1.8f + i * (1.79999f / n); } o0 = (float*) malloc (size); o1 = (float*) malloc (size); o2 = (float*) malloc (size); float *d_a, *d_o0, *d_o1, *d_o2; cudaMalloc((void**)&d_a, size); cudaMalloc((void**)&d_o0, size); cudaMalloc((void**)&d_o1, size); cudaMalloc((void**)&d_o2, size); cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice); cudaDeviceSynchronize(); auto start = std::chrono::steady_clock::now(); for (int i = 0; i < repeat; i++) { k0<<<n/256, 256>>>(d_a, d_o0); } cudaDeviceSynchronize(); auto end = std::chrono::steady_clock::now(); auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); printf("Average execution time of k0: %f (s)\n", (time * 1e-9f) / repeat); start = std::chrono::steady_clock::now(); for (int i = 0; i < repeat; i++) { k1<<<n/256, 256>>>(d_a, d_o1); } cudaDeviceSynchronize(); end = std::chrono::steady_clock::now(); time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); printf("Average execution time of k1: %f (s)\n", (time * 1e-9f) / repeat); start = std::chrono::steady_clock::now(); for (int i = 0; i < repeat; i++) { k2<<<n/256, 256>>>(d_a, d_o2); } cudaDeviceSynchronize(); end = std::chrono::steady_clock::now(); 
time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); printf("Average execution time of k2: %f (s)\n", (time * 1e-9f) / repeat); cudaMemcpy(o0, d_o0, size, cudaMemcpyDeviceToHost); cudaMemcpy(o1, d_o1, size, cudaMemcpyDeviceToHost); cudaMemcpy(o2, d_o2, size, cudaMemcpyDeviceToHost); // https://en.wikipedia.org/wiki/Brillouin_and_Langevin_functions for (int i = 0; i < n; i++) { float x = a[i]; float x2 = x * x; float x4 = x2 * x2; float x6 = x4 * x2; o[i] = x * (1.f/3.f - 1.f/45.f * x2 + 2.f/945.f * x4 - 1.f/4725.f * x6); } float e[3] = {0,0,0}; for (int i = 0; i < n; i++) { e[0] += (o[i] - o0[i]) * (o[i] - o0[i]); e[1] += (o[i] - o1[i]) * (o[i] - o1[i]); e[2] += (o[i] - o2[i]) * (o[i] - o2[i]); } printf("\nError statistics for the kernels:\n"); for (int i = 0; i < 3; i++) { printf("%f ", sqrt(e[i])); } printf("\n"); free(a); free(o); free(o0); free(o1); free(o2); cudaFree(d_a); cudaFree(d_o0); cudaFree(d_o1); cudaFree(d_o2); return 0; }
993e39e39d4d114b52132ef91d9c0b2cb650cbf5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "EA_CUDA.h" #define CUDA_ERROR_CHECK #define CudaSafeCall( err ) __cudaSafeCall( err, __FILE__, __LINE__ ) #define CudaCheckError() __cudaCheckError( __FILE__, __LINE__ ) #define MAX_R_SIZE 5 inline void __cudaSafeCall( hipError_t err, const char *file, const int line ) { #ifdef CUDA_ERROR_CHECK if ( hipSuccess != err ) { fprintf( stderr, "cudaSafeCall() failed at %s:%i : %s\n", file, line, hipGetErrorString( err ) ); exit( -1 ); } #endif return; } inline void __cudaCheckError( const char *file, const int line ) { #ifdef CUDA_ERROR_CHECK hipError_t err = hipGetLastError(); if ( hipSuccess != err ) { fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n", file, line, hipGetErrorString( err ) ); exit( -1 ); } // More careful checking. However, this will affect performance. // Comment away if needed. err = hipDeviceSynchronize(); if( hipSuccess != err ) { fprintf( stderr, "cudaCheckError() with sync failed at %s:%i : %s\n", file, line, hipGetErrorString( err ) ); exit( -1 ); } #endif return; } int CalConfigurationsDE(dim3 & block_dim, dim3 & grid_dim, GPU_IslandInfo GPU_island_info) { int total_individual_num = GPU_island_info.island_size; if (MAX_THREAD / WARP_SIZE < total_individual_num) { block_dim = dim3(WARP_SIZE, MAX_THREAD / WARP_SIZE, 1); grid_dim = dim3(total_individual_num / block_dim.y, 1, 1); } else { block_dim = dim3(WARP_SIZE, total_individual_num, 1); grid_dim = dim3(1, 1, 1); } return 0; } inline __device__ __host__ int NextPow2(int x) { int y = 1; while (y < x) y <<= 1; if (y < WARP_SIZE) y = WARP_SIZE; return y; } static __device__ __forceinline__ int device_ParallelBest(real * vector, int * index) { __syncwarp(); if(threadIdx.x < WARP_SIZE / 2) if(vector[threadIdx.x] > vector[threadIdx.x + WARP_SIZE / 2]) { vector[threadIdx.x] = vector[threadIdx.x + WARP_SIZE / 2]; index[threadIdx.x] = index[threadIdx.x + WARP_SIZE / 2]; } __syncwarp(); 
if(threadIdx.x < WARP_SIZE / 4) if(vector[threadIdx.x] > vector[threadIdx.x + WARP_SIZE / 4]) { vector[threadIdx.x] = vector[threadIdx.x + WARP_SIZE / 4]; index[threadIdx.x] = index[threadIdx.x + WARP_SIZE / 4]; } __syncwarp(); if(threadIdx.x < WARP_SIZE / 8) if(vector[threadIdx.x] > vector[threadIdx.x + WARP_SIZE / 8]) { vector[threadIdx.x] = vector[threadIdx.x + WARP_SIZE / 8]; index[threadIdx.x] = index[threadIdx.x + WARP_SIZE / 8]; } __syncwarp(); if(threadIdx.x < WARP_SIZE / 16) if(vector[threadIdx.x] > vector[threadIdx.x + WARP_SIZE / 16]) { vector[threadIdx.x] = vector[threadIdx.x + WARP_SIZE / 16]; index[threadIdx.x] = index[threadIdx.x + WARP_SIZE / 16]; } __syncwarp(); if(threadIdx.x < WARP_SIZE / 32) if(vector[threadIdx.x] > vector[threadIdx.x + WARP_SIZE / 32]) { vector[threadIdx.x] = vector[threadIdx.x + WARP_SIZE / 32]; index[threadIdx.x] = index[threadIdx.x + WARP_SIZE / 32]; } return 0; } __global__ void global_DE_FindBestIndividualInIsland(int * d_best_individual_ID, GPU_Population d_population, GPU_IslandInfo d_island_info, ProblemInfo problem_info) { int island_ID = blockIdx.x; int subpop_size = d_island_info.island_size / d_island_info.subisland_num; int next_pow2_pop = NextPow2(subpop_size); int loop_times_pop = next_pow2_pop / WARP_SIZE; __shared__ real sh_fitness[WARP_SIZE]; __shared__ int sh_index[WARP_SIZE]; int thread_ID = threadIdx.x; sh_fitness[thread_ID] = 1e20; sh_index[thread_ID] = -1; int sort_individual_ID = d_island_info.permutated_index[thread_ID]; if (thread_ID < subpop_size) { sh_fitness[threadIdx.x] = d_population.fitness_value[sort_individual_ID]; sh_index[threadIdx.x] = sort_individual_ID; } for (int i = 1; i < loop_times_pop; i++) { thread_ID = threadIdx.x + i * WARP_SIZE; sort_individual_ID = d_island_info.permutated_index[thread_ID]; if (thread_ID < subpop_size) { real tmp_value = d_population.fitness_value[sort_individual_ID]; if (sh_fitness[threadIdx.x] > tmp_value) { sh_fitness[threadIdx.x] = tmp_value; 
sh_index[threadIdx.x] = sort_individual_ID; } } } device_ParallelBest(sh_fitness, sh_index); if(threadIdx.x == 0) d_best_individual_ID[island_ID] = sh_index[0]; } __device__ __forceinline__ real device_CheckBound(real to_check_elements, real min_bound, real max_bound) { while ((to_check_elements < min_bound) || (to_check_elements > max_bound)) { if (to_check_elements < min_bound) to_check_elements = min_bound + (min_bound - to_check_elements); if (to_check_elements > max_bound) to_check_elements = max_bound - (to_check_elements - max_bound); } return to_check_elements; } __global__ void global_DE_GenerateNewPopulation(GPU_Population d_candidate, GPU_Population d_population, int * d_best_individual_ID, GPU_IslandInfo d_island_info, DEInfo *d_DE_info, hiprandState_t *d_rand_states, \ ProblemInfo problem_info) { int dim = problem_info.dim; int individual_ID = threadIdx.y + blockIdx.x * blockDim.y; int selected_individual_ID = d_island_info.permutated_index[individual_ID]; int subpop_size = d_island_info.island_size / d_island_info.subisland_num; int next_pow2_dim = NextPow2(dim); int loop_times = next_pow2_dim / WARP_SIZE; int island_ID = individual_ID / subpop_size; int local_r[MAX_R_SIZE] = {0}; int random_state_ID = individual_ID * WARP_SIZE; hiprandState_t local_state = d_rand_states[random_state_ID]; for(int i = 0; i < MAX_R_SIZE; i++) { #ifdef GPU_DOUBLE_PRECISION local_r[i] = (int)(hiprand_uniform_double(&local_state) * subpop_size); #endif #ifdef GPU_SINGLE_PRECISION local_r[i] = (int)(hiprand_uniform(&local_state) * subpop_size); #endif if(local_r[i] == subpop_size) local_r[i]--; for(int j = 0; j < i; j++) { while(local_r[i] == local_r[j]) { #ifdef GPU_DOUBLE_PRECISION local_r[i] = (int)(hiprand_uniform_double(&local_state) * subpop_size); #endif #ifdef GPU_SINGLE_PRECISION local_r[i] = (int)(hiprand_uniform(&local_state) * subpop_size); #endif } } if(local_r[i] == subpop_size) local_r[i]--; } for(int i = 0; i < MAX_R_SIZE; i++) { local_r[i] = 
d_island_info.permutated_index[local_r[i] + island_ID * subpop_size]; } #ifdef GPU_DOUBLE_PRECISION int local_j = (int) (hiprand_uniform_double(&local_state) * dim); #endif #ifdef GPU_SINGLE_PRECISION int local_j = (int) (hiprand_uniform (&local_state) * dim); #endif if(threadIdx.x == 0) d_rand_states[random_state_ID] = local_state; random_state_ID = threadIdx.x + individual_ID * WARP_SIZE; local_state = d_rand_states[random_state_ID]; real F = d_DE_info[island_ID].F; real local_candidate_elements = 0; for(int i = 0; i < loop_times; i++) { int element_ID = threadIdx.x + i * WARP_SIZE; #ifdef GPU_DOUBLE_PRECISION real rand_CR = hiprand_uniform_double(&local_state); #endif #ifdef GPU_SINGLE_PRECISION real rand_CR = hiprand_uniform (&local_state); #endif if(element_ID < dim) { if ((element_ID == local_j) || (rand_CR < d_DE_info[island_ID].CR)) { if(d_DE_info[island_ID].strategy_ID == 0) local_candidate_elements = d_population.elements[element_ID + selected_individual_ID * next_pow2_dim] + \ F * (d_population.elements[element_ID + local_r[0] * next_pow2_dim] - d_population.elements[element_ID + local_r[1] * next_pow2_dim]); if(d_DE_info[island_ID].strategy_ID == 1) local_candidate_elements = d_population.elements[element_ID + selected_individual_ID * next_pow2_dim] + \ F * (d_population.elements[element_ID + local_r[0] * next_pow2_dim] - d_population.elements[element_ID + local_r[1] * next_pow2_dim]) + \ F * (d_population.elements[element_ID + local_r[2] * next_pow2_dim] - d_population.elements[element_ID + local_r[3] * next_pow2_dim]); if(d_DE_info[island_ID].strategy_ID == 2) local_candidate_elements = d_population.elements[element_ID + selected_individual_ID * next_pow2_dim] + \ F * (d_population.elements[element_ID + d_best_individual_ID[island_ID] * next_pow2_dim] - d_population.elements[element_ID + selected_individual_ID * next_pow2_dim]) +\ F * (d_population.elements[element_ID + local_r[0] * next_pow2_dim] - d_population.elements[element_ID + local_r[1] * 
next_pow2_dim]); if(d_DE_info[island_ID].strategy_ID == 3) local_candidate_elements = d_population.elements[element_ID + selected_individual_ID * next_pow2_dim] + \ F * (d_population.elements[element_ID + d_best_individual_ID[island_ID] * next_pow2_dim] - d_population.elements[element_ID + selected_individual_ID * next_pow2_dim]) +\ F * (d_population.elements[element_ID + local_r[0] * next_pow2_dim] - d_population.elements[element_ID + local_r[1] * next_pow2_dim]) +\ F * (d_population.elements[element_ID + local_r[1] * next_pow2_dim] - d_population.elements[element_ID + local_r[2] * next_pow2_dim]); if(d_DE_info[island_ID].strategy_ID == 4) local_candidate_elements = d_population.elements[element_ID + local_r[0] * next_pow2_dim] + \ F * (d_population.elements[element_ID + local_r[1] * next_pow2_dim] - d_population.elements[element_ID + local_r[2] * next_pow2_dim]); if(d_DE_info[island_ID].strategy_ID == 5) local_candidate_elements = d_population.elements[element_ID + local_r[0] * next_pow2_dim] + \ F * (d_population.elements[element_ID + local_r[1] * next_pow2_dim] - d_population.elements[element_ID + local_r[2] * next_pow2_dim]) +\ F * (d_population.elements[element_ID + local_r[3] * next_pow2_dim] - d_population.elements[element_ID + local_r[4] * next_pow2_dim]); if(d_DE_info[island_ID].strategy_ID == 6) local_candidate_elements = d_population.elements[element_ID + d_best_individual_ID[island_ID] * next_pow2_dim] + \ F * (d_population.elements[element_ID + local_r[0] * next_pow2_dim] - d_population.elements[element_ID + local_r[1] * next_pow2_dim]); if(d_DE_info[island_ID].strategy_ID == 7) local_candidate_elements = d_population.elements[element_ID + d_best_individual_ID[island_ID] * next_pow2_dim] + \ F * (d_population.elements[element_ID + local_r[0] * next_pow2_dim] - d_population.elements[element_ID + local_r[1] * next_pow2_dim]) +\ F * (d_population.elements[element_ID + local_r[2] * next_pow2_dim] - d_population.elements[element_ID + local_r[3] * 
next_pow2_dim]); if(d_DE_info[island_ID].strategy_ID == 8) local_candidate_elements = d_population.elements[element_ID + selected_individual_ID * next_pow2_dim] + \ F * (d_population.elements[element_ID + local_r[0] * next_pow2_dim] - d_population.elements[element_ID + selected_individual_ID * next_pow2_dim]) +\ F * (d_population.elements[element_ID + local_r[1] * next_pow2_dim] - d_population.elements[element_ID + local_r[2] * next_pow2_dim]); } else { local_candidate_elements = d_population.elements[element_ID + selected_individual_ID * next_pow2_dim]; } local_candidate_elements = device_CheckBound(local_candidate_elements, problem_info.min_bound, problem_info.max_bound); d_candidate.elements[element_ID + selected_individual_ID * next_pow2_dim] = local_candidate_elements; } } d_rand_states[random_state_ID] = local_state; } extern "C" int API_DE_GenerateNewPopulation(GPU_Population d_candidate, GPU_Population d_population, int * d_best_individual_ID, GPU_IslandInfo d_island_info, DEInfo *d_DE_info, hiprandState_t *d_rand_states, ProblemInfo problem_info) { dim3 block_dim, grid_dim; global_DE_FindBestIndividualInIsland<< <dim3(d_island_info.subisland_num, 1, 1), dim3(WARP_SIZE, 1, 1) >> >(d_best_individual_ID, d_population, d_island_info, problem_info); CudaCheckError(); CalConfigurationsDE(block_dim, grid_dim, d_island_info); global_DE_GenerateNewPopulation << <grid_dim, block_dim >> >(d_candidate, d_population, d_best_individual_ID, d_island_info, d_DE_info, d_rand_states, problem_info); CudaCheckError(); return 0; } __global__ void global_DE_SelectSurvival(GPU_Population d_population, GPU_Population d_candidate, ProblemInfo problem_info) { int dim = problem_info.dim; int individual_ID = threadIdx.y + blockIdx.x * blockDim.y; int next_pow2_dim = NextPow2(dim); int loop_times = next_pow2_dim / WARP_SIZE; if(d_candidate.fitness_value[individual_ID] < d_population.fitness_value[individual_ID]) { for(int i = 0; i < loop_times; i++) { int element_ID = threadIdx.x + i 
* WARP_SIZE; if(element_ID < dim) d_population.elements[element_ID + individual_ID * next_pow2_dim] = d_candidate.elements[element_ID + individual_ID * next_pow2_dim]; } if (threadIdx.x == 0) { d_population.fitness_value[individual_ID] = d_candidate.fitness_value[individual_ID]; } } } extern "C" int API_DE_SelectSurvival(GPU_Population d_population, GPU_Population d_candidate, GPU_IslandInfo GPU_island_info, ProblemInfo problem_info) { dim3 block_dim, grid_dim; CalConfigurationsDE(block_dim, grid_dim, GPU_island_info); global_DE_SelectSurvival << <grid_dim, block_dim >> >(d_population, d_candidate, problem_info); CudaCheckError(); return 0; } /* static __device__ __forceinline__ real device_ParallelSum(real * vector) { if(threadIdx.x < WARP_SIZE / 2) vector[threadIdx.x] += vector[threadIdx.x + WARP_SIZE / 2]; if(threadIdx.x < WARP_SIZE / 4) vector[threadIdx.x] += vector[threadIdx.x + WARP_SIZE / 4]; if(threadIdx.x < WARP_SIZE / 8) vector[threadIdx.x] += vector[threadIdx.x + WARP_SIZE / 8]; if(threadIdx.x < WARP_SIZE / 16) vector[threadIdx.x] += vector[threadIdx.x + WARP_SIZE / 16]; if(threadIdx.x < WARP_SIZE / 32) vector[threadIdx.x] += vector[threadIdx.x + WARP_SIZE / 32]; return vector[0]; }; static __device__ __forceinline__ real device_ParallelBest(real * vector) { if(threadIdx.x < WARP_SIZE / 2) vector[threadIdx.x] = vector[threadIdx.x] > vector[threadIdx.x + WARP_SIZE / 2] ? vector[threadIdx.x + WARP_SIZE / 2] : vector[threadIdx.x] if(threadIdx.x < WARP_SIZE / 4) vector[threadIdx.x] = vector[threadIdx.x] > vector[threadIdx.x + WARP_SIZE / 4] ? vector[threadIdx.x + WARP_SIZE / 4] : vector[threadIdx.x] if(threadIdx.x < WARP_SIZE / 8) vector[threadIdx.x] = vector[threadIdx.x] > vector[threadIdx.x + WARP_SIZE / 8] ? vector[threadIdx.x + WARP_SIZE / 8] : vector[threadIdx.x] if(threadIdx.x < WARP_SIZE / 16) vector[threadIdx.x] = vector[threadIdx.x] > vector[threadIdx.x + WARP_SIZE / 16] ? 
vector[threadIdx.x + WARP_SIZE / 16] : vector[threadIdx.x] if(threadIdx.x < WARP_SIZE / 32) vector[threadIdx.x] = vector[threadIdx.x] > vector[threadIdx.x + WARP_SIZE / 32] ? vector[threadIdx.x + WARP_SIZE / 32] : vector[threadIdx.x] return vector[0]; }; extern __shared__ real sh_mem[]; __global__ void global_CalulateMatrixDistance(real * d_distance, real * d_elements, int pop_size, int dim) { int next_pow2_dim = NextPow2(problem_info.dim); int loop_times_dim = next_pow2_dim / WARP_SIZE; int loop_times_individual = pop_size / blockDim.y; int from_individual_ID = blockIdx.x; int to_individual_ID = 0; real sh_elements = sh_mem; real sh_sumed_value = sh_mem + WARP_SIZE; for(int i = 0; i < loop_times_individual; i++) { to_individual_ID = threadIdx.y + i * blockDim.y; sh_sumed_value[threadIdx.x + threadIdx.y * WARP_SIZE] = 0; for(int j = 0; j < loop_times_dim; j++) { int element_ID = threadIdx.x + j * WARP_SIZE; if(threadIdx.y == 0) sh_elements[threadIdx.x] = d_elements[element_ID + from_individual_ID * next_pow2_dim]; real tmp1 = sh_elements[threadIdx.x]; real tmp2 = d_elements[element_ID + to_individual_ID * next_pow2_dim]; tmp1 = (tmp1 - tmp2) * (tmp1 - tmp2) sh_sumed_value[threadIdx.x + threadIdx.y * WARP_SIZE] += tmp1; } d_distance[to_individual_ID + from_individual_ID * pop_size] = device_ParallelSum(sh_sumed_value + threadIdx.y * WARP_SIZE); } } __global__ void global_FindNearest(int *d_nearest_index, real * d_distance, int pop_size) { int next_pow2_dim = NextPow2(problem_info.dim); int loop_times_individual = pop_size / WARP_SIZE; int individual_ID = threadIdx.y + blockIdx.x * blockDim.y; int local_individual_ID = threadIdx.x; real sh_distance = sh_mem; sh_distance[local_individual_ID] = d_distance[local_individual_ID + individual_ID * pop_size]; for(int i = 1; i < loop_times_individual; i++) { local_individual_ID = threadIdx.x + i * WARP_SIZE; if(sh_distance[local_individual_ID] > d_distance[local_individual_ID + individual_ID * pop_size]) 
sh_distance[threadIdx.x + individual_ID * WARP_SIZE] = d_distance[local_individual_ID + individual_ID * pop_size]; } nearest_index[individual_ID] = device_ParallelBest(sh_distance + threadIdx.y * WARP_SIZE); } int FindNearestIndividualIndex(int *nearest_index, real * d_elements, real * d_distance, int pop_size, int dim) { dim3 block1(WARP_SIZE, WARP_SIZE / 2 , 1); dim3 grid1(pop_size, 1, 1); global_CalulateMatrixDistance<<<grid1, block1, WARP_SIZE * (block1.y + 1) * sizeof(real)>>>(d_distance, d_elements, pop_size, dim); int tmp_block_y = MAX_THREAD / 2 < pop_size ? MAX_THREAD / 2 : pop_size; dim3 block2(WARP_SIZE, tmp_block_y, 1); dim3 grid2(1, pop_size / tmp_block_y, 1); global_FindNeares<<<grid2, block2, WARP_SIZE * tmp_block_y * sizeof(real)>>>(d_nearest_index, d_elements, pop_size); return 0; } */
993e39e39d4d114b52132ef91d9c0b2cb650cbf5.cu
#include "EA_CUDA.h" #define CUDA_ERROR_CHECK #define CudaSafeCall( err ) __cudaSafeCall( err, __FILE__, __LINE__ ) #define CudaCheckError() __cudaCheckError( __FILE__, __LINE__ ) #define MAX_R_SIZE 5 inline void __cudaSafeCall( cudaError err, const char *file, const int line ) { #ifdef CUDA_ERROR_CHECK if ( cudaSuccess != err ) { fprintf( stderr, "cudaSafeCall() failed at %s:%i : %s\n", file, line, cudaGetErrorString( err ) ); exit( -1 ); } #endif return; } inline void __cudaCheckError( const char *file, const int line ) { #ifdef CUDA_ERROR_CHECK cudaError err = cudaGetLastError(); if ( cudaSuccess != err ) { fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n", file, line, cudaGetErrorString( err ) ); exit( -1 ); } // More careful checking. However, this will affect performance. // Comment away if needed. err = cudaDeviceSynchronize(); if( cudaSuccess != err ) { fprintf( stderr, "cudaCheckError() with sync failed at %s:%i : %s\n", file, line, cudaGetErrorString( err ) ); exit( -1 ); } #endif return; } int CalConfigurationsDE(dim3 & block_dim, dim3 & grid_dim, GPU_IslandInfo GPU_island_info) { int total_individual_num = GPU_island_info.island_size; if (MAX_THREAD / WARP_SIZE < total_individual_num) { block_dim = dim3(WARP_SIZE, MAX_THREAD / WARP_SIZE, 1); grid_dim = dim3(total_individual_num / block_dim.y, 1, 1); } else { block_dim = dim3(WARP_SIZE, total_individual_num, 1); grid_dim = dim3(1, 1, 1); } return 0; } inline __device__ __host__ int NextPow2(int x) { int y = 1; while (y < x) y <<= 1; if (y < WARP_SIZE) y = WARP_SIZE; return y; } static __device__ __forceinline__ int device_ParallelBest(real * vector, int * index) { __syncwarp(); if(threadIdx.x < WARP_SIZE / 2) if(vector[threadIdx.x] > vector[threadIdx.x + WARP_SIZE / 2]) { vector[threadIdx.x] = vector[threadIdx.x + WARP_SIZE / 2]; index[threadIdx.x] = index[threadIdx.x + WARP_SIZE / 2]; } __syncwarp(); if(threadIdx.x < WARP_SIZE / 4) if(vector[threadIdx.x] > vector[threadIdx.x + WARP_SIZE / 4]) { 
vector[threadIdx.x] = vector[threadIdx.x + WARP_SIZE / 4]; index[threadIdx.x] = index[threadIdx.x + WARP_SIZE / 4]; } __syncwarp(); if(threadIdx.x < WARP_SIZE / 8) if(vector[threadIdx.x] > vector[threadIdx.x + WARP_SIZE / 8]) { vector[threadIdx.x] = vector[threadIdx.x + WARP_SIZE / 8]; index[threadIdx.x] = index[threadIdx.x + WARP_SIZE / 8]; } __syncwarp(); if(threadIdx.x < WARP_SIZE / 16) if(vector[threadIdx.x] > vector[threadIdx.x + WARP_SIZE / 16]) { vector[threadIdx.x] = vector[threadIdx.x + WARP_SIZE / 16]; index[threadIdx.x] = index[threadIdx.x + WARP_SIZE / 16]; } __syncwarp(); if(threadIdx.x < WARP_SIZE / 32) if(vector[threadIdx.x] > vector[threadIdx.x + WARP_SIZE / 32]) { vector[threadIdx.x] = vector[threadIdx.x + WARP_SIZE / 32]; index[threadIdx.x] = index[threadIdx.x + WARP_SIZE / 32]; } return 0; } __global__ void global_DE_FindBestIndividualInIsland(int * d_best_individual_ID, GPU_Population d_population, GPU_IslandInfo d_island_info, ProblemInfo problem_info) { int island_ID = blockIdx.x; int subpop_size = d_island_info.island_size / d_island_info.subisland_num; int next_pow2_pop = NextPow2(subpop_size); int loop_times_pop = next_pow2_pop / WARP_SIZE; __shared__ real sh_fitness[WARP_SIZE]; __shared__ int sh_index[WARP_SIZE]; int thread_ID = threadIdx.x; sh_fitness[thread_ID] = 1e20; sh_index[thread_ID] = -1; int sort_individual_ID = d_island_info.permutated_index[thread_ID]; if (thread_ID < subpop_size) { sh_fitness[threadIdx.x] = d_population.fitness_value[sort_individual_ID]; sh_index[threadIdx.x] = sort_individual_ID; } for (int i = 1; i < loop_times_pop; i++) { thread_ID = threadIdx.x + i * WARP_SIZE; sort_individual_ID = d_island_info.permutated_index[thread_ID]; if (thread_ID < subpop_size) { real tmp_value = d_population.fitness_value[sort_individual_ID]; if (sh_fitness[threadIdx.x] > tmp_value) { sh_fitness[threadIdx.x] = tmp_value; sh_index[threadIdx.x] = sort_individual_ID; } } } device_ParallelBest(sh_fitness, sh_index); if(threadIdx.x == 
0) d_best_individual_ID[island_ID] = sh_index[0]; } __device__ __forceinline__ real device_CheckBound(real to_check_elements, real min_bound, real max_bound) { while ((to_check_elements < min_bound) || (to_check_elements > max_bound)) { if (to_check_elements < min_bound) to_check_elements = min_bound + (min_bound - to_check_elements); if (to_check_elements > max_bound) to_check_elements = max_bound - (to_check_elements - max_bound); } return to_check_elements; } __global__ void global_DE_GenerateNewPopulation(GPU_Population d_candidate, GPU_Population d_population, int * d_best_individual_ID, GPU_IslandInfo d_island_info, DEInfo *d_DE_info, curandState *d_rand_states, \ ProblemInfo problem_info) { int dim = problem_info.dim; int individual_ID = threadIdx.y + blockIdx.x * blockDim.y; int selected_individual_ID = d_island_info.permutated_index[individual_ID]; int subpop_size = d_island_info.island_size / d_island_info.subisland_num; int next_pow2_dim = NextPow2(dim); int loop_times = next_pow2_dim / WARP_SIZE; int island_ID = individual_ID / subpop_size; int local_r[MAX_R_SIZE] = {0}; int random_state_ID = individual_ID * WARP_SIZE; curandState local_state = d_rand_states[random_state_ID]; for(int i = 0; i < MAX_R_SIZE; i++) { #ifdef GPU_DOUBLE_PRECISION local_r[i] = (int)(curand_uniform_double(&local_state) * subpop_size); #endif #ifdef GPU_SINGLE_PRECISION local_r[i] = (int)(curand_uniform(&local_state) * subpop_size); #endif if(local_r[i] == subpop_size) local_r[i]--; for(int j = 0; j < i; j++) { while(local_r[i] == local_r[j]) { #ifdef GPU_DOUBLE_PRECISION local_r[i] = (int)(curand_uniform_double(&local_state) * subpop_size); #endif #ifdef GPU_SINGLE_PRECISION local_r[i] = (int)(curand_uniform(&local_state) * subpop_size); #endif } } if(local_r[i] == subpop_size) local_r[i]--; } for(int i = 0; i < MAX_R_SIZE; i++) { local_r[i] = d_island_info.permutated_index[local_r[i] + island_ID * subpop_size]; } #ifdef GPU_DOUBLE_PRECISION int local_j = (int) 
(curand_uniform_double(&local_state) * dim); #endif #ifdef GPU_SINGLE_PRECISION int local_j = (int) (curand_uniform (&local_state) * dim); #endif if(threadIdx.x == 0) d_rand_states[random_state_ID] = local_state; random_state_ID = threadIdx.x + individual_ID * WARP_SIZE; local_state = d_rand_states[random_state_ID]; real F = d_DE_info[island_ID].F; real local_candidate_elements = 0; for(int i = 0; i < loop_times; i++) { int element_ID = threadIdx.x + i * WARP_SIZE; #ifdef GPU_DOUBLE_PRECISION real rand_CR = curand_uniform_double(&local_state); #endif #ifdef GPU_SINGLE_PRECISION real rand_CR = curand_uniform (&local_state); #endif if(element_ID < dim) { if ((element_ID == local_j) || (rand_CR < d_DE_info[island_ID].CR)) { if(d_DE_info[island_ID].strategy_ID == 0) local_candidate_elements = d_population.elements[element_ID + selected_individual_ID * next_pow2_dim] + \ F * (d_population.elements[element_ID + local_r[0] * next_pow2_dim] - d_population.elements[element_ID + local_r[1] * next_pow2_dim]); if(d_DE_info[island_ID].strategy_ID == 1) local_candidate_elements = d_population.elements[element_ID + selected_individual_ID * next_pow2_dim] + \ F * (d_population.elements[element_ID + local_r[0] * next_pow2_dim] - d_population.elements[element_ID + local_r[1] * next_pow2_dim]) + \ F * (d_population.elements[element_ID + local_r[2] * next_pow2_dim] - d_population.elements[element_ID + local_r[3] * next_pow2_dim]); if(d_DE_info[island_ID].strategy_ID == 2) local_candidate_elements = d_population.elements[element_ID + selected_individual_ID * next_pow2_dim] + \ F * (d_population.elements[element_ID + d_best_individual_ID[island_ID] * next_pow2_dim] - d_population.elements[element_ID + selected_individual_ID * next_pow2_dim]) +\ F * (d_population.elements[element_ID + local_r[0] * next_pow2_dim] - d_population.elements[element_ID + local_r[1] * next_pow2_dim]); if(d_DE_info[island_ID].strategy_ID == 3) local_candidate_elements = d_population.elements[element_ID + 
selected_individual_ID * next_pow2_dim] + \ F * (d_population.elements[element_ID + d_best_individual_ID[island_ID] * next_pow2_dim] - d_population.elements[element_ID + selected_individual_ID * next_pow2_dim]) +\ F * (d_population.elements[element_ID + local_r[0] * next_pow2_dim] - d_population.elements[element_ID + local_r[1] * next_pow2_dim]) +\ F * (d_population.elements[element_ID + local_r[1] * next_pow2_dim] - d_population.elements[element_ID + local_r[2] * next_pow2_dim]); if(d_DE_info[island_ID].strategy_ID == 4) local_candidate_elements = d_population.elements[element_ID + local_r[0] * next_pow2_dim] + \ F * (d_population.elements[element_ID + local_r[1] * next_pow2_dim] - d_population.elements[element_ID + local_r[2] * next_pow2_dim]); if(d_DE_info[island_ID].strategy_ID == 5) local_candidate_elements = d_population.elements[element_ID + local_r[0] * next_pow2_dim] + \ F * (d_population.elements[element_ID + local_r[1] * next_pow2_dim] - d_population.elements[element_ID + local_r[2] * next_pow2_dim]) +\ F * (d_population.elements[element_ID + local_r[3] * next_pow2_dim] - d_population.elements[element_ID + local_r[4] * next_pow2_dim]); if(d_DE_info[island_ID].strategy_ID == 6) local_candidate_elements = d_population.elements[element_ID + d_best_individual_ID[island_ID] * next_pow2_dim] + \ F * (d_population.elements[element_ID + local_r[0] * next_pow2_dim] - d_population.elements[element_ID + local_r[1] * next_pow2_dim]); if(d_DE_info[island_ID].strategy_ID == 7) local_candidate_elements = d_population.elements[element_ID + d_best_individual_ID[island_ID] * next_pow2_dim] + \ F * (d_population.elements[element_ID + local_r[0] * next_pow2_dim] - d_population.elements[element_ID + local_r[1] * next_pow2_dim]) +\ F * (d_population.elements[element_ID + local_r[2] * next_pow2_dim] - d_population.elements[element_ID + local_r[3] * next_pow2_dim]); if(d_DE_info[island_ID].strategy_ID == 8) local_candidate_elements = d_population.elements[element_ID + 
selected_individual_ID * next_pow2_dim] + \ F * (d_population.elements[element_ID + local_r[0] * next_pow2_dim] - d_population.elements[element_ID + selected_individual_ID * next_pow2_dim]) +\ F * (d_population.elements[element_ID + local_r[1] * next_pow2_dim] - d_population.elements[element_ID + local_r[2] * next_pow2_dim]); } else { local_candidate_elements = d_population.elements[element_ID + selected_individual_ID * next_pow2_dim]; } local_candidate_elements = device_CheckBound(local_candidate_elements, problem_info.min_bound, problem_info.max_bound); d_candidate.elements[element_ID + selected_individual_ID * next_pow2_dim] = local_candidate_elements; } } d_rand_states[random_state_ID] = local_state; } extern "C" int API_DE_GenerateNewPopulation(GPU_Population d_candidate, GPU_Population d_population, int * d_best_individual_ID, GPU_IslandInfo d_island_info, DEInfo *d_DE_info, curandState *d_rand_states, ProblemInfo problem_info) { dim3 block_dim, grid_dim; global_DE_FindBestIndividualInIsland<< <dim3(d_island_info.subisland_num, 1, 1), dim3(WARP_SIZE, 1, 1) >> >(d_best_individual_ID, d_population, d_island_info, problem_info); CudaCheckError(); CalConfigurationsDE(block_dim, grid_dim, d_island_info); global_DE_GenerateNewPopulation << <grid_dim, block_dim >> >(d_candidate, d_population, d_best_individual_ID, d_island_info, d_DE_info, d_rand_states, problem_info); CudaCheckError(); return 0; } __global__ void global_DE_SelectSurvival(GPU_Population d_population, GPU_Population d_candidate, ProblemInfo problem_info) { int dim = problem_info.dim; int individual_ID = threadIdx.y + blockIdx.x * blockDim.y; int next_pow2_dim = NextPow2(dim); int loop_times = next_pow2_dim / WARP_SIZE; if(d_candidate.fitness_value[individual_ID] < d_population.fitness_value[individual_ID]) { for(int i = 0; i < loop_times; i++) { int element_ID = threadIdx.x + i * WARP_SIZE; if(element_ID < dim) d_population.elements[element_ID + individual_ID * next_pow2_dim] = 
d_candidate.elements[element_ID + individual_ID * next_pow2_dim]; } if (threadIdx.x == 0) { d_population.fitness_value[individual_ID] = d_candidate.fitness_value[individual_ID]; } } } extern "C" int API_DE_SelectSurvival(GPU_Population d_population, GPU_Population d_candidate, GPU_IslandInfo GPU_island_info, ProblemInfo problem_info) { dim3 block_dim, grid_dim; CalConfigurationsDE(block_dim, grid_dim, GPU_island_info); global_DE_SelectSurvival << <grid_dim, block_dim >> >(d_population, d_candidate, problem_info); CudaCheckError(); return 0; } /* static __device__ __forceinline__ real device_ParallelSum(real * vector) { if(threadIdx.x < WARP_SIZE / 2) vector[threadIdx.x] += vector[threadIdx.x + WARP_SIZE / 2]; if(threadIdx.x < WARP_SIZE / 4) vector[threadIdx.x] += vector[threadIdx.x + WARP_SIZE / 4]; if(threadIdx.x < WARP_SIZE / 8) vector[threadIdx.x] += vector[threadIdx.x + WARP_SIZE / 8]; if(threadIdx.x < WARP_SIZE / 16) vector[threadIdx.x] += vector[threadIdx.x + WARP_SIZE / 16]; if(threadIdx.x < WARP_SIZE / 32) vector[threadIdx.x] += vector[threadIdx.x + WARP_SIZE / 32]; return vector[0]; }; static __device__ __forceinline__ real device_ParallelBest(real * vector) { if(threadIdx.x < WARP_SIZE / 2) vector[threadIdx.x] = vector[threadIdx.x] > vector[threadIdx.x + WARP_SIZE / 2] ? vector[threadIdx.x + WARP_SIZE / 2] : vector[threadIdx.x] if(threadIdx.x < WARP_SIZE / 4) vector[threadIdx.x] = vector[threadIdx.x] > vector[threadIdx.x + WARP_SIZE / 4] ? vector[threadIdx.x + WARP_SIZE / 4] : vector[threadIdx.x] if(threadIdx.x < WARP_SIZE / 8) vector[threadIdx.x] = vector[threadIdx.x] > vector[threadIdx.x + WARP_SIZE / 8] ? vector[threadIdx.x + WARP_SIZE / 8] : vector[threadIdx.x] if(threadIdx.x < WARP_SIZE / 16) vector[threadIdx.x] = vector[threadIdx.x] > vector[threadIdx.x + WARP_SIZE / 16] ? vector[threadIdx.x + WARP_SIZE / 16] : vector[threadIdx.x] if(threadIdx.x < WARP_SIZE / 32) vector[threadIdx.x] = vector[threadIdx.x] > vector[threadIdx.x + WARP_SIZE / 32] ? 
vector[threadIdx.x + WARP_SIZE / 32] : vector[threadIdx.x] return vector[0]; }; extern __shared__ real sh_mem[]; __global__ void global_CalulateMatrixDistance(real * d_distance, real * d_elements, int pop_size, int dim) { int next_pow2_dim = NextPow2(problem_info.dim); int loop_times_dim = next_pow2_dim / WARP_SIZE; int loop_times_individual = pop_size / blockDim.y; int from_individual_ID = blockIdx.x; int to_individual_ID = 0; real sh_elements = sh_mem; real sh_sumed_value = sh_mem + WARP_SIZE; for(int i = 0; i < loop_times_individual; i++) { to_individual_ID = threadIdx.y + i * blockDim.y; sh_sumed_value[threadIdx.x + threadIdx.y * WARP_SIZE] = 0; for(int j = 0; j < loop_times_dim; j++) { int element_ID = threadIdx.x + j * WARP_SIZE; if(threadIdx.y == 0) sh_elements[threadIdx.x] = d_elements[element_ID + from_individual_ID * next_pow2_dim]; real tmp1 = sh_elements[threadIdx.x]; real tmp2 = d_elements[element_ID + to_individual_ID * next_pow2_dim]; tmp1 = (tmp1 - tmp2) * (tmp1 - tmp2) sh_sumed_value[threadIdx.x + threadIdx.y * WARP_SIZE] += tmp1; } d_distance[to_individual_ID + from_individual_ID * pop_size] = device_ParallelSum(sh_sumed_value + threadIdx.y * WARP_SIZE); } } __global__ void global_FindNearest(int *d_nearest_index, real * d_distance, int pop_size) { int next_pow2_dim = NextPow2(problem_info.dim); int loop_times_individual = pop_size / WARP_SIZE; int individual_ID = threadIdx.y + blockIdx.x * blockDim.y; int local_individual_ID = threadIdx.x; real sh_distance = sh_mem; sh_distance[local_individual_ID] = d_distance[local_individual_ID + individual_ID * pop_size]; for(int i = 1; i < loop_times_individual; i++) { local_individual_ID = threadIdx.x + i * WARP_SIZE; if(sh_distance[local_individual_ID] > d_distance[local_individual_ID + individual_ID * pop_size]) sh_distance[threadIdx.x + individual_ID * WARP_SIZE] = d_distance[local_individual_ID + individual_ID * pop_size]; } nearest_index[individual_ID] = device_ParallelBest(sh_distance + threadIdx.y * 
WARP_SIZE); } int FindNearestIndividualIndex(int *nearest_index, real * d_elements, real * d_distance, int pop_size, int dim) { dim3 block1(WARP_SIZE, WARP_SIZE / 2 , 1); dim3 grid1(pop_size, 1, 1); global_CalulateMatrixDistance<<<grid1, block1, WARP_SIZE * (block1.y + 1) * sizeof(real)>>>(d_distance, d_elements, pop_size, dim); int tmp_block_y = MAX_THREAD / 2 < pop_size ? MAX_THREAD / 2 : pop_size; dim3 block2(WARP_SIZE, tmp_block_y, 1); dim3 grid2(1, pop_size / tmp_block_y, 1); global_FindNeares<<<grid2, block2, WARP_SIZE * tmp_block_y * sizeof(real)>>>(d_nearest_index, d_elements, pop_size); return 0; } */
2cc7250933f71e80cf2de29505480035cb8c2aba.hip
// !!! This is a file automatically generated by hipify!!! /* * LightScan.cu * * Created on: Aug 18, 2015 * Author: Yongchao Liu * Affiliation: Gerogia Institute of Technology * Official Homepage: http://www.cc.gatech.edu/~yliu * Personal Homepage: https://sites.google.com/site/yongchaosoftware * */ #include "Options.h" #include "Scan.cuh" #include "Operator.cuh" template<typename T> bool CPUverify(T *hostData, T *hostResult, int num, int maxIters) { // cpu verify for (int iter = 0; iter < maxIters; ++iter) { for (int i = 0; i < num - 1; i++) { hostData[i + 1] = hostData[i] + hostData[i + 1]; } } T diff = 0; for (int i = 0; i < num; i++) { diff += hostData[i] - hostResult[i]; } cout << "CPU verify result diff (GPUvsCPU) = " << diff << endl; bool bTestResult = false; if (diff == 0) { bTestResult = true; } return bTestResult; } template<typename T> bool parallel_scan(Options& opt) { T *hostData, *hostResult; T *devData; utils::CommPair<T> *devPartialSums; const int numElements = opt._numElements; const hipDeviceProp_t& deviceProp = opt._deviceProps[opt._gpuIndex].second; /*set GPU*/ cout << "Use GPU: " << opt._gpuIndex << endl; hipSetDevice(opt._deviceProps[opt._gpuIndex].first); CudaCheckError(); /*host-side memory allocation*/ hipHostMalloc((void **) &hostData, sizeof(T) * numElements); CudaCheckError(); hipHostMalloc((void **) &hostResult, sizeof(T) * numElements); CudaCheckError(); //initialize data: cout << "Scan using cyclic-based approach" << endl; cout << "---------------------------------------------------" << endl; cout << "Initialize test data [1, 1, 1...] of " << numElements << " elements" << endl; for (int i = 0; i < numElements; i++) { //hostData[i] = random() & 0xFF; hostData[i] = 1; } /*kernel configuration*/ const int numElementsPerThread = opt._numElemsPerThread > 0 ? 
opt._numElemsPerThread : Scan::get_num_elements_per_thread<T>(); const int numThreadsPerBlock = deviceProp.maxThreadsPerBlock/4; /*use the maximum number of threads per block*/ const int numElementsPerBlock = numThreadsPerBlock * numElementsPerThread; /*number of elements per thread block*/ const int numElementsAligned = (numElements + numElementsPerBlock - 1) / numElementsPerBlock * numElementsPerBlock; const int numBlocksPerGrid = deviceProp.multiProcessorCount; cout << "numElementsPerThread: " << numElementsPerThread << endl; cout << "numThreadsPerBlock: " << numThreadsPerBlock << endl; cout << "numBlocksPerGrid: " << numBlocksPerGrid << endl; cout << "numElementsAligned: " << numElementsAligned << endl; cout << "Number of iterations: " << opt._maxIters << endl; // initialize a timer hipEvent_t start, stop; hipEventCreate(&start); CudaCheckError(); hipEventCreate(&stop); CudaCheckError(); float et = 0, inc = 0; hipMalloc((void **) &devData, numElementsAligned * sizeof(T)); CudaCheckError(); hipMalloc((void **) &devPartialSums, numElementsAligned / numElementsPerBlock * sizeof(utils::CommPair<T>)); CudaCheckError(); /*transfer data to the device*/ hipMemcpy(devData, hostData, numElements * sizeof(T), hipMemcpyHostToDevice); CudaCheckError(); cout << "Invoke the kernel" << endl; /*start recording the runtime*/ hipEventRecord(start, 0); CudaCheckError(); /*define an operator for the Scan operation*/ typedef scanop::Add<T> SUM; /*invoke the kernel*/ for (int i = 0; i < opt._maxIters; ++i) { switch (numElementsPerThread) { case 4: Scan::scan<T, SUM, utils::CommPair<T>, 4>(numBlocksPerGrid, numThreadsPerBlock, numThreadsPerBlock, devData, devData, numElementsAligned / numElementsPerBlock, devPartialSums); break; case 8: Scan::scan<T, SUM, utils::CommPair<T>, 8>(numBlocksPerGrid, numThreadsPerBlock, numThreadsPerBlock, devData, devData, numElementsAligned / numElementsPerBlock, devPartialSums); break; case 16: Scan::scan<T, SUM, utils::CommPair<T>, 
16>(numBlocksPerGrid, numThreadsPerBlock, numThreadsPerBlock, devData, devData, numElementsAligned / numElementsPerBlock, devPartialSums); break; case 20: Scan::scan<T, SUM, utils::CommPair<T>, 20>(numBlocksPerGrid, numThreadsPerBlock, numThreadsPerBlock, devData, devData, numElementsAligned / numElementsPerBlock, devPartialSums); break; case 24: Scan::scan<T, SUM, utils::CommPair<T>, 24>(numBlocksPerGrid, numThreadsPerBlock, numThreadsPerBlock, devData, devData, numElementsAligned / numElementsPerBlock, devPartialSums); break; case 28: Scan::scan<T, SUM, utils::CommPair<T>, 28>(numBlocksPerGrid, numThreadsPerBlock, numThreadsPerBlock, devData, devData, numElementsAligned / numElementsPerBlock, devPartialSums); break; case 32: Scan::scan<T, SUM, utils::CommPair<T>, 32>(numBlocksPerGrid, numThreadsPerBlock, numThreadsPerBlock, devData, devData, numElementsAligned / numElementsPerBlock, devPartialSums); break; case 36: Scan::scan<T, SUM, utils::CommPair<T>, 36>(numBlocksPerGrid, numThreadsPerBlock, numThreadsPerBlock, devData, devData, numElementsAligned / numElementsPerBlock, devPartialSums); break; case 40: Scan::scan<T, SUM, utils::CommPair<T>, 40>(numBlocksPerGrid, numThreadsPerBlock, numThreadsPerBlock, devData, devData, numElementsAligned / numElementsPerBlock, devPartialSums); break; case 44: Scan::scan<T, SUM, utils::CommPair<T>, 44>(numBlocksPerGrid, numThreadsPerBlock, numThreadsPerBlock, devData, devData, numElementsAligned / numElementsPerBlock, devPartialSums); break; case 48: Scan::scan<T, SUM, utils::CommPair<T>, 48>(numBlocksPerGrid, numThreadsPerBlock, numThreadsPerBlock, devData, devData, numElementsAligned / numElementsPerBlock, devPartialSums); break; default: cerr << "Unsupported number of elements per thread: " << numElementsPerThread << endl; exit(-1); } } /*end recording the runtime*/ hipEventRecord(stop, 0); CudaCheckError(); hipEventSynchronize(stop); CudaCheckError(); hipEventElapsedTime(&inc, start, stop); CudaCheckError(); cout << 
"Finished the kernel" << endl; et += inc; et /= opt._maxIters; /*load back the data*/ hipMemcpy(hostResult, devData, numElements * sizeof(T), hipMemcpyDeviceToHost); CudaCheckError(); printf("Time (ms): %f\n", et); printf("%d elements scanned in %f ms -> %f (aligned %f) GigaElements/s\n", numElements, et, numElements / (et / 1000.0f) / 1000000000.0f, numElementsAligned / (et / 1000.0f) / 1000000000.0f); /*verify the results*/ bool bTestResult = opt._verify ? CPUverify<T>(hostData, hostResult, numElements, opt._maxIters) : true; hipHostFree(hostData); CudaCheckError(); hipHostFree(hostResult); CudaCheckError(); hipFree(devData); CudaCheckError(); hipFree(devPartialSums); CudaCheckError(); return bTestResult; } int main(int argc, char* argv[]) { Options opt; /*parse parameters*/ if (opt.parse(argc, argv) == false) { return -1; } /*invoke the kernel*/ bool ret = parallel_scan<int>(opt); if (opt._verify) { if (ret) { cout << "The scan results are correct" << endl; } else { cout << "The scan results are incorrect" << endl; } } return 0; }
2cc7250933f71e80cf2de29505480035cb8c2aba.cu
/* * LightScan.cu * * Created on: Aug 18, 2015 * Author: Yongchao Liu * Affiliation: Gerogia Institute of Technology * Official Homepage: http://www.cc.gatech.edu/~yliu * Personal Homepage: https://sites.google.com/site/yongchaosoftware * */ #include "Options.h" #include "Scan.cuh" #include "Operator.cuh" template<typename T> bool CPUverify(T *hostData, T *hostResult, int num, int maxIters) { // cpu verify for (int iter = 0; iter < maxIters; ++iter) { for (int i = 0; i < num - 1; i++) { hostData[i + 1] = hostData[i] + hostData[i + 1]; } } T diff = 0; for (int i = 0; i < num; i++) { diff += hostData[i] - hostResult[i]; } cout << "CPU verify result diff (GPUvsCPU) = " << diff << endl; bool bTestResult = false; if (diff == 0) { bTestResult = true; } return bTestResult; } template<typename T> bool parallel_scan(Options& opt) { T *hostData, *hostResult; T *devData; utils::CommPair<T> *devPartialSums; const int numElements = opt._numElements; const cudaDeviceProp& deviceProp = opt._deviceProps[opt._gpuIndex].second; /*set GPU*/ cout << "Use GPU: " << opt._gpuIndex << endl; cudaSetDevice(opt._deviceProps[opt._gpuIndex].first); CudaCheckError(); /*host-side memory allocation*/ cudaMallocHost((void **) &hostData, sizeof(T) * numElements); CudaCheckError(); cudaMallocHost((void **) &hostResult, sizeof(T) * numElements); CudaCheckError(); //initialize data: cout << "Scan using cyclic-based approach" << endl; cout << "---------------------------------------------------" << endl; cout << "Initialize test data [1, 1, 1...] of " << numElements << " elements" << endl; for (int i = 0; i < numElements; i++) { //hostData[i] = random() & 0xFF; hostData[i] = 1; } /*kernel configuration*/ const int numElementsPerThread = opt._numElemsPerThread > 0 ? 
opt._numElemsPerThread : Scan::get_num_elements_per_thread<T>(); const int numThreadsPerBlock = deviceProp.maxThreadsPerBlock/4; /*use the maximum number of threads per block*/ const int numElementsPerBlock = numThreadsPerBlock * numElementsPerThread; /*number of elements per thread block*/ const int numElementsAligned = (numElements + numElementsPerBlock - 1) / numElementsPerBlock * numElementsPerBlock; const int numBlocksPerGrid = deviceProp.multiProcessorCount; cout << "numElementsPerThread: " << numElementsPerThread << endl; cout << "numThreadsPerBlock: " << numThreadsPerBlock << endl; cout << "numBlocksPerGrid: " << numBlocksPerGrid << endl; cout << "numElementsAligned: " << numElementsAligned << endl; cout << "Number of iterations: " << opt._maxIters << endl; // initialize a timer cudaEvent_t start, stop; cudaEventCreate(&start); CudaCheckError(); cudaEventCreate(&stop); CudaCheckError(); float et = 0, inc = 0; cudaMalloc((void **) &devData, numElementsAligned * sizeof(T)); CudaCheckError(); cudaMalloc((void **) &devPartialSums, numElementsAligned / numElementsPerBlock * sizeof(utils::CommPair<T>)); CudaCheckError(); /*transfer data to the device*/ cudaMemcpy(devData, hostData, numElements * sizeof(T), cudaMemcpyHostToDevice); CudaCheckError(); cout << "Invoke the kernel" << endl; /*start recording the runtime*/ cudaEventRecord(start, 0); CudaCheckError(); /*define an operator for the Scan operation*/ typedef scanop::Add<T> SUM; /*invoke the kernel*/ for (int i = 0; i < opt._maxIters; ++i) { switch (numElementsPerThread) { case 4: Scan::scan<T, SUM, utils::CommPair<T>, 4>(numBlocksPerGrid, numThreadsPerBlock, numThreadsPerBlock, devData, devData, numElementsAligned / numElementsPerBlock, devPartialSums); break; case 8: Scan::scan<T, SUM, utils::CommPair<T>, 8>(numBlocksPerGrid, numThreadsPerBlock, numThreadsPerBlock, devData, devData, numElementsAligned / numElementsPerBlock, devPartialSums); break; case 16: Scan::scan<T, SUM, utils::CommPair<T>, 
16>(numBlocksPerGrid, numThreadsPerBlock, numThreadsPerBlock, devData, devData, numElementsAligned / numElementsPerBlock, devPartialSums); break; case 20: Scan::scan<T, SUM, utils::CommPair<T>, 20>(numBlocksPerGrid, numThreadsPerBlock, numThreadsPerBlock, devData, devData, numElementsAligned / numElementsPerBlock, devPartialSums); break; case 24: Scan::scan<T, SUM, utils::CommPair<T>, 24>(numBlocksPerGrid, numThreadsPerBlock, numThreadsPerBlock, devData, devData, numElementsAligned / numElementsPerBlock, devPartialSums); break; case 28: Scan::scan<T, SUM, utils::CommPair<T>, 28>(numBlocksPerGrid, numThreadsPerBlock, numThreadsPerBlock, devData, devData, numElementsAligned / numElementsPerBlock, devPartialSums); break; case 32: Scan::scan<T, SUM, utils::CommPair<T>, 32>(numBlocksPerGrid, numThreadsPerBlock, numThreadsPerBlock, devData, devData, numElementsAligned / numElementsPerBlock, devPartialSums); break; case 36: Scan::scan<T, SUM, utils::CommPair<T>, 36>(numBlocksPerGrid, numThreadsPerBlock, numThreadsPerBlock, devData, devData, numElementsAligned / numElementsPerBlock, devPartialSums); break; case 40: Scan::scan<T, SUM, utils::CommPair<T>, 40>(numBlocksPerGrid, numThreadsPerBlock, numThreadsPerBlock, devData, devData, numElementsAligned / numElementsPerBlock, devPartialSums); break; case 44: Scan::scan<T, SUM, utils::CommPair<T>, 44>(numBlocksPerGrid, numThreadsPerBlock, numThreadsPerBlock, devData, devData, numElementsAligned / numElementsPerBlock, devPartialSums); break; case 48: Scan::scan<T, SUM, utils::CommPair<T>, 48>(numBlocksPerGrid, numThreadsPerBlock, numThreadsPerBlock, devData, devData, numElementsAligned / numElementsPerBlock, devPartialSums); break; default: cerr << "Unsupported number of elements per thread: " << numElementsPerThread << endl; exit(-1); } } /*end recording the runtime*/ cudaEventRecord(stop, 0); CudaCheckError(); cudaEventSynchronize(stop); CudaCheckError(); cudaEventElapsedTime(&inc, start, stop); CudaCheckError(); cout << 
"Finished the kernel" << endl; et += inc; et /= opt._maxIters; /*load back the data*/ cudaMemcpy(hostResult, devData, numElements * sizeof(T), cudaMemcpyDeviceToHost); CudaCheckError(); printf("Time (ms): %f\n", et); printf("%d elements scanned in %f ms -> %f (aligned %f) GigaElements/s\n", numElements, et, numElements / (et / 1000.0f) / 1000000000.0f, numElementsAligned / (et / 1000.0f) / 1000000000.0f); /*verify the results*/ bool bTestResult = opt._verify ? CPUverify<T>(hostData, hostResult, numElements, opt._maxIters) : true; cudaFreeHost(hostData); CudaCheckError(); cudaFreeHost(hostResult); CudaCheckError(); cudaFree(devData); CudaCheckError(); cudaFree(devPartialSums); CudaCheckError(); return bTestResult; } int main(int argc, char* argv[]) { Options opt; /*parse parameters*/ if (opt.parse(argc, argv) == false) { return -1; } /*invoke the kernel*/ bool ret = parallel_scan<int>(opt); if (opt._verify) { if (ret) { cout << "The scan results are correct" << endl; } else { cout << "The scan results are incorrect" << endl; } } return 0; }
6424345c12706171dccd5ae89b1f6fd3b5baed14.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector arithmetic ====================================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector-and-scalar arithmetic =========================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector comparison ====================================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector-and-scalar comparison =========================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector math (one argument) ============================================= // Calculate the arc cosine of the input argument. extern "C" // Calculate the nonnegative arc hyperbolic cosine of the input argument. extern "C" // Calculate the arc sine of the input argument. extern "C" // Calculate the arc hyperbolic sine of the input argument. extern "C" // Calculate the arc tangent of the input argument. extern "C" // Calculate the arc hyperbolic tangent of the input argument. extern "C" // Calculate the cube root of the input argument. extern "C" // Calculate ceiling of the input argument. extern "C" // Calculate the cosine of the input argument. extern "C" // Calculate the hyperbolic cosine of the input argument. extern "C" // Calculate the cosine of the input argument p . extern "C" // Calculate the complementary error function of the input argument. extern "C" // Calculate the inverse complementary error function of the input argument. extern "C" // Calculate the scaled complementary error function of the input argument. extern "C" // Calculate the error function of the input argument. extern "C" // Calculate the inverse error function of the input argument. 
extern "C" // Calculate the base 10 exponential of the input argument. extern "C" // Calculate the base 2 exponential of the input argument. extern "C" // Calculate the base e exponential of the input argument. extern "C" // Calculate the base e exponential of the input argument, minus 1. extern "C" // Calculate the absolute value of its argument. extern "C" // Calculate the largest integer less than or equal to x. extern "C" // Calculate the value of the Bessel function of the first kind of order 0 for the input argument. extern "C" // Calculate the value of the Bessel function of the first kind of order 1 for the input argument. extern "C" // Calculate the natural logarithm of the absolute value of the gamma function of the input argument. extern "C" // Calculate the base 10 logarithm of the input argument. extern "C" // Calculate the value of l o g e ( 1 + x ) . extern "C" // Calculate the base 2 logarithm of the input argument. extern "C" // Calculate the doubleing point representation of the exponent of the input argument. extern "C" // Calculate the natural logarithm of the input argument. extern "C" // Calculate the standard normal cumulative distribution function. extern "C" // Calculate the inverse of the standard normal cumulative distribution function. extern "C" // Calculate reciprocal cube root function. extern "C" // Round input to nearest integer value in doubleing-point. extern "C" // Round to nearest integer value in doubleing-point. extern "C" // Calculate the reciprocal of the square root of the input argument. extern "C" // Calculate the sine of the input argument. extern "C" // Calculate the hyperbolic sine of the input argument. extern "C" // Calculate the sine of the input argument p . extern "C" // Calculate the square root of the input argument. extern "C" // Calculate the tangent of the input argument. extern "C" // Calculate the hyperbolic tangent of the input argument. extern "C" // Calculate the gamma function of the input argument. 
extern "C" // Truncate input argument to the integral part. extern "C" // Calculate the value of the Bessel function of the second kind of order 0 for the input argument. extern "C" // Calculate the value of the Bessel function of the second kind of order 1 for the input argument. extern "C" //=== Vector math (two arguments) ============================================ // Create value with given magnitude, copying sign of second value. extern "C" // Compute the positive difference between x and y. extern "C" // Divide two doubleing point values. extern "C" // Determine the maximum numeric value of the arguments. extern "C" // Determine the minimum numeric value of the arguments. extern "C" // Calculate the doubleing-point remainder of x / y. extern "C" // Calculate the square root of the sum of squares of two arguments. extern "C" // Return next representable single-precision doubleing-point value afer argument. extern "C" // Calculate the value of first argument to the power of second argument. extern "C" // Compute single-precision doubleing-point remainder. extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //WARNING : device_sum size should be gridDim.x __global__ void vec_initIndex(int n, int *index) { int idx = threadIdx.x + blockIdx.x * blockDim.x; int idy = threadIdx.y + blockIdx.y * blockDim.y; int id = idy * gridDim.x * blockDim.x + idx; if (id < n) { index[id] = id; } }
6424345c12706171dccd5ae89b1f6fd3b5baed14.cu
#include "includes.h" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector arithmetic ====================================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector-and-scalar arithmetic =========================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector comparison ====================================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector-and-scalar comparison =========================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector math (one argument) ============================================= // Calculate the arc cosine of the input argument. extern "C" // Calculate the nonnegative arc hyperbolic cosine of the input argument. extern "C" // Calculate the arc sine of the input argument. extern "C" // Calculate the arc hyperbolic sine of the input argument. extern "C" // Calculate the arc tangent of the input argument. extern "C" // Calculate the arc hyperbolic tangent of the input argument. extern "C" // Calculate the cube root of the input argument. extern "C" // Calculate ceiling of the input argument. extern "C" // Calculate the cosine of the input argument. extern "C" // Calculate the hyperbolic cosine of the input argument. extern "C" // Calculate the cosine of the input argument × p . extern "C" // Calculate the complementary error function of the input argument. extern "C" // Calculate the inverse complementary error function of the input argument. extern "C" // Calculate the scaled complementary error function of the input argument. extern "C" // Calculate the error function of the input argument. extern "C" // Calculate the inverse error function of the input argument. extern "C" // Calculate the base 10 exponential of the input argument. 
extern "C" // Calculate the base 2 exponential of the input argument. extern "C" // Calculate the base e exponential of the input argument. extern "C" // Calculate the base e exponential of the input argument, minus 1. extern "C" // Calculate the absolute value of its argument. extern "C" // Calculate the largest integer less than or equal to x. extern "C" // Calculate the value of the Bessel function of the first kind of order 0 for the input argument. extern "C" // Calculate the value of the Bessel function of the first kind of order 1 for the input argument. extern "C" // Calculate the natural logarithm of the absolute value of the gamma function of the input argument. extern "C" // Calculate the base 10 logarithm of the input argument. extern "C" // Calculate the value of l o g e ( 1 + x ) . extern "C" // Calculate the base 2 logarithm of the input argument. extern "C" // Calculate the doubleing point representation of the exponent of the input argument. extern "C" // Calculate the natural logarithm of the input argument. extern "C" // Calculate the standard normal cumulative distribution function. extern "C" // Calculate the inverse of the standard normal cumulative distribution function. extern "C" // Calculate reciprocal cube root function. extern "C" // Round input to nearest integer value in doubleing-point. extern "C" // Round to nearest integer value in doubleing-point. extern "C" // Calculate the reciprocal of the square root of the input argument. extern "C" // Calculate the sine of the input argument. extern "C" // Calculate the hyperbolic sine of the input argument. extern "C" // Calculate the sine of the input argument × p . extern "C" // Calculate the square root of the input argument. extern "C" // Calculate the tangent of the input argument. extern "C" // Calculate the hyperbolic tangent of the input argument. extern "C" // Calculate the gamma function of the input argument. extern "C" // Truncate input argument to the integral part. 
extern "C" // Calculate the value of the Bessel function of the second kind of order 0 for the input argument. extern "C" // Calculate the value of the Bessel function of the second kind of order 1 for the input argument. extern "C" //=== Vector math (two arguments) ============================================ // Create value with given magnitude, copying sign of second value. extern "C" // Compute the positive difference between x and y. extern "C" // Divide two doubleing point values. extern "C" // Determine the maximum numeric value of the arguments. extern "C" // Determine the minimum numeric value of the arguments. extern "C" // Calculate the doubleing-point remainder of x / y. extern "C" // Calculate the square root of the sum of squares of two arguments. extern "C" // Return next representable single-precision doubleing-point value afer argument. extern "C" // Calculate the value of first argument to the power of second argument. extern "C" // Compute single-precision doubleing-point remainder. extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //WARNING : device_sum size should be gridDim.x __global__ void vec_initIndex(int n, int *index) { int idx = threadIdx.x + blockIdx.x * blockDim.x; int idy = threadIdx.y + blockIdx.y * blockDim.y; int id = idy * gridDim.x * blockDim.x + idx; if (id < n) { index[id] = id; } }
1ea19e268589e234adbe57faae214c7274462616.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 2016 NVIDIA Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <stdio.h> #include "rocblas.h" #include "../debug.h" /* macro for index calculations */ #define INDX( row, col, ld ) ( ( (col) * (ld) ) + (row) ) /* matrix size and thread dimensions */ #define SIZE 1024 /* define blocksize X and blocksize Y and blocksize K */ #define THREADS_PER_BLOCK_X 16 // Thread block size, x dimension #define THREADS_PER_BLOCK_Y 16 // Thread block size, y dimension #define BLOCK_K 16 // square block of K size __global__ void GPU_shmem2(const int m, double const * const a, double const * const b, double *c ) { /* setup some constanst for later use */ const int tx = threadIdx.x; const int ty = threadIdx.y; const int iby = blockIdx.y * THREADS_PER_BLOCK_Y; const int ibx = blockIdx.x * THREADS_PER_BLOCK_X; /* shared memory arrays for A and B */ __shared__ double as[ FIXME ][ FIXME ]; __shared__ double bs[ FIXME ][ FIXME ]; /* space for C to be held in registers */ double c_tmp = 0.0 ; /* calculate my initial offset into A and B */ int aoff = INDX( FIXME, FIXME, m ); int boff = INDX( FIXME, FIXME, m ); /* main loop over blocks of K */ for( int Kblock = 0; Kblock < m; Kblock+=BLOCK_K ) { /* read block of A into shared memory */ as[ FIXME ][ FIXME ] = a[ aoff ]; /* read block of B into shared memory */ bs[ FIXME ][ FIXME ] = b[ boff ]; /* increment A and B offsets for next round of 
data reads */ boff += BLOCK_K; aoff += m * BLOCK_K; /* triply nested loop to perform the matmult on the blocks */ #pragma unroll for( int k = 0 ; k < BLOCK_K ; k++ ) { c_tmp += as[ FIXME ][ FIXME ] * bs[ FIXME ][ FIXME ]; } } /* end for Kblock */ /* set C to its proper index int the C matrix */ int coff = INDX( ibx + tx, iby + ty, m ); /* write results to the C matrix */ c[ coff ] = c_tmp; } /* end GPU_shmem2 */ int main( int argc, char *argv[] ) { /* get GPU device number and name */ int dev; hipDeviceProp_t deviceProp; checkCUDA( hipGetDevice( &dev ) ); checkCUDA( hipGetDeviceProperties( &deviceProp, dev ) ); printf("Using GPU %d: %s\n", dev, deviceProp.name ); const int size = SIZE; fprintf(stdout, "Matrix size is %d\n",size); double *h_a, *h_b, *h_c, *h_c1; double *d_a, *d_b, *d_c; size_t numbytes = (size_t ) size * (size_t ) size * sizeof( double ); h_a = (double *) malloc( numbytes ); if( h_a == NULL ) { fprintf(stderr,"Error in host malloc\n"); return 911; } h_b = (double *) malloc( numbytes ); if( h_b == NULL ) { fprintf(stderr,"Error in host malloc\n"); return 911; } h_c = (double *) malloc( numbytes ); if( h_c == NULL ) { fprintf(stderr,"Error in host malloc\n"); return 911; } h_c1 = (double *) malloc( numbytes ); if( h_c1 == NULL ) { fprintf(stderr,"Error in host malloc\n"); return 911; } /* zero out the host memory for C matrices */ memset( h_c, 0, numbytes ); memset( h_c1, 0, numbytes ); fprintf( stdout, "Total memory required is %lf MB\n", 3.0 * (double) numbytes / 1000000.0 ); /* initialize the A and B matrices */ for( int i = 0; i < size * size; i++ ) { h_a[i] = double( rand() ) / ( double(RAND_MAX) + 1.0 ); h_b[i] = double( rand() ) / ( double(RAND_MAX) + 1.0 ); } /* allocate a, b, c in gpu memory */ checkCUDA( hipMalloc( (void **)&d_a, numbytes ) ); checkCUDA( hipMalloc( (void **)&d_b, numbytes ) ); checkCUDA( hipMalloc( (void **)&d_c, numbytes )); /* copy a and b to device */ checkCUDA( hipMemcpy( d_a, h_a, numbytes, hipMemcpyHostToDevice ) ); 
checkCUDA( hipMemcpy( d_b, h_b, numbytes, hipMemcpyHostToDevice ) ); hipblasHandle_t handle; checkCUBLAS( hipblasCreate( &handle ) ); double alpha = 1.0; double beta = 0.0; /* start timers */ hipEvent_t start, stop; checkCUDA( hipEventCreate( &start ) ); checkCUDA( hipEventCreate( &stop ) ); checkCUDA( hipEventRecord( start, 0 ) ); /* call CUBLAS dgemm */ checkCUBLAS( hipblasDgemm( handle, HIPBLAS_OP_N, HIPBLAS_OP_N, size, size, size, &alpha, d_a, size, d_b, size, &beta, d_c, size ) ); /* stop timers */ checkCUDA( hipEventRecord( stop, 0 ) ); checkCUDA( hipEventSynchronize( stop ) ); float elapsedTime; checkCUDA( hipEventElapsedTime( &elapsedTime, start, stop ) ); /* print GPU CUBLAS timing information */ fprintf(stdout, "Total time GPU CUBLAS is %f sec\n", elapsedTime / 1000.0f ); fprintf(stdout, "Performance is %f GFlop/s\n", 2.0 * (double) size * (double) size * (double) size / ( (double) elapsedTime / 1000.0 ) * 1.e-9 ); /* copy C from device to host for error checking */ checkCUDA( hipMemcpy( h_c, d_c, numbytes, hipMemcpyDeviceToHost ) ); /* reset C on device to zero */ checkCUDA( hipMemset( d_c, 0, numbytes ) ); /* setup grid and block sizes */ dim3 threads( THREADS_PER_BLOCK_X, THREADS_PER_BLOCK_Y, 1 ); dim3 blocks( size / THREADS_PER_BLOCK_X, size / THREADS_PER_BLOCK_Y, 1 ); /* start timers */ checkCUDA( hipEventRecord( start, 0 ) ); /* call GPU_naive */ hipLaunchKernelGGL(( GPU_shmem2), dim3(blocks), dim3(threads) , 0, 0, size, d_a, d_b, d_c ); CUDA_CHECK() checkCUDA( hipDeviceSynchronize() ); /* stop timers */ checkCUDA( hipEventRecord( stop, 0 ) ); checkCUDA( hipEventSynchronize( stop ) ); checkCUDA( hipEventElapsedTime( &elapsedTime, start, stop ) ); /* print data for GPU naive */ fprintf(stdout, "Total time GPU SHMEM is %f sec\n", elapsedTime / 1000.0f ); fprintf(stdout, "Performance is %f GFlop/s\n", 2.0 * (double) size * (double) size * (double) size / ( (double) elapsedTime / 1000.0 ) * 1.e-9 ); /* copy C back to host */ checkCUDA( hipMemcpy( h_c1, 
d_c, numbytes, hipMemcpyDeviceToHost ) ); checkCUBLAS( hipblasDestroy( handle ) ); checkCUDA( hipEventDestroy( start ) ); checkCUDA( hipEventDestroy( stop ) ); /* check CUBLAS versus GPU NAIVE numerical results */ double temp = 0.0; for( int i = 0; i < size * size; i++ ) { temp += ( h_c[i] - h_c1[i] ) * ( h_c[i] - h_c1[i] ); } /* end for */ printf("error is %f\n",temp); if( temp > 10 ) printf("FAIL\n"); else printf("PASS\n"); /* cleanup */ checkCUDA( hipFree( d_a ) ); checkCUDA( hipFree( d_b ) ); checkCUDA( hipFree( d_c ) ); free( h_a ); free( h_b ); free( h_c ); free( h_c1 ); checkCUDA( hipDeviceReset() ); return 0; }
1ea19e268589e234adbe57faae214c7274462616.cu
/*
 *  Copyright 2016 NVIDIA Corporation
 *
 *  Licensed under the Apache License, Version 2.0 (the "License");
 *  you may not use this file except in compliance with the License.
 *  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS,
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 */

#include <stdio.h>
#include "cublas_v2.h"
#include "../debug.h"

/* macro for index calculations: column-major storage with leading dimension ld */
#define INDX( row, col, ld ) ( ( (col) * (ld) ) + (row) )

/* matrix size and thread dimensions */
#define SIZE 1024

/* define blocksize X and blocksize Y and blocksize K */
#define THREADS_PER_BLOCK_X 16 // Thread block size, x dimension
#define THREADS_PER_BLOCK_Y 16 // Thread block size, y dimension
#define BLOCK_K 16             // square block of K size

/*
 * Shared-memory tiled matrix multiply C = A * B for square m x m matrices
 * stored column-major.
 *
 * Launch configuration expected by this kernel:
 *   block = (THREADS_PER_BLOCK_X, THREADS_PER_BLOCK_Y)
 *   grid  = (m / THREADS_PER_BLOCK_X, m / THREADS_PER_BLOCK_Y)
 * i.e. m must be a multiple of the 16x16 tile size (SIZE = 1024 satisfies this).
 * Each thread computes one element of C, accumulating over BLOCK_K-wide
 * slabs of A and B that are staged through shared memory.
 *
 * (This file is an NVIDIA training exercise; the FIXME placeholders have been
 * filled in with the standard tiled solution, and the required __syncthreads()
 * barriers between the shared-memory writes and reads have been added.)
 */
__global__ void GPU_shmem2( const int m, double const * const a,
                            double const * const b, double *c )
{
  /* setup some constants for later use */
  const int tx  = threadIdx.x;
  const int ty  = threadIdx.y;
  const int iby = blockIdx.y * THREADS_PER_BLOCK_Y;  /* first column of C handled by this block */
  const int ibx = blockIdx.x * THREADS_PER_BLOCK_X;  /* first row of C handled by this block */

  /* shared memory arrays for the A and B tiles:
     as holds a (rows x K) slab of A, bs holds a (K x cols) slab of B */
  __shared__ double as[ THREADS_PER_BLOCK_X ][ BLOCK_K ];
  __shared__ double bs[ BLOCK_K ][ THREADS_PER_BLOCK_Y ];

  /* space for C to be held in registers */
  double c_tmp = 0.0;

  /* calculate my initial offset into A and B (column-major):
     A element (ibx+tx, ty) and B element (tx, iby+ty) of the first slab */
  int aoff = INDX( ibx + tx, ty, m );
  int boff = INDX( tx, iby + ty, m );

  /* main loop over blocks of K */
  for( int Kblock = 0; Kblock < m; Kblock += BLOCK_K )
  {
    /* read block of A into shared memory */
    as[ tx ][ ty ] = a[ aoff ];

    /* read block of B into shared memory */
    bs[ tx ][ ty ] = b[ boff ];

    /* make the freshly loaded tiles visible to the whole block */
    __syncthreads();

    /* increment A and B offsets for next round of data reads:
       B advances BLOCK_K rows, A advances BLOCK_K columns */
    boff += BLOCK_K;
    aoff += m * BLOCK_K;

    /* inner product over this K-slab */
#pragma unroll
    for( int k = 0; k < BLOCK_K; k++ )
    {
      c_tmp += as[ tx ][ k ] * bs[ k ][ ty ];
    }

    /* wait before the tiles are overwritten in the next iteration */
    __syncthreads();
  } /* end for Kblock */

  /* set C to its proper index in the C matrix */
  int coff = INDX( ibx + tx, iby + ty, m );

  /* write results to the C matrix */
  c[ coff ] = c_tmp;
} /* end GPU_shmem2 */

/*
 * Host driver: runs cuBLAS DGEMM as the reference, then the shared-memory
 * kernel, times both with CUDA events, and compares the results via the
 * sum of squared differences. Returns 0 on success, 911 on host-malloc failure.
 */
int main( int argc, char *argv[] )
{
  /* get GPU device number and name */
  int dev;
  cudaDeviceProp deviceProp;
  checkCUDA( cudaGetDevice( &dev ) );
  checkCUDA( cudaGetDeviceProperties( &deviceProp, dev ) );
  printf("Using GPU %d: %s\n", dev, deviceProp.name );

  const int size = SIZE;
  fprintf(stdout, "Matrix size is %d\n",size);

  double *h_a, *h_b, *h_c, *h_c1;
  double *d_a, *d_b, *d_c;

  size_t numbytes = (size_t) size * (size_t) size * sizeof( double );

  h_a = (double *) malloc( numbytes );
  if( h_a == NULL )
  {
    fprintf(stderr,"Error in host malloc\n");
    return 911;
  }

  h_b = (double *) malloc( numbytes );
  if( h_b == NULL )
  {
    fprintf(stderr,"Error in host malloc\n");
    return 911;
  }

  h_c = (double *) malloc( numbytes );
  if( h_c == NULL )
  {
    fprintf(stderr,"Error in host malloc\n");
    return 911;
  }

  h_c1 = (double *) malloc( numbytes );
  if( h_c1 == NULL )
  {
    fprintf(stderr,"Error in host malloc\n");
    return 911;
  }

  /* zero out the host memory for C matrices */
  memset( h_c, 0, numbytes );
  memset( h_c1, 0, numbytes );

  fprintf( stdout, "Total memory required is %lf MB\n",
           3.0 * (double) numbytes / 1000000.0 );

  /* initialize the A and B matrices with uniform values in [0,1) */
  for( int i = 0; i < size * size; i++ )
  {
    h_a[i] = double( rand() ) / ( double(RAND_MAX) + 1.0 );
    h_b[i] = double( rand() ) / ( double(RAND_MAX) + 1.0 );
  }

  /* allocate a, b, c in gpu memory */
  checkCUDA( cudaMalloc( (void **)&d_a, numbytes ) );
  checkCUDA( cudaMalloc( (void **)&d_b, numbytes ) );
  checkCUDA( cudaMalloc( (void **)&d_c, numbytes ) );

  /* copy a and b to device */
  checkCUDA( cudaMemcpy( d_a, h_a, numbytes, cudaMemcpyHostToDevice ) );
  checkCUDA( cudaMemcpy( d_b, h_b, numbytes, cudaMemcpyHostToDevice ) );

  cublasHandle_t handle;
  checkCUBLAS( cublasCreate( &handle ) );

  double alpha = 1.0;
  double beta  = 0.0;

  /* start timers */
  cudaEvent_t start, stop;
  checkCUDA( cudaEventCreate( &start ) );
  checkCUDA( cudaEventCreate( &stop ) );
  checkCUDA( cudaEventRecord( start, 0 ) );

  /* call CUBLAS dgemm (cuBLAS is column-major, matching INDX) */
  checkCUBLAS( cublasDgemm( handle, CUBLAS_OP_N, CUBLAS_OP_N,
                            size, size, size,
                            &alpha, d_a, size, d_b, size,
                            &beta, d_c, size ) );

  /* stop timers */
  checkCUDA( cudaEventRecord( stop, 0 ) );
  checkCUDA( cudaEventSynchronize( stop ) );
  float elapsedTime;
  checkCUDA( cudaEventElapsedTime( &elapsedTime, start, stop ) );

  /* print GPU CUBLAS timing information */
  fprintf(stdout, "Total time GPU CUBLAS is %f sec\n", elapsedTime / 1000.0f );
  fprintf(stdout, "Performance is %f GFlop/s\n",
    2.0 * (double) size * (double) size * (double) size /
    ( (double) elapsedTime / 1000.0 ) * 1.e-9 );

  /* copy C from device to host for error checking */
  checkCUDA( cudaMemcpy( h_c, d_c, numbytes, cudaMemcpyDeviceToHost ) );

  /* reset C on device to zero */
  checkCUDA( cudaMemset( d_c, 0, numbytes ) );

  /* setup grid and block sizes */
  dim3 threads( THREADS_PER_BLOCK_X, THREADS_PER_BLOCK_Y, 1 );
  dim3 blocks( size / THREADS_PER_BLOCK_X, size / THREADS_PER_BLOCK_Y, 1 );

  /* start timers */
  checkCUDA( cudaEventRecord( start, 0 ) );

  /* call the shared-memory kernel */
  GPU_shmem2<<< blocks, threads >>> ( size, d_a, d_b, d_c );
  CUDA_CHECK()
  checkCUDA( cudaDeviceSynchronize() );

  /* stop timers */
  checkCUDA( cudaEventRecord( stop, 0 ) );
  checkCUDA( cudaEventSynchronize( stop ) );
  checkCUDA( cudaEventElapsedTime( &elapsedTime, start, stop ) );

  /* print data for GPU shmem kernel */
  fprintf(stdout, "Total time GPU SHMEM is %f sec\n", elapsedTime / 1000.0f );
  fprintf(stdout, "Performance is %f GFlop/s\n",
    2.0 * (double) size * (double) size * (double) size /
    ( (double) elapsedTime / 1000.0 ) * 1.e-9 );

  /* copy C back to host */
  checkCUDA( cudaMemcpy( h_c1, d_c, numbytes, cudaMemcpyDeviceToHost ) );

  checkCUBLAS( cublasDestroy( handle ) );
  checkCUDA( cudaEventDestroy( start ) );
  checkCUDA( cudaEventDestroy( stop ) );

  /* check CUBLAS versus GPU SHMEM numerical results */
  double temp = 0.0;
  for( int i = 0; i < size * size; i++ )
  {
    temp += ( h_c[i] - h_c1[i] ) * ( h_c[i] - h_c1[i] );
  } /* end for */

  printf("error is %f\n",temp);
  if( temp > 10 ) printf("FAIL\n");
  else printf("PASS\n");

  /* cleanup */
  checkCUDA( cudaFree( d_a ) );
  checkCUDA( cudaFree( d_b ) );
  checkCUDA( cudaFree( d_c ) );
  free( h_a );
  free( h_b );
  free( h_c );
  free( h_c1 );

  checkCUDA( cudaDeviceReset() );
  return 0;
}
f685404ab0f221bb9bb8cd16f1f01ff6e09dc0ea.hip
// !!! This is a file automatically generated by hipify!!!
/*
Collatz code for CS 4380 / CS 5351

Copyright (c) 2018, Texas State University. All rights reserved.

Redistribution in source or binary form, with or without modification,
is *not* permitted. Use in source and binary forms, with or without
modification, is only permitted for academic use in CS 4380 or CS 5351
at Texas State University.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

Author: Martin Burtscher
*/

#include <cstdlib>
#include <cstdio>
#include <hip/hip_runtime.h>
#include <sys/time.h>

static const int ThreadsPerBlock = 512;

// Collatz kernel: each thread handles a batch of 4 consecutive starting
// values, computes the Collatz sequence length of each, keeps its local
// maximum, and merges it into *maxlen with atomicMax.
//
// FIX: the kernel body previously contained an unresolved git merge
// conflict (<<<<<<< HEAD ... ======= ... >>>>>>>). The HEAD side referenced
// an undeclared variable `localMax`; the other side used the declared
// accumulator `newMaxlen`, so that branch is kept.
static __global__ void collatzKernel(const long range, int* maxlen)
{
  // compute sequence lengths
  const long idx = threadIdx.x + blockIdx.x * (long)blockDim.x;
  int beg = (idx * 4) + 1;
  int end = ((idx + 1) * 4) + 1;
  int newMaxlen = 0;
  if (idx < range / 4) {
    for (int i = beg; i < end; i++) {
      long val = i;
      int len = 1;
      while (val != 1) {
        len++;
        if ((val % 2) == 0) {
          val = val / 2;  // even
        } else {
          val = 3 * val + 1;  // odd
        }
      }
      if (newMaxlen < len) {newMaxlen = len;}
    }
    // thread updating maxlen using atomicMax (cheap pre-check avoids
    // needless atomics when the global max is already larger)
    if (*maxlen < newMaxlen) atomicMax(maxlen, newMaxlen);
  }
}

// Synchronize and report any pending CUDA/HIP error; exits on failure.
static void CheckCuda()
{
  hipError_t e;
  hipDeviceSynchronize();
  if (hipSuccess != (e = hipGetLastError())) {
    fprintf(stderr, "CUDA error %d: %s\n", e, hipGetErrorString(e));
    exit(-1);
  }
}

int main(int argc, char *argv[])
{
  printf("Collatz v1.0\n");

  // check command line
  if (argc != 2) {fprintf(stderr, "usage: %s range\n", argv[0]); exit(-1);}
  const long range = atol(argv[1]);
  if (range < 1) {fprintf(stderr, "error: range must be at least 1\n"); exit(-1);}
  if (range % 4 != 0) {fprintf(stderr, "error: range must be a multiple of 4\n"); exit(-1);}
  printf("range: 1, ..., %ld\n", range);

  // allocate space for device copy of maxlen
  int* d_maxlen;
  const int size = sizeof(int);
  hipMalloc((void **)&d_maxlen, size);

  // initializing the cpu maxlen
  int maxlen = 0;

  // copying maxlen value to device
  if (hipSuccess != hipMemcpy(d_maxlen, &maxlen, size, hipMemcpyHostToDevice))
    {fprintf(stderr, "copying to device failed\n"); exit(-1);}

  // start time
  timeval start, end;
  gettimeofday(&start, NULL);

  // each thread covers 4 values, hence range/4 threads in total
  hipLaunchKernelGGL(( collatzKernel),
      dim3(((range/4 + ThreadsPerBlock) - 1) / ThreadsPerBlock),
      dim3(ThreadsPerBlock), 0, 0, range, d_maxlen);
  hipDeviceSynchronize();

  // end time
  gettimeofday(&end, NULL);
  const double runtime = end.tv_sec - start.tv_sec + (end.tv_usec - start.tv_usec) / 1000000.0;
  printf("compute time: %.3f s\n", runtime);
  CheckCuda();

  // copy result back to host
  if (hipSuccess != hipMemcpy(&maxlen, d_maxlen, size, hipMemcpyDeviceToHost))
    {fprintf(stderr, "copying from device failed\n"); exit(-1);}

  // print result
  printf("longest sequence: %d elements\n", maxlen);

  hipFree(d_maxlen);
  return 0;
}
f685404ab0f221bb9bb8cd16f1f01ff6e09dc0ea.cu
/*
Collatz code for CS 4380 / CS 5351

Copyright (c) 2018, Texas State University. All rights reserved.

Redistribution in source or binary form, with or without modification,
is *not* permitted. Use in source and binary forms, with or without
modification, is only permitted for academic use in CS 4380 or CS 5351
at Texas State University.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

Author: Martin Burtscher
*/

#include <cstdlib>
#include <cstdio>
#include <cuda.h>
#include <sys/time.h>

static const int ThreadsPerBlock = 512;

// Collatz kernel: each thread handles a batch of 4 consecutive starting
// values, computes the Collatz sequence length of each, keeps its local
// maximum, and merges it into *maxlen with atomicMax.
//
// FIX: the kernel body previously contained an unresolved git merge
// conflict (<<<<<<< HEAD ... ======= ... >>>>>>>). The HEAD side referenced
// an undeclared variable `localMax`; the other side used the declared
// accumulator `newMaxlen`, so that branch is kept.
static __global__ void collatzKernel(const long range, int* maxlen)
{
  // compute sequence lengths
  const long idx = threadIdx.x + blockIdx.x * (long)blockDim.x;
  int beg = (idx * 4) + 1;
  int end = ((idx + 1) * 4) + 1;
  int newMaxlen = 0;
  if (idx < range / 4) {
    for (int i = beg; i < end; i++) {
      long val = i;
      int len = 1;
      while (val != 1) {
        len++;
        if ((val % 2) == 0) {
          val = val / 2;  // even
        } else {
          val = 3 * val + 1;  // odd
        }
      }
      if (newMaxlen < len) {newMaxlen = len;}
    }
    // thread updating maxlen using atomicMax (cheap pre-check avoids
    // needless atomics when the global max is already larger)
    if (*maxlen < newMaxlen) atomicMax(maxlen, newMaxlen);
  }
}

// Synchronize and report any pending CUDA error; exits on failure.
static void CheckCuda()
{
  cudaError_t e;
  cudaDeviceSynchronize();
  if (cudaSuccess != (e = cudaGetLastError())) {
    fprintf(stderr, "CUDA error %d: %s\n", e, cudaGetErrorString(e));
    exit(-1);
  }
}

int main(int argc, char *argv[])
{
  printf("Collatz v1.0\n");

  // check command line
  if (argc != 2) {fprintf(stderr, "usage: %s range\n", argv[0]); exit(-1);}
  const long range = atol(argv[1]);
  if (range < 1) {fprintf(stderr, "error: range must be at least 1\n"); exit(-1);}
  if (range % 4 != 0) {fprintf(stderr, "error: range must be a multiple of 4\n"); exit(-1);}
  printf("range: 1, ..., %ld\n", range);

  // allocate space for device copy of maxlen
  int* d_maxlen;
  const int size = sizeof(int);
  cudaMalloc((void **)&d_maxlen, size);

  // initializing the cpu maxlen
  int maxlen = 0;

  // copying maxlen value to device
  if (cudaSuccess != cudaMemcpy(d_maxlen, &maxlen, size, cudaMemcpyHostToDevice))
    {fprintf(stderr, "copying to device failed\n"); exit(-1);}

  // start time
  timeval start, end;
  gettimeofday(&start, NULL);

  // each thread covers 4 values, hence range/4 threads in total
  collatzKernel<<<((range/4 + ThreadsPerBlock) - 1) / ThreadsPerBlock,
                  ThreadsPerBlock>>>(range, d_maxlen);
  cudaDeviceSynchronize();

  // end time
  gettimeofday(&end, NULL);
  const double runtime = end.tv_sec - start.tv_sec + (end.tv_usec - start.tv_usec) / 1000000.0;
  printf("compute time: %.3f s\n", runtime);
  CheckCuda();

  // copy result back to host
  if (cudaSuccess != cudaMemcpy(&maxlen, d_maxlen, size, cudaMemcpyDeviceToHost))
    {fprintf(stderr, "copying from device failed\n"); exit(-1);}

  // print result
  printf("longest sequence: %d elements\n", maxlen);

  cudaFree(d_maxlen);
  return 0;
}
07b8a5b8b026ad12be75430be273bb80cb3f0ccc.hip
// !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2009-2018 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.

#include "ComputeFreeVolumeGPU.cuh"
#include "IntegratorHPMCMonoGPU.cuh"
#include "IntegratorHPMCMonoImplicitGPU.cuh"
#include "IntegratorHPMCMonoImplicitNewGPU.cuh"

#include "ShapePolyhedron.h"

namespace hpmc
{
namespace detail
{

//! HPMC kernels for ShapePolyhedron
// Explicit template instantiations: this translation unit compiles the
// ShapePolyhedron specialization of each GPU driver declared in the
// included .cuh headers, so other translation units can link against them
// without instantiating the device code themselves.
template hipError_t gpu_hpmc_free_volume<ShapePolyhedron>(const hpmc_free_volume_args_t &args,
                                                       const typename ShapePolyhedron::param_type *d_params);
template hipError_t gpu_hpmc_update<ShapePolyhedron>(const hpmc_args_t& args,
                                                  const typename ShapePolyhedron::param_type *d_params);
template hipError_t gpu_hpmc_implicit_count_overlaps<ShapePolyhedron>(const hpmc_implicit_args_t& args,
                                                  const typename ShapePolyhedron::param_type *d_params);
template hipError_t gpu_hpmc_implicit_accept_reject<ShapePolyhedron>(const hpmc_implicit_args_t& args,
                                                  const typename ShapePolyhedron::param_type *d_params);
template hipError_t gpu_hpmc_insert_depletants_queue<ShapePolyhedron>(const hpmc_implicit_args_new_t& args,
                                                  const typename ShapePolyhedron::param_type *d_params);
template hipError_t gpu_hpmc_implicit_accept_reject_new<ShapePolyhedron>(const hpmc_implicit_args_new_t& args,
                                                  const typename ShapePolyhedron::param_type *d_params);

}; // end namespace detail

} // end namespace hpmc
07b8a5b8b026ad12be75430be273bb80cb3f0ccc.cu
// Copyright (c) 2009-2018 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.

#include "ComputeFreeVolumeGPU.cuh"
#include "IntegratorHPMCMonoGPU.cuh"
#include "IntegratorHPMCMonoImplicitGPU.cuh"
#include "IntegratorHPMCMonoImplicitNewGPU.cuh"

#include "ShapePolyhedron.h"

namespace hpmc
{
namespace detail
{

//! HPMC kernels for ShapePolyhedron
// Explicit template instantiations: this translation unit compiles the
// ShapePolyhedron specialization of each GPU driver declared in the
// included .cuh headers, so other translation units can link against them
// without instantiating the device code themselves.
template cudaError_t gpu_hpmc_free_volume<ShapePolyhedron>(const hpmc_free_volume_args_t &args,
                                                       const typename ShapePolyhedron::param_type *d_params);
template cudaError_t gpu_hpmc_update<ShapePolyhedron>(const hpmc_args_t& args,
                                                  const typename ShapePolyhedron::param_type *d_params);
template cudaError_t gpu_hpmc_implicit_count_overlaps<ShapePolyhedron>(const hpmc_implicit_args_t& args,
                                                  const typename ShapePolyhedron::param_type *d_params);
template cudaError_t gpu_hpmc_implicit_accept_reject<ShapePolyhedron>(const hpmc_implicit_args_t& args,
                                                  const typename ShapePolyhedron::param_type *d_params);
template cudaError_t gpu_hpmc_insert_depletants_queue<ShapePolyhedron>(const hpmc_implicit_args_new_t& args,
                                                  const typename ShapePolyhedron::param_type *d_params);
template cudaError_t gpu_hpmc_implicit_accept_reject_new<ShapePolyhedron>(const hpmc_implicit_args_new_t& args,
                                                  const typename ShapePolyhedron::param_type *d_params);

}; // end namespace detail

} // end namespace hpmc
0971c8b020d04bed0c0cd14c540a3dab4ce23209.hip
// !!! This is a file automatically generated by hipify!!!
/*
 * Copyright 2014 Google Inc. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * ---------------------------------------------------------------------------
 * Copyright 2014 Nervana Systems Inc.  All rights reserved.
 *
 *
 * Added argmin, argmax support, other operations further fleshed out.
 * ---------------------------------------------------------------------------
 */

#include <stdio.h>
#include <hip/hip_runtime.h>

#include "../include/nvmatrix_kernels.cuh"

// Tile src (srcHeight x srcWidth, row-major) periodically into
// tgt (tgtHeight x tgtWidth) via a grid-stride loop over target elements.
__global__ void kTile(const float* src, float* tgt, const uint srcWidth, const uint srcHeight, const uint tgtWidth, const uint tgtHeight) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    const int numThreads = blockDim.x * gridDim.x;

    //    const unsigned int numEls = tgtWidth * tgtHeight;
    for (uint i = idx; i < tgtWidth * tgtHeight; i += numThreads) {
        const uint y = i / tgtWidth;
        const uint x = i % tgtWidth;
        const uint srcY = y % srcHeight;
        const uint srcX = x % srcWidth;
        tgt[i] = src[srcY * srcWidth + srcX];
    }
}

// Partial dot product of a and b: each block of DP_BLOCKSIZE threads reduces
// its grid-stride partial sums in shared memory and writes one partial
// result per block to target[blockIdx.x] (caller must finish the reduction).
// The tree reduction steps (256/128/64) imply DP_BLOCKSIZE == 512; the final
// 32 steps rely on implicit warp synchrony via the volatile pointer —
// NOTE(review): pre-Volta idiom, would need __syncwarp() on newer GPUs.
__global__ void kDotProduct_r(float* a, float* b, float* target, const uint numElements) {
    __shared__ float shmem[DP_BLOCKSIZE];

    uint eidx = DP_BLOCKSIZE * blockIdx.x + threadIdx.x;
    shmem[threadIdx.x] = 0;
    if (eidx < gridDim.x * DP_BLOCKSIZE) {
        for (; eidx < numElements; eidx += gridDim.x * DP_BLOCKSIZE) {
            shmem[threadIdx.x] += a[eidx] * b[eidx];
        }
    }
    __syncthreads();
    if (threadIdx.x < 256) {
        shmem[threadIdx.x] += shmem[threadIdx.x + 256];
    }
    __syncthreads();
    if (threadIdx.x < 128) {
        shmem[threadIdx.x] += shmem[threadIdx.x + 128];
    }
    __syncthreads();
    if (threadIdx.x < 64) {
        shmem[threadIdx.x] += shmem[threadIdx.x + 64];
    }
    __syncthreads();
    if (threadIdx.x < 32) {
        volatile float* mysh = &shmem[threadIdx.x];
        *mysh += mysh[32];
        *mysh += mysh[16];
        *mysh += mysh[8];
        *mysh += mysh[4];
        *mysh += mysh[2];
        *mysh += mysh[1];
        if (threadIdx.x == 0) {
            target[blockIdx.x] = *mysh;
        }
    }
}

// Initialize one hiprand RNG state per thread from a common seed.
__global__ void kSetupCurand(hiprandState_t *state, unsigned long long seed) {
    const uint tidx = NUM_RND_THREADS_PER_BLOCK * blockIdx.x + threadIdx.x;
    /* Each thread gets same seed, a different sequence
       number, no offset */
    hiprand_init(seed, tidx, 0, &state[tidx]);
}

// Row index of the max of column blockIdx.x of mat (height x width, row-major),
// written to target[blockIdx.x]. Expects exactly 32 threads per block:
// each thread strides the column by 32, then thread 0 reduces the 32 partials.
__global__ void kArgMaxColumnwise(float* mat, float* target, unsigned int width, unsigned int height) {
    __shared__ float max_vals[32];
    __shared__ unsigned int max_args[32];
    float cur_max = -2e38;  // sentinel below any representable float input
    unsigned int cur_arg = 0;
    float val = 0;

    for (unsigned int i = threadIdx.x; i < height; i += 32) {
        val = mat[i * width + blockIdx.x];
        if (val > cur_max) {
            cur_max = val;
            cur_arg = i;
        }
    }

    max_vals[threadIdx.x] = cur_max;
    max_args[threadIdx.x] = cur_arg;
    __syncthreads();

    if (threadIdx.x == 0) {
        cur_max = -2e38;
        cur_arg = 0;
        for (unsigned int i = 0; i < 32; i++)
            if (max_vals[i] > cur_max) {
                cur_max = max_vals[i];
                cur_arg = max_args[i];
            }
        target[blockIdx.x] = cur_arg;
    }
}

// Column index of the max of row blockIdx.x of mat; same 32-thread scheme
// as kArgMaxColumnwise but striding along a row.
__global__ void kArgMaxRowwise(float* mat, float* target, unsigned int width, unsigned int height) {
    __shared__ float max_vals[32];
    __shared__ unsigned int max_args[32];
    float cur_max = -2e38;
    unsigned int cur_arg = 0;
    float val = 0;

    for (unsigned int i = threadIdx.x; i < width; i += 32) {
        val = mat[blockIdx.x * width + i];
        if (val > cur_max) {
            cur_max = val;
            cur_arg = i;
        }
    }

    max_vals[threadIdx.x] = cur_max;
    max_args[threadIdx.x] = cur_arg;
    __syncthreads();

    if (threadIdx.x == 0) {
        cur_max = -2e38;
        cur_arg = 0;
        for (unsigned int i = 0; i < 32; i++)
            if (max_vals[i] > cur_max) {
                cur_max = max_vals[i];
                cur_arg = max_args[i];
            }
        target[blockIdx.x] = cur_arg;
    }
}

// Row index of the min of column blockIdx.x of mat; mirror of kArgMaxColumnwise.
__global__ void kArgMinColumnwise(float* mat, float* target, unsigned int width, unsigned int height) {
    __shared__ float min_vals[32];
    __shared__ unsigned int min_args[32];
    float cur_min = 2e38;  // sentinel above any representable float input
    unsigned int cur_arg = 0;
    float val = 0;

    for (unsigned int i = threadIdx.x; i < height; i += 32) {
        val = mat[i * width + blockIdx.x];
        if (val < cur_min) {
            cur_min = val;
            cur_arg = i;
        }
    }

    min_vals[threadIdx.x] = cur_min;
    min_args[threadIdx.x] = cur_arg;
    __syncthreads();

    if (threadIdx.x == 0) {
        cur_min = 2e38;
        cur_arg = 0;
        for (unsigned int i = 0; i < 32; i++)
            if (min_vals[i] < cur_min) {
                cur_min = min_vals[i];
                cur_arg = min_args[i];
            }
        target[blockIdx.x] = cur_arg;
    }
}

// Column index of the min of row blockIdx.x of mat; mirror of kArgMaxRowwise.
__global__ void kArgMinRowwise(float* mat, float* target, unsigned int width, unsigned int height) {
    __shared__ float min_vals[32];
    __shared__ unsigned int min_args[32];
    float cur_min = 2e38;
    unsigned int cur_arg = 0;
    float val = 0;

    for (unsigned int i = threadIdx.x; i < width; i += 32) {
        val = mat[blockIdx.x * width + i];
        if (val < cur_min) {
            cur_min = val;
            cur_arg = i;
        }
    }

    min_vals[threadIdx.x] = cur_min;
    min_args[threadIdx.x] = cur_arg;
    __syncthreads();

    if (threadIdx.x == 0) {
        cur_min = 2e38;
        cur_arg = 0;
        for (unsigned int i = 0; i < 32; i++)
            if (min_vals[i] < cur_min) {
                cur_min = min_vals[i];
                cur_arg = min_args[i];
            }
        target[blockIdx.x] = cur_arg;
    }
}
0971c8b020d04bed0c0cd14c540a3dab4ce23209.cu
/*
 * Copyright 2014 Google Inc. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * ---------------------------------------------------------------------------
 * Copyright 2014 Nervana Systems Inc.  All rights reserved.
 *
 *
 * Added argmin, argmax support, other operations further fleshed out.
 * ---------------------------------------------------------------------------
 */

#include <stdio.h>
#include <cuda_runtime.h>

#include "../include/nvmatrix_kernels.cuh"

// Tile src (srcHeight x srcWidth, row-major) periodically into
// tgt (tgtHeight x tgtWidth) via a grid-stride loop over target elements.
__global__ void kTile(const float* src, float* tgt, const uint srcWidth, const uint srcHeight, const uint tgtWidth, const uint tgtHeight) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    const int numThreads = blockDim.x * gridDim.x;

    //    const unsigned int numEls = tgtWidth * tgtHeight;
    for (uint i = idx; i < tgtWidth * tgtHeight; i += numThreads) {
        const uint y = i / tgtWidth;
        const uint x = i % tgtWidth;
        const uint srcY = y % srcHeight;
        const uint srcX = x % srcWidth;
        tgt[i] = src[srcY * srcWidth + srcX];
    }
}

// Partial dot product of a and b: each block of DP_BLOCKSIZE threads reduces
// its grid-stride partial sums in shared memory and writes one partial
// result per block to target[blockIdx.x] (caller must finish the reduction).
// The tree reduction steps (256/128/64) imply DP_BLOCKSIZE == 512; the final
// 32 steps rely on implicit warp synchrony via the volatile pointer —
// NOTE(review): pre-Volta idiom, would need __syncwarp() on newer GPUs.
__global__ void kDotProduct_r(float* a, float* b, float* target, const uint numElements) {
    __shared__ float shmem[DP_BLOCKSIZE];

    uint eidx = DP_BLOCKSIZE * blockIdx.x + threadIdx.x;
    shmem[threadIdx.x] = 0;
    if (eidx < gridDim.x * DP_BLOCKSIZE) {
        for (; eidx < numElements; eidx += gridDim.x * DP_BLOCKSIZE) {
            shmem[threadIdx.x] += a[eidx] * b[eidx];
        }
    }
    __syncthreads();
    if (threadIdx.x < 256) {
        shmem[threadIdx.x] += shmem[threadIdx.x + 256];
    }
    __syncthreads();
    if (threadIdx.x < 128) {
        shmem[threadIdx.x] += shmem[threadIdx.x + 128];
    }
    __syncthreads();
    if (threadIdx.x < 64) {
        shmem[threadIdx.x] += shmem[threadIdx.x + 64];
    }
    __syncthreads();
    if (threadIdx.x < 32) {
        volatile float* mysh = &shmem[threadIdx.x];
        *mysh += mysh[32];
        *mysh += mysh[16];
        *mysh += mysh[8];
        *mysh += mysh[4];
        *mysh += mysh[2];
        *mysh += mysh[1];
        if (threadIdx.x == 0) {
            target[blockIdx.x] = *mysh;
        }
    }
}

// Initialize one curand RNG state per thread from a common seed.
__global__ void kSetupCurand(curandState *state, unsigned long long seed) {
    const uint tidx = NUM_RND_THREADS_PER_BLOCK * blockIdx.x + threadIdx.x;
    /* Each thread gets same seed, a different sequence
       number, no offset */
    curand_init(seed, tidx, 0, &state[tidx]);
}

// Row index of the max of column blockIdx.x of mat (height x width, row-major),
// written to target[blockIdx.x]. Expects exactly 32 threads per block:
// each thread strides the column by 32, then thread 0 reduces the 32 partials.
__global__ void kArgMaxColumnwise(float* mat, float* target, unsigned int width, unsigned int height) {
    __shared__ float max_vals[32];
    __shared__ unsigned int max_args[32];
    float cur_max = -2e38;  // sentinel below any representable float input
    unsigned int cur_arg = 0;
    float val = 0;

    for (unsigned int i = threadIdx.x; i < height; i += 32) {
        val = mat[i * width + blockIdx.x];
        if (val > cur_max) {
            cur_max = val;
            cur_arg = i;
        }
    }

    max_vals[threadIdx.x] = cur_max;
    max_args[threadIdx.x] = cur_arg;
    __syncthreads();

    if (threadIdx.x == 0) {
        cur_max = -2e38;
        cur_arg = 0;
        for (unsigned int i = 0; i < 32; i++)
            if (max_vals[i] > cur_max) {
                cur_max = max_vals[i];
                cur_arg = max_args[i];
            }
        target[blockIdx.x] = cur_arg;
    }
}

// Column index of the max of row blockIdx.x of mat; same 32-thread scheme
// as kArgMaxColumnwise but striding along a row.
__global__ void kArgMaxRowwise(float* mat, float* target, unsigned int width, unsigned int height) {
    __shared__ float max_vals[32];
    __shared__ unsigned int max_args[32];
    float cur_max = -2e38;
    unsigned int cur_arg = 0;
    float val = 0;

    for (unsigned int i = threadIdx.x; i < width; i += 32) {
        val = mat[blockIdx.x * width + i];
        if (val > cur_max) {
            cur_max = val;
            cur_arg = i;
        }
    }

    max_vals[threadIdx.x] = cur_max;
    max_args[threadIdx.x] = cur_arg;
    __syncthreads();

    if (threadIdx.x == 0) {
        cur_max = -2e38;
        cur_arg = 0;
        for (unsigned int i = 0; i < 32; i++)
            if (max_vals[i] > cur_max) {
                cur_max = max_vals[i];
                cur_arg = max_args[i];
            }
        target[blockIdx.x] = cur_arg;
    }
}

// Row index of the min of column blockIdx.x of mat; mirror of kArgMaxColumnwise.
__global__ void kArgMinColumnwise(float* mat, float* target, unsigned int width, unsigned int height) {
    __shared__ float min_vals[32];
    __shared__ unsigned int min_args[32];
    float cur_min = 2e38;  // sentinel above any representable float input
    unsigned int cur_arg = 0;
    float val = 0;

    for (unsigned int i = threadIdx.x; i < height; i += 32) {
        val = mat[i * width + blockIdx.x];
        if (val < cur_min) {
            cur_min = val;
            cur_arg = i;
        }
    }

    min_vals[threadIdx.x] = cur_min;
    min_args[threadIdx.x] = cur_arg;
    __syncthreads();

    if (threadIdx.x == 0) {
        cur_min = 2e38;
        cur_arg = 0;
        for (unsigned int i = 0; i < 32; i++)
            if (min_vals[i] < cur_min) {
                cur_min = min_vals[i];
                cur_arg = min_args[i];
            }
        target[blockIdx.x] = cur_arg;
    }
}

// Column index of the min of row blockIdx.x of mat; mirror of kArgMaxRowwise.
__global__ void kArgMinRowwise(float* mat, float* target, unsigned int width, unsigned int height) {
    __shared__ float min_vals[32];
    __shared__ unsigned int min_args[32];
    float cur_min = 2e38;
    unsigned int cur_arg = 0;
    float val = 0;

    for (unsigned int i = threadIdx.x; i < width; i += 32) {
        val = mat[blockIdx.x * width + i];
        if (val < cur_min) {
            cur_min = val;
            cur_arg = i;
        }
    }

    min_vals[threadIdx.x] = cur_min;
    min_args[threadIdx.x] = cur_arg;
    __syncthreads();

    if (threadIdx.x == 0) {
        cur_min = 2e38;
        cur_arg = 0;
        for (unsigned int i = 0; i < 32; i++)
            if (min_vals[i] < cur_min) {
                cur_min = min_vals[i];
                cur_arg = min_args[i];
            }
        target[blockIdx.x] = cur_arg;
    }
}
e327c0be7ad71dc996b51cbd1d6df3b14633e3eb.hip
// !!! This is a file automatically generated by hipify!!!
#include "Declaration.hh"

#include "G4HepEmData.hh"
#include "G4HepEmElectronData.hh"

#include <hip/hip_runtime.h>
#include "G4HepEmCuUtils.hh"

//
// Note: both specialisations (needed to be called from the host) are done in
// this .cu file below in the TestResMacXSecDataOnDevice function.

// Per-thread target-element sampling for bremsstrahlung element selectors.
// TisSBModel selects the Seltzer-Berger (true) or relativistic (false) data
// tables. One test case per thread: looks up the selector table of the
// test case's material-cut, locates the kinetic-energy bin, linearly
// interpolates the cumulative per-element probabilities, and walks them
// with the supplied random value to pick an element index (written to
// tsOutRes_d[tid]; 0 for single-element materials, whose start index is -1).
template <bool TisSBModel>
__global__
void TestElemSelectorDataBremKernel ( const struct G4HepEmElectronDataOnDevice* theElectronData_d,
     int* tsInImc_d, double* tsInEkin_d, double* tsInLogEkin_d,
     double* tsInRngVals_d, int* tsOutRes_d, int numTestCases ) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid < numTestCases) {
    // the matrial-cut index
    int imc = tsInImc_d[tid];
    // get start index of the data for this material-cut:
    // NOTE: start index = -1 in case of single element material, i.e. no selector
    int i0  = TisSBModel
              ? theElectronData_d->fElemSelectorBremSBDataStart[imc]
              : theElectronData_d->fElemSelectorBremRBDataStart[imc];
    // NOTE: one should try to avoid to call this kernel for materials with single element !!!
    if ( i0 < 0 ) {
      tsOutRes_d[tid] = 0;
    } else {
      int numElem = theElectronData_d->fElemSelectorNumElements[imc];
      int numData;
      double logE0;
      double invLD;
      double* xdata;
      // pick the table, log-energy grid origin and inverse bin width (log-spacing)
      // for the requested model
      if (TisSBModel) {
        numData = theElectronData_d->fElemSelectorNumBremSBData[imc];
        logE0   = theElectronData_d->fElemSelectorBremSBAuxData[2*imc];
        invLD   = theElectronData_d->fElemSelectorBremSBAuxData[2*imc+1];
        xdata   = &(theElectronData_d->fElemSelectorBremSBData[i0]);
      } else {
        numData = theElectronData_d->fElemSelectorNumBremRBData[imc];
        logE0   = theElectronData_d->fElemSelectorBremRBAuxData[2*imc];
        invLD   = theElectronData_d->fElemSelectorBremRBAuxData[2*imc+1];
        xdata   = &(theElectronData_d->fElemSelectorBremRBData[i0]);
      }
      // make sure that $x \in  [x[0],x[ndata-1]]$
      double xv = max( xdata[0], min( xdata[ numElem * ( numData - 1 ) ], tsInEkin_d[ tid ] ) );
      // compute the lowerindex of the x bin (idx \in [0,N-2] will be guaranted)
      int idxEkin = __double2int_rz( max( 0.0, min( (tsInLogEkin_d[tid] -logE0) * invLD, numData - 2.0 ) ) );
      // the real index position is idxEkin x numElem
      int   indx0 = idxEkin * numElem;
      int   indx1 = indx0 + numElem;
      // linear interpolation
      double   x1 = xdata[ indx0++ ];
      double   x2 = xdata[ indx1++ ];
      double   dl = x2-x1;
      double    b = max( 0., min( 1., (xv - x1) / dl ) );
      int theElemIndex = 0;
      // discrete probabilities, for selecting a given element, are from element index of 0 till #elements-2
      // NOTE: non-deterministic while loop can be turned to deterministic sampling tables for the underlying
      //       discrete distributions (using Alias table) and combining them with statistical interpolation.
      while ( theElemIndex < numElem-1 && tsInRngVals_d[tid] > xdata[indx0+theElemIndex]+b*(xdata[indx1+theElemIndex]-xdata[indx0+theElemIndex])) { ++theElemIndex; }
      tsOutRes_d[tid] = theElemIndex;
    }
  }
}

// Host driver: copies the numTestCases inputs (material-cut indices, kinetic
// energies, their logs, and random values) to the device, launches the
// element-selector kernel for the requested model (indxModel: 1 = SB brem,
// 2 = relativistic brem; 0 unused), and copies the selected element indices
// back into tsOutRes_h. iselectron chooses the electron vs. positron tables.
void TestElemSelectorDataOnDevice ( const struct G4HepEmData* hepEmData, int* tsInImc_h,
     double* tsInEkin_h, double* tsInLogEkin_h, double* tsInRngVals_h,
     int* tsOutRes_h, int numTestCases, int indxModel, bool iselectron ) {
  //
  // --- Allocate device side memory for the input/output data and copy all input
  //     data from host to device
  int*       tsInImc_d = nullptr;
  double*   tsInEkin_d = nullptr;
  double* tsInLogEkin_d = nullptr;
  double* tsInRngVals_d = nullptr;
  int*      tsOutRes_d = nullptr;
  //
  gpuErrchk ( hipMalloc ( &tsInImc_d,     sizeof( int )    * numTestCases ) );
  gpuErrchk ( hipMalloc ( &tsInEkin_d,    sizeof( double ) * numTestCases ) );
  gpuErrchk ( hipMalloc ( &tsInLogEkin_d, sizeof( double ) * numTestCases ) );
  gpuErrchk ( hipMalloc ( &tsInRngVals_d, sizeof( double ) * numTestCases ) );
  gpuErrchk ( hipMalloc ( &tsOutRes_d,    sizeof( int )    * numTestCases ) );
  //
  // --- Copy the input data from host to device (test material-cut index, ekin and log-ekin arrays)
  gpuErrchk ( hipMemcpy ( tsInImc_d,     tsInImc_h,     sizeof( int )    * numTestCases, hipMemcpyHostToDevice) );
  gpuErrchk ( hipMemcpy ( tsInEkin_d,    tsInEkin_h,    sizeof( double ) * numTestCases, hipMemcpyHostToDevice) );
  gpuErrchk ( hipMemcpy ( tsInLogEkin_d, tsInLogEkin_h, sizeof( double ) * numTestCases, hipMemcpyHostToDevice) );
  gpuErrchk ( hipMemcpy ( tsInRngVals_d, tsInRngVals_h, sizeof( double ) * numTestCases, hipMemcpyHostToDevice) );
  //
  // --- Launch the kernels
  const struct G4HepEmElectronDataOnDevice* theElectronData = iselectron
      ? hepEmData->fTheElectronData_gpu
      : hepEmData->fThePositronData_gpu;
  int numThreads = 512;
  int numBlocks  = ::ceil( float(numTestCases)/numThreads );
  switch (indxModel) {
    case 0: // not used
            break;
    case 1: // Seltzer-Berger bremsstrahlung element selector
            hipLaunchKernelGGL(( TestElemSelectorDataBremKernel < true  >) , dim3(numBlocks), dim3(numThreads) , 0, 0, theElectronData, tsInImc_d, tsInEkin_d, tsInLogEkin_d, tsInRngVals_d, tsOutRes_d, numTestCases );
            break;
    case 2: // relativistic bremsstrahlung element selector
            hipLaunchKernelGGL(( TestElemSelectorDataBremKernel < false >) , dim3(numBlocks), dim3(numThreads) , 0, 0, theElectronData, tsInImc_d, tsInEkin_d, tsInLogEkin_d, tsInRngVals_d, tsOutRes_d, numTestCases );
            break;
  }
  //
  // --- Synchronize to make sure that completed on the device
  hipDeviceSynchronize();
  //
  // --- Copy the results from the device to the host
  gpuErrchk ( hipMemcpy ( tsOutRes_h, tsOutRes_d, sizeof( int ) * numTestCases, hipMemcpyDeviceToHost ) );
  //
  // --- Free all dynamically allocated (device side) memory
  hipFree ( tsInImc_d     );
  hipFree ( tsInEkin_d    );
  hipFree ( tsInLogEkin_d );
  hipFree ( tsInRngVals_d );
  hipFree ( tsOutRes_d    );
}
e327c0be7ad71dc996b51cbd1d6df3b14633e3eb.cu
#include "Declaration.hh" #include "G4HepEmData.hh" #include "G4HepEmElectronData.hh" #include <cuda_runtime.h> #include "G4HepEmCuUtils.hh" // // Note: both specialisations (needed to be called from the host) are done in // this .cu file below in the TestResMacXSecDataOnDevice function. template <bool TisSBModel> __global__ void TestElemSelectorDataBremKernel ( const struct G4HepEmElectronDataOnDevice* theElectronData_d, int* tsInImc_d, double* tsInEkin_d, double* tsInLogEkin_d, double* tsInRngVals_d, int* tsOutRes_d, int numTestCases ) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < numTestCases) { // the matrial-cut index int imc = tsInImc_d[tid]; // get start index of the data for this material-cut: // NOTE: start index = -1 in case of single element material, i.e. no selector int i0 = TisSBModel ? theElectronData_d->fElemSelectorBremSBDataStart[imc] : theElectronData_d->fElemSelectorBremRBDataStart[imc]; // NOTE: one should try to avoid to call this kernel for materials with single element !!! 
if ( i0 < 0 ) { tsOutRes_d[tid] = 0; } else { int numElem = theElectronData_d->fElemSelectorNumElements[imc]; int numData; double logE0; double invLD; double* xdata; if (TisSBModel) { numData = theElectronData_d->fElemSelectorNumBremSBData[imc]; logE0 = theElectronData_d->fElemSelectorBremSBAuxData[2*imc]; invLD = theElectronData_d->fElemSelectorBremSBAuxData[2*imc+1]; xdata = &(theElectronData_d->fElemSelectorBremSBData[i0]); } else { numData = theElectronData_d->fElemSelectorNumBremRBData[imc]; logE0 = theElectronData_d->fElemSelectorBremRBAuxData[2*imc]; invLD = theElectronData_d->fElemSelectorBremRBAuxData[2*imc+1]; xdata = &(theElectronData_d->fElemSelectorBremRBData[i0]); } // make sure that $x \in [x[0],x[ndata-1]]$ double xv = max( xdata[0], min( xdata[ numElem * ( numData - 1 ) ], tsInEkin_d[ tid ] ) ); // compute the lowerindex of the x bin (idx \in [0,N-2] will be guaranted) int idxEkin = __double2int_rz( max( 0.0, min( (tsInLogEkin_d[tid] -logE0) * invLD, numData - 2.0 ) ) ); // the real index position is idxEkin x numElem int indx0 = idxEkin * numElem; int indx1 = indx0 + numElem; // linear interpolation double x1 = xdata[ indx0++ ]; double x2 = xdata[ indx1++ ]; double dl = x2-x1; double b = max( 0., min( 1., (xv - x1) / dl ) ); int theElemIndex = 0; // discrete probabilities, for selecting a given element, are from element index of 0 till #elements-2 // NOTE: non-deterministic while loop can be turned to deterministic sampling tables for the underlying // discrete distributions (using Alias table) and combining them with statistical interpolation. 
while ( theElemIndex < numElem-1 && tsInRngVals_d[tid] > xdata[indx0+theElemIndex]+b*(xdata[indx1+theElemIndex]-xdata[indx0+theElemIndex])) { ++theElemIndex; } tsOutRes_d[tid] = theElemIndex; } } } void TestElemSelectorDataOnDevice ( const struct G4HepEmData* hepEmData, int* tsInImc_h, double* tsInEkin_h, double* tsInLogEkin_h, double* tsInRngVals_h, int* tsOutRes_h, int numTestCases, int indxModel, bool iselectron ) { // // --- Allocate device side memory for the input/output data and copy all input // data from host to device int* tsInImc_d = nullptr; double* tsInEkin_d = nullptr; double* tsInLogEkin_d = nullptr; double* tsInRngVals_d = nullptr; int* tsOutRes_d = nullptr; // gpuErrchk ( cudaMalloc ( &tsInImc_d, sizeof( int ) * numTestCases ) ); gpuErrchk ( cudaMalloc ( &tsInEkin_d, sizeof( double ) * numTestCases ) ); gpuErrchk ( cudaMalloc ( &tsInLogEkin_d, sizeof( double ) * numTestCases ) ); gpuErrchk ( cudaMalloc ( &tsInRngVals_d, sizeof( double ) * numTestCases ) ); gpuErrchk ( cudaMalloc ( &tsOutRes_d, sizeof( int ) * numTestCases ) ); // // --- Copy the input data from host to device (test material-cut index, ekin and log-ekin arrays) gpuErrchk ( cudaMemcpy ( tsInImc_d, tsInImc_h, sizeof( int ) * numTestCases, cudaMemcpyHostToDevice) ); gpuErrchk ( cudaMemcpy ( tsInEkin_d, tsInEkin_h, sizeof( double ) * numTestCases, cudaMemcpyHostToDevice) ); gpuErrchk ( cudaMemcpy ( tsInLogEkin_d, tsInLogEkin_h, sizeof( double ) * numTestCases, cudaMemcpyHostToDevice) ); gpuErrchk ( cudaMemcpy ( tsInRngVals_d, tsInRngVals_h, sizeof( double ) * numTestCases, cudaMemcpyHostToDevice) ); // // --- Launch the kernels const struct G4HepEmElectronDataOnDevice* theElectronData = iselectron ? 
hepEmData->fTheElectronData_gpu : hepEmData->fThePositronData_gpu; int numThreads = 512; int numBlocks = std::ceil( float(numTestCases)/numThreads ); switch (indxModel) { case 0: // not used break; case 1: TestElemSelectorDataBremKernel < true > <<< numBlocks, numThreads >>> (theElectronData, tsInImc_d, tsInEkin_d, tsInLogEkin_d, tsInRngVals_d, tsOutRes_d, numTestCases ); break; case 2: TestElemSelectorDataBremKernel < false > <<< numBlocks, numThreads >>> (theElectronData, tsInImc_d, tsInEkin_d, tsInLogEkin_d, tsInRngVals_d, tsOutRes_d, numTestCases ); break; } // // --- Synchronize to make sure that completed on the device cudaDeviceSynchronize(); // // --- Copy the results from the device to the host gpuErrchk ( cudaMemcpy ( tsOutRes_h, tsOutRes_d, sizeof( int ) * numTestCases, cudaMemcpyDeviceToHost ) ); // // --- Free all dynamically allocated (device side) memory cudaFree ( tsInImc_d ); cudaFree ( tsInEkin_d ); cudaFree ( tsInLogEkin_d ); cudaFree ( tsInRngVals_d ); cudaFree ( tsOutRes_d ); }
1e49f5b490fd9385f8c01847d151e68d597ef8d7.hip
// !!! This is a file automatically generated by hipify!!! #include "WTAddKernel.cuh" void WTAdditionKernel(WTAll &argWT, Document &argDoc) { unsigned int* deviceCounter; hipMalloc(&deviceCounter, sizeof(unsigned int)); hipMemset(deviceCounter, 0, sizeof(unsigned int)); int numOfWordS = argWT.blockCount + argWT.warpCount; int numOfWordD = argWT.wordLength - argWT.numOfWordS; /*int blockCounter = 0; int iterBlock = (argWT.numOfWordS - 1) / GridDim + 1; int* deviceWordLength; int numOfWordD = argWT.wordLength-argWT.numOfWordS;*/ /*hipMalloc((void**)&deviceWordLength, (1) * sizeof(int)); hipMemcpy(deviceWordLength, &argWT.numOfWordS, sizeof(int),hipMemcpyHostToDevice);*/ //for (int i = 0; i < iterBlock; i++) { // hipMemcpy(argDoc.d_blockCounter, &blockCounter, (1) * sizeof(int), hipMemcpyHostToDevice); sparseMatrixAdd << <GridDim, BlockDim >> >(argWT.deviceWTCount, argWT.deviceWTOffset, argWT.deviceNZWTCount, argWT.deviceWTIndex, argWT.deviceWTValue, argWT.deviceChunkWTCount, argWT.deviceChunkWTOffset, argWT.deviceChunkNZWTCount, argWT.deviceChunkWTIndex, argWT.deviceChunkWTValue, argDoc.d_dense, argWT.numOfWordS, deviceCounter ,argWT.deviceWTRowSum, numOfWordD); H_ERR(hipDeviceSynchronize()); /* blockCounter++; }*/ }
1e49f5b490fd9385f8c01847d151e68d597ef8d7.cu
#include "WTAddKernel.cuh" void WTAdditionKernel(WTAll &argWT, Document &argDoc) { unsigned int* deviceCounter; cudaMalloc(&deviceCounter, sizeof(unsigned int)); cudaMemset(deviceCounter, 0, sizeof(unsigned int)); int numOfWordS = argWT.blockCount + argWT.warpCount; int numOfWordD = argWT.wordLength - argWT.numOfWordS; /*int blockCounter = 0; int iterBlock = (argWT.numOfWordS - 1) / GridDim + 1; int* deviceWordLength; int numOfWordD = argWT.wordLength-argWT.numOfWordS;*/ /*cudaMalloc((void**)&deviceWordLength, (1) * sizeof(int)); cudaMemcpy(deviceWordLength, &argWT.numOfWordS, sizeof(int),cudaMemcpyHostToDevice);*/ //for (int i = 0; i < iterBlock; i++) { // cudaMemcpy(argDoc.d_blockCounter, &blockCounter, (1) * sizeof(int), cudaMemcpyHostToDevice); sparseMatrixAdd << <GridDim, BlockDim >> >(argWT.deviceWTCount, argWT.deviceWTOffset, argWT.deviceNZWTCount, argWT.deviceWTIndex, argWT.deviceWTValue, argWT.deviceChunkWTCount, argWT.deviceChunkWTOffset, argWT.deviceChunkNZWTCount, argWT.deviceChunkWTIndex, argWT.deviceChunkWTValue, argDoc.d_dense, argWT.numOfWordS, deviceCounter ,argWT.deviceWTRowSum, numOfWordD); H_ERR(cudaDeviceSynchronize()); /* blockCounter++; }*/ }
7899d02cb8ab64d123978f2f5a89cbd7edcf31ad.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. #include <pybind11/pybind11.h> #include <iostream> #include <torch/extension.h> #include <functional> #include <pybind11/stl.h> #include <pybind11/functional.h> #include <torch/extension.h> #include <ATen/hip/Exceptions.h> #include <ATen/hip/detail/DeviceThreadHandles.h> #include <ATen/hip/HIPContext.h> #include "utils.h" __device__ int bisect_index(const int* values, int len, int needle) { int a = 0, b = len; while (b > a + 1) { int m = (a + b) / 2; if(values[m] > needle) { b = m; } else { a = m; } } if(values[a] != needle) { printf("Error!! needle %d not found in array of length %d\n", needle, len); } return a; } __global__ void mult_MtM_kernel(int batchSize, int M_numRows, int M_nnz, const int* M_rowPtr, const int* M_colInd, const double* Ms_val, int MtM_numRows, int MtM_nnz, const int* MtM_rowPtr, const int* MtM_colInd, double* MtMs_val) { int row = blockIdx.x * blockDim.x + threadIdx.x; int batchIndex = blockIdx.y * blockDim.y + threadIdx.y; if(batchIndex >= batchSize || row >= M_numRows) { return; } // matrices are in CSR format: // rowPtr determines begin/end of row data, // colInd determines the column index int srcRow_offset = M_rowPtr[row]; int srcRow_len = M_rowPtr[row+1] - srcRow_offset; const int* srcRow_colInd = M_colInd + srcRow_offset; const double* srcRow_val = Ms_val + batchIndex * M_nnz + srcRow_offset; double* MtMs_batch_val = MtMs_val + batchIndex * MtM_nnz; for(int i = 0; i < srcRow_len; i++) { int dstRow = srcRow_colInd[i]; int dstRow_offset = MtM_rowPtr[dstRow]; int dstRow_len = MtM_rowPtr[dstRow + 1] - MtM_rowPtr[dstRow]; const int* dstRow_colInd = MtM_colInd + dstRow_offset; double* dstRow_val = MtMs_batch_val + dstRow_offset; for(int j = 0; j < srcRow_len; j++) { double 
val = srcRow_val[i] * srcRow_val[j]; int dstCol = srcRow_colInd[j]; // The result has a different sparsity pattern. Therefore we have to // identify where the destination's `colInd` is `dstCol`, working // in row of order `dstRow` in destination int positionInDstRow = bisect_index(dstRow_colInd, dstRow_len, dstCol); atomicAdd(dstRow_val + positionInDstRow, val); } } } torch::Tensor mult_MtM(int batchSize, const torch::Tensor& M_rowPtr, const torch::Tensor& M_colInd, const torch::Tensor& Ms_val, const torch::Tensor& MtM_rowPtr, const torch::Tensor& MtM_colInd) { int64_t M_numRows = M_rowPtr.size(0) - 1; int64_t M_nnz = M_colInd.size(0); THESEUS_TENSOR_CHECK_CUDA(M_rowPtr, 1, M_rowPtr.size(0), torch::kInt); THESEUS_TENSOR_CHECK_CUDA(M_colInd, 1, M_colInd.size(0), torch::kInt); // TODO: add support for float THESEUS_TENSOR_CHECK_CUDA(Ms_val, 2, batchSize, torch::kDouble); TORCH_CHECK(Ms_val.size(1) == M_nnz); int64_t MtM_numRows = MtM_rowPtr.size(0) - 1; int64_t MtM_nnz = MtM_colInd.size(0); TORCH_CHECK(MtM_rowPtr.device().is_cuda()); TORCH_CHECK(MtM_colInd.device().is_cuda()); TORCH_CHECK(MtM_rowPtr.dim() == 1); TORCH_CHECK(MtM_colInd.dim() == 1); auto xOptions = torch::TensorOptions().dtype(torch::kDouble).device(Ms_val.device()); torch::Tensor MtMs_val = torch::zeros({(long)batchSize, (long)MtM_nnz}, xOptions); // TODO: do experiments on choice of work group size dim3 wgs(1, 16); dim3 numBlocks((M_numRows + wgs.x - 1) / wgs.x, (batchSize + wgs.y - 1) / wgs.y); M_rowPtr.data_ptr<int>(); M_colInd.data_ptr<int>(); Ms_val.data_ptr<double>(); MtM_rowPtr.data_ptr<int>(); MtM_colInd.data_ptr<int>(); MtMs_val.data_ptr<double>(); // TODO: set stream according to torch hipLaunchKernelGGL(( mult_MtM_kernel), dim3(numBlocks), dim3(wgs), 0, 0, batchSize, M_numRows, M_nnz, M_rowPtr.data_ptr<int>(), M_colInd.data_ptr<int>(), Ms_val.data_ptr<double>(), MtM_numRows, MtM_nnz, MtM_rowPtr.data_ptr<int>(), MtM_colInd.data_ptr<int>(), MtMs_val.data_ptr<double>()); return MtMs_val; } 
template<typename INT> __global__ void mat_vec_kernel(int batchSize, INT M_numRows, INT M_numCols, INT M_nnz, const INT* M_rowPtr, const INT* M_colInd, const double* Ms_val, const double* vec, double* retv) { INT row = blockIdx.x * blockDim.x + threadIdx.x; int batchIndex = blockIdx.y * blockDim.y + threadIdx.y; if(batchIndex >= batchSize || row >= M_numRows) { return; } INT srcRow_offset = M_rowPtr[row]; INT srcRow_len = M_rowPtr[row+1] - srcRow_offset; const INT* srcRow_colInd = M_colInd + srcRow_offset; const double* srcRow_val = Ms_val + batchIndex * M_nnz + srcRow_offset; const double* srcVec = vec + batchIndex * M_numCols; double value = 0.0; for(INT i = 0; i < srcRow_len; i++) { value += srcRow_val[i] * srcVec[srcRow_colInd[i]]; } *(retv + batchIndex * M_numRows + row) = value; } torch::Tensor mat_vec(int batchSize, int64_t M_numCols, const torch::Tensor& M_rowPtr, const torch::Tensor& M_colInd, const torch::Tensor& Ms_val, const torch::Tensor& vec) { int64_t M_numRows = M_rowPtr.size(0) - 1; int64_t M_nnz = M_colInd.size(0); TORCH_CHECK(M_rowPtr.device().is_cuda()); TORCH_CHECK(M_rowPtr.dim() == 1); TORCH_CHECK(M_rowPtr.dtype() == torch::kInt || M_rowPtr.dtype() == torch::kInt64); THESEUS_TENSOR_CHECK_CUDA(M_colInd, 1, M_colInd.size(0), M_rowPtr.dtype()); // TODO: add support for float THESEUS_TENSOR_CHECK_CUDA(Ms_val, 2, batchSize, torch::kDouble); TORCH_CHECK(Ms_val.size(1) == M_nnz); THESEUS_TENSOR_CHECK_CUDA(vec, 2, batchSize, vec.dtype()); auto xOptions = torch::TensorOptions().dtype(torch::kDouble).device(Ms_val.device()); torch::Tensor retv = torch::empty({(long)batchSize, (long)M_numRows}, xOptions); // TODO: do experiments on choice of work group size dim3 wgs(1, 16); dim3 numBlocks((M_numRows + wgs.x - 1) / wgs.x, (batchSize + wgs.y - 1) / wgs.y); if(M_rowPtr.dtype() == torch::kInt) { hipLaunchKernelGGL(( mat_vec_kernel<int>), dim3(numBlocks), dim3(wgs), 0, 0, batchSize, M_numRows, M_numCols, M_nnz, M_rowPtr.data_ptr<int>(), 
M_colInd.data_ptr<int>(), Ms_val.data_ptr<double>(), vec.data_ptr<double>(), retv.data_ptr<double>()); } else { hipLaunchKernelGGL(( mat_vec_kernel<int64_t>), dim3(numBlocks), dim3(wgs), 0, 0, batchSize, M_numRows, M_numCols, M_nnz, M_rowPtr.data_ptr<int64_t>(), M_colInd.data_ptr<int64_t>(), Ms_val.data_ptr<double>(), vec.data_ptr<double>(), retv.data_ptr<double>()); } return retv; } template<typename INT> __global__ void tmat_vec_kernel(int batchSize, INT M_numRows, INT M_numCols, INT M_nnz, const INT* M_rowPtr, const INT* M_colInd, const double* Ms_val, const double* vec, double* retv) { INT row = blockIdx.x * blockDim.x + threadIdx.x; int batchIndex = blockIdx.y * blockDim.y + threadIdx.y; if(batchIndex >= batchSize || row >= M_numRows) { return; } INT srcRow_offset = M_rowPtr[row]; INT srcRow_len = M_rowPtr[row+1] - srcRow_offset; const INT* srcRow_colInd = M_colInd + srcRow_offset; const double* srcRow_val = Ms_val + batchIndex * M_nnz + srcRow_offset; double vecVal = vec[batchIndex * M_numRows + row]; double* dstVec = retv + batchIndex * M_numCols; for(INT i = 0; i < srcRow_len; i++) { atomicAdd(dstVec + srcRow_colInd[i], vecVal * srcRow_val[i]); } } torch::Tensor tmat_vec(int batchSize, int64_t M_numCols, const torch::Tensor& M_rowPtr, const torch::Tensor& M_colInd, const torch::Tensor& Ms_val, const torch::Tensor& vec) { int64_t M_numRows = M_rowPtr.size(0) - 1; int64_t M_nnz = M_colInd.size(0); TORCH_CHECK(M_rowPtr.device().is_cuda()); TORCH_CHECK(M_rowPtr.dtype() == torch::kInt || M_rowPtr.dtype() == torch::kInt64); TORCH_CHECK(M_rowPtr.dim() == 1); THESEUS_TENSOR_CHECK_CUDA(M_colInd, 1, M_colInd.size(0), M_rowPtr.dtype()); // TODO: add support for float THESEUS_TENSOR_CHECK_CUDA(Ms_val, 2, batchSize, torch::kDouble); TORCH_CHECK(Ms_val.size(1) == M_nnz); THESEUS_TENSOR_CHECK_CUDA(vec, 2, batchSize, vec.dtype()); TORCH_CHECK(vec.size(1) == M_numRows); auto xOptions = torch::TensorOptions().dtype(torch::kDouble).device(Ms_val.device()); torch::Tensor retv 
= torch::zeros({(long)batchSize, (long)M_numCols}, xOptions); // TODO: do experiments on choice of work group size dim3 wgs(1, 16); dim3 numBlocks((M_numRows + wgs.x - 1) / wgs.x, (batchSize + wgs.y - 1) / wgs.y); if(M_rowPtr.dtype() == torch::kInt) { hipLaunchKernelGGL(( tmat_vec_kernel<int>), dim3(numBlocks), dim3(wgs), 0, 0, batchSize, M_numRows, M_numCols, M_nnz, M_rowPtr.data_ptr<int>(), M_colInd.data_ptr<int>(), Ms_val.data_ptr<double>(), vec.data_ptr<double>(), retv.data_ptr<double>()); } else { hipLaunchKernelGGL(( tmat_vec_kernel<int64_t>), dim3(numBlocks), dim3(wgs), 0, 0, batchSize, M_numRows, M_numCols, M_nnz, M_rowPtr.data_ptr<int64_t>(), M_colInd.data_ptr<int64_t>(), Ms_val.data_ptr<double>(), vec.data_ptr<double>(), retv.data_ptr<double>()); } return retv; } __global__ void apply_damping_kernel(int batchSize, int M_numRows, int M_numCols, int M_nnz, const int* M_rowPtr, const int* M_colInd, double* Ms_val, double* alpha, double* beta) { int row = blockIdx.x * blockDim.x + threadIdx.x; int batchIndex = blockIdx.y * blockDim.y + threadIdx.y; if(batchIndex >= batchSize || row >= M_numRows) { return; } int srcRow_offset = M_rowPtr[row]; int srcRow_len = M_rowPtr[row+1] - srcRow_offset; const int* srcRow_colInd = M_colInd + srcRow_offset; double* srcRow_val = Ms_val + batchIndex * M_nnz + srcRow_offset; for(int i = 0; i < srcRow_len; i++) { if(srcRow_colInd[i] == row) { srcRow_val[i] += alpha[batchIndex] * srcRow_val[i] + beta[batchIndex]; } } } void apply_damping(int batchSize, int M_numCols, const torch::Tensor& M_rowPtr, const torch::Tensor& M_colInd, const torch::Tensor& Ms_val, const torch::Tensor& alpha, const torch::Tensor& beta) { int64_t M_numRows = M_rowPtr.size(0) - 1; int64_t M_nnz = M_colInd.size(0); THESEUS_TENSOR_CHECK_CUDA(M_rowPtr, 1, M_rowPtr.size(0), torch::kInt); THESEUS_TENSOR_CHECK_CUDA(M_colInd, 1, M_colInd.size(0), torch::kInt); // TODO: add support for float THESEUS_TENSOR_CHECK_CUDA(Ms_val, 2, batchSize, torch::kDouble); 
TORCH_CHECK(Ms_val.size(1) == M_nnz); THESEUS_TENSOR_CHECK_CUDA(alpha, 1, batchSize, torch::kDouble); THESEUS_TENSOR_CHECK_CUDA(beta, 1, batchSize, torch::kDouble); // TODO: do experiments on choice of work group size dim3 wgs(1, 16); dim3 numBlocks((M_numRows + wgs.x - 1) / wgs.x, (batchSize + wgs.y - 1) / wgs.y); hipLaunchKernelGGL(( apply_damping_kernel), dim3(numBlocks), dim3(wgs), 0, 0, batchSize, M_numRows, M_numCols, M_nnz, M_rowPtr.data_ptr<int>(), M_colInd.data_ptr<int>(), Ms_val.data_ptr<double>(), alpha.data_ptr<double>(), beta.data_ptr<double>()); } PYBIND11_MODULE(mat_mult, m) { m.doc() = "Python bindings for batched mat operations"; m.def("mult_MtM", &mult_MtM, "Batched multiplication of mat by transpose: Mt * M\n" "The sparse structure of the result must be computed\n" "beforehand and supplied as MtM_rowPtr, MtM_colInd", py::arg("batch_size"), py::arg("M_rowPtr"), py::arg("M_colInd"), py::arg("Ms_val"), py::arg("MtM_rowPtr"), py::arg("MtM_colInd") ); m.def("mat_vec", &mat_vec, "Batched multiplication of mat by vector: M * v", py::arg("batch_size"), py::arg("M_numCols"), py::arg("M_rowPtr"), py::arg("M_colInd"), py::arg("Ms_val"), py::arg("vec") ); m.def("tmat_vec", &tmat_vec, "Batched multiplication of transposed mat by vector: Mt * v", py::arg("batch_size"), py::arg("M_numCols"), py::arg("M_rowPtr"), py::arg("M_colInd"), py::arg("Ms_val"), py::arg("vec") ); m.def("apply_damping", &apply_damping, "M.diagonal() += M.diagonal() * alpha + beta", py::arg("batch_size"), py::arg("M_numCols"), py::arg("M_rowPtr"), py::arg("M_colInd"), py::arg("Ms_val"), py::arg("alpha"), py::arg("beta") ); };
7899d02cb8ab64d123978f2f5a89cbd7edcf31ad.cu
// Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. #include <pybind11/pybind11.h> #include <iostream> #include <torch/extension.h> #include <functional> #include <pybind11/stl.h> #include <pybind11/functional.h> #include <torch/extension.h> #include <ATen/cuda/Exceptions.h> #include <ATen/cuda/detail/DeviceThreadHandles.h> #include <ATen/cuda/CUDAContext.h> #include "utils.h" __device__ int bisect_index(const int* values, int len, int needle) { int a = 0, b = len; while (b > a + 1) { int m = (a + b) / 2; if(values[m] > needle) { b = m; } else { a = m; } } if(values[a] != needle) { printf("Error!! needle %d not found in array of length %d\n", needle, len); } return a; } __global__ void mult_MtM_kernel(int batchSize, int M_numRows, int M_nnz, const int* M_rowPtr, const int* M_colInd, const double* Ms_val, int MtM_numRows, int MtM_nnz, const int* MtM_rowPtr, const int* MtM_colInd, double* MtMs_val) { int row = blockIdx.x * blockDim.x + threadIdx.x; int batchIndex = blockIdx.y * blockDim.y + threadIdx.y; if(batchIndex >= batchSize || row >= M_numRows) { return; } // matrices are in CSR format: // rowPtr determines begin/end of row data, // colInd determines the column index int srcRow_offset = M_rowPtr[row]; int srcRow_len = M_rowPtr[row+1] - srcRow_offset; const int* srcRow_colInd = M_colInd + srcRow_offset; const double* srcRow_val = Ms_val + batchIndex * M_nnz + srcRow_offset; double* MtMs_batch_val = MtMs_val + batchIndex * MtM_nnz; for(int i = 0; i < srcRow_len; i++) { int dstRow = srcRow_colInd[i]; int dstRow_offset = MtM_rowPtr[dstRow]; int dstRow_len = MtM_rowPtr[dstRow + 1] - MtM_rowPtr[dstRow]; const int* dstRow_colInd = MtM_colInd + dstRow_offset; double* dstRow_val = MtMs_batch_val + dstRow_offset; for(int j = 0; j < srcRow_len; j++) { double val = srcRow_val[i] * srcRow_val[j]; int dstCol = srcRow_colInd[j]; // The result has 
a different sparsity pattern. Therefore we have to // identify where the destination's `colInd` is `dstCol`, working // in row of order `dstRow` in destination int positionInDstRow = bisect_index(dstRow_colInd, dstRow_len, dstCol); atomicAdd(dstRow_val + positionInDstRow, val); } } } torch::Tensor mult_MtM(int batchSize, const torch::Tensor& M_rowPtr, const torch::Tensor& M_colInd, const torch::Tensor& Ms_val, const torch::Tensor& MtM_rowPtr, const torch::Tensor& MtM_colInd) { int64_t M_numRows = M_rowPtr.size(0) - 1; int64_t M_nnz = M_colInd.size(0); THESEUS_TENSOR_CHECK_CUDA(M_rowPtr, 1, M_rowPtr.size(0), torch::kInt); THESEUS_TENSOR_CHECK_CUDA(M_colInd, 1, M_colInd.size(0), torch::kInt); // TODO: add support for float THESEUS_TENSOR_CHECK_CUDA(Ms_val, 2, batchSize, torch::kDouble); TORCH_CHECK(Ms_val.size(1) == M_nnz); int64_t MtM_numRows = MtM_rowPtr.size(0) - 1; int64_t MtM_nnz = MtM_colInd.size(0); TORCH_CHECK(MtM_rowPtr.device().is_cuda()); TORCH_CHECK(MtM_colInd.device().is_cuda()); TORCH_CHECK(MtM_rowPtr.dim() == 1); TORCH_CHECK(MtM_colInd.dim() == 1); auto xOptions = torch::TensorOptions().dtype(torch::kDouble).device(Ms_val.device()); torch::Tensor MtMs_val = torch::zeros({(long)batchSize, (long)MtM_nnz}, xOptions); // TODO: do experiments on choice of work group size dim3 wgs(1, 16); dim3 numBlocks((M_numRows + wgs.x - 1) / wgs.x, (batchSize + wgs.y - 1) / wgs.y); M_rowPtr.data_ptr<int>(); M_colInd.data_ptr<int>(); Ms_val.data_ptr<double>(); MtM_rowPtr.data_ptr<int>(); MtM_colInd.data_ptr<int>(); MtMs_val.data_ptr<double>(); // TODO: set stream according to torch mult_MtM_kernel<<<numBlocks, wgs>>>(batchSize, M_numRows, M_nnz, M_rowPtr.data_ptr<int>(), M_colInd.data_ptr<int>(), Ms_val.data_ptr<double>(), MtM_numRows, MtM_nnz, MtM_rowPtr.data_ptr<int>(), MtM_colInd.data_ptr<int>(), MtMs_val.data_ptr<double>()); return MtMs_val; } template<typename INT> __global__ void mat_vec_kernel(int batchSize, INT M_numRows, INT M_numCols, INT M_nnz, const INT* 
M_rowPtr, const INT* M_colInd, const double* Ms_val, const double* vec, double* retv) { INT row = blockIdx.x * blockDim.x + threadIdx.x; int batchIndex = blockIdx.y * blockDim.y + threadIdx.y; if(batchIndex >= batchSize || row >= M_numRows) { return; } INT srcRow_offset = M_rowPtr[row]; INT srcRow_len = M_rowPtr[row+1] - srcRow_offset; const INT* srcRow_colInd = M_colInd + srcRow_offset; const double* srcRow_val = Ms_val + batchIndex * M_nnz + srcRow_offset; const double* srcVec = vec + batchIndex * M_numCols; double value = 0.0; for(INT i = 0; i < srcRow_len; i++) { value += srcRow_val[i] * srcVec[srcRow_colInd[i]]; } *(retv + batchIndex * M_numRows + row) = value; } torch::Tensor mat_vec(int batchSize, int64_t M_numCols, const torch::Tensor& M_rowPtr, const torch::Tensor& M_colInd, const torch::Tensor& Ms_val, const torch::Tensor& vec) { int64_t M_numRows = M_rowPtr.size(0) - 1; int64_t M_nnz = M_colInd.size(0); TORCH_CHECK(M_rowPtr.device().is_cuda()); TORCH_CHECK(M_rowPtr.dim() == 1); TORCH_CHECK(M_rowPtr.dtype() == torch::kInt || M_rowPtr.dtype() == torch::kInt64); THESEUS_TENSOR_CHECK_CUDA(M_colInd, 1, M_colInd.size(0), M_rowPtr.dtype()); // TODO: add support for float THESEUS_TENSOR_CHECK_CUDA(Ms_val, 2, batchSize, torch::kDouble); TORCH_CHECK(Ms_val.size(1) == M_nnz); THESEUS_TENSOR_CHECK_CUDA(vec, 2, batchSize, vec.dtype()); auto xOptions = torch::TensorOptions().dtype(torch::kDouble).device(Ms_val.device()); torch::Tensor retv = torch::empty({(long)batchSize, (long)M_numRows}, xOptions); // TODO: do experiments on choice of work group size dim3 wgs(1, 16); dim3 numBlocks((M_numRows + wgs.x - 1) / wgs.x, (batchSize + wgs.y - 1) / wgs.y); if(M_rowPtr.dtype() == torch::kInt) { mat_vec_kernel<int><<<numBlocks, wgs>>>(batchSize, M_numRows, M_numCols, M_nnz, M_rowPtr.data_ptr<int>(), M_colInd.data_ptr<int>(), Ms_val.data_ptr<double>(), vec.data_ptr<double>(), retv.data_ptr<double>()); } else { mat_vec_kernel<int64_t><<<numBlocks, wgs>>>(batchSize, M_numRows, 
M_numCols, M_nnz, M_rowPtr.data_ptr<int64_t>(), M_colInd.data_ptr<int64_t>(), Ms_val.data_ptr<double>(), vec.data_ptr<double>(), retv.data_ptr<double>()); } return retv; } template<typename INT> __global__ void tmat_vec_kernel(int batchSize, INT M_numRows, INT M_numCols, INT M_nnz, const INT* M_rowPtr, const INT* M_colInd, const double* Ms_val, const double* vec, double* retv) { INT row = blockIdx.x * blockDim.x + threadIdx.x; int batchIndex = blockIdx.y * blockDim.y + threadIdx.y; if(batchIndex >= batchSize || row >= M_numRows) { return; } INT srcRow_offset = M_rowPtr[row]; INT srcRow_len = M_rowPtr[row+1] - srcRow_offset; const INT* srcRow_colInd = M_colInd + srcRow_offset; const double* srcRow_val = Ms_val + batchIndex * M_nnz + srcRow_offset; double vecVal = vec[batchIndex * M_numRows + row]; double* dstVec = retv + batchIndex * M_numCols; for(INT i = 0; i < srcRow_len; i++) { atomicAdd(dstVec + srcRow_colInd[i], vecVal * srcRow_val[i]); } } torch::Tensor tmat_vec(int batchSize, int64_t M_numCols, const torch::Tensor& M_rowPtr, const torch::Tensor& M_colInd, const torch::Tensor& Ms_val, const torch::Tensor& vec) { int64_t M_numRows = M_rowPtr.size(0) - 1; int64_t M_nnz = M_colInd.size(0); TORCH_CHECK(M_rowPtr.device().is_cuda()); TORCH_CHECK(M_rowPtr.dtype() == torch::kInt || M_rowPtr.dtype() == torch::kInt64); TORCH_CHECK(M_rowPtr.dim() == 1); THESEUS_TENSOR_CHECK_CUDA(M_colInd, 1, M_colInd.size(0), M_rowPtr.dtype()); // TODO: add support for float THESEUS_TENSOR_CHECK_CUDA(Ms_val, 2, batchSize, torch::kDouble); TORCH_CHECK(Ms_val.size(1) == M_nnz); THESEUS_TENSOR_CHECK_CUDA(vec, 2, batchSize, vec.dtype()); TORCH_CHECK(vec.size(1) == M_numRows); auto xOptions = torch::TensorOptions().dtype(torch::kDouble).device(Ms_val.device()); torch::Tensor retv = torch::zeros({(long)batchSize, (long)M_numCols}, xOptions); // TODO: do experiments on choice of work group size dim3 wgs(1, 16); dim3 numBlocks((M_numRows + wgs.x - 1) / wgs.x, (batchSize + wgs.y - 1) / wgs.y); 
if(M_rowPtr.dtype() == torch::kInt) { tmat_vec_kernel<int><<<numBlocks, wgs>>>(batchSize, M_numRows, M_numCols, M_nnz, M_rowPtr.data_ptr<int>(), M_colInd.data_ptr<int>(), Ms_val.data_ptr<double>(), vec.data_ptr<double>(), retv.data_ptr<double>()); } else { tmat_vec_kernel<int64_t><<<numBlocks, wgs>>>(batchSize, M_numRows, M_numCols, M_nnz, M_rowPtr.data_ptr<int64_t>(), M_colInd.data_ptr<int64_t>(), Ms_val.data_ptr<double>(), vec.data_ptr<double>(), retv.data_ptr<double>()); } return retv; } __global__ void apply_damping_kernel(int batchSize, int M_numRows, int M_numCols, int M_nnz, const int* M_rowPtr, const int* M_colInd, double* Ms_val, double* alpha, double* beta) { int row = blockIdx.x * blockDim.x + threadIdx.x; int batchIndex = blockIdx.y * blockDim.y + threadIdx.y; if(batchIndex >= batchSize || row >= M_numRows) { return; } int srcRow_offset = M_rowPtr[row]; int srcRow_len = M_rowPtr[row+1] - srcRow_offset; const int* srcRow_colInd = M_colInd + srcRow_offset; double* srcRow_val = Ms_val + batchIndex * M_nnz + srcRow_offset; for(int i = 0; i < srcRow_len; i++) { if(srcRow_colInd[i] == row) { srcRow_val[i] += alpha[batchIndex] * srcRow_val[i] + beta[batchIndex]; } } } void apply_damping(int batchSize, int M_numCols, const torch::Tensor& M_rowPtr, const torch::Tensor& M_colInd, const torch::Tensor& Ms_val, const torch::Tensor& alpha, const torch::Tensor& beta) { int64_t M_numRows = M_rowPtr.size(0) - 1; int64_t M_nnz = M_colInd.size(0); THESEUS_TENSOR_CHECK_CUDA(M_rowPtr, 1, M_rowPtr.size(0), torch::kInt); THESEUS_TENSOR_CHECK_CUDA(M_colInd, 1, M_colInd.size(0), torch::kInt); // TODO: add support for float THESEUS_TENSOR_CHECK_CUDA(Ms_val, 2, batchSize, torch::kDouble); TORCH_CHECK(Ms_val.size(1) == M_nnz); THESEUS_TENSOR_CHECK_CUDA(alpha, 1, batchSize, torch::kDouble); THESEUS_TENSOR_CHECK_CUDA(beta, 1, batchSize, torch::kDouble); // TODO: do experiments on choice of work group size dim3 wgs(1, 16); dim3 numBlocks((M_numRows + wgs.x - 1) / wgs.x, (batchSize + 
wgs.y - 1) / wgs.y); apply_damping_kernel<<<numBlocks, wgs>>>(batchSize, M_numRows, M_numCols, M_nnz, M_rowPtr.data_ptr<int>(), M_colInd.data_ptr<int>(), Ms_val.data_ptr<double>(), alpha.data_ptr<double>(), beta.data_ptr<double>()); } PYBIND11_MODULE(mat_mult, m) { m.doc() = "Python bindings for batched mat operations"; m.def("mult_MtM", &mult_MtM, "Batched multiplication of mat by transpose: Mt * M\n" "The sparse structure of the result must be computed\n" "beforehand and supplied as MtM_rowPtr, MtM_colInd", py::arg("batch_size"), py::arg("M_rowPtr"), py::arg("M_colInd"), py::arg("Ms_val"), py::arg("MtM_rowPtr"), py::arg("MtM_colInd") ); m.def("mat_vec", &mat_vec, "Batched multiplication of mat by vector: M * v", py::arg("batch_size"), py::arg("M_numCols"), py::arg("M_rowPtr"), py::arg("M_colInd"), py::arg("Ms_val"), py::arg("vec") ); m.def("tmat_vec", &tmat_vec, "Batched multiplication of transposed mat by vector: Mt * v", py::arg("batch_size"), py::arg("M_numCols"), py::arg("M_rowPtr"), py::arg("M_colInd"), py::arg("Ms_val"), py::arg("vec") ); m.def("apply_damping", &apply_damping, "M.diagonal() += M.diagonal() * alpha + beta", py::arg("batch_size"), py::arg("M_numCols"), py::arg("M_rowPtr"), py::arg("M_colInd"), py::arg("Ms_val"), py::arg("alpha"), py::arg("beta") ); };
d53c251ca9c6d821e93851854afd50a4fc332410.hip
// !!! This is a file automatically generated by hipify!!! #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/sequence.h> #include "cuda_complex.hpp" #include "stats.hpp" #ifdef SAVE_IMAGE #include <SFML/Graphics.hpp> #endif typedef float Real; typedef complex<Real> Complex; std::ostream& operator<<(std::ostream& out, Complex const& z) { return out << "(" << z.real() << ";" << z.imag() << ")"; } typedef std::pair<Complex, Complex> ComplexRange; std::ostream& operator<<(std::ostream& out, ComplexRange const& range) { return out << "{" << range.first << ";" << range.second << "}"; } typedef bool Color; static const Color inSetColor = true; static const Color notInSetColor = false; typedef std::size_t Index; struct Mandelbrot : public thrust::unary_function<Index, Color> { Mandelbrot(std::size_t side, std::size_t maxIterations, ComplexRange const& range) : side(side) , maxIterations(maxIterations) , range(range) { /* - */ } // Perform the set computation void operator()() const { // Create an array on the host system const std::size_t size = side * side; thrust::host_vector<Color> img(size); /* * WARNING : limited memory * * If the computation is too intense (# iterations) it seems the device runs * out of memory faster.. * * Here a naive workaround is implemented : if the size of the set and the resolution * is too high then the work is splitted. * * A better implementation would take account of the free memory on the GPU. */ // Those numbers are really magic.... 
if ((side >= 4000 && side < 10000 && maxIterations >= 4000) || (side >= 10000 && maxIterations >= 500)) { const std::size_t step = 1000000; // Create an array on the device thrust::device_vector<Color> deviceImg(step); for (std::size_t i = 0; i < size; i += step) { // Then, transform the indexes into 'colors' thrust::transform(thrust::counting_iterator<Index>(i), thrust::counting_iterator<Index>(::min(i + step, size)), deviceImg.begin(), *this); // apply op()(Index) // Copy the data to the host memory thrust::copy(deviceImg.begin(), deviceImg.end(), img.begin() + i); } } else { // Create an array on the device thrust::device_vector<Color> deviceImg(size); // Then, transform the indexes into 'colors' thrust::transform(thrust::counting_iterator<Index>(0), thrust::counting_iterator<Index>(size), deviceImg.begin(), *this); // apply op()(Index) // Copy the data to the host memory thrust::copy(deviceImg.begin(), deviceImg.end(), img.begin()); } #ifdef SAVE_IMAGE static std::size_t imgId = 0; // Export it to png sf::Image png; png.create(side, side, sf::Color::White); for (std::size_t x = 0; x < side; ++x) { for (std::size_t y = 0; y < side; ++y) { png.setPixel(x, y, img[y * side + x] == inSetColor ? sf::Color::Black : sf::Color::White); } } std::stringstream filename; filename << "tmp/fractal_" << imgId << "_" << csvdescription() << ".png"; png.saveToFile(filename.str()); ++imgId; #endif } __host__ __device__ Color operator()(Index const& index) { const unsigned int x = index % side; const unsigned int y = index / side; // integer division const Complex c( range.first.real() + x / (side - Real(1.0f)) * (range.second.real() - range.first.real()), range.first.imag() + y / (side - Real(1.0f)) * (range.second.imag() - range.first.imag()) ); Complex z( 0, 0 ); std::size_t iter = 0; for (iter = 0; iter < maxIterations && abs(z) < Real(2.0f); ++iter) { z = z * z + c; } return iter == maxIterations ? 
inSetColor : notInSetColor; } std::string csvdescription() const { std::stringstream ss; ss << side << "," << maxIterations << "," << range; return ss.str(); } std::size_t side, maxIterations; ComplexRange range; }; int main(int, char**) { const std::size_t sides[] = { 100, 200, 400, 800, 1200, 1600, 2000, 4000, 10000 }; const std::size_t sidesCount = 9; const std::size_t iterations[] = { 1, 10, 30, 80, 150, 250, 500, 1000, 2000, 8000 }; const std::size_t iterationsCount = 10; const ComplexRange ranges[] = { ComplexRange( Complex(-1.72, 1.2), Complex(1.0, -1.2) ), ComplexRange( Complex(-0.7, 0), Complex(0.3, -1) ), ComplexRange( Complex(-0.4, -0.5), Complex(0.1, -1) ), ComplexRange( Complex(-0.4, -0.6), Complex(-0.2, -0.8) ), ComplexRange( Complex(-0.24, -0.64), Complex(-0.26, -0.66) ) }; const std::size_t rangesCount = 5; #ifdef SAVE_IMAGE const std::size_t repetitions = 1; #else const std::size_t repetitions = 4; #endif for (std::size_t s = 0; s < sidesCount; ++s) for (std::size_t i = 0; i < iterationsCount; ++i) for (std::size_t r = 0; r < rangesCount; ++r) stats<Mandelbrot, void>(Mandelbrot(sides[s], iterations[i], ranges[r]), iterations[i] >= 1000 && sides[s] >= 2000 ? 1 : repetitions); return 0; }
d53c251ca9c6d821e93851854afd50a4fc332410.cu
#include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/sequence.h> #include "cuda_complex.hpp" #include "stats.hpp" #ifdef SAVE_IMAGE #include <SFML/Graphics.hpp> #endif typedef float Real; typedef complex<Real> Complex; std::ostream& operator<<(std::ostream& out, Complex const& z) { return out << "(" << z.real() << ";" << z.imag() << ")"; } typedef std::pair<Complex, Complex> ComplexRange; std::ostream& operator<<(std::ostream& out, ComplexRange const& range) { return out << "{" << range.first << ";" << range.second << "}"; } typedef bool Color; static const Color inSetColor = true; static const Color notInSetColor = false; typedef std::size_t Index; struct Mandelbrot : public thrust::unary_function<Index, Color> { Mandelbrot(std::size_t side, std::size_t maxIterations, ComplexRange const& range) : side(side) , maxIterations(maxIterations) , range(range) { /* - */ } // Perform the set computation void operator()() const { // Create an array on the host system const std::size_t size = side * side; thrust::host_vector<Color> img(size); /* * WARNING : limited memory * * If the computation is too intense (# iterations) it seems the device runs * out of memory faster.. * * Here a naive workaround is implemented : if the size of the set and the resolution * is too high then the work is splitted. * * A better implementation would take account of the free memory on the GPU. */ // Those numbers are really magic.... 
if ((side >= 4000 && side < 10000 && maxIterations >= 4000) || (side >= 10000 && maxIterations >= 500)) { const std::size_t step = 1000000; // Create an array on the device thrust::device_vector<Color> deviceImg(step); for (std::size_t i = 0; i < size; i += step) { // Then, transform the indexes into 'colors' thrust::transform(thrust::counting_iterator<Index>(i), thrust::counting_iterator<Index>(std::min(i + step, size)), deviceImg.begin(), *this); // apply op()(Index) // Copy the data to the host memory thrust::copy(deviceImg.begin(), deviceImg.end(), img.begin() + i); } } else { // Create an array on the device thrust::device_vector<Color> deviceImg(size); // Then, transform the indexes into 'colors' thrust::transform(thrust::counting_iterator<Index>(0), thrust::counting_iterator<Index>(size), deviceImg.begin(), *this); // apply op()(Index) // Copy the data to the host memory thrust::copy(deviceImg.begin(), deviceImg.end(), img.begin()); } #ifdef SAVE_IMAGE static std::size_t imgId = 0; // Export it to png sf::Image png; png.create(side, side, sf::Color::White); for (std::size_t x = 0; x < side; ++x) { for (std::size_t y = 0; y < side; ++y) { png.setPixel(x, y, img[y * side + x] == inSetColor ? sf::Color::Black : sf::Color::White); } } std::stringstream filename; filename << "tmp/fractal_" << imgId << "_" << csvdescription() << ".png"; png.saveToFile(filename.str()); ++imgId; #endif } __host__ __device__ Color operator()(Index const& index) { const unsigned int x = index % side; const unsigned int y = index / side; // integer division const Complex c( range.first.real() + x / (side - Real(1.0f)) * (range.second.real() - range.first.real()), range.first.imag() + y / (side - Real(1.0f)) * (range.second.imag() - range.first.imag()) ); Complex z( 0, 0 ); std::size_t iter = 0; for (iter = 0; iter < maxIterations && abs(z) < Real(2.0f); ++iter) { z = z * z + c; } return iter == maxIterations ? 
inSetColor : notInSetColor; } std::string csvdescription() const { std::stringstream ss; ss << side << "," << maxIterations << "," << range; return ss.str(); } std::size_t side, maxIterations; ComplexRange range; }; int main(int, char**) { const std::size_t sides[] = { 100, 200, 400, 800, 1200, 1600, 2000, 4000, 10000 }; const std::size_t sidesCount = 9; const std::size_t iterations[] = { 1, 10, 30, 80, 150, 250, 500, 1000, 2000, 8000 }; const std::size_t iterationsCount = 10; const ComplexRange ranges[] = { ComplexRange( Complex(-1.72, 1.2), Complex(1.0, -1.2) ), ComplexRange( Complex(-0.7, 0), Complex(0.3, -1) ), ComplexRange( Complex(-0.4, -0.5), Complex(0.1, -1) ), ComplexRange( Complex(-0.4, -0.6), Complex(-0.2, -0.8) ), ComplexRange( Complex(-0.24, -0.64), Complex(-0.26, -0.66) ) }; const std::size_t rangesCount = 5; #ifdef SAVE_IMAGE const std::size_t repetitions = 1; #else const std::size_t repetitions = 4; #endif for (std::size_t s = 0; s < sidesCount; ++s) for (std::size_t i = 0; i < iterationsCount; ++i) for (std::size_t r = 0; r < rangesCount; ++r) stats<Mandelbrot, void>(Mandelbrot(sides[s], iterations[i], ranges[r]), iterations[i] >= 1000 && sides[s] >= 2000 ? 1 : repetitions); return 0; }
1655ea5fb5405a940fc6ee292104a881525995cd.hip
// !!! This is a file automatically generated by hipify!!! #include <THH/THHBlas.h> #include <THH/THHGeneral.h> #include <TH/THHalf.h> #include <algorithm> float THCudaBlas_Sdot(THCState *state, int64_t n, float *x, int64_t incx, float *y, int64_t incy) { if (n == 1) { incx = 1; incy = 1; } if ((n <= INT_MAX) && (incx <= INT_MAX) && (incy <= INT_MAX)) { int i_n = (int)n; int i_incx = (int)incx; int i_incy = (int)incy; float result; hipblasHandle_t handle = THCState_getCurrentBlasHandle(state); hipblasSetStream(handle, THCState_getCurrentStream(state)); THCublasCheck(hipblasSdot(handle, i_n, x, i_incx, y, i_incy, &result)); return result; } THError("Cublas_Sdot only supports n, incx and incy " "up to signed integer limits: %d", INT_MAX); return 0; } double THCudaBlas_Ddot(THCState *state, int64_t n, double *x, int64_t incx, double *y, int64_t incy) { if (n == 1) { incx = 1; incy = 1; } if ((n <= INT_MAX) && (incx <= INT_MAX) && (incy <= INT_MAX)) { int i_n = (int)n; int i_incx = (int)incx; int i_incy = (int)incy; double result; hipblasHandle_t handle = THCState_getCurrentBlasHandle(state); hipblasSetStream(handle, THCState_getCurrentStream(state)); THCublasCheck(hipblasDdot(handle, i_n, x, i_incx, y, i_incy, &result)); return result; } THError("Cublas_Ddot only supports n, incx and incy " "up to signed integer limits: %d", INT_MAX); return 0; } at::Half THCudaBlas_Hdot(THCState *state, int64_t n, at::Half *x, int64_t incx, at::Half *y, int64_t incy) { #if TORCH_HIP_VERSION >= 8000 if (n == 1) { incx = 1; incy = 1; } if ((n <= INT_MAX) && (incx <= INT_MAX) && (incy <= INT_MAX)) { at::Half result; hipblasHandle_t handle = THCState_getCurrentBlasHandle(state); hipblasSetStream(handle, THCState_getCurrentStream(state)); THCublasCheck(hipblasDotEx_v2(handle, n, x, HIP_R_16F, incx, y, HIP_R_16F, incy, &result, HIP_R_16F, HIP_R_32F)); return result; } THError("Cublas_Hdot only supports n, incx and incy " "up to signed integer limits: %d", INT_MAX); return 0.0; #else 
THError("Cublas_Hdot requires CUDA 8.0+"); return 0.0; #endif } /* Level 2 */ void adjustLdLevel2(int64_t m, int64_t n, int64_t *lda) { // Note: leading dimensions generally are checked that they are > 0 and at least as big the result // requires (even if the value won't be used). // TODO: why does Level3 check trans but this doesn't? if (n <= 1) *lda = std::max<int64_t>(m, 1); } void THCudaBlas_Sgemv(THCState *state, char trans, int64_t m, int64_t n, float alpha, float *a, int64_t lda, float *x, int64_t incx, float beta, float *y, int64_t incy) { adjustLdLevel2(m, n, &lda); hipblasOperation_t op; if (trans == 't') op = HIPBLAS_OP_T; else if (trans == 'n') op = HIPBLAS_OP_N; else if (trans == 'c') op = HIPBLAS_OP_C; else THError("Cublas_Sgemv parameter trans should be 't', 'n' or 'c'."); if( (m <= INT_MAX) && (n <= INT_MAX) && (lda > 0) && (lda <= INT_MAX) && (incx > 0) && (incx <= INT_MAX) && (incy > 0) && (incy <= INT_MAX) ) { int i_m = (int)m; int i_n = (int)n; int i_lda = (int)lda; int i_incx = (int)incx; int i_incy = (int)incy; hipblasHandle_t handle = THCState_getCurrentBlasHandle(state); hipblasSetStream(handle, THCState_getCurrentStream(state)); THCublasCheck(hipblasSgemv(handle, op, i_m, i_n, &alpha, a, i_lda, x, i_incx, &beta, y, i_incy)); return; } THError("Cublas_Sgemv only supports m, n, lda, incx, incy" "in the range 0 < [val] <= %d", INT_MAX); } void THCudaBlas_Dgemv(THCState *state, char trans, int64_t m, int64_t n, double alpha, double *a, int64_t lda, double *x, int64_t incx, double beta, double *y, int64_t incy) { adjustLdLevel2(m, n, &lda); hipblasOperation_t op; if (trans == 't') op = HIPBLAS_OP_T; else if (trans == 'n') op = HIPBLAS_OP_N; else if (trans == 'c') op = HIPBLAS_OP_C; else THError("Cublas_Sgemv parameter trans should be 't', 'n' or 'c'."); if( (m <= INT_MAX) && (n <= INT_MAX) && (lda > 0) && (lda <= INT_MAX) && (incx > 0) && (incx <= INT_MAX) && (incy > 0) && (incy <= INT_MAX) ) { int i_m = (int)m; int i_n = (int)n; int i_lda = 
(int)lda; int i_incx = (int)incx; int i_incy = (int)incy; hipblasHandle_t handle = THCState_getCurrentBlasHandle(state); hipblasSetStream(handle, THCState_getCurrentStream(state)); THCublasCheck(hipblasDgemv(handle, op, i_m, i_n, &alpha, a, i_lda, x, i_incx, &beta, y, i_incy)); return; } THError("Cublas_Dgemv only supports m, n, lda, incx, incy" "in the range 0 < [val] <= %d", INT_MAX); } void THCudaBlas_Sger(THCState *state, int64_t m, int64_t n, float alpha, float *x, int64_t incx, float *y, int64_t incy, float *a, int64_t lda) { adjustLdLevel2(m, n, &lda); if( (m <= INT_MAX) && (n <= INT_MAX) && (lda <= INT_MAX) && (incx <= INT_MAX) && (incy <= INT_MAX) ) { int i_m = (int)m; int i_n = (int)n; int i_lda = (int)lda; int i_incx = (int)incx; int i_incy = (int)incy; hipblasHandle_t handle = THCState_getCurrentBlasHandle(state); hipblasSetStream(handle, THCState_getCurrentStream(state)); THCublasCheck(hipblasSger(handle, i_m, i_n, &alpha, x, i_incx, y, i_incy, a, i_lda)); return; } THError("Cublas_Sger only supports m, n, lda, incx, incy" "with the bound [val] <= %d", INT_MAX); } void THCudaBlas_Dger(THCState *state, int64_t m, int64_t n, double alpha, double *x, int64_t incx, double *y, int64_t incy, double *a, int64_t lda) { adjustLdLevel2(m, n, &lda); if( (m <= INT_MAX) && (n <= INT_MAX) && (lda <= INT_MAX) && (incx <= INT_MAX) && (incy <= INT_MAX) ) { int i_m = (int)m; int i_n = (int)n; int i_lda = (int)lda; int i_incx = (int)incx; int i_incy = (int)incy; hipblasHandle_t handle = THCState_getCurrentBlasHandle(state); hipblasSetStream(handle, THCState_getCurrentStream(state)); THCublasCheck(hipblasDger(handle, i_m, i_n, &alpha, x, i_incx, y, i_incy, a, i_lda)); return; } THError("Cublas_Dger only supports m, n, lda, incx, incy" "with the bound [val] <= %d", INT_MAX); } hipblasOperation_t convertTransToCublasOperation(char trans) { if (trans == 't') return HIPBLAS_OP_T; else if (trans == 'n') return HIPBLAS_OP_N; else if (trans == 'c') return HIPBLAS_OP_C; else { 
THError("trans must be one of: t, n, c"); return HIPBLAS_OP_T; } } void adjustLdLevel3(char transa, char transb, int64_t m, int64_t n, int64_t k, int64_t *lda, int64_t *ldb, int64_t *ldc) { int transa_ = ((transa == 't') || (transa == 'T')); int transb_ = ((transb == 't') || (transb == 'T')); // Note: leading dimensions generally are checked that they are > 0 and at least as big the result // requires (even if the value won't be used). if(n <= 1) *ldc = std::max<int64_t>(m, 1); if(transa_) { if(m <= 1) *lda = std::max<int64_t>(k, 1); } else { if(k <= 1) *lda = std::max<int64_t>(m, 1); } if(transb_) { if(k <= 1) *ldb = std::max<int64_t>(n, 1); } else { if(n <= 1) *ldb = std::max<int64_t>(k, 1); } } /* Level 3 */ void THCudaBlas_Sgemm(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k, float alpha, float *a, int64_t lda, float *b, int64_t ldb, float beta, float *c, int64_t ldc) { adjustLdLevel3(transa, transb, m, n, k, &lda, &ldb, &ldc); hipblasOperation_t opa = convertTransToCublasOperation(transa); hipblasOperation_t opb = convertTransToCublasOperation(transb); if( (m <= INT_MAX) && (n <= INT_MAX) && (k <= INT_MAX) && (lda <= INT_MAX) && (ldb <= INT_MAX) && (ldc <= INT_MAX) ) { int i_m = (int)m; int i_n = (int)n; int i_k = (int)k; int i_lda = (int)lda; int i_ldb = (int)ldb; int i_ldc = (int)ldc; hipblasHandle_t handle = THCState_getCurrentBlasHandle(state); hipblasSetStream(handle, THCState_getCurrentStream(state)); THCublasCheck(hipblasSgemm(handle, opa, opb, i_m, i_n, i_k, &alpha, a, i_lda, b, i_ldb, &beta, c, i_ldc)); return; } THError("Cublas_Sgemm only supports m, n, k, lda, ldb, ldc" "with the bound [val] <= %d", INT_MAX); } // In CUDA 8.0, definition of data types for sgemmex changed #if TORCH_HIP_VERSION < 8000 # define HIP_R_16F HIPBLAS_DATA_HALF #endif void THCudaBlas_Hgemm(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k, at::Half alpha, at::Half *a, int64_t lda, at::Half *b, int64_t ldb, at::Half beta, 
at::Half *c, int64_t ldc) { adjustLdLevel3(transa, transb, m, n, k, &lda, &ldb, &ldc); hipblasOperation_t opa = convertTransToCublasOperation(transa); hipblasOperation_t opb = convertTransToCublasOperation(transb); if( (m <= INT_MAX) && (n <= INT_MAX) && (k <= INT_MAX) && (lda <= INT_MAX) && (ldb <= INT_MAX) && (ldc <= INT_MAX) ) { int i_m = (int)m; int i_n = (int)n; int i_k = (int)k; int i_lda = (int)lda; int i_ldb = (int)ldb; int i_ldc = (int)ldc; hipblasHandle_t handle = THCState_getCurrentBlasHandle(state); hipblasSetStream(handle, THCState_getCurrentStream(state)); #ifdef __HIP_PLATFORM_HCC__ THCublasCheck(rocblas_hgemm(handle, opa, opb, i_m, i_n, i_k, reinterpret_cast<rocblas_half*>(&alpha), reinterpret_cast<rocblas_half*>(a), i_lda, reinterpret_cast<rocblas_half*>(b), i_ldb, reinterpret_cast<rocblas_half*>(&beta), reinterpret_cast<rocblas_half*>(c), i_ldc)); #else // Simulated Hgemm float fAlpha = alpha; float fBeta = beta; #if TORCH_HIP_VERSION < 9000 THCublasCheck(cublasSgemmEx(handle, opa, opb, i_m, i_n, i_k, &fAlpha, a, HIP_R_16F, i_lda, b, HIP_R_16F, i_ldb, &fBeta, c, HIP_R_16F, i_ldc)); #else hipDeviceProp_t* prop = THCState_getCurrentDeviceProperties(state); if (prop->major >= 5){ #ifndef __HIP_PLATFORM_HCC__ THCublasCheck(cublasSetMathMode(handle, CUBLAS_TENSOR_OP_MATH)); #endif THCublasCheck(hipblasGemmEx(handle, opa, opb, i_m, i_n, i_k, &fAlpha, a, HIP_R_16F, i_lda, b, HIP_R_16F, i_ldb, &fBeta, c, HIP_R_16F, i_ldc, HIP_R_32F, CUBLAS_GEMM_DFALT_TENSOR_OP)); #ifndef __HIP_PLATFORM_HCC__ THCublasCheck(cublasSetMathMode(handle, CUBLAS_DEFAULT_MATH)); #endif }else{ THCublasCheck(cublasSgemmEx(handle, opa, opb, i_m, i_n, i_k, &fAlpha, a, HIP_R_16F, i_lda, b, HIP_R_16F, i_ldb, &fBeta, c, HIP_R_16F, i_ldc)); } #endif #endif return; } THError("Cublas_Hgemm only supports m, n, k, lda, ldb, ldc" "with th bound [val] <= %d", INT_MAX); } void THCudaBlas_Dgemm(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k, double alpha, double *a, 
int64_t lda, double *b, int64_t ldb, double beta, double *c, int64_t ldc) { adjustLdLevel3(transa, transb, m, n, k, &lda, &ldb, &ldc); hipblasOperation_t opa = convertTransToCublasOperation(transa); hipblasOperation_t opb = convertTransToCublasOperation(transb); if( (m <= INT_MAX) && (n <= INT_MAX) && (k <= INT_MAX) && (lda <= INT_MAX) && (ldb <= INT_MAX) && (ldc <= INT_MAX) ) { int i_m = (int)m; int i_n = (int)n; int i_k = (int)k; int i_lda = (int)lda; int i_ldb = (int)ldb; int i_ldc = (int)ldc; hipblasHandle_t handle = THCState_getCurrentBlasHandle(state); hipblasSetStream(handle, THCState_getCurrentStream(state)); THCublasCheck(hipblasDgemm(handle, opa, opb, i_m, i_n, i_k, &alpha, a, i_lda, b, i_ldb, &beta, c, i_ldc)); return; } THError("Cublas_Dgemm only supports m, n, k, lda, ldb, ldc" "with the bound [val] <= %d", INT_MAX); } #if TORCH_HIP_VERSION >= 9010 void THCudaBlas_HgemmStridedBatched(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k, at::Half alpha, const at::Half *a, int64_t lda, int64_t strideA, const at::Half *b, int64_t ldb, int64_t strideB, at::Half beta, at::Half *c, int64_t ldc, int64_t strideC, int64_t batchCount) { if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) ) { THError("Cublas_SgemmStridedBatched only supports m, n, k, lda, ldb, ldc, batchCount" "with the bound [val] <= %d", INT_MAX); } adjustLdLevel3(transa, transb, m, n, k, &lda, &ldb, &ldc); hipblasOperation_t opa = convertTransToCublasOperation(transa); hipblasOperation_t opb = convertTransToCublasOperation(transb); hipblasHandle_t handle = THCState_getCurrentBlasHandle(state); hipblasSetStream(handle, THCState_getCurrentStream(state)); float fAlpha = alpha; float fBeta = beta; THCublasCheck(cublasSetMathMode(handle, CUBLAS_TENSOR_OP_MATH)); THCublasCheck(hipblasGemmStridedBatchedEx(handle, opa, opb, (int)m, (int)n, (int)k, (void*)&fAlpha, a, HIP_R_16F, (int)lda, 
strideA, b, HIP_R_16F, (int)ldb, strideB, (void*)&fBeta, c, HIP_R_16F, (int)ldc, strideC, (int)batchCount, HIP_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); THCublasCheck(cublasSetMathMode(handle, CUBLAS_DEFAULT_MATH)); } #endif void THCudaBlas_SgemmBatched(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k, float alpha, const float *a[], int64_t lda, const float *b[], int64_t ldb, float beta, float *c[], int64_t ldc, int64_t batchCount) { if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) ) { THError("Cublas_SgemmBatched only supports m, n, k, lda, ldb, ldc, batchCount" "with the bound [val] <= %d", INT_MAX); } #ifdef __HIP_PLATFORM_HCC__ const int64_t stridea = (transa == 'N' || transa == 'n') ? lda*k : lda*n; const int64_t strideb = (transb == 'N' || transb == 'n') ? ldb*n : ldb*k; const int64_t stridec = ldc*n; THCudaBlas_SgemmStridedBatched(state, transa, transb, m, n, k, alpha, *a, lda, stridea, *b, ldb, strideb, beta, *c, ldc, stridec, batchCount); #else adjustLdLevel3(transa, transb, m, n, k, &lda, &ldb, &ldc); hipblasOperation_t opa = convertTransToCublasOperation(transa); hipblasOperation_t opb = convertTransToCublasOperation(transb); hipblasHandle_t handle = THCState_getCurrentBlasHandle(state); hipblasSetStream(handle, THCState_getCurrentStream(state)); THCublasCheck(hipblasSgemmBatched(handle, opa, opb, (int)m, (int)n, (int)k, &alpha, a, (int)lda, b, (int)ldb, &beta, c, (int)ldc, (int)batchCount)); #endif } #if TORCH_HIP_VERSION >= 8000 || defined __HIP_PLATFORM_HCC__ void THCudaBlas_SgemmStridedBatched(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k, float alpha, const float *a, int64_t lda, int64_t strideA, const float *b, int64_t ldb, int64_t strideB, float beta, float *c, int64_t ldc, int64_t strideC, int64_t batchCount) { if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX) || (ldb >= 
INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) ) { THError("Cublas_SgemmStridedBatched only supports m, n, k, lda, ldb, ldc, batchCount" "with the bound [val] <= %d", INT_MAX); } adjustLdLevel3(transa, transb, m, n, k, &lda, &ldb, &ldc); hipblasOperation_t opa = convertTransToCublasOperation(transa); hipblasOperation_t opb = convertTransToCublasOperation(transb); hipblasHandle_t handle = THCState_getCurrentBlasHandle(state); hipblasSetStream(handle, THCState_getCurrentStream(state)); THCublasCheck(hipblasSgemmStridedBatched(handle, opa, opb, (int)m, (int)n, (int)k, &alpha, a, (int)lda, strideA, b, (int)ldb, strideB, &beta, c, (int)ldc, strideC, (int)batchCount)); } #endif void THCudaBlas_DgemmBatched(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k, double alpha, const double *a[], int64_t lda, const double *b[], int64_t ldb, double beta, double *c[], int64_t ldc, int64_t batchCount) { if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) ) { THError("Cublas_DgemmBatched only supports m, n, k, lda, ldb, ldc, batchCount" "with the bound [val] <= %d", INT_MAX); } #ifdef __HIP_PLATFORM_HCC__ const int64_t stridea = (transa == 'N' || transa == 'n') ? lda*k : lda*n; const int64_t strideb = (transb == 'N' || transb == 'n') ? 
ldb*n : ldb*k; const int64_t stridec = ldc*n; THCudaBlas_DgemmStridedBatched(state, transa, transb, m, n, k, alpha, *a, lda, stridea, *b, ldb, strideb, beta, *c, ldc, stridec, batchCount); #else adjustLdLevel3(transa, transb, m, n, k, &lda, &ldb, &ldc); hipblasOperation_t opa = convertTransToCublasOperation(transa); hipblasOperation_t opb = convertTransToCublasOperation(transb); hipblasHandle_t handle = THCState_getCurrentBlasHandle(state); hipblasSetStream(handle, THCState_getCurrentStream(state)); THCublasCheck(hipblasDgemmBatched(handle, opa, opb, (int)m, (int)n, (int)k, &alpha, a, (int)lda, b, (int)ldb, &beta, c, (int)ldc, (int)batchCount)); #endif } #if TORCH_HIP_VERSION >= 8000 || defined __HIP_PLATFORM_HCC__ void THCudaBlas_DgemmStridedBatched(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k, double alpha, const double *a, int64_t lda, int64_t strideA, const double *b, int64_t ldb, int64_t strideB, double beta, double *c, int64_t ldc, int64_t strideC, int64_t batchCount) { if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) ) { THError("Cublas_DgemmBatched only supports m, n, k, lda, ldb, ldc, batchCount" "with the bound [val] <= %d", INT_MAX); } adjustLdLevel3(transa, transb, m, n, k, &lda, &ldb, &ldc); hipblasOperation_t opa = convertTransToCublasOperation(transa); hipblasOperation_t opb = convertTransToCublasOperation(transb); hipblasHandle_t handle = THCState_getCurrentBlasHandle(state); hipblasSetStream(handle, THCState_getCurrentStream(state)); THCublasCheck(hipblasDgemmStridedBatched(handle, opa, opb, (int)m, (int)n, (int)k, &alpha, a, (int)lda, strideA, b, (int)ldb, strideB, &beta, c, (int)ldc, strideC, (int)batchCount)); } #endif /* Inverse */ void THCudaBlas_Sgetrf(THCState *state, int n, float **a, int lda, int *pivot, int *info, int batchSize) { if( (n >= INT_MAX) || (lda >= INT_MAX) || (batchSize >= INT_MAX) ) { 
THError("Cublas_Sgetrf only supports n, lda, batchSize" "with the bound [val] <= %d", INT_MAX); } hipblasHandle_t handle = THCState_getCurrentBlasHandle(state); hipblasSetStream(handle, THCState_getCurrentStream(state)); THCublasCheck(hipblasSgetrfBatched(handle, n, a, lda, pivot, info, batchSize)); } void THCudaBlas_Dgetrf(THCState *state, int n, double **a, int lda, int *pivot, int *info, int batchSize) { if( (n >= INT_MAX) || (lda >= INT_MAX) || (batchSize >= INT_MAX) ) { THError("Cublas_Dgetrf only supports n, lda, batchSize" "with the bound [val] <= %d", INT_MAX); } hipblasHandle_t handle = THCState_getCurrentBlasHandle(state); hipblasSetStream(handle, THCState_getCurrentStream(state)); THCublasCheck(hipblasDgetrfBatched(handle, n, a, lda, pivot, info, batchSize)); } void THCudaBlas_Sgetrs(THCState *state, char transa, int n, int nrhs, const float **a, int lda, int *pivot, float **b, int ldb, int *info, int batchSize) { if( (n >= INT_MAX) || (nrhs >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (batchSize >= INT_MAX) ) { THError("Cublas_Dgetrs only supports n, nrhs, lda, ldb, batchSize" "with the bound [val] <= %d", INT_MAX); } // no need to adjust leading dimensions, since matrices are square hipblasOperation_t opa = convertTransToCublasOperation(transa); hipblasHandle_t handle = THCState_getCurrentBlasHandle(state); hipblasSetStream(handle, THCState_getCurrentStream(state)); THCublasCheck(hipblasSgetrsBatched(handle, opa, n, nrhs, a, lda, pivot, b, ldb, info, batchSize)); } void THCudaBlas_Dgetrs(THCState *state, char transa, int n, int nrhs, const double **a, int lda, int *pivot, double **b, int ldb, int *info, int batchSize) { if( (n >= INT_MAX) || (nrhs >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (batchSize >= INT_MAX) ) { THError("Cublas_Dgetrs only supports n, nrhs, lda, ldb, batchSize" "with the bound [val] <= %d", INT_MAX); } // no need to adjust leading dimensions, since matrices are square hipblasOperation_t opa = 
convertTransToCublasOperation(transa); hipblasHandle_t handle = THCState_getCurrentBlasHandle(state); hipblasSetStream(handle, THCState_getCurrentStream(state)); THCublasCheck(hipblasDgetrsBatched(handle, opa, n, nrhs, a, lda, pivot, b, ldb, info, batchSize)); } void THCudaBlas_Sgetri(THCState *state, int n, const float **a, int lda, int *pivot, float **c, int ldc, int *info, int batchSize) { if( (n >= INT_MAX) || (lda >= INT_MAX)|| (ldc >= INT_MAX) || (batchSize >= INT_MAX) ) { THError("Cublas_Sgetri only supports n, lda, ldc, batchSize" "with the bound [val] <= %d", INT_MAX); } hipblasHandle_t handle = THCState_getCurrentBlasHandle(state); hipblasSetStream(handle, THCState_getCurrentStream(state)); THCublasCheck(hipblasSgetriBatched(handle, n, a, lda, pivot, c, ldc, info, batchSize)); } void THCudaBlas_Dgetri(THCState *state, int n, const double **a, int lda, int *pivot, double **c, int ldc, int *info, int batchSize) { if( (n >= INT_MAX) || (lda >= INT_MAX)|| (ldc >= INT_MAX) || (batchSize >= INT_MAX) ) { THError("Cublas_Dgetri only supports n, lda, ldc, batchSize" "with the bound [val] <= %d", INT_MAX); } hipblasHandle_t handle = THCState_getCurrentBlasHandle(state); hipblasSetStream(handle, THCState_getCurrentStream(state)); THCublasCheck(hipblasDgetriBatched(handle, n, a, lda, pivot, c, ldc, info, batchSize)); }
1655ea5fb5405a940fc6ee292104a881525995cd.cu
#include <THC/THCBlas.h> #include <THC/THCGeneral.h> #include <TH/THHalf.h> #include <algorithm> float THCudaBlas_Sdot(THCState *state, int64_t n, float *x, int64_t incx, float *y, int64_t incy) { if (n == 1) { incx = 1; incy = 1; } if ((n <= INT_MAX) && (incx <= INT_MAX) && (incy <= INT_MAX)) { int i_n = (int)n; int i_incx = (int)incx; int i_incy = (int)incy; float result; cublasHandle_t handle = THCState_getCurrentBlasHandle(state); cublasSetStream(handle, THCState_getCurrentStream(state)); THCublasCheck(cublasSdot(handle, i_n, x, i_incx, y, i_incy, &result)); return result; } THError("Cublas_Sdot only supports n, incx and incy " "up to signed integer limits: %d", INT_MAX); return 0; } double THCudaBlas_Ddot(THCState *state, int64_t n, double *x, int64_t incx, double *y, int64_t incy) { if (n == 1) { incx = 1; incy = 1; } if ((n <= INT_MAX) && (incx <= INT_MAX) && (incy <= INT_MAX)) { int i_n = (int)n; int i_incx = (int)incx; int i_incy = (int)incy; double result; cublasHandle_t handle = THCState_getCurrentBlasHandle(state); cublasSetStream(handle, THCState_getCurrentStream(state)); THCublasCheck(cublasDdot(handle, i_n, x, i_incx, y, i_incy, &result)); return result; } THError("Cublas_Ddot only supports n, incx and incy " "up to signed integer limits: %d", INT_MAX); return 0; } at::Half THCudaBlas_Hdot(THCState *state, int64_t n, at::Half *x, int64_t incx, at::Half *y, int64_t incy) { #if CUDA_VERSION >= 8000 if (n == 1) { incx = 1; incy = 1; } if ((n <= INT_MAX) && (incx <= INT_MAX) && (incy <= INT_MAX)) { at::Half result; cublasHandle_t handle = THCState_getCurrentBlasHandle(state); cublasSetStream(handle, THCState_getCurrentStream(state)); THCublasCheck(cublasDotEx(handle, n, x, CUDA_R_16F, incx, y, CUDA_R_16F, incy, &result, CUDA_R_16F, CUDA_R_32F)); return result; } THError("Cublas_Hdot only supports n, incx and incy " "up to signed integer limits: %d", INT_MAX); return 0.0; #else THError("Cublas_Hdot requires CUDA 8.0+"); return 0.0; #endif } /* Level 2 */ 
// Clamp a degenerate leading dimension for level-2 BLAS calls: when the
// matrix has at most one column, cuBLAS still requires lda >= max(m, 1).
void adjustLdLevel2(int64_t m, int64_t n, int64_t *lda)
{
  // Note: leading dimensions generally are checked that they are > 0 and at least as big the result
  // requires (even if the value won't be used).
  // TODO: why does Level3 check trans but this doesn't?
  if (n <= 1)
    *lda = std::max<int64_t>(m, 1);
}

// y = alpha * op(A) * x + beta * y, single precision.
// trans selects op(A): 'n' = A, 't' = A^T, 'c' = A^H.
void THCudaBlas_Sgemv(THCState *state, char trans, int64_t m, int64_t n, float alpha, float *a, int64_t lda, float *x, int64_t incx, float beta, float *y, int64_t incy)
{
  adjustLdLevel2(m, n, &lda);

  cublasOperation_t op;
  if (trans == 't') op = CUBLAS_OP_T;
  else if (trans == 'n') op = CUBLAS_OP_N;
  else if (trans == 'c') op = CUBLAS_OP_C;
  else THError("Cublas_Sgemv parameter trans should be 't', 'n' or 'c'.");

  // cuBLAS takes 32-bit ints; reject anything that would overflow.
  if( (m <= INT_MAX) && (n <= INT_MAX) &&
      (lda > 0) && (lda <= INT_MAX) &&
      (incx > 0) && (incx <= INT_MAX) &&
      (incy > 0) && (incy <= INT_MAX) )
  {
    int i_m = (int)m;
    int i_n = (int)n;
    int i_lda = (int)lda;
    int i_incx = (int)incx;
    int i_incy = (int)incy;

    cublasHandle_t handle = THCState_getCurrentBlasHandle(state);
    cublasSetStream(handle, THCState_getCurrentStream(state));
    THCublasCheck(cublasSgemv(handle, op, i_m, i_n, &alpha, a, i_lda, x, i_incx, &beta, y, i_incy));
    return;
  }
  // Fix: added missing joining space between the concatenated literals.
  THError("Cublas_Sgemv only supports m, n, lda, incx, incy "
          "in the range 0 < [val] <= %d", INT_MAX);
}

// y = alpha * op(A) * x + beta * y, double precision.
void THCudaBlas_Dgemv(THCState *state, char trans, int64_t m, int64_t n, double alpha, double *a, int64_t lda, double *x, int64_t incx, double beta, double *y, int64_t incy)
{
  adjustLdLevel2(m, n, &lda);

  cublasOperation_t op;
  if (trans == 't') op = CUBLAS_OP_T;
  else if (trans == 'n') op = CUBLAS_OP_N;
  else if (trans == 'c') op = CUBLAS_OP_C;
  // Fix: message previously named Sgemv (copy-paste error).
  else THError("Cublas_Dgemv parameter trans should be 't', 'n' or 'c'.");

  if( (m <= INT_MAX) && (n <= INT_MAX) &&
      (lda > 0) && (lda <= INT_MAX) &&
      (incx > 0) && (incx <= INT_MAX) &&
      (incy > 0) && (incy <= INT_MAX) )
  {
    int i_m = (int)m;
    int i_n = (int)n;
    int i_lda = (int)lda;
    int i_incx = (int)incx;
    int i_incy = (int)incy;

    cublasHandle_t handle = THCState_getCurrentBlasHandle(state);
    cublasSetStream(handle, THCState_getCurrentStream(state));
    THCublasCheck(cublasDgemv(handle, op, i_m, i_n, &alpha, a, i_lda, x, i_incx, &beta, y, i_incy));
    return;
  }
  THError("Cublas_Dgemv only supports m, n, lda, incx, incy "
          "in the range 0 < [val] <= %d", INT_MAX);
}

// A += alpha * x * y^T, single-precision rank-1 update.
void THCudaBlas_Sger(THCState *state, int64_t m, int64_t n, float alpha, float *x, int64_t incx, float *y, int64_t incy, float *a, int64_t lda)
{
  adjustLdLevel2(m, n, &lda);

  if( (m <= INT_MAX) && (n <= INT_MAX) && (lda <= INT_MAX) && (incx <= INT_MAX) && (incy <= INT_MAX) )
  {
    int i_m = (int)m;
    int i_n = (int)n;
    int i_lda = (int)lda;
    int i_incx = (int)incx;
    int i_incy = (int)incy;

    cublasHandle_t handle = THCState_getCurrentBlasHandle(state);
    cublasSetStream(handle, THCState_getCurrentStream(state));
    THCublasCheck(cublasSger(handle, i_m, i_n, &alpha, x, i_incx, y, i_incy, a, i_lda));
    return;
  }
  THError("Cublas_Sger only supports m, n, lda, incx, incy "
          "with the bound [val] <= %d", INT_MAX);
}

// A += alpha * x * y^T, double-precision rank-1 update.
void THCudaBlas_Dger(THCState *state, int64_t m, int64_t n, double alpha, double *x, int64_t incx, double *y, int64_t incy, double *a, int64_t lda)
{
  adjustLdLevel2(m, n, &lda);

  if( (m <= INT_MAX) && (n <= INT_MAX) && (lda <= INT_MAX) && (incx <= INT_MAX) && (incy <= INT_MAX) )
  {
    int i_m = (int)m;
    int i_n = (int)n;
    int i_lda = (int)lda;
    int i_incx = (int)incx;
    int i_incy = (int)incy;

    cublasHandle_t handle = THCState_getCurrentBlasHandle(state);
    cublasSetStream(handle, THCState_getCurrentStream(state));
    THCublasCheck(cublasDger(handle, i_m, i_n, &alpha, x, i_incx, y, i_incy, a, i_lda));
    return;
  }
  THError("Cublas_Dger only supports m, n, lda, incx, incy "
          "with the bound [val] <= %d", INT_MAX);
}

// Map the BLAS-style transpose character ('t'/'n'/'c') to the cuBLAS enum.
// Errors out (and returns a dummy value to satisfy the compiler) otherwise.
cublasOperation_t convertTransToCublasOperation(char trans) {
  if (trans == 't') return CUBLAS_OP_T;
  else if (trans == 'n') return CUBLAS_OP_N;
  else if (trans == 'c') return CUBLAS_OP_C;
  else {
    THError("trans must be one of: t, n, c");
    return CUBLAS_OP_T;
  }
}

// Clamp degenerate leading dimensions for level-3 GEMM-style calls, taking
// the transpose flags into account (the "inner" dimension of each operand
// depends on whether it is transposed).
void adjustLdLevel3(char transa, char transb, int64_t m, int64_t n, int64_t k, int64_t *lda, int64_t *ldb, int64_t *ldc)
{
  int transa_ = ((transa == 't') || (transa == 'T'));
  int transb_ = ((transb == 't') || (transb == 'T'));

  // Note: leading dimensions generally are checked that they are > 0 and at least as big the result
  // requires (even if the value won't be used).
  if(n <= 1)
    *ldc = std::max<int64_t>(m, 1);

  if(transa_)
  {
    if(m <= 1)
      *lda = std::max<int64_t>(k, 1);
  }
  else
  {
    if(k <= 1)
      *lda = std::max<int64_t>(m, 1);
  }

  if(transb_)
  {
    if(k <= 1)
      *ldb = std::max<int64_t>(n, 1);
  }
  else
  {
    if(n <= 1)
      *ldb = std::max<int64_t>(k, 1);
  }
}

/* Level 3 */

// C = alpha * op(A) * op(B) + beta * C, single precision (column-major).
void THCudaBlas_Sgemm(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k, float alpha, float *a, int64_t lda, float *b, int64_t ldb, float beta, float *c, int64_t ldc)
{
  adjustLdLevel3(transa, transb, m, n, k, &lda, &ldb, &ldc);
  cublasOperation_t opa = convertTransToCublasOperation(transa);
  cublasOperation_t opb = convertTransToCublasOperation(transb);

  if( (m <= INT_MAX) && (n <= INT_MAX) && (k <= INT_MAX) && (lda <= INT_MAX) && (ldb <= INT_MAX) && (ldc <= INT_MAX) )
  {
    int i_m = (int)m;
    int i_n = (int)n;
    int i_k = (int)k;
    int i_lda = (int)lda;
    int i_ldb = (int)ldb;
    int i_ldc = (int)ldc;

    cublasHandle_t handle = THCState_getCurrentBlasHandle(state);
    cublasSetStream(handle, THCState_getCurrentStream(state));
    THCublasCheck(cublasSgemm(handle, opa, opb, i_m, i_n, i_k, &alpha, a, i_lda, b, i_ldb, &beta, c, i_ldc));
    return;
  }
  THError("Cublas_Sgemm only supports m, n, k, lda, ldb, ldc "
          "with the bound [val] <= %d", INT_MAX);
}

// In CUDA 8.0, definition of data types for sgemmex changed
#if CUDA_VERSION < 8000
#  define CUDA_R_16F CUBLAS_DATA_HALF
#endif

// C = alpha * op(A) * op(B) + beta * C in half precision.
// On ROCm this dispatches to rocblas_hgemm; on CUDA it simulates Hgemm via
// fp16-storage/fp32-compute GemmEx (with tensor-op math on SM50+ and
// CUDA >= 9.0) or cublasSgemmEx on older toolkits/devices.
void THCudaBlas_Hgemm(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k, at::Half alpha, at::Half *a, int64_t lda, at::Half *b, int64_t ldb, at::Half beta, at::Half *c, int64_t ldc)
{
  adjustLdLevel3(transa, transb, m, n, k, &lda, &ldb, &ldc);
  cublasOperation_t opa = convertTransToCublasOperation(transa);
  cublasOperation_t opb = convertTransToCublasOperation(transb);

  if( (m <= INT_MAX) && (n <= INT_MAX) && (k <= INT_MAX) && (lda <= INT_MAX) && (ldb <= INT_MAX) && (ldc <= INT_MAX) )
  {
    int i_m = (int)m;
    int i_n = (int)n;
    int i_k = (int)k;
    int i_lda = (int)lda;
    int i_ldb = (int)ldb;
    int i_ldc = (int)ldc;

    cublasHandle_t handle = THCState_getCurrentBlasHandle(state);
    cublasSetStream(handle, THCState_getCurrentStream(state));

#ifdef __HIP_PLATFORM_HCC__
    THCublasCheck(rocblas_hgemm(handle, opa, opb, i_m, i_n, i_k,
                                reinterpret_cast<rocblas_half*>(&alpha),
                                reinterpret_cast<rocblas_half*>(a), i_lda,
                                reinterpret_cast<rocblas_half*>(b), i_ldb,
                                reinterpret_cast<rocblas_half*>(&beta),
                                reinterpret_cast<rocblas_half*>(c), i_ldc));
#else
    // Simulated Hgemm: accumulate in float for accuracy.
    float fAlpha = alpha;
    float fBeta = beta;

#if CUDA_VERSION < 9000
    THCublasCheck(cublasSgemmEx(handle, opa, opb,
                                i_m, i_n, i_k, &fAlpha,
                                a, CUDA_R_16F, i_lda, b, CUDA_R_16F,
                                i_ldb, &fBeta, c, CUDA_R_16F, i_ldc));
#else
    cudaDeviceProp* prop = THCState_getCurrentDeviceProperties(state);
    if (prop->major >= 5){
#ifndef __HIP_PLATFORM_HCC__
      THCublasCheck(cublasSetMathMode(handle, CUBLAS_TENSOR_OP_MATH));
#endif
      THCublasCheck(cublasGemmEx(handle, opa, opb,
                                 i_m, i_n, i_k, &fAlpha,
                                 a, CUDA_R_16F, i_lda, b, CUDA_R_16F,
                                 i_ldb, &fBeta, c, CUDA_R_16F, i_ldc,
                                 CUDA_R_32F, CUBLAS_GEMM_DFALT_TENSOR_OP));
#ifndef __HIP_PLATFORM_HCC__
      // Restore the default so subsequent calls are not affected.
      THCublasCheck(cublasSetMathMode(handle, CUBLAS_DEFAULT_MATH));
#endif
    }else{
      THCublasCheck(cublasSgemmEx(handle, opa, opb,
                                  i_m, i_n, i_k, &fAlpha,
                                  a, CUDA_R_16F, i_lda, b, CUDA_R_16F,
                                  i_ldb, &fBeta, c, CUDA_R_16F, i_ldc));
    }
#endif
#endif
    return;
  }
  // Fix: "th bound" typo and missing space between the concatenated literals.
  THError("Cublas_Hgemm only supports m, n, k, lda, ldb, ldc "
          "with the bound [val] <= %d", INT_MAX);
}

// C = alpha * op(A) * op(B) + beta * C, double precision (column-major).
void THCudaBlas_Dgemm(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k, double alpha, double *a, int64_t lda, double *b, int64_t ldb, double beta, double *c, int64_t ldc)
{
  adjustLdLevel3(transa, transb, m, n, k, &lda, &ldb, &ldc);
  cublasOperation_t opa = convertTransToCublasOperation(transa);
  cublasOperation_t opb = convertTransToCublasOperation(transb);

  if( (m <= INT_MAX) && (n <= INT_MAX) && (k <= INT_MAX) && (lda <= INT_MAX) && (ldb <= INT_MAX) && (ldc <= INT_MAX) )
  {
    int i_m = (int)m;
    int i_n = (int)n;
    int i_k = (int)k;
    int i_lda = (int)lda;
    int i_ldb = (int)ldb;
    int i_ldc = (int)ldc;

    cublasHandle_t handle = THCState_getCurrentBlasHandle(state);
    cublasSetStream(handle, THCState_getCurrentStream(state));
    THCublasCheck(cublasDgemm(handle, opa, opb, i_m, i_n, i_k, &alpha, a, i_lda, b, i_ldb, &beta, c, i_ldc));
    return;
  }
  THError("Cublas_Dgemm only supports m, n, k, lda, ldb, ldc "
          "with the bound [val] <= %d", INT_MAX);
}

#if CUDA_VERSION >= 9010
// Batched half-precision GEMM over matrices laid out at fixed strides.
// Uses fp32 compute via cublasGemmStridedBatchedEx with tensor-op math.
void THCudaBlas_HgemmStridedBatched(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k,
                                    at::Half alpha, const at::Half *a, int64_t lda, int64_t strideA, const at::Half *b, int64_t ldb, int64_t strideB,
                                    at::Half beta, at::Half *c, int64_t ldc, int64_t strideC, int64_t batchCount)
{
  if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX)  || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) )
  {
    // Fix: message previously named SgemmStridedBatched (copy-paste error).
    THError("Cublas_HgemmStridedBatched only supports m, n, k, lda, ldb, ldc, batchCount "
            "with the bound [val] <= %d", INT_MAX);
  }

  adjustLdLevel3(transa, transb, m, n, k, &lda, &ldb, &ldc);
  cublasOperation_t opa = convertTransToCublasOperation(transa);
  cublasOperation_t opb = convertTransToCublasOperation(transb);

  cublasHandle_t handle = THCState_getCurrentBlasHandle(state);
  cublasSetStream(handle, THCState_getCurrentStream(state));
  float fAlpha = alpha;
  float fBeta = beta;
  THCublasCheck(cublasSetMathMode(handle, CUBLAS_TENSOR_OP_MATH));
  THCublasCheck(cublasGemmStridedBatchedEx(handle,
                                   opa, opb, (int)m, (int)n, (int)k,
                                   (void*)&fAlpha, a, CUDA_R_16F, (int)lda, strideA,
                                   b, CUDA_R_16F, (int)ldb, strideB,
                                   (void*)&fBeta, c, CUDA_R_16F, (int)ldc, strideC,
                                   (int)batchCount, CUDA_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP));
  THCublasCheck(cublasSetMathMode(handle, CUBLAS_DEFAULT_MATH));
}
#endif

// Batched single-precision GEMM over arrays of matrix pointers.
// On ROCm the pointer-array API is unavailable, so the batch is assumed
// contiguous and forwarded to the strided-batched variant.
void THCudaBlas_SgemmBatched(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k,
                             float alpha, const float *a[], int64_t lda, const float *b[], int64_t ldb,
                             float beta, float *c[], int64_t ldc, int64_t batchCount)
{
  if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX)  || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) )
  {
    THError("Cublas_SgemmBatched only supports m, n, k, lda, ldb, ldc, batchCount "
            "with the bound [val] <= %d", INT_MAX);
  }

#ifdef __HIP_PLATFORM_HCC__
  const int64_t stridea = (transa == 'N' || transa == 'n') ? lda*k : lda*n;
  const int64_t strideb = (transb == 'N' || transb == 'n') ? ldb*n : ldb*k;
  const int64_t stridec = ldc*n;

  THCudaBlas_SgemmStridedBatched(state, transa, transb, m, n, k, alpha, *a, lda, stridea, *b, ldb, strideb, beta, *c, ldc, stridec, batchCount);
#else
  adjustLdLevel3(transa, transb, m, n, k, &lda, &ldb, &ldc);
  cublasOperation_t opa = convertTransToCublasOperation(transa);
  cublasOperation_t opb = convertTransToCublasOperation(transb);

  cublasHandle_t handle = THCState_getCurrentBlasHandle(state);
  cublasSetStream(handle, THCState_getCurrentStream(state));
  THCublasCheck(cublasSgemmBatched(handle,
                                   opa, opb, (int)m, (int)n, (int)k,
                                   &alpha, a, (int)lda, b, (int)ldb, &beta, c, (int)ldc,
                                   (int)batchCount));
#endif
}

#if CUDA_VERSION >= 8000 || defined __HIP_PLATFORM_HCC__
// Batched single-precision GEMM over matrices laid out at fixed strides.
void THCudaBlas_SgemmStridedBatched(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k,
                             float alpha, const float *a, int64_t lda, int64_t strideA, const float *b, int64_t ldb, int64_t strideB,
                             float beta, float *c, int64_t ldc, int64_t strideC, int64_t batchCount)
{
  if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX)  || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) )
  {
    THError("Cublas_SgemmStridedBatched only supports m, n, k, lda, ldb, ldc, batchCount "
            "with the bound [val] <= %d", INT_MAX);
  }

  adjustLdLevel3(transa, transb, m, n, k, &lda, &ldb, &ldc);
  cublasOperation_t opa = convertTransToCublasOperation(transa);
  cublasOperation_t opb = convertTransToCublasOperation(transb);

  cublasHandle_t handle = THCState_getCurrentBlasHandle(state);
  cublasSetStream(handle, THCState_getCurrentStream(state));
  THCublasCheck(cublasSgemmStridedBatched(handle,
                                   opa, opb, (int)m, (int)n, (int)k,
                                   &alpha, a, (int)lda, strideA, b, (int)ldb, strideB, &beta, c, (int)ldc, strideC,
                                   (int)batchCount));
}
#endif

// Batched double-precision GEMM over arrays of matrix pointers.
// See THCudaBlas_SgemmBatched for the ROCm fallback rationale.
void THCudaBlas_DgemmBatched(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k,
                             double alpha, const double *a[], int64_t lda, const double *b[], int64_t ldb,
                             double beta, double *c[], int64_t ldc, int64_t batchCount)
{
  if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX)  || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) )
  {
    THError("Cublas_DgemmBatched only supports m, n, k, lda, ldb, ldc, batchCount "
            "with the bound [val] <= %d", INT_MAX);
  }

#ifdef __HIP_PLATFORM_HCC__
  const int64_t stridea = (transa == 'N' || transa == 'n') ? lda*k : lda*n;
  const int64_t strideb = (transb == 'N' || transb == 'n') ? ldb*n : ldb*k;
  const int64_t stridec = ldc*n;

  THCudaBlas_DgemmStridedBatched(state, transa, transb, m, n, k, alpha, *a, lda, stridea, *b, ldb, strideb, beta, *c, ldc, stridec, batchCount);
#else
  adjustLdLevel3(transa, transb, m, n, k, &lda, &ldb, &ldc);
  cublasOperation_t opa = convertTransToCublasOperation(transa);
  cublasOperation_t opb = convertTransToCublasOperation(transb);

  cublasHandle_t handle = THCState_getCurrentBlasHandle(state);
  cublasSetStream(handle, THCState_getCurrentStream(state));
  THCublasCheck(cublasDgemmBatched(handle,
                                   opa, opb, (int)m, (int)n, (int)k,
                                   &alpha, a, (int)lda, b, (int)ldb, &beta, c, (int)ldc,
                                   (int)batchCount));
#endif
}

#if CUDA_VERSION >= 8000 || defined __HIP_PLATFORM_HCC__
// Batched double-precision GEMM over matrices laid out at fixed strides.
void THCudaBlas_DgemmStridedBatched(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k,
                             double alpha, const double *a, int64_t lda, int64_t strideA, const double *b, int64_t ldb, int64_t strideB,
                             double beta, double *c, int64_t ldc, int64_t strideC, int64_t batchCount)
{
  if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX)  || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) )
  {
    // Fix: message previously named Cublas_DgemmBatched (copy-paste error).
    THError("Cublas_DgemmStridedBatched only supports m, n, k, lda, ldb, ldc, batchCount "
            "with the bound [val] <= %d", INT_MAX);
  }

  adjustLdLevel3(transa, transb, m, n, k, &lda, &ldb, &ldc);
  cublasOperation_t opa = convertTransToCublasOperation(transa);
  cublasOperation_t opb = convertTransToCublasOperation(transb);

  cublasHandle_t handle = THCState_getCurrentBlasHandle(state);
  cublasSetStream(handle, THCState_getCurrentStream(state));
  THCublasCheck(cublasDgemmStridedBatched(handle,
                                   opa, opb, (int)m, (int)n, (int)k,
                                   &alpha, a, (int)lda, strideA, b, (int)ldb, strideB, &beta, c, (int)ldc, strideC,
                                   (int)batchCount));
}
#endif

/* Inverse */

// Batched LU factorization (single precision); pivots and per-matrix info
// codes are written to the provided device arrays.
void THCudaBlas_Sgetrf(THCState *state, int n, float **a, int lda, int *pivot, int *info, int batchSize) {
  if( (n >= INT_MAX) || (lda >= INT_MAX) || (batchSize >= INT_MAX) )
  {
    THError("Cublas_Sgetrf only supports n, lda, batchSize "
            "with the bound [val] <= %d", INT_MAX);
  }
  cublasHandle_t handle = THCState_getCurrentBlasHandle(state);
  cublasSetStream(handle, THCState_getCurrentStream(state));
  THCublasCheck(cublasSgetrfBatched(handle, n, a, lda, pivot, info, batchSize));
}

// Batched LU factorization (double precision).
void THCudaBlas_Dgetrf(THCState *state, int n, double **a, int lda, int *pivot, int *info, int batchSize) {
  if( (n >= INT_MAX) || (lda >= INT_MAX) || (batchSize >= INT_MAX) )
  {
    THError("Cublas_Dgetrf only supports n, lda, batchSize "
            "with the bound [val] <= %d", INT_MAX);
  }
  cublasHandle_t handle = THCState_getCurrentBlasHandle(state);
  cublasSetStream(handle, THCState_getCurrentStream(state));
  THCublasCheck(cublasDgetrfBatched(handle, n, a, lda, pivot, info, batchSize));
}

// Batched solve from LU factors (single precision).
void THCudaBlas_Sgetrs(THCState *state, char transa, int n, int nrhs, const float **a, int lda, int *pivot, float **b, int ldb, int *info, int batchSize)
{
  if( (n >= INT_MAX) || (nrhs >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (batchSize >= INT_MAX) )
  {
    // Fix: message previously named Cublas_Dgetrs (copy-paste error).
    THError("Cublas_Sgetrs only supports n, nrhs, lda, ldb, batchSize "
            "with the bound [val] <= %d", INT_MAX);
  }

  // no need to adjust leading dimensions, since matrices are square
  cublasOperation_t opa = convertTransToCublasOperation(transa);

  cublasHandle_t handle = THCState_getCurrentBlasHandle(state);
  cublasSetStream(handle, THCState_getCurrentStream(state));
  THCublasCheck(cublasSgetrsBatched(handle, opa, n, nrhs, a, lda, pivot, b, ldb, info, batchSize));
}

// Batched solve from LU factors (double precision).
void THCudaBlas_Dgetrs(THCState *state, char transa, int n, int nrhs, const double **a, int lda, int *pivot, double **b, int ldb, int *info, int batchSize)
{
  if( (n >= INT_MAX) || (nrhs >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (batchSize >= INT_MAX) )
  {
    THError("Cublas_Dgetrs only supports n, nrhs, lda, ldb, batchSize "
            "with the bound [val] <= %d", INT_MAX);
  }

  // no need to adjust leading dimensions, since matrices are square
  cublasOperation_t opa = convertTransToCublasOperation(transa);

  cublasHandle_t handle = THCState_getCurrentBlasHandle(state);
  cublasSetStream(handle, THCState_getCurrentStream(state));
  THCublasCheck(cublasDgetrsBatched(handle, opa, n, nrhs, a, lda, pivot, b, ldb, info, batchSize));
}

// Batched matrix inverse from LU factors (single precision); results land in
// the out-of-place c array.
void THCudaBlas_Sgetri(THCState *state, int n, const float **a, int lda, int *pivot, float **c, int ldc, int *info, int batchSize) {

  if( (n >= INT_MAX) || (lda >= INT_MAX)|| (ldc >= INT_MAX) || (batchSize >= INT_MAX) )
  {
    THError("Cublas_Sgetri only supports n, lda, ldc, batchSize "
            "with the bound [val] <= %d", INT_MAX);
  }
  cublasHandle_t handle = THCState_getCurrentBlasHandle(state);
  cublasSetStream(handle, THCState_getCurrentStream(state));
  THCublasCheck(cublasSgetriBatched(handle, n, a, lda, pivot, c, ldc, info, batchSize));
}

// Batched matrix inverse from LU factors (double precision).
void THCudaBlas_Dgetri(THCState *state, int n, const double **a, int lda, int *pivot, double **c, int ldc, int *info, int batchSize) {

  if( (n >= INT_MAX) || (lda >= INT_MAX)|| (ldc >= INT_MAX) || (batchSize >= INT_MAX) )
  {
    THError("Cublas_Dgetri only supports n, lda, ldc, batchSize "
            "with the bound [val] <= %d", INT_MAX);
  }
  cublasHandle_t handle = THCState_getCurrentBlasHandle(state);
  cublasSetStream(handle, THCState_getCurrentStream(state));
  THCublasCheck(cublasDgetriBatched(handle, n, a, lda, pivot, c, ldc, info, batchSize));
}