hip_filename
stringlengths
5
84
hip_content
stringlengths
79
9.69M
cuda_filename
stringlengths
4
83
cuda_content
stringlengths
19
9.69M
20dcf619f030ed5812cb1b7498483dd10b071dee.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*************************************************************************************************** * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief Unit test for the OrderedSequenceBarrier class */ #include "../common/cutlass_unit_test.h" #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <cute/tensor.hpp> #include <cute/arch/cluster_sm90.hpp> #include <cutlass/util/reference/host/gemm.h> #include <cutlass/cluster_launch.hpp> #include "cutlass/core_io.h" #include "cutlass/util/print_error.hpp" #include "cutlass/util/GPU_Clock.hpp" #include "testbed.h" #include "cutlass/pipeline.hpp" #include "cutlass/arch/barrier.h" #include "cute/arch/cluster_sm90.hpp" using namespace cute; //////////////////// KERNEL ///////////////////////// template<typename OrderedSequencer> struct SharedStorage { typename OrderedSequencer::SharedStorage storage; }; // Goal of this kernel is to complete deadlock-free template<int Stages, int GroupCount, int ThreadsPerGroup> __global__ static void ordered_sequence_device(uint32_t const num_iterations) { extern __shared__ char shared_memory[]; using SequenceBarrier = typename cutlass::OrderedSequenceBarrier<Stages, GroupCount>; using SmemStorage = SharedStorage<SequenceBarrier>; SmemStorage& shared_storage = *reinterpret_cast<SmemStorage*>(shared_memory); int group_idx = threadIdx.x / ThreadsPerGroup; typename SequenceBarrier::Params params; params.group_id = group_idx; // sequence ID params.group_size = ThreadsPerGroup; // Number of threads / participants in a group SequenceBarrier barrier(shared_storage.storage, params); // Ensure All CTAs in Cluster have completed init before issuing commits __syncthreads(); cute::cluster_arrive_relaxed(); cute::cluster_wait(); CUTLASS_PRAGMA_NO_UNROLL for (int i = 0; i < num_iterations; ++i){ barrier.wait(); // STAGE 1 CODE... 
#ifndef NDEBUG int thread_idx_in_group = threadIdx.x % ThreadsPerGroup; if (thread_idx_in_group == 0) { printf("STAGE 0 : Group_IDX : %d, id = %d, iter = %d, tidx = %d\n", group_idx, params.group_id, i, threadIdx.x); } #endif // Simulates long running stage #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 700) __nanosleep(100000); #endif barrier.arrive(); barrier.wait(); // STAGE 2 CODE... #ifndef NDEBUG if (thread_idx_in_group == 0) { printf("STAGE 1 : Group_IDX : %d, id = %d, iter = %d, tidx = %d\n", group_idx, params.group_id, i, threadIdx.x); } #endif // Simulates long running stage #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 700) __nanosleep(100000); #endif barrier.arrive(); } // To make sure remote SMEM doesn't get destroyed cute::cluster_arrive(); cute::cluster_wait(); } ///////////////////////////////////////////////////// template<uint32_t Stages_, uint32_t GroupCount_> struct PipelineTest { // // Data members // static constexpr uint32_t ThreadsPerGroup = 128; static constexpr uint32_t BlockSize = GroupCount_ * ThreadsPerGroup; static constexpr uint32_t Stages = Stages_; static constexpr uint32_t GroupCount = GroupCount_; using SequenceBarrier = typename cutlass::OrderedSequenceBarrier<Stages, GroupCount>; using SmemStorage = SharedStorage<SequenceBarrier>; // // Methods // // Run CuTe GEMM kernel hipError_t run(uint32_t const kNumIters, hipStream_t stream = nullptr) { // Pipeline (multistage pipeline) auto cluster_shape = Shape<_1, _1, _1>{}; // // Configure and launch // int iterations = 1; hipError_t result; for (int iter = 0; iter < iterations; ++iter) { int smem_size = int(sizeof(SmemStorage)); result = hipFuncSetAttribute( ordered_sequence_device<Stages, GroupCount, ThreadsPerGroup>, hipFuncAttributeMaxDynamicSharedMemorySize, smem_size); // Launch a single Cluster, with 128 thread per CTA dim3 dimCluster(size<0>(cluster_shape), size<1>(cluster_shape), size<2>(cluster_shape)); dim3 dimGrid(size<0>(cluster_shape), size<1>(cluster_shape), 1); dim3 
dimBlock(BlockSize,1,1); const void* kernel = (const void*)ordered_sequence_device<Stages, GroupCount, ThreadsPerGroup>; int iters = kNumIters; void* kernel_params[] = {reinterpret_cast<void*>(&iters)}; cutlass::ClusterLauncher::launch(dimGrid, dimCluster, dimBlock, smem_size, stream, kernel, kernel_params); } // profiling loop ends result = hipDeviceSynchronize(); if (result != hipSuccess) { std::cerr << "Error: hipDeviceSynchronize() failed" << std::endl; return result; } return hipSuccess; } }; #if CUDA_12_0_SM90_FEATURES_SUPPORTED TEST(SM90_Verify_OrderedSequence, Depth_2_Length_2) { Options options; static constexpr uint32_t GroupCount = 2; static constexpr uint32_t Stages = 2; using Test = PipelineTest<Stages, GroupCount>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_OrderedSequence, Depth_2_Length_3) { Options options; static constexpr uint32_t GroupCount = 3; static constexpr uint32_t Stages = 2; using Test = PipelineTest<Stages, GroupCount>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_OrderedSequence, Depth_2_Length_4) { Options options; static constexpr uint32_t GroupCount = 4; static constexpr uint32_t Stages = 2; using Test = PipelineTest<Stages, GroupCount>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_OrderedSequence, Depth_2_Length_5) { Options options; static constexpr uint32_t GroupCount = 5; static constexpr uint32_t Stages = 2; using Test = PipelineTest<Stages, GroupCount>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } #endif
20dcf619f030ed5812cb1b7498483dd10b071dee.cu
/*************************************************************************************************** * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief Unit test for the OrderedSequenceBarrier class */ #include "../common/cutlass_unit_test.h" #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <cute/tensor.hpp> #include <cute/arch/cluster_sm90.hpp> #include <cutlass/util/reference/host/gemm.h> #include <cutlass/cluster_launch.hpp> #include "cutlass/core_io.h" #include "cutlass/util/print_error.hpp" #include "cutlass/util/GPU_Clock.hpp" #include "testbed.h" #include "cutlass/pipeline.hpp" #include "cutlass/arch/barrier.h" #include "cute/arch/cluster_sm90.hpp" using namespace cute; //////////////////// KERNEL ///////////////////////// template<typename OrderedSequencer> struct SharedStorage { typename OrderedSequencer::SharedStorage storage; }; // Goal of this kernel is to complete deadlock-free template<int Stages, int GroupCount, int ThreadsPerGroup> __global__ static void ordered_sequence_device(uint32_t const num_iterations) { extern __shared__ char shared_memory[]; using SequenceBarrier = typename cutlass::OrderedSequenceBarrier<Stages, GroupCount>; using SmemStorage = SharedStorage<SequenceBarrier>; SmemStorage& shared_storage = *reinterpret_cast<SmemStorage*>(shared_memory); int group_idx = threadIdx.x / ThreadsPerGroup; typename SequenceBarrier::Params params; params.group_id = group_idx; // sequence ID params.group_size = ThreadsPerGroup; // Number of threads / participants in a group SequenceBarrier barrier(shared_storage.storage, params); // Ensure All CTAs in Cluster have completed init before issuing commits __syncthreads(); cute::cluster_arrive_relaxed(); cute::cluster_wait(); CUTLASS_PRAGMA_NO_UNROLL for (int i = 0; i < num_iterations; ++i){ barrier.wait(); // STAGE 1 CODE... 
#ifndef NDEBUG int thread_idx_in_group = threadIdx.x % ThreadsPerGroup; if (thread_idx_in_group == 0) { printf("STAGE 0 : Group_IDX : %d, id = %d, iter = %d, tidx = %d\n", group_idx, params.group_id, i, threadIdx.x); } #endif // Simulates long running stage #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 700) __nanosleep(100000); #endif barrier.arrive(); barrier.wait(); // STAGE 2 CODE... #ifndef NDEBUG if (thread_idx_in_group == 0) { printf("STAGE 1 : Group_IDX : %d, id = %d, iter = %d, tidx = %d\n", group_idx, params.group_id, i, threadIdx.x); } #endif // Simulates long running stage #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 700) __nanosleep(100000); #endif barrier.arrive(); } // To make sure remote SMEM doesn't get destroyed cute::cluster_arrive(); cute::cluster_wait(); } ///////////////////////////////////////////////////// template<uint32_t Stages_, uint32_t GroupCount_> struct PipelineTest { // // Data members // static constexpr uint32_t ThreadsPerGroup = 128; static constexpr uint32_t BlockSize = GroupCount_ * ThreadsPerGroup; static constexpr uint32_t Stages = Stages_; static constexpr uint32_t GroupCount = GroupCount_; using SequenceBarrier = typename cutlass::OrderedSequenceBarrier<Stages, GroupCount>; using SmemStorage = SharedStorage<SequenceBarrier>; // // Methods // // Run CuTe GEMM kernel cudaError_t run(uint32_t const kNumIters, cudaStream_t stream = nullptr) { // Pipeline (multistage pipeline) auto cluster_shape = Shape<_1, _1, _1>{}; // // Configure and launch // int iterations = 1; cudaError_t result; for (int iter = 0; iter < iterations; ++iter) { int smem_size = int(sizeof(SmemStorage)); result = cudaFuncSetAttribute( ordered_sequence_device<Stages, GroupCount, ThreadsPerGroup>, cudaFuncAttributeMaxDynamicSharedMemorySize, smem_size); // Launch a single Cluster, with 128 thread per CTA dim3 dimCluster(size<0>(cluster_shape), size<1>(cluster_shape), size<2>(cluster_shape)); dim3 dimGrid(size<0>(cluster_shape), size<1>(cluster_shape), 1); 
dim3 dimBlock(BlockSize,1,1); const void* kernel = (const void*)ordered_sequence_device<Stages, GroupCount, ThreadsPerGroup>; int iters = kNumIters; void* kernel_params[] = {reinterpret_cast<void*>(&iters)}; cutlass::ClusterLauncher::launch(dimGrid, dimCluster, dimBlock, smem_size, stream, kernel, kernel_params); } // profiling loop ends result = cudaDeviceSynchronize(); if (result != cudaSuccess) { std::cerr << "Error: cudaDeviceSynchronize() failed" << std::endl; return result; } return cudaSuccess; } }; #if CUDA_12_0_SM90_FEATURES_SUPPORTED TEST(SM90_Verify_OrderedSequence, Depth_2_Length_2) { Options options; static constexpr uint32_t GroupCount = 2; static constexpr uint32_t Stages = 2; using Test = PipelineTest<Stages, GroupCount>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_OrderedSequence, Depth_2_Length_3) { Options options; static constexpr uint32_t GroupCount = 3; static constexpr uint32_t Stages = 2; using Test = PipelineTest<Stages, GroupCount>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_OrderedSequence, Depth_2_Length_4) { Options options; static constexpr uint32_t GroupCount = 4; static constexpr uint32_t Stages = 2; using Test = PipelineTest<Stages, GroupCount>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_OrderedSequence, Depth_2_Length_5) { Options options; static constexpr uint32_t GroupCount = 5; static constexpr uint32_t Stages = 2; using Test = PipelineTest<Stages, GroupCount>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } #endif
7412796d6d9184a3691e3cda905d26532417a16e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #define _CRT_SECURE_NO_WARNINGS #include <stdio.h> #include <stdlib.h> #include <string> #include <stdbool.h> #include <math.h> #include <iostream> using namespace std; struct node { int data; struct node *next; }; struct Itemset { string data; struct Itemset *next; }; struct ItemsetCollection { Itemset *data; struct ItemsetCollection *next; }; struct rules { Itemset *X; Itemset *Y; double sp; double cf; struct rules *next; }; //hien thi danh sach void print_node(node *head) { struct node *ptr = head; if (ptr == NULL) cout << "this object is empty"; cout << "\n[ "; //bat dau tu phan dau danh sach while (ptr != NULL) { cout << ptr->data; ptr = ptr->next; } cout << " ]"; } void print_ItemsetRules(Itemset *head) { struct Itemset *ptr = head; //bat dau tu phan dau danh sach while (ptr != NULL) { cout << ptr->data; ptr = ptr->next; } } void print_Itemset(Itemset *head) { struct Itemset *ptr = head; if (ptr == NULL) cout << "\n this object is empty"; cout << "\n[ "; //bat dau tu phan dau danh sach while (ptr != NULL) { cout << "(" << ptr->data << ") "; ptr = ptr->next; } cout << " ]"; } void print_ItemsetCollection(ItemsetCollection *head) { struct ItemsetCollection *ptr = head; cout << "\n[ "; //bat dau tu phan dau danh sach while (ptr != NULL) { print_Itemset(ptr->data); ptr = ptr->next; } cout << "\n]"; } void print_rules(rules *head) { struct rules *ptr = head; cout << "\n[\n "; //bat dau tu phan dau danh sach while (ptr != NULL) { print_ItemsetRules(ptr->X); cout << " => "; print_ItemsetRules(ptr->Y); cout << " " << ptr->sp << " "; cout << ptr->cf; cout << "\n"; ptr = ptr->next; } cout << "]"; } //chen link tai vi tri dau tien void insertFirst_node(node *&head, int data) { //tao mot link struct node *link = (struct node*) malloc(sizeof(struct node)); link->data = data; //tro link nay toi first Itemset cu link->next = head; //tro first toi 
first Itemset moi head = link; } void insertFirst_Itemset(Itemset *&head, string data) { //tao mot link Itemset *link = new Itemset(); link->data = data; //tro link nay toi first Itemset cu link->next = head; //tro first toi first Itemset moi head = link; } void insertFirst_rules(rules *&head, Itemset *X, Itemset *Y, double sp, double cf) { //tao mot link struct rules *link = (struct rules*) malloc(sizeof(struct rules)); link->X = X; link->Y = Y; link->cf = cf; link->sp = sp; //tro link nay toi first Itemset cu link->next = head; //tro first toi first Itemset moi head = link; } void insertFirst_ItemsetCollection_cuda(ItemsetCollection *&head, Itemset *data) { //tao mot link struct ItemsetCollection *link = (struct ItemsetCollection*) malloc(sizeof(struct ItemsetCollection)); link->data = data; //tro link nay toi first Itemset cu link->next = head; //tro first toi first Itemset moi head = link; } void insertFirst_ItemsetCollection(ItemsetCollection *&head, Itemset *data) { //tao mot link struct ItemsetCollection *link = (struct ItemsetCollection*) malloc(sizeof(struct ItemsetCollection)); link->data = data; //tro link nay toi first Itemset cu link->next = head; //tro first toi first Itemset moi head = link; } //xoa phan tu dau tien struct Itemset* deleteFirst(Itemset *head) { //luu tham chieu toi first link struct Itemset *tempLink = head; //danh dau next toi first link la first head = head->next; //tra ve link bi xoa return tempLink; } //kiem tra list co trong hay khong bool isEmpty(Itemset *head) { return head == NULL; } int length_Itemset(Itemset *head) { int length = 0; struct Itemset *current; for (current = head; current != NULL; current = current->next) { length++; } return length; } int length_node(node *head) { int length = 0; struct node *current; for (current = head; current != NULL; current = current->next) { length++; } return length; } int length_ItemsetCollection(ItemsetCollection *head) { int length = 0; struct ItemsetCollection *current; for 
(current = head; current != NULL; current = current->next) { length++; } return length; } //tim mot link voi key da cho struct Itemset* find_(Itemset *head, string data) { //bat dau tim tu first link struct Itemset* current = head; //neu list la trong if (head == NULL) { return NULL; } //duyet qua list while (current->data != data) { //neu day la last Itemset if (current->next == NULL) { return NULL; } else { //di chuyen toi next link current = current->next; } } //neu tim thay du lieu, tra ve link hien tai return current; } //tim gia tri cua node tai vi tri index int findData_node(node *head, int index) { int dem = 0; //bat dau tim tu first link struct node* current = head; //neu list la trong if (head == NULL) { return NULL; } //duyet qua list while (dem != index) { //di chuyen toi next link current = current->next; dem++; } //neu tim thay du lieu, tra ve link hien tai return current->data; } //tim gia tri cua itemset tai vi tri index string findData_Itemset(Itemset *head, int index) { int dem = 0; //bat dau tim tu first link struct Itemset* current = head; //neu list la trong if (head == NULL) { return ""; } //duyet qua list while (dem != index) { //di chuyen toi next link current = current->next; dem++; } //neu tim thay du lieu, tra ve link hien tai return current->data; } //xoa mot link voi key da cho struct Itemset* deleteKey(Itemset *&head, string data) { //bat dau tu first link struct Itemset* current = head; struct Itemset* previous = NULL; //neu list la trong if (head == NULL) { return NULL; } //duyet qua list while (current->data != data) { //neu day la last Itemset if (current->next == NULL) { return NULL; } else { //luu tham chieu toi link hien tai previous = current; //di chuyen toi next link current = current->next; } } //cap nhat link if (current == head) { //thay doi first de tro toi next link //head = NULL; head = head->next; } else { //bo qua link hien tai previous->next = current->next; return current; } } // ham sap xep void sort(Itemset *head) 
{ int i, j, k, tempKey; string tempData; struct Itemset *current; struct Itemset *next; int size = length_Itemset(head); k = size; for (i = 0; i < size - 1; i++, k--) { current = head; next = head->next; for (j = 1; j < k; j++) { if (current->data > next->data) { tempData = current->data; current->data = next->data; next->data = tempData; } current = current->next; next = next->next; } } } // ham dao nguoc list void reverse_Itemset(struct Itemset** head_ref) { struct Itemset* prev = NULL; struct Itemset* current = *head_ref; struct Itemset* next; while (current != NULL) { next = current->next; current->next = prev; prev = current; current = next; } *head_ref = prev; } void reverse_ItemsetCollection(struct ItemsetCollection** head_ref) { struct ItemsetCollection* prev = NULL; struct ItemsetCollection* current = *head_ref; struct ItemsetCollection* next; while (current != NULL) { next = current->next; current->next = prev; prev = current; current = next; } *head_ref = prev; } //////////////////////////////////////////////////////////////////////////////// //****************************************************************************** //clear all ItemsetCollection void clearItemsetCollection(ItemsetCollection *&head) { //bat dau tu phan dau danh sach while (head != NULL) { head = head->next; } } //count ItemsetCollection int countItemsetCollection(ItemsetCollection *head) { //bat dau tu phan dau danh sach int kq = 0; while (head != NULL) { kq++; head = head->next; } return kq; } int countRules(rules *head) { //bat dau tu phan dau danh sach int kq = 0; while (head != NULL) { kq++; head = head->next; } return kq; } //count Itemset int countItemset(Itemset *head) { //bat dau tu phan dau danh sach int kq = 0; while (head != NULL) { kq++; head = head->next; } return kq; } //ttim so lan xuat hien itemset trong itemcollection int Appear(ItemsetCollection *db, Itemset *item) { int db_size = countItemsetCollection(db); int item_size = countItemset(item); struct 
ItemsetCollection *ptr_1 = db; int dem_z = 0; while (ptr_1 != NULL) { int dem = 0; struct Itemset *tmp = item; while (tmp != NULL) { if (find_(ptr_1->data, tmp->data) != NULL) { dem++; } tmp = tmp->next; if (dem == item_size) { dem_z++; } } ptr_1 = ptr_1->next; } return dem_z; } //ham tinh do pho bien double FindSupport(ItemsetCollection *db, Itemset *item) { double kq = 0.0; kq = ((double)Appear(db, item) / (double)countItemsetCollection(db)) * 100; return kq; } int GetBit(int value, int position) { int bit = value & (int)pow(2.0, position); return (bit > 0 ? 1 : 0); } //doi thap phan sang nhi phan node* DecimalToBinary(int value, int length) { struct node *binary = NULL; for (int position = 0; position < length; position++) { insertFirst_node(binary, GetBit(value, position)); } return (binary); } //dem so bit 1 trong chuoi nhi phan int GetOnCount(int value, int length) { int dem = 0; node* binary = DecimalToBinary(value, length); struct node *ptr = binary; //bat dau tu phan dau danh sach while (ptr != NULL) { if (ptr->data == 1) dem++; ptr = ptr->next; } return dem; } //ham tim tat ca tap con cua tap co k phan tu struct ItemsetCollection* FindSubsets(Itemset *itemset, int n) { ItemsetCollection *subsets = NULL; int subsetCount = (int)pow(2.0, countItemset(itemset)); for (int i = 0; i < subsetCount; i++) { if (n == 0 || GetOnCount(i, countItemset(itemset)) == n) { node* binary = DecimalToBinary(i, countItemset(itemset)); Itemset *subset = NULL; for (int nodeIndex = 0; nodeIndex < length_node(binary); nodeIndex++) { if (findData_node(binary, nodeIndex) == 1) { insertFirst_Itemset(subset, findData_Itemset(itemset, nodeIndex)); } } insertFirst_ItemsetCollection(subsets, subset); } } return subsets; } //--------------------------------mine data with sp----------------------------------------------------------// struct Itemset* GetUniqueItems(ItemsetCollection *head) { Itemset *kq = NULL; struct ItemsetCollection *ptr_1 = head; insertFirst_Itemset(kq, 
ptr_1->data->data); while (ptr_1 != NULL) { Itemset *ptr_2 = ptr_1->data; while (ptr_2 != NULL) { if (find_(kq, ptr_2->data) == NULL) { insertFirst_Itemset(kq, ptr_2->data); } ptr_2 = ptr_2->next; } ptr_1 = ptr_1->next; } return kq; } struct ItemsetCollection* doApriori(ItemsetCollection *db, double supportThreshold,int first[]) { Itemset *I = GetUniqueItems(db); ItemsetCollection *L = NULL;// tap du lieu pho bien ItemsetCollection *Li = NULL;// tap du lieu ItemsetCollection *Ci = NULL;// tap du lieu duoc luot bot //duyet su lap lai cua phan tu dau tien trong tap du lieu while (I != NULL) { Itemset *tmp = NULL; insertFirst_Itemset(tmp, I->data); insertFirst_ItemsetCollection(Ci, tmp); I = I->next; } // int first_tt = length_ItemsetCollection(Ci)-1; ItemsetCollection *Ci_tmp1 = Ci; while (Ci_tmp1 != NULL) { if (first[first_tt] >= supportThreshold) { insertFirst_ItemsetCollection(Li, Ci_tmp1->data); insertFirst_ItemsetCollection(L, Ci_tmp1->data); } first_tt--; Ci_tmp1 = Ci_tmp1->next; } clearItemsetCollection(Ci); Ci = FindSubsets(GetUniqueItems(Li), 2); int k = 3; // //su lap lai cac lan ke tiep for (int i = 0; i < length_ItemsetCollection(Ci); i++) { //lay Li tu Ci (phan tu dc luot bo) clearItemsetCollection(Li); ItemsetCollection *Ci_tmp = Ci; while (Ci_tmp != NULL) { double sp = FindSupport(db, Ci_tmp->data); if (sp >= supportThreshold) { insertFirst_ItemsetCollection(Li, Ci_tmp->data); insertFirst_ItemsetCollection(L, Ci_tmp->data); } Ci_tmp = Ci_tmp->next; } if (Li == NULL) break; clearItemsetCollection(Ci); Ci = FindSubsets(GetUniqueItems(Li), k); k++; } return (L); } void print_ItemsetCollection_sp(ItemsetCollection *head, ItemsetCollection *db) { struct ItemsetCollection *ptr = head; cout << "\n[ "; //bat dau tu phan dau danh sach while (ptr != NULL) { print_Itemset(ptr->data); cout << FindSupport(db, ptr->data); ptr = ptr->next; } cout << "\n]"; } //--------------------------------mine data with cf---------------------------------------------------------// 
void Mine(ItemsetCollection *db, ItemsetCollection *L, int size_L, double confidenceThreshold, rules *&allRules) { ItemsetCollection *tmp_L = L; for (int i = 0; i < size_L; i++) { ItemsetCollection *subsets = FindSubsets(tmp_L[i].data, 0); ItemsetCollection *tmp_subset = subsets; while (tmp_subset != NULL) { //tao ra ban copy du lieu cua tmp_L de dung cho viec xoa ItemsetCollection *copy_L = NULL; ItemsetCollection *tmp_L_L = tmp_L; for (int j = 0; j < size_L; j++) { Itemset *tmp_L_L_sub = tmp_L_L[i].data; Itemset *chil = NULL; while (tmp_L_L_sub != NULL) { insertFirst_Itemset(chil, tmp_L_L_sub->data); tmp_L_L_sub = tmp_L_L_sub->next; } insertFirst_ItemsetCollection(copy_L, chil); //tmp_L_L = tmp_L_L->next; } reverse_ItemsetCollection(&copy_L); // double confidence = (FindSupport(db, tmp_L[i].data) / FindSupport(db, tmp_subset->data))*100.0; if (confidence >= confidenceThreshold) { rules *rule = NULL; //rule->X = tmp->data; Itemset *tmp_X = NULL; tmp_X = tmp_subset->data; //xoa x ra khoi tmp_L->data while (tmp_X != NULL) { deleteKey(copy_L->data, tmp_X->data); tmp_X = tmp_X->next; } //rule->sp = FindSupport(db, tmp_L->data); //rule->cf = confidence; if (length_Itemset(tmp_subset->data) > 0 && length_Itemset(copy_L->data) > 0) { insertFirst_rules(allRules, tmp_subset->data, copy_L->data, FindSupport(db, tmp_L[i].data), confidence); } } tmp_subset = tmp_subset->next; } //tmp_L = tmp_L->next; } } int unique_count(ItemsetCollection *db, string a) { ItemsetCollection *tmp1 = db; int kq = 0; while (tmp1 != NULL) { Itemset *tmp2 = tmp1->data; while (tmp2 != NULL) { if (tmp2->data == a) { kq++; } tmp2 = tmp2->next; } tmp1 = tmp1->next; } return kq; } __global__ void arradd(int* a, int* b, int* c, int size) { int myid = threadIdx.x; c[myid] = a[myid] + b[myid]; } __global__ void additem(int *sp_dv, int *kq_dv, int size) { int myid = threadIdx.x; kq_dv[myid] = (int)((sp_dv[myid]*100)/size); //kq_dv[myid] = sp_dv[myid]; } int main() { struct Itemset *tmp = NULL; int *sp_dv; 
int *kq_dv; int *kq_host=new int[100]; struct Itemset *a = NULL; struct Itemset *b = NULL; struct Itemset *c = NULL; struct Itemset *d = NULL; struct Itemset *e = NULL; struct Itemset *z = NULL; struct Itemset *z1 = NULL; insertFirst_Itemset(a, "beer"); insertFirst_Itemset(a, "diaper"); insertFirst_Itemset(a, "baby powder"); insertFirst_Itemset(a, "bread"); insertFirst_Itemset(a, "umbrella"); print_Itemset(a); cout << "\n"; insertFirst_Itemset(b, "diaper"); insertFirst_Itemset(b, "baby powder"); print_Itemset(b); cout << "\n"; insertFirst_Itemset(c, "beer"); insertFirst_Itemset(c, "diaper"); insertFirst_Itemset(c, "milk"); print_Itemset(c); cout << "\n"; insertFirst_Itemset(d, "diaper"); insertFirst_Itemset(d, "beer"); insertFirst_Itemset(d, "detergent"); print_Itemset(d); cout << "\n"; insertFirst_Itemset(e, "beer"); insertFirst_Itemset(e, "milk"); insertFirst_Itemset(e, "coca-cola"); print_Itemset(e); cout << "\n"; struct ItemsetCollection *L = NULL; Itemset *Z_[5]; Z_[0] = a; Z_[1] = b; Z_[2] = c; Z_[3] = d; Z_[4] = e; for (int i = 0; i < 5; i++) { insertFirst_ItemsetCollection(L, Z_[i]); } print_ItemsetCollection(L); cout << "\n"; cout << "\nunique item: "; Itemset *uniqueItems = GetUniqueItems(L); print_Itemset(uniqueItems); cout << "\n"; int *sp_first = new int[100]; Itemset *uni_tmp = uniqueItems; int tt = 0; while (uni_tmp != NULL) { sp_first[tt] = unique_count(L, uni_tmp->data); tt++; kq_host[tt] = 0; uni_tmp = uni_tmp->next; } hipMalloc(&sp_dv, length_Itemset(uniqueItems) * sizeof(int)); hipMemcpy(sp_dv, sp_first, length_Itemset(uniqueItems) * sizeof(int), hipMemcpyHostToDevice); hipMalloc(&kq_dv, length_Itemset(uniqueItems) * sizeof(int)); additem << < 1, length_Itemset(uniqueItems) >> > (sp_dv, kq_dv, length_ItemsetCollection(L)); hipMemcpy(kq_host, kq_dv, length_Itemset(uniqueItems) * sizeof(int), hipMemcpyDeviceToHost); hipFree(sp_dv); hipFree(kq_dv); for (int i = 0; i < 8; i++) { cout << " " << kq_host[i]; } //covert L to array struct Itemcollection 
ItemsetCollection db[5]; ItemsetCollection *tmp_L = L; for (int i = 0; i < length_ItemsetCollection(L); i++) { db[i].data = tmp_L->data; tmp_L = tmp_L->next; } //test apriori(do pho bien) ItemsetCollection *L1 = doApriori(L, 40.0,kq_host); cout << "\n itemsets in L \n" << countItemsetCollection(L1);//dem tap du lieu pho bien print_ItemsetCollection_sp(L1, L); //covert L1 to array struct Itemcollection ItemsetCollection db1[7]; ItemsetCollection *tmp_L1 = L1; int size_L1 = length_ItemsetCollection(L1); for (int i = 0; i < size_L1; i++) { db1[i].data = tmp_L1->data; tmp_L1 = tmp_L1->next; } //test mining(tim luat co do tin cay >=70%) rules *allRules = NULL; Mine(L, db1, size_L1, 70.0, allRules); cout << "\n rules \n" << countRules(allRules); print_rules(allRules); }
7412796d6d9184a3691e3cda905d26532417a16e.cu
 #include "cuda_runtime.h" #include "device_launch_parameters.h" #define _CRT_SECURE_NO_WARNINGS #include <stdio.h> #include <stdlib.h> #include <string> #include <stdbool.h> #include <math.h> #include <iostream> using namespace std; struct node { int data; struct node *next; }; struct Itemset { string data; struct Itemset *next; }; struct ItemsetCollection { Itemset *data; struct ItemsetCollection *next; }; struct rules { Itemset *X; Itemset *Y; double sp; double cf; struct rules *next; }; //hien thi danh sach void print_node(node *head) { struct node *ptr = head; if (ptr == NULL) cout << "this object is empty"; cout << "\n[ "; //bat dau tu phan dau danh sach while (ptr != NULL) { cout << ptr->data; ptr = ptr->next; } cout << " ]"; } void print_ItemsetRules(Itemset *head) { struct Itemset *ptr = head; //bat dau tu phan dau danh sach while (ptr != NULL) { cout << ptr->data; ptr = ptr->next; } } void print_Itemset(Itemset *head) { struct Itemset *ptr = head; if (ptr == NULL) cout << "\n this object is empty"; cout << "\n[ "; //bat dau tu phan dau danh sach while (ptr != NULL) { cout << "(" << ptr->data << ") "; ptr = ptr->next; } cout << " ]"; } void print_ItemsetCollection(ItemsetCollection *head) { struct ItemsetCollection *ptr = head; cout << "\n[ "; //bat dau tu phan dau danh sach while (ptr != NULL) { print_Itemset(ptr->data); ptr = ptr->next; } cout << "\n]"; } void print_rules(rules *head) { struct rules *ptr = head; cout << "\n[\n "; //bat dau tu phan dau danh sach while (ptr != NULL) { print_ItemsetRules(ptr->X); cout << " => "; print_ItemsetRules(ptr->Y); cout << " " << ptr->sp << " "; cout << ptr->cf; cout << "\n"; ptr = ptr->next; } cout << "]"; } //chen link tai vi tri dau tien void insertFirst_node(node *&head, int data) { //tao mot link struct node *link = (struct node*) malloc(sizeof(struct node)); link->data = data; //tro link nay toi first Itemset cu link->next = head; //tro first toi first Itemset moi head = link; } void 
insertFirst_Itemset(Itemset *&head, string data) { //tao mot link Itemset *link = new Itemset(); link->data = data; //tro link nay toi first Itemset cu link->next = head; //tro first toi first Itemset moi head = link; } void insertFirst_rules(rules *&head, Itemset *X, Itemset *Y, double sp, double cf) { //tao mot link struct rules *link = (struct rules*) malloc(sizeof(struct rules)); link->X = X; link->Y = Y; link->cf = cf; link->sp = sp; //tro link nay toi first Itemset cu link->next = head; //tro first toi first Itemset moi head = link; } void insertFirst_ItemsetCollection_cuda(ItemsetCollection *&head, Itemset *data) { //tao mot link struct ItemsetCollection *link = (struct ItemsetCollection*) malloc(sizeof(struct ItemsetCollection)); link->data = data; //tro link nay toi first Itemset cu link->next = head; //tro first toi first Itemset moi head = link; } void insertFirst_ItemsetCollection(ItemsetCollection *&head, Itemset *data) { //tao mot link struct ItemsetCollection *link = (struct ItemsetCollection*) malloc(sizeof(struct ItemsetCollection)); link->data = data; //tro link nay toi first Itemset cu link->next = head; //tro first toi first Itemset moi head = link; } //xoa phan tu dau tien struct Itemset* deleteFirst(Itemset *head) { //luu tham chieu toi first link struct Itemset *tempLink = head; //danh dau next toi first link la first head = head->next; //tra ve link bi xoa return tempLink; } //kiem tra list co trong hay khong bool isEmpty(Itemset *head) { return head == NULL; } int length_Itemset(Itemset *head) { int length = 0; struct Itemset *current; for (current = head; current != NULL; current = current->next) { length++; } return length; } int length_node(node *head) { int length = 0; struct node *current; for (current = head; current != NULL; current = current->next) { length++; } return length; } int length_ItemsetCollection(ItemsetCollection *head) { int length = 0; struct ItemsetCollection *current; for (current = head; current != NULL; current = 
current->next) { length++; } return length; } //tim mot link voi key da cho struct Itemset* find_(Itemset *head, string data) { //bat dau tim tu first link struct Itemset* current = head; //neu list la trong if (head == NULL) { return NULL; } //duyet qua list while (current->data != data) { //neu day la last Itemset if (current->next == NULL) { return NULL; } else { //di chuyen toi next link current = current->next; } } //neu tim thay du lieu, tra ve link hien tai return current; } //tim gia tri cua node tai vi tri index int findData_node(node *head, int index) { int dem = 0; //bat dau tim tu first link struct node* current = head; //neu list la trong if (head == NULL) { return NULL; } //duyet qua list while (dem != index) { //di chuyen toi next link current = current->next; dem++; } //neu tim thay du lieu, tra ve link hien tai return current->data; } //tim gia tri cua itemset tai vi tri index string findData_Itemset(Itemset *head, int index) { int dem = 0; //bat dau tim tu first link struct Itemset* current = head; //neu list la trong if (head == NULL) { return ""; } //duyet qua list while (dem != index) { //di chuyen toi next link current = current->next; dem++; } //neu tim thay du lieu, tra ve link hien tai return current->data; } //xoa mot link voi key da cho struct Itemset* deleteKey(Itemset *&head, string data) { //bat dau tu first link struct Itemset* current = head; struct Itemset* previous = NULL; //neu list la trong if (head == NULL) { return NULL; } //duyet qua list while (current->data != data) { //neu day la last Itemset if (current->next == NULL) { return NULL; } else { //luu tham chieu toi link hien tai previous = current; //di chuyen toi next link current = current->next; } } //cap nhat link if (current == head) { //thay doi first de tro toi next link //head = NULL; head = head->next; } else { //bo qua link hien tai previous->next = current->next; return current; } } // ham sap xep void sort(Itemset *head) { int i, j, k, tempKey; string tempData; 
struct Itemset *current; struct Itemset *next; int size = length_Itemset(head); k = size; for (i = 0; i < size - 1; i++, k--) { current = head; next = head->next; for (j = 1; j < k; j++) { if (current->data > next->data) { tempData = current->data; current->data = next->data; next->data = tempData; } current = current->next; next = next->next; } } } // ham dao nguoc list void reverse_Itemset(struct Itemset** head_ref) { struct Itemset* prev = NULL; struct Itemset* current = *head_ref; struct Itemset* next; while (current != NULL) { next = current->next; current->next = prev; prev = current; current = next; } *head_ref = prev; } void reverse_ItemsetCollection(struct ItemsetCollection** head_ref) { struct ItemsetCollection* prev = NULL; struct ItemsetCollection* current = *head_ref; struct ItemsetCollection* next; while (current != NULL) { next = current->next; current->next = prev; prev = current; current = next; } *head_ref = prev; } //////////////////////////////////////////////////////////////////////////////// //****************************************************************************** //clear all ItemsetCollection void clearItemsetCollection(ItemsetCollection *&head) { //bat dau tu phan dau danh sach while (head != NULL) { head = head->next; } } //count ItemsetCollection int countItemsetCollection(ItemsetCollection *head) { //bat dau tu phan dau danh sach int kq = 0; while (head != NULL) { kq++; head = head->next; } return kq; } int countRules(rules *head) { //bat dau tu phan dau danh sach int kq = 0; while (head != NULL) { kq++; head = head->next; } return kq; } //count Itemset int countItemset(Itemset *head) { //bat dau tu phan dau danh sach int kq = 0; while (head != NULL) { kq++; head = head->next; } return kq; } //ttim so lan xuat hien itemset trong itemcollection int Appear(ItemsetCollection *db, Itemset *item) { int db_size = countItemsetCollection(db); int item_size = countItemset(item); struct ItemsetCollection *ptr_1 = db; int dem_z = 0; while 
(ptr_1 != NULL) { int dem = 0; struct Itemset *tmp = item; while (tmp != NULL) { if (find_(ptr_1->data, tmp->data) != NULL) { dem++; } tmp = tmp->next; if (dem == item_size) { dem_z++; } } ptr_1 = ptr_1->next; } return dem_z; } //ham tinh do pho bien double FindSupport(ItemsetCollection *db, Itemset *item) { double kq = 0.0; kq = ((double)Appear(db, item) / (double)countItemsetCollection(db)) * 100; return kq; } int GetBit(int value, int position) { int bit = value & (int)pow(2.0, position); return (bit > 0 ? 1 : 0); } //doi thap phan sang nhi phan node* DecimalToBinary(int value, int length) { struct node *binary = NULL; for (int position = 0; position < length; position++) { insertFirst_node(binary, GetBit(value, position)); } return (binary); } //dem so bit 1 trong chuoi nhi phan int GetOnCount(int value, int length) { int dem = 0; node* binary = DecimalToBinary(value, length); struct node *ptr = binary; //bat dau tu phan dau danh sach while (ptr != NULL) { if (ptr->data == 1) dem++; ptr = ptr->next; } return dem; } //ham tim tat ca tap con cua tap co k phan tu struct ItemsetCollection* FindSubsets(Itemset *itemset, int n) { ItemsetCollection *subsets = NULL; int subsetCount = (int)pow(2.0, countItemset(itemset)); for (int i = 0; i < subsetCount; i++) { if (n == 0 || GetOnCount(i, countItemset(itemset)) == n) { node* binary = DecimalToBinary(i, countItemset(itemset)); Itemset *subset = NULL; for (int nodeIndex = 0; nodeIndex < length_node(binary); nodeIndex++) { if (findData_node(binary, nodeIndex) == 1) { insertFirst_Itemset(subset, findData_Itemset(itemset, nodeIndex)); } } insertFirst_ItemsetCollection(subsets, subset); } } return subsets; } //--------------------------------mine data with sp----------------------------------------------------------// struct Itemset* GetUniqueItems(ItemsetCollection *head) { Itemset *kq = NULL; struct ItemsetCollection *ptr_1 = head; insertFirst_Itemset(kq, ptr_1->data->data); while (ptr_1 != NULL) { Itemset *ptr_2 = 
ptr_1->data; while (ptr_2 != NULL) { if (find_(kq, ptr_2->data) == NULL) { insertFirst_Itemset(kq, ptr_2->data); } ptr_2 = ptr_2->next; } ptr_1 = ptr_1->next; } return kq; } struct ItemsetCollection* doApriori(ItemsetCollection *db, double supportThreshold,int first[]) { Itemset *I = GetUniqueItems(db); ItemsetCollection *L = NULL;// tap du lieu pho bien ItemsetCollection *Li = NULL;// tap du lieu ItemsetCollection *Ci = NULL;// tap du lieu duoc luot bot //duyet su lap lai cua phan tu dau tien trong tap du lieu while (I != NULL) { Itemset *tmp = NULL; insertFirst_Itemset(tmp, I->data); insertFirst_ItemsetCollection(Ci, tmp); I = I->next; } // int first_tt = length_ItemsetCollection(Ci)-1; ItemsetCollection *Ci_tmp1 = Ci; while (Ci_tmp1 != NULL) { if (first[first_tt] >= supportThreshold) { insertFirst_ItemsetCollection(Li, Ci_tmp1->data); insertFirst_ItemsetCollection(L, Ci_tmp1->data); } first_tt--; Ci_tmp1 = Ci_tmp1->next; } clearItemsetCollection(Ci); Ci = FindSubsets(GetUniqueItems(Li), 2); int k = 3; // //su lap lai cac lan ke tiep for (int i = 0; i < length_ItemsetCollection(Ci); i++) { //lay Li tu Ci (phan tu dc luot bo) clearItemsetCollection(Li); ItemsetCollection *Ci_tmp = Ci; while (Ci_tmp != NULL) { double sp = FindSupport(db, Ci_tmp->data); if (sp >= supportThreshold) { insertFirst_ItemsetCollection(Li, Ci_tmp->data); insertFirst_ItemsetCollection(L, Ci_tmp->data); } Ci_tmp = Ci_tmp->next; } if (Li == NULL) break; clearItemsetCollection(Ci); Ci = FindSubsets(GetUniqueItems(Li), k); k++; } return (L); } void print_ItemsetCollection_sp(ItemsetCollection *head, ItemsetCollection *db) { struct ItemsetCollection *ptr = head; cout << "\n[ "; //bat dau tu phan dau danh sach while (ptr != NULL) { print_Itemset(ptr->data); cout << FindSupport(db, ptr->data); ptr = ptr->next; } cout << "\n]"; } //--------------------------------mine data with cf---------------------------------------------------------// void Mine(ItemsetCollection *db, ItemsetCollection *L, int 
size_L, double confidenceThreshold, rules *&allRules) { ItemsetCollection *tmp_L = L; for (int i = 0; i < size_L; i++) { ItemsetCollection *subsets = FindSubsets(tmp_L[i].data, 0); ItemsetCollection *tmp_subset = subsets; while (tmp_subset != NULL) { //tao ra ban copy du lieu cua tmp_L de dung cho viec xoa ItemsetCollection *copy_L = NULL; ItemsetCollection *tmp_L_L = tmp_L; for (int j = 0; j < size_L; j++) { Itemset *tmp_L_L_sub = tmp_L_L[i].data; Itemset *chil = NULL; while (tmp_L_L_sub != NULL) { insertFirst_Itemset(chil, tmp_L_L_sub->data); tmp_L_L_sub = tmp_L_L_sub->next; } insertFirst_ItemsetCollection(copy_L, chil); //tmp_L_L = tmp_L_L->next; } reverse_ItemsetCollection(&copy_L); // double confidence = (FindSupport(db, tmp_L[i].data) / FindSupport(db, tmp_subset->data))*100.0; if (confidence >= confidenceThreshold) { rules *rule = NULL; //rule->X = tmp->data; Itemset *tmp_X = NULL; tmp_X = tmp_subset->data; //xoa x ra khoi tmp_L->data while (tmp_X != NULL) { deleteKey(copy_L->data, tmp_X->data); tmp_X = tmp_X->next; } //rule->sp = FindSupport(db, tmp_L->data); //rule->cf = confidence; if (length_Itemset(tmp_subset->data) > 0 && length_Itemset(copy_L->data) > 0) { insertFirst_rules(allRules, tmp_subset->data, copy_L->data, FindSupport(db, tmp_L[i].data), confidence); } } tmp_subset = tmp_subset->next; } //tmp_L = tmp_L->next; } } int unique_count(ItemsetCollection *db, string a) { ItemsetCollection *tmp1 = db; int kq = 0; while (tmp1 != NULL) { Itemset *tmp2 = tmp1->data; while (tmp2 != NULL) { if (tmp2->data == a) { kq++; } tmp2 = tmp2->next; } tmp1 = tmp1->next; } return kq; } __global__ void arradd(int* a, int* b, int* c, int size) { int myid = threadIdx.x; c[myid] = a[myid] + b[myid]; } __global__ void additem(int *sp_dv, int *kq_dv, int size) { int myid = threadIdx.x; kq_dv[myid] = (int)((sp_dv[myid]*100)/size); //kq_dv[myid] = sp_dv[myid]; } int main() { struct Itemset *tmp = NULL; int *sp_dv; int *kq_dv; int *kq_host=new int[100]; struct Itemset *a = 
NULL; struct Itemset *b = NULL; struct Itemset *c = NULL; struct Itemset *d = NULL; struct Itemset *e = NULL; struct Itemset *z = NULL; struct Itemset *z1 = NULL; insertFirst_Itemset(a, "beer"); insertFirst_Itemset(a, "diaper"); insertFirst_Itemset(a, "baby powder"); insertFirst_Itemset(a, "bread"); insertFirst_Itemset(a, "umbrella"); print_Itemset(a); cout << "\n"; insertFirst_Itemset(b, "diaper"); insertFirst_Itemset(b, "baby powder"); print_Itemset(b); cout << "\n"; insertFirst_Itemset(c, "beer"); insertFirst_Itemset(c, "diaper"); insertFirst_Itemset(c, "milk"); print_Itemset(c); cout << "\n"; insertFirst_Itemset(d, "diaper"); insertFirst_Itemset(d, "beer"); insertFirst_Itemset(d, "detergent"); print_Itemset(d); cout << "\n"; insertFirst_Itemset(e, "beer"); insertFirst_Itemset(e, "milk"); insertFirst_Itemset(e, "coca-cola"); print_Itemset(e); cout << "\n"; struct ItemsetCollection *L = NULL; Itemset *Z_[5]; Z_[0] = a; Z_[1] = b; Z_[2] = c; Z_[3] = d; Z_[4] = e; for (int i = 0; i < 5; i++) { insertFirst_ItemsetCollection(L, Z_[i]); } print_ItemsetCollection(L); cout << "\n"; cout << "\nunique item: "; Itemset *uniqueItems = GetUniqueItems(L); print_Itemset(uniqueItems); cout << "\n"; int *sp_first = new int[100]; Itemset *uni_tmp = uniqueItems; int tt = 0; while (uni_tmp != NULL) { sp_first[tt] = unique_count(L, uni_tmp->data); tt++; kq_host[tt] = 0; uni_tmp = uni_tmp->next; } cudaMalloc(&sp_dv, length_Itemset(uniqueItems) * sizeof(int)); cudaMemcpy(sp_dv, sp_first, length_Itemset(uniqueItems) * sizeof(int), cudaMemcpyHostToDevice); cudaMalloc(&kq_dv, length_Itemset(uniqueItems) * sizeof(int)); additem << < 1, length_Itemset(uniqueItems) >> > (sp_dv, kq_dv, length_ItemsetCollection(L)); cudaMemcpy(kq_host, kq_dv, length_Itemset(uniqueItems) * sizeof(int), cudaMemcpyDeviceToHost); cudaFree(sp_dv); cudaFree(kq_dv); for (int i = 0; i < 8; i++) { cout << " " << kq_host[i]; } //covert L to array struct Itemcollection ItemsetCollection db[5]; ItemsetCollection *tmp_L = 
L; for (int i = 0; i < length_ItemsetCollection(L); i++) { db[i].data = tmp_L->data; tmp_L = tmp_L->next; } //test apriori(do pho bien) ItemsetCollection *L1 = doApriori(L, 40.0,kq_host); cout << "\n itemsets in L \n" << countItemsetCollection(L1);//dem tap du lieu pho bien print_ItemsetCollection_sp(L1, L); //covert L1 to array struct Itemcollection ItemsetCollection db1[7]; ItemsetCollection *tmp_L1 = L1; int size_L1 = length_ItemsetCollection(L1); for (int i = 0; i < size_L1; i++) { db1[i].data = tmp_L1->data; tmp_L1 = tmp_L1->next; } //test mining(tim luat co do tin cay >=70%) rules *allRules = NULL; Mine(L, db1, size_L1, 70.0, allRules); cout << "\n rules \n" << countRules(allRules); print_rules(allRules); }
9322597176fe5e3c63966861ceaf63493627d3f0.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cmath>
#include <iomanip>

#include "matrix.hh"
#include "utils.hh"

namespace gpu_1::utils
{
    // All matrices below are pitched byte buffers; elements are addressed
    // through get_val_ptr_cuda / get_val_ptr_const_cuda (declared in
    // utils.hh, defined near the end of this file). Unless noted otherwise,
    // the kernels are sequential: the host wrappers launch them with
    // dim3(1), dim3(1) and a single thread does all the work.
    //
    // FIX applied throughout: float data was being computed with double
    // math (pow(x, 2), sqrt, 0.0 literals); squares are now explicit
    // multiplies and sqrtf / 0.0f are used, keeping everything in float.

    // Copy the row_count x col_count window of `matrix` starting at
    // (starting_row, starting_col) into `result`.
    __global__ void sub_matrix_cuda(const char* matrix_data, std::size_t matrix_pitch,
                                    std::size_t starting_row, std::size_t starting_col,
                                    std::size_t row_count, std::size_t col_count,
                                    char* result_data, std::size_t result_pitch)
    {
        for (std::size_t row = 0; row < row_count; row++)
        {
            for (std::size_t col = 0; col < col_count; col++)
            {
                value_t* result_ptr;
                const value_t* matrix_ptr;
                get_val_ptr_cuda(result_data, result_pitch, row, col, &result_ptr);
                get_val_ptr_const_cuda(matrix_data, matrix_pitch, row + starting_row,
                                       col + starting_col, &matrix_ptr);
                *result_ptr = *matrix_ptr;
            }
        }
    }

    // result = transpose(matrix); result is matrix_cols x matrix_rows.
    __global__ void matrix_transpose_cuda(const char* matrix_data, std::size_t matrix_pitch,
                                          std::size_t matrix_rows, std::size_t matrix_cols,
                                          char* result_data, std::size_t result_pitch)
    {
        for (std::size_t row = 0; row < matrix_cols; row++)
        {
            for (std::size_t col = 0; col < matrix_rows; col++)
            {
                value_t* result_ptr;
                const value_t* matrix_ptr;
                get_val_ptr_cuda(result_data, result_pitch, row, col, &result_ptr);
                get_val_ptr_const_cuda(matrix_data, matrix_pitch, col, row, &matrix_ptr);
                *result_ptr = *matrix_ptr;
            }
        }
    }

    // result[row] = matrix[row] - vector, for every row (row-wise broadcast).
    __global__ void matrix_subtract_vector_cuda(const char* matrix_data, std::size_t matrix_pitch,
                                                std::size_t matrix_rows, std::size_t matrix_cols,
                                                const char* vector_data, std::size_t vector_pitch,
                                                char* result_data, std::size_t result_pitch)
    {
        for (std::size_t row = 0; row < matrix_rows; row++)
        {
            for (std::size_t col = 0; col < matrix_cols; col++)
            {
                value_t* result_ptr;
                const value_t* matrix_ptr;
                const value_t* vector_ptr;
                get_val_ptr_cuda(result_data, result_pitch, row, col, &result_ptr);
                get_val_ptr_const_cuda(matrix_data, matrix_pitch, row, col, &matrix_ptr);
                get_val_ptr_const_cuda(vector_data, vector_pitch, 0, col, &vector_ptr);
                *result_ptr = *matrix_ptr - *vector_ptr;
            }
        }
    }

    // result[row] = matrix[row] + vector, for every row (row-wise broadcast).
    __global__ void matrix_add_vector_cuda(const char* matrix_data, std::size_t matrix_pitch,
                                           std::size_t matrix_rows, std::size_t matrix_cols,
                                           const char* vector_data, std::size_t vector_pitch,
                                           char* result_data, std::size_t result_pitch)
    {
        for (std::size_t row = 0; row < matrix_rows; row++)
        {
            for (std::size_t col = 0; col < matrix_cols; col++)
            {
                value_t* result_ptr;
                const value_t* matrix_ptr;
                const value_t* vector_ptr;
                get_val_ptr_cuda(result_data, result_pitch, row, col, &result_ptr);
                get_val_ptr_const_cuda(matrix_data, matrix_pitch, row, col, &matrix_ptr);
                get_val_ptr_const_cuda(vector_data, vector_pitch, 0, col, &vector_ptr);
                *result_ptr = *matrix_ptr + *vector_ptr;
            }
        }
    }

    // result = matrix * val (element-wise scalar multiply).
    __global__ void multiply_by_scalar_cuda(const char* matrix_data, std::size_t matrix_pitch,
                                            std::size_t matrix_rows, std::size_t matrix_cols,
                                            float val,
                                            char* result_data, std::size_t result_pitch)
    {
        for (std::size_t row = 0; row < matrix_rows; row++)
        {
            for (std::size_t col = 0; col < matrix_cols; col++)
            {
                value_t* result_ptr;
                const value_t* matrix_ptr;
                get_val_ptr_cuda(result_data, result_pitch, row, col, &result_ptr);
                get_val_ptr_const_cuda(matrix_data, matrix_pitch, row, col, &matrix_ptr);
                *result_ptr = *matrix_ptr * val;
            }
        }
    }

    // result = lhs (lhs_rows x lhs_cols) * rhs (rhs_rows x rhs_cols).
    // Naive triple loop; assumes lhs_cols == rhs_rows.
    __global__ void matrix_dot_product_cuda(const char* lhs_data, std::size_t lhs_pitch,
                                            std::size_t lhs_rows, std::size_t lhs_cols,
                                            const char* rhs_data, std::size_t rhs_pitch,
                                            std::size_t rhs_rows, std::size_t rhs_cols,
                                            char* result_data, std::size_t result_pitch)
    {
        std::size_t row_count = lhs_rows;
        std::size_t col_count = rhs_cols;
        std::size_t common_dim = lhs_cols;

        for (std::size_t row = 0; row < row_count; row++)
        {
            for (std::size_t col = 0; col < col_count; col++)
            {
                value_t* result_ptr;
                get_val_ptr_cuda(result_data, result_pitch, row, col, &result_ptr);
                *result_ptr = 0;

                for (std::size_t k = 0; k < common_dim; k++)
                {
                    const value_t* lhs_ptr;
                    const value_t* rhs_ptr;
                    get_val_ptr_const_cuda(lhs_data, lhs_pitch, row, k, &lhs_ptr);
                    get_val_ptr_const_cuda(rhs_data, rhs_pitch, k, col, &rhs_ptr);
                    *result_ptr += *lhs_ptr * *rhs_ptr;
                }
            }
        }
    }

    // result[0][i] = lhs[lhs_row][i] * rhs[rhs_row][i] (element-wise product
    // of one row of each operand, written into row 0 of result).
    __global__ void vector_element_wise_multiplication_cuda(
        const char* lhs_data, std::size_t lhs_pitch, std::size_t lhs_cols, std::size_t lhs_row,
        const char* rhs_data, std::size_t rhs_pitch, std::size_t rhs_cols, std::size_t rhs_row,
        char* result_data, std::size_t result_pitch)
    {
        for (std::size_t i = 0; i < lhs_cols; i++)
        {
            value_t* result_ptr;
            const value_t* lhs_ptr;
            const value_t* rhs_ptr;
            get_val_ptr_cuda(result_data, result_pitch, 0, i, &result_ptr);
            get_val_ptr_const_cuda(lhs_data, lhs_pitch, lhs_row, i, &lhs_ptr);
            get_val_ptr_const_cuda(rhs_data, rhs_pitch, rhs_row, i, &rhs_ptr);
            *result_ptr = *lhs_ptr * *rhs_ptr;
        }
    }

    // result = lhs - rhs (element-wise; dimensions taken from lhs).
    __global__ void matrix_subtract_cuda(const char* lhs_data, std::size_t lhs_pitch,
                                         std::size_t lhs_rows, std::size_t lhs_cols,
                                         const char* rhs_data, std::size_t rhs_pitch,
                                         std::size_t rhs_rows, std::size_t rhs_cols,
                                         char* result_data, std::size_t result_pitch)
    {
        for (std::size_t row = 0; row < lhs_rows; row++)
        {
            for (std::size_t col = 0; col < lhs_cols; col++)
            {
                value_t* result_ptr;
                const value_t* lhs_ptr;
                const value_t* rhs_ptr;
                get_val_ptr_cuda(result_data, result_pitch, row, col, &result_ptr);
                get_val_ptr_const_cuda(lhs_data, lhs_pitch, row, col, &lhs_ptr);
                get_val_ptr_const_cuda(rhs_data, rhs_pitch, row, col, &rhs_ptr);
                *result_ptr = *lhs_ptr - *rhs_ptr;
            }
        }
    }

    // *sum = sum of row 0 of `vector`.
    __global__ void vector_sum_cuda(const char* vector_data, std::size_t vector_pitch,
                                    std::size_t vector_cols, float* sum)
    {
        *sum = 0.0f;  // float literal: keep the accumulation in float
        for (std::size_t col = 0; col < vector_cols; col++)
        {
            const value_t* data;
            get_val_ptr_const_cuda(vector_data, vector_pitch, 0, col, &data);
            *sum += *data;
        }
    }

    // *norm = Frobenius norm of `matrix` (sqrt of the sum of squares).
    __global__ void matrix_norm_2_cuda(const char* matrix_data, std::size_t matrix_pitch,
                                       std::size_t matrix_rows, std::size_t matrix_cols,
                                       float* norm)
    {
        float sum = 0.0f;
        for (std::size_t row = 0; row < matrix_rows; row++)
        {
            const value_t* line;
            get_val_ptr_const_cuda(matrix_data, matrix_pitch, row, 0, &line);
            for (std::size_t col = 0; col < matrix_cols; col++)
            {
                sum += line[col] * line[col];  // was pow(line[col], 2): double math
            }
        }
        *norm = sqrtf(sum);
    }

    // result row 0 = column-wise mean of `matrix`. NOTE: the final division
    // touches exactly 3 columns, so this assumes 3D points (matrix_cols == 3).
    __global__ void matrix_centroid_cuda(const char* matrix_data, std::size_t matrix_pitch,
                                         std::size_t matrix_rows, std::size_t matrix_cols,
                                         char* result_data, std::size_t result_pitch)
    {
        std::size_t row_count = matrix_rows;
        std::size_t col_count = matrix_cols;

        for (std::size_t row = 0; row < row_count; row++)
        {
            for (std::size_t col = 0; col < col_count; col++)
            {
                value_t* result_ptr;
                const value_t* matrix_ptr;
                get_val_ptr_cuda(result_data, result_pitch, 0, col, &result_ptr);
                get_val_ptr_const_cuda(matrix_data, matrix_pitch, row, col, &matrix_ptr);
                if (row == 0)
                    *result_ptr = *matrix_ptr;   // first row initializes the sums
                else
                    *result_ptr += *matrix_ptr;
            }
        }

        value_t* result_ptr;
        get_val_ptr_cuda(result_data, result_pitch, 0, 0, &result_ptr);
        result_ptr[0] /= row_count;
        result_ptr[1] /= row_count;
        result_ptr[2] /= row_count;
    }

    // *distance = Euclidean distance between row p_row of p and row q_row of
    // q, both treated as 3D points (x, y, z).
    __device__ void compute_distance_cuda(const char* p_data, std::size_t p_pitch,
                                          std::size_t p_row,
                                          const char* q_data, std::size_t q_pitch,
                                          std::size_t q_row, float* distance)
    {
        const float* X1;
        const float* Y1;
        const float* Z1;
        const float* X2;
        const float* Y2;
        const float* Z2;
        get_val_ptr_const_cuda(p_data, p_pitch, p_row, 0, &X1);
        get_val_ptr_const_cuda(p_data, p_pitch, p_row, 1, &Y1);
        get_val_ptr_const_cuda(p_data, p_pitch, p_row, 2, &Z1);
        get_val_ptr_const_cuda(q_data, q_pitch, q_row, 0, &X2);
        get_val_ptr_const_cuda(q_data, q_pitch, q_row, 1, &Y2);
        get_val_ptr_const_cuda(q_data, q_pitch, q_row, 2, &Z2);

        // was sqrt(pow(.., 2) + ... * 1.0): all-double math on float data
        float dx = *X2 - *X1;
        float dy = *Y2 - *Y1;
        float dz = *Z2 - *Z1;
        *distance = sqrtf(dx * dx + dy * dy + dz * dz);
    }

    // Parallel kernel (one thread per row of P, with a bounds guard):
    // for each point of P, copy its nearest neighbor in Q into `res`.
    __global__ void get_nearest_neighbors_cuda(const char* P_data, std::size_t P_pitch,
                                               std::size_t P_rows,
                                               const char* Q_data, std::size_t Q_pitch,
                                               std::size_t Q_rows,
                                               char* res_data, std::size_t res_pitch)
    {
        std::size_t p_row = blockDim.x * blockIdx.x + threadIdx.x;
        if (p_row >= P_rows)
            return;

        float min_dist = MAXFLOAT;
        std::size_t choosen_row = 0;
        for (std::size_t q_row = 0; q_row < Q_rows; q_row++)
        {
            float dist;
            compute_distance_cuda(P_data, P_pitch, p_row, Q_data, Q_pitch, q_row, &dist);
            if (dist < min_dist)
            {
                min_dist = dist;
                choosen_row = q_row;
            }
        }

        const float* Q_line;
        float* res_line;
        get_val_ptr_const_cuda(Q_data, Q_pitch, choosen_row, 0, &Q_line);
        get_val_ptr_cuda(res_data, res_pitch, p_row, 0, &res_line);
        for (std::size_t i = 0; i < 3; i++)
            res_line[i] = Q_line[i];
    }

    // *sum = trace of `matrix` (sum of the main diagonal).
    __global__ void matrix_diag_sum_cuda(const char* matrix_data, std::size_t matrix_pitch,
                                         std::size_t matrix_rows, float* sum)
    {
        *sum = 0.0f;
        for (std::size_t row = 0; row < matrix_rows; row++)
        {
            const value_t* matrix_ptr;
            get_val_ptr_const_cuda(matrix_data, matrix_pitch, row, row, &matrix_ptr);
            *sum += *matrix_ptr;
        }
    }

    // *val = &matrix[row][col] for a pitched const buffer.
    __device__ void get_val_ptr_const_cuda(const char* data, std::size_t pitch,
                                           std::size_t row, std::size_t col,
                                           const value_t** val)
    {
        *val = (value_t*)((data + row * pitch) + col * sizeof(value_t));
    }

    // *val = &matrix[row][col] for a pitched mutable buffer.
    __device__ void get_val_ptr_cuda(char* data, std::size_t pitch,
                                     std::size_t row, std::size_t col, value_t** val)
    {
        *val = (value_t*)((data + row * pitch) + col * sizeof(value_t));
    }

    // matrix[row][col] = val (value passed by copy).
    __global__ void set_val_cuda(char* matrix_data, std::size_t matrix_pitch,
                                 std::size_t row, std::size_t col, value_t val)
    {
        value_t* val_ptr;
        get_val_ptr_cuda(matrix_data, matrix_pitch, row, col, &val_ptr);
        *val_ptr = val;
    }

    // matrix[row][col] = *val (value read from device memory).
    __global__ void set_val_ptr_cuda(char* matrix_data, std::size_t matrix_pitch,
                                     std::size_t row, std::size_t col, value_t* val)
    {
        value_t* val_ptr;
        get_val_ptr_cuda(matrix_data, matrix_pitch, row, col, &val_ptr);
        *val_ptr = *val;
    }

    // From quaternion q = (q0, q1, q2, q3) stored as a 4x1 column, fill the
    // 4x4 matrices QBar^T and Q used for the quaternion-based rotation
    // estimation.
    __global__ void compute_rotation_matrix_cuda(const char* q_data, std::size_t q_pitch,
                                                 char* QBar_T_data, std::size_t QBar_T_pitch,
                                                 char* Q_data, std::size_t Q_pitch)
    {
        const value_t* q0_ptr;
        const value_t* q1_ptr;
        const value_t* q2_ptr;
        const value_t* q3_ptr;
        get_val_ptr_const_cuda(q_data, q_pitch, 0, 0, &q0_ptr);
        get_val_ptr_const_cuda(q_data, q_pitch, 1, 0, &q1_ptr);
        get_val_ptr_const_cuda(q_data, q_pitch, 2, 0, &q2_ptr);
        get_val_ptr_const_cuda(q_data, q_pitch, 3, 0, &q3_ptr);

        value_t* QBar_T_0_ptr;
        value_t* QBar_T_1_ptr;
        value_t* QBar_T_2_ptr;
        value_t* QBar_T_3_ptr;
        get_val_ptr_cuda(QBar_T_data, QBar_T_pitch, 0, 0, &QBar_T_0_ptr);
        get_val_ptr_cuda(QBar_T_data, QBar_T_pitch, 1, 0, &QBar_T_1_ptr);
        get_val_ptr_cuda(QBar_T_data, QBar_T_pitch, 2, 0, &QBar_T_2_ptr);
        get_val_ptr_cuda(QBar_T_data, QBar_T_pitch, 3, 0, &QBar_T_3_ptr);

        QBar_T_0_ptr[0] = *q0_ptr;
        QBar_T_0_ptr[1] = *q1_ptr;
        QBar_T_0_ptr[2] = *q2_ptr;
        QBar_T_0_ptr[3] = *q3_ptr;

        QBar_T_1_ptr[0] = -*q1_ptr;
        QBar_T_1_ptr[1] = *q0_ptr;
        QBar_T_1_ptr[2] = *q3_ptr;
        QBar_T_1_ptr[3] = -*q2_ptr;

        QBar_T_2_ptr[0] = -*q2_ptr;
        QBar_T_2_ptr[1] = -*q3_ptr;
        QBar_T_2_ptr[2] = *q0_ptr;
        QBar_T_2_ptr[3] = *q1_ptr;

        QBar_T_3_ptr[0] = -*q3_ptr;
        QBar_T_3_ptr[1] = *q2_ptr;
        QBar_T_3_ptr[2] = -*q1_ptr;
        QBar_T_3_ptr[3] = *q0_ptr;

        value_t* Q_0_ptr;
        value_t* Q_1_ptr;
        value_t* Q_2_ptr;
        value_t* Q_3_ptr;
        get_val_ptr_cuda(Q_data, Q_pitch, 0, 0, &Q_0_ptr);
        get_val_ptr_cuda(Q_data, Q_pitch, 1, 0, &Q_1_ptr);
        get_val_ptr_cuda(Q_data, Q_pitch, 2, 0, &Q_2_ptr);
        get_val_ptr_cuda(Q_data, Q_pitch, 3, 0, &Q_3_ptr);

        Q_0_ptr[0] = *q0_ptr;
        Q_0_ptr[1] = -*q1_ptr;
        Q_0_ptr[2] = -*q2_ptr;
        Q_0_ptr[3] = -*q3_ptr;

        Q_1_ptr[0] = *q1_ptr;
        Q_1_ptr[1] = *q0_ptr;
        Q_1_ptr[2] = *q3_ptr;
        Q_1_ptr[3] = -*q2_ptr;

        Q_2_ptr[0] = *q2_ptr;
        Q_2_ptr[1] = -*q3_ptr;
        Q_2_ptr[2] = *q0_ptr;
        Q_2_ptr[3] = *q1_ptr;

        Q_3_ptr[0] = *q3_ptr;
        Q_3_ptr[1] = *q2_ptr;
        Q_3_ptr[2] = -*q1_ptr;
        Q_3_ptr[3] = *q0_ptr;
    }

    // *val = matrix[row][col] (copied into device memory for the host to read).
    __global__ void get_val_cuda(const char* matrix_data, std::size_t matrix_pitch,
                                 std::size_t row, std::size_t col, value_t* val)
    {
        const value_t* val_ptr;
        get_val_ptr_const_cuda(matrix_data, matrix_pitch, row, col, &val_ptr);
        *val = *val_ptr;
    }

    // Debug helper: print the matrix from device code, one "|"-wrapped row
    // per line.
    __global__ void print_matrix_cuda(const char* matrix, std::size_t pitch,
                                      std::size_t rows, std::size_t cols)
    {
        for (std::size_t row = 0; row < rows; row++)
        {
            printf("| ");
            for (std::size_t col = 0; col < cols; col++)
            {
                const value_t* val;
                get_val_ptr_const_cuda(matrix, pitch, row, col, &val);
                printf("%f ", *val);
            }
            printf("|\n");
        }
    }

    // Host wrapper: result = lhs * rhs (single-thread launch, synchronous).
    void matrix_dot_product(const matrix_device_t& lhs, const matrix_device_t& rhs,
                            matrix_device_t& result)
    {
        hipLaunchKernelGGL(matrix_dot_product_cuda, dim3(1), dim3(1), 0, 0,
                           lhs.data_, lhs.pitch_, lhs.rows_, lhs.cols_,
                           rhs.data_, rhs.pitch_, rhs.rows_, rhs.cols_,
                           result.data_, result.pitch_);
        hipDeviceSynchronize();
        if (hipPeekAtLastError())
        {
            abortError("Computation Error");
        }
    }

    // Host wrapper: element-wise product of one row of lhs and one row of
    // rhs into row 0 of result.
    void vector_element_wise_multiplication(const matrix_device_t& lhs, std::size_t lhs_row,
                                            const matrix_device_t& rhs, std::size_t rhs_row,
                                            matrix_device_t& result)
    {
        hipLaunchKernelGGL(vector_element_wise_multiplication_cuda, dim3(1), dim3(1), 0, 0,
                           lhs.data_, lhs.pitch_, lhs.cols_, lhs_row,
                           rhs.data_, rhs.pitch_, rhs.cols_, rhs_row,
                           result.data_, result.pitch_);
        hipDeviceSynchronize();
        if (hipPeekAtLastError())
        {
            abortError("Computation Error");
        }
    }

    // Host wrapper: sum of row 0 of `vector`, returned to the host via a
    // temporary device scalar.
    float vector_sum(const matrix_device_t& vector)
    {
        float* sum_device;
        hipError_t rc = hipSuccess;
        rc = hipMalloc(&sum_device, sizeof(float));
        if (rc)
        {
            abortError("Fail buffer allocation");
        }

        hipLaunchKernelGGL(vector_sum_cuda, dim3(1), dim3(1), 0, 0,
                           vector.data_, vector.pitch_, vector.cols_, sum_device);
        hipDeviceSynchronize();
        if (hipPeekAtLastError())
        {
            abortError("Computation Error");
        }

        float sum_host;
        rc = hipMemcpy(&sum_host, sum_device, sizeof(float), hipMemcpyDeviceToHost);
        if (rc)
        {
            abortError("Fail buffer copy");
        }
        rc = hipFree(sum_device);
        if (rc)
        {
            abortError("Fail buffer free");
        }
        return sum_host;
    }

    // Host wrapper: result = lhs - rhs.
    void matrix_subtract(const matrix_device_t& lhs, const matrix_device_t& rhs,
                         matrix_device_t& result)
    {
        hipLaunchKernelGGL(matrix_subtract_cuda, dim3(1), dim3(1), 0, 0,
                           lhs.data_, lhs.pitch_, lhs.rows_, lhs.cols_,
                           rhs.data_, rhs.pitch_, rhs.rows_, rhs.cols_,
                           result.data_, result.pitch_);
        hipDeviceSynchronize();
        if (hipPeekAtLastError())
        {
            abortError("Computation Error");
        }
    }

    // Host wrapper: fill QBar^T and Q from quaternion q.
    void compute_rotation_matrix(const matrix_device_t& q, matrix_device_t& QBar_T,
                                 matrix_device_t& Q)
    {
        hipLaunchKernelGGL(compute_rotation_matrix_cuda, dim3(1), dim3(1), 0, 0,
                           q.data_, q.pitch_, QBar_T.data_, QBar_T.pitch_,
                           Q.data_, Q.pitch_);
        hipDeviceSynchronize();
        if (hipPeekAtLastError())
        {
            abortError("Computation Error");
        }
    }

    // Host-side pitched element addressing (same layout as the device
    // helpers above).
    value_t* get_val_ptr(char* data, std::size_t pitch, std::size_t row, std::size_t col)
    {
        return (value_t*)((data + row * pitch) + col * sizeof(value_t));
    }
} // namespace utils
9322597176fe5e3c63966861ceaf63493627d3f0.cu
#include <cmath>
#include <iomanip>

#include "matrix.hh"
#include "utils.hh"

namespace gpu_1::utils {

// NOTE on the kernels in this file: the host side launches each of them with
// <<<1, 1>>>, so every kernel below runs its loops sequentially in a single
// thread and needs no thread-index guard.  All matrices are pitched device
// allocations addressed through get_val_ptr_cuda / get_val_ptr_const_cuda
// (declared in the project headers).

// Copies the row_count x col_count sub-matrix of `matrix` whose top-left
// corner is (starting_row, starting_col) into `result` (written at (0, 0)).
__global__ void sub_matrix_cuda(const char* matrix_data, std::size_t matrix_pitch,
                                std::size_t starting_row, std::size_t starting_col,
                                std::size_t row_count, std::size_t col_count,
                                char* result_data, std::size_t result_pitch)
{
    for (std::size_t row = 0; row < row_count; row++)
    {
        for (std::size_t col = 0; col < col_count; col++)
        {
            value_t* result_ptr;
            const value_t* matrix_ptr;
            get_val_ptr_cuda(result_data, result_pitch, row, col, &result_ptr);
            get_val_ptr_const_cuda(matrix_data, matrix_pitch, row + starting_row,
                                   col + starting_col, &matrix_ptr);
            *result_ptr = *matrix_ptr;
        }
    }
}

// Writes the transpose of `matrix` (matrix_rows x matrix_cols) into `result`
// (matrix_cols x matrix_rows).
__global__ void matrix_transpose_cuda(const char* matrix_data, std::size_t matrix_pitch,
                                      std::size_t matrix_rows, std::size_t matrix_cols,
                                      char* result_data, std::size_t result_pitch)
{
    for (std::size_t row = 0; row < matrix_cols; row++)
    {
        for (std::size_t col = 0; col < matrix_rows; col++)
        {
            value_t* result_ptr;
            const value_t* matrix_ptr;
            get_val_ptr_cuda(result_data, result_pitch, row, col, &result_ptr);
            get_val_ptr_const_cuda(matrix_data, matrix_pitch, col, row, &matrix_ptr);
            *result_ptr = *matrix_ptr;
        }
    }
}

// result = matrix - vector, where `vector` is a single row (row 0) broadcast
// against every row of `matrix`.
__global__ void matrix_subtract_vector_cuda(const char* matrix_data, std::size_t matrix_pitch,
                                            std::size_t matrix_rows, std::size_t matrix_cols,
                                            const char* vector_data, std::size_t vector_pitch,
                                            char* result_data, std::size_t result_pitch)
{
    for (std::size_t row = 0; row < matrix_rows; row++)
    {
        for (std::size_t col = 0; col < matrix_cols; col++)
        {
            value_t* result_ptr;
            const value_t* matrix_ptr;
            const value_t* vector_ptr;
            get_val_ptr_cuda(result_data, result_pitch, row, col, &result_ptr);
            get_val_ptr_const_cuda(matrix_data, matrix_pitch, row, col, &matrix_ptr);
            get_val_ptr_const_cuda(vector_data, vector_pitch, 0, col, &vector_ptr);
            *result_ptr = *matrix_ptr - *vector_ptr;
        }
    }
}

// result = matrix + vector, with `vector` (row 0) broadcast against every row
// of `matrix` — mirror image of matrix_subtract_vector_cuda.
__global__ void matrix_add_vector_cuda(const char* matrix_data, std::size_t matrix_pitch,
                                       std::size_t matrix_rows, std::size_t matrix_cols,
                                       const char* vector_data, std::size_t vector_pitch,
                                       char* result_data, std::size_t result_pitch)
{
    for (std::size_t row = 0; row < matrix_rows; row++)
    {
        for (std::size_t col = 0; col < matrix_cols; col++)
        {
            value_t* result_ptr;
            const value_t* matrix_ptr;
            const value_t* vector_ptr;
            get_val_ptr_cuda(result_data, result_pitch, row, col, &result_ptr);
            get_val_ptr_const_cuda(matrix_data, matrix_pitch, row, col, &matrix_ptr);
            get_val_ptr_const_cuda(vector_data, vector_pitch, 0, col, &vector_ptr);
            *result_ptr = *matrix_ptr + *vector_ptr;
        }
    }
}

// result = matrix * val (element-wise scalar multiplication).
__global__ void multiply_by_scalar_cuda(const char* matrix_data, std::size_t matrix_pitch,
                                        std::size_t matrix_rows, std::size_t matrix_cols,
                                        float val,
                                        char* result_data, std::size_t result_pitch)
{
    for (std::size_t row = 0; row < matrix_rows; row++)
    {
        for (std::size_t col = 0; col < matrix_cols; col++)
        {
            value_t* result_ptr;
            const value_t* matrix_ptr;
            get_val_ptr_cuda(result_data, result_pitch, row, col, &result_ptr);
            get_val_ptr_const_cuda(matrix_data, matrix_pitch, row, col, &matrix_ptr);
            *result_ptr = *matrix_ptr * val;
        }
    }
}

// result = lhs . rhs (naive triple-loop matrix product).
// Assumes lhs_cols == rhs_rows; rhs_rows is accepted for symmetry with the
// host wrapper but is not checked here.
__global__ void matrix_dot_product_cuda(const char* lhs_data, std::size_t lhs_pitch,
                                        std::size_t lhs_rows, std::size_t lhs_cols,
                                        const char* rhs_data, std::size_t rhs_pitch,
                                        std::size_t rhs_rows, std::size_t rhs_cols,
                                        char* result_data, std::size_t result_pitch)
{
    std::size_t row_count = lhs_rows;
    std::size_t col_count = rhs_cols;
    std::size_t common_dim = lhs_cols;
    for (std::size_t row = 0; row < row_count; row++)
    {
        for (std::size_t col = 0; col < col_count; col++)
        {
            value_t* result_ptr;
            get_val_ptr_cuda(result_data, result_pitch, row, col, &result_ptr);
            *result_ptr = 0;
            for (std::size_t k = 0; k < common_dim; k++)
            {
                const value_t* lhs_ptr;
                const value_t* rhs_ptr;
                get_val_ptr_const_cuda(lhs_data, lhs_pitch, row, k, &lhs_ptr);
                get_val_ptr_const_cuda(rhs_data, rhs_pitch, k, col, &rhs_ptr);
                *result_ptr += *lhs_ptr * *rhs_ptr;
            }
        }
    }
}

// result (row 0) = lhs[lhs_row] * rhs[rhs_row], element-wise over lhs_cols
// entries.  Assumes both rows have the same length; rhs_cols is accepted for
// symmetry but not checked here.
__global__ void vector_element_wise_multiplication_cuda(const char* lhs_data, std::size_t lhs_pitch,
                                                        std::size_t lhs_cols, std::size_t lhs_row,
                                                        const char* rhs_data, std::size_t rhs_pitch,
                                                        std::size_t rhs_cols, std::size_t rhs_row,
                                                        char* result_data, std::size_t result_pitch)
{
    for (std::size_t i = 0; i < lhs_cols; i++)
    {
        value_t* result_ptr;
        const value_t* lhs_ptr;
        const value_t* rhs_ptr;
        get_val_ptr_cuda(result_data, result_pitch, 0, i, &result_ptr);
        get_val_ptr_const_cuda(lhs_data, lhs_pitch, lhs_row, i, &lhs_ptr);
        get_val_ptr_const_cuda(rhs_data, rhs_pitch, rhs_row, i, &rhs_ptr);
        *result_ptr = *lhs_ptr * *rhs_ptr;
    }
}

// result = lhs - rhs (element-wise, over lhs dimensions).  Assumes rhs has at
// least the same dimensions as lhs; rhs_rows/rhs_cols are not checked here.
__global__ void matrix_subtract_cuda(const char* lhs_data, std::size_t lhs_pitch,
                                     std::size_t lhs_rows, std::size_t lhs_cols,
                                     const char* rhs_data, std::size_t rhs_pitch,
                                     std::size_t rhs_rows, std::size_t rhs_cols,
                                     char* result_data, std::size_t result_pitch)
{
    for (std::size_t row = 0; row < lhs_rows; row++)
    {
        for (std::size_t col = 0; col < lhs_cols; col++)
        {
            value_t* result_ptr;
            const value_t* lhs_ptr;
            const value_t* rhs_ptr;
            get_val_ptr_cuda(result_data, result_pitch, row, col, &result_ptr);
            get_val_ptr_const_cuda(lhs_data, lhs_pitch, row, col, &lhs_ptr);
            get_val_ptr_const_cuda(rhs_data, rhs_pitch, row, col, &rhs_ptr);
            *result_ptr = *lhs_ptr - *rhs_ptr;
        }
    }
}

// *sum = sum of the `vector_cols` entries of the single-row vector.
__global__ void vector_sum_cuda(const char* vector_data, std::size_t vector_pitch,
                                std::size_t vector_cols, float* sum)
{
    *sum = 0.0f;
    for (std::size_t col = 0; col < vector_cols; col++)
    {
        const value_t* data;
        get_val_ptr_const_cuda(vector_data, vector_pitch, 0, col, &data);
        *sum += *data;
    }
}

// *norm = Frobenius (L2) norm of `matrix`.
__global__ void matrix_norm_2_cuda(const char* matrix_data, std::size_t matrix_pitch,
                                   std::size_t matrix_rows, std::size_t matrix_cols,
                                   float* norm)
{
    float sum = 0.0f;
    for (std::size_t row = 0; row < matrix_rows; row++)
    {
        const value_t* line;
        get_val_ptr_const_cuda(matrix_data, matrix_pitch, row, 0, &line);
        for (std::size_t col = 0; col < matrix_cols; col++)
        {
            // x * x instead of pow(x, 2): the original called the
            // double-precision pow() inside an otherwise float-only kernel.
            sum += line[col] * line[col];
        }
    }
    *norm = sqrtf(sum);
}
// Computes the centroid (column-wise mean) of `matrix` into row 0 of
// `result`.  Used on N x 3 point clouds, but written for any column count.
__global__ void matrix_centroid_cuda(const char* matrix_data, std::size_t matrix_pitch,
                                     std::size_t matrix_rows, std::size_t matrix_cols,
                                     char* result_data, std::size_t result_pitch)
{
    std::size_t row_count = matrix_rows;
    std::size_t col_count = matrix_cols;
    // Accumulate per-column sums in result row 0 (first row initializes).
    for (std::size_t row = 0; row < row_count; row++)
    {
        for (std::size_t col = 0; col < col_count; col++)
        {
            value_t* result_ptr;
            const value_t* matrix_ptr;
            get_val_ptr_cuda(result_data, result_pitch, 0, col, &result_ptr);
            get_val_ptr_const_cuda(matrix_data, matrix_pitch, row, col, &matrix_ptr);
            if (row == 0)
                *result_ptr = *matrix_ptr;
            else
                *result_ptr += *matrix_ptr;
        }
    }
    // Divide every accumulated column by the row count.  The original code
    // divided exactly three hard-coded components; looping over col_count
    // gives the same result for 3-column inputs and stays correct for any
    // other width.  Row 0 of the result is contiguous, so plain indexing
    // from the first element is valid within the row.
    value_t* result_ptr;
    get_val_ptr_cuda(result_data, result_pitch, 0, 0, &result_ptr);
    for (std::size_t col = 0; col < col_count; col++)
        result_ptr[col] /= row_count;
}

// Euclidean distance between row p_row of P and row q_row of Q, both treated
// as 3D points (columns 0..2).
__device__ void compute_distance_cuda(const char* p_data, std::size_t p_pitch, std::size_t p_row,
                                      const char* q_data, std::size_t q_pitch, std::size_t q_row,
                                      float* distance)
{
    const float* X1;
    const float* Y1;
    const float* Z1;
    const float* X2;
    const float* Y2;
    const float* Z2;
    get_val_ptr_const_cuda(p_data, p_pitch, p_row, 0, &X1);
    get_val_ptr_const_cuda(p_data, p_pitch, p_row, 1, &Y1);
    get_val_ptr_const_cuda(p_data, p_pitch, p_row, 2, &Z1);
    get_val_ptr_const_cuda(q_data, q_pitch, q_row, 0, &X2);
    get_val_ptr_const_cuda(q_data, q_pitch, q_row, 1, &Y2);
    get_val_ptr_const_cuda(q_data, q_pitch, q_row, 2, &Z2);
    float dx = *X2 - *X1;
    float dy = *Y2 - *Y1;
    float dz = *Z2 - *Z1;
    // Float-only arithmetic: the original used pow(..., 2), sqrt() and a
    // stray "* 1.0", all of which promote to double inside a float kernel.
    *distance = sqrtf(dx * dx + dy * dy + dz * dz);
}

// For each row of P (one thread per row — this kernel IS launched in
// parallel, unlike the <<<1,1>>> utility kernels), finds the nearest row of Q
// by Euclidean distance and copies that 3D point into the matching row of
// `res`.
__global__ void get_nearest_neighbors_cuda(const char* P_data, std::size_t P_pitch, std::size_t P_rows,
                                           const char* Q_data, std::size_t Q_pitch, std::size_t Q_rows,
                                           char* res_data, std::size_t res_pitch)
{
    std::size_t p_row = blockDim.x * blockIdx.x + threadIdx.x;
    if (p_row >= P_rows)
        return;

    // FLT_MAX spelled out as a literal: the original used MAXFLOAT, a
    // non-standard BSD macro that is not guaranteed to exist.
    float min_dist = 3.402823466e+38f;
    std::size_t choosen_row = 0;
    for (std::size_t q_row = 0; q_row < Q_rows; q_row++)
    {
        float dist;
        compute_distance_cuda(P_data, P_pitch, p_row, Q_data, Q_pitch, q_row, &dist);
        if (dist < min_dist)
        {
            min_dist = dist;
            choosen_row = q_row;
        }
    }

    const float* Q_line;
    float* res_line;
    get_val_ptr_const_cuda(Q_data, Q_pitch, choosen_row, 0, &Q_line);
    get_val_ptr_cuda(res_data, res_pitch, p_row, 0, &res_line);
    for (std::size_t i = 0; i < 3; i++)
        res_line[i] = Q_line[i];
}

// *sum = trace of `matrix` (sum of the main diagonal).
__global__ void matrix_diag_sum_cuda(const char* matrix_data, std::size_t matrix_pitch,
                                     std::size_t matrix_rows, float* sum)
{
    *sum = 0.0f;
    for (std::size_t row = 0; row < matrix_rows; row++)
    {
        const value_t* matrix_ptr;
        get_val_ptr_const_cuda(matrix_data, matrix_pitch, row, row, &matrix_ptr);
        *sum += *matrix_ptr;
    }
}

// Pitched-memory addressing: element (row, col) lives at
// base + row * pitch + col * sizeof(value_t).  Const and non-const variants.
__device__ void get_val_ptr_const_cuda(const char* data, std::size_t pitch,
                                       std::size_t row, std::size_t col, const value_t** val)
{
    *val = (value_t*)((data + row * pitch) + col * sizeof(value_t));
}

__device__ void get_val_ptr_cuda(char* data, std::size_t pitch,
                                 std::size_t row, std::size_t col, value_t** val)
{
    *val = (value_t*)((data + row * pitch) + col * sizeof(value_t));
}

// Writes `val` at (row, col) of the device matrix.
__global__ void set_val_cuda(char* matrix_data, std::size_t matrix_pitch,
                             std::size_t row, std::size_t col, value_t val)
{
    value_t* val_ptr;
    get_val_ptr_cuda(matrix_data, matrix_pitch, row, col, &val_ptr);
    *val_ptr = val;
}

// Writes the device value pointed to by `val` at (row, col).
__global__ void set_val_ptr_cuda(char* matrix_data, std::size_t matrix_pitch,
                                 std::size_t row, std::size_t col, value_t* val)
{
    value_t* val_ptr;
    get_val_ptr_cuda(matrix_data, matrix_pitch, row, col, &val_ptr);
    *val_ptr = *val;
}

// From the quaternion q = (q0, q1, q2, q3) stored as a 4x1 column vector,
// builds the two 4x4 quaternion matrices QBar^T and Q whose product yields
// the rotation matrix (standard quaternion-to-rotation construction).
__global__ void compute_rotation_matrix_cuda(const char* q_data, std::size_t q_pitch,
                                             char* QBar_T_data, std::size_t QBar_T_pitch,
                                             char* Q_data, std::size_t Q_pitch)
{
    const value_t* q0_ptr;
    const value_t* q1_ptr;
    const value_t* q2_ptr;
    const value_t* q3_ptr;
    get_val_ptr_const_cuda(q_data, q_pitch, 0, 0, &q0_ptr);
    get_val_ptr_const_cuda(q_data, q_pitch, 1, 0, &q1_ptr);
    get_val_ptr_const_cuda(q_data, q_pitch, 2, 0, &q2_ptr);
    get_val_ptr_const_cuda(q_data, q_pitch, 3, 0, &q3_ptr);

    value_t* QBar_T_0_ptr;
    value_t* QBar_T_1_ptr;
    value_t* QBar_T_2_ptr;
    value_t* QBar_T_3_ptr;
    get_val_ptr_cuda(QBar_T_data, QBar_T_pitch, 0, 0, &QBar_T_0_ptr);
    get_val_ptr_cuda(QBar_T_data, QBar_T_pitch, 1, 0, &QBar_T_1_ptr);
    get_val_ptr_cuda(QBar_T_data, QBar_T_pitch, 2, 0, &QBar_T_2_ptr);
    get_val_ptr_cuda(QBar_T_data, QBar_T_pitch, 3, 0, &QBar_T_3_ptr);

    QBar_T_0_ptr[0] = *q0_ptr;
    QBar_T_0_ptr[1] = *q1_ptr;
    QBar_T_0_ptr[2] = *q2_ptr;
    QBar_T_0_ptr[3] = *q3_ptr;

    QBar_T_1_ptr[0] = -*q1_ptr;
    QBar_T_1_ptr[1] = *q0_ptr;
    QBar_T_1_ptr[2] = *q3_ptr;
    QBar_T_1_ptr[3] = -*q2_ptr;

    QBar_T_2_ptr[0] = -*q2_ptr;
    QBar_T_2_ptr[1] = -*q3_ptr;
    QBar_T_2_ptr[2] = *q0_ptr;
    QBar_T_2_ptr[3] = *q1_ptr;

    QBar_T_3_ptr[0] = -*q3_ptr;
    QBar_T_3_ptr[1] = *q2_ptr;
    QBar_T_3_ptr[2] = -*q1_ptr;
    QBar_T_3_ptr[3] = *q0_ptr;

    value_t* Q_0_ptr;
    value_t* Q_1_ptr;
    value_t* Q_2_ptr;
    value_t* Q_3_ptr;
    get_val_ptr_cuda(Q_data, Q_pitch, 0, 0, &Q_0_ptr);
    get_val_ptr_cuda(Q_data, Q_pitch, 1, 0, &Q_1_ptr);
    get_val_ptr_cuda(Q_data, Q_pitch, 2, 0, &Q_2_ptr);
    get_val_ptr_cuda(Q_data, Q_pitch, 3, 0, &Q_3_ptr);

    Q_0_ptr[0] = *q0_ptr;
    Q_0_ptr[1] = -*q1_ptr;
    Q_0_ptr[2] = -*q2_ptr;
    Q_0_ptr[3] = -*q3_ptr;

    Q_1_ptr[0] = *q1_ptr;
    Q_1_ptr[1] = *q0_ptr;
    Q_1_ptr[2] = *q3_ptr;
    Q_1_ptr[3] = -*q2_ptr;

    Q_2_ptr[0] = *q2_ptr;
    Q_2_ptr[1] = -*q3_ptr;
    Q_2_ptr[2] = *q0_ptr;
    Q_2_ptr[3] = *q1_ptr;

    Q_3_ptr[0] = *q3_ptr;
    Q_3_ptr[1] = *q2_ptr;
    Q_3_ptr[2] = -*q1_ptr;
    Q_3_ptr[3] = *q0_ptr;
}

// Copies the element at (row, col) into the device scalar *val.
__global__ void get_val_cuda(const char* matrix_data, std::size_t matrix_pitch,
                             std::size_t row, std::size_t col, value_t* val)
{
    const value_t* val_ptr;
    get_val_ptr_const_cuda(matrix_data, matrix_pitch, row, col, &val_ptr);
    *val = *val_ptr;
}

// Debug helper: prints the matrix from device code (device printf is
// serialized and slow — debugging only).
__global__ void print_matrix_cuda(const char* matrix, std::size_t pitch,
                                  std::size_t rows, std::size_t cols)
{
    for (std::size_t row = 0; row < rows; row++)
    {
        printf("| ");
        for (std::size_t col = 0; col < cols; col++)
        {
            const value_t* val;
            get_val_ptr_const_cuda(matrix, pitch, row, col, &val);
            printf("%f ", *val);
        }
        printf("|\n");
    }
}

// ---- Host wrappers -------------------------------------------------------
// Each wrapper launches its kernel sequentially (<<<1, 1>>>), blocks until
// completion, and aborts on any pending CUDA error.  cudaPeekAtLastError()
// is used so the error state is observed without being cleared.

void matrix_dot_product(const matrix_device_t& lhs, const matrix_device_t& rhs,
                        matrix_device_t& result)
{
    matrix_dot_product_cuda<<<1, 1>>>(lhs.data_, lhs.pitch_, lhs.rows_, lhs.cols_,
                                      rhs.data_, rhs.pitch_, rhs.rows_, rhs.cols_,
                                      result.data_, result.pitch_);
    cudaDeviceSynchronize();
    if (cudaPeekAtLastError())
    {
        abortError("Computation Error");
    }
}

void vector_element_wise_multiplication(const matrix_device_t& lhs, std::size_t lhs_row,
                                        const matrix_device_t& rhs, std::size_t rhs_row,
                                        matrix_device_t& result)
{
    vector_element_wise_multiplication_cuda<<<1, 1>>>(lhs.data_, lhs.pitch_, lhs.cols_, lhs_row,
                                                      rhs.data_, rhs.pitch_, rhs.cols_, rhs_row,
                                                      result.data_, result.pitch_);
    cudaDeviceSynchronize();
    if (cudaPeekAtLastError())
    {
        abortError("Computation Error");
    }
}

// Returns the sum of the entries of a single-row device vector.  Allocates a
// temporary device scalar for the kernel result and copies it back.
float vector_sum(const matrix_device_t& vector)
{
    float* sum_device;
    cudaError_t rc = cudaSuccess;
    rc = cudaMalloc(&sum_device, sizeof(float));
    if (rc)
    {
        abortError("Fail buffer allocation");
    }

    vector_sum_cuda<<<1, 1>>>(vector.data_, vector.pitch_, vector.cols_, sum_device);
    cudaDeviceSynchronize();
    if (cudaPeekAtLastError())
    {
        abortError("Computation Error");
    }

    float sum_host;
    rc = cudaMemcpy(&sum_host, sum_device, sizeof(float), cudaMemcpyDeviceToHost);
    if (rc)
    {
        abortError("Fail buffer copy");
    }
    rc = cudaFree(sum_device);
    if (rc)
    {
        abortError("Fail buffer free");
    }
    return sum_host;
}

void matrix_subtract(const matrix_device_t& lhs, const matrix_device_t& rhs,
                     matrix_device_t& result)
{
    matrix_subtract_cuda<<<1, 1>>>(lhs.data_, lhs.pitch_, lhs.rows_, lhs.cols_,
                                   rhs.data_, rhs.pitch_, rhs.rows_, rhs.cols_,
                                   result.data_, result.pitch_);
    cudaDeviceSynchronize();
    if (cudaPeekAtLastError())
    {
        abortError("Computation Error");
    }
}

void compute_rotation_matrix(const matrix_device_t& q, matrix_device_t& QBar_T,
                             matrix_device_t& Q)
{
    compute_rotation_matrix_cuda<<<1, 1>>>(q.data_, q.pitch_, QBar_T.data_, QBar_T.pitch_,
                                           Q.data_, Q.pitch_);
    cudaDeviceSynchronize();
    if (cudaPeekAtLastError())
    {
        abortError("Computation Error");
    }
}

// Host-side twin of get_val_ptr_cuda: pitched addressing of element
// (row, col).  Only valid on host-accessible memory.
value_t* get_val_ptr(char* data, std::size_t pitch, std::size_t row, std::size_t col)
{
    return (value_t*)((data + row * pitch) + col * sizeof(value_t));
}

} // namespace utils
bd33ed79e2af0300a83c5869d001d896e42334d7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2012 The Trustees of Indiana University. All rights reserved. CGL MapReduce Framework on GPUs and CPUs Code Name: Panda File: PandaLib.cu First Version: 2012-07-01 V0.1 Current Version: 2012-09-01 V0.3 Last Updates: 2012-09-016 Developer: Hui Li (lihui@indiana.edu) This is the source code for Panda, a MapReduce runtime on GPUs and CPUs. */ #ifndef __PANDALIB_CU__ #define __PANDALIB_CU__ #include "Panda.h" #include "UserAPI.cu" //---------------------------------------------- //Get default job configuration //---------------------------------------------- job_configuration *CreateJobConf(){ job_configuration *job_conf = (job_configuration *)malloc(sizeof(job_configuration)); if (job_conf == NULL) exit(-1); memset(job_conf, 0, sizeof(job_configuration)); job_conf->num_input_record = 0; job_conf->input_keyval_arr = NULL; job_conf->auto_tuning = false; job_conf->num_mappers = 0; job_conf->num_reducers = 0; job_conf->num_gpus = 0; job_conf->num_cpus_cores = 0; job_conf->num_cpus_groups = 0; return job_conf; }//gpu_context gpu_context *CreateGPUContext(){ gpu_context *d_g_state = (gpu_context*)malloc(sizeof(gpu_context)); if (d_g_state == NULL) exit(-1); memset(d_g_state, 0, sizeof(gpu_context)); d_g_state->configured = false; d_g_state->h_input_keyval_arr = NULL; d_g_state->num_mappers = 0; d_g_state->num_reducers = 0; return d_g_state; }//gpu_context cpu_context *CreateCPUContext(){ cpu_context *d_g_state = (cpu_context*)malloc(sizeof(cpu_context)); if (d_g_state == NULL) exit(-1); memset(d_g_state, 0, sizeof(cpu_context)); d_g_state->configured = false; d_g_state->input_keyval_arr = NULL; return d_g_state; }//gpu_context panda_context *CreatePandaContext(){ panda_context *d_g_state = (panda_context*)malloc(sizeof(panda_context)); if (d_g_state == NULL) exit(-1); d_g_state->input_keyval_arr = NULL; d_g_state->intermediate_keyval_arr_arr_p = NULL; 
d_g_state->sorted_intermediate_keyvals_arr = NULL; d_g_state->sorted_keyvals_arr_len = 0; d_g_state->num_gpus = 0; d_g_state->gpu_context = NULL; d_g_state->num_cpus_groups = 0; d_g_state->cpu_context = NULL; return d_g_state; }//panda_context //For version 0.3 void InitCPUMapReduce2(thread_info_t * thread_info){ cpu_context *d_g_state = (cpu_context *)(thread_info->d_g_state); job_configuration *job_conf = (job_configuration *)(thread_info->job_conf); if (job_conf->num_input_record<=0) { ShowError("Error: no any input keys"); exit(-1);} if (job_conf->input_keyval_arr == NULL) { ShowError("Error: input_keyval_arr == NULL"); exit(-1);} if (d_g_state->num_cpus_cores <= 0) { ShowError("Error: d_g_state->num_cpus == 0"); exit(-1);} //ShowLog("d_g_state->configured:%s enable for iterative applications",d_g_state->configured? "true" : "false"); //if (d_g_state->configured) // return; int totalKeySize = 0; int totalValSize = 0; for(int i=0;i<job_conf->num_input_record;i++){ totalKeySize += job_conf->input_keyval_arr[i].keySize; totalValSize += job_conf->input_keyval_arr[i].valSize; }//for ShowLog("CPU_GROUP_ID:[%d] num_input_record:%d, totalKeySize:%d totalValSize:%d num_cpus:%d", d_g_state->cpu_group_id, job_conf->num_input_record, totalKeySize, totalValSize, d_g_state->num_cpus_cores); //TODO determin num_cpus int num_cpus_cores = d_g_state->num_cpus_cores; d_g_state->panda_cpu_task = (pthread_t *)malloc(sizeof(pthread_t)*(num_cpus_cores)); d_g_state->panda_cpu_task_info = (panda_cpu_task_info_t *)malloc(sizeof(panda_cpu_task_info_t)*(num_cpus_cores)); d_g_state->intermediate_keyval_arr_arr_p = (keyval_arr_t *)malloc(sizeof(keyval_arr_t)*job_conf->num_input_record); memset(d_g_state->intermediate_keyval_arr_arr_p, 0, sizeof(keyval_arr_t)*job_conf->num_input_record); for (int i=0;i<num_cpus_cores;i++){ d_g_state->panda_cpu_task_info[i].d_g_state = d_g_state; d_g_state->panda_cpu_task_info[i].cpu_job_conf = job_conf; d_g_state->panda_cpu_task_info[i].num_cpus_cores = 
num_cpus_cores; d_g_state->panda_cpu_task_info[i].start_row_idx = 0; d_g_state->panda_cpu_task_info[i].end_row_idx = 0; }//for d_g_state->configured = true; ShowLog("CPU_GROUP_ID:[%d] DONE",d_g_state->cpu_group_id); } #ifdef DEV_MODE //For Version 0.3 test depressed void InitGPUMapReduce4(thread_info_t* thread_info) { gpu_context *d_g_state = (gpu_context *)(thread_info->d_g_state); job_configuration* gpu_job_conf = (job_configuration*)(thread_info->job_conf); keyval_t * kv_p = gpu_job_conf->input_keyval_arr; ShowLog("d_g_state->configured:%s enable for iterative applications",d_g_state->configured? "true" : "false"); //if (d_g_state->configured) // return; ShowLog("copy %d input records from Host to GPU memory",gpu_job_conf->num_input_record); //checkCudaErrors(hipMalloc((void **)&d_g_state->d_input_keyval_arr,sizeof(keyval_t)*d_g_state->num_input_record)); int totalKeySize = 0; int totalValSize = 0; for(int i=0;i<gpu_job_conf->num_input_record;i++){ totalKeySize += kv_p[i].keySize; totalValSize += kv_p[i].valSize; }//for ShowLog("totalKeySize:%d totalValSize:%d", totalKeySize, totalValSize); void *input_vals_shared_buff = malloc(totalValSize); void *input_keys_shared_buff = malloc(totalKeySize); keyval_pos_t *input_keyval_pos_arr = (keyval_pos_t *)malloc(sizeof(keyval_pos_t)*gpu_job_conf->num_input_record); int keyPos = 0; int valPos = 0; int keySize = 0; int valSize = 0; for(int i=0; i<gpu_job_conf->num_input_record; i++){ keySize = kv_p[i].keySize; valSize = kv_p[i].valSize; memcpy((char *)input_keys_shared_buff + keyPos,(char *)(kv_p[i].key), keySize); memcpy((char *)input_vals_shared_buff + valPos,(char *)(kv_p[i].val), valSize); input_keyval_pos_arr[i].keySize = keySize; input_keyval_pos_arr[i].keyPos = keyPos; input_keyval_pos_arr[i].valPos = valPos; input_keyval_pos_arr[i].valSize = valSize; keyPos += keySize; valPos += valSize; }//for checkCudaErrors(hipMalloc((void 
**)&d_g_state->d_input_keyval_pos_arr,sizeof(keyval_pos_t)*gpu_job_conf->num_input_record)); checkCudaErrors(hipMalloc((void **)&d_g_state->d_input_keys_shared_buff, totalKeySize)); checkCudaErrors(hipMalloc((void **)&d_g_state->d_input_vals_shared_buff, totalValSize)); checkCudaErrors(hipMemcpy(d_g_state->d_input_keyval_pos_arr, input_keyval_pos_arr,sizeof(keyval_pos_t)*gpu_job_conf->num_input_record ,hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_g_state->d_input_keys_shared_buff, input_keys_shared_buff,totalKeySize ,hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_g_state->d_input_vals_shared_buff, input_vals_shared_buff,totalValSize ,hipMemcpyHostToDevice)); //checkCudaErrors(hipMemcpy(d_g_state->d_input_keyval_arr,h_buff,sizeof(keyval_t)*d_g_state->num_input_record,hipMemcpyHostToDevice)); hipDeviceSynchronize(); d_g_state->configured = true; }//void #endif void InitGPUMapReduce3(gpu_context* d_g_state) { ShowLog("d_g_state->configured:%s enable for iterative applications",d_g_state->configured? 
"true" : "false"); //if (d_g_state->configured) // return; int totalKeySize = 0; int totalValSize = 0; for(int i=0;i<d_g_state->num_input_record;i++){ totalKeySize += d_g_state->h_input_keyval_arr[i].keySize; totalValSize += d_g_state->h_input_keyval_arr[i].valSize; }//for ShowLog("GPU_ID:[%d] copy %d input records from Host to GPU memory totalKeySize:%d totalValSize:%d",d_g_state->gpu_id, d_g_state->num_input_record, totalKeySize, totalValSize); void *input_vals_shared_buff = malloc(totalValSize); void *input_keys_shared_buff = malloc(totalKeySize); keyval_pos_t *input_keyval_pos_arr = (keyval_pos_t *)malloc(sizeof(keyval_pos_t)*d_g_state->num_input_record); int keyPos = 0; int valPos = 0; int keySize = 0; int valSize = 0; for(int i=0;i<d_g_state->num_input_record;i++){ keySize = d_g_state->h_input_keyval_arr[i].keySize; valSize = d_g_state->h_input_keyval_arr[i].valSize; memcpy((char *)input_keys_shared_buff + keyPos,(char *)(d_g_state->h_input_keyval_arr[i].key), keySize); memcpy((char *)input_vals_shared_buff + valPos,(char *)(d_g_state->h_input_keyval_arr[i].val), valSize); input_keyval_pos_arr[i].keySize = keySize; input_keyval_pos_arr[i].keyPos = keyPos; input_keyval_pos_arr[i].valPos = valPos; input_keyval_pos_arr[i].valSize = valSize; keyPos += keySize; valPos += valSize; }//for checkCudaErrors(hipMalloc((void **)&d_g_state->d_input_keyval_pos_arr,sizeof(keyval_pos_t)*d_g_state->num_input_record)); checkCudaErrors(hipMalloc((void **)&d_g_state->d_input_keys_shared_buff, totalKeySize)); checkCudaErrors(hipMalloc((void **)&d_g_state->d_input_vals_shared_buff, totalValSize)); checkCudaErrors(hipMemcpy(d_g_state->d_input_keyval_pos_arr, input_keyval_pos_arr,sizeof(keyval_pos_t)*d_g_state->num_input_record ,hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_g_state->d_input_keys_shared_buff, input_keys_shared_buff,totalKeySize ,hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_g_state->d_input_vals_shared_buff, input_vals_shared_buff,totalValSize 
,hipMemcpyHostToDevice)); //checkCudaErrors(hipMemcpy(d_g_state->d_input_keyval_arr,h_buff,sizeof(keyval_t)*d_g_state->num_input_record,hipMemcpyHostToDevice)); hipDeviceSynchronize(); d_g_state->configured = true; }//void #ifdef DEV_MODE void InitGPUMapReduce2(gpu_context* d_g_state) { ShowLog("d_g_state->num_input_record:%d",d_g_state->num_input_record); //checkCudaErrors(hipMalloc((void **)&d_g_state->d_input_keyval_arr,sizeof(keyval_t)*d_g_state->num_input_record)); int totalKeySize = 0; int totalValSize = 0; for(int i=0;i<d_g_state->num_input_record;i++){ totalKeySize += d_g_state->h_input_keyval_arr[i].keySize; totalValSize += d_g_state->h_input_keyval_arr[i].valSize; }//for void *input_vals_shared_buff = malloc(totalValSize); void *input_keys_shared_buff = malloc(totalKeySize); keyval_pos_t *input_keyval_pos_arr = (keyval_pos_t *)malloc(sizeof(keyval_pos_t)*d_g_state->num_input_record); int keyPos = 0; int valPos = 0; int keySize = 0; int valSize = 0; for(int i=0;i<d_g_state->num_input_record;i++){ keySize = d_g_state->h_input_keyval_arr[i].keySize; valSize = d_g_state->h_input_keyval_arr[i].valSize; memcpy((char *)input_keys_shared_buff + keyPos,(char *)(d_g_state->h_input_keyval_arr[i].key), keySize); memcpy((char *)input_vals_shared_buff + valPos,(char *)(d_g_state->h_input_keyval_arr[i].val), valSize); input_keyval_pos_arr[i].keySize = keySize; input_keyval_pos_arr[i].keyPos = keyPos; input_keyval_pos_arr[i].valPos = valPos; input_keyval_pos_arr[i].valSize = valSize; keyPos += keySize; valPos += valSize; }//for checkCudaErrors(hipMalloc((void **)&d_g_state->d_input_keyval_pos_arr,sizeof(keyval_pos_t)*d_g_state->num_input_record)); checkCudaErrors(hipMalloc((void **)&d_g_state->d_input_keys_shared_buff, totalKeySize)); checkCudaErrors(hipMalloc((void **)&d_g_state->d_input_vals_shared_buff, totalValSize)); checkCudaErrors(hipMemcpy(d_g_state->d_input_keyval_pos_arr, input_keyval_pos_arr,sizeof(keyval_pos_t)*d_g_state->num_input_record 
,hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_g_state->d_input_keys_shared_buff, input_keys_shared_buff,totalKeySize ,hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_g_state->d_input_vals_shared_buff, input_vals_shared_buff,totalValSize ,hipMemcpyHostToDevice)); //checkCudaErrors(hipMemcpy(d_g_state->d_input_keyval_arr,h_buff,sizeof(keyval_t)*d_g_state->num_input_record,hipMemcpyHostToDevice)); hipDeviceSynchronize(); }//void #endif void InitCPUDevice(thread_info_t*thread_info){ //------------------------------------------ //1, init CPU device //------------------------------------------ cpu_context *d_g_state = (cpu_context *)(thread_info->d_g_state); if (d_g_state->num_cpus_cores<=0) d_g_state->num_cpus_cores = getCPUCoresNum(); int tid = thread_info->tid; ShowLog( "CPU_GROUP_ID:[%d] Init CPU Deivce",d_g_state->cpu_group_id); } void InitGPUDevice(thread_info_t*thread_info){ //------------------------------------------ //1, init device //------------------------------------------ gpu_context *d_g_state = (gpu_context *)(thread_info->d_g_state); int tid = thread_info->tid; int assigned_gpu_id = d_g_state->gpu_id; int num_gpus = d_g_state->num_gpus; if (num_gpus == 0) { ShowError("error num_gpus == 0"); exit(-1); }//gpu_context int gpu_id; hipGetDevice(&gpu_id); int gpu_count = 0; hipGetDeviceCount(&gpu_count); hipDeviceProp_t gpu_dev; hipGetDeviceProperties(&gpu_dev, gpu_id); ShowLog("TID:[%d] check GPU ids: cur_gpu_id:[%d] assig_gpu_id:[%d] hipGetDeviceCount:[%d] GPU name:%s", tid, gpu_id, assigned_gpu_id, gpu_count, gpu_dev.name); if ( gpu_id != assigned_gpu_id ){ //ShowLog("hipSetDevice gpu_id %d == (tid num_gpus) %d ", gpu_id, tid%num_gpus); hipSetDevice(assigned_gpu_id % num_gpus); }//if size_t total_mem,avail_mem, heap_limit; checkCudaErrors(hipMemGetInfo( &avail_mem, &total_mem )); hipDeviceSetLimit(hipLimitMallocHeapSize, (int)(total_mem*0.8)); hipDeviceGetLimit(&heap_limit, hipLimitMallocHeapSize); int numGPUCores = getGPUCoresNum(); 
ShowLog("GPU_ID:[%d] numGPUCores:%d hipLimitMallocHeapSize:%d MB avail_mem:%d MB total_mem:%d MB", gpu_id, numGPUCores,heap_limit/1024/1024, avail_mem/1024/1024,total_mem/1024/1024); } void AddPandaTask(job_configuration* job_conf, void* key, void* val, int keySize, int valSize){ int len = job_conf->num_input_record; if (len<0) return; if (len == 0) job_conf->input_keyval_arr = NULL; job_conf->input_keyval_arr = (keyval_t *)realloc(job_conf->input_keyval_arr, sizeof(keyval_t)*(len+1)); job_conf->input_keyval_arr[len].keySize = keySize; job_conf->input_keyval_arr[len].valSize = valSize; job_conf->input_keyval_arr[len].key = malloc(keySize); job_conf->input_keyval_arr[len].val = malloc(valSize); memcpy(job_conf->input_keyval_arr[len].key,key,keySize); memcpy(job_conf->input_keyval_arr[len].val,val,valSize); job_conf->num_input_record++; } void AddReduceInputRecordGPU(gpu_context* d_g_state, keyvals_t * sorted_intermediate_keyvals_arr, int start_row_id, int end_row_id){ int total_count = 0; for(int i=start_row_id;i<end_row_id;i++){ total_count += sorted_intermediate_keyvals_arr[i].val_arr_len; }//for int totalKeySize = 0; int totalValSize = 0; for(int i=start_row_id;i<end_row_id;i++){ totalKeySize += (sorted_intermediate_keyvals_arr[i].keySize+3)/4*4; for (int j=0;j<sorted_intermediate_keyvals_arr[i].val_arr_len;j++) totalValSize += (sorted_intermediate_keyvals_arr[i].vals[j].valSize+3)/4*4; }//for checkCudaErrors(hipMalloc((void **)&d_g_state->d_sorted_keys_shared_buff,totalKeySize)); checkCudaErrors(hipMalloc((void **)&d_g_state->d_sorted_vals_shared_buff,totalValSize)); checkCudaErrors(hipMalloc((void **)&d_g_state->d_keyval_pos_arr,sizeof(keyval_pos_t)*total_count)); d_g_state->h_sorted_keys_shared_buff = malloc(sizeof(char)*totalKeySize); d_g_state->h_sorted_vals_shared_buff = malloc(sizeof(char)*totalValSize); char *sorted_keys_shared_buff = (char *)d_g_state->h_sorted_keys_shared_buff; char *sorted_vals_shared_buff = (char 
*)d_g_state->h_sorted_vals_shared_buff; char *keyval_pos_arr = (char *)malloc(sizeof(keyval_pos_t)*total_count); int sorted_key_arr_len = (end_row_id-start_row_id); keyval_pos_t *tmp_keyval_pos_arr = (keyval_pos_t *)malloc(sizeof(keyval_pos_t)*total_count); ShowLog("GPU_ID:[%d] total #different intermediate records:%d total records:%d totalKeySize:%d KB totalValSize:%d KB", d_g_state->gpu_id, end_row_id - start_row_id, total_count, totalKeySize/1024, totalValSize/1024); int *pos_arr_4_pos_arr = (int*)malloc(sizeof(int)*(sorted_key_arr_len)); memset(pos_arr_4_pos_arr,0,sizeof(int)*sorted_key_arr_len); int index = 0; int keyPos = 0; int valPos = 0; for (int i=start_row_id;i<end_row_id;i++){ keyvals_t* p = (keyvals_t*)&(sorted_intermediate_keyvals_arr[i]); memcpy(sorted_keys_shared_buff+keyPos,p->key, p->keySize); for (int j=0;j<p->val_arr_len;j++){ tmp_keyval_pos_arr[index].keyPos = keyPos; tmp_keyval_pos_arr[index].keySize = p->keySize; tmp_keyval_pos_arr[index].valPos = valPos; tmp_keyval_pos_arr[index].valSize = p->vals[j].valSize; memcpy(sorted_vals_shared_buff + valPos,p->vals[j].val,p->vals[j].valSize); valPos += (p->vals[j].valSize+3)/4*4; index++; }//for keyPos += (p->keySize+3)/4*4; pos_arr_4_pos_arr[i-start_row_id] = index; }// d_g_state->d_sorted_keyvals_arr_len = end_row_id-start_row_id; checkCudaErrors(hipMemcpy(d_g_state->d_keyval_pos_arr,tmp_keyval_pos_arr,sizeof(keyval_pos_t)*total_count,hipMemcpyHostToDevice)); checkCudaErrors(hipMalloc((void**)&d_g_state->d_pos_arr_4_sorted_keyval_pos_arr,sizeof(int)*sorted_key_arr_len)); checkCudaErrors(hipMemcpy(d_g_state->d_pos_arr_4_sorted_keyval_pos_arr,pos_arr_4_pos_arr,sizeof(int)*sorted_key_arr_len,hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_g_state->d_sorted_keys_shared_buff, sorted_keys_shared_buff, sizeof(char)*totalKeySize,hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_g_state->d_sorted_vals_shared_buff, sorted_vals_shared_buff, sizeof(char)*totalValSize,hipMemcpyHostToDevice)); } void 
AddMapInputRecordGPU(gpu_context* d_g_state, keyval_t *kv_p, int start_row_id, int end_row_id){ if (end_row_id<=start_row_id) { ShowError("error! end_row_id<=start_row_id"); return; } int len = d_g_state->num_input_record; if (len<0) { ShowError("error! len<0"); return; } if (len == 0) d_g_state->h_input_keyval_arr = NULL; ShowLog("GPU_ID:[%d] add map tasks into gpu; #total input:%d #added input:%d",d_g_state->gpu_id, len, end_row_id-start_row_id); d_g_state->h_input_keyval_arr = (keyval_t *)realloc(d_g_state->h_input_keyval_arr, sizeof(keyval_t)*(len + end_row_id - start_row_id)); //assert(d_g_state->h_input_keyval_arr != NULL); for (int i=start_row_id;i<end_row_id;i++){ d_g_state->h_input_keyval_arr[len].keySize = kv_p[i].keySize; d_g_state->h_input_keyval_arr[len].valSize = kv_p[i].valSize; d_g_state->h_input_keyval_arr[len].key = kv_p[i].key; d_g_state->h_input_keyval_arr[len].val = kv_p[i].val; d_g_state->num_input_record++; len++; } } void AddMapInputRecordCPU(cpu_context* d_g_state, keyval_t *kv_p, int start_row_id, int end_row_id){ if (end_row_id<=start_row_id) { ShowError("error! end_row_id[%d] <= start_row_id[%d]",end_row_id, start_row_id); return; } int len = d_g_state->num_input_record; if (len<0) { ShowError("error! 
len<0"); return; } if (len == 0) d_g_state->input_keyval_arr = NULL; ShowLog("CPU_GROUP_ID:[%d] add map input record for cpu device current #input:%d added #input:%d",d_g_state->cpu_group_id,len,end_row_id-start_row_id); d_g_state->input_keyval_arr = (keyval_t *)realloc(d_g_state->input_keyval_arr, sizeof(keyval_t)*(len+end_row_id-start_row_id)); for (int i=start_row_id;i<end_row_id;i++){ d_g_state->input_keyval_arr[len].keySize = kv_p[i].keySize; d_g_state->input_keyval_arr[len].valSize = kv_p[i].valSize; d_g_state->input_keyval_arr[len].key = kv_p[i].key; d_g_state->input_keyval_arr[len].val = kv_p[i].val; d_g_state->num_input_record++; len++; } } void AddReduceInputRecordCPU(cpu_context* d_g_state, keyvals_t *kv_p, int start_row_id, int end_row_id){ if (end_row_id<start_row_id){ ShowError("error! end_row_id<=start_row_id"); return; } int len = d_g_state->sorted_keyvals_arr_len; if (len<0) { ShowError("error! len<0"); return; } if (len == 0) d_g_state->sorted_intermediate_keyvals_arr = NULL; d_g_state->sorted_intermediate_keyvals_arr = (keyvals_t *)malloc(sizeof(keyvals_t)*(len+end_row_id-start_row_id)); for (int i = len; i< len+end_row_id-start_row_id; i++){ d_g_state->sorted_intermediate_keyvals_arr[i].keySize = kv_p[start_row_id+i-len].keySize; d_g_state->sorted_intermediate_keyvals_arr[i].key = kv_p[start_row_id+i-len].key; d_g_state->sorted_intermediate_keyvals_arr[i].vals = kv_p[start_row_id+i-len].vals; d_g_state->sorted_intermediate_keyvals_arr[i].val_arr_len = kv_p[start_row_id+i-len].val_arr_len; }//for d_g_state->sorted_keyvals_arr_len = end_row_id-start_row_id; } __device__ void GPUEmitReduceOuput (void* key, void* val, int keySize, int valSize, gpu_context *d_g_state){ keyval_t *p = &(d_g_state->d_reduced_keyval_arr[TID]); p->keySize = keySize; p->key = malloc(keySize); memcpy(p->key,key,keySize); p->valSize = valSize; p->val = malloc(valSize); memcpy(p->val,val,valSize); printf("[gpu output]: key:%s val:%d\n",key,*(int *)val); }//__device__ void 
CPUEmitReduceOutput (void* key, void* val, int keySize, int valSize, cpu_context *d_g_state){ /*keyval_t *p = &(d_g_state->d_reduced_keyval_arr[TID]); p->keySize = keySize; p->key = malloc(keySize); memcpy(p->key,key,keySize); p->valSize = valSize; p->val = malloc(valSize); memcpy(p->val,val,valSize);*/ printf("[cpu output]: key:%s val:%d\n",key,*(int *)val); }//__device__ //Last update 9/1/2012 void CPUEmitMapOutput(void *key, void *val, int keySize, int valSize, cpu_context *d_g_state, int map_task_idx){ if(map_task_idx >= d_g_state->num_input_record) { ShowLog("error ! map_task_idx >= d_g_state->num_input_record"); return; } keyval_arr_t *kv_arr_p = &(d_g_state->intermediate_keyval_arr_arr_p[map_task_idx]); if (kv_arr_p->arr_len==0) kv_arr_p->arr = NULL; kv_arr_p->cpu_arr = (keyval_t*)realloc(kv_arr_p->arr, sizeof(keyval_t)*(kv_arr_p->arr_len+1)); int current_map_output_index = (kv_arr_p->arr_len); keyval_t *kv_p = &(kv_arr_p->cpu_arr[current_map_output_index]); kv_p->key = (char *)malloc(sizeof(keySize)); memcpy(kv_p->key,key,keySize); kv_p->keySize = keySize; kv_p->val = (char *)malloc(sizeof(valSize)); memcpy(kv_p->val,val,valSize); kv_p->valSize = valSize; kv_arr_p->arr_len++; }//__device__ __device__ void GPUEmitCombinerOuput(void *key, void *val, int keySize, int valSize, gpu_context *d_g_state, int map_task_idx){ keyval_arr_t *kv_arr_p = d_g_state->d_intermediate_keyval_arr_arr_p[map_task_idx]; void *shared_buff = kv_arr_p->shared_buff; int shared_buff_len = *kv_arr_p->shared_buff_len; int shared_arr_len = *kv_arr_p->shared_arr_len; int shared_buff_pos = *kv_arr_p->shared_buff_pos; int required_mem_len = (shared_buff_pos) + keySize + valSize + sizeof(keyval_pos_t)*(shared_arr_len+1); if (required_mem_len> shared_buff_len){ ShowWarn("Warning! 
no enough memory in GPU task:%d need:%d KB KeySize:%d ValSize:%d shared_arr_len:%d shared_buff_pos:%d shared_buff_len:%d", map_task_idx, required_mem_len/1024,keySize,valSize,shared_arr_len,shared_buff_pos,shared_buff_len); char *new_buff = (char*)malloc(sizeof(char)*((*kv_arr_p->shared_buff_len)*2)); if(new_buff==NULL)ShowLog("Error! There is not enough memory to allocat!\n"); memcpy(new_buff, shared_buff, sizeof(char)*(*kv_arr_p->shared_buff_pos)); memcpy(new_buff + (*kv_arr_p->shared_buff_len)*2 - sizeof(keyval_pos_t)*(*kv_arr_p->shared_arr_len), (char*)shared_buff + (*kv_arr_p->shared_buff_len) - sizeof(keyval_pos_t)*(*kv_arr_p->shared_arr_len), sizeof(keyval_pos_t)*(*kv_arr_p->shared_arr_len)); shared_buff_len = 2*(*kv_arr_p->shared_buff_len); (*kv_arr_p->shared_buff_len) = shared_buff_len; for(int idx = 0; idx < (kv_arr_p->shared_buddy_len); idx++){ int cur_map_task_idx = kv_arr_p->shared_buddy[idx]; //the buddy relationship won't be changed keyval_arr_t *cur_kv_arr_p = d_g_state->d_intermediate_keyval_arr_arr_p[cur_map_task_idx]; cur_kv_arr_p->shared_buff = new_buff; }//for free(shared_buff); shared_buff = new_buff; }//if keyval_pos_t *kv_p = (keyval_pos_t *)((char *)shared_buff + shared_buff_len - sizeof(keyval_pos_t)*(*kv_arr_p->shared_arr_len+1)); kv_p->keySize = keySize; kv_p->valSize = valSize; kv_p->task_idx = map_task_idx; kv_p->next_idx = -2; //merged results memcpy( (char*)shared_buff + *kv_arr_p->shared_buff_pos, key, keySize); kv_p->keyPos = *kv_arr_p->shared_buff_pos; *kv_arr_p->shared_buff_pos += (keySize+3)/4*4; memcpy( (char*)shared_buff + *kv_arr_p->shared_buff_pos, val, valSize); kv_p->valPos = *kv_arr_p->shared_buff_pos; *kv_arr_p->shared_buff_pos += (valSize+3)/4*4; (*kv_arr_p->shared_arr_len)++; }//__device__ //Last update 9/16/2012 __device__ void GPUEmitMapOutput(void *key, void *val, int keySize, int valSize, gpu_context *d_g_state, int map_task_idx){ keyval_arr_t *kv_arr_p = d_g_state->d_intermediate_keyval_arr_arr_p[map_task_idx]; 
char *buff = (char*)(kv_arr_p->shared_buff); if (!((*kv_arr_p->shared_buff_pos) + keySize + valSize < (*kv_arr_p->shared_buff_len) - sizeof(keyval_pos_t)*((*kv_arr_p->shared_arr_len)+1))){ ShowWarn("Warning! not enough memory at GPU task:%d *kv_arr_p->shared_arr_len:%d current buff_size:%d KB", map_task_idx,*kv_arr_p->shared_arr_len,(*kv_arr_p->shared_buff_len)/1024); char *new_buff = (char*)malloc(sizeof(char)*((*kv_arr_p->shared_buff_len)*2)); if(new_buff==NULL){ ShowError("Error ! There is not enough memory to allocat!"); return; } memcpy(new_buff, buff, sizeof(char)*(*kv_arr_p->shared_buff_pos)); memcpy(new_buff + (*kv_arr_p->shared_buff_len)*2 - sizeof(keyval_pos_t)*(*kv_arr_p->shared_arr_len), (char*)buff + (*kv_arr_p->shared_buff_len) - sizeof(keyval_pos_t)*(*kv_arr_p->shared_arr_len), sizeof(keyval_pos_t)*(*kv_arr_p->shared_arr_len)); (*kv_arr_p->shared_buff_len) = 2*(*kv_arr_p->shared_buff_len); for(int idx = 0; idx < (kv_arr_p->shared_buddy_len); idx++){ int cur_map_task_idx = kv_arr_p->shared_buddy[idx]; //the buddy relationship won't be changed keyval_arr_t *cur_kv_arr_p = d_g_state->d_intermediate_keyval_arr_arr_p[cur_map_task_idx]; cur_kv_arr_p->shared_buff = new_buff; }//for free(buff);//????? 
buff = new_buff; }//if keyval_pos_t *kv_p = (keyval_pos_t *)((char *)buff + *kv_arr_p->shared_buff_len - sizeof(keyval_pos_t)*((*kv_arr_p->shared_arr_len)+1)); (*kv_arr_p->shared_arr_len)++; kv_p->task_idx = map_task_idx; kv_p->next_idx = -1; kv_p->keyPos = (*kv_arr_p->shared_buff_pos); *kv_arr_p->shared_buff_pos += ((keySize+3)/4)*4; //alignment 4 bytes for reading and writing memcpy((char *)(buff) + kv_p->keyPos,key,keySize); kv_p->keySize = keySize; kv_p->valPos = (*kv_arr_p->shared_buff_pos); *kv_arr_p->shared_buff_pos += ((valSize+3)/4)*4; char *val_p = (char *)(buff) + kv_p->valPos; memcpy((char *)(buff) + kv_p->valPos, val, valSize); kv_p->valSize = valSize; (kv_arr_p->arr) = kv_p; //kv_arr_p->arr_len++; //d_g_state->d_intermediate_keyval_total_count[map_task_idx] = kv_arr_p->arr_len; }//__device__ //------------------------------------------------- //called by user defined map function //------------------------------------------------- //need update copydata1<<<??? //TODO 9/11/2012 merge threads and blocks code into the same place. 
__global__ void GPUMapPartitioner(gpu_context d_g_state) { //ShowLog("gridDim.x:%d gridDim.y:%d gridDim.z:%d blockDim.x:%d blockDim.y:%d blockDim.z:%d blockIdx.x:%d blockIdx.y:%d blockIdx.z:%d\n", // gridDim.x,gridDim.y,gridDim.z,blockDim.x,blockDim.y,blockDim.z,blockIdx.x,blockIdx.y,blockIdx.z); int num_records_per_thread = (d_g_state.num_input_record + (gridDim.x*blockDim.x*blockDim.y)-1)/(gridDim.x*blockDim.x*blockDim.y); int block_start_idx = num_records_per_thread * blockIdx.x * blockDim.x * blockDim.y; int thread_start_idx = block_start_idx + ((threadIdx.y*blockDim.x + threadIdx.x)/STRIDE)*num_records_per_thread*STRIDE + ((threadIdx.y*blockDim.x + threadIdx.x)%STRIDE); int thread_end_idx = thread_start_idx + num_records_per_thread*STRIDE; if (thread_end_idx > d_g_state.num_input_record) thread_end_idx = d_g_state.num_input_record; if (thread_start_idx >= thread_end_idx) return; int buddy_arr_len = num_records_per_thread; int * int_arr = (int*)malloc((4+buddy_arr_len)*sizeof(int)); if(int_arr==NULL){ ShowError("there is not enough GPU memory\n"); return;} int *shared_arr_len = int_arr; int *shared_buff_len = int_arr+1; int *shared_buff_pos = int_arr+2; int *num_buddy = int_arr+3; int *buddy = int_arr+4; (*shared_buff_len) = SHARED_BUFF_LEN; (*shared_arr_len) = 0; (*shared_buff_pos) = 0; char * buff = (char *)malloc(sizeof(char)*(*shared_buff_len)); keyval_arr_t *kv_arr_t_arr = (keyval_arr_t *)malloc(sizeof(keyval_arr_t)*(thread_end_idx-thread_start_idx+STRIDE-1)/STRIDE); int index = 0; for(int idx = thread_start_idx; idx < thread_end_idx; idx += STRIDE){ buddy[index] = idx; index ++; }//for index = 0; for(int map_task_idx = thread_start_idx; map_task_idx < thread_end_idx; map_task_idx += STRIDE){ keyval_arr_t *kv_arr_t = (keyval_arr_t *)&(kv_arr_t_arr[index]); index++; kv_arr_t->shared_buff = buff; kv_arr_t->shared_arr_len = shared_arr_len; kv_arr_t->shared_buff_len = shared_buff_len; kv_arr_t->shared_buff_pos = shared_buff_pos; kv_arr_t->shared_buddy = buddy; 
kv_arr_t->shared_buddy_len = buddy_arr_len; kv_arr_t->arr = NULL; kv_arr_t->arr_len = 0; d_g_state.d_intermediate_keyval_arr_arr_p[map_task_idx] = kv_arr_t; }//for }//GPUMapPartitioner __global__ void RunGPUMapTasks(gpu_context d_g_state, int curIter, int totalIter) { //ShowLog("gridDim.x:%d gridDim.y:%d gridDim.z:%d blockDim.x:%d blockDim.y:%d blockDim.z:%d blockIdx.x:%d blockIdx.y:%d blockIdx.z:%d\n", // gridDim.x,gridDim.y,gridDim.z,blockDim.x,blockDim.y,blockDim.z,blockIdx.x,blockIdx.y,blockIdx.z); int num_records_per_thread = (d_g_state.num_input_record + (gridDim.x*blockDim.x*blockDim.y)-1)/(gridDim.x*blockDim.x*blockDim.y); int block_start_idx = num_records_per_thread * blockIdx.x * blockDim.x * blockDim.y; int thread_start_idx = block_start_idx + ((threadIdx.y*blockDim.x + threadIdx.x)/STRIDE)*num_records_per_thread*STRIDE + ((threadIdx.y*blockDim.x + threadIdx.x)%STRIDE); //ShowLog("num_records_per_thread:%d block_start_idx:%d gridDim.x:%d gridDim.y:%d gridDim.z:%d blockDim.x:%d blockDim.y:%d blockDim.z:%d",num_records_per_thread, block_start_idx, gridDim.x,gridDim.y,gridDim.z,blockDim.x,blockDim.y,blockDim.z); int thread_end_idx = thread_start_idx + num_records_per_thread*STRIDE; if (thread_end_idx > d_g_state.num_input_record) thread_end_idx = d_g_state.num_input_record; if (thread_start_idx + curIter*STRIDE >= thread_end_idx) return; for(int map_task_idx = thread_start_idx + curIter*STRIDE; map_task_idx < thread_end_idx; map_task_idx += totalIter*STRIDE){ char *key = (char *)(d_g_state.d_input_keys_shared_buff) + d_g_state.d_input_keyval_pos_arr[map_task_idx].keyPos; char *val = (char *)(d_g_state.d_input_vals_shared_buff) + d_g_state.d_input_keyval_pos_arr[map_task_idx].valPos; int valSize = d_g_state.d_input_keyval_pos_arr[map_task_idx].valSize; int keySize = d_g_state.d_input_keyval_pos_arr[map_task_idx].keySize; //////////////////////////////////////////////////////////////// gpu_map(key, val, keySize, valSize, &d_g_state, map_task_idx);// 
//////////////////////////////////////////////////////////////// }//for keyval_arr_t *kv_arr_p = d_g_state.d_intermediate_keyval_arr_arr_p[thread_start_idx]; //char *shared_buff = (char *)(kv_arr_p->shared_buff); //int shared_arr_len = *kv_arr_p->shared_arr_len; //int shared_buff_len = *kv_arr_p->shared_buff_len; d_g_state.d_intermediate_keyval_total_count[thread_start_idx] = *kv_arr_p->shared_arr_len; //__syncthreads(); }//GPUMapPartitioner //NOTE: gpu_combiner will affect the later program results //Last update 9/16/2012 __global__ void GPUCombiner(gpu_context d_g_state) { //ShowLog("gridDim.x:%d gridDim.y:%d gridDim.z:%d blockDim.x:%d blockDim.y:%d blockDim.z:%d blockIdx.x:%d blockIdx.y:%d blockIdx.z:%d\n", // gridDim.x,gridDim.y,gridDim.z,blockDim.x,blockDim.y,blockDim.z,blockIdx.x,blockIdx.y,blockIdx.z); int num_records_per_thread = (d_g_state.num_input_record + (gridDim.x*blockDim.x*blockDim.y)-1)/(gridDim.x*blockDim.x*blockDim.y); int block_start_idx = num_records_per_thread * blockIdx.x * blockDim.x * blockDim.y; int thread_start_idx = block_start_idx + ((threadIdx.y*blockDim.x + threadIdx.x)/STRIDE)*num_records_per_thread*STRIDE + ((threadIdx.y*blockDim.x + threadIdx.x)%STRIDE); int thread_end_idx = thread_start_idx + num_records_per_thread*STRIDE; if (thread_end_idx > d_g_state.num_input_record) thread_end_idx = d_g_state.num_input_record; if (thread_start_idx >= thread_end_idx) return; keyval_arr_t *kv_arr_p = d_g_state.d_intermediate_keyval_arr_arr_p[thread_start_idx]; int *buddy = kv_arr_p->shared_buddy; //TODO use host function set /*for (int idx=0;idx<kv_arr_p->shared_buddy_len;idx++){ d_g_state.d_intermediate_keyval_total_count[idx] = 0; }*/ int unmerged_shared_arr_len = *kv_arr_p->shared_arr_len; int num_keyval_pairs_after_combiner = 0; for (int i=0; i<unmerged_shared_arr_len;i++){ void *shared_buff = kv_arr_p->shared_buff; int shared_buff_len = *kv_arr_p->shared_buff_len; keyval_pos_t *head_kv_p = (keyval_pos_t *)((char *)shared_buff + 
shared_buff_len - sizeof(keyval_pos_t)*(unmerged_shared_arr_len-i)); keyval_pos_t *first_kv_p = head_kv_p; if (first_kv_p->next_idx != -1) continue; int iKeySize = first_kv_p->keySize; char *iKey = (char*)shared_buff + first_kv_p->keyPos; char *iVal = (char*)shared_buff + first_kv_p->valPos; if((first_kv_p->keyPos%4!=0)||(first_kv_p->valPos%4!=0)){ ShowError("keyPos or valPos is not aligned with 4 bytes, results could be wrong\n"); } int valCount = 1; for (int j=i+1;j<unmerged_shared_arr_len;j++){ keyval_pos_t *next_kv_p = (keyval_pos_t *)((char *)shared_buff + shared_buff_len - sizeof(keyval_pos_t)*(unmerged_shared_arr_len-j)); char *jKey = (char *)shared_buff+next_kv_p->keyPos; int jKeySize = next_kv_p->keySize; if (gpu_compare(iKey,iKeySize,jKey,jKeySize)!=0){ continue; } valCount++; first_kv_p->next_idx = j; first_kv_p = next_kv_p; }//for val_t *val_t_arr = (val_t *)malloc(sizeof(val_t)*valCount); int index = 0; first_kv_p = head_kv_p; (val_t_arr[index]).valSize = first_kv_p->valSize; (val_t_arr[index]).val = (char*)shared_buff + first_kv_p->valPos; for (int j=i+1;j<unmerged_shared_arr_len;j++){ keyval_pos_t *next_kv_p = (keyval_pos_t *)((char *)shared_buff + shared_buff_len - sizeof(keyval_pos_t)*(unmerged_shared_arr_len-j)); char *jKey = (char *)shared_buff+next_kv_p->keyPos; int jKeySize = next_kv_p->keySize; if (gpu_compare(iKey,iKeySize,jKey,jKeySize)!=0){ //printf("not match iKey:%s jKey:%s\n",iKey,jKey); continue; } index++; (val_t_arr[index]).valSize = next_kv_p->valSize; (val_t_arr[index]).val = (char*)shared_buff + next_kv_p->valPos; } //do { //(val_t_arr[index]).valSize = first_kv_p->valSize; //(val_t_arr[index]).val = (char*)shared_buff + first_kv_p->valPos; //char *key = (char*)shared_buff + first_kv_p->keyPos; //char *val = (char*)shared_buff + first_kv_p->valPos; //char* val_p = (char*)((val_t_arr[index]).val); //if (thread_start_idx == 95) //int next_idx = first_kv_p->next_idx; //first_kv_p = (keyval_pos_t *)((char *)shared_buff + 
shared_buff_len - sizeof(keyval_pos_t)*(shared_arr_len-next_idx)); //printf("-map_task_idx:%d ->key:%s val:%d valPos:%d valSize:%d next_idx:%d shared_arr_len:%d [%d/%d]\n", // first_kv_p->task_idx, key, *(int*)((char*)shared_buff + first_kv_p->valPos), first_kv_p->valPos,(val_t_arr[index]).valSize, next_idx, shared_arr_len, index, valCount); //printf("->compare key:%s next_key:%s\n", key, (char *)shared_buff + first_kv_p->keyPos); //index++; //}while(first_kv_p->next_idx != -1); if((index+1)!=valCount){ ShowError("Error! thread_idx:%d index:%d != valCount:%d records are missing.\n",thread_start_idx,(index+1),valCount); return; } if(valCount>1) gpu_combiner(iKey,val_t_arr,iKeySize,(valCount),d_g_state,thread_start_idx); else{ first_kv_p->next_idx = -2; first_kv_p->task_idx = thread_start_idx; } num_keyval_pairs_after_combiner++; }//for d_g_state.d_intermediate_keyval_total_count[thread_start_idx] = num_keyval_pairs_after_combiner; //////////////////////////////////////////////////////////////////// __syncthreads(); }//GPUMapPartitioner int StartCPUMap2(thread_info_t* thread_info) { cpu_context *d_g_state = (cpu_context*)(thread_info->d_g_state); job_configuration *cpu_job_conf = (job_configuration*)(thread_info->job_conf); if (cpu_job_conf->num_input_record<=0) { ShowError("Error: no any input keys"); exit(-1);} if (cpu_job_conf->input_keyval_arr == NULL) { ShowError("Error: input_keyval_arr == NULL"); exit(-1);} if (d_g_state->num_cpus_cores <= 0) { ShowError("Error: d_g_state->num_cpus == 0"); exit(-1);} //------------------------------------------------------- //1, prepare buffer to store intermediate results //------------------------------------------------------- keyval_arr_t *d_keyval_arr_p; int *count = NULL; //--------------------------------------------- //3, determine the number of threads to run //--------------------------------------------- ShowLog("CPU_GROUP_ID:[%d] the number of cpus used in computation:%d",d_g_state->cpu_group_id, 
d_g_state->num_cpus_cores); //-------------------------------------------------- //4, start_row_id map //-------------------------------------------------- int num_threads = d_g_state->num_cpus_cores; int num_records_per_thread = (cpu_job_conf->num_input_record+num_threads-1)/(num_threads); int start_row_idx = 0; int end_row_idx = 0; for (int tid = 0;tid<num_threads;tid++){ end_row_idx = start_row_idx + num_records_per_thread; if (tid < (cpu_job_conf->num_input_record % num_threads) ) end_row_idx++; d_g_state->panda_cpu_task_info[tid].start_row_idx = start_row_idx; if (end_row_idx > cpu_job_conf->num_input_record) end_row_idx = cpu_job_conf->num_input_record; d_g_state->panda_cpu_task_info[tid].end_row_idx = end_row_idx; if (pthread_create(&(d_g_state->panda_cpu_task[tid]),NULL,RunPandaCPUMapThread,(char *)&(d_g_state->panda_cpu_task_info[tid]))!=0) perror("Thread creation failed!\n"); start_row_idx = end_row_idx; }//for for (int tid = 0;tid<num_threads;tid++){ void *exitstat; if (pthread_join(d_g_state->panda_cpu_task[tid],&exitstat)!=0) perror("joining failed"); }//for ShowLog("CPU_GROUP_ID:[%d] DONE", d_g_state->cpu_group_id); return 0; }//int int StartCPUMap(cpu_context *d_g_state) { #ifdef DEV_MODE ShowLog("there are %d map tasks.",d_g_state->num_input_record); if (d_g_state->num_input_record<=0) { ShowError("Error: no any input keys"); exit(-1);} if (d_g_state->input_keyval_arr == NULL) { ShowError("Error: input_keyval_arr == NULL"); exit(-1);} if (d_g_state->num_cpus_cores <= 0) { ShowError("Error: d_g_state->num_cpus == 0"); exit(-1);} //------------------------------------------------------- //1, prepare buffer to store intermediate results //------------------------------------------------------- ShowLog("prepare buffer to store intermediate results"); keyval_arr_t *d_keyval_arr_p; int *count = NULL; //--------------------------------------------- //3, determine the number of threads to run //--------------------------------------------- ShowLog("the 
number of cpus used in computation:%d",d_g_state->num_cpus_cores); //-------------------------------------------------- //4, start_row_id map //-------------------------------------------------- int num_threads = d_g_state->num_cpus_cores; ShowLog("start_row_id CPUMapPartitioner num_threads:%d num_input_record:%d",num_threads, d_g_state->num_input_record); int num_records_per_thread = (d_g_state->num_input_record+num_threads-1)/(num_threads); int start_row_idx = 0; int end_idx = 0; for (int tid = 0;tid<num_threads;tid++){ end_idx = start_row_idx + num_records_per_thread; if (tid < (d_g_state->num_input_record % num_threads) ) end_idx++; d_g_state->panda_cpu_task_info[tid].start_row_idx = start_row_idx; if (end_idx > d_g_state->num_input_record) end_idx = d_g_state->num_input_record; d_g_state->panda_cpu_task_info[tid].end_idx = end_idx; if (pthread_create(&(d_g_state->panda_cpu_task[tid]),NULL,RunPandaCPUMapThread,(char *)&(d_g_state->panda_cpu_task_info[tid]))!=0) perror("Thread creation failed!\n"); start_row_idx = end_idx; }//for for (int tid = 0;tid<num_threads;tid++){ void *exitstat; if (pthread_join(d_g_state->panda_cpu_task[tid],&exitstat)!=0) perror("joining failed"); }//for ShowLog("DONE"); #endif return 0; }//int //-------------------------------------------------- // StartGPUMap // Last Update 9/2/2012 //-------------------------------------------------- int StartGPUMap(gpu_context *d_g_state) { //------------------------------------------------------- //0, Check status of d_g_state; //------------------------------------------------------- ShowLog("GPU_ID:[%d] num_input_record %d", d_g_state->gpu_id, d_g_state->num_input_record); if (d_g_state->num_input_record<0) { ShowLog("Error: no any input keys"); exit(-1);} if (d_g_state->h_input_keyval_arr == NULL) { ShowLog("Error: h_input_keyval_arr == NULL"); exit(-1);} if (d_g_state->num_mappers<=0) {d_g_state->num_mappers = (NUM_BLOCKS)*(NUM_THREADS);} if (d_g_state->num_reducers<=0) {d_g_state->num_reducers 
= (NUM_BLOCKS)*(NUM_THREADS);} //------------------------------------------------------- //1, prepare buffer to store intermediate results //------------------------------------------------------- keyval_arr_t *h_keyval_arr_arr = (keyval_arr_t *)malloc(sizeof(keyval_arr_t)*d_g_state->num_input_record); keyval_arr_t *d_keyval_arr_arr; checkCudaErrors(hipMalloc((void**)&(d_keyval_arr_arr),d_g_state->num_input_record*sizeof(keyval_arr_t))); for (int i=0; i<d_g_state->num_input_record;i++){ h_keyval_arr_arr[i].arr = NULL; h_keyval_arr_arr[i].arr_len = 0; }//for keyval_arr_t **d_keyval_arr_arr_p; checkCudaErrors(hipMalloc((void***)&(d_keyval_arr_arr_p),d_g_state->num_input_record*sizeof(keyval_arr_t*))); d_g_state->d_intermediate_keyval_arr_arr_p = d_keyval_arr_arr_p; int *count = NULL; checkCudaErrors(hipMalloc((void**)&(count),d_g_state->num_input_record*sizeof(int))); d_g_state->d_intermediate_keyval_total_count = count; checkCudaErrors(hipMemset(d_g_state->d_intermediate_keyval_total_count,0,d_g_state->num_input_record*sizeof(int))); //---------------------------------------------- //3, determine the number of threads to run //---------------------------------------------- //-------------------------------------------------- //4, start_row_id map //Note: DO *NOT* set large number of threads within block (512), which lead to too many invocation of malloc in the kernel. 
//-------------------------------------------------- hipDeviceSynchronize(); int numGPUCores = getGPUCoresNum(); dim3 blocks(THREAD_BLOCK_SIZE, THREAD_BLOCK_SIZE); int numBlocks = (numGPUCores*16+(blocks.x*blocks.y)-1)/(blocks.x*blocks.y); dim3 grids(numBlocks, 1); int total_gpu_threads = (grids.x*grids.y*blocks.x*blocks.y); ShowLog("GridDim.X:%d GridDim.Y:%d BlockDim.X:%d BlockDim.Y:%d TotalGPUThreads:%d",grids.x,grids.y,blocks.x,blocks.y,total_gpu_threads); hipDeviceSynchronize(); double t1 = PandaTimer(); hipLaunchKernelGGL(( GPUMapPartitioner), dim3(grids),dim3(blocks), 0, 0, *d_g_state); hipDeviceSynchronize(); double t2 = PandaTimer(); int num_records_per_thread = (d_g_state->num_input_record + (total_gpu_threads)-1)/(total_gpu_threads); int totalIter = num_records_per_thread; ShowLog("GPUMapPartitioner:%f totalIter:%d",t2-t1, totalIter); for (int iter = 0; iter< totalIter; iter++){ double t3 = PandaTimer(); hipLaunchKernelGGL(( RunGPUMapTasks), dim3(grids),dim3(blocks), 0, 0, *d_g_state, totalIter -1 - iter, totalIter); hipDeviceSynchronize(); double t4 = PandaTimer(); size_t total_mem,avail_mem; checkCudaErrors(hipMemGetInfo( &avail_mem, &total_mem )); ShowLog("GPU_ID:[%d] RunGPUMapTasks take %f sec at iter [%d/%d] remain %d mb GPU mem processed", d_g_state->gpu_id, t4-t3,iter,totalIter, avail_mem/1024/1024); }//for ShowLog("GPU_ID:[%d] Done %d Tasks",d_g_state->gpu_id,d_g_state->num_input_record); //Local combiner t1 = PandaTimer(); checkCudaErrors(hipMemset(d_g_state->d_intermediate_keyval_total_count,0,d_g_state->num_input_record*sizeof(int))); hipLaunchKernelGGL(( GPUCombiner), dim3(grids),dim3(blocks), 0, 0, *d_g_state); hipDeviceSynchronize(); t2 = PandaTimer(); ShowLog("GPU_ID: GPUCombiner Take:%f",t2-t1); return 0; }//int void DestroyDGlobalState(gpu_context * d_g_state){ }//void void StartGPUShuffle(gpu_context * state){ gpu_context* d_g_state = state; double t1 = PandaTimer(); Shuffle4GPUOutput(d_g_state); double t2 = PandaTimer(); 
ShowLog("GPU_ID:[%d] GPU Shuffle take %f sec", state->gpu_id,t2-t1); //ShowLog("DONE"); }//void void *RunPandaCPUMapThread(void *ptr){ panda_cpu_task_info_t *panda_cpu_task_info = (panda_cpu_task_info_t *)ptr; cpu_context *d_g_state = (cpu_context *)(panda_cpu_task_info->d_g_state); job_configuration *cpu_job_conf = (job_configuration *)(panda_cpu_task_info->cpu_job_conf); for (int map_idx = panda_cpu_task_info->start_row_idx; map_idx < panda_cpu_task_info->end_row_idx; map_idx++){ keyval_t *kv_p = (keyval_t *)(&(cpu_job_conf->input_keyval_arr[map_idx])); cpu_map(kv_p->key,kv_p->val,kv_p->keySize,kv_p->valSize,d_g_state,map_idx); }//for ShowLog("CPU_GROUP_ID:[%d] Done :%d tasks",d_g_state->cpu_group_id, panda_cpu_task_info->end_row_idx - panda_cpu_task_info->start_row_idx); return NULL; } //Use Pthread to process Panda_Reduce void * Panda_Reduce(void *ptr){ thread_info_t *thread_info = (thread_info_t *)ptr; if(thread_info->device_type == GPU_ACC){ //TODO Multiple GPU support gpu_context *d_g_state = (gpu_context *)(thread_info->d_g_state); int num_gpus = d_g_state->num_gpus; if ( num_gpus <= 0){ ShowLog("Error! num_gpus == 0 return"); return NULL; }//if int tid = thread_info->tid; hipSetDevice(tid % num_gpus); // "% num_gpus" allows more CPU threads than GPU devices int gpu_id; hipGetDevice(&gpu_id); ShowLog("Start GPU Reduce Tasks. Number of Reduce Tasks:%d",d_g_state->d_sorted_keyvals_arr_len); StartGPUReduce(d_g_state); }//if if(thread_info->device_type == CPU_ACC){ cpu_context *d_g_state = (cpu_context *)(thread_info->d_g_state); if (d_g_state->num_cpus_cores == 0){ ShowLog("Error! d_g_state->num_cpus == 0 return"); return NULL; } ShowLog("Start CPU Reduce Tasks. 
Number of Reduce Tasks:%d",d_g_state->sorted_keyvals_arr_len); for (int map_idx = 0; map_idx < d_g_state->sorted_keyvals_arr_len; map_idx++){ keyvals_t *kv_p = (keyvals_t *)(&(d_g_state->sorted_intermediate_keyvals_arr[map_idx])); cpu_reduce(kv_p->key, kv_p->vals, kv_p->keySize, kv_p->val_arr_len, d_g_state); }//for }//if //hipFree(d_filebuf); return NULL; }//void __device__ void *GetVal(void *vals, int4* interOffsetSizes, int keyIndex, int valStartIndex) { } __device__ void *GetKey(void *key, int4* interOffsetSizes, int keyIndex, int valStartIndex) { } //------------------------------------------------------- //Reducer //------------------------------------------------------- __global__ void ReducePartitioner(gpu_context d_g_state) { int num_records_per_thread = (d_g_state.d_sorted_keyvals_arr_len + (gridDim.x*blockDim.x*blockDim.y)-1)/(gridDim.x*blockDim.x*blockDim.y); int block_start_idx = num_records_per_thread * blockIdx.x * blockDim.x * blockDim.y; int thread_start_idx = block_start_idx + ((threadIdx.y*blockDim.x + threadIdx.x)/STRIDE)*num_records_per_thread*STRIDE + ((threadIdx.y*blockDim.x + threadIdx.x)%STRIDE); int thread_end_idx = thread_start_idx + num_records_per_thread*STRIDE; if (thread_end_idx > d_g_state.d_sorted_keyvals_arr_len) thread_end_idx = d_g_state.d_sorted_keyvals_arr_len; if (thread_start_idx >= thread_end_idx) return; int start_idx, end_idx; for(int reduce_task_idx=thread_start_idx; reduce_task_idx < thread_end_idx; reduce_task_idx+=STRIDE){ if (reduce_task_idx==0) start_idx = 0; else start_idx = d_g_state.d_pos_arr_4_sorted_keyval_pos_arr[reduce_task_idx-1]; end_idx = d_g_state.d_pos_arr_4_sorted_keyval_pos_arr[reduce_task_idx]; val_t *val_t_arr = (val_t*)malloc(sizeof(val_t)*(end_idx-start_idx)); int keySize = d_g_state.d_keyval_pos_arr[start_idx].keySize; int keyPos = d_g_state.d_keyval_pos_arr[start_idx].keyPos; void *key = (char*)d_g_state.d_sorted_keys_shared_buff+keyPos; for (int index = start_idx;index<end_idx;index++){ int 
valSize = d_g_state.d_keyval_pos_arr[index].valSize; int valPos = d_g_state.d_keyval_pos_arr[index].valPos; val_t_arr[index-start_idx].valSize = valSize; val_t_arr[index-start_idx].val = (char*)d_g_state.d_sorted_vals_shared_buff + valPos; } //for gpu_reduce(key, val_t_arr, keySize, end_idx-start_idx, d_g_state); }//for } void StartGPUReduce(gpu_context *d_g_state) { hipDeviceSynchronize(); d_g_state->d_reduced_keyval_arr_len = d_g_state->d_sorted_keyvals_arr_len; checkCudaErrors(hipMalloc((void **)&(d_g_state->d_reduced_keyval_arr), sizeof(keyval_t)*d_g_state->d_reduced_keyval_arr_len)); hipDeviceSynchronize(); int numGPUCores = getGPUCoresNum(); dim3 blocks(THREAD_BLOCK_SIZE, THREAD_BLOCK_SIZE); int numBlocks = (numGPUCores*16+(blocks.x*blocks.y)-1)/(blocks.x*blocks.y); dim3 grids(numBlocks, 1); int total_gpu_threads = (grids.x*grids.y*blocks.x*blocks.y); ShowLog("number of reduce tasks:%d total gpu threads:%d",d_g_state->d_sorted_keyvals_arr_len, total_gpu_threads); hipLaunchKernelGGL(( ReducePartitioner), dim3(grids),dim3(blocks), 0, 0, *d_g_state); hipDeviceSynchronize(); }//void void* Panda_Map(void *ptr){ thread_info_t *thread_info = (thread_info_t *)ptr; if(thread_info->device_type == GPU_ACC){ gpu_context *d_g_state = (gpu_context *)(thread_info->d_g_state); InitGPUDevice(thread_info); //ShowLog("GPU_ID:[%d] Init GPU MapReduce Load Data From Host to GPU memory",d_g_state->gpu_id); InitGPUMapReduce3(d_g_state); //ShowLog("GPU_ID:[%d] Start GPU Map Tasks",d_g_state->gpu_id); StartGPUMap(d_g_state); StartGPUShuffle(d_g_state); }//if if(thread_info->device_type == CPU_ACC){ cpu_context *d_g_state = (cpu_context *)(thread_info->d_g_state); //ShowLog("CPU_GROUP_ID:[%d] Init CPU Device",d_g_state->cpu_group_id); InitCPUDevice(thread_info); //ShowLog("Init CPU MapReduce"); InitCPUMapReduce2(thread_info); ShowLog("CPU_GROUP_ID:[%d] Start CPU Map Tasks",d_g_state->cpu_group_id); StartCPUMap2(thread_info); StartCPUShuffle2(thread_info); } return NULL; 
}//FinishMapReduce2(d_g_state); void FinishMapReduce(Spec_t* spec) { ShowLog( "=====finish panda mapreduce====="); }//void void FinishMapReduce2(gpu_context* state) { size_t total_mem,avail_mem, heap_limit; checkCudaErrors(hipMemGetInfo( &avail_mem, &total_mem )); ShowLog("avail_mem:%d",avail_mem); }//void #endif //__PANDALIB_CU__
bd33ed79e2af0300a83c5869d001d896e42334d7.cu
/* Copyright 2012 The Trustees of Indiana University. All rights reserved.
   CGL MapReduce Framework on GPUs and CPUs
   Code Name: Panda
   File: PandaLib.cu
   First Version:   2012-07-01 V0.1
   Current Version: 2012-09-01 V0.3
   Last Updates:    2012-09-16
   Developer: Hui Li (lihui@indiana.edu)

   This is the source code for Panda, a MapReduce runtime on GPUs and CPUs.
*/

#ifndef __PANDALIB_CU__
#define __PANDALIB_CU__

#include "Panda.h"
#include "UserAPI.cu"

//----------------------------------------------
// Get default job configuration
//----------------------------------------------

// Allocate a zero-initialized job_configuration with all counters reset.
// Exits the process on allocation failure (consistent with the other
// Create* factories in this file).
job_configuration *CreateJobConf(){
    job_configuration *job_conf = (job_configuration *)malloc(sizeof(job_configuration));
    if (job_conf == NULL) exit(-1);
    memset(job_conf, 0, sizeof(job_configuration));
    job_conf->num_input_record = 0;
    job_conf->input_keyval_arr = NULL;
    job_conf->auto_tuning = false;
    job_conf->num_mappers = 0;
    job_conf->num_reducers = 0;
    job_conf->num_gpus = 0;
    job_conf->num_cpus_cores = 0;
    job_conf->num_cpus_groups = 0;
    return job_conf;
}//CreateJobConf

// Allocate a zero-initialized per-GPU context.
gpu_context *CreateGPUContext(){
    gpu_context *d_g_state = (gpu_context*)malloc(sizeof(gpu_context));
    if (d_g_state == NULL) exit(-1);
    memset(d_g_state, 0, sizeof(gpu_context));
    d_g_state->configured = false;
    d_g_state->h_input_keyval_arr = NULL;
    d_g_state->num_mappers = 0;
    d_g_state->num_reducers = 0;
    return d_g_state;
}//CreateGPUContext

// Allocate a zero-initialized per-CPU-group context.
cpu_context *CreateCPUContext(){
    cpu_context *d_g_state = (cpu_context*)malloc(sizeof(cpu_context));
    if (d_g_state == NULL) exit(-1);
    memset(d_g_state, 0, sizeof(cpu_context));
    d_g_state->configured = false;
    d_g_state->input_keyval_arr = NULL;
    return d_g_state;
}//CreateCPUContext

// Allocate the global panda context that aggregates the GPU and CPU contexts.
// NOTE(fix): the struct is now memset to zero first, like the other Create*
// factories, so any field not explicitly assigned below does not start with
// garbage.
panda_context *CreatePandaContext(){
    panda_context *d_g_state = (panda_context*)malloc(sizeof(panda_context));
    if (d_g_state == NULL) exit(-1);
    memset(d_g_state, 0, sizeof(panda_context));
    d_g_state->input_keyval_arr = NULL;
    d_g_state->intermediate_keyval_arr_arr_p = NULL;
    d_g_state->sorted_intermediate_keyvals_arr = NULL;
    d_g_state->sorted_keyvals_arr_len = 0;
    d_g_state->num_gpus = 0;
    d_g_state->gpu_context = NULL;
    d_g_state->num_cpus_groups = 0;
    d_g_state->cpu_context = NULL;
    return d_g_state;
}//CreatePandaContext

//For version 0.3
// Prepare a CPU group for the map phase: validate the configuration, then
// allocate one pthread handle and one task-info slot per worker core plus
// one (zeroed) intermediate key/value array per input record.
void InitCPUMapReduce2(thread_info_t * thread_info){

    cpu_context *d_g_state = (cpu_context *)(thread_info->d_g_state);
    job_configuration *job_conf = (job_configuration *)(thread_info->job_conf);

    if (job_conf->num_input_record<=0)      { ShowError("Error: no any input keys"); exit(-1);}
    if (job_conf->input_keyval_arr == NULL) { ShowError("Error: input_keyval_arr == NULL"); exit(-1);}
    if (d_g_state->num_cpus_cores <= 0)     { ShowError("Error: d_g_state->num_cpus == 0"); exit(-1);}

    //NOTE: the "configured" flag exists for iterative applications;
    //re-initialization is currently always performed.
    //if (d_g_state->configured)
    //  return;

    int totalKeySize = 0;
    int totalValSize = 0;
    for(int i=0;i<job_conf->num_input_record;i++){
        totalKeySize += job_conf->input_keyval_arr[i].keySize;
        totalValSize += job_conf->input_keyval_arr[i].valSize;
    }//for

    ShowLog("CPU_GROUP_ID:[%d] num_input_record:%d, totalKeySize:%d totalValSize:%d num_cpus:%d",
        d_g_state->cpu_group_id, job_conf->num_input_record, totalKeySize, totalValSize, d_g_state->num_cpus_cores);

    //TODO determin num_cpus
    int num_cpus_cores = d_g_state->num_cpus_cores;
    d_g_state->panda_cpu_task = (pthread_t *)malloc(sizeof(pthread_t)*(num_cpus_cores));
    d_g_state->panda_cpu_task_info = (panda_cpu_task_info_t *)malloc(sizeof(panda_cpu_task_info_t)*(num_cpus_cores));
    d_g_state->intermediate_keyval_arr_arr_p = (keyval_arr_t *)malloc(sizeof(keyval_arr_t)*job_conf->num_input_record);
    memset(d_g_state->intermediate_keyval_arr_arr_p, 0, sizeof(keyval_arr_t)*job_conf->num_input_record);

    for (int i=0;i<num_cpus_cores;i++){
        d_g_state->panda_cpu_task_info[i].d_g_state = d_g_state;
        d_g_state->panda_cpu_task_info[i].cpu_job_conf = job_conf;
        d_g_state->panda_cpu_task_info[i].num_cpus_cores = num_cpus_cores;
        //start/end row indexes are filled in later when tasks are scheduled
        d_g_state->panda_cpu_task_info[i].start_row_idx = 0;
        d_g_state->panda_cpu_task_info[i].end_row_idx = 0;
    }//for

    d_g_state->configured = true;
    ShowLog("CPU_GROUP_ID:[%d] DONE",d_g_state->cpu_group_id);
}//InitCPUMapReduce2

#ifdef DEV_MODE
//For Version 0.3 test; deprecated.
// Copy the job's input key/value records from host to GPU memory using three
// flat buffers: packed keys, packed values, and per-record offsets/sizes.
void InitGPUMapReduce4(thread_info_t* thread_info)
{
    gpu_context *d_g_state = (gpu_context *)(thread_info->d_g_state);
    job_configuration* gpu_job_conf = (job_configuration*)(thread_info->job_conf);
    keyval_t * kv_p = gpu_job_conf->input_keyval_arr;

    ShowLog("d_g_state->configured:%s enable for iterative applications",d_g_state->configured? "true" : "false");
    //if (d_g_state->configured)
    //  return;

    ShowLog("copy %d input records from Host to GPU memory",gpu_job_conf->num_input_record);

    int totalKeySize = 0;
    int totalValSize = 0;
    for(int i=0;i<gpu_job_conf->num_input_record;i++){
        totalKeySize += kv_p[i].keySize;
        totalValSize += kv_p[i].valSize;
    }//for
    ShowLog("totalKeySize:%d totalValSize:%d", totalKeySize, totalValSize);

    //Pack all keys and values into two contiguous host buffers and record
    //each record's offsets/sizes so the device can index into them.
    void *input_vals_shared_buff = malloc(totalValSize);
    void *input_keys_shared_buff = malloc(totalKeySize);
    keyval_pos_t *input_keyval_pos_arr = (keyval_pos_t *)malloc(sizeof(keyval_pos_t)*gpu_job_conf->num_input_record);

    int keyPos = 0;
    int valPos = 0;
    int keySize = 0;
    int valSize = 0;
    for(int i=0; i<gpu_job_conf->num_input_record; i++){
        keySize = kv_p[i].keySize;
        valSize = kv_p[i].valSize;
        memcpy((char *)input_keys_shared_buff + keyPos,(char *)(kv_p[i].key), keySize);
        memcpy((char *)input_vals_shared_buff + valPos,(char *)(kv_p[i].val), valSize);
        input_keyval_pos_arr[i].keySize = keySize;
        input_keyval_pos_arr[i].keyPos = keyPos;
        input_keyval_pos_arr[i].valPos = valPos;
        input_keyval_pos_arr[i].valSize = valSize;
        keyPos += keySize;
        valPos += valSize;
    }//for

    checkCudaErrors(cudaMalloc((void **)&d_g_state->d_input_keyval_pos_arr,sizeof(keyval_pos_t)*gpu_job_conf->num_input_record));
    checkCudaErrors(cudaMalloc((void **)&d_g_state->d_input_keys_shared_buff, totalKeySize));
    checkCudaErrors(cudaMalloc((void **)&d_g_state->d_input_vals_shared_buff, totalValSize));
    checkCudaErrors(cudaMemcpy(d_g_state->d_input_keyval_pos_arr, input_keyval_pos_arr,sizeof(keyval_pos_t)*gpu_job_conf->num_input_record ,cudaMemcpyHostToDevice));
    checkCudaErrors(cudaMemcpy(d_g_state->d_input_keys_shared_buff, input_keys_shared_buff,totalKeySize ,cudaMemcpyHostToDevice));
    checkCudaErrors(cudaMemcpy(d_g_state->d_input_vals_shared_buff, input_vals_shared_buff,totalValSize ,cudaMemcpyHostToDevice));

    //NOTE(fix): the host staging buffers were leaked before; cudaMemcpy from
    //pageable host memory is synchronous, so they can be freed here.
    free(input_keyval_pos_arr);
    free(input_keys_shared_buff);
    free(input_vals_shared_buff);

    cudaThreadSynchronize();
    d_g_state->configured = true;
}//InitGPUMapReduce4
#endif

// Copy the GPU context's input key/value records (h_input_keyval_arr) from
// host to GPU memory using three flat buffers: packed keys, packed values,
// and per-record offsets/sizes.
void InitGPUMapReduce3(gpu_context* d_g_state)
{
    ShowLog("d_g_state->configured:%s enable for iterative applications",d_g_state->configured? "true" : "false");
    //if (d_g_state->configured)
    //  return;

    int totalKeySize = 0;
    int totalValSize = 0;
    for(int i=0;i<d_g_state->num_input_record;i++){
        totalKeySize += d_g_state->h_input_keyval_arr[i].keySize;
        totalValSize += d_g_state->h_input_keyval_arr[i].valSize;
    }//for

    ShowLog("GPU_ID:[%d] copy %d input records from Host to GPU memory totalKeySize:%d totalValSize:%d",
        d_g_state->gpu_id, d_g_state->num_input_record, totalKeySize, totalValSize);

    //Pack all keys and values into two contiguous host buffers and record
    //each record's offsets/sizes so the device can index into them.
    void *input_vals_shared_buff = malloc(totalValSize);
    void *input_keys_shared_buff = malloc(totalKeySize);
    keyval_pos_t *input_keyval_pos_arr = (keyval_pos_t *)malloc(sizeof(keyval_pos_t)*d_g_state->num_input_record);

    int keyPos = 0;
    int valPos = 0;
    int keySize = 0;
    int valSize = 0;
    for(int i=0;i<d_g_state->num_input_record;i++){
        keySize = d_g_state->h_input_keyval_arr[i].keySize;
        valSize = d_g_state->h_input_keyval_arr[i].valSize;
        memcpy((char *)input_keys_shared_buff + keyPos,(char *)(d_g_state->h_input_keyval_arr[i].key), keySize);
        memcpy((char *)input_vals_shared_buff + valPos,(char *)(d_g_state->h_input_keyval_arr[i].val), valSize);
        input_keyval_pos_arr[i].keySize = keySize;
        input_keyval_pos_arr[i].keyPos = keyPos;
        input_keyval_pos_arr[i].valPos = valPos;
        input_keyval_pos_arr[i].valSize = valSize;
        keyPos += keySize;
        valPos += valSize;
    }//for

    checkCudaErrors(cudaMalloc((void **)&d_g_state->d_input_keyval_pos_arr,sizeof(keyval_pos_t)*d_g_state->num_input_record));
    checkCudaErrors(cudaMalloc((void **)&d_g_state->d_input_keys_shared_buff, totalKeySize));
    checkCudaErrors(cudaMalloc((void **)&d_g_state->d_input_vals_shared_buff, totalValSize));
    checkCudaErrors(cudaMemcpy(d_g_state->d_input_keyval_pos_arr, input_keyval_pos_arr,sizeof(keyval_pos_t)*d_g_state->num_input_record ,cudaMemcpyHostToDevice));
    checkCudaErrors(cudaMemcpy(d_g_state->d_input_keys_shared_buff, input_keys_shared_buff,totalKeySize ,cudaMemcpyHostToDevice));
    checkCudaErrors(cudaMemcpy(d_g_state->d_input_vals_shared_buff, input_vals_shared_buff,totalValSize ,cudaMemcpyHostToDevice));

    //NOTE(fix): the host staging buffers were leaked before; cudaMemcpy from
    //pageable host memory is synchronous, so they can be freed here.
    free(input_keyval_pos_arr);
    free(input_keys_shared_buff);
    free(input_vals_shared_buff);

    cudaThreadSynchronize();
    d_g_state->configured = true;
}//InitGPUMapReduce3

#ifdef DEV_MODE
// Deprecated variant of InitGPUMapReduce3 that does not manage the
// "configured" flag; kept for testing.
void InitGPUMapReduce2(gpu_context* d_g_state)
{
    ShowLog("d_g_state->num_input_record:%d",d_g_state->num_input_record);

    int totalKeySize = 0;
    int totalValSize = 0;
    for(int i=0;i<d_g_state->num_input_record;i++){
        totalKeySize += d_g_state->h_input_keyval_arr[i].keySize;
        totalValSize += d_g_state->h_input_keyval_arr[i].valSize;
    }//for

    void *input_vals_shared_buff = malloc(totalValSize);
    void *input_keys_shared_buff = malloc(totalKeySize);
    keyval_pos_t *input_keyval_pos_arr = (keyval_pos_t *)malloc(sizeof(keyval_pos_t)*d_g_state->num_input_record);

    int keyPos = 0;
    int valPos = 0;
    int keySize = 0;
    int valSize = 0;
    for(int i=0;i<d_g_state->num_input_record;i++){
        keySize = d_g_state->h_input_keyval_arr[i].keySize;
        valSize = d_g_state->h_input_keyval_arr[i].valSize;
        memcpy((char *)input_keys_shared_buff + keyPos,(char *)(d_g_state->h_input_keyval_arr[i].key), keySize);
        memcpy((char *)input_vals_shared_buff + valPos,(char *)(d_g_state->h_input_keyval_arr[i].val), valSize);
        input_keyval_pos_arr[i].keySize = keySize;
        input_keyval_pos_arr[i].keyPos = keyPos;
        input_keyval_pos_arr[i].valPos = valPos;
        input_keyval_pos_arr[i].valSize = valSize;
        keyPos += keySize;
        valPos += valSize;
    }//for

    checkCudaErrors(cudaMalloc((void **)&d_g_state->d_input_keyval_pos_arr,sizeof(keyval_pos_t)*d_g_state->num_input_record));
    checkCudaErrors(cudaMalloc((void **)&d_g_state->d_input_keys_shared_buff, totalKeySize));
    checkCudaErrors(cudaMalloc((void **)&d_g_state->d_input_vals_shared_buff, totalValSize));
    checkCudaErrors(cudaMemcpy(d_g_state->d_input_keyval_pos_arr, input_keyval_pos_arr,sizeof(keyval_pos_t)*d_g_state->num_input_record ,cudaMemcpyHostToDevice));
    checkCudaErrors(cudaMemcpy(d_g_state->d_input_keys_shared_buff, input_keys_shared_buff,totalKeySize ,cudaMemcpyHostToDevice));
    checkCudaErrors(cudaMemcpy(d_g_state->d_input_vals_shared_buff, input_vals_shared_buff,totalValSize ,cudaMemcpyHostToDevice));

    //NOTE(fix): free the previously-leaked host staging buffers.
    free(input_keyval_pos_arr);
    free(input_keys_shared_buff);
    free(input_vals_shared_buff);

    cudaThreadSynchronize();
}//InitGPUMapReduce2
#endif

// Initialize a CPU device group: fall back to the detected number of
// hardware cores when the caller did not specify one.
void InitCPUDevice(thread_info_t*thread_info){

    cpu_context *d_g_state = (cpu_context *)(thread_info->d_g_state);
    if (d_g_state->num_cpus_cores<=0)
        d_g_state->num_cpus_cores = getCPUCoresNum();

    ShowLog( "CPU_GROUP_ID:[%d] Init CPU Deivce",d_g_state->cpu_group_id);
}//InitCPUDevice

// Bind the calling host thread to its assigned GPU and reserve 80% of the
// device memory for the in-kernel malloc heap (GPUEmitMapOutput and friends
// allocate from it with device-side malloc).
void InitGPUDevice(thread_info_t*thread_info){

    gpu_context *d_g_state = (gpu_context *)(thread_info->d_g_state);
    int tid = thread_info->tid;
    int assigned_gpu_id = d_g_state->gpu_id;

    int num_gpus = d_g_state->num_gpus;
    if (num_gpus == 0) {
        ShowError("error num_gpus == 0");
        exit(-1);
    }//if

    int gpu_id;
    cudaGetDevice(&gpu_id);
    int gpu_count = 0;
    cudaGetDeviceCount(&gpu_count);
    cudaDeviceProp gpu_dev;
    cudaGetDeviceProperties(&gpu_dev, gpu_id);

    ShowLog("TID:[%d] check GPU ids: cur_gpu_id:[%d] assig_gpu_id:[%d] cudaGetDeviceCount:[%d] GPU name:%s",
        tid, gpu_id, assigned_gpu_id, gpu_count, gpu_dev.name);

    if ( gpu_id != assigned_gpu_id ){
        cudaSetDevice(assigned_gpu_id % num_gpus);
    }//if

    size_t total_mem,avail_mem, heap_limit;
    checkCudaErrors(cudaMemGetInfo( &avail_mem, &total_mem ));
    //NOTE(fix): the heap size used to be truncated through an int cast,
    //which overflows on devices with more than ~2.6 GB of memory;
    //cudaDeviceSetLimit takes a size_t.
    cudaDeviceSetLimit(cudaLimitMallocHeapSize, (size_t)(total_mem*0.8));
    cudaDeviceGetLimit(&heap_limit, cudaLimitMallocHeapSize);

    int numGPUCores = getGPUCoresNum();
    //NOTE(fix): size_t values are cast to int to match the %d specifiers.
    ShowLog("GPU_ID:[%d] numGPUCores:%d cudaLimitMallocHeapSize:%d MB avail_mem:%d MB total_mem:%d MB",
        gpu_id, numGPUCores, (int)(heap_limit/1024/1024), (int)(avail_mem/1024/1024), (int)(total_mem/1024/1024));
}//InitGPUDevice

// Append one input record to the job configuration, deep-copying both the
// key and the value buffers (the caller keeps ownership of its own copies).
void AddPandaTask(job_configuration* job_conf,
                  void*     key,
                  void*     val,
                  int       keySize,
                  int       valSize){

    int len = job_conf->num_input_record;
    if (len<0) return;
    if (len == 0) job_conf->input_keyval_arr = NULL;

    job_conf->input_keyval_arr = (keyval_t *)realloc(job_conf->input_keyval_arr, sizeof(keyval_t)*(len+1));
    job_conf->input_keyval_arr[len].keySize = keySize;
    job_conf->input_keyval_arr[len].valSize = valSize;
    job_conf->input_keyval_arr[len].key = malloc(keySize);
    job_conf->input_keyval_arr[len].val = malloc(valSize);
    memcpy(job_conf->input_keyval_arr[len].key,key,keySize);
    memcpy(job_conf->input_keyval_arr[len].val,val,valSize);
    job_conf->num_input_record++;
}//AddPandaTask

// Stage sorted intermediate records [start_row_id, end_row_id) into the GPU
// context for the reduce phase. Keys and values are packed into two flat
// buffers (each entry padded to a 4-byte boundary) together with per-pair
// position metadata, plus a prefix index mapping each distinct key to the
// exclusive end of its value run.
void AddReduceInputRecordGPU(gpu_context* d_g_state, keyvals_t * sorted_intermediate_keyvals_arr, int start_row_id, int end_row_id){

    int total_count = 0;
    for(int i=start_row_id;i<end_row_id;i++){
        total_count += sorted_intermediate_keyvals_arr[i].val_arr_len;
    }//for

    int totalKeySize = 0;
    int totalValSize = 0;
    for(int i=start_row_id;i<end_row_id;i++){
        totalKeySize += (sorted_intermediate_keyvals_arr[i].keySize+3)/4*4;
        for (int j=0;j<sorted_intermediate_keyvals_arr[i].val_arr_len;j++)
            totalValSize += (sorted_intermediate_keyvals_arr[i].vals[j].valSize+3)/4*4;
    }//for

    checkCudaErrors(cudaMalloc((void **)&d_g_state->d_sorted_keys_shared_buff,totalKeySize));
    checkCudaErrors(cudaMalloc((void **)&d_g_state->d_sorted_vals_shared_buff,totalValSize));
    checkCudaErrors(cudaMalloc((void **)&d_g_state->d_keyval_pos_arr,sizeof(keyval_pos_t)*total_count));

    //the context keeps the host copies alive, so they are not freed here
    d_g_state->h_sorted_keys_shared_buff = malloc(sizeof(char)*totalKeySize);
    d_g_state->h_sorted_vals_shared_buff = malloc(sizeof(char)*totalValSize);

    char *sorted_keys_shared_buff = (char *)d_g_state->h_sorted_keys_shared_buff;
    char *sorted_vals_shared_buff = (char *)d_g_state->h_sorted_vals_shared_buff;
    //NOTE(fix): removed an unused keyval_pos_t staging buffer that was
    //allocated here and leaked.

    int sorted_key_arr_len = (end_row_id-start_row_id);
    keyval_pos_t *tmp_keyval_pos_arr = (keyval_pos_t *)malloc(sizeof(keyval_pos_t)*total_count);

    ShowLog("GPU_ID:[%d] total #different intermediate records:%d total records:%d totalKeySize:%d KB totalValSize:%d KB",
        d_g_state->gpu_id, end_row_id - start_row_id, total_count, totalKeySize/1024, totalValSize/1024);

    int *pos_arr_4_pos_arr = (int*)malloc(sizeof(int)*(sorted_key_arr_len));
    memset(pos_arr_4_pos_arr,0,sizeof(int)*sorted_key_arr_len);

    int index = 0;
    int keyPos = 0;
    int valPos = 0;
    for (int i=start_row_id;i<end_row_id;i++){
        keyvals_t* p = (keyvals_t*)&(sorted_intermediate_keyvals_arr[i]);
        memcpy(sorted_keys_shared_buff+keyPos,p->key, p->keySize);
        for (int j=0;j<p->val_arr_len;j++){
            tmp_keyval_pos_arr[index].keyPos = keyPos;
            tmp_keyval_pos_arr[index].keySize = p->keySize;
            tmp_keyval_pos_arr[index].valPos = valPos;
            tmp_keyval_pos_arr[index].valSize = p->vals[j].valSize;
            memcpy(sorted_vals_shared_buff + valPos,p->vals[j].val,p->vals[j].valSize);
            valPos += (p->vals[j].valSize+3)/4*4;
            index++;
        }//for
        keyPos += (p->keySize+3)/4*4;
        pos_arr_4_pos_arr[i-start_row_id] = index;   //exclusive end of this key's value run
    }//for

    d_g_state->d_sorted_keyvals_arr_len = end_row_id-start_row_id;
    checkCudaErrors(cudaMemcpy(d_g_state->d_keyval_pos_arr,tmp_keyval_pos_arr,sizeof(keyval_pos_t)*total_count,cudaMemcpyHostToDevice));
    checkCudaErrors(cudaMalloc((void**)&d_g_state->d_pos_arr_4_sorted_keyval_pos_arr,sizeof(int)*sorted_key_arr_len));
    checkCudaErrors(cudaMemcpy(d_g_state->d_pos_arr_4_sorted_keyval_pos_arr,pos_arr_4_pos_arr,sizeof(int)*sorted_key_arr_len,cudaMemcpyHostToDevice));
    checkCudaErrors(cudaMemcpy(d_g_state->d_sorted_keys_shared_buff, sorted_keys_shared_buff, sizeof(char)*totalKeySize,cudaMemcpyHostToDevice));
    checkCudaErrors(cudaMemcpy(d_g_state->d_sorted_vals_shared_buff, sorted_vals_shared_buff, sizeof(char)*totalValSize,cudaMemcpyHostToDevice));

    //NOTE(fix): free the host-side staging arrays that were leaked before.
    free(tmp_keyval_pos_arr);
    free(pos_arr_4_pos_arr);
}//AddReduceInputRecordGPU

// Append input records [start_row_id, end_row_id) to the GPU context.
// Key/value pointers are shared with the caller (shallow copy), unlike
// AddPandaTask which deep-copies.
void AddMapInputRecordGPU(gpu_context* d_g_state, keyval_t *kv_p, int start_row_id, int end_row_id){

    if (end_row_id<=start_row_id) {
        ShowError("error! end_row_id<=start_row_id");
        return;
    }
    int len = d_g_state->num_input_record;
    if (len<0) { ShowError("error! len<0"); return; }
    if (len == 0) d_g_state->h_input_keyval_arr = NULL;

    ShowLog("GPU_ID:[%d] add map tasks into gpu; #total input:%d #added input:%d",d_g_state->gpu_id, len, end_row_id-start_row_id);
    d_g_state->h_input_keyval_arr = (keyval_t *)realloc(d_g_state->h_input_keyval_arr, sizeof(keyval_t)*(len + end_row_id - start_row_id));

    for (int i=start_row_id;i<end_row_id;i++){
        d_g_state->h_input_keyval_arr[len].keySize = kv_p[i].keySize;
        d_g_state->h_input_keyval_arr[len].valSize = kv_p[i].valSize;
        d_g_state->h_input_keyval_arr[len].key = kv_p[i].key;
        d_g_state->h_input_keyval_arr[len].val = kv_p[i].val;
        d_g_state->num_input_record++;
        len++;
    }//for
}//AddMapInputRecordGPU

// Append input records [start_row_id, end_row_id) to a CPU group context.
// Key/value pointers are shared with the caller (shallow copy).
void AddMapInputRecordCPU(cpu_context* d_g_state, keyval_t *kv_p, int start_row_id, int end_row_id){

    if (end_row_id<=start_row_id) {
        ShowError("error! end_row_id[%d] <= start_row_id[%d]",end_row_id, start_row_id);
        return;
    }
    int len = d_g_state->num_input_record;
    if (len<0) { ShowError("error! len<0"); return; }
    if (len == 0) d_g_state->input_keyval_arr = NULL;

    ShowLog("CPU_GROUP_ID:[%d] add map input record for cpu device current #input:%d added #input:%d",d_g_state->cpu_group_id,len,end_row_id-start_row_id);
    d_g_state->input_keyval_arr = (keyval_t *)realloc(d_g_state->input_keyval_arr, sizeof(keyval_t)*(len+end_row_id-start_row_id));

    for (int i=start_row_id;i<end_row_id;i++){
        d_g_state->input_keyval_arr[len].keySize = kv_p[i].keySize;
        d_g_state->input_keyval_arr[len].valSize = kv_p[i].valSize;
        d_g_state->input_keyval_arr[len].key = kv_p[i].key;
        d_g_state->input_keyval_arr[len].val = kv_p[i].val;
        d_g_state->num_input_record++;
        len++;
    }//for
}//AddMapInputRecordCPU

// Append sorted intermediate records [start_row_id, end_row_id) to the CPU
// context for the reduce phase (shallow copy of key/val pointers).
// NOTE(fix): the destination array is now grown with realloc — the previous
// malloc dropped any records added by an earlier call — and the stored
// length accounts for records that were already present, matching the other
// Add*InputRecord* functions.
void AddReduceInputRecordCPU(cpu_context* d_g_state, keyvals_t *kv_p, int start_row_id, int end_row_id){

    if (end_row_id<start_row_id){
        ShowError("error! end_row_id<=start_row_id");
        return;
    }
    int len = d_g_state->sorted_keyvals_arr_len;
    if (len<0) { ShowError("error! len<0"); return; }
    if (len == 0) d_g_state->sorted_intermediate_keyvals_arr = NULL;

    d_g_state->sorted_intermediate_keyvals_arr =
        (keyvals_t *)realloc(d_g_state->sorted_intermediate_keyvals_arr, sizeof(keyvals_t)*(len+end_row_id-start_row_id));

    for (int i = len; i< len+end_row_id-start_row_id; i++){
        d_g_state->sorted_intermediate_keyvals_arr[i].keySize = kv_p[start_row_id+i-len].keySize;
        d_g_state->sorted_intermediate_keyvals_arr[i].key = kv_p[start_row_id+i-len].key;
        d_g_state->sorted_intermediate_keyvals_arr[i].vals = kv_p[start_row_id+i-len].vals;
        d_g_state->sorted_intermediate_keyvals_arr[i].val_arr_len = kv_p[start_row_id+i-len].val_arr_len;
    }//for
    d_g_state->sorted_keyvals_arr_len = len + (end_row_id-start_row_id);
}//AddReduceInputRecordCPU

// Device-side emit for the reduce phase: deep-copy the reduced pair into
// this thread's slot of d_reduced_keyval_arr (indexed by the thread id TID)
// using device-heap malloc.
__device__ void GPUEmitReduceOuput (void* key, void* val, int keySize, int valSize, gpu_context *d_g_state){

    keyval_t *p = &(d_g_state->d_reduced_keyval_arr[TID]);
    p->keySize = keySize;
    p->key = malloc(keySize);
    memcpy(p->key,key,keySize);
    p->valSize = valSize;
    p->val = malloc(valSize);
    memcpy(p->val,val,valSize);
    //NOTE(review): assumes the value is an int — confirm against user reducers
    printf("[gpu output]: key:%s val:%d\n",key,*(int *)val);
}//GPUEmitReduceOuput

// Host-side emit for the reduce phase; currently only prints the result.
// The buffered output path below is intentionally kept disabled.
void CPUEmitReduceOutput (void* key, void* val, int keySize, int valSize, cpu_context *d_g_state){

    /*keyval_t *p = &(d_g_state->d_reduced_keyval_arr[TID]);
    p->keySize = keySize;
    p->key = malloc(keySize);
    memcpy(p->key,key,keySize);
    p->valSize = valSize;
    p->val = malloc(valSize);
    memcpy(p->val,val,valSize);*/
    printf("[cpu output]: key:%s val:%d\n",key,*(int *)val);
}//CPUEmitReduceOutput

//Last update 9/1/2012
// Append one intermediate key/value pair produced by a CPU map task.
// NOTE(fix): the output array is consistently grown through ->cpu_arr (the
// old code realloc'ed the never-updated ->arr pointer and so lost earlier
// outputs on the second emit), and the key/value copies now allocate
// keySize/valSize bytes instead of sizeof(keySize)/sizeof(valSize)
// (4 bytes each), which overflowed the heap for any larger key or value.
void CPUEmitMapOutput(void *key, void *val, int keySize, int valSize, cpu_context *d_g_state, int map_task_idx){

    if(map_task_idx >= d_g_state->num_input_record) {
        ShowLog("error ! map_task_idx >= d_g_state->num_input_record");
        return;
    }

    keyval_arr_t *kv_arr_p = &(d_g_state->intermediate_keyval_arr_arr_p[map_task_idx]);
    if (kv_arr_p->arr_len==0) kv_arr_p->cpu_arr = NULL;
    kv_arr_p->cpu_arr = (keyval_t*)realloc(kv_arr_p->cpu_arr, sizeof(keyval_t)*(kv_arr_p->arr_len+1));

    int current_map_output_index = (kv_arr_p->arr_len);
    keyval_t *kv_p = &(kv_arr_p->cpu_arr[current_map_output_index]);

    kv_p->key = (char *)malloc(keySize);
    memcpy(kv_p->key,key,keySize);
    kv_p->keySize = keySize;

    kv_p->val = (char *)malloc(valSize);
    memcpy(kv_p->val,val,valSize);
    kv_p->valSize = valSize;

    kv_arr_p->arr_len++;
}//CPUEmitMapOutput

// Device-side emit for the combiner: append a merged (key, value) pair into
// the buffer shared by this thread's group of map tasks. The buffer is
// double-ended: key/value bytes grow from the front (shared_buff_pos),
// keyval_pos_t metadata grows from the back; it is doubled when full.
__device__ void GPUEmitCombinerOuput(void *key, void *val, int keySize, int valSize, gpu_context *d_g_state, int map_task_idx){

    keyval_arr_t *kv_arr_p = d_g_state->d_intermediate_keyval_arr_arr_p[map_task_idx];
    void *shared_buff = kv_arr_p->shared_buff;
    int shared_buff_len = *kv_arr_p->shared_buff_len;
    int shared_arr_len = *kv_arr_p->shared_arr_len;
    int shared_buff_pos = *kv_arr_p->shared_buff_pos;

    int required_mem_len = (shared_buff_pos) + keySize + valSize + sizeof(keyval_pos_t)*(shared_arr_len+1);
    if (required_mem_len> shared_buff_len){

        ShowWarn("Warning! no enough memory in GPU task:%d need:%d KB KeySize:%d ValSize:%d shared_arr_len:%d shared_buff_pos:%d shared_buff_len:%d",
            map_task_idx, required_mem_len/1024,keySize,valSize,shared_arr_len,shared_buff_pos,shared_buff_len);

        char *new_buff = (char*)malloc(sizeof(char)*((*kv_arr_p->shared_buff_len)*2));
        if(new_buff==NULL)ShowLog("Error! There is not enough memory to allocat!\n");

        //copy the data region from the front and the metadata region from the back
        memcpy(new_buff, shared_buff, sizeof(char)*(*kv_arr_p->shared_buff_pos));
        memcpy(new_buff + (*kv_arr_p->shared_buff_len)*2 - sizeof(keyval_pos_t)*(*kv_arr_p->shared_arr_len),
            (char*)shared_buff + (*kv_arr_p->shared_buff_len) - sizeof(keyval_pos_t)*(*kv_arr_p->shared_arr_len),
            sizeof(keyval_pos_t)*(*kv_arr_p->shared_arr_len));

        shared_buff_len = 2*(*kv_arr_p->shared_buff_len);
        (*kv_arr_p->shared_buff_len) = shared_buff_len;

        for(int idx = 0; idx < (kv_arr_p->shared_buddy_len); idx++){
            //the buddy relationship won't be changed; every buddy must see
            //the relocated buffer
            int cur_map_task_idx = kv_arr_p->shared_buddy[idx];
            keyval_arr_t *cur_kv_arr_p = d_g_state->d_intermediate_keyval_arr_arr_p[cur_map_task_idx];
            cur_kv_arr_p->shared_buff = new_buff;
        }//for

        free(shared_buff);
        shared_buff = new_buff;
    }//if

    keyval_pos_t *kv_p = (keyval_pos_t *)((char *)shared_buff + shared_buff_len - sizeof(keyval_pos_t)*(*kv_arr_p->shared_arr_len+1));
    kv_p->keySize = keySize;
    kv_p->valSize = valSize;
    kv_p->task_idx = map_task_idx;
    kv_p->next_idx = -2;            //-2 marks a merged (combined) result

    memcpy( (char*)shared_buff + *kv_arr_p->shared_buff_pos, key, keySize);
    kv_p->keyPos = *kv_arr_p->shared_buff_pos;
    *kv_arr_p->shared_buff_pos += (keySize+3)/4*4;   //keep 4-byte alignment
    memcpy( (char*)shared_buff + *kv_arr_p->shared_buff_pos, val, valSize);
    kv_p->valPos = *kv_arr_p->shared_buff_pos;
    *kv_arr_p->shared_buff_pos += (valSize+3)/4*4;

    (*kv_arr_p->shared_arr_len)++;
}//GPUEmitCombinerOuput

//Last update 9/16/2012
// Device-side emit for the map phase: same double-ended shared-buffer layout
// as GPUEmitCombinerOuput (data grows from the front, keyval_pos_t metadata
// from the back), doubling the buffer on overflow.
__device__ void GPUEmitMapOutput(void *key, void *val, int keySize, int valSize, gpu_context *d_g_state, int map_task_idx){

    keyval_arr_t *kv_arr_p = d_g_state->d_intermediate_keyval_arr_arr_p[map_task_idx];
    char *buff = (char*)(kv_arr_p->shared_buff);

    if (!((*kv_arr_p->shared_buff_pos) + keySize + valSize < (*kv_arr_p->shared_buff_len) - sizeof(keyval_pos_t)*((*kv_arr_p->shared_arr_len)+1))){

        ShowWarn("Warning! not enough memory at GPU task:%d *kv_arr_p->shared_arr_len:%d current buff_size:%d KB",
            map_task_idx,*kv_arr_p->shared_arr_len,(*kv_arr_p->shared_buff_len)/1024);

        char *new_buff = (char*)malloc(sizeof(char)*((*kv_arr_p->shared_buff_len)*2));
        if(new_buff==NULL){
            ShowError("Error ! There is not enough memory to allocat!");
            return;
        }

        //copy the data region from the front and the metadata region from the back
        memcpy(new_buff, buff, sizeof(char)*(*kv_arr_p->shared_buff_pos));
        memcpy(new_buff + (*kv_arr_p->shared_buff_len)*2 - sizeof(keyval_pos_t)*(*kv_arr_p->shared_arr_len),
            (char*)buff + (*kv_arr_p->shared_buff_len) - sizeof(keyval_pos_t)*(*kv_arr_p->shared_arr_len),
            sizeof(keyval_pos_t)*(*kv_arr_p->shared_arr_len));

        (*kv_arr_p->shared_buff_len) = 2*(*kv_arr_p->shared_buff_len);

        for(int idx = 0; idx < (kv_arr_p->shared_buddy_len); idx++){
            //the buddy relationship won't be changed
            int cur_map_task_idx = kv_arr_p->shared_buddy[idx];
            keyval_arr_t *cur_kv_arr_p = d_g_state->d_intermediate_keyval_arr_arr_p[cur_map_task_idx];
            cur_kv_arr_p->shared_buff = new_buff;
        }//for

        //NOTE(review): assumes no other thread is still reading the old
        //buffer when it is freed — confirm the buddy threads are not
        //emitting concurrently
        free(buff);
        buff = new_buff;
    }//if

    keyval_pos_t *kv_p = (keyval_pos_t *)((char *)buff + *kv_arr_p->shared_buff_len - sizeof(keyval_pos_t)*((*kv_arr_p->shared_arr_len)+1));
    (*kv_arr_p->shared_arr_len)++;
    kv_p->task_idx = map_task_idx;
    kv_p->next_idx = -1;

    kv_p->keyPos = (*kv_arr_p->shared_buff_pos);
    *kv_arr_p->shared_buff_pos += ((keySize+3)/4)*4;    //alignment 4 bytes for reading and writing
    memcpy((char *)(buff) + kv_p->keyPos,key,keySize);
    kv_p->keySize = keySize;

    kv_p->valPos = (*kv_arr_p->shared_buff_pos);
    *kv_arr_p->shared_buff_pos += ((valSize+3)/4)*4;
    memcpy((char *)(buff) + kv_p->valPos, val, valSize);
    kv_p->valSize = valSize;

    (kv_arr_p->arr) = kv_p;
}//GPUEmitMapOutput

//-------------------------------------------------
//called by user defined map function
//-------------------------------------------------
//need update copydata1<<<???
//TODO 9/11/2012 merge threads and blocks code into the same place.
//--------------------------------------------------------------------------------------
// Panda MapReduce runtime: GPU map/combine/reduce kernels plus the host drivers that
// launch them, and the pthread-based CPU map/reduce path.
// NOTE(review): although this translation unit was hipified (hip/hip_runtime.h in the
// file header), it still calls the cuda* runtime APIs directly -- presumably they are
// remapped elsewhere in the build; confirm before porting further.
//--------------------------------------------------------------------------------------

// Kernel: pre-allocates, per owning thread, the shared bookkeeping block and output
// buffer used by all of that thread's map tasks, then publishes one keyval_arr_t per
// input record into d_g_state.d_intermediate_keyval_arr_arr_p.
__global__ void GPUMapPartitioner(gpu_context d_g_state)
{
	// Ceil-divide the input records over every launched thread.
	int num_records_per_thread = (d_g_state.num_input_record + (gridDim.x*blockDim.x*blockDim.y)-1)/(gridDim.x*blockDim.x*blockDim.y);
	int block_start_idx = num_records_per_thread * blockIdx.x * blockDim.x * blockDim.y;
	// Interleaved ownership: a thread handles records start, start+STRIDE, start+2*STRIDE, ...
	int thread_start_idx = block_start_idx
		+ ((threadIdx.y*blockDim.x + threadIdx.x)/STRIDE)*num_records_per_thread*STRIDE
		+ ((threadIdx.y*blockDim.x + threadIdx.x)%STRIDE);
	int thread_end_idx = thread_start_idx + num_records_per_thread*STRIDE;
	if (thread_end_idx > d_g_state.num_input_record)
		thread_end_idx = d_g_state.num_input_record;
	if (thread_start_idx >= thread_end_idx)
		return;

	// One device-heap block laid out as [arr_len][buff_len][buff_pos][num_buddy][buddy...].
	int buddy_arr_len = num_records_per_thread;
	int * int_arr = (int*)malloc((4+buddy_arr_len)*sizeof(int));
	if(int_arr==NULL){ ShowError("there is not enough GPU memory\n"); return;}
	int *shared_arr_len  = int_arr;    // #key/val pairs emitted into the shared buffer
	int *shared_buff_len = int_arr+1;  // capacity of the shared output buffer
	int *shared_buff_pos = int_arr+2;  // next free write offset in the buffer
	int *num_buddy = int_arr+3;        // NOTE(review): never read or written -- appears unused
	int *buddy = int_arr+4;            // record indices that share this buffer
	(*shared_buff_len) = SHARED_BUFF_LEN;
	(*shared_arr_len) = 0;
	(*shared_buff_pos) = 0;

	// Shared output buffer and one descriptor per record owned by this thread.
	// NOTE(review): neither malloc result is NULL-checked here, unlike int_arr above.
	char * buff = (char *)malloc(sizeof(char)*(*shared_buff_len));
	keyval_arr_t *kv_arr_t_arr = (keyval_arr_t *)malloc(sizeof(keyval_arr_t)*(thread_end_idx-thread_start_idx+STRIDE-1)/STRIDE);

	// Record which input indices co-own this buffer ("buddies").
	int index = 0;
	for(int idx = thread_start_idx; idx < thread_end_idx; idx += STRIDE){
		buddy[index] = idx;
		index ++;
	}//for

	// Wire every descriptor to the shared state and publish it globally.
	index = 0;
	for(int map_task_idx = thread_start_idx; map_task_idx < thread_end_idx; map_task_idx += STRIDE){
		keyval_arr_t *kv_arr_t = (keyval_arr_t *)&(kv_arr_t_arr[index]);
		index++;
		kv_arr_t->shared_buff = buff;
		kv_arr_t->shared_arr_len = shared_arr_len;
		kv_arr_t->shared_buff_len = shared_buff_len;
		kv_arr_t->shared_buff_pos = shared_buff_pos;
		kv_arr_t->shared_buddy = buddy;
		kv_arr_t->shared_buddy_len = buddy_arr_len;
		kv_arr_t->arr = NULL;
		kv_arr_t->arr_len = 0;
		d_g_state.d_intermediate_keyval_arr_arr_p[map_task_idx] = kv_arr_t;
	}//for
}//GPUMapPartitioner

// Kernel: runs one slice (curIter of totalIter) of the map tasks assigned to each
// thread, invoking the user-supplied gpu_map on each record, then records the running
// pair count for the thread's shared buffer.
__global__ void RunGPUMapTasks(gpu_context d_g_state, int curIter, int totalIter)
{
	// Same ownership arithmetic as GPUMapPartitioner -- must stay in sync with it.
	int num_records_per_thread = (d_g_state.num_input_record + (gridDim.x*blockDim.x*blockDim.y)-1)/(gridDim.x*blockDim.x*blockDim.y);
	int block_start_idx = num_records_per_thread * blockIdx.x * blockDim.x * blockDim.y;
	int thread_start_idx = block_start_idx
		+ ((threadIdx.y*blockDim.x + threadIdx.x)/STRIDE)*num_records_per_thread*STRIDE
		+ ((threadIdx.y*blockDim.x + threadIdx.x)%STRIDE);
	int thread_end_idx = thread_start_idx + num_records_per_thread*STRIDE;
	if (thread_end_idx > d_g_state.num_input_record)
		thread_end_idx = d_g_state.num_input_record;
	// Only the curIter-th record of each thread's interleaved run is processed
	// this launch; the host loops over totalIter launches to cover them all.
	if (thread_start_idx + curIter*STRIDE >= thread_end_idx)
		return;

	for(int map_task_idx = thread_start_idx + curIter*STRIDE; map_task_idx < thread_end_idx; map_task_idx += totalIter*STRIDE){
		char *key = (char *)(d_g_state.d_input_keys_shared_buff) + d_g_state.d_input_keyval_pos_arr[map_task_idx].keyPos;
		char *val = (char *)(d_g_state.d_input_vals_shared_buff) + d_g_state.d_input_keyval_pos_arr[map_task_idx].valPos;
		int valSize = d_g_state.d_input_keyval_pos_arr[map_task_idx].valSize;
		int keySize = d_g_state.d_input_keyval_pos_arr[map_task_idx].keySize;
		gpu_map(key, val, keySize, valSize, &d_g_state, map_task_idx);
	}//for

	// Publish the pair count accumulated so far in this thread's shared buffer.
	keyval_arr_t *kv_arr_p = d_g_state.d_intermediate_keyval_arr_arr_p[thread_start_idx];
	d_g_state.d_intermediate_keyval_total_count[thread_start_idx] = *kv_arr_p->shared_arr_len;
}//RunGPUMapTasks

//NOTE: gpu_combiner will affect the later program results
//Last update 9/16/2012
// Kernel: local combine. keyval_pos_t records grow downward from the END of the
// shared buffer; record i lives at buff + buff_len - sizeof(keyval_pos_t)*(count-i).
// For each unvisited key (next_idx == -1) it chains all equal keys via next_idx,
// gathers their values, and hands them to the user-supplied gpu_combiner.
__global__ void GPUCombiner(gpu_context d_g_state)
{
	int num_records_per_thread = (d_g_state.num_input_record + (gridDim.x*blockDim.x*blockDim.y)-1)/(gridDim.x*blockDim.x*blockDim.y);
	int block_start_idx = num_records_per_thread * blockIdx.x * blockDim.x * blockDim.y;
	int thread_start_idx = block_start_idx
		+ ((threadIdx.y*blockDim.x + threadIdx.x)/STRIDE)*num_records_per_thread*STRIDE
		+ ((threadIdx.y*blockDim.x + threadIdx.x)%STRIDE);
	int thread_end_idx = thread_start_idx + num_records_per_thread*STRIDE;
	if (thread_end_idx > d_g_state.num_input_record)
		thread_end_idx = d_g_state.num_input_record;
	if (thread_start_idx >= thread_end_idx)
		return;

	keyval_arr_t *kv_arr_p = d_g_state.d_intermediate_keyval_arr_arr_p[thread_start_idx];
	int *buddy = kv_arr_p->shared_buddy;  // NOTE(review): unused (a reset loop was removed)

	int unmerged_shared_arr_len = *kv_arr_p->shared_arr_len;
	int num_keyval_pairs_after_combiner = 0;
	for (int i=0; i<unmerged_shared_arr_len;i++){
		void *shared_buff = kv_arr_p->shared_buff;
		int shared_buff_len = *kv_arr_p->shared_buff_len;
		// Record i, counted backwards from the buffer tail.
		keyval_pos_t *head_kv_p = (keyval_pos_t *)((char *)shared_buff + shared_buff_len - sizeof(keyval_pos_t)*(unmerged_shared_arr_len-i));
		keyval_pos_t *first_kv_p = head_kv_p;
		// Skip records already chained into an earlier key's group.
		if (first_kv_p->next_idx != -1) continue;
		int iKeySize = first_kv_p->keySize;
		char *iKey = (char*)shared_buff + first_kv_p->keyPos;
		char *iVal = (char*)shared_buff + first_kv_p->valPos;  // NOTE(review): unused
		if((first_kv_p->keyPos%4!=0)||(first_kv_p->valPos%4!=0)){
			ShowError("keyPos or valPos is not aligned with 4 bytes, results could be wrong\n");
		}
		// Pass 1: chain every later record with an equal key, counting values.
		int valCount = 1;
		for (int j=i+1;j<unmerged_shared_arr_len;j++){
			keyval_pos_t *next_kv_p = (keyval_pos_t *)((char *)shared_buff + shared_buff_len - sizeof(keyval_pos_t)*(unmerged_shared_arr_len-j));
			char *jKey = (char *)shared_buff+next_kv_p->keyPos;
			int jKeySize = next_kv_p->keySize;
			if (gpu_compare(iKey,iKeySize,jKey,jKeySize)!=0){
				continue;
			}
			valCount++;
			first_kv_p->next_idx = j;
			first_kv_p = next_kv_p;
		}//for
		// Pass 2: gather the values of the whole group into val_t_arr.
		// NOTE(review): val_t_arr is never freed -- device-heap leak per group.
		val_t *val_t_arr = (val_t *)malloc(sizeof(val_t)*valCount);
		int index = 0;
		first_kv_p = head_kv_p;
		(val_t_arr[index]).valSize = first_kv_p->valSize;
		(val_t_arr[index]).val = (char*)shared_buff + first_kv_p->valPos;
		for (int j=i+1;j<unmerged_shared_arr_len;j++){
			keyval_pos_t *next_kv_p = (keyval_pos_t *)((char *)shared_buff + shared_buff_len - sizeof(keyval_pos_t)*(unmerged_shared_arr_len-j));
			char *jKey = (char *)shared_buff+next_kv_p->keyPos;
			int jKeySize = next_kv_p->keySize;
			if (gpu_compare(iKey,iKeySize,jKey,jKeySize)!=0){
				continue;
			}
			index++;
			(val_t_arr[index]).valSize = next_kv_p->valSize;
			(val_t_arr[index]).val = (char*)shared_buff + next_kv_p->valPos;
		}
		// (An older commented-out traversal that walked the next_idx chain instead of
		// rescanning was removed here; see file history.)
		if((index+1)!=valCount){
			ShowError("Error! thread_idx:%d index:%d != valCount:%d records are missing.\n",thread_start_idx,(index+1),valCount);
			return;
		}
		if(valCount>1)
			gpu_combiner(iKey,val_t_arr,iKeySize,(valCount),d_g_state,thread_start_idx);
		else{
			// Singleton key: mark it as already combined and claim it for this thread.
			first_kv_p->next_idx = -2;
			first_kv_p->task_idx = thread_start_idx;
		}
		num_keyval_pairs_after_combiner++;
	}//for
	d_g_state.d_intermediate_keyval_total_count[thread_start_idx] = num_keyval_pairs_after_combiner;
	__syncthreads();
}//GPUCombiner

// Host: partitions the input records of a CPU job over num_cpus_cores pthreads
// (remainder records go to the lowest-numbered threads) and joins them all.
int StartCPUMap2(thread_info_t* thread_info)
{
	cpu_context *d_g_state = (cpu_context*)(thread_info->d_g_state);
	job_configuration *cpu_job_conf = (job_configuration*)(thread_info->job_conf);
	if (cpu_job_conf->num_input_record<=0) { ShowError("Error: no any input keys"); exit(-1);}
	if (cpu_job_conf->input_keyval_arr == NULL) { ShowError("Error: input_keyval_arr == NULL"); exit(-1);}
	if (d_g_state->num_cpus_cores <= 0) { ShowError("Error: d_g_state->num_cpus == 0"); exit(-1);}

	keyval_arr_t *d_keyval_arr_p;  // NOTE(review): unused
	int *count = NULL;             // NOTE(review): unused

	ShowLog("CPU_GROUP_ID:[%d] the number of cpus used in computation:%d",d_g_state->cpu_group_id, d_g_state->num_cpus_cores);

	// Assign [start_row_idx, end_row_idx) to each worker thread.
	int num_threads = d_g_state->num_cpus_cores;
	int num_records_per_thread = (cpu_job_conf->num_input_record+num_threads-1)/(num_threads);
	int start_row_idx = 0;
	int end_row_idx = 0;
	for (int tid = 0;tid<num_threads;tid++){
		end_row_idx = start_row_idx + num_records_per_thread;
		if (tid < (cpu_job_conf->num_input_record % num_threads) )
			end_row_idx++;
		d_g_state->panda_cpu_task_info[tid].start_row_idx = start_row_idx;
		if (end_row_idx > cpu_job_conf->num_input_record)
			end_row_idx = cpu_job_conf->num_input_record;
		d_g_state->panda_cpu_task_info[tid].end_row_idx = end_row_idx;
		if (pthread_create(&(d_g_state->panda_cpu_task[tid]),NULL,RunPandaCPUMapThread,(char *)&(d_g_state->panda_cpu_task_info[tid]))!=0)
			perror("Thread creation failed!\n");
		start_row_idx = end_row_idx;
	}//for
	for (int tid = 0;tid<num_threads;tid++){
		void *exitstat;
		if (pthread_join(d_g_state->panda_cpu_task[tid],&exitstat)!=0)
			perror("joining failed");
	}//for
	ShowLog("CPU_GROUP_ID:[%d] DONE", d_g_state->cpu_group_id);
	return 0;
}//int

// Host: legacy CPU map driver; the whole body is compiled out unless DEV_MODE is
// defined, in which case it mirrors StartCPUMap2 but reads tasks from d_g_state.
int StartCPUMap(cpu_context *d_g_state)
{
#ifdef DEV_MODE
	ShowLog("there are %d map tasks.",d_g_state->num_input_record);
	if (d_g_state->num_input_record<=0) { ShowError("Error: no any input keys"); exit(-1);}
	if (d_g_state->input_keyval_arr == NULL) { ShowError("Error: input_keyval_arr == NULL"); exit(-1);}
	if (d_g_state->num_cpus_cores <= 0) { ShowError("Error: d_g_state->num_cpus == 0"); exit(-1);}

	ShowLog("prepare buffer to store intermediate results");
	keyval_arr_t *d_keyval_arr_p;  // NOTE(review): unused
	int *count = NULL;             // NOTE(review): unused

	ShowLog("the number of cpus used in computation:%d",d_g_state->num_cpus_cores);

	int num_threads = d_g_state->num_cpus_cores;
	ShowLog("start_row_id CPUMapPartitioner num_threads:%d num_input_record:%d",num_threads, d_g_state->num_input_record);
	int num_records_per_thread = (d_g_state->num_input_record+num_threads-1)/(num_threads);
	int start_row_idx = 0;
	int end_idx = 0;
	for (int tid = 0;tid<num_threads;tid++){
		end_idx = start_row_idx + num_records_per_thread;
		if (tid < (d_g_state->num_input_record % num_threads) )
			end_idx++;
		d_g_state->panda_cpu_task_info[tid].start_row_idx = start_row_idx;
		if (end_idx > d_g_state->num_input_record)
			end_idx = d_g_state->num_input_record;
		d_g_state->panda_cpu_task_info[tid].end_idx = end_idx;
		if (pthread_create(&(d_g_state->panda_cpu_task[tid]),NULL,RunPandaCPUMapThread,(char *)&(d_g_state->panda_cpu_task_info[tid]))!=0)
			perror("Thread creation failed!\n");
		start_row_idx = end_idx;
	}//for
	for (int tid = 0;tid<num_threads;tid++){
		void *exitstat;
		if (pthread_join(d_g_state->panda_cpu_task[tid],&exitstat)!=0)
			perror("joining failed");
	}//for
	ShowLog("DONE");
#endif
	return 0;
}//int

//--------------------------------------------------
// StartGPUMap
// Last Update 9/2/2012
//--------------------------------------------------
// Host: allocates the intermediate result arrays on the device, then launches
// GPUMapPartitioner once, RunGPUMapTasks totalIter times (one record slice per
// launch), and finally the local GPUCombiner.
int StartGPUMap(gpu_context *d_g_state)
{
	//0, Check status of d_g_state
	ShowLog("GPU_ID:[%d] num_input_record %d", d_g_state->gpu_id, d_g_state->num_input_record);
	if (d_g_state->num_input_record<0) { ShowLog("Error: no any input keys"); exit(-1);}
	if (d_g_state->h_input_keyval_arr == NULL) { ShowLog("Error: h_input_keyval_arr == NULL"); exit(-1);}
	if (d_g_state->num_mappers<=0) {d_g_state->num_mappers = (NUM_BLOCKS)*(NUM_THREADS);}
	if (d_g_state->num_reducers<=0) {d_g_state->num_reducers = (NUM_BLOCKS)*(NUM_THREADS);}

	//1, prepare buffer to store intermediate results
	// NOTE(review): h_keyval_arr_arr and d_keyval_arr_arr are initialized but never
	// used afterwards, and h_keyval_arr_arr is never freed.
	keyval_arr_t *h_keyval_arr_arr = (keyval_arr_t *)malloc(sizeof(keyval_arr_t)*d_g_state->num_input_record);
	keyval_arr_t *d_keyval_arr_arr;
	checkCudaErrors(cudaMalloc((void**)&(d_keyval_arr_arr),d_g_state->num_input_record*sizeof(keyval_arr_t)));
	for (int i=0; i<d_g_state->num_input_record;i++){
		h_keyval_arr_arr[i].arr = NULL;
		h_keyval_arr_arr[i].arr_len = 0;
	}//for

	// Per-record descriptor pointers, filled in by GPUMapPartitioner.
	keyval_arr_t **d_keyval_arr_arr_p;
	checkCudaErrors(cudaMalloc((void***)&(d_keyval_arr_arr_p),d_g_state->num_input_record*sizeof(keyval_arr_t*)));
	d_g_state->d_intermediate_keyval_arr_arr_p = d_keyval_arr_arr_p;

	// Per-record intermediate pair counts, zero-initialized.
	int *count = NULL;
	checkCudaErrors(cudaMalloc((void**)&(count),d_g_state->num_input_record*sizeof(int)));
	d_g_state->d_intermediate_keyval_total_count = count;
	checkCudaErrors(cudaMemset(d_g_state->d_intermediate_keyval_total_count,0,d_g_state->num_input_record*sizeof(int)));

	//4, start map
	//Note: DO *NOT* set large number of threads within block (512), which lead to too many invocation of malloc in the kernel.
	cudaThreadSynchronize();  // NOTE(review): deprecated; cudaDeviceSynchronize is used elsewhere in this file
	int numGPUCores = getGPUCoresNum();
	dim3 blocks(THREAD_BLOCK_SIZE, THREAD_BLOCK_SIZE);
	int numBlocks = (numGPUCores*16+(blocks.x*blocks.y)-1)/(blocks.x*blocks.y);
	dim3 grids(numBlocks, 1);
	int total_gpu_threads = (grids.x*grids.y*blocks.x*blocks.y);
	ShowLog("GridDim.X:%d GridDim.Y:%d BlockDim.X:%d BlockDim.Y:%d TotalGPUThreads:%d",grids.x,grids.y,blocks.x,blocks.y,total_gpu_threads);

	cudaDeviceSynchronize();
	double t1 = PandaTimer();
	GPUMapPartitioner<<<grids,blocks>>>(*d_g_state);
	cudaThreadSynchronize();
	double t2 = PandaTimer();

	// Each launch processes one record slice per thread; iterate until all done.
	int num_records_per_thread = (d_g_state->num_input_record + (total_gpu_threads)-1)/(total_gpu_threads);
	int totalIter = num_records_per_thread;
	ShowLog("GPUMapPartitioner:%f totalIter:%d",t2-t1, totalIter);
	for (int iter = 0; iter< totalIter; iter++){
		double t3 = PandaTimer();
		RunGPUMapTasks<<<grids,blocks>>>(*d_g_state, totalIter -1 - iter, totalIter);
		cudaThreadSynchronize();
		double t4 = PandaTimer();
		size_t total_mem,avail_mem;
		checkCudaErrors(cudaMemGetInfo( &avail_mem, &total_mem ));
		ShowLog("GPU_ID:[%d] RunGPUMapTasks take %f sec at iter [%d/%d] remain %d mb GPU mem processed", d_g_state->gpu_id, t4-t3,iter,totalIter, avail_mem/1024/1024);
	}//for
	ShowLog("GPU_ID:[%d] Done %d Tasks",d_g_state->gpu_id,d_g_state->num_input_record);

	//Local combiner: reset the per-record counts, then merge equal keys in place.
	t1 = PandaTimer();
	checkCudaErrors(cudaMemset(d_g_state->d_intermediate_keyval_total_count,0,d_g_state->num_input_record*sizeof(int)));
	GPUCombiner<<<grids,blocks>>>(*d_g_state);
	cudaThreadSynchronize();
	t2 = PandaTimer();
	ShowLog("GPU_ID: GPUCombiner Take:%f",t2-t1);
	return 0;
}//int

// Intentionally empty: global-state teardown is not implemented.
void DestroyDGlobalState(gpu_context * d_g_state){
}//void

// Host: runs the GPU shuffle stage (Shuffle4GPUOutput) and logs its duration.
void StartGPUShuffle(gpu_context * state)
{
	gpu_context* d_g_state = state;
	double t1 = PandaTimer();
	Shuffle4GPUOutput(d_g_state);
	double t2 = PandaTimer();
	ShowLog("GPU_ID:[%d] GPU Shuffle take %f sec", state->gpu_id,t2-t1);
}//void

// pthread entry point: applies the user cpu_map to the record range
// [start_row_idx, end_row_idx) assigned to this worker.
void *RunPandaCPUMapThread(void *ptr)
{
	panda_cpu_task_info_t *panda_cpu_task_info = (panda_cpu_task_info_t *)ptr;
	cpu_context *d_g_state = (cpu_context *)(panda_cpu_task_info->d_g_state);
	job_configuration *cpu_job_conf = (job_configuration *)(panda_cpu_task_info->cpu_job_conf);
	for (int map_idx = panda_cpu_task_info->start_row_idx; map_idx < panda_cpu_task_info->end_row_idx; map_idx++){
		keyval_t *kv_p = (keyval_t *)(&(cpu_job_conf->input_keyval_arr[map_idx]));
		cpu_map(kv_p->key,kv_p->val,kv_p->keySize,kv_p->valSize,d_g_state,map_idx);
	}//for
	ShowLog("CPU_GROUP_ID:[%d] Done :%d tasks",d_g_state->cpu_group_id, panda_cpu_task_info->end_row_idx - panda_cpu_task_info->start_row_idx);
	return NULL;
}

//Use Pthread to process Panda_Reduce
// pthread entry point: dispatches the reduce phase to either the GPU
// (StartGPUReduce on the thread's device) or the CPU (serial cpu_reduce loop).
void * Panda_Reduce(void *ptr)
{
	thread_info_t *thread_info = (thread_info_t *)ptr;
	if(thread_info->device_type == GPU_ACC){
		//TODO Multiple GPU support
		gpu_context *d_g_state = (gpu_context *)(thread_info->d_g_state);
		int num_gpus = d_g_state->num_gpus;
		if ( num_gpus <= 0){
			ShowLog("Error! num_gpus == 0 return");
			return NULL;
		}//if
		int tid = thread_info->tid;
		cudaSetDevice(tid % num_gpus);  // "% num_gpus" allows more CPU threads than GPU devices
		int gpu_id;
		cudaGetDevice(&gpu_id);
		ShowLog("Start GPU Reduce Tasks. Number of Reduce Tasks:%d",d_g_state->d_sorted_keyvals_arr_len);
		StartGPUReduce(d_g_state);
	}//if
	if(thread_info->device_type == CPU_ACC){
		cpu_context *d_g_state = (cpu_context *)(thread_info->d_g_state);
		if (d_g_state->num_cpus_cores == 0){
			ShowLog("Error! d_g_state->num_cpus == 0 return");
			return NULL;
		}
		ShowLog("Start CPU Reduce Tasks. Number of Reduce Tasks:%d",d_g_state->sorted_keyvals_arr_len);
		for (int map_idx = 0; map_idx < d_g_state->sorted_keyvals_arr_len; map_idx++){
			keyvals_t *kv_p = (keyvals_t *)(&(d_g_state->sorted_intermediate_keyvals_arr[map_idx]));
			cpu_reduce(kv_p->key, kv_p->vals, kv_p->keySize, kv_p->val_arr_len, d_g_state);
		}//for
	}//if
	return NULL;
}//void

// NOTE(review): both accessors below are declared to return void* but have empty
// bodies (no return statement) -- calling them is undefined behavior. They appear
// to be dead stubs; confirm no caller remains before removing.
__device__ void *GetVal(void *vals, int4* interOffsetSizes, int keyIndex, int valStartIndex)
{
}

__device__ void *GetKey(void *key, int4* interOffsetSizes, int keyIndex, int valStartIndex)
{
}

//-------------------------------------------------------
//Reducer
//-------------------------------------------------------
// Kernel: for each sorted key group (delimited by d_pos_arr_4_sorted_keyval_pos_arr),
// gathers the group's values into a temporary array and calls the user gpu_reduce.
__global__ void ReducePartitioner(gpu_context d_g_state)
{
	// Same interleaved ownership scheme as the map kernels, over reduce tasks.
	int num_records_per_thread = (d_g_state.d_sorted_keyvals_arr_len + (gridDim.x*blockDim.x*blockDim.y)-1)/(gridDim.x*blockDim.x*blockDim.y);
	int block_start_idx = num_records_per_thread * blockIdx.x * blockDim.x * blockDim.y;
	int thread_start_idx = block_start_idx
		+ ((threadIdx.y*blockDim.x + threadIdx.x)/STRIDE)*num_records_per_thread*STRIDE
		+ ((threadIdx.y*blockDim.x + threadIdx.x)%STRIDE);
	int thread_end_idx = thread_start_idx + num_records_per_thread*STRIDE;
	if (thread_end_idx > d_g_state.d_sorted_keyvals_arr_len)
		thread_end_idx = d_g_state.d_sorted_keyvals_arr_len;
	if (thread_start_idx >= thread_end_idx)
		return;

	int start_idx, end_idx;
	for(int reduce_task_idx=thread_start_idx; reduce_task_idx < thread_end_idx; reduce_task_idx+=STRIDE){
		// Group reduce_task_idx spans [prefix[idx-1], prefix[idx]) in the pos array.
		if (reduce_task_idx==0) start_idx = 0;
		else start_idx = d_g_state.d_pos_arr_4_sorted_keyval_pos_arr[reduce_task_idx-1];
		end_idx = d_g_state.d_pos_arr_4_sorted_keyval_pos_arr[reduce_task_idx];
		// NOTE(review): malloc result unchecked, and val_t_arr is never freed.
		val_t *val_t_arr = (val_t*)malloc(sizeof(val_t)*(end_idx-start_idx));
		int keySize = d_g_state.d_keyval_pos_arr[start_idx].keySize;
		int keyPos = d_g_state.d_keyval_pos_arr[start_idx].keyPos;
		void *key = (char*)d_g_state.d_sorted_keys_shared_buff+keyPos;
		for (int index = start_idx;index<end_idx;index++){
			int valSize = d_g_state.d_keyval_pos_arr[index].valSize;
			int valPos = d_g_state.d_keyval_pos_arr[index].valPos;
			val_t_arr[index-start_idx].valSize = valSize;
			val_t_arr[index-start_idx].val = (char*)d_g_state.d_sorted_vals_shared_buff + valPos;
		}//for
		gpu_reduce(key, val_t_arr, keySize, end_idx-start_idx, d_g_state);
	}//for
}

// Host: allocates the reduced-output array and launches ReducePartitioner with the
// same grid geometry as the map phase.
void StartGPUReduce(gpu_context *d_g_state)
{
	cudaThreadSynchronize();  // NOTE(review): deprecated API, see StartGPUMap
	d_g_state->d_reduced_keyval_arr_len = d_g_state->d_sorted_keyvals_arr_len;
	checkCudaErrors(cudaMalloc((void **)&(d_g_state->d_reduced_keyval_arr), sizeof(keyval_t)*d_g_state->d_reduced_keyval_arr_len));
	cudaThreadSynchronize();

	int numGPUCores = getGPUCoresNum();
	dim3 blocks(THREAD_BLOCK_SIZE, THREAD_BLOCK_SIZE);
	int numBlocks = (numGPUCores*16+(blocks.x*blocks.y)-1)/(blocks.x*blocks.y);
	dim3 grids(numBlocks, 1);
	int total_gpu_threads = (grids.x*grids.y*blocks.x*blocks.y);
	ShowLog("number of reduce tasks:%d total gpu threads:%d",d_g_state->d_sorted_keyvals_arr_len, total_gpu_threads);

	ReducePartitioner<<<grids,blocks>>>(*d_g_state);
	cudaThreadSynchronize();
}//void

// pthread entry point: runs the full map+shuffle pipeline on the device type
// selected in thread_info (GPU_ACC or CPU_ACC).
void* Panda_Map(void *ptr)
{
	thread_info_t *thread_info = (thread_info_t *)ptr;
	if(thread_info->device_type == GPU_ACC){
		gpu_context *d_g_state = (gpu_context *)(thread_info->d_g_state);
		InitGPUDevice(thread_info);
		InitGPUMapReduce3(d_g_state);
		StartGPUMap(d_g_state);
		StartGPUShuffle(d_g_state);
	}//if
	if(thread_info->device_type == CPU_ACC){
		cpu_context *d_g_state = (cpu_context *)(thread_info->d_g_state);
		InitCPUDevice(thread_info);
		InitCPUMapReduce2(thread_info);
		ShowLog("CPU_GROUP_ID:[%d] Start CPU Map Tasks",d_g_state->cpu_group_id);
		StartCPUMap2(thread_info);
		StartCPUShuffle2(thread_info);
	}
	return NULL;
}//Panda_Map

// Host: end-of-job log marker (legacy Spec_t interface).
void FinishMapReduce(Spec_t* spec)
{
	ShowLog( "=====finish panda mapreduce=====");
}//void

// Host: end-of-job diagnostics; reports remaining free device memory.
void FinishMapReduce2(gpu_context* state)
{
	size_t total_mem,avail_mem, heap_limit;  // NOTE(review): heap_limit unused
	checkCudaErrors(cudaMemGetInfo( &avail_mem, &total_mem ));
	ShowLog("avail_mem:%d",avail_mem);
}//void

#endif //__PANDALIB_CU__
ed6b74fa0f579110c45d15e6ab11f33991530ce9.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <vector>
#include "thrust/device_vector.h"
#include "device_atomic_functions.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/layers/hotspot_layer.hpp"

#define CV_PI 3.1415926535897932384626433832795
// 2-D isotropic Gaussian density with std deviation 'gaussian_std', centered at
// (x0, y0), evaluated at (x, y). Expands a reference to 'gaussian_std', which must
// be in scope at the expansion site.
#define GAUSSIAN(x0,y0,x,y) 0.5 / gaussian_std / gaussian_std / CV_PI * exp(-0.5 * (((x0)-(x)) * ((x0)-(x)) + ((y0)-(y)) * ((y0)-(y))) / gaussian_std / gaussian_std)

namespace caffe {

// Densities at or below this threshold are written as exactly zero in the heatmap.
__device__ __constant__ float kEps= 1e-4;

// Kernel: renders one Gaussian "hotspot" heatmap per (sample n, point p) pair into
// top_data, laid out as (num, num_point, target_height, target_width).
// ("Foward" [sic] -- the typo is kept because the launch site uses the same name.)
template <typename Dtype>
__global__ void HotspotFoward(const int num, const int num_point,
	const Dtype gaussian_std,
	const int data_height, const int data_width, const bool mean_removed,
	const int target_height, const int target_width,
	const Dtype* point_data, Dtype* top_data) {
	CUDA_KERNEL_LOOP(index, num * target_height * target_width * num_point) {
		// Decompose the flat index into (n, h, w, p), p varying fastest.
		int n = index / (target_height * target_width * num_point);
		int sp = index % (target_height * target_width * num_point);
		int h = sp / (target_width * num_point);
		int pw = sp % (target_width * num_point);
		int w = pw / num_point;
		int p = pw % num_point;
		// Rescale the point from input-image coordinates to heatmap coordinates;
		// when mean_removed, coordinates are centered, so shift by half the extent.
		Dtype p1 = (point_data[n * num_point * 2 + p * 2] / data_width + (mean_removed ? 0.5 : 0)) * target_width;
		Dtype p2 = (point_data[n * num_point * 2 + p * 2 + 1] / data_height + (mean_removed ? 0.5 : 0)) * target_height;
		Dtype temp = GAUSSIAN(p1, p2, w, h);
		if (temp > kEps) {
			top_data[(((n * num_point + p) * target_height + h) * target_width + w)] = temp;
		}
		else {
			// Clamp negligible densities to zero for a sparse heatmap.
			top_data[(((n * num_point + p) * target_height + h) * target_width + w)] = 0;
		}
	}
}

// Forward pass: one device thread per output heatmap element.
template <typename Dtype>
void HotspotLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
	const vector<Blob<Dtype>*>& top) {
	Dtype* top_data = top[0]->mutable_gpu_data();
	const Dtype* point_data = bottom[0]->gpu_data();
	// bottom[0] channel dim packs (x, y) pairs, hence the /2.
	const int num_point = bottom[0]->shape(1) / 2;
	const int num = bottom[0]->num();
	HotspotFoward<Dtype> << <CAFFE_GET_BLOCKS(num * num_point * height_ * width_),
		CAFFE_CUDA_NUM_THREADS >> >(num, num_point, gaussian_std_, data_height_, data_width_, mean_removed_,
			height_, width_, point_data, top_data);
	CUDA_POST_KERNEL_CHECK;
}

// Backward pass is intentionally not implemented for this layer.
template <typename Dtype>
void HotspotLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
	const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
	NOT_IMPLEMENTED;
}

INSTANTIATE_LAYER_GPU_FUNCS(HotspotLayer);

}  // namespace caffe
ed6b74fa0f579110c45d15e6ab11f33991530ce9.cu
#include <algorithm>
#include <cfloat>
#include <vector>
#include "thrust/device_vector.h"
#include "device_atomic_functions.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/layers/hotspot_layer.hpp"

#define CV_PI 3.1415926535897932384626433832795
// 2-D isotropic Gaussian density with std deviation 'gaussian_std', centered at
// (x0, y0), evaluated at (x, y). Expands a reference to 'gaussian_std', which must
// be in scope at the expansion site.
#define GAUSSIAN(x0,y0,x,y) 0.5 / gaussian_std / gaussian_std / CV_PI * exp(-0.5 * (((x0)-(x)) * ((x0)-(x)) + ((y0)-(y)) * ((y0)-(y))) / gaussian_std / gaussian_std)

namespace caffe {

// Densities at or below this threshold are written as exactly zero in the heatmap.
__device__ __constant__ float kEps= 1e-4;

// Kernel: renders one Gaussian "hotspot" heatmap per (sample n, point p) pair into
// top_data, laid out as (num, num_point, target_height, target_width).
// ("Foward" [sic] -- the typo is kept because the launch site uses the same name.)
template <typename Dtype>
__global__ void HotspotFoward(const int num, const int num_point,
	const Dtype gaussian_std,
	const int data_height, const int data_width, const bool mean_removed,
	const int target_height, const int target_width,
	const Dtype* point_data, Dtype* top_data) {
	CUDA_KERNEL_LOOP(index, num * target_height * target_width * num_point) {
		// Decompose the flat index into (n, h, w, p), p varying fastest.
		int n = index / (target_height * target_width * num_point);
		int sp = index % (target_height * target_width * num_point);
		int h = sp / (target_width * num_point);
		int pw = sp % (target_width * num_point);
		int w = pw / num_point;
		int p = pw % num_point;
		// Rescale the point from input-image coordinates to heatmap coordinates;
		// when mean_removed, coordinates are centered, so shift by half the extent.
		Dtype p1 = (point_data[n * num_point * 2 + p * 2] / data_width + (mean_removed ? 0.5 : 0)) * target_width;
		Dtype p2 = (point_data[n * num_point * 2 + p * 2 + 1] / data_height + (mean_removed ? 0.5 : 0)) * target_height;
		Dtype temp = GAUSSIAN(p1, p2, w, h);
		if (temp > kEps) {
			top_data[(((n * num_point + p) * target_height + h) * target_width + w)] = temp;
		}
		else {
			// Clamp negligible densities to zero for a sparse heatmap.
			top_data[(((n * num_point + p) * target_height + h) * target_width + w)] = 0;
		}
	}
}

// Forward pass: one device thread per output heatmap element.
template <typename Dtype>
void HotspotLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
	const vector<Blob<Dtype>*>& top) {
	Dtype* top_data = top[0]->mutable_gpu_data();
	const Dtype* point_data = bottom[0]->gpu_data();
	// bottom[0] channel dim packs (x, y) pairs, hence the /2.
	const int num_point = bottom[0]->shape(1) / 2;
	const int num = bottom[0]->num();
	HotspotFoward<Dtype> << <CAFFE_GET_BLOCKS(num * num_point * height_ * width_),
		CAFFE_CUDA_NUM_THREADS >> >(num, num_point, gaussian_std_, data_height_, data_width_, mean_removed_,
			height_, width_, point_data, top_data);
	CUDA_POST_KERNEL_CHECK;
}

// Backward pass is intentionally not implemented for this layer.
template <typename Dtype>
void HotspotLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
	const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
	NOT_IMPLEMENTED;
}

INSTANTIATE_LAYER_GPU_FUNCS(HotspotLayer);

}  // namespace caffe
220a97dd6837cdabbafdbe34626dc70dcd4b810d.hip
// !!! This is a file automatically generated by hipify!!!
// Chunked k-nearest-neighbour classifier for the UCI shuttle dataset.
// FIX(review): the original include directives were written as "#include < stdio.h >"
// with spaces inside the angle brackets, which do not name the standard headers and
// fail to resolve; they are corrected below. The original also included <cuda.h>,
// which does not belong in a hipified translation unit (hip/hip_runtime.h already
// provides the runtime API), so it has been dropped.
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>  // malloc/free/exit (previously relied on implicit declarations)
#include <time.h>
#include <math.h>

#define row 50000      // train records per chunk
#define col 10         // train features (+1 class label in the last column)
#define test_row 13000 // test records per chunk
#define test_col 10    // test features (+1 class label in the last column)

// Kernel: per test point i, merge the per-chunk k-nearest lists (set[chunk] holds
// (distance, class) pairs sorted ascending) into class vote counts d_kneighbours,
// and write the majority class (1-based) into res_class[i].
// FIXME(review): index_chunks is a single global array shared by ALL threads, yet it
// is zeroed by every thread and then used as per-thread merge cursors -- this is a
// data race whenever more than one thread runs. Each thread needs its own cursor
// array (e.g. index_chunks sized test_row*total_chunks_train, indexed by i).
__global__ void class_classification(int * index_chunks, double * * set,
	int total_chunks_train, int k, int * d_kneighbours, int set1, int * res_class) {
	int i = blockDim.x * blockIdx.x + threadIdx.x;
	int set_i;
	int min = 0;
	for (int f = 0; f < total_chunks_train; f++)
		index_chunks[f] = 0;
	if (i < test_row) {
		// k-way merge: take the k globally smallest distances across chunks.
		for (int l = 0; l < k; l++) {
			min = 0;
			for (int j = 1; j < total_chunks_train; j++) {
				if (set[min][(index_chunks[min] * test_row + i) * 2] > set[j][(index_chunks[j] * test_row + i) * 2])
					min = j;
			}
			// Vote for the class label stored alongside the winning distance.
			set_i = i * set1;
			d_kneighbours[(int)(set[min][(index_chunks[min] * test_row + i) * 2 + 1]) - 1 + set_i] += 1;
			index_chunks[min]++;
		}
		// Majority vote over the set1 classes.
		set_i = i * set1;
		int max = 0;
		for (int l = 1; l < set1; l++) {
			if (d_kneighbours[set_i + l] > d_kneighbours[set_i + max])
				max = l;
		}
		res_class[i] = max + 1;  // classes are 1-based
	}
}

// Kernel: partial selection sort -- moves the k smallest (distance, class) pairs for
// test point i to the front of its column in 'distance' (layout: pair j for test
// point i lives at distance[2*(j*test_row + i)]).
__global__ void KminNeighbourFind(double * distance, int k) {
	int i = blockDim.x * blockIdx.x + threadIdx.x;
	if (i < test_row) {
		for (int i1 = 0; i1 < k; i1++) {
			int min = 2 * (i1 * test_row + i);
			for (int j1 = i1 + 1; j1 < row; j1++) {
				if (distance[2 * (j1 * test_row + i)] < distance[min])
					min = 2 * (j1 * test_row + i);
			}
			// Swap both the distance and its paired class label.
			int dist = 2 * (i1 * test_row + i), clas = 2 * (i1 * test_row + i) + 1;
			double temp = distance[dist];
			distance[dist] = distance[min];
			distance[min] = temp;
			temp = distance[clas];
			distance[clas] = distance[min + 1];
			distance[min + 1] = temp;
		}
	}
}

// Kernel: Euclidean distance between every (train row 'ro', test row 'co') pair over
// the first col-1 features; the train row's class label (last column) is stored next
// to each distance.
__global__ void Euclidian_distance(double * d_train, double * d_test, double * distance) {
	int ro = blockIdx.x * blockDim.x + threadIdx.x;
	int co = blockIdx.y * blockDim.y + threadIdx.y;
	int distanceid = 2 * (ro * test_row + co);
	double sum = 0, diff = 0;
	// Boundary guard: grid may overshoot both dimensions.
	if (ro < row && co < test_row) {
		for (int i = 0; i < col - 1; i++) {
			diff = (d_train[ro * col + i] - d_test[co * col + i]);
			sum += diff * diff;
		}
		distance[distanceid] = sqrt(sum);
		distance[distanceid + 1] = d_train[ro * col + col - 1];  // class label
	}
}

// Driver: reads train/test data in fixed-size chunks, computes per-chunk k-nearest
// neighbours on the GPU, merges them, and reports the misclassification count.
int main() {
	int count;
	clock_t s_time, e_time;
	double t_time;
	FILE * myfile, * myfilet;
	int k, i, j;
	double train[row * col], test1[test_row * test_col];
	double * d_train, * d_test;
	double * distance;

	printf("Enter the k value to apply k nearest neighbour algorithm");
	scanf("%d", & k);
	printf("\n");
	int set;
	printf("Enter the total classes present in your dataset\n");
	scanf("%d", & set);

	myfile = fopen("shuttle.trn", "r");
	if (myfile == NULL) { printf("data not open\n"); exit(0); }
	else { printf("Successfully open\n"); }
	myfilet = fopen("shuttle.tst", "r");
	if (myfilet == NULL) { printf("Test data not open\n"); exit(0); }
	else { printf("Test file open successfully\n"); }

	// Dataset sizes, split into fixed-size chunks (ceil division).
	int total_train, total_test;
	printf("Enter total no of train data elements\n");
	scanf("%d", & total_train);
	printf("Enter total no of test data elements\n");
	scanf("%d", & total_test);
	int total_chunks_train = (total_train - 1) / row + 1;
	int total_chunks_test = (total_test - 1) / test_row + 1;
	printf("Total train and test chunks are %d and %d \n", total_chunks_train, total_chunks_test);

	s_time = clock();
	for (int test_c = 0; test_c < total_chunks_test; test_c++) {
		printf("\nTest Case chunk no %d is on working state", test_c + 1);
		for (i = 0; i < test_row; i++) {
			for (j = 0; j < test_col; j++) {
				fscanf(myfilet, "%lf", & test1[i * test_col + j]);
			}
		}
		// FIX(review): close the previous handle before re-opening; the original
		// leaked one FILE* per test chunk.
		fclose(myfile);
		myfile = fopen("shuttle.trn", "r");

		// Per-train-chunk k-nearest lists, gathered on the host.
		double * * set_train_kneigh = (double * * ) malloc(total_chunks_train * sizeof(double * ));
		int * res_class, * h_class;
		for (int h = 0; h < total_chunks_train; h++)
			set_train_kneigh[h] = (double * ) malloc(2 * k * test_row * sizeof(double));

		for (int train_c = 0; train_c < total_chunks_train; train_c++) {
			printf("\nTrain Case chunk no %d is on working state", train_c + 1);
			for (i = 0; i < row; i++) {
				for (j = 0; j < col; j++) {
					fscanf(myfile, "%lf", & train[i * col + j]);
				}
			}
			hipError_t cudastatus;
			cudastatus = hipDeviceReset();
			if (cudastatus != hipSuccess) { fprintf(stderr, " hipDeviceReset failed!"); return 1; }
			cudastatus = hipSetDevice(0);
			if (cudastatus != hipSuccess) { fprintf(stderr, " hipSetDevice failed!"); return 1; }
			else printf(" Working \n ");

			size_t size = row * col * sizeof(double);
			size_t size1 = test_row * test_col * sizeof(double);
			size_t distance_size = 2 * row * test_row * sizeof(double);
			hipMalloc( & d_train, size);
			hipMalloc( & d_test, size1);
			hipMalloc( & distance, distance_size);
			hipMemcpy(d_train, train, size, hipMemcpyHostToDevice);
			hipMemcpy(d_test, test1, size1, hipMemcpyHostToDevice);

			// One thread per (train row, test row) pair.
			dim3 dimgrid((row - 1) / 16 + 1, (test_row - 1) / 16 + 1, 1);
			dim3 dimblock(16, 16, 1);
			Euclidian_distance <<< dimgrid, dimblock >>> (d_train, d_test, distance);
			hipFree(d_train);
			hipFree(d_test);
			// Partial-sort so the k smallest pairs sit at the front of 'distance'.
			KminNeighbourFind <<< (test_row - 1) / 16 + 1, 16 >>> (distance, k);
			hipMemcpy(set_train_kneigh[train_c], distance, 2 * k * test_row * sizeof(double), hipMemcpyDeviceToHost);
		}

		int * index_chunks;
		int * d_kneighbours, * h_kneighbours;
		double * * set_nei;
		size_t neighbour_size = test_row * set * sizeof(int);
		hipMalloc( & d_kneighbours, neighbour_size);
		size_t class_mem = test_row * sizeof(int);
		h_class = (int * ) malloc(class_mem);
		h_kneighbours = (int * ) malloc(neighbour_size);
		hipMalloc( & res_class, class_mem);
		hipMalloc( & index_chunks, total_chunks_train * sizeof(int));
		hipMalloc( & set_nei, test_row * 2 * k * total_chunks_train * sizeof(double));
		// FIXME(review): this copies from set_train_kneigh, which is an array of
		// total_chunks_train HOST pointers (only total_chunks_train*sizeof(double*)
		// bytes), not a flat buffer of that many doubles -- the copy reads out of
		// bounds and the kernel then dereferences host pointers through set[].
		// The data needs to be flattened (or the pointer table deep-copied) instead.
		hipMemcpy(set_nei, set_train_kneigh, test_row * 2 * k * total_chunks_train * sizeof(double), hipMemcpyHostToDevice);
		class_classification <<< (test_row - 1) / 16 + 1, 16 >>> (index_chunks, set_nei, total_chunks_train, k, d_kneighbours, set, res_class);
		hipMemcpy(h_class, res_class, class_mem, hipMemcpyDeviceToHost);
		hipMemcpy(h_kneighbours, d_kneighbours, neighbour_size, hipMemcpyDeviceToHost);

		hipFree(d_kneighbours);
		hipFree(res_class);
		hipFree(index_chunks);
		hipFree(set_nei);
		count = 0;
		free(h_kneighbours);
		// Compare predictions against the label column of the test chunk.
		for (i = 0; i < test_row; i++) {
			if (test1[i * col + col - 1] != h_class[i])
				count++;
		}
		hipFree(distance);
		// FIX(review): release the per-chunk host buffers; the original leaked
		// h_class and every set_train_kneigh row each test chunk.
		for (int h = 0; h < total_chunks_train; h++)
			free(set_train_kneigh[h]);
		free(set_train_kneigh);
		free(h_class);
		printf("count unmatched in first %d chunk size is %d\n", count, i);
	}
	e_time = clock();
	t_time = ((double)(e_time - s_time)) / 1000000;
	printf("Count unmachted %d", count);
	printf("\n \n Total time taken %0.2lf second", t_time);
	return 0;
}
220a97dd6837cdabbafdbe34626dc70dcd4b810d.cu
#include < stdio.h > #include < cuda.h > #include < time.h > #include < math.h > #define row 50000 #define col 10 #define test_row 13000 #define test_col 10 __global__ void class_classification(int * index_chunks, double * * set, int total_chunks_train, int k, int * d_kneighbours, int set1, int * res_class) { //double k_nearest[k][2]; int i = blockDim.x * blockIdx.x + threadIdx.x; int set_i; int min = 0; //int index_chunks[total_chunks_train]; for (int f = 0; f < total_chunks_train; f++) index_chunks[f] = 0; if (i < test_row) { for (int l = 0; l < k; l++) { min = 0; //set[0][i*test_row+index_chunks[total_chunks_train]*2]; for (int j = 1; j < total_chunks_train; j++) { if (set[min][(index_chunks[min] * test_row + i) * 2] > set[j][(index_chunks[j] * test_row + i) * 2]) min = j; } //k_nearest[l][0]=set[min][i*test_row+index_chunks[min]*2]; //k_nearest[l][1]=set[min][i*test_row+index_chunks[min]*2+1]; set_i = i * set1; d_kneighbours[(int)(set[min][(index_chunks[min] * test_row + i) * 2 + 1]) - 1 + set_i] += 1; index_chunks[min]++; } set_i = i * set1; int max = 0; for (int l = 1; l < set1; l++) { if (d_kneighbours[set_i + l] > d_kneighbours[set_i + max]) max = l; } res_class[i] = max + 1; } } __global__ void KminNeighbourFind(double * distance, int k) { int i = blockDim.x * blockIdx.x + threadIdx.x; //int index=i*row*2; //int set_i; if (i < test_row) { for (int i1 = 0; i1 < k; i1++) { int min = 2 * (i1 * test_row + i); for (int j1 = i1 + 1; j1 < row; j1++) { if (distance[2 * (j1 * test_row + i)] < distance[min]) min = 2 * (j1 * test_row + i); } int dist = 2 * (i1 * test_row + i), clas = 2 * (i1 * test_row + i) + 1; double temp = distance[dist]; distance[dist] = distance[min]; distance[min] = temp; temp = distance[clas]; distance[clas] = distance[min + 1]; distance[min + 1] = temp; } } } __global__ void Euclidian_distance(double * d_train, double * d_test, double * distance) { int ro = blockIdx.x * blockDim.x + threadIdx.x; int co = blockIdx.y * blockDim.y + threadIdx.y; 
int distanceid = 2 * (ro * test_row + co); double sum = 0, diff = 0; //checking boundary condition if (ro < row && co < test_row) { for (int i = 0; i < col - 1; i++) { diff = (d_train[ro * col + i] - d_test[co * col + i]); sum += diff * diff; } distance[distanceid] = sqrt(sum); distance[distanceid + 1] = d_train[ro * col + col - 1]; } // __syncthreads(); } int main() { int count; clock_t s_time, e_time; double t_time; FILE * myfile, * myfilet; int k, i, j; double train[row * col], test1[test_row * test_col]; double * d_train, * d_test; double * distance; printf("Enter the k value to apply k nearest neighbour algorithm"); scanf("%d", & k); printf("\n"); int set; printf("Enter the total classes present in your dataset\n"); scanf("%d", & set); myfile = fopen("shuttle.trn", "r"); if (myfile == NULL) { printf("data not open\n"); exit(0); } else { printf("Successfully open\n"); } myfilet = fopen("shuttle.tst", "r"); if (myfilet == NULL) { printf("Test data not open\n"); exit(0); } else { printf("Test file open successfully\n"); } //Test cases and train set int total_train, total_test; printf("Enter total no of train data elements\n"); scanf("%d", & total_train); printf("Enter total no of test data elements\n"); scanf("%d", & total_test); int total_chunks_train = (total_train - 1) / row + 1; int total_chunks_test = (total_test - 1) / test_row + 1; printf("Total train and test chunks are %d and %d \n", total_chunks_train, total_chunks_test); //scanning test data //chunk of test cases s_time = clock(); for (int test_c = 0; test_c < total_chunks_test; test_c++) { printf("\nTest Case chunk no %d is on working state", test_c + 1); for (i = 0; i < test_row; i++) { for (j = 0; j < test_col; j++) { fscanf(myfilet, "%lf", & test1[i * test_col + j]); } } myfile = fopen("shuttle.trn", "r"); double * * set_train_kneigh = (double * * ) malloc(total_chunks_train * sizeof(double * )); int * res_class, * h_class; for (int h = 0; h < total_chunks_train; h++) set_train_kneigh[h] = (double 
* ) malloc(2 * k * test_row * sizeof(double)); for (int train_c = 0; train_c < total_chunks_train; train_c++) { printf("\nTrain Case chunk no %d is on working state", train_c + 1); //myfile=fopen("shuttle.trn","r"); for (i = 0; i < row; i++) { for (j = 0; j < col; j++) { fscanf(myfile, "%lf", & train[i * col + j]); } } cudaError_t cudastatus; cudastatus = cudaDeviceReset(); if (cudastatus != cudaSuccess) { fprintf(stderr, " cudaDeviceReset failed!"); return 1; } cudastatus = cudaSetDevice(0); if (cudastatus != cudaSuccess) { fprintf(stderr, " cudaSetDevice failed!"); return 1; } else printf(" Working \n "); //s_time=clock(); size_t size = row * col * sizeof(double); size_t size1 = test_row * test_col * sizeof(double); size_t distance_size = 2 * row * test_row * sizeof(double); cudaMalloc( & d_train, size); cudaMalloc( & d_test, size1); cudaMalloc( & distance, distance_size); //cudaMalloc(&res_class,class_mem); //copy the data from host to device memory cudaMemcpy(d_train, train, size, cudaMemcpyHostToDevice); cudaMemcpy(d_test, test1, size1, cudaMemcpyHostToDevice); //int threads=test_row*row; dim3 dimgrid((row - 1) / 16 + 1, (test_row - 1) / 16 + 1, 1); dim3 dimblock(16, 16, 1); Euclidian_distance << < dimgrid, dimblock >>> (d_train, d_test, distance); //cudaMemcpy(h_distance,distance,distance_size,cudaMemcpyDeviceToHost); cudaFree(d_train); cudaFree(d_test); KminNeighbourFind << < (test_row - 1) / 16 + 1, 16 >>> (distance, k); //double kdistance[2*k*test_row]; cudaMemcpy(set_train_kneigh[train_c], distance, 2 * k * test_row * sizeof(double), cudaMemcpyDeviceToHost); } //class_classification(int index_chunks[],double **set, int total_chunks_train ,int k,int *d_kneighbours,int set1,int *res_class) int * index_chunks; //int *res_class,*h_class; int * d_kneighbours, * h_kneighbours; double * * set_nei; size_t neighbour_size = test_row * set * sizeof(int); cudaMalloc( & d_kneighbours, neighbour_size); size_t class_mem = test_row * sizeof(int); h_class = (int * ) 
malloc(class_mem); h_kneighbours = (int * ) malloc(neighbour_size); cudaMalloc( & res_class, class_mem); cudaMalloc( & index_chunks, total_chunks_train * sizeof(int)); cudaMalloc( & set_nei, test_row * 2 * k * total_chunks_train * sizeof(double)); cudaMemcpy(set_nei, set_train_kneigh, test_row * 2 * k * total_chunks_train * sizeof(double), cudaMemcpyHostToDevice); class_classification << < (test_row - 1) / 16 + 1, 16 >>> (index_chunks, set_nei, total_chunks_train, k, d_kneighbours, set, res_class); cudaMemcpy(h_class, res_class, class_mem, cudaMemcpyDeviceToHost); cudaMemcpy(h_kneighbours, d_kneighbours, neighbour_size, cudaMemcpyDeviceToHost); for (i = 0; i < test_row; i++) { for (j = 0; j < set; j++) { //printf("class freq of test case %d class no %d value %d\n",i+1,j,h_kneighbours[i*set+j]); } } //cudaFree(distance1); cudaFree(d_kneighbours); cudaFree(res_class); cudaFree(index_chunks); cudaFree(set_nei); count = 0; free(h_kneighbours); //free(set_train_kneigh); for (i = 0; i < test_row; i++) { if (test1[i * col + col - 1] != h_class[i]) count++; //printf("Given Test point %d belongs to class %d\n",i+1,h_class[i]); } cudaFree(distance); printf("count unmatched in first %d chunk size is %d\n", count, i); //count=0; } e_time = clock(); t_time = ((double)(e_time - s_time)) / 1000000; printf("Count unmachted %d", count); printf("\n \n Total time taken %0.2lf second", t_time); return 0; }
6f23fbd884ed6a3fbb46bffd564394d34837af6d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #include "lodepng.h"//including the given c file for image data extraction #include <time.h> #include "wm.h" #include <math.h> __global__ void convolveKernel(unsigned char* inputReference, unsigned char* inputChange, unsigned width, unsigned height, int threads, float * w) { int outPutHeight = (height - 2); int outPutWidth = (width - 2); /* *number of pixels each thread will need to do initially in the changed picture * Ignore RGB for now will mean we each thread will deal with RGB for all pixels it is resposible for this * is a slower paralization scheme only in the case where threads > ((outPutHeight * outPutWidth) / threads) * also leads to undefined behaviour. I think this is a fine caveat in saving me some time, the assumption * that threads won't be greater 9,203,524 is fair for this course. */ int taskSize = ((outPutHeight * outPutWidth) / threads); //num output pixels / thread // Each thread will do portion of work from (thread global location * tasksize)->(thread global location * tasksize) + tasksize int startPix = (threadIdx.x + (blockDim.x * blockIdx.x)) * taskSize; //starting location in output image /* * Have each thread do (outputwidth*outputheight*3)/threads) but if (outputwidth*outputheight*3)%threads isn't 0 there will be some left over threads * will deal with this later. 
* * If I want to do convolution on pixel 0 of inputChange, I need data from pixels 0,1,2,3840,3841,3842,7680,7681,7681 in inputRef * multiply by 4 to get location in list and add 0,1,2 for each different value R,G,B * since our images are different sizes we must consider the conversion, all pixels shifted 1 down, 1 right * pixel 0 in output = pixel 3841 in original * 9000 = 12841 * opx = inpx + 3840 + 1 * this brings us to our next equation for input pixel i we need pixel i+-3840, i+-1, i +- 3840 +-1 and i itself * hope I explained it right, its a bit confusing */ int outPix; //Current Pixel (output) int outLoc; //Current location of pixel in list (output) int outRow; int outCol; int inPix; //Current Pixel (input, larger) int inLoc; //Current location of pixel in list (input, larger image) int type; // R = 0, G = 1, B = 1 float x; for (int a = 0; a < 3; a++) { for (int i = 0; i < taskSize; i++) { outPix = startPix + i; //Get current pixel in output image outCol = outPix % outPutWidth; //Get current pixel col in output image outRow = (outPix - outCol) / outPutWidth; //Get current pixel row in output image inPix = ((outRow + 1) * width) + outCol + 1; //Get current pixel in input image outLoc = outPix * 4 + a; //convert both: Pixel->Item in PNG List inLoc = inPix * 4 + a; if (a == 2) { inputChange[outLoc+1] = inputReference[inLoc+1]; //just set the alpha value to what it was before when you are working on G } x = inputReference[inLoc - 4 - width*4] * w[0] + inputReference[inLoc - width * 4] * w[1] + inputReference[inLoc + 4 - width * 4] * w[2] + //Top row inputReference[inLoc - 4] * w[3] + inputReference[inLoc] * w[4] + inputReference[inLoc + 4] * w[5] + //mid row inputReference[inLoc - 4 + width * 4] * w[6] + inputReference[inLoc + width * 4] * w[7] + inputReference[inLoc + 4 + width * 4] * w[8]; //bot row if (x < 0) { x = 0; } else if (x > 255) { x = 255; } inputChange[outLoc] = (unsigned char) ((int) round(x)); } } //now some threads will need to do an extra one, 
in some cases int leftover = ((outPutHeight * outPutWidth) % threads); if ((leftover != 0) && ((threadIdx.x + (blockDim.x * blockIdx.x)) <= leftover)) { for (int a = 0; a < 3; a++) { outPix = taskSize * threads + threadIdx.x; //get current pixel in output image outCol = outPix % outPutWidth; //get current pixel col in output image outRow = (outPix - outCol) / outPutWidth; //get current pixel row in output image inPix = ((outRow + 1) * width) + outCol + 1; //get current pixel in input image outLoc = outPix * 4 + a; //convert both: pixel->item in png list inLoc = inPix * 4 + a; if (a == 2) { inputChange[outLoc + 1] = inputReference[inLoc + 1]; //just set the alpha value to what it was before when you are working on G } x = inputReference[inLoc - 4 - width * 4] * w[0] + inputReference[inLoc - width * 4] * w[1] + inputReference[inLoc + 4 - width * 4] * w[2] + //Top row inputReference[inLoc - 4] * w[3] + inputReference[inLoc] * w[4] + inputReference[inLoc + 4] * w[5] + //mid row inputReference[inLoc - 4 + width * 4] * w[6] + inputReference[inLoc + width * 4] * w[7] + inputReference[inLoc + 4 + width * 4] * w[8]; //bot row if (x < 0) { x = 0; } else if (x > 255) { x = 255; } inputChange[outLoc] = (unsigned char)((int)round(x)); } } } void convolve(unsigned char* input, char* output, unsigned width, unsigned height, int threads) { //Note: looked up the max number of threads per block, 1024 int blocks; if (threads > 1024) { blocks = (threads + (1024 - 1)) / 1024; } else { blocks = 1; } printf("Will convolve using %d threads contained in %d blocks\n", threads, blocks); //From Tutorial //1. 
allocate unified memory and need two areas, one for image input and one for image output since for convolution we can't edit image in place printf("\nAllocating unified memory..\n"); unsigned char* imageDataUNI; //Data for referencing hipMallocManaged(&imageDataUNI, (width * height * 4 * sizeof(unsigned char))); unsigned char* imageChangeDataUNI; //data for changing, smaller since we don't do sides hipMallocManaged(&imageChangeDataUNI, ((width - 2) * (height - 2) * 4 * sizeof(unsigned char))); float * weights; //load weights into unified memory hipMallocManaged(&weights, (9 * sizeof(float))); //2. Transfer data from host to unified memory (Can do this with cpu operations) printf("\nTranfering data for reference to unified memory..\n"); for (int i = 0; i < (width * height * 4 * sizeof(unsigned char)); i++) { imageDataUNI[i] = input[i]; } for (int i = 0; i < ((width - 2) * (height - 2) * 4 * sizeof(unsigned char)); i++) { imageChangeDataUNI[i] = (unsigned char) 0; } int count = 0; for (int a = 0; a<3; a++) { for (int b = 0; b < 3; b++) { weights[count] = w[a][b]; count++; } } printf("\nConfirm correct transfer..\n"); printf("First 5 pixels from CPU mem is,\n"); for (int i = 0; i < 5; i++) { printf("%u, ", input[i]); } printf("\n"); printf("First 5 pixels from UNIfied reference mem is,\n"); for (int i = 0; i < 5; i++) { printf("%u, ", imageDataUNI[i]); } printf("\n"); printf("First 5 pixels from UNIfied change mem is,\n"); for (int i = 0; i < 5; i++) { printf("%u, ", imageChangeDataUNI[i]); } printf("\n"); printf("Weights list from unified memory is,\n"); for (int i = 0; i < 9; i++) { printf("%f, ", weights[i]); } printf("\n"); //3. 
Execute kernals "go my children, run with haste and do my bidding" printf("Sending out threads to do my dirty work..\n"); convolveKernel << <blocks, (threads / blocks) >> > (imageDataUNI, imageChangeDataUNI, width, height, threads, weights); hipDeviceSynchronize(); printf(hipGetErrorString(hipGetLastError())); printf("\nCreating output image\n"); lodepng_encode32_file(output, imageChangeDataUNI, width-2, height-2); printf("Thanks for convolving, come again\n"); free(input); hipFree(imageChangeDataUNI); hipFree(imageDataUNI); hipFree(weights); } int main(int argc, char* argv[]) { //declare all vars used in main unsigned error; unsigned char* image; unsigned width, height; //Take argument inputs char* input_filename = argv[1]; char* output_filename = argv[2]; char* numThreadsInput = argv[3]; int numThreads = atoi(numThreadsInput); //prefer to work with int in this case, use when taking command line arguments if (numThreads < 1) { fprintf(stderr, "Number of threads argument must be a valid integer."); return 1; } //load the image error = lodepng_decode32_file(&image, &width, &height, input_filename); if (error) printf("error %u: %s\n", error, lodepng_error_text(error)); else printf("Image loaded successfully!\n"); printf("Taking %s running convolution with %d threads and returning new file %s\n", input_filename, numThreads, output_filename); printf("Height: %d, Width: %d\n", height, width); //Lets start convolution convolve(image, output_filename, width, height, numThreads); // hipDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. hipError_t cudaStatus = hipDeviceReset(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceReset failed!"); return 1; } return 0; }
6f23fbd884ed6a3fbb46bffd564394d34837af6d.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #include "lodepng.h"//including the given c file for image data extraction #include <time.h> #include "wm.h" #include <math.h> __global__ void convolveKernel(unsigned char* inputReference, unsigned char* inputChange, unsigned width, unsigned height, int threads, float * w) { int outPutHeight = (height - 2); int outPutWidth = (width - 2); /* *number of pixels each thread will need to do initially in the changed picture * Ignore RGB for now will mean we each thread will deal with RGB for all pixels it is resposible for this * is a slower paralization scheme only in the case where threads > ((outPutHeight * outPutWidth) / threads) * also leads to undefined behaviour. I think this is a fine caveat in saving me some time, the assumption * that threads won't be greater 9,203,524 is fair for this course. */ int taskSize = ((outPutHeight * outPutWidth) / threads); //num output pixels / thread // Each thread will do portion of work from (thread global location * tasksize)->(thread global location * tasksize) + tasksize int startPix = (threadIdx.x + (blockDim.x * blockIdx.x)) * taskSize; //starting location in output image /* * Have each thread do (outputwidth*outputheight*3)/threads) but if (outputwidth*outputheight*3)%threads isn't 0 there will be some left over threads * will deal with this later. 
* * If I want to do convolution on pixel 0 of inputChange, I need data from pixels 0,1,2,3840,3841,3842,7680,7681,7681 in inputRef * multiply by 4 to get location in list and add 0,1,2 for each different value R,G,B * since our images are different sizes we must consider the conversion, all pixels shifted 1 down, 1 right * pixel 0 in output = pixel 3841 in original * 9000 = 12841 * opx = inpx + 3840 + 1 * this brings us to our next equation for input pixel i we need pixel i+-3840, i+-1, i +- 3840 +-1 and i itself * hope I explained it right, its a bit confusing */ int outPix; //Current Pixel (output) int outLoc; //Current location of pixel in list (output) int outRow; int outCol; int inPix; //Current Pixel (input, larger) int inLoc; //Current location of pixel in list (input, larger image) int type; // R = 0, G = 1, B = 1 float x; for (int a = 0; a < 3; a++) { for (int i = 0; i < taskSize; i++) { outPix = startPix + i; //Get current pixel in output image outCol = outPix % outPutWidth; //Get current pixel col in output image outRow = (outPix - outCol) / outPutWidth; //Get current pixel row in output image inPix = ((outRow + 1) * width) + outCol + 1; //Get current pixel in input image outLoc = outPix * 4 + a; //convert both: Pixel->Item in PNG List inLoc = inPix * 4 + a; if (a == 2) { inputChange[outLoc+1] = inputReference[inLoc+1]; //just set the alpha value to what it was before when you are working on G } x = inputReference[inLoc - 4 - width*4] * w[0] + inputReference[inLoc - width * 4] * w[1] + inputReference[inLoc + 4 - width * 4] * w[2] + //Top row inputReference[inLoc - 4] * w[3] + inputReference[inLoc] * w[4] + inputReference[inLoc + 4] * w[5] + //mid row inputReference[inLoc - 4 + width * 4] * w[6] + inputReference[inLoc + width * 4] * w[7] + inputReference[inLoc + 4 + width * 4] * w[8]; //bot row if (x < 0) { x = 0; } else if (x > 255) { x = 255; } inputChange[outLoc] = (unsigned char) ((int) round(x)); } } //now some threads will need to do an extra one, 
in some cases int leftover = ((outPutHeight * outPutWidth) % threads); if ((leftover != 0) && ((threadIdx.x + (blockDim.x * blockIdx.x)) <= leftover)) { for (int a = 0; a < 3; a++) { outPix = taskSize * threads + threadIdx.x; //get current pixel in output image outCol = outPix % outPutWidth; //get current pixel col in output image outRow = (outPix - outCol) / outPutWidth; //get current pixel row in output image inPix = ((outRow + 1) * width) + outCol + 1; //get current pixel in input image outLoc = outPix * 4 + a; //convert both: pixel->item in png list inLoc = inPix * 4 + a; if (a == 2) { inputChange[outLoc + 1] = inputReference[inLoc + 1]; //just set the alpha value to what it was before when you are working on G } x = inputReference[inLoc - 4 - width * 4] * w[0] + inputReference[inLoc - width * 4] * w[1] + inputReference[inLoc + 4 - width * 4] * w[2] + //Top row inputReference[inLoc - 4] * w[3] + inputReference[inLoc] * w[4] + inputReference[inLoc + 4] * w[5] + //mid row inputReference[inLoc - 4 + width * 4] * w[6] + inputReference[inLoc + width * 4] * w[7] + inputReference[inLoc + 4 + width * 4] * w[8]; //bot row if (x < 0) { x = 0; } else if (x > 255) { x = 255; } inputChange[outLoc] = (unsigned char)((int)round(x)); } } } void convolve(unsigned char* input, char* output, unsigned width, unsigned height, int threads) { //Note: looked up the max number of threads per block, 1024 int blocks; if (threads > 1024) { blocks = (threads + (1024 - 1)) / 1024; } else { blocks = 1; } printf("Will convolve using %d threads contained in %d blocks\n", threads, blocks); //From Tutorial //1. 
allocate unified memory and need two areas, one for image input and one for image output since for convolution we can't edit image in place printf("\nAllocating unified memory..\n"); unsigned char* imageDataUNI; //Data for referencing cudaMallocManaged(&imageDataUNI, (width * height * 4 * sizeof(unsigned char))); unsigned char* imageChangeDataUNI; //data for changing, smaller since we don't do sides cudaMallocManaged(&imageChangeDataUNI, ((width - 2) * (height - 2) * 4 * sizeof(unsigned char))); float * weights; //load weights into unified memory cudaMallocManaged(&weights, (9 * sizeof(float))); //2. Transfer data from host to unified memory (Can do this with cpu operations) printf("\nTranfering data for reference to unified memory..\n"); for (int i = 0; i < (width * height * 4 * sizeof(unsigned char)); i++) { imageDataUNI[i] = input[i]; } for (int i = 0; i < ((width - 2) * (height - 2) * 4 * sizeof(unsigned char)); i++) { imageChangeDataUNI[i] = (unsigned char) 0; } int count = 0; for (int a = 0; a<3; a++) { for (int b = 0; b < 3; b++) { weights[count] = w[a][b]; count++; } } printf("\nConfirm correct transfer..\n"); printf("First 5 pixels from CPU mem is,\n"); for (int i = 0; i < 5; i++) { printf("%u, ", input[i]); } printf("\n"); printf("First 5 pixels from UNIfied reference mem is,\n"); for (int i = 0; i < 5; i++) { printf("%u, ", imageDataUNI[i]); } printf("\n"); printf("First 5 pixels from UNIfied change mem is,\n"); for (int i = 0; i < 5; i++) { printf("%u, ", imageChangeDataUNI[i]); } printf("\n"); printf("Weights list from unified memory is,\n"); for (int i = 0; i < 9; i++) { printf("%f, ", weights[i]); } printf("\n"); //3. 
Execute kernals "go my children, run with haste and do my bidding" printf("Sending out threads to do my dirty work..\n"); convolveKernel << <blocks, (threads / blocks) >> > (imageDataUNI, imageChangeDataUNI, width, height, threads, weights); cudaDeviceSynchronize(); printf(cudaGetErrorString(cudaGetLastError())); printf("\nCreating output image\n"); lodepng_encode32_file(output, imageChangeDataUNI, width-2, height-2); printf("Thanks for convolving, come again\n"); free(input); cudaFree(imageChangeDataUNI); cudaFree(imageDataUNI); cudaFree(weights); } int main(int argc, char* argv[]) { //declare all vars used in main unsigned error; unsigned char* image; unsigned width, height; //Take argument inputs char* input_filename = argv[1]; char* output_filename = argv[2]; char* numThreadsInput = argv[3]; int numThreads = atoi(numThreadsInput); //prefer to work with int in this case, use when taking command line arguments if (numThreads < 1) { fprintf(stderr, "Number of threads argument must be a valid integer."); return 1; } //load the image error = lodepng_decode32_file(&image, &width, &height, input_filename); if (error) printf("error %u: %s\n", error, lodepng_error_text(error)); else printf("Image loaded successfully!\n"); printf("Taking %s running convolution with %d threads and returning new file %s\n", input_filename, numThreads, output_filename); printf("Height: %d, Width: %d\n", height, width); //Lets start convolution convolve(image, output_filename, width, height, numThreads); // cudaDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. cudaError_t cudaStatus = cudaDeviceReset(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceReset failed!"); return 1; } return 0; }
demo4_gpu_thread.hip
// !!! This is a file automatically generated by hipify!!! /*demo4_gpu_thread.c * * Get two numbers from input databuffer, calculate them and write the sum to output databuffer. */ #ifdef __cplusplus extern "C"{ #endif #include <stdio.h> #include <stdlib.h> #include <string.h> #include <pthread.h> #include <sys/time.h> #include <sys/resource.h> #include <sys/types.h> #include <unistd.h> #include "hashpipe.h" #include "demo4_databuf.h" #include "demo4_gpu_thread.h" #include <hip/hip_runtime.h> #include <hipfft.h> #include <time.h> #include <hip/hip_runtime.h> int g_iIsDataReadDone = FALSE; char* g_pc4Data_d = NULL; /* raw data starting address */ char* g_pc4DataRead_d = NULL; /* raw data read pointer */ int g_iNFFT = NFFT; int g_iNFFT1 = NFFT; int g_iNFFT2 = NFFT/2; int g_iNFFT3 = NFFT/4; int g_iNFFT4 = NFFT/8; int g_iNFFT5 = NFFT/16; int g_ISIZE1 = FFTPLAN1_ISIZE; int g_OSIZE1 = FFTPLAN1_OSIZE; int g_ISIZE2 = FFTPLAN2_ISIZE; int g_OSIZE2 = FFTPLAN2_OSIZE; int g_ISIZE3 = FFTPLAN3_ISIZE; int g_OSIZE3 = FFTPLAN3_OSIZE; int g_ISIZE4 = FFTPLAN4_ISIZE; int g_OSIZE4 = FFTPLAN4_OSIZE; int g_ISIZE5 = FFTPLAN5_ISIZE; int g_OSIZE5 = FFTPLAN5_OSIZE; dim3 g_dimBCopy(1, 1, 1); dim3 g_dimGCopy(1, 1); dim3 g_dimBAccum(1, 1, 1); dim3 g_dimGAccum(1, 1); int g_BatchAccumThreads; int g_BatchAccumBlocks; float* g_pf4FFTIn_d = NULL; float2* g_pf4FFTOut1_d = NULL; float2* g_pf4FFTOut2_d = NULL; float2* g_pf4FFTOut3_d = NULL; float2* g_pf4FFTOut4_d = NULL; float2* g_pf4FFTOut5_d = NULL; hipfftHandle g_stPlan1 = {0}; hipfftHandle g_stPlan2 = {0}; hipfftHandle g_stPlan3 = {0}; hipfftHandle g_stPlan4 = {0}; hipfftHandle g_stPlan5 = {0}; float* g_pf4SumStokes = NULL; float* g_pf4SumStokes_d = NULL; float* g_sumBatch1 = NULL; float* g_sumBatch2 = NULL; float* g_sumBatch3 = NULL; float* g_sumBatch4 = NULL; float* g_sumBatch5 = NULL; /* BUG: crash if file size is less than 32MB */ int g_iSizeRead = DEF_LEN_IDATA; static int Init(hashpipe_thread_args_t * args) { int iDevCount = 0; hipDeviceProp_t 
stDevProp = {0}; int iRet = EXIT_SUCCESS; hipfftResult iCUFFTRet = HIPFFT_SUCCESS; int iMaxThreadsPerBlock = 0; iRet = RegisterSignalHandlers(); if (iRet != EXIT_SUCCESS) { (void) fprintf(stderr, "ERROR: Signal-handler registration failed!\n"); return EXIT_FAILURE; } /* since CUDASafeCallWithCleanUp() calls hipGetErrorString(), it should not be used here - will cause crash if no CUDA device is found */ (void) hipGetDeviceCount(&iDevCount); if (0 == iDevCount) { (void) fprintf(stderr, "ERROR: No CUDA-capable device found!\n"); return EXIT_FAILURE; } /* just use the first device */ CUDASafeCallWithCleanUp(hipSetDevice(0)); CUDASafeCallWithCleanUp(hipGetDeviceProperties(&stDevProp, 0)); iMaxThreadsPerBlock = stDevProp.maxThreadsPerBlock; /* allocate memory for data array - 32MB is the block size for the VEGAS input buffer */ //CUDASafeCallWithCleanUp(hipMalloc((void **) &g_pc4DataRead_d, g_iSizeRead)); CUDASafeCallWithCleanUp(hipMalloc((void **) &g_pc4Data_d, g_iSizeRead)); g_pc4DataRead_d = g_pc4Data_d; /* calculate kernel parameters */ if (DEF_LEN_IDATA < iMaxThreadsPerBlock) { g_dimBCopy.x = DEF_LEN_IDATA; g_dimBAccum.x = DEF_LEN_IDATA; } else { g_dimBCopy.x = iMaxThreadsPerBlock; g_dimBAccum.x = iMaxThreadsPerBlock; } g_dimGCopy.x = (DEF_LEN_IDATA) / iMaxThreadsPerBlock; g_dimGAccum.x = (DEF_LEN_IDATA) / iMaxThreadsPerBlock; if (DEF_LEN_ODATA < iMaxThreadsPerBlock){ g_BatchAccumThreads = DEF_LEN_ODATA; } else{ g_BatchAccumThreads = iMaxThreadsPerBlock; } g_BatchAccumBlocks = DEF_LEN_ODATA/iMaxThreadsPerBlock; CUDASafeCallWithCleanUp(hipMalloc((void **) &g_pf4FFTIn_d, DEF_LEN_IDATA * sizeof(float))); CUDASafeCallWithCleanUp(hipMalloc((void **) &g_pf4FFTOut1_d, DEF_LEN_IDATA * sizeof(float2))); CUDASafeCallWithCleanUp(hipMalloc((void **) &g_pf4FFTOut2_d, DEF_LEN_IDATA * sizeof(float2))); CUDASafeCallWithCleanUp(hipMalloc((void **) &g_pf4FFTOut3_d, DEF_LEN_IDATA * sizeof(float2))); CUDASafeCallWithCleanUp(hipMalloc((void **) &g_pf4FFTOut4_d, DEF_LEN_IDATA * 
sizeof(float2))); CUDASafeCallWithCleanUp(hipMalloc((void **) &g_pf4FFTOut5_d, DEF_LEN_IDATA * sizeof(float2))); g_pf4SumStokes = (float *) malloc(DEF_LEN_IDATA * sizeof(float)); if (NULL == g_pf4SumStokes) { (void) fprintf(stderr, "ERROR: Memory allocation failed! %s.\n", strerror(errno)); return EXIT_FAILURE; } CUDASafeCallWithCleanUp(hipMalloc((void **) &g_pf4SumStokes_d, DEF_LEN_IDATA * sizeof(float))); CUDASafeCallWithCleanUp(hipMemset(g_pf4SumStokes_d, '\0', DEF_LEN_IDATA * sizeof(float))); CUDASafeCallWithCleanUp(hipMalloc((void **) &g_sumBatch1, DEF_LEN_ODATA * sizeof(float))); CUDASafeCallWithCleanUp(hipMemset(g_sumBatch1, '\0', DEF_LEN_ODATA * sizeof(float))); CUDASafeCallWithCleanUp(hipMalloc((void **) &g_sumBatch2, DEF_LEN_ODATA * sizeof(float))); CUDASafeCallWithCleanUp(hipMemset(g_sumBatch2, '\0', DEF_LEN_ODATA * sizeof(float))); CUDASafeCallWithCleanUp(hipMalloc((void **) &g_sumBatch3, DEF_LEN_ODATA * sizeof(float))); CUDASafeCallWithCleanUp(hipMemset(g_sumBatch3, '\0', DEF_LEN_ODATA * sizeof(float))); CUDASafeCallWithCleanUp(hipMalloc((void **) &g_sumBatch4, DEF_LEN_ODATA * sizeof(float))); CUDASafeCallWithCleanUp(hipMemset(g_sumBatch4, '\0', DEF_LEN_ODATA * sizeof(float))); CUDASafeCallWithCleanUp(hipMalloc((void **) &g_sumBatch5, DEF_LEN_ODATA * sizeof(float))); CUDASafeCallWithCleanUp(hipMemset(g_sumBatch5, '\0', DEF_LEN_ODATA * sizeof(float))); /* create plan */ iCUFFTRet = hipfftPlanMany(&g_stPlan1, FFTPLAN_RANK, &g_iNFFT1, &g_ISIZE1, FFTPLAN1_ISTRIDE, FFTPLAN1_IDIST, &g_OSIZE1, FFTPLAN1_OSTRIDE, FFTPLAN1_ODIST, HIPFFT_R2C, FFTPLAN1_BATCH); if (iCUFFTRet != HIPFFT_SUCCESS) { (void) fprintf(stderr, "ERROR: Plan1 creation failed!\n"); return EXIT_FAILURE; } iCUFFTRet = hipfftPlanMany(&g_stPlan2, FFTPLAN_RANK, &g_iNFFT2, &g_ISIZE2, FFTPLAN2_ISTRIDE, FFTPLAN2_IDIST, &g_OSIZE2, FFTPLAN2_OSTRIDE, FFTPLAN2_ODIST, HIPFFT_R2C, FFTPLAN2_BATCH); if (iCUFFTRet != HIPFFT_SUCCESS) { (void) fprintf(stderr, "ERROR: Plan2 creation failed!\n"); return 
EXIT_FAILURE; } iCUFFTRet = hipfftPlanMany(&g_stPlan3, FFTPLAN_RANK, &g_iNFFT3, &g_ISIZE3, FFTPLAN3_ISTRIDE, FFTPLAN3_IDIST, &g_OSIZE3, FFTPLAN3_OSTRIDE, FFTPLAN3_ODIST, HIPFFT_R2C, FFTPLAN3_BATCH); if (iCUFFTRet != HIPFFT_SUCCESS) { (void) fprintf(stderr, "ERROR: Plan3 creation failed!\n"); return EXIT_FAILURE; } iCUFFTRet = hipfftPlanMany(&g_stPlan4, FFTPLAN_RANK, &g_iNFFT4, &g_ISIZE4, FFTPLAN4_ISTRIDE, FFTPLAN4_IDIST, &g_OSIZE4, FFTPLAN4_OSTRIDE, FFTPLAN4_ODIST, HIPFFT_R2C, FFTPLAN4_BATCH); if (iCUFFTRet != HIPFFT_SUCCESS) { (void) fprintf(stderr, "ERROR: Plan4 creation failed!\n"); return EXIT_FAILURE; } iCUFFTRet = hipfftPlanMany(&g_stPlan5, FFTPLAN_RANK, &g_iNFFT5, &g_ISIZE5, FFTPLAN5_ISTRIDE, FFTPLAN5_IDIST, &g_OSIZE5, FFTPLAN5_OSTRIDE, FFTPLAN5_ODIST, HIPFFT_R2C, FFTPLAN5_BATCH); if (iCUFFTRet != HIPFFT_SUCCESS) { (void) fprintf(stderr, "ERROR: Plan5 creation failed!\n"); return EXIT_FAILURE; } return EXIT_SUCCESS; } /* function that frees resources */ void CleanUp() { /* free resources */ if (g_pc4Data_d != NULL) { (void) hipFree(g_pc4Data_d); g_pc4Data_d = NULL; } if (g_pf4FFTIn_d != NULL) { (void) hipFree(g_pf4FFTIn_d); g_pf4FFTIn_d = NULL; } if (g_pf4FFTOut1_d != NULL) { (void) hipFree(g_pf4FFTOut1_d); g_pf4FFTOut1_d = NULL; } if (g_pf4FFTOut2_d != NULL) { (void) hipFree(g_pf4FFTOut2_d); g_pf4FFTOut2_d = NULL; } if (g_pf4FFTOut3_d != NULL) { (void) hipFree(g_pf4FFTOut3_d); g_pf4FFTOut3_d = NULL; } if (g_pf4FFTOut4_d != NULL) { (void) hipFree(g_pf4FFTOut4_d); g_pf4FFTOut4_d = NULL; } if (g_pf4FFTOut5_d != NULL) { (void) hipFree(g_pf4FFTOut5_d); g_pf4FFTOut5_d = NULL; } if (g_pf4SumStokes != NULL) { free(g_pf4SumStokes); g_pf4SumStokes = NULL; } if (g_pf4SumStokes_d != NULL) { (void) hipFree(g_pf4SumStokes_d); g_pf4SumStokes_d = NULL; } if (g_sumBatch2 != NULL) { (void) hipFree(g_sumBatch2); g_sumBatch2 = NULL; } if (g_sumBatch1 != NULL) { (void) hipFree(g_sumBatch1); g_sumBatch1 = NULL; } if (g_sumBatch3 != NULL) { (void) hipFree(g_sumBatch3); 
g_sumBatch3 = NULL; } if (g_sumBatch4 != NULL) { (void) hipFree(g_sumBatch4); g_sumBatch4 = NULL; } if (g_sumBatch5 != NULL) { (void) hipFree(g_sumBatch5); g_sumBatch5 = NULL; } /* destroy plan */ /* TODO: check for plan */ (void) hipfftDestroy(g_stPlan1); (void) hipfftDestroy(g_stPlan2); (void) hipfftDestroy(g_stPlan3); (void) hipfftDestroy(g_stPlan4); (void) hipfftDestroy(g_stPlan5); /* TODO: check if open */ cpgclos(); return; } /* * Registers handlers for SIGTERM and CTRL+C */ int RegisterSignalHandlers() { struct sigaction stSigHandler = {{0}}; int iRet = EXIT_SUCCESS; /* register the CTRL+C-handling function */ stSigHandler.sa_handler = HandleStopSignals; iRet = sigaction(SIGINT, &stSigHandler, NULL); if (iRet != EXIT_SUCCESS) { (void) fprintf(stderr, "ERROR: Handler registration failed for signal %d!\n", SIGINT); return EXIT_FAILURE; } /* register the SIGTERM-handling function */ stSigHandler.sa_handler = HandleStopSignals; iRet = sigaction(SIGTERM, &stSigHandler, NULL); if (iRet != EXIT_SUCCESS) { (void) fprintf(stderr, "ERROR: Handler registration failed for signal %d!\n", SIGTERM); return EXIT_FAILURE; } return EXIT_SUCCESS; } /* * Catches SIGTERM and CTRL+C and cleans up before exiting */ void HandleStopSignals(int iSigNo) { /* clean up */ CleanUp(); /* exit */ exit(EXIT_SUCCESS); /* never reached */ return; } void __CUDASafeCallWithCleanUp(hipError_t iRet, const char* pcFile, const int iLine, void (*pCleanUp)(void)) { if (iRet != hipSuccess) { (void) fprintf(stderr, "ERROR: File <%s>, Line %d: %s\n", pcFile, iLine, hipGetErrorString(iRet)); /* free resources */ (*pCleanUp)(); exit(EXIT_FAILURE); } return; } /* * Prints usage information */ void PrintUsage(const char *pcProgName) { (void) printf("Usage: %s [options] <data-file>\n", pcProgName); (void) printf(" -h --help "); (void) printf("Display this usage information\n"); (void) printf(" -n --nfft <value> "); (void) printf("Number of points in FFT\n"); (void) printf(" -p --pfb "); (void) printf("Enable 
PFB\n"); (void) printf(" -a --nacc <value> "); (void) printf("Number of spectra to add\n"); (void) printf(" -s --fsamp <value> "); (void) printf("Sampling frequency\n"); return; } static void *run(hashpipe_thread_args_t * args) { // Local aliases to shorten access to args fields demo4_input_databuf_t *db_in = (demo4_input_databuf_t *)args->ibuf; demo4_output_databuf_t *db_out = (demo4_output_databuf_t *)args->obuf; hashpipe_status_t st = args->st; const char * status_key = args->thread_desc->skey; int rv; uint64_t mcnt=0; int curblock_in=0; int curblock_out=0; int nhits = 0; char *data_raw; // raw data will be feed to gpu thread data_raw = (char *)malloc(g_iSizeRead*sizeof(char)); int n_frames; // number of frames has been processed int iRet = EXIT_SUCCESS; int iSpecCount = 0; int iNumAcc = DEF_ACC; //if(iNumAcc > g_iSizeRead/DEF_LEN_IDATA){iNumAcc=g_iSizeRead/DEF_LEN_IDATA;} // if accumulation number larger than data buffer, setit to number spectra frames of buffer int n_spec = 0; // number of spectrum int iProcData = 0; hipError_t iCUDARet = hipSuccess; struct timeval stStart = {0}; struct timeval stStop = {0}; const char *pcProgName = NULL; int iNextOpt = 0; /* valid short options */ const char* const pcOptsShort = "hb:n:pa:s:"; /* valid long options */ const struct option stOptsLong[] = { { "help", 0, NULL, 'h' }, { "nsub", 1, NULL, 'b' }, { "nfft", 1, NULL, 'n' }, { "pfb", 0, NULL, 'p' }, { "nacc", 1, NULL, 'a' }, { "fsamp", 1, NULL, 's' }, { NULL, 0, NULL, 0 } }; while (run_threads()) { hashpipe_status_lock_safe(&st); hputi4(st.buf, "GPUBLKIN", curblock_in); hputs(st.buf, status_key, "waiting"); hputi4(st.buf, "GPUBKOUT", curblock_out); hputi8(st.buf,"GPUMCNT",mcnt); hashpipe_status_unlock_safe(&st); n_spec = 0; // Wait for new output block to be free while ((rv=demo4_output_databuf_wait_free(db_out, curblock_out)) != HASHPIPE_OK) { if (rv==HASHPIPE_TIMEOUT) { hashpipe_status_lock_safe(&st); hputs(st.buf, status_key, "blocked gpu out"); 
hashpipe_status_unlock_safe(&st); continue; } else { hashpipe_error(__FUNCTION__, "error waiting for free databuf"); pthread_exit(NULL); break; } } while(iSpecCount < iNumAcc){ // Wait for new input block to be filled while ((rv=demo4_input_databuf_wait_filled(db_in, curblock_in)) != HASHPIPE_OK) { if (rv==HASHPIPE_TIMEOUT) { hashpipe_status_lock_safe(&st); hputs(st.buf, status_key, "blocked"); hashpipe_status_unlock_safe(&st); continue; } else { hashpipe_error(__FUNCTION__, "error waiting for filled databuf"); pthread_exit(NULL); break; } } // Note processing status hashpipe_status_lock_safe(&st); hputs(st.buf, status_key, "processing gpu"); hashpipe_status_unlock_safe(&st); //get data from input databuf to local memcpy(data_raw,db_in->block[curblock_in].data_block,g_iSizeRead*sizeof(char)); // write new data to the gpu buffer CUDASafeCallWithCleanUp(hipMemcpy(g_pc4Data_d, data_raw, g_iSizeRead*sizeof(char), hipMemcpyHostToDevice)); /* whenever there is a read, reset the read pointer to the beginning */ g_pc4DataRead_d = g_pc4Data_d; hipLaunchKernelGGL(( CopyDataForFFT), dim3(g_dimGCopy), dim3(g_dimBCopy), 0, 0, g_pc4DataRead_d, g_pf4FFTIn_d); CUDASafeCallWithCleanUp(hipDeviceSynchronize()); iCUDARet = hipGetLastError(); if (iCUDARet != hipSuccess){ (void) fprintf(stderr, "ERROR: File <%s>, Line %d: %s\n", __FILE__, __LINE__, hipGetErrorString(iCUDARet)); CleanUp(); } /* do fft */ iRet = DoFFT(); if (iRet != EXIT_SUCCESS){ (void) fprintf(stderr, "ERROR! 
FFT failed!\n"); CleanUp(); } hipLaunchKernelGGL(( BatchAccumulate), dim3(g_BatchAccumBlocks), dim3(g_BatchAccumThreads), 0, 0, g_pf4FFTOut1_d, g_pf4FFTOut2_d, g_pf4FFTOut3_d, g_pf4FFTOut4_d, g_pf4FFTOut5_d, g_sumBatch1, g_sumBatch2, g_sumBatch3, g_sumBatch4, g_sumBatch5, DEF_LEN_ODATA ); /* hipLaunchKernelGGL(( BatchAccumulate), dim3(g_BatchAccumBlocks), dim3(g_BatchAccumThreads), 0, 0, g_pf4FFTOut1_d, 1, DEF_LEN_ODATA+1, g_sumBatch1); hipLaunchKernelGGL(( BatchAccumulate), dim3(g_BatchAccumBlocks), dim3(g_BatchAccumThreads), 0, 0, g_pf4FFTOut2_d, 2, DEF_LEN_ODATA/2+1, g_sumBatch2); hipLaunchKernelGGL(( BatchAccumulate), dim3(g_BatchAccumBlocks), dim3(g_BatchAccumThreads), 0, 0, g_pf4FFTOut3_d, 4, DEF_LEN_ODATA/4+1, g_sumBatch3); hipLaunchKernelGGL(( BatchAccumulate), dim3(g_BatchAccumBlocks), dim3(g_BatchAccumThreads), 0, 0, g_pf4FFTOut4_d, 8, DEF_LEN_ODATA/8+1, g_sumBatch4); hipLaunchKernelGGL(( BatchAccumulate), dim3(g_BatchAccumBlocks), dim3(g_BatchAccumThreads), 0, 0, g_pf4FFTOut5_d, 16, DEF_LEN_ODATA/16+1, g_sumBatch5); */ CUDASafeCallWithCleanUp(hipDeviceSynchronize()); iCUDARet = hipGetLastError(); if (iCUDARet != hipSuccess) { (void) fprintf(stderr, "ERROR: File <%s>, Line %d: %s\n", __FILE__, __LINE__, hipGetErrorString(iCUDARet)); CleanUp(); } ++iSpecCount; // Mark input block as free and advance demo4_input_databuf_set_free(db_in, curblock_in); curblock_in = (curblock_in + 1) % db_in->header.n_block; } //store all spectrums untrimmed, concatenated into one output /* CUDASafeCallWithCleanUp(hipMemcpy(g_pf4SumStokes, g_sumBatch1, (DEF_LEN_ODATA * sizeof(float)), hipMemcpyDeviceToHost)); CUDASafeCallWithCleanUp(hipMemcpy(g_pf4SumStokes + DEF_LEN_ODATA, g_sumBatch2, (DEF_LEN_ODATA/2 * sizeof(float)), hipMemcpyDeviceToHost)); CUDASafeCallWithCleanUp(hipMemcpy(g_pf4SumStokes + DEF_LEN_ODATA*3/2, g_sumBatch3, (DEF_LEN_ODATA/4 * sizeof(float)), hipMemcpyDeviceToHost)); CUDASafeCallWithCleanUp(hipMemcpy(g_pf4SumStokes + DEF_LEN_ODATA*7/4, g_sumBatch4, 
(DEF_LEN_ODATA/8 * sizeof(float)), hipMemcpyDeviceToHost)); CUDASafeCallWithCleanUp(hipMemcpy(g_pf4SumStokes + DEF_LEN_ODATA*15/8, g_sumBatch5, (DEF_LEN_ODATA/16 * sizeof(float)), hipMemcpyDeviceToHost)); */ //timmed spectrum CUDASafeCallWithCleanUp(hipMemcpy(g_pf4SumStokes, g_sumBatch1+DEF_LEN_ODATA/32, (DEF_LEN_ODATA/32 * sizeof(float)), hipMemcpyDeviceToHost)); CUDASafeCallWithCleanUp(hipMemcpy(g_pf4SumStokes + DEF_LEN_ODATA/32, g_sumBatch2+DEF_LEN_ODATA/32, (DEF_LEN_ODATA/32 * sizeof(float)), hipMemcpyDeviceToHost)); CUDASafeCallWithCleanUp(hipMemcpy(g_pf4SumStokes + DEF_LEN_ODATA/16, g_sumBatch3+DEF_LEN_ODATA/32, (DEF_LEN_ODATA/32 * sizeof(float)), hipMemcpyDeviceToHost)); CUDASafeCallWithCleanUp(hipMemcpy(g_pf4SumStokes + DEF_LEN_ODATA*3/32, g_sumBatch4+DEF_LEN_ODATA/32, (DEF_LEN_ODATA/32 * sizeof(float)), hipMemcpyDeviceToHost)); CUDASafeCallWithCleanUp(hipMemcpy(g_pf4SumStokes + DEF_LEN_ODATA/8, g_sumBatch5+DEF_LEN_ODATA/32, (DEF_LEN_ODATA/32 * sizeof(float)), hipMemcpyDeviceToHost)); memcpy(db_out->block[curblock_out].Stokes_Full+SIZEOF_OUT_STOKES*n_spec,g_pf4SumStokes,SIZEOF_OUT_STOKES*sizeof(float)); //printf("Stokes to output done!\n"); n_spec++; /* reset time */ iSpecCount = 0; /* zero accumulators */ CUDASafeCallWithCleanUp(hipMemset(g_pf4SumStokes_d, '\0', (DEF_LEN_IDATA * sizeof(float)))); CUDASafeCallWithCleanUp(hipMemset(g_sumBatch2, '\0', (DEF_LEN_ODATA * sizeof(float)))); CUDASafeCallWithCleanUp(hipMemset(g_sumBatch1, '\0', (DEF_LEN_ODATA * sizeof(float)))); CUDASafeCallWithCleanUp(hipMemset(g_sumBatch3, '\0', (DEF_LEN_ODATA * sizeof(float)))); CUDASafeCallWithCleanUp(hipMemset(g_sumBatch4, '\0', (DEF_LEN_ODATA * sizeof(float)))); CUDASafeCallWithCleanUp(hipMemset(g_sumBatch5, '\0', (DEF_LEN_ODATA * sizeof(float)))); /* if time to read from input buffer */ iProcData = 0; (void) gettimeofday(&stStop, NULL); /*(void) printf("Time taken (barring Init()): %gs\n", ((stStop.tv_sec + (stStop.tv_usec * USEC2SEC)) - (stStart.tv_sec + (stStart.tv_usec * 
USEC2SEC))));*/ //return EXIT_SUCCESS; //display number of frames in status hashpipe_status_lock_safe(&st); hputi4(st.buf,"NFRAMES",n_frames); hashpipe_status_unlock_safe(&st); // Mark output block as full and advance demo4_output_databuf_set_filled(db_out, curblock_out); curblock_out = (curblock_out + 1) % db_out->header.n_block; // Mark input block as free and advance //demo4_input_databuf_set_free(db_in, curblock_in); //curblock_in = (curblock_in + 1) % db_in->header.n_block; mcnt++; /* Check for cancel */ pthread_testcancel(); return EXIT_SUCCESS; } CleanUp(); } static hashpipe_thread_desc_t demo4_gpu_thread = { name: "demo4_gpu_thread", skey: "GPUSTAT", init: Init, //init: NULL, run: run, ibuf_desc: {demo4_input_databuf_create}, obuf_desc: {demo4_output_databuf_create} }; static __attribute__((constructor)) void ctor() { register_hashpipe_thread(&demo4_gpu_thread); } #ifdef __cplusplus } #endif
demo4_gpu_thread.cu
/*demo4_gpu_thread.c * * Get two numbers from input databuffer, calculate them and write the sum to output databuffer. */ #ifdef __cplusplus extern "C"{ #endif #include <stdio.h> #include <stdlib.h> #include <string.h> #include <pthread.h> #include <sys/time.h> #include <sys/resource.h> #include <sys/types.h> #include <unistd.h> #include "hashpipe.h" #include "demo4_databuf.h" #include "demo4_gpu_thread.h" #include <cuda.h> #include <cufft.h> #include <time.h> #include <cuda_runtime.h> int g_iIsDataReadDone = FALSE; char* g_pc4Data_d = NULL; /* raw data starting address */ char* g_pc4DataRead_d = NULL; /* raw data read pointer */ int g_iNFFT = NFFT; int g_iNFFT1 = NFFT; int g_iNFFT2 = NFFT/2; int g_iNFFT3 = NFFT/4; int g_iNFFT4 = NFFT/8; int g_iNFFT5 = NFFT/16; int g_ISIZE1 = FFTPLAN1_ISIZE; int g_OSIZE1 = FFTPLAN1_OSIZE; int g_ISIZE2 = FFTPLAN2_ISIZE; int g_OSIZE2 = FFTPLAN2_OSIZE; int g_ISIZE3 = FFTPLAN3_ISIZE; int g_OSIZE3 = FFTPLAN3_OSIZE; int g_ISIZE4 = FFTPLAN4_ISIZE; int g_OSIZE4 = FFTPLAN4_OSIZE; int g_ISIZE5 = FFTPLAN5_ISIZE; int g_OSIZE5 = FFTPLAN5_OSIZE; dim3 g_dimBCopy(1, 1, 1); dim3 g_dimGCopy(1, 1); dim3 g_dimBAccum(1, 1, 1); dim3 g_dimGAccum(1, 1); int g_BatchAccumThreads; int g_BatchAccumBlocks; float* g_pf4FFTIn_d = NULL; float2* g_pf4FFTOut1_d = NULL; float2* g_pf4FFTOut2_d = NULL; float2* g_pf4FFTOut3_d = NULL; float2* g_pf4FFTOut4_d = NULL; float2* g_pf4FFTOut5_d = NULL; cufftHandle g_stPlan1 = {0}; cufftHandle g_stPlan2 = {0}; cufftHandle g_stPlan3 = {0}; cufftHandle g_stPlan4 = {0}; cufftHandle g_stPlan5 = {0}; float* g_pf4SumStokes = NULL; float* g_pf4SumStokes_d = NULL; float* g_sumBatch1 = NULL; float* g_sumBatch2 = NULL; float* g_sumBatch3 = NULL; float* g_sumBatch4 = NULL; float* g_sumBatch5 = NULL; /* BUG: crash if file size is less than 32MB */ int g_iSizeRead = DEF_LEN_IDATA; static int Init(hashpipe_thread_args_t * args) { int iDevCount = 0; cudaDeviceProp stDevProp = {0}; int iRet = EXIT_SUCCESS; cufftResult iCUFFTRet = 
CUFFT_SUCCESS; int iMaxThreadsPerBlock = 0; iRet = RegisterSignalHandlers(); if (iRet != EXIT_SUCCESS) { (void) fprintf(stderr, "ERROR: Signal-handler registration failed!\n"); return EXIT_FAILURE; } /* since CUDASafeCallWithCleanUp() calls cudaGetErrorString(), it should not be used here - will cause crash if no CUDA device is found */ (void) cudaGetDeviceCount(&iDevCount); if (0 == iDevCount) { (void) fprintf(stderr, "ERROR: No CUDA-capable device found!\n"); return EXIT_FAILURE; } /* just use the first device */ CUDASafeCallWithCleanUp(cudaSetDevice(0)); CUDASafeCallWithCleanUp(cudaGetDeviceProperties(&stDevProp, 0)); iMaxThreadsPerBlock = stDevProp.maxThreadsPerBlock; /* allocate memory for data array - 32MB is the block size for the VEGAS input buffer */ //CUDASafeCallWithCleanUp(cudaMalloc((void **) &g_pc4DataRead_d, g_iSizeRead)); CUDASafeCallWithCleanUp(cudaMalloc((void **) &g_pc4Data_d, g_iSizeRead)); g_pc4DataRead_d = g_pc4Data_d; /* calculate kernel parameters */ if (DEF_LEN_IDATA < iMaxThreadsPerBlock) { g_dimBCopy.x = DEF_LEN_IDATA; g_dimBAccum.x = DEF_LEN_IDATA; } else { g_dimBCopy.x = iMaxThreadsPerBlock; g_dimBAccum.x = iMaxThreadsPerBlock; } g_dimGCopy.x = (DEF_LEN_IDATA) / iMaxThreadsPerBlock; g_dimGAccum.x = (DEF_LEN_IDATA) / iMaxThreadsPerBlock; if (DEF_LEN_ODATA < iMaxThreadsPerBlock){ g_BatchAccumThreads = DEF_LEN_ODATA; } else{ g_BatchAccumThreads = iMaxThreadsPerBlock; } g_BatchAccumBlocks = DEF_LEN_ODATA/iMaxThreadsPerBlock; CUDASafeCallWithCleanUp(cudaMalloc((void **) &g_pf4FFTIn_d, DEF_LEN_IDATA * sizeof(float))); CUDASafeCallWithCleanUp(cudaMalloc((void **) &g_pf4FFTOut1_d, DEF_LEN_IDATA * sizeof(float2))); CUDASafeCallWithCleanUp(cudaMalloc((void **) &g_pf4FFTOut2_d, DEF_LEN_IDATA * sizeof(float2))); CUDASafeCallWithCleanUp(cudaMalloc((void **) &g_pf4FFTOut3_d, DEF_LEN_IDATA * sizeof(float2))); CUDASafeCallWithCleanUp(cudaMalloc((void **) &g_pf4FFTOut4_d, DEF_LEN_IDATA * sizeof(float2))); CUDASafeCallWithCleanUp(cudaMalloc((void **) 
&g_pf4FFTOut5_d, DEF_LEN_IDATA * sizeof(float2))); g_pf4SumStokes = (float *) malloc(DEF_LEN_IDATA * sizeof(float)); if (NULL == g_pf4SumStokes) { (void) fprintf(stderr, "ERROR: Memory allocation failed! %s.\n", strerror(errno)); return EXIT_FAILURE; } CUDASafeCallWithCleanUp(cudaMalloc((void **) &g_pf4SumStokes_d, DEF_LEN_IDATA * sizeof(float))); CUDASafeCallWithCleanUp(cudaMemset(g_pf4SumStokes_d, '\0', DEF_LEN_IDATA * sizeof(float))); CUDASafeCallWithCleanUp(cudaMalloc((void **) &g_sumBatch1, DEF_LEN_ODATA * sizeof(float))); CUDASafeCallWithCleanUp(cudaMemset(g_sumBatch1, '\0', DEF_LEN_ODATA * sizeof(float))); CUDASafeCallWithCleanUp(cudaMalloc((void **) &g_sumBatch2, DEF_LEN_ODATA * sizeof(float))); CUDASafeCallWithCleanUp(cudaMemset(g_sumBatch2, '\0', DEF_LEN_ODATA * sizeof(float))); CUDASafeCallWithCleanUp(cudaMalloc((void **) &g_sumBatch3, DEF_LEN_ODATA * sizeof(float))); CUDASafeCallWithCleanUp(cudaMemset(g_sumBatch3, '\0', DEF_LEN_ODATA * sizeof(float))); CUDASafeCallWithCleanUp(cudaMalloc((void **) &g_sumBatch4, DEF_LEN_ODATA * sizeof(float))); CUDASafeCallWithCleanUp(cudaMemset(g_sumBatch4, '\0', DEF_LEN_ODATA * sizeof(float))); CUDASafeCallWithCleanUp(cudaMalloc((void **) &g_sumBatch5, DEF_LEN_ODATA * sizeof(float))); CUDASafeCallWithCleanUp(cudaMemset(g_sumBatch5, '\0', DEF_LEN_ODATA * sizeof(float))); /* create plan */ iCUFFTRet = cufftPlanMany(&g_stPlan1, FFTPLAN_RANK, &g_iNFFT1, &g_ISIZE1, FFTPLAN1_ISTRIDE, FFTPLAN1_IDIST, &g_OSIZE1, FFTPLAN1_OSTRIDE, FFTPLAN1_ODIST, CUFFT_R2C, FFTPLAN1_BATCH); if (iCUFFTRet != CUFFT_SUCCESS) { (void) fprintf(stderr, "ERROR: Plan1 creation failed!\n"); return EXIT_FAILURE; } iCUFFTRet = cufftPlanMany(&g_stPlan2, FFTPLAN_RANK, &g_iNFFT2, &g_ISIZE2, FFTPLAN2_ISTRIDE, FFTPLAN2_IDIST, &g_OSIZE2, FFTPLAN2_OSTRIDE, FFTPLAN2_ODIST, CUFFT_R2C, FFTPLAN2_BATCH); if (iCUFFTRet != CUFFT_SUCCESS) { (void) fprintf(stderr, "ERROR: Plan2 creation failed!\n"); return EXIT_FAILURE; } iCUFFTRet = cufftPlanMany(&g_stPlan3, 
FFTPLAN_RANK, &g_iNFFT3, &g_ISIZE3, FFTPLAN3_ISTRIDE, FFTPLAN3_IDIST, &g_OSIZE3, FFTPLAN3_OSTRIDE, FFTPLAN3_ODIST, CUFFT_R2C, FFTPLAN3_BATCH); if (iCUFFTRet != CUFFT_SUCCESS) { (void) fprintf(stderr, "ERROR: Plan3 creation failed!\n"); return EXIT_FAILURE; } iCUFFTRet = cufftPlanMany(&g_stPlan4, FFTPLAN_RANK, &g_iNFFT4, &g_ISIZE4, FFTPLAN4_ISTRIDE, FFTPLAN4_IDIST, &g_OSIZE4, FFTPLAN4_OSTRIDE, FFTPLAN4_ODIST, CUFFT_R2C, FFTPLAN4_BATCH); if (iCUFFTRet != CUFFT_SUCCESS) { (void) fprintf(stderr, "ERROR: Plan4 creation failed!\n"); return EXIT_FAILURE; } iCUFFTRet = cufftPlanMany(&g_stPlan5, FFTPLAN_RANK, &g_iNFFT5, &g_ISIZE5, FFTPLAN5_ISTRIDE, FFTPLAN5_IDIST, &g_OSIZE5, FFTPLAN5_OSTRIDE, FFTPLAN5_ODIST, CUFFT_R2C, FFTPLAN5_BATCH); if (iCUFFTRet != CUFFT_SUCCESS) { (void) fprintf(stderr, "ERROR: Plan5 creation failed!\n"); return EXIT_FAILURE; } return EXIT_SUCCESS; } /* function that frees resources */ void CleanUp() { /* free resources */ if (g_pc4Data_d != NULL) { (void) cudaFree(g_pc4Data_d); g_pc4Data_d = NULL; } if (g_pf4FFTIn_d != NULL) { (void) cudaFree(g_pf4FFTIn_d); g_pf4FFTIn_d = NULL; } if (g_pf4FFTOut1_d != NULL) { (void) cudaFree(g_pf4FFTOut1_d); g_pf4FFTOut1_d = NULL; } if (g_pf4FFTOut2_d != NULL) { (void) cudaFree(g_pf4FFTOut2_d); g_pf4FFTOut2_d = NULL; } if (g_pf4FFTOut3_d != NULL) { (void) cudaFree(g_pf4FFTOut3_d); g_pf4FFTOut3_d = NULL; } if (g_pf4FFTOut4_d != NULL) { (void) cudaFree(g_pf4FFTOut4_d); g_pf4FFTOut4_d = NULL; } if (g_pf4FFTOut5_d != NULL) { (void) cudaFree(g_pf4FFTOut5_d); g_pf4FFTOut5_d = NULL; } if (g_pf4SumStokes != NULL) { free(g_pf4SumStokes); g_pf4SumStokes = NULL; } if (g_pf4SumStokes_d != NULL) { (void) cudaFree(g_pf4SumStokes_d); g_pf4SumStokes_d = NULL; } if (g_sumBatch2 != NULL) { (void) cudaFree(g_sumBatch2); g_sumBatch2 = NULL; } if (g_sumBatch1 != NULL) { (void) cudaFree(g_sumBatch1); g_sumBatch1 = NULL; } if (g_sumBatch3 != NULL) { (void) cudaFree(g_sumBatch3); g_sumBatch3 = NULL; } if (g_sumBatch4 != NULL) { (void) 
cudaFree(g_sumBatch4); g_sumBatch4 = NULL; } if (g_sumBatch5 != NULL) { (void) cudaFree(g_sumBatch5); g_sumBatch5 = NULL; } /* destroy plan */ /* TODO: check for plan */ (void) cufftDestroy(g_stPlan1); (void) cufftDestroy(g_stPlan2); (void) cufftDestroy(g_stPlan3); (void) cufftDestroy(g_stPlan4); (void) cufftDestroy(g_stPlan5); /* TODO: check if open */ cpgclos(); return; } /* * Registers handlers for SIGTERM and CTRL+C */ int RegisterSignalHandlers() { struct sigaction stSigHandler = {{0}}; int iRet = EXIT_SUCCESS; /* register the CTRL+C-handling function */ stSigHandler.sa_handler = HandleStopSignals; iRet = sigaction(SIGINT, &stSigHandler, NULL); if (iRet != EXIT_SUCCESS) { (void) fprintf(stderr, "ERROR: Handler registration failed for signal %d!\n", SIGINT); return EXIT_FAILURE; } /* register the SIGTERM-handling function */ stSigHandler.sa_handler = HandleStopSignals; iRet = sigaction(SIGTERM, &stSigHandler, NULL); if (iRet != EXIT_SUCCESS) { (void) fprintf(stderr, "ERROR: Handler registration failed for signal %d!\n", SIGTERM); return EXIT_FAILURE; } return EXIT_SUCCESS; } /* * Catches SIGTERM and CTRL+C and cleans up before exiting */ void HandleStopSignals(int iSigNo) { /* clean up */ CleanUp(); /* exit */ exit(EXIT_SUCCESS); /* never reached */ return; } void __CUDASafeCallWithCleanUp(cudaError_t iRet, const char* pcFile, const int iLine, void (*pCleanUp)(void)) { if (iRet != cudaSuccess) { (void) fprintf(stderr, "ERROR: File <%s>, Line %d: %s\n", pcFile, iLine, cudaGetErrorString(iRet)); /* free resources */ (*pCleanUp)(); exit(EXIT_FAILURE); } return; } /* * Prints usage information */ void PrintUsage(const char *pcProgName) { (void) printf("Usage: %s [options] <data-file>\n", pcProgName); (void) printf(" -h --help "); (void) printf("Display this usage information\n"); (void) printf(" -n --nfft <value> "); (void) printf("Number of points in FFT\n"); (void) printf(" -p --pfb "); (void) printf("Enable PFB\n"); (void) printf(" -a --nacc <value> "); (void) 
printf("Number of spectra to add\n"); (void) printf(" -s --fsamp <value> "); (void) printf("Sampling frequency\n"); return; } static void *run(hashpipe_thread_args_t * args) { // Local aliases to shorten access to args fields demo4_input_databuf_t *db_in = (demo4_input_databuf_t *)args->ibuf; demo4_output_databuf_t *db_out = (demo4_output_databuf_t *)args->obuf; hashpipe_status_t st = args->st; const char * status_key = args->thread_desc->skey; int rv; uint64_t mcnt=0; int curblock_in=0; int curblock_out=0; int nhits = 0; char *data_raw; // raw data will be feed to gpu thread data_raw = (char *)malloc(g_iSizeRead*sizeof(char)); int n_frames; // number of frames has been processed int iRet = EXIT_SUCCESS; int iSpecCount = 0; int iNumAcc = DEF_ACC; //if(iNumAcc > g_iSizeRead/DEF_LEN_IDATA){iNumAcc=g_iSizeRead/DEF_LEN_IDATA;} // if accumulation number larger than data buffer, setit to number spectra frames of buffer int n_spec = 0; // number of spectrum int iProcData = 0; cudaError_t iCUDARet = cudaSuccess; struct timeval stStart = {0}; struct timeval stStop = {0}; const char *pcProgName = NULL; int iNextOpt = 0; /* valid short options */ const char* const pcOptsShort = "hb:n:pa:s:"; /* valid long options */ const struct option stOptsLong[] = { { "help", 0, NULL, 'h' }, { "nsub", 1, NULL, 'b' }, { "nfft", 1, NULL, 'n' }, { "pfb", 0, NULL, 'p' }, { "nacc", 1, NULL, 'a' }, { "fsamp", 1, NULL, 's' }, { NULL, 0, NULL, 0 } }; while (run_threads()) { hashpipe_status_lock_safe(&st); hputi4(st.buf, "GPUBLKIN", curblock_in); hputs(st.buf, status_key, "waiting"); hputi4(st.buf, "GPUBKOUT", curblock_out); hputi8(st.buf,"GPUMCNT",mcnt); hashpipe_status_unlock_safe(&st); n_spec = 0; // Wait for new output block to be free while ((rv=demo4_output_databuf_wait_free(db_out, curblock_out)) != HASHPIPE_OK) { if (rv==HASHPIPE_TIMEOUT) { hashpipe_status_lock_safe(&st); hputs(st.buf, status_key, "blocked gpu out"); hashpipe_status_unlock_safe(&st); continue; } else { 
hashpipe_error(__FUNCTION__, "error waiting for free databuf"); pthread_exit(NULL); break; } } while(iSpecCount < iNumAcc){ // Wait for new input block to be filled while ((rv=demo4_input_databuf_wait_filled(db_in, curblock_in)) != HASHPIPE_OK) { if (rv==HASHPIPE_TIMEOUT) { hashpipe_status_lock_safe(&st); hputs(st.buf, status_key, "blocked"); hashpipe_status_unlock_safe(&st); continue; } else { hashpipe_error(__FUNCTION__, "error waiting for filled databuf"); pthread_exit(NULL); break; } } // Note processing status hashpipe_status_lock_safe(&st); hputs(st.buf, status_key, "processing gpu"); hashpipe_status_unlock_safe(&st); //get data from input databuf to local memcpy(data_raw,db_in->block[curblock_in].data_block,g_iSizeRead*sizeof(char)); // write new data to the gpu buffer CUDASafeCallWithCleanUp(cudaMemcpy(g_pc4Data_d, data_raw, g_iSizeRead*sizeof(char), cudaMemcpyHostToDevice)); /* whenever there is a read, reset the read pointer to the beginning */ g_pc4DataRead_d = g_pc4Data_d; CopyDataForFFT<<<g_dimGCopy, g_dimBCopy>>>(g_pc4DataRead_d, g_pf4FFTIn_d); CUDASafeCallWithCleanUp(cudaThreadSynchronize()); iCUDARet = cudaGetLastError(); if (iCUDARet != cudaSuccess){ (void) fprintf(stderr, "ERROR: File <%s>, Line %d: %s\n", __FILE__, __LINE__, cudaGetErrorString(iCUDARet)); CleanUp(); } /* do fft */ iRet = DoFFT(); if (iRet != EXIT_SUCCESS){ (void) fprintf(stderr, "ERROR! 
FFT failed!\n"); CleanUp(); } BatchAccumulate<<<g_BatchAccumBlocks, g_BatchAccumThreads>>>(g_pf4FFTOut1_d, g_pf4FFTOut2_d, g_pf4FFTOut3_d, g_pf4FFTOut4_d, g_pf4FFTOut5_d, g_sumBatch1, g_sumBatch2, g_sumBatch3, g_sumBatch4, g_sumBatch5, DEF_LEN_ODATA ); /* BatchAccumulate<<<g_BatchAccumBlocks, g_BatchAccumThreads>>>(g_pf4FFTOut1_d, 1, DEF_LEN_ODATA+1, g_sumBatch1); BatchAccumulate<<<g_BatchAccumBlocks, g_BatchAccumThreads>>>(g_pf4FFTOut2_d, 2, DEF_LEN_ODATA/2+1, g_sumBatch2); BatchAccumulate<<<g_BatchAccumBlocks, g_BatchAccumThreads>>>(g_pf4FFTOut3_d, 4, DEF_LEN_ODATA/4+1, g_sumBatch3); BatchAccumulate<<<g_BatchAccumBlocks, g_BatchAccumThreads>>>(g_pf4FFTOut4_d, 8, DEF_LEN_ODATA/8+1, g_sumBatch4); BatchAccumulate<<<g_BatchAccumBlocks, g_BatchAccumThreads>>>(g_pf4FFTOut5_d, 16, DEF_LEN_ODATA/16+1, g_sumBatch5); */ CUDASafeCallWithCleanUp(cudaThreadSynchronize()); iCUDARet = cudaGetLastError(); if (iCUDARet != cudaSuccess) { (void) fprintf(stderr, "ERROR: File <%s>, Line %d: %s\n", __FILE__, __LINE__, cudaGetErrorString(iCUDARet)); CleanUp(); } ++iSpecCount; // Mark input block as free and advance demo4_input_databuf_set_free(db_in, curblock_in); curblock_in = (curblock_in + 1) % db_in->header.n_block; } //store all spectrums untrimmed, concatenated into one output /* CUDASafeCallWithCleanUp(cudaMemcpy(g_pf4SumStokes, g_sumBatch1, (DEF_LEN_ODATA * sizeof(float)), cudaMemcpyDeviceToHost)); CUDASafeCallWithCleanUp(cudaMemcpy(g_pf4SumStokes + DEF_LEN_ODATA, g_sumBatch2, (DEF_LEN_ODATA/2 * sizeof(float)), cudaMemcpyDeviceToHost)); CUDASafeCallWithCleanUp(cudaMemcpy(g_pf4SumStokes + DEF_LEN_ODATA*3/2, g_sumBatch3, (DEF_LEN_ODATA/4 * sizeof(float)), cudaMemcpyDeviceToHost)); CUDASafeCallWithCleanUp(cudaMemcpy(g_pf4SumStokes + DEF_LEN_ODATA*7/4, g_sumBatch4, (DEF_LEN_ODATA/8 * sizeof(float)), cudaMemcpyDeviceToHost)); CUDASafeCallWithCleanUp(cudaMemcpy(g_pf4SumStokes + DEF_LEN_ODATA*15/8, g_sumBatch5, (DEF_LEN_ODATA/16 * sizeof(float)), cudaMemcpyDeviceToHost)); */ //timmed 
spectrum CUDASafeCallWithCleanUp(cudaMemcpy(g_pf4SumStokes, g_sumBatch1+DEF_LEN_ODATA/32, (DEF_LEN_ODATA/32 * sizeof(float)), cudaMemcpyDeviceToHost)); CUDASafeCallWithCleanUp(cudaMemcpy(g_pf4SumStokes + DEF_LEN_ODATA/32, g_sumBatch2+DEF_LEN_ODATA/32, (DEF_LEN_ODATA/32 * sizeof(float)), cudaMemcpyDeviceToHost)); CUDASafeCallWithCleanUp(cudaMemcpy(g_pf4SumStokes + DEF_LEN_ODATA/16, g_sumBatch3+DEF_LEN_ODATA/32, (DEF_LEN_ODATA/32 * sizeof(float)), cudaMemcpyDeviceToHost)); CUDASafeCallWithCleanUp(cudaMemcpy(g_pf4SumStokes + DEF_LEN_ODATA*3/32, g_sumBatch4+DEF_LEN_ODATA/32, (DEF_LEN_ODATA/32 * sizeof(float)), cudaMemcpyDeviceToHost)); CUDASafeCallWithCleanUp(cudaMemcpy(g_pf4SumStokes + DEF_LEN_ODATA/8, g_sumBatch5+DEF_LEN_ODATA/32, (DEF_LEN_ODATA/32 * sizeof(float)), cudaMemcpyDeviceToHost)); memcpy(db_out->block[curblock_out].Stokes_Full+SIZEOF_OUT_STOKES*n_spec,g_pf4SumStokes,SIZEOF_OUT_STOKES*sizeof(float)); //printf("Stokes to output done!\n"); n_spec++; /* reset time */ iSpecCount = 0; /* zero accumulators */ CUDASafeCallWithCleanUp(cudaMemset(g_pf4SumStokes_d, '\0', (DEF_LEN_IDATA * sizeof(float)))); CUDASafeCallWithCleanUp(cudaMemset(g_sumBatch2, '\0', (DEF_LEN_ODATA * sizeof(float)))); CUDASafeCallWithCleanUp(cudaMemset(g_sumBatch1, '\0', (DEF_LEN_ODATA * sizeof(float)))); CUDASafeCallWithCleanUp(cudaMemset(g_sumBatch3, '\0', (DEF_LEN_ODATA * sizeof(float)))); CUDASafeCallWithCleanUp(cudaMemset(g_sumBatch4, '\0', (DEF_LEN_ODATA * sizeof(float)))); CUDASafeCallWithCleanUp(cudaMemset(g_sumBatch5, '\0', (DEF_LEN_ODATA * sizeof(float)))); /* if time to read from input buffer */ iProcData = 0; (void) gettimeofday(&stStop, NULL); /*(void) printf("Time taken (barring Init()): %gs\n", ((stStop.tv_sec + (stStop.tv_usec * USEC2SEC)) - (stStart.tv_sec + (stStart.tv_usec * USEC2SEC))));*/ //return EXIT_SUCCESS; //display number of frames in status hashpipe_status_lock_safe(&st); hputi4(st.buf,"NFRAMES",n_frames); hashpipe_status_unlock_safe(&st); // Mark output block as 
full and advance demo4_output_databuf_set_filled(db_out, curblock_out); curblock_out = (curblock_out + 1) % db_out->header.n_block; // Mark input block as free and advance //demo4_input_databuf_set_free(db_in, curblock_in); //curblock_in = (curblock_in + 1) % db_in->header.n_block; mcnt++; /* Check for cancel */ pthread_testcancel(); return EXIT_SUCCESS; } CleanUp(); } static hashpipe_thread_desc_t demo4_gpu_thread = { name: "demo4_gpu_thread", skey: "GPUSTAT", init: Init, //init: NULL, run: run, ibuf_desc: {demo4_input_databuf_create}, obuf_desc: {demo4_output_databuf_create} }; static __attribute__((constructor)) void ctor() { register_hashpipe_thread(&demo4_gpu_thread); } #ifdef __cplusplus } #endif
5e9899123d5bb1f0fefef3959a00b62a5032a780.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <fstream> #include <sstream> #include <vector> #include <string> #include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <cstdlib> #include <math.h> #include <sys/time.h> #include <hip/hip_runtime.h> #include "kernels.h" using namespace std; //************************************************************************************************************************************************ // This code projects the ROIs selected on a virtual EPID for every 0.5 degree and creates the Centroid.txt and the Results.txt files. Centroid.txt contains // the mean position of each projection and Results.txt contains the position of each points forming the contours of the projections. //************************************************************************************************************************************************ int main (int argc, char **argv) { //~ time_t timer; //~ cout <<time(&timer)<<endl; //~ sleep(2); //~ cout <<time(&timer)<<endl; float *sourcePoint = new float[3]; float *resolution = new float[3]; float *isocenter = new float[3]; float *refPointCT = new float[3]; int *sens = new int[3]; string PATH = argv[1]; isocenter[0] = atof(argv[2]); isocenter[1] = atof(argv[3]); isocenter[2] = atof(argv[4]); sens[0] = atoi(argv[5]); sens[1] = atoi(argv[6]); sens[2] = atoi(argv[7]); string Patient_id = argv[8]; resolution[0] = atof(argv[9]); resolution[1] = atof(argv[10]); resolution[2] = atof(argv[11]); refPointCT[0] = atof(argv[12]); refPointCT[1] = atof(argv[13]); refPointCT[2] = atof(argv[14]); int NbOfROIs = atoi(argv[15]); int NbRows = atoi(argv[16]); int NbCols = atoi(argv[17]); int NbSlices = atoi(argv[18]); string ROIName; struct timeval tp; long int start; long int end; int NbAngles = 720; int Xmin; int Xmax; int Ymin; int Ymax; int Zmin; int Zmax; int PanelNbPixels = 512; float PixDimEPID = 0.252*1024/PanelNbPixels*1.6; float BeamAngle = 0.0; float 
TableAngle = 0.0; float PI = 3.14159; int *Label = new int [NbSlices*NbRows*NbCols]; int *im_EPID = new int [PanelNbPixels*PanelNbPixels]; int *Panel = new int[PanelNbPixels*PanelNbPixels*NbAngles]; for (int i = 0; i < PanelNbPixels*PanelNbPixels*NbAngles; i++) { Panel[i] = 0; } ifstream RoiFile; RoiFile.open("./listROI.txt"); ofstream Centroid("./StructureProjection/Centroid.txt"); ofstream Results("./StructureProjection/Results.txt"); Results << NbOfROIs << "\t" << endl; Results << NbAngles << endl; for(int N=0; N<NbOfROIs; N++){ RoiFile>>ROIName; RoiFile>>Xmin;RoiFile>>Xmax;RoiFile>>Ymin;RoiFile>>Ymax;RoiFile>>Zmin;RoiFile>>Zmax; Results << ROIName << endl; Centroid << ROIName << endl; streampos size =NbSlices*NbRows*NbCols; char * memblock; memblock = new char [size]; ifstream myfile(("./ROIs/"+Patient_id+"."+ROIName).c_str(),ios::in|ios::binary); if (myfile.is_open()){ myfile.read (memblock, size); myfile.close(); } else{cout << "Unable to open file"<<endl;} int val; int pos = 0; for (int i = 0; i < NbSlices; i++) { for (int j = 0; j < NbRows; j++) { for (int k = 0; k < NbCols; k++) { val = *(unsigned char *)&memblock[pos]; Label[i*NbRows*NbCols+j*NbCols+k] = (int)val; pos = pos + 1; } } } myfile.close(); delete[] memblock; //~ start = std::chrono::system_clock::now(); hipError_t err; size = NbSlices*NbRows*NbCols * sizeof(int); float *d_Label = NULL; err = hipMalloc((void **)&d_Label, size); err = hipMemcpy(d_Label, Label, size, hipMemcpyHostToDevice); size = 3 * sizeof(float); float *d_resolution = NULL; err = hipMalloc((void **)&d_resolution, size); err = hipMemcpy(d_resolution, resolution, size, hipMemcpyHostToDevice); float offsetX_dicom = (isocenter[0] - refPointCT[0])*sens[0]; float offsetY_dicom = (isocenter[1] - refPointCT[1])*sens[1]; float offsetZ_dicom = (isocenter[2] - refPointCT[2])*sens[2]+(NbSlices-1)*resolution[2]; gettimeofday(&tp, NULL); start = tp.tv_sec * 1000 + tp.tv_usec / 1000; int threadsPerBlock = PanelNbPixels; int blocksPerGrid = 
PanelNbPixels; for(int z=0;z<NbAngles;z++){ BeamAngle = (float)(z)*0.5; sourcePoint[0] = 1000 * sin(BeamAngle*PI / 180.0); sourcePoint[1] = -1000 * cos(BeamAngle*PI / 180.0); sourcePoint[2] = 0.0; for (int i = 0; i < PanelNbPixels*PanelNbPixels; i++) { im_EPID[i] = 0; } size = 3 * sizeof(float); float *d_sourcePoint = NULL; err = hipMalloc((void **)&d_sourcePoint, size); err = hipMemcpy(d_sourcePoint, sourcePoint, size, hipMemcpyHostToDevice); size = PanelNbPixels*PanelNbPixels*sizeof(int); int *d_im_EPID = NULL; err = hipMalloc((void **)&d_im_EPID, size); err = hipMemcpy(d_im_EPID, im_EPID, size, hipMemcpyHostToDevice); ProjectionGPU<< < blocksPerGrid, threadsPerBlock >> >(d_Label, d_im_EPID, d_sourcePoint, d_resolution, BeamAngle, NbRows, NbCols, NbSlices, PanelNbPixels, PixDimEPID, TableAngle, Xmin, Xmax, Ymin, Ymax, Zmin, Zmax, offsetX_dicom, offsetY_dicom, offsetZ_dicom); size = PanelNbPixels*PanelNbPixels* sizeof(int); err = hipMemcpy(im_EPID, d_im_EPID, size, hipMemcpyDeviceToHost); //~ cout <<"err: "<< err << endl; float NbOfPoints=0; float mean_i = 0; float mean_j = 0; for(int i=0;i<PanelNbPixels;i++){ for(int j=0;j<PanelNbPixels;j++){ if(im_EPID[i*PanelNbPixels + j] ==1){ NbOfPoints = NbOfPoints + 1; mean_i = mean_i +i; mean_j = mean_j +j; if(i==0 or j==0 or i==PanelNbPixels-1 or j==PanelNbPixels-1){ Panel[z*PanelNbPixels*PanelNbPixels+(PanelNbPixels-1-j)*PanelNbPixels+i] = 1; } else{ if(im_EPID[(i+1)*PanelNbPixels + j]==1 and im_EPID[(i-1)*PanelNbPixels + j]==1 and im_EPID[i*PanelNbPixels + j+1]==1 and im_EPID[i*PanelNbPixels + j-1]==1 and im_EPID[(i+1)*PanelNbPixels + j+1]==1 and im_EPID[(i-1)*PanelNbPixels + j+1]==1 and im_EPID[(i+1)*PanelNbPixels + j-1]==1 and im_EPID[(i-1)*PanelNbPixels + j-1]==1){ Panel[z*PanelNbPixels*PanelNbPixels+(PanelNbPixels-1-j)*PanelNbPixels+i] = 0; } else{Panel[z*PanelNbPixels*PanelNbPixels+(PanelNbPixels-1-j)*PanelNbPixels+i] = 1;} } } else{Panel[z*PanelNbPixels*PanelNbPixels+(PanelNbPixels-1-j)*PanelNbPixels+i] = 0;} } } 
hipFree(d_sourcePoint); hipFree(d_im_EPID); mean_i = mean_i/NbOfPoints; mean_j = mean_j/NbOfPoints; Centroid << BeamAngle << "\t" << mean_i << "\t" << mean_j <<endl; } for (int z = 0; z < NbAngles; z++){ vector<int> Contour_X; vector<int> Contour_Y; for (int x = 0; x < PanelNbPixels; x++){ for (int y = 0; y < PanelNbPixels; y++){ if (Panel[z*PanelNbPixels*PanelNbPixels + y * PanelNbPixels + x] == 1){ Contour_X.push_back(x); Contour_Y.push_back(y); } } } for(int v=0;v<Contour_X.size();v++){ Results << Contour_X.at(v)*256.0/PanelNbPixels <<"\t" << Contour_Y.at(v)*256.0/PanelNbPixels <<"\t"; } Results << endl; Contour_X.clear(); Contour_Y.clear(); } hipFree(d_Label); hipFree(d_resolution); cout<<ROIName<<endl; } Centroid.close(); Results.close(); RoiFile.close(); delete[] sourcePoint; sourcePoint = NULL; delete[] resolution; resolution = NULL; delete[] isocenter; isocenter = NULL; delete[] refPointCT; refPointCT = NULL; delete[] sens; sens = NULL; delete[] Label; Label = NULL; delete[] im_EPID; im_EPID = NULL; delete[] Panel; Panel = NULL; gettimeofday(&tp, NULL); end = tp.tv_sec * 1000 + tp.tv_usec / 1000; cout<<end-start<<endl; cout<<"Fin du programme"<<endl; return 0; } //=============================================================================================================================================
5e9899123d5bb1f0fefef3959a00b62a5032a780.cu
#include <iostream> #include <fstream> #include <sstream> #include <vector> #include <string> #include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <cstdlib> #include <math.h> #include <sys/time.h> #include <cuda_runtime.h> #include "kernels.h" using namespace std; //************************************************************************************************************************************************ // This code projects the ROIs selected on a virtual EPID for every 0.5 degree and creates the Centroid.txt and the Results.txt files. Centroid.txt contains // the mean position of each projection and Results.txt contains the position of each points forming the contours of the projections. //************************************************************************************************************************************************ int main (int argc, char **argv) { //~ time_t timer; //~ cout <<time(&timer)<<endl; //~ sleep(2); //~ cout <<time(&timer)<<endl; float *sourcePoint = new float[3]; float *resolution = new float[3]; float *isocenter = new float[3]; float *refPointCT = new float[3]; int *sens = new int[3]; string PATH = argv[1]; isocenter[0] = atof(argv[2]); isocenter[1] = atof(argv[3]); isocenter[2] = atof(argv[4]); sens[0] = atoi(argv[5]); sens[1] = atoi(argv[6]); sens[2] = atoi(argv[7]); string Patient_id = argv[8]; resolution[0] = atof(argv[9]); resolution[1] = atof(argv[10]); resolution[2] = atof(argv[11]); refPointCT[0] = atof(argv[12]); refPointCT[1] = atof(argv[13]); refPointCT[2] = atof(argv[14]); int NbOfROIs = atoi(argv[15]); int NbRows = atoi(argv[16]); int NbCols = atoi(argv[17]); int NbSlices = atoi(argv[18]); string ROIName; struct timeval tp; long int start; long int end; int NbAngles = 720; int Xmin; int Xmax; int Ymin; int Ymax; int Zmin; int Zmax; int PanelNbPixels = 512; float PixDimEPID = 0.252*1024/PanelNbPixels*1.6; float BeamAngle = 0.0; float TableAngle = 0.0; float PI = 3.14159; int *Label = new int 
[NbSlices*NbRows*NbCols]; int *im_EPID = new int [PanelNbPixels*PanelNbPixels]; int *Panel = new int[PanelNbPixels*PanelNbPixels*NbAngles]; for (int i = 0; i < PanelNbPixels*PanelNbPixels*NbAngles; i++) { Panel[i] = 0; } ifstream RoiFile; RoiFile.open("./listROI.txt"); ofstream Centroid("./StructureProjection/Centroid.txt"); ofstream Results("./StructureProjection/Results.txt"); Results << NbOfROIs << "\t" << endl; Results << NbAngles << endl; for(int N=0; N<NbOfROIs; N++){ RoiFile>>ROIName; RoiFile>>Xmin;RoiFile>>Xmax;RoiFile>>Ymin;RoiFile>>Ymax;RoiFile>>Zmin;RoiFile>>Zmax; Results << ROIName << endl; Centroid << ROIName << endl; streampos size =NbSlices*NbRows*NbCols; char * memblock; memblock = new char [size]; ifstream myfile(("./ROIs/"+Patient_id+"."+ROIName).c_str(),ios::in|ios::binary); if (myfile.is_open()){ myfile.read (memblock, size); myfile.close(); } else{cout << "Unable to open file"<<endl;} int val; int pos = 0; for (int i = 0; i < NbSlices; i++) { for (int j = 0; j < NbRows; j++) { for (int k = 0; k < NbCols; k++) { val = *(unsigned char *)&memblock[pos]; Label[i*NbRows*NbCols+j*NbCols+k] = (int)val; pos = pos + 1; } } } myfile.close(); delete[] memblock; //~ start = std::chrono::system_clock::now(); cudaError_t err; size = NbSlices*NbRows*NbCols * sizeof(int); float *d_Label = NULL; err = cudaMalloc((void **)&d_Label, size); err = cudaMemcpy(d_Label, Label, size, cudaMemcpyHostToDevice); size = 3 * sizeof(float); float *d_resolution = NULL; err = cudaMalloc((void **)&d_resolution, size); err = cudaMemcpy(d_resolution, resolution, size, cudaMemcpyHostToDevice); float offsetX_dicom = (isocenter[0] - refPointCT[0])*sens[0]; float offsetY_dicom = (isocenter[1] - refPointCT[1])*sens[1]; float offsetZ_dicom = (isocenter[2] - refPointCT[2])*sens[2]+(NbSlices-1)*resolution[2]; gettimeofday(&tp, NULL); start = tp.tv_sec * 1000 + tp.tv_usec / 1000; int threadsPerBlock = PanelNbPixels; int blocksPerGrid = PanelNbPixels; for(int z=0;z<NbAngles;z++){ BeamAngle 
= (float)(z)*0.5; sourcePoint[0] = 1000 * sin(BeamAngle*PI / 180.0); sourcePoint[1] = -1000 * cos(BeamAngle*PI / 180.0); sourcePoint[2] = 0.0; for (int i = 0; i < PanelNbPixels*PanelNbPixels; i++) { im_EPID[i] = 0; } size = 3 * sizeof(float); float *d_sourcePoint = NULL; err = cudaMalloc((void **)&d_sourcePoint, size); err = cudaMemcpy(d_sourcePoint, sourcePoint, size, cudaMemcpyHostToDevice); size = PanelNbPixels*PanelNbPixels*sizeof(int); int *d_im_EPID = NULL; err = cudaMalloc((void **)&d_im_EPID, size); err = cudaMemcpy(d_im_EPID, im_EPID, size, cudaMemcpyHostToDevice); ProjectionGPU<< < blocksPerGrid, threadsPerBlock >> >(d_Label, d_im_EPID, d_sourcePoint, d_resolution, BeamAngle, NbRows, NbCols, NbSlices, PanelNbPixels, PixDimEPID, TableAngle, Xmin, Xmax, Ymin, Ymax, Zmin, Zmax, offsetX_dicom, offsetY_dicom, offsetZ_dicom); size = PanelNbPixels*PanelNbPixels* sizeof(int); err = cudaMemcpy(im_EPID, d_im_EPID, size, cudaMemcpyDeviceToHost); //~ cout <<"err: "<< err << endl; float NbOfPoints=0; float mean_i = 0; float mean_j = 0; for(int i=0;i<PanelNbPixels;i++){ for(int j=0;j<PanelNbPixels;j++){ if(im_EPID[i*PanelNbPixels + j] ==1){ NbOfPoints = NbOfPoints + 1; mean_i = mean_i +i; mean_j = mean_j +j; if(i==0 or j==0 or i==PanelNbPixels-1 or j==PanelNbPixels-1){ Panel[z*PanelNbPixels*PanelNbPixels+(PanelNbPixels-1-j)*PanelNbPixels+i] = 1; } else{ if(im_EPID[(i+1)*PanelNbPixels + j]==1 and im_EPID[(i-1)*PanelNbPixels + j]==1 and im_EPID[i*PanelNbPixels + j+1]==1 and im_EPID[i*PanelNbPixels + j-1]==1 and im_EPID[(i+1)*PanelNbPixels + j+1]==1 and im_EPID[(i-1)*PanelNbPixels + j+1]==1 and im_EPID[(i+1)*PanelNbPixels + j-1]==1 and im_EPID[(i-1)*PanelNbPixels + j-1]==1){ Panel[z*PanelNbPixels*PanelNbPixels+(PanelNbPixels-1-j)*PanelNbPixels+i] = 0; } else{Panel[z*PanelNbPixels*PanelNbPixels+(PanelNbPixels-1-j)*PanelNbPixels+i] = 1;} } } else{Panel[z*PanelNbPixels*PanelNbPixels+(PanelNbPixels-1-j)*PanelNbPixels+i] = 0;} } } cudaFree(d_sourcePoint); cudaFree(d_im_EPID); 
mean_i = mean_i/NbOfPoints; mean_j = mean_j/NbOfPoints; Centroid << BeamAngle << "\t" << mean_i << "\t" << mean_j <<endl; } for (int z = 0; z < NbAngles; z++){ vector<int> Contour_X; vector<int> Contour_Y; for (int x = 0; x < PanelNbPixels; x++){ for (int y = 0; y < PanelNbPixels; y++){ if (Panel[z*PanelNbPixels*PanelNbPixels + y * PanelNbPixels + x] == 1){ Contour_X.push_back(x); Contour_Y.push_back(y); } } } for(int v=0;v<Contour_X.size();v++){ Results << Contour_X.at(v)*256.0/PanelNbPixels <<"\t" << Contour_Y.at(v)*256.0/PanelNbPixels <<"\t"; } Results << endl; Contour_X.clear(); Contour_Y.clear(); } cudaFree(d_Label); cudaFree(d_resolution); cout<<ROIName<<endl; } Centroid.close(); Results.close(); RoiFile.close(); delete[] sourcePoint; sourcePoint = NULL; delete[] resolution; resolution = NULL; delete[] isocenter; isocenter = NULL; delete[] refPointCT; refPointCT = NULL; delete[] sens; sens = NULL; delete[] Label; Label = NULL; delete[] im_EPID; im_EPID = NULL; delete[] Panel; Panel = NULL; gettimeofday(&tp, NULL); end = tp.tv_sec * 1000 + tp.tv_usec / 1000; cout<<end-start<<endl; cout<<"Fin du programme"<<endl; return 0; } //=============================================================================================================================================
3a51751f43920eef48601db0f5387d4b5e870c12.hip
// !!! This is a file automatically generated by hipify!!! #include "../../config.h" #include "mtbs_cu.h" #include <sys/times.h> unsigned n_sm_count; unsigned n_threads_per_MTB; /* per macro TB */ unsigned n_MTBs_per_sm; static struct timespec started_ts; __device__ uint get_smid(void) { uint ret; asm("mov.u32 %0, %smid;" : "=r"(ret)); return ret; } __device__ uint get_laneid(void) { uint ret; asm volatile("mov.u32 %0, %laneid;" : "=r"(ret)); return ret; } __device__ void sleep_in_kernel(void) { #if CUDA_COMPUTE >= 70 asm("nanosleep.u32 1;"); #else static __device__ volatile int dummy; dummy++; #endif } unsigned long long get_ticks(void) { struct timespec ts; clock_gettime(CLOCK_MONOTONIC, &ts); return ts.tv_sec * 1000000 + ts.tv_nsec / 1000; } extern "C" void error(const char *fmt, ...) { char *msg; va_list ap; int n; va_start(ap, fmt); n = vasprintf(&msg, fmt, ap); va_end(ap); if (n >= 0) { fprintf(stderr, "error: %s\n", msg); free(msg); } } const char * get_cuda_error_msg(hipError_t err) { const char *msg; hipError_t res; res = hipGetErrorString(err, &msg); if (res != hipSuccess) return ""; return msg; } void init_tickcount(void) { clock_gettime(CLOCK_MONOTONIC, &started_ts); } /* microsecs */ unsigned get_tickcount(void) { struct timespec ts; unsigned ticks; clock_gettime(CLOCK_MONOTONIC, &ts); if (ts.tv_nsec < started_ts.tv_nsec) { ticks = ((unsigned)(ts.tv_sec - started_ts.tv_sec - 1)) * 1000000; ticks += (1000000000 + ts.tv_nsec - started_ts.tv_nsec) / 1000; } else { ticks = ((unsigned)(ts.tv_sec - started_ts.tv_sec)) * 1000000; ticks += (ts.tv_nsec - started_ts.tv_nsec) / 1000; } return ticks; }
3a51751f43920eef48601db0f5387d4b5e870c12.cu
#include "../../config.h" #include "mtbs_cu.h" #include <sys/times.h> unsigned n_sm_count; unsigned n_threads_per_MTB; /* per macro TB */ unsigned n_MTBs_per_sm; static struct timespec started_ts; __device__ uint get_smid(void) { uint ret; asm("mov.u32 %0, %smid;" : "=r"(ret)); return ret; } __device__ uint get_laneid(void) { uint ret; asm volatile("mov.u32 %0, %laneid;" : "=r"(ret)); return ret; } __device__ void sleep_in_kernel(void) { #if CUDA_COMPUTE >= 70 asm("nanosleep.u32 1;"); #else static __device__ volatile int dummy; dummy++; #endif } unsigned long long get_ticks(void) { struct timespec ts; clock_gettime(CLOCK_MONOTONIC, &ts); return ts.tv_sec * 1000000 + ts.tv_nsec / 1000; } extern "C" void error(const char *fmt, ...) { char *msg; va_list ap; int n; va_start(ap, fmt); n = vasprintf(&msg, fmt, ap); va_end(ap); if (n >= 0) { fprintf(stderr, "error: %s\n", msg); free(msg); } } const char * get_cuda_error_msg(CUresult err) { const char *msg; CUresult res; res = cuGetErrorString(err, &msg); if (res != CUDA_SUCCESS) return ""; return msg; } void init_tickcount(void) { clock_gettime(CLOCK_MONOTONIC, &started_ts); } /* microsecs */ unsigned get_tickcount(void) { struct timespec ts; unsigned ticks; clock_gettime(CLOCK_MONOTONIC, &ts); if (ts.tv_nsec < started_ts.tv_nsec) { ticks = ((unsigned)(ts.tv_sec - started_ts.tv_sec - 1)) * 1000000; ticks += (1000000000 + ts.tv_nsec - started_ts.tv_nsec) / 1000; } else { ticks = ((unsigned)(ts.tv_sec - started_ts.tv_sec)) * 1000000; ticks += (ts.tv_nsec - started_ts.tv_nsec) / 1000; } return ticks; }
c2688ca0f82c173dc031747769fcb0378a137336.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" extern "C" { } __global__ void elSq(int N, int M, float *Mat) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int index = j*N + i; if (i < N && j < M) { Mat[index] = __fmul_rn(Mat[index], Mat[index]); } }
c2688ca0f82c173dc031747769fcb0378a137336.cu
#include "includes.h" extern "C" { } __global__ void elSq(int N, int M, float *Mat) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int index = j*N + i; if (i < N && j < M) { Mat[index] = __fmul_rn(Mat[index], Mat[index]); } }
3e539a5d133fe3eb5636bf309cbb3a1fdd8ebfb6.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hip/hip_runtime.h> #include <time.h> struct Startup{ int seed = time(nullptr); int threadsPerBlock = 256; int datasetSize = 10000; int range = 100; } startup; struct DataSet{ float* values; int size; }; inline int sizeOfDataSet(DataSet data) { return sizeof(float)*data.size; } DataSet generateRandomDataSet(int size){ DataSet data; data.size = size; data.values = (float*)malloc(sizeof(float)*data.size); for (int i = 0; i < data.size; i++) data.values[i] = (float)(rand()%startup.range); return data; } bool CompareDataSet(DataSet d1, DataSet d2){ for (int i = 0; i < d1.size; i++) if (d1.values[i] != d2.values[i]){ printf("Dataset is different"); return false; } if (d1.size != d2.size) {printf("Datasets are not equal size\n"); return false;}; printf("D1 and D2 are equal!"); return true; } __global__ void DeviceCalculateSM_Global(float* input, int input_size, float* result, int result_size, int sample_size){ int id_x = blockDim.x * blockIdx.x + threadIdx.x; float sum = 0; if (id_x < result_size){ for (int i = 0; i < sample_size; i++) sum = sum + input[id_x+i]; sum = sum/sample_size; result[id_x] = sum; } } __global__ void DeviceCalculateSM_Shared(float* input, int input_size, float* result, int result_size, int sample_size){ int id_x = blockDim.x * blockIdx.x + threadIdx.x; if (id_x < input_size){ extern __shared__ float cache[]; int cachedSize = sample_size + blockDim.x; for (int i = 0; i < cachedSize/blockDim.x+1; i++){ int cacheId = threadIdx.x+ i*blockDim.x; if (cacheId < cachedSize && cacheId+blockDim.x *blockIdx.x < input_size) cache[cacheId] = input[cacheId+blockDim.x *blockIdx.x]; } __syncthreads(); float sum = 0; for (int i = 0; i < sample_size; i++){ if(i + threadIdx.x < cachedSize && i + id_x < input_size) sum = sum + cache[i+threadIdx.x]; } sum = sum/sample_size; /*store in global memory*/ if (id_x < result_size) result[id_x] = sum; } } DataSet CalculateSM(DataSet input, 
int sample_size, bool usesharedmemory){ if(sample_size == 1 && input.size < 1 && sample_size < 1 && sample_size > input.size) { printf("Error! Invalid Sample Size"); exit(-1); } int result_size = input.size-sample_size+1; DataSet host_result = {(float*)malloc(sizeof(float)*(result_size)), result_size}; float* device_input, *device_result; int threads_needed = host_result.size; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); if (usesharedmemory){ int shared_memory_allocation_size = sizeof(float)*(startup.threadsPerBlock+sample_size); hipEventRecord(start); hipLaunchKernelGGL(( DeviceCalculateSM_Shared), dim3(threads_needed/ startup.threadsPerBlock + 1), dim3(startup.threadsPerBlock), shared_memory_allocation_size, 0, device_input, input.size, device_result, host_result.size, sample_size); hipEventRecord(stop); }else{ hipEventRecord(start); hipLaunchKernelGGL(( DeviceCalculateSM_Global), dim3(threads_needed/ startup.threadsPerBlock + 1), dim3(startup.threadsPerBlock), 0, 0, device_input, input.size, device_result, host_result.size, sample_size); hipEventRecord(stop); } hipEventSynchronize(stop); float milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); if (usesharedmemory) printf("Shared Memory: "); else printf("Global Memory: "); printf("Kernel executed in %f milliseconds\n", milliseconds); return host_result; } void printDataSet(DataSet data){ for (int i = 0; i < data.size; i++) printf("%.6g, ", data.values[i]); printf("\n"); } int main(int argc, char** argv){ for (int i = 0; i < argc; i++){ if (strcmp(argv[i], "Range")==0 && i+1 < argc) startup.range = atoi(argv[i+1]); if (strcmp(argv[i], "Seed")==0 && i+1 < argc) startup.seed = atoi(argv[i+1]); if (strcmp(argv[i], "Block threads")==0 && i+1 < argc) startup.threadsPerBlock = atoi(argv[i+1]); } srand(startup.seed); DataSet data = generateRandomDataSet(100); printDataSet( data ); DataSet shared = CalculateSM(data, 2, true); DataSet global = CalculateSM(data, 2, false); 
printDataSet( shared ); printf("\n"); printDataSet( global ); printf("\n"); printf("Each should be %d elements in size\n", global.size); CompareDataSet(global, shared); }
3e539a5d133fe3eb5636bf309cbb3a1fdd8ebfb6.cu
#include <stdio.h> #include <cuda.h> #include <time.h> struct Startup{ int seed = time(nullptr); int threadsPerBlock = 256; int datasetSize = 10000; int range = 100; } startup; struct DataSet{ float* values; int size; }; inline int sizeOfDataSet(DataSet data) { return sizeof(float)*data.size; } DataSet generateRandomDataSet(int size){ DataSet data; data.size = size; data.values = (float*)malloc(sizeof(float)*data.size); for (int i = 0; i < data.size; i++) data.values[i] = (float)(rand()%startup.range); return data; } bool CompareDataSet(DataSet d1, DataSet d2){ for (int i = 0; i < d1.size; i++) if (d1.values[i] != d2.values[i]){ printf("Dataset is different"); return false; } if (d1.size != d2.size) {printf("Datasets are not equal size\n"); return false;}; printf("D1 and D2 are equal!"); return true; } __global__ void DeviceCalculateSM_Global(float* input, int input_size, float* result, int result_size, int sample_size){ int id_x = blockDim.x * blockIdx.x + threadIdx.x; float sum = 0; if (id_x < result_size){ for (int i = 0; i < sample_size; i++) sum = sum + input[id_x+i]; sum = sum/sample_size; result[id_x] = sum; } } __global__ void DeviceCalculateSM_Shared(float* input, int input_size, float* result, int result_size, int sample_size){ int id_x = blockDim.x * blockIdx.x + threadIdx.x; if (id_x < input_size){ extern __shared__ float cache[]; int cachedSize = sample_size + blockDim.x; for (int i = 0; i < cachedSize/blockDim.x+1; i++){ int cacheId = threadIdx.x+ i*blockDim.x; if (cacheId < cachedSize && cacheId+blockDim.x *blockIdx.x < input_size) cache[cacheId] = input[cacheId+blockDim.x *blockIdx.x]; } __syncthreads(); float sum = 0; for (int i = 0; i < sample_size; i++){ if(i + threadIdx.x < cachedSize && i + id_x < input_size) sum = sum + cache[i+threadIdx.x]; } sum = sum/sample_size; /*store in global memory*/ if (id_x < result_size) result[id_x] = sum; } } DataSet CalculateSM(DataSet input, int sample_size, bool usesharedmemory){ if(sample_size == 1 && 
input.size < 1 && sample_size < 1 && sample_size > input.size) { printf("Error! Invalid Sample Size"); exit(-1); } int result_size = input.size-sample_size+1; DataSet host_result = {(float*)malloc(sizeof(float)*(result_size)), result_size}; float* device_input, *device_result; int threads_needed = host_result.size; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); if (usesharedmemory){ int shared_memory_allocation_size = sizeof(float)*(startup.threadsPerBlock+sample_size); cudaEventRecord(start); DeviceCalculateSM_Shared<<<threads_needed/ startup.threadsPerBlock + 1, startup.threadsPerBlock, shared_memory_allocation_size>>> (device_input, input.size, device_result, host_result.size, sample_size); cudaEventRecord(stop); }else{ cudaEventRecord(start); DeviceCalculateSM_Global<<<threads_needed/ startup.threadsPerBlock + 1, startup.threadsPerBlock>>> (device_input, input.size, device_result, host_result.size, sample_size); cudaEventRecord(stop); } cudaEventSynchronize(stop); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); if (usesharedmemory) printf("Shared Memory: "); else printf("Global Memory: "); printf("Kernel executed in %f milliseconds\n", milliseconds); return host_result; } void printDataSet(DataSet data){ for (int i = 0; i < data.size; i++) printf("%.6g, ", data.values[i]); printf("\n"); } int main(int argc, char** argv){ for (int i = 0; i < argc; i++){ if (strcmp(argv[i], "Range")==0 && i+1 < argc) startup.range = atoi(argv[i+1]); if (strcmp(argv[i], "Seed")==0 && i+1 < argc) startup.seed = atoi(argv[i+1]); if (strcmp(argv[i], "Block threads")==0 && i+1 < argc) startup.threadsPerBlock = atoi(argv[i+1]); } srand(startup.seed); DataSet data = generateRandomDataSet(100); printDataSet( data ); DataSet shared = CalculateSM(data, 2, true); DataSet global = CalculateSM(data, 2, false); printDataSet( shared ); printf("\n"); printDataSet( global ); printf("\n"); printf("Each should be %d elements in size\n", 
global.size); CompareDataSet(global, shared); }
15a9d2aa19a26275a32660c2b0b19a9e06bcbaf9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*! * Copyright 2020-2021 by XGBoost Contributors */ #include <thrust/reduce.h> #include <thrust/iterator/transform_iterator.h> #include <algorithm> #include <ctgmath> #include <limits> #include "xgboost/base.h" #include "row_partitioner_hip.cuh" #include "histogram.cuh" #include "../../data/ellpack_page.cuh" #include "../../common/device_helpers.cuh" namespace xgboost { namespace tree { // Following 2 functions are slightly modified version of fbcuda. /* \brief Constructs a rounding factor used to truncate elements in a sum such that the sum of the truncated elements is the same no matter what the order of the sum is. * Algorithm 5: Reproducible Sequential Sum in 'Fast Reproducible Floating-Point * Summation' by Demmel and Nguyen * In algorithm 5 the bound is calculated as $max(|v_i|) * n$. Here we use the bound * * \begin{equation} * max( fl(\sum^{V}_{v_i>0}{v_i}), fl(\sum^{V}_{v_i<0}|v_i|) ) * \end{equation} * * to avoid outliers, as the full reduction is reproducible on GPU with reduction tree. */ template <typename T> T CreateRoundingFactor(T max_abs, int n) { T delta = max_abs / (static_cast<T>(1.0) - 2 * n * std::numeric_limits<T>::epsilon()); // Calculate ceil(log_2(delta)). // frexpf() calculates exp and returns `x` such that // delta = x * 2^exp, where `x` in (-1.0, -0.5] U [0.5, 1). // Because |x| < 1, exp is exactly ceil(log_2(delta)). int exp; ::frexp(delta, &exp); // return M = 2 ^ ceil(log_2(delta)) return std::ldexp(static_cast<T>(1.0), exp); } namespace { struct Pair { GradientPair first; GradientPair second; }; __host__ XGBOOST_DEV_INLINE Pair operator+(Pair const& lhs, Pair const& rhs) { return {lhs.first + rhs.first, lhs.second + rhs.second}; } } // anonymous namespace struct Clip : public thrust::unary_function<GradientPair, Pair> { static XGBOOST_DEV_INLINE float Pclip(float v) { return v > 0 ? 
v : 0; } static XGBOOST_DEV_INLINE float Nclip(float v) { return v < 0 ? abs(v) : 0; } XGBOOST_DEV_INLINE Pair operator()(GradientPair x) const { auto pg = Pclip(x.GetGrad()); auto ph = Pclip(x.GetHess()); auto ng = Nclip(x.GetGrad()); auto nh = Nclip(x.GetHess()); return { GradientPair{ pg, ph }, GradientPair{ ng, nh } }; } }; template <typename GradientSumT> HistRounding<GradientSumT> CreateRoundingFactor(common::Span<GradientPair const> gpair) { using T = typename GradientSumT::ValueT; dh::XGBCachingDeviceAllocator<char> alloc; thrust::device_ptr<GradientPair const> gpair_beg {gpair.data()}; thrust::device_ptr<GradientPair const> gpair_end {gpair.data() + gpair.size()}; auto beg = thrust::make_transform_iterator(gpair_beg, Clip()); auto end = thrust::make_transform_iterator(gpair_end, Clip()); Pair p = dh::Reduce(thrust::hip::par(alloc), beg, end, Pair{}, thrust::plus<Pair>{}); GradientPair positive_sum {p.first}, negative_sum {p.second}; auto histogram_rounding = GradientSumT { CreateRoundingFactor<T>(::max(positive_sum.GetGrad(), negative_sum.GetGrad()), gpair.size()), CreateRoundingFactor<T>(::max(positive_sum.GetHess(), negative_sum.GetHess()), gpair.size()) }; using IntT = typename HistRounding<GradientSumT>::SharedSumT::ValueT; /** * Factor for converting gradients from fixed-point to floating-point. */ GradientSumT to_floating_point = histogram_rounding / T(IntT(1) << (sizeof(typename GradientSumT::ValueT) * 8 - 2)); // keep 1 for sign bit /** * Factor for converting gradients from floating-point to fixed-point. For * f64: * * Precision = 64 - 1 - log2(rounding) * * rounding is calcuated as exp(m), see the rounding factor calcuation for * details. 
*/ GradientSumT to_fixed_point = GradientSumT( T(1) / to_floating_point.GetGrad(), T(1) / to_floating_point.GetHess()); return {histogram_rounding, to_fixed_point, to_floating_point}; } template HistRounding<GradientPairPrecise> CreateRoundingFactor(common::Span<GradientPair const> gpair); template HistRounding<GradientPair> CreateRoundingFactor(common::Span<GradientPair const> gpair); template <typename GradientSumT, bool use_shared_memory_histograms> __global__ void SharedMemHistKernel(EllpackDeviceAccessor matrix, FeatureGroupsAccessor feature_groups, common::Span<const RowPartitioner::RowIndexT> d_ridx, GradientSumT* __restrict__ d_node_hist, const GradientPair* __restrict__ d_gpair, HistRounding<GradientSumT> const rounding) { using SharedSumT = typename HistRounding<GradientSumT>::SharedSumT; using T = typename GradientSumT::ValueT; extern __shared__ char smem[]; FeatureGroup group = feature_groups[blockIdx.y]; SharedSumT *smem_arr = reinterpret_cast<SharedSumT *>(smem); if (use_shared_memory_histograms) { dh::BlockFill(smem_arr, group.num_bins, SharedSumT()); __syncthreads(); } int feature_stride = matrix.is_dense ? group.num_features : matrix.row_stride; size_t n_elements = feature_stride * d_ridx.size(); for (auto idx : dh::GridStrideRange(static_cast<size_t>(0), n_elements)) { int ridx = d_ridx[idx / feature_stride]; int gidx = matrix.gidx_iter[ridx * matrix.row_stride + group.start_feature + idx % feature_stride]; if (gidx != matrix.NumBins()) { // If we are not using shared memory, accumulate the values directly into // global memory gidx = use_shared_memory_histograms ? 
gidx - group.start_bin : gidx; if (use_shared_memory_histograms) { auto adjusted = rounding.ToFixedPoint(d_gpair[ridx]); dh::AtomicAddGpair(smem_arr + gidx, adjusted); } else { GradientSumT truncated{ TruncateWithRoundingFactor<T>(rounding.rounding.GetGrad(), d_gpair[ridx].GetGrad()), TruncateWithRoundingFactor<T>(rounding.rounding.GetHess(), d_gpair[ridx].GetHess()), }; dh::AtomicAddGpair(d_node_hist + gidx, truncated); } } } if (use_shared_memory_histograms) { // Write shared memory back to global memory __syncthreads(); for (auto i : dh::BlockStrideRange(0, group.num_bins)) { auto truncated = rounding.ToFloatingPoint(smem_arr[i]); dh::AtomicAddGpair(d_node_hist + group.start_bin + i, truncated); } } } template <typename GradientSumT> void BuildGradientHistogram(EllpackDeviceAccessor const& matrix, FeatureGroupsAccessor const& feature_groups, common::Span<GradientPair const> gpair, common::Span<const uint32_t> d_ridx, common::Span<GradientSumT> histogram, HistRounding<GradientSumT> rounding, bool force_global_memory) { // decide whether to use shared memory int device = 0; dh::safe_cuda(hipGetDevice(&device)); // opt into maximum shared memory for the kernel if necessary size_t max_shared_memory = dh::MaxSharedMemoryOptin(device); size_t smem_size = sizeof(typename HistRounding<GradientSumT>::SharedSumT) * feature_groups.max_group_bins; bool shared = !force_global_memory && smem_size <= max_shared_memory; smem_size = shared ? 
smem_size : 0; auto runit = [&](auto kernel) { if (shared) { dh::safe_cuda(hipFuncSetAttribute( kernel, hipFuncAttributeMaxDynamicSharedMemorySize, max_shared_memory)); } // determine the launch configuration int min_grid_size; int block_threads = 1024; dh::safe_cuda(hipOccupancyMaxPotentialBlockSize( &min_grid_size, &block_threads, kernel, smem_size, 0)); int num_groups = feature_groups.NumGroups(); int n_mps = 0; dh::safe_cuda( hipDeviceGetAttribute(&n_mps, hipDeviceAttributeMultiprocessorCount, device)); int n_blocks_per_mp = 0; dh::safe_cuda(hipOccupancyMaxActiveBlocksPerMultiprocessor( &n_blocks_per_mp, kernel, block_threads, smem_size)); unsigned grid_size = n_blocks_per_mp * n_mps; // TODO(canonizer): This is really a hack, find a better way to distribute // the data among thread blocks. The intention is to generate enough thread // blocks to fill the GPU, but avoid having too many thread blocks, as this // is less efficient when the number of rows is low. At least one thread // block per feature group is required. 
The number of thread blocks: // - for num_groups <= num_groups_threshold, around grid_size * num_groups // - for num_groups_threshold <= num_groups <= num_groups_threshold * // grid_size, // around grid_size * num_groups_threshold // - for num_groups_threshold * grid_size <= num_groups, around num_groups int num_groups_threshold = 4; grid_size = common::DivRoundUp( grid_size, common::DivRoundUp(num_groups, num_groups_threshold)); using T = typename GradientSumT::ValueT; dh::LaunchKernel {dim3(grid_size, num_groups), static_cast<uint32_t>(block_threads), smem_size} (kernel, matrix, feature_groups, d_ridx, histogram.data(), gpair.data(), rounding); }; if (shared) { runit(SharedMemHistKernel<GradientSumT, true>); } else { runit(SharedMemHistKernel<GradientSumT, false>); } dh::safe_cuda(hipGetLastError()); } template void BuildGradientHistogram<GradientPairPrecise>( EllpackDeviceAccessor const& matrix, FeatureGroupsAccessor const& feature_groups, common::Span<GradientPair const> gpair, common::Span<const uint32_t> ridx, common::Span<GradientPairPrecise> histogram, HistRounding<GradientPairPrecise> rounding, bool force_global_memory); } // namespace tree } // namespace xgboost
15a9d2aa19a26275a32660c2b0b19a9e06bcbaf9.cu
/*! * Copyright 2020-2021 by XGBoost Contributors */ #include <thrust/reduce.h> #include <thrust/iterator/transform_iterator.h> #include <algorithm> #include <ctgmath> #include <limits> #include "xgboost/base.h" #include "row_partitioner.cuh" #include "histogram.cuh" #include "../../data/ellpack_page.cuh" #include "../../common/device_helpers.cuh" namespace xgboost { namespace tree { // Following 2 functions are slightly modified version of fbcuda. /* \brief Constructs a rounding factor used to truncate elements in a sum such that the sum of the truncated elements is the same no matter what the order of the sum is. * Algorithm 5: Reproducible Sequential Sum in 'Fast Reproducible Floating-Point * Summation' by Demmel and Nguyen * In algorithm 5 the bound is calculated as $max(|v_i|) * n$. Here we use the bound * * \begin{equation} * max( fl(\sum^{V}_{v_i>0}{v_i}), fl(\sum^{V}_{v_i<0}|v_i|) ) * \end{equation} * * to avoid outliers, as the full reduction is reproducible on GPU with reduction tree. */ template <typename T> T CreateRoundingFactor(T max_abs, int n) { T delta = max_abs / (static_cast<T>(1.0) - 2 * n * std::numeric_limits<T>::epsilon()); // Calculate ceil(log_2(delta)). // frexpf() calculates exp and returns `x` such that // delta = x * 2^exp, where `x` in (-1.0, -0.5] U [0.5, 1). // Because |x| < 1, exp is exactly ceil(log_2(delta)). int exp; std::frexp(delta, &exp); // return M = 2 ^ ceil(log_2(delta)) return std::ldexp(static_cast<T>(1.0), exp); } namespace { struct Pair { GradientPair first; GradientPair second; }; __host__ XGBOOST_DEV_INLINE Pair operator+(Pair const& lhs, Pair const& rhs) { return {lhs.first + rhs.first, lhs.second + rhs.second}; } } // anonymous namespace struct Clip : public thrust::unary_function<GradientPair, Pair> { static XGBOOST_DEV_INLINE float Pclip(float v) { return v > 0 ? v : 0; } static XGBOOST_DEV_INLINE float Nclip(float v) { return v < 0 ? 
abs(v) : 0; } XGBOOST_DEV_INLINE Pair operator()(GradientPair x) const { auto pg = Pclip(x.GetGrad()); auto ph = Pclip(x.GetHess()); auto ng = Nclip(x.GetGrad()); auto nh = Nclip(x.GetHess()); return { GradientPair{ pg, ph }, GradientPair{ ng, nh } }; } }; template <typename GradientSumT> HistRounding<GradientSumT> CreateRoundingFactor(common::Span<GradientPair const> gpair) { using T = typename GradientSumT::ValueT; dh::XGBCachingDeviceAllocator<char> alloc; thrust::device_ptr<GradientPair const> gpair_beg {gpair.data()}; thrust::device_ptr<GradientPair const> gpair_end {gpair.data() + gpair.size()}; auto beg = thrust::make_transform_iterator(gpair_beg, Clip()); auto end = thrust::make_transform_iterator(gpair_end, Clip()); Pair p = dh::Reduce(thrust::cuda::par(alloc), beg, end, Pair{}, thrust::plus<Pair>{}); GradientPair positive_sum {p.first}, negative_sum {p.second}; auto histogram_rounding = GradientSumT { CreateRoundingFactor<T>(std::max(positive_sum.GetGrad(), negative_sum.GetGrad()), gpair.size()), CreateRoundingFactor<T>(std::max(positive_sum.GetHess(), negative_sum.GetHess()), gpair.size()) }; using IntT = typename HistRounding<GradientSumT>::SharedSumT::ValueT; /** * Factor for converting gradients from fixed-point to floating-point. */ GradientSumT to_floating_point = histogram_rounding / T(IntT(1) << (sizeof(typename GradientSumT::ValueT) * 8 - 2)); // keep 1 for sign bit /** * Factor for converting gradients from floating-point to fixed-point. For * f64: * * Precision = 64 - 1 - log2(rounding) * * rounding is calcuated as exp(m), see the rounding factor calcuation for * details. 
*/ GradientSumT to_fixed_point = GradientSumT( T(1) / to_floating_point.GetGrad(), T(1) / to_floating_point.GetHess()); return {histogram_rounding, to_fixed_point, to_floating_point}; } template HistRounding<GradientPairPrecise> CreateRoundingFactor(common::Span<GradientPair const> gpair); template HistRounding<GradientPair> CreateRoundingFactor(common::Span<GradientPair const> gpair); template <typename GradientSumT, bool use_shared_memory_histograms> __global__ void SharedMemHistKernel(EllpackDeviceAccessor matrix, FeatureGroupsAccessor feature_groups, common::Span<const RowPartitioner::RowIndexT> d_ridx, GradientSumT* __restrict__ d_node_hist, const GradientPair* __restrict__ d_gpair, HistRounding<GradientSumT> const rounding) { using SharedSumT = typename HistRounding<GradientSumT>::SharedSumT; using T = typename GradientSumT::ValueT; extern __shared__ char smem[]; FeatureGroup group = feature_groups[blockIdx.y]; SharedSumT *smem_arr = reinterpret_cast<SharedSumT *>(smem); if (use_shared_memory_histograms) { dh::BlockFill(smem_arr, group.num_bins, SharedSumT()); __syncthreads(); } int feature_stride = matrix.is_dense ? group.num_features : matrix.row_stride; size_t n_elements = feature_stride * d_ridx.size(); for (auto idx : dh::GridStrideRange(static_cast<size_t>(0), n_elements)) { int ridx = d_ridx[idx / feature_stride]; int gidx = matrix.gidx_iter[ridx * matrix.row_stride + group.start_feature + idx % feature_stride]; if (gidx != matrix.NumBins()) { // If we are not using shared memory, accumulate the values directly into // global memory gidx = use_shared_memory_histograms ? 
gidx - group.start_bin : gidx; if (use_shared_memory_histograms) { auto adjusted = rounding.ToFixedPoint(d_gpair[ridx]); dh::AtomicAddGpair(smem_arr + gidx, adjusted); } else { GradientSumT truncated{ TruncateWithRoundingFactor<T>(rounding.rounding.GetGrad(), d_gpair[ridx].GetGrad()), TruncateWithRoundingFactor<T>(rounding.rounding.GetHess(), d_gpair[ridx].GetHess()), }; dh::AtomicAddGpair(d_node_hist + gidx, truncated); } } } if (use_shared_memory_histograms) { // Write shared memory back to global memory __syncthreads(); for (auto i : dh::BlockStrideRange(0, group.num_bins)) { auto truncated = rounding.ToFloatingPoint(smem_arr[i]); dh::AtomicAddGpair(d_node_hist + group.start_bin + i, truncated); } } } template <typename GradientSumT> void BuildGradientHistogram(EllpackDeviceAccessor const& matrix, FeatureGroupsAccessor const& feature_groups, common::Span<GradientPair const> gpair, common::Span<const uint32_t> d_ridx, common::Span<GradientSumT> histogram, HistRounding<GradientSumT> rounding, bool force_global_memory) { // decide whether to use shared memory int device = 0; dh::safe_cuda(cudaGetDevice(&device)); // opt into maximum shared memory for the kernel if necessary size_t max_shared_memory = dh::MaxSharedMemoryOptin(device); size_t smem_size = sizeof(typename HistRounding<GradientSumT>::SharedSumT) * feature_groups.max_group_bins; bool shared = !force_global_memory && smem_size <= max_shared_memory; smem_size = shared ? 
smem_size : 0; auto runit = [&](auto kernel) { if (shared) { dh::safe_cuda(cudaFuncSetAttribute( kernel, cudaFuncAttributeMaxDynamicSharedMemorySize, max_shared_memory)); } // determine the launch configuration int min_grid_size; int block_threads = 1024; dh::safe_cuda(cudaOccupancyMaxPotentialBlockSize( &min_grid_size, &block_threads, kernel, smem_size, 0)); int num_groups = feature_groups.NumGroups(); int n_mps = 0; dh::safe_cuda( cudaDeviceGetAttribute(&n_mps, cudaDevAttrMultiProcessorCount, device)); int n_blocks_per_mp = 0; dh::safe_cuda(cudaOccupancyMaxActiveBlocksPerMultiprocessor( &n_blocks_per_mp, kernel, block_threads, smem_size)); unsigned grid_size = n_blocks_per_mp * n_mps; // TODO(canonizer): This is really a hack, find a better way to distribute // the data among thread blocks. The intention is to generate enough thread // blocks to fill the GPU, but avoid having too many thread blocks, as this // is less efficient when the number of rows is low. At least one thread // block per feature group is required. 
The number of thread blocks: // - for num_groups <= num_groups_threshold, around grid_size * num_groups // - for num_groups_threshold <= num_groups <= num_groups_threshold * // grid_size, // around grid_size * num_groups_threshold // - for num_groups_threshold * grid_size <= num_groups, around num_groups int num_groups_threshold = 4; grid_size = common::DivRoundUp( grid_size, common::DivRoundUp(num_groups, num_groups_threshold)); using T = typename GradientSumT::ValueT; dh::LaunchKernel {dim3(grid_size, num_groups), static_cast<uint32_t>(block_threads), smem_size} (kernel, matrix, feature_groups, d_ridx, histogram.data(), gpair.data(), rounding); }; if (shared) { runit(SharedMemHistKernel<GradientSumT, true>); } else { runit(SharedMemHistKernel<GradientSumT, false>); } dh::safe_cuda(cudaGetLastError()); } template void BuildGradientHistogram<GradientPairPrecise>( EllpackDeviceAccessor const& matrix, FeatureGroupsAccessor const& feature_groups, common::Span<GradientPair const> gpair, common::Span<const uint32_t> ridx, common::Span<GradientPairPrecise> histogram, HistRounding<GradientPairPrecise> rounding, bool force_global_memory); } // namespace tree } // namespace xgboost
3ebe9d767cd9c69090d53124df848bbb941b84dd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* This is a automatically generated test. Do not modify */ #include <stdio.h> #include <stdlib.h> #include <math.h> __global__ void compute(float comp, int var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14) { float tmp_1 = (+1.2464E29f / +1.9244E-44f); float tmp_2 = var_2 / +1.8943E-44f - (var_3 - -1.1311E14f); comp = tmp_2 - tmp_1 + -1.1825E-36f * var_4 * -1.1172E-37f; if (comp < (var_5 - ldexpf(atanf(var_6 * -1.3105E-35f * (+1.6858E-35f * var_7 - (+1.3891E35f - var_8))), 2))) { comp += ceilf(-0.0f + +0.0f + -1.6484E-27f); } for (int i=0; i < var_1; ++i) { comp = (+1.7501E34f + (-1.5713E36f * +1.9343E-37f * -1.4231E-42f)); comp = (var_9 / var_10); } if (comp <= (+1.2267E23f / -1.3600E-44f + (+1.0749E-4f - var_11 / (-1.5771E36f / var_12)))) { comp = (var_13 - (var_14 - sinhf(+1.2679E-37f / -1.6524E-18f))); } printf("%.17g\n", comp); } float* initPointer(float v) { float *ret = (float*) malloc(sizeof(float)*10); for(int i=0; i < 10; ++i) ret[i] = v; return ret; } int main(int argc, char** argv) { /* Program variables */ float tmp_1 = atof(argv[1]); int tmp_2 = atoi(argv[2]); float tmp_3 = atof(argv[3]); float tmp_4 = atof(argv[4]); float tmp_5 = atof(argv[5]); float tmp_6 = atof(argv[6]); float tmp_7 = atof(argv[7]); float tmp_8 = atof(argv[8]); float tmp_9 = atof(argv[9]); float tmp_10 = atof(argv[10]); float tmp_11 = atof(argv[11]); float tmp_12 = atof(argv[12]); float tmp_13 = atof(argv[13]); float tmp_14 = atof(argv[14]); float tmp_15 = atof(argv[15]); hipLaunchKernelGGL(( compute), dim3(1),dim3(1), 0, 0, tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15); hipDeviceSynchronize(); return 0; }
3ebe9d767cd9c69090d53124df848bbb941b84dd.cu
/* This is a automatically generated test. Do not modify */ #include <stdio.h> #include <stdlib.h> #include <math.h> __global__ void compute(float comp, int var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14) { float tmp_1 = (+1.2464E29f / +1.9244E-44f); float tmp_2 = var_2 / +1.8943E-44f - (var_3 - -1.1311E14f); comp = tmp_2 - tmp_1 + -1.1825E-36f * var_4 * -1.1172E-37f; if (comp < (var_5 - ldexpf(atanf(var_6 * -1.3105E-35f * (+1.6858E-35f * var_7 - (+1.3891E35f - var_8))), 2))) { comp += ceilf(-0.0f + +0.0f + -1.6484E-27f); } for (int i=0; i < var_1; ++i) { comp = (+1.7501E34f + (-1.5713E36f * +1.9343E-37f * -1.4231E-42f)); comp = (var_9 / var_10); } if (comp <= (+1.2267E23f / -1.3600E-44f + (+1.0749E-4f - var_11 / (-1.5771E36f / var_12)))) { comp = (var_13 - (var_14 - sinhf(+1.2679E-37f / -1.6524E-18f))); } printf("%.17g\n", comp); } float* initPointer(float v) { float *ret = (float*) malloc(sizeof(float)*10); for(int i=0; i < 10; ++i) ret[i] = v; return ret; } int main(int argc, char** argv) { /* Program variables */ float tmp_1 = atof(argv[1]); int tmp_2 = atoi(argv[2]); float tmp_3 = atof(argv[3]); float tmp_4 = atof(argv[4]); float tmp_5 = atof(argv[5]); float tmp_6 = atof(argv[6]); float tmp_7 = atof(argv[7]); float tmp_8 = atof(argv[8]); float tmp_9 = atof(argv[9]); float tmp_10 = atof(argv[10]); float tmp_11 = atof(argv[11]); float tmp_12 = atof(argv[12]); float tmp_13 = atof(argv[13]); float tmp_14 = atof(argv[14]); float tmp_15 = atof(argv[15]); compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15); cudaDeviceSynchronize(); return 0; }
8ed0ebe44ef0e0d465770aa4fdeb8ced4d2be9be.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define TILE_WIDTH 32 #define DIVIDE_ROUND_UP(a, b)((a + b - 1) / b) #define GET_INDEX(row, column, numcols)(row * numcols + column) #include <stdio.h> #include <stdlib.h> #include <time.h> //define matrix type typedef struct{ int row_count; int column_count; int* elements; } Matrix; typedef struct{ int bin_count; int bin_width; int* counts; } Histogram; Matrix ones(int row_count, int column_count); Matrix random(int row_count, int column_count); Histogram make_histogram(Matrix image); void print_hist(Histogram hist); void print_matrix(Matrix mat); int main(){ //make the matrices you want to multiply srand(time(NULL)); Matrix image = random(512, 512); Histogram result = make_histogram(image); print_hist(result); } //global memory __global__ void global_atomic_histogram(const Matrix image, Histogram hist){ int row_index = blockIdx.y * blockDim.y + threadIdx.y; int column_index = blockIdx.x * blockDim.x + threadIdx.x; int index = row_index * image.column_count + column_index; //initialize histogram if(index < hist.bin_count){ hist.counts[index] = 0; } __syncthreads(); if(row_index < image.row_count && column_index < image.column_count){ int value = image.elements[index]; int bin = value / hist.bin_width; atomicAdd(&(hist.counts[bin]), value); } __syncthreads(); } //shared memory __global__ void shared_atomic_histogram(const Matrix image, Histogram hist){ __shared__ int shared_hist[200];//warning: hard coded hist size int row_index = blockIdx.y * blockDim.y + threadIdx.y; int column_index = blockIdx.x * blockDim.x + threadIdx.x; int index = row_index * image.column_count + column_index; int shared_index = threadIdx.y * blockDim.x + threadIdx.x; //initialize histogram if(index < hist.bin_count){ hist.counts[index] = 0; } //initialize shared histogram //as long as block_width^2 > 200, we're ok if(shared_index < hist.bin_count){ shared_hist[shared_index] = 0; } __syncthreads(); 
if(row_index < image.row_count && column_index < image.column_count){ int value = image.elements[index]; int bin = value / 1;//hard coded atomicAdd(&(shared_hist[bin]), value); } __syncthreads(); //now do atomic adds to the global histogram if(shared_index < hist.bin_count){ atomicAdd(&(hist.counts[shared_index]), shared_hist[shared_index]); } __syncthreads(); } Histogram make_histogram(Matrix image){ hipError_t error; //step 1: allocate memory on the kernel for matrix Matrix image_d; image_d.row_count = image.row_count; image_d.column_count = image.column_count; size_t image_size = image.row_count * image.column_count * sizeof(int); error = hipMalloc((void**) &image_d.elements, image_size); if(error != hipSuccess){ printf("error allocating image matrix\n"); printf("CUDA error: %s\n", hipGetErrorString(error)); } //step 2: allocate memory on host and device for histogram Histogram hist, hist_d; hist_d.bin_count = hist.bin_count = 200; hist_d.bin_width = hist.bin_width = 1; size_t hist_size = hist_d.bin_count * sizeof(int); error = hipMalloc((void**) &hist_d.counts, hist_size); if(error != hipSuccess){ printf("error allocating histogram\n"); printf("CUDA error: %s\n", hipGetErrorString(error)); } hist.counts = (int*) malloc(hist_size); //step 3: initialize histogram counts for(int i = 0; i < hist.bin_count; i++){ hist.counts[i] = 0; } //step 4: copy image to device error = hipMemcpy(image_d.elements, image.elements, image_size, hipMemcpyHostToDevice); if(error != hipSuccess){ printf("error copying matrix\n"); } hipMemcpy(&(image_d.row_count), &(image.row_count), image_size, hipMemcpyHostToDevice); hipMemcpy(&(image_d.column_count), &(image.column_count), image_size, hipMemcpyHostToDevice); //step 5: copy histogram zeros do device error = hipMemcpy(hist_d.counts, hist.counts, hist_size, hipMemcpyHostToDevice); if(error != hipSuccess){ printf("error copying histogram\n"); } hipMemcpy(&(hist_d.bin_count), &(hist.bin_count), sizeof(int), hipMemcpyHostToDevice); 
hipMemcpy(&(hist_d.bin_width), &(hist.bin_width), sizeof(int), hipMemcpyHostToDevice); //step 4: launch kernel dim3 block_dims(TILE_WIDTH, TILE_WIDTH); dim3 grid_dims(DIVIDE_ROUND_UP(image_d.column_count, block_dims.x), DIVIDE_ROUND_UP(image_d.row_count, block_dims.y)); hipLaunchKernelGGL(( shared_atomic_histogram) , dim3(grid_dims), dim3(block_dims), 0, 0, image_d, hist_d); //step 5: copy results back to host error = hipMemcpy(hist.counts, hist_d.counts, hist_size, hipMemcpyDeviceToHost); if(error != hipSuccess){ printf("error copying result histogram\n"); printf("CUDA error: %s\n", hipGetErrorString(error)); } return hist; } Matrix ones (int row_count, int column_count){ Matrix result; result.row_count = row_count; result.column_count = column_count; result.elements = (int*) malloc(row_count * column_count * sizeof(int)); for(int i = 0; i < row_count * column_count; i++){ result.elements[i] = 1; } return result; } Matrix random (int row_count, int column_count){ Matrix result; result.row_count = row_count; result.column_count = column_count; result.elements = (int*) malloc(row_count * column_count * sizeof(int)); for(int i = 0; i < row_count * column_count; i++){ result.elements[i] = rand() % 200; } return result; } void print_matrix(Matrix mat){ int num_elements = mat.row_count * mat.column_count; for(int i = 0; i < num_elements; i++){ printf(" %d", mat.elements[i]); if(!((i + 1) % mat.column_count)){ printf("\n"); } } } void print_hist(Histogram hist){ for(int i = 0; i < hist.bin_count; i++){ printf(" %d", hist.counts[i]); } }
8ed0ebe44ef0e0d465770aa4fdeb8ced4d2be9be.cu
#define TILE_WIDTH 32 #define DIVIDE_ROUND_UP(a, b)((a + b - 1) / b) #define GET_INDEX(row, column, numcols)(row * numcols + column) #include <stdio.h> #include <stdlib.h> #include <time.h> //define matrix type typedef struct{ int row_count; int column_count; int* elements; } Matrix; typedef struct{ int bin_count; int bin_width; int* counts; } Histogram; Matrix ones(int row_count, int column_count); Matrix random(int row_count, int column_count); Histogram make_histogram(Matrix image); void print_hist(Histogram hist); void print_matrix(Matrix mat); int main(){ //make the matrices you want to multiply srand(time(NULL)); Matrix image = random(512, 512); Histogram result = make_histogram(image); print_hist(result); } //global memory __global__ void global_atomic_histogram(const Matrix image, Histogram hist){ int row_index = blockIdx.y * blockDim.y + threadIdx.y; int column_index = blockIdx.x * blockDim.x + threadIdx.x; int index = row_index * image.column_count + column_index; //initialize histogram if(index < hist.bin_count){ hist.counts[index] = 0; } __syncthreads(); if(row_index < image.row_count && column_index < image.column_count){ int value = image.elements[index]; int bin = value / hist.bin_width; atomicAdd(&(hist.counts[bin]), value); } __syncthreads(); } //shared memory __global__ void shared_atomic_histogram(const Matrix image, Histogram hist){ __shared__ int shared_hist[200];//warning: hard coded hist size int row_index = blockIdx.y * blockDim.y + threadIdx.y; int column_index = blockIdx.x * blockDim.x + threadIdx.x; int index = row_index * image.column_count + column_index; int shared_index = threadIdx.y * blockDim.x + threadIdx.x; //initialize histogram if(index < hist.bin_count){ hist.counts[index] = 0; } //initialize shared histogram //as long as block_width^2 > 200, we're ok if(shared_index < hist.bin_count){ shared_hist[shared_index] = 0; } __syncthreads(); if(row_index < image.row_count && column_index < image.column_count){ int value = 
image.elements[index]; int bin = value / 1;//hard coded atomicAdd(&(shared_hist[bin]), value); } __syncthreads(); //now do atomic adds to the global histogram if(shared_index < hist.bin_count){ atomicAdd(&(hist.counts[shared_index]), shared_hist[shared_index]); } __syncthreads(); } Histogram make_histogram(Matrix image){ cudaError_t error; //step 1: allocate memory on the kernel for matrix Matrix image_d; image_d.row_count = image.row_count; image_d.column_count = image.column_count; size_t image_size = image.row_count * image.column_count * sizeof(int); error = cudaMalloc((void**) &image_d.elements, image_size); if(error != cudaSuccess){ printf("error allocating image matrix\n"); printf("CUDA error: %s\n", cudaGetErrorString(error)); } //step 2: allocate memory on host and device for histogram Histogram hist, hist_d; hist_d.bin_count = hist.bin_count = 200; hist_d.bin_width = hist.bin_width = 1; size_t hist_size = hist_d.bin_count * sizeof(int); error = cudaMalloc((void**) &hist_d.counts, hist_size); if(error != cudaSuccess){ printf("error allocating histogram\n"); printf("CUDA error: %s\n", cudaGetErrorString(error)); } hist.counts = (int*) malloc(hist_size); //step 3: initialize histogram counts for(int i = 0; i < hist.bin_count; i++){ hist.counts[i] = 0; } //step 4: copy image to device error = cudaMemcpy(image_d.elements, image.elements, image_size, cudaMemcpyHostToDevice); if(error != cudaSuccess){ printf("error copying matrix\n"); } cudaMemcpy(&(image_d.row_count), &(image.row_count), image_size, cudaMemcpyHostToDevice); cudaMemcpy(&(image_d.column_count), &(image.column_count), image_size, cudaMemcpyHostToDevice); //step 5: copy histogram zeros do device error = cudaMemcpy(hist_d.counts, hist.counts, hist_size, cudaMemcpyHostToDevice); if(error != cudaSuccess){ printf("error copying histogram\n"); } cudaMemcpy(&(hist_d.bin_count), &(hist.bin_count), sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(&(hist_d.bin_width), &(hist.bin_width), sizeof(int), 
cudaMemcpyHostToDevice); //step 4: launch kernel dim3 block_dims(TILE_WIDTH, TILE_WIDTH); dim3 grid_dims(DIVIDE_ROUND_UP(image_d.column_count, block_dims.x), DIVIDE_ROUND_UP(image_d.row_count, block_dims.y)); shared_atomic_histogram <<<grid_dims, block_dims>>> (image_d, hist_d); //step 5: copy results back to host error = cudaMemcpy(hist.counts, hist_d.counts, hist_size, cudaMemcpyDeviceToHost); if(error != cudaSuccess){ printf("error copying result histogram\n"); printf("CUDA error: %s\n", cudaGetErrorString(error)); } return hist; } Matrix ones (int row_count, int column_count){ Matrix result; result.row_count = row_count; result.column_count = column_count; result.elements = (int*) malloc(row_count * column_count * sizeof(int)); for(int i = 0; i < row_count * column_count; i++){ result.elements[i] = 1; } return result; } Matrix random (int row_count, int column_count){ Matrix result; result.row_count = row_count; result.column_count = column_count; result.elements = (int*) malloc(row_count * column_count * sizeof(int)); for(int i = 0; i < row_count * column_count; i++){ result.elements[i] = rand() % 200; } return result; } void print_matrix(Matrix mat){ int num_elements = mat.row_count * mat.column_count; for(int i = 0; i < num_elements; i++){ printf(" %d", mat.elements[i]); if(!((i + 1) % mat.column_count)){ printf("\n"); } } } void print_hist(Histogram hist){ for(int i = 0; i < hist.bin_count; i++){ printf(" %d", hist.counts[i]); } }
1145f8da2eb5371649bf22a978c47bba70031581.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (C) 2019 // Norbert Batfai, batfai.norbert@inf.unideb.hu // Released under GNU GPLv3 #include <png++/image.hpp> #include <png++/rgb_pixel.hpp> #include <sys/times.h> #include <iostream> #define SIZE 600 #define ITERATION_LIMIT 32000 // Vegigzongorazza a CUDA a szelesseg x magassag racsot: __device__ int mandel(int k, int j) { // most eppen a j. sor k. oszlopaban vagyunk float a = -2.0, b = .7, c = -1.35, d = 1.35; int width = SIZE, height = SIZE, iterationLimit = ITERATION_LIMIT; float dx = (b - a) / width; float dy = (d - c) / height; float reC, imC, reZ, imZ, ujreZ, ujimZ; int iteration = 0; reC = a + k * dx; imC = d - j * dy; reZ = 0.0; imZ = 0.0; iteration = 0; while (reZ * reZ + imZ * imZ < 4 && iteration < iterationLimit) { ujreZ = reZ * reZ - imZ * imZ + reC; ujimZ = 2 * reZ * imZ + imC; reZ = ujreZ; imZ = ujimZ; ++iteration; } return iteration; } __global__ void mandelkernel(int *buffer) { int tj = threadIdx.x; int tk = threadIdx.y; int j = blockIdx.x * 10 + tj; int k = blockIdx.y * 10 + tk; buffer[j + k * SIZE] = mandel(j, k); } void cudamandel(int buffer[SIZE][SIZE]) { int *deviceImageBuffer; hipMalloc((void **)&deviceImageBuffer, SIZE * SIZE * sizeof(int)); dim3 grid(SIZE / 10, SIZE / 10); dim3 tgrid(10, 10); hipLaunchKernelGGL(( mandelkernel) , dim3(grid), dim3(tgrid) , 0, 0, deviceImageBuffer); hipMemcpy(buffer, deviceImageBuffer, SIZE * SIZE * sizeof(int), hipMemcpyDeviceToHost); hipFree(deviceImageBuffer); } int main(int argc, char *argv[]) { // Merunk idot (PP 64) clock_t delta = clock(); // Merunk idot (PP 66) struct tms tmsbuf1, tmsbuf2; times(&tmsbuf1); int buffer[SIZE][SIZE]; cudamandel(buffer); png::image < png::rgb_pixel > image(SIZE, SIZE); for (int j = 0; j < SIZE; ++j) { //sor = j; for (int k = 0; k < SIZE; ++k) { image.set_pixel(k, j, png::rgb_pixel(255 - (255 * buffer[j][k]) / ITERATION_LIMIT, 255 - (255 * buffer[j][k]) / ITERATION_LIMIT, 
255 - (255 * buffer[j][k]) / ITERATION_LIMIT)); } } image.write("mandel.png"); times(&tmsbuf2); std::cout << tmsbuf2.tms_utime - tmsbuf1.tms_utime + tmsbuf2.tms_stime - tmsbuf1.tms_stime << std::endl; delta = clock() - delta; std::cout << (float)delta / CLOCKS_PER_SEC << " sec" << std::endl; }
1145f8da2eb5371649bf22a978c47bba70031581.cu
// Copyright (C) 2019 // Norbert Batfai, batfai.norbert@inf.unideb.hu // Released under GNU GPLv3 #include <png++/image.hpp> #include <png++/rgb_pixel.hpp> #include <sys/times.h> #include <iostream> #define SIZE 600 #define ITERATION_LIMIT 32000 // Vegigzongorazza a CUDA a szelesseg x magassag racsot: __device__ int mandel(int k, int j) { // most eppen a j. sor k. oszlopaban vagyunk float a = -2.0, b = .7, c = -1.35, d = 1.35; int width = SIZE, height = SIZE, iterationLimit = ITERATION_LIMIT; float dx = (b - a) / width; float dy = (d - c) / height; float reC, imC, reZ, imZ, ujreZ, ujimZ; int iteration = 0; reC = a + k * dx; imC = d - j * dy; reZ = 0.0; imZ = 0.0; iteration = 0; while (reZ * reZ + imZ * imZ < 4 && iteration < iterationLimit) { ujreZ = reZ * reZ - imZ * imZ + reC; ujimZ = 2 * reZ * imZ + imC; reZ = ujreZ; imZ = ujimZ; ++iteration; } return iteration; } __global__ void mandelkernel(int *buffer) { int tj = threadIdx.x; int tk = threadIdx.y; int j = blockIdx.x * 10 + tj; int k = blockIdx.y * 10 + tk; buffer[j + k * SIZE] = mandel(j, k); } void cudamandel(int buffer[SIZE][SIZE]) { int *deviceImageBuffer; cudaMalloc((void **)&deviceImageBuffer, SIZE * SIZE * sizeof(int)); dim3 grid(SIZE / 10, SIZE / 10); dim3 tgrid(10, 10); mandelkernel <<< grid, tgrid >>> (deviceImageBuffer); cudaMemcpy(buffer, deviceImageBuffer, SIZE * SIZE * sizeof(int), cudaMemcpyDeviceToHost); cudaFree(deviceImageBuffer); } int main(int argc, char *argv[]) { // Merunk idot (PP 64) clock_t delta = clock(); // Merunk idot (PP 66) struct tms tmsbuf1, tmsbuf2; times(&tmsbuf1); int buffer[SIZE][SIZE]; cudamandel(buffer); png::image < png::rgb_pixel > image(SIZE, SIZE); for (int j = 0; j < SIZE; ++j) { //sor = j; for (int k = 0; k < SIZE; ++k) { image.set_pixel(k, j, png::rgb_pixel(255 - (255 * buffer[j][k]) / ITERATION_LIMIT, 255 - (255 * buffer[j][k]) / ITERATION_LIMIT, 255 - (255 * buffer[j][k]) / ITERATION_LIMIT)); } } image.write("mandel.png"); times(&tmsbuf2); std::cout << 
tmsbuf2.tms_utime - tmsbuf1.tms_utime + tmsbuf2.tms_stime - tmsbuf1.tms_stime << std::endl; delta = clock() - delta; std::cout << (float)delta / CLOCKS_PER_SEC << " sec" << std::endl; }
54b149583bcf19a1db32fc6ab08681fc63e323aa.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /********************************************************************* 11 12 Copyright (C) 2015 by Wisllay Vitrio 13 14 This program is free software; you can redistribute it and/or modify 15 it under the terms of the GNU General Public License as published by 16 the Free Software Foundation; either version 2 of the License, or 17 (at your option) any later version. 18 19 This program is distributed in the hope that it will be useful, 20 but WITHOUT ANY WARRANTY; without even the implied warranty of 21 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 22 GNU General Public License for more details. 23 24 You should have received a copy of the GNU General Public License 25 along with this program; if not, write to the Free Software 26 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 27 28 ********************************************************************/ /* * * knn.cu */ #define CUDA_API_PER_THREAD_DEFAULT_STREAM #include <stdio.h> #include <stdlib.h> #include <algorithm> #include <iostream> #include <queue> #include <vector> #include <set> #include <functional> #include "simjoin.cuh" #include "structs.cuh" #include "utils.cuh" #include "inverted_index.cuh" #include "cuCompactor_hip.cuh" struct is_bigger_than_threshold { float threshold; is_bigger_than_threshold(float thr) : threshold(thr) {}; __host__ __device__ bool operator()(const Similarity &reg) { return (reg.similarity > threshold); } }; __host__ int findSimilars(InvertedIndex inverted_index, float threshold, struct DeviceVariables *dev_vars, Similarity* distances, int docid, int querystart, int querysize, bool aggregate, DeviceTiming& deviceTiming) { dim3 grid, threads; get_grid_config(grid, threads); int num_sets = inverted_index.num_sets - docid - 1; int *d_count = dev_vars->d_count, *d_index = dev_vars->d_index, *d_sim = dev_vars->d_sim, *size_doc = dev_vars->d_sizes; int *d_BlocksCount = 
dev_vars->d_bC, *d_BlocksOffset = dev_vars->d_bO; Entry *d_query = inverted_index.d_entries + querystart; Similarity *d_similarity = dev_vars->d_dist, *d_result = dev_vars->d_result; DeviceTiming::EventPair* memSet = deviceTiming.add("Mem set", 0); gpuAssert(hipMemset(d_sim + docid + 1, 0, num_sets*sizeof(int))); deviceTiming.finish(memSet); DeviceTiming::EventPair* termCount = deviceTiming.add("Term count", 0); hipLaunchKernelGGL(( get_term_count_and_tf_idf), dim3(grid), dim3(threads), 0, 0, inverted_index, d_query, d_count, querysize); deviceTiming.finish(termCount); thrust::device_ptr<int> thrust_d_count(d_count); thrust::device_ptr<int> thrust_d_index(d_index); thrust::inclusive_scan(thrust_d_count, thrust_d_count + querysize, thrust_d_index); DeviceTiming::EventPair* calcJacc = deviceTiming.add("Calculate Jaccard", 0); hipLaunchKernelGGL(( calculateJaccardSimilarity), dim3(grid), dim3(threads), 0, 0, inverted_index, d_query, d_index, d_sim, querysize, docid); deviceTiming.finish(calcJacc); DeviceTiming::EventPair* filterReg = deviceTiming.add("Filter registers", 0); hipLaunchKernelGGL(( filter_registers), dim3(grid), dim3(threads), 0, 0, d_sim, threshold, querysize, docid, inverted_index.num_sets, size_doc, d_similarity); deviceTiming.finish(filterReg); int blocksize = 1024; int numBlocks = cuCompactor::divup(num_sets, blocksize); DeviceTiming::EventPair* compactSimilars = deviceTiming.add("Compact similars", 0); int totalSimilars = cuCompactor::compact2<Similarity>(d_similarity + docid + 1, d_result, num_sets, is_bigger_than_threshold(threshold), blocksize, numBlocks, d_BlocksCount, d_BlocksOffset); deviceTiming.finish(compactSimilars); DeviceTiming::EventPair* transferPairs = deviceTiming.add("Transfer pairs", 0); if (totalSimilars && !aggregate) hipMemcpyAsync(distances, d_result, sizeof(Similarity)*totalSimilars, hipMemcpyDeviceToHost); deviceTiming.finish(transferPairs); return totalSimilars; } __global__ void calculateJaccardSimilarity(InvertedIndex 
inverted_index, Entry *d_query, int *index, int *dist, int D, int docid) { __shared__ int N; if (threadIdx.x == 0) { N = index[D - 1]; //Total number of items to be queried } __syncthreads(); int block_size = N / gridDim.x + (N % gridDim.x == 0 ? 0 : 1); //Partition size int lo = block_size * (blockIdx.x); //Beginning of the block int hi = min(lo + block_size, N); //End of the block int size = hi - lo; // Real partition size (the last one can be smaller) int idx = 0; int end; for (int i = threadIdx.x; i < size; i += blockDim.x) { int pos = i + lo; while (true) { end = index[idx]; if (end <= pos) { idx++; } else { break; } } Entry entry = d_query[idx]; //finds out the term int offset = end - pos; int idx2 = inverted_index.d_index[entry.term_id] - offset; Entry index_entry = inverted_index.d_inverted_index[idx2]; if (index_entry.set_id > docid) { atomicAdd(&dist[index_entry.set_id], 1); } } } __global__ void get_term_count_and_tf_idf(InvertedIndex inverted_index, Entry *query, int *count, int N) { int block_size = N / gridDim.x + (N % gridDim.x == 0 ? 0 : 1); //Partition size int offset = block_size * (blockIdx.x); //Beginning of the block int lim = min(offset + block_size, N); //End of the block int size = lim - offset; //Block size query += offset; count += offset; for (int i = threadIdx.x; i < size; i += blockDim.x) { Entry entry = query[i]; int idf = inverted_index.d_count[entry.term_id]; //query[i].tf_idf = entry.tf * log(inverted_index.num_sets / float(max(1, idf))); count[i] = idf; //atomicAdd(d_qnorm, query[i].tf_idf * query[i].tf_idf); //atomicAdd(d_qnorml1, query[i].tf_idf); } } __global__ void filter_registers(int *sim, float threshold, int querysize, int docid, int N, int *doc_size, Similarity *similars) { // similars + id_doc N -= (docid + 1); int block_size = N / gridDim.x + (N % gridDim.x == 0 ? 
0 : 1); //Partition size int offset = block_size * (blockIdx.x) + docid + 1; //Beginning of the block int lim = min(offset + block_size, N + docid + 1); //End of the block int size = lim - offset; similars += offset; sim += offset; doc_size += offset; for (int i = threadIdx.x; i < size; i += blockDim.x) { float jac = sim[i]/ (float) (querysize + doc_size[i] - sim[i]); similars[i].set_id = offset + i; similars[i].similarity = jac; } }
54b149583bcf19a1db32fc6ab08681fc63e323aa.cu
/********************************************************************* 11 12 Copyright (C) 2015 by Wisllay Vitrio 13 14 This program is free software; you can redistribute it and/or modify 15 it under the terms of the GNU General Public License as published by 16 the Free Software Foundation; either version 2 of the License, or 17 (at your option) any later version. 18 19 This program is distributed in the hope that it will be useful, 20 but WITHOUT ANY WARRANTY; without even the implied warranty of 21 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 22 GNU General Public License for more details. 23 24 You should have received a copy of the GNU General Public License 25 along with this program; if not, write to the Free Software 26 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 27 28 ********************************************************************/ /* * * knn.cu */ #define CUDA_API_PER_THREAD_DEFAULT_STREAM #include <stdio.h> #include <stdlib.h> #include <algorithm> #include <iostream> #include <queue> #include <vector> #include <set> #include <functional> #include "simjoin.cuh" #include "structs.cuh" #include "utils.cuh" #include "inverted_index.cuh" #include "cuCompactor.cuh" struct is_bigger_than_threshold { float threshold; is_bigger_than_threshold(float thr) : threshold(thr) {}; __host__ __device__ bool operator()(const Similarity &reg) { return (reg.similarity > threshold); } }; __host__ int findSimilars(InvertedIndex inverted_index, float threshold, struct DeviceVariables *dev_vars, Similarity* distances, int docid, int querystart, int querysize, bool aggregate, DeviceTiming& deviceTiming) { dim3 grid, threads; get_grid_config(grid, threads); int num_sets = inverted_index.num_sets - docid - 1; int *d_count = dev_vars->d_count, *d_index = dev_vars->d_index, *d_sim = dev_vars->d_sim, *size_doc = dev_vars->d_sizes; int *d_BlocksCount = dev_vars->d_bC, *d_BlocksOffset = dev_vars->d_bO; Entry *d_query = inverted_index.d_entries + 
querystart; Similarity *d_similarity = dev_vars->d_dist, *d_result = dev_vars->d_result; DeviceTiming::EventPair* memSet = deviceTiming.add("Mem set", 0); gpuAssert(cudaMemset(d_sim + docid + 1, 0, num_sets*sizeof(int))); deviceTiming.finish(memSet); DeviceTiming::EventPair* termCount = deviceTiming.add("Term count", 0); get_term_count_and_tf_idf<<<grid, threads>>>(inverted_index, d_query, d_count, querysize); deviceTiming.finish(termCount); thrust::device_ptr<int> thrust_d_count(d_count); thrust::device_ptr<int> thrust_d_index(d_index); thrust::inclusive_scan(thrust_d_count, thrust_d_count + querysize, thrust_d_index); DeviceTiming::EventPair* calcJacc = deviceTiming.add("Calculate Jaccard", 0); calculateJaccardSimilarity<<<grid, threads>>>(inverted_index, d_query, d_index, d_sim, querysize, docid); deviceTiming.finish(calcJacc); DeviceTiming::EventPair* filterReg = deviceTiming.add("Filter registers", 0); filter_registers<<<grid, threads>>>(d_sim, threshold, querysize, docid, inverted_index.num_sets, size_doc, d_similarity); deviceTiming.finish(filterReg); int blocksize = 1024; int numBlocks = cuCompactor::divup(num_sets, blocksize); DeviceTiming::EventPair* compactSimilars = deviceTiming.add("Compact similars", 0); int totalSimilars = cuCompactor::compact2<Similarity>(d_similarity + docid + 1, d_result, num_sets, is_bigger_than_threshold(threshold), blocksize, numBlocks, d_BlocksCount, d_BlocksOffset); deviceTiming.finish(compactSimilars); DeviceTiming::EventPair* transferPairs = deviceTiming.add("Transfer pairs", 0); if (totalSimilars && !aggregate) cudaMemcpyAsync(distances, d_result, sizeof(Similarity)*totalSimilars, cudaMemcpyDeviceToHost); deviceTiming.finish(transferPairs); return totalSimilars; } __global__ void calculateJaccardSimilarity(InvertedIndex inverted_index, Entry *d_query, int *index, int *dist, int D, int docid) { __shared__ int N; if (threadIdx.x == 0) { N = index[D - 1]; //Total number of items to be queried } __syncthreads(); int block_size 
= N / gridDim.x + (N % gridDim.x == 0 ? 0 : 1); //Partition size int lo = block_size * (blockIdx.x); //Beginning of the block int hi = min(lo + block_size, N); //End of the block int size = hi - lo; // Real partition size (the last one can be smaller) int idx = 0; int end; for (int i = threadIdx.x; i < size; i += blockDim.x) { int pos = i + lo; while (true) { end = index[idx]; if (end <= pos) { idx++; } else { break; } } Entry entry = d_query[idx]; //finds out the term int offset = end - pos; int idx2 = inverted_index.d_index[entry.term_id] - offset; Entry index_entry = inverted_index.d_inverted_index[idx2]; if (index_entry.set_id > docid) { atomicAdd(&dist[index_entry.set_id], 1); } } } __global__ void get_term_count_and_tf_idf(InvertedIndex inverted_index, Entry *query, int *count, int N) { int block_size = N / gridDim.x + (N % gridDim.x == 0 ? 0 : 1); //Partition size int offset = block_size * (blockIdx.x); //Beginning of the block int lim = min(offset + block_size, N); //End of the block int size = lim - offset; //Block size query += offset; count += offset; for (int i = threadIdx.x; i < size; i += blockDim.x) { Entry entry = query[i]; int idf = inverted_index.d_count[entry.term_id]; //query[i].tf_idf = entry.tf * log(inverted_index.num_sets / float(max(1, idf))); count[i] = idf; //atomicAdd(d_qnorm, query[i].tf_idf * query[i].tf_idf); //atomicAdd(d_qnorml1, query[i].tf_idf); } } __global__ void filter_registers(int *sim, float threshold, int querysize, int docid, int N, int *doc_size, Similarity *similars) { // similars + id_doc N -= (docid + 1); int block_size = N / gridDim.x + (N % gridDim.x == 0 ? 
0 : 1); //Partition size int offset = block_size * (blockIdx.x) + docid + 1; //Beginning of the block int lim = min(offset + block_size, N + docid + 1); //End of the block int size = lim - offset; similars += offset; sim += offset; doc_size += offset; for (int i = threadIdx.x; i < size; i += blockDim.x) { float jac = sim[i]/ (float) (querysize + doc_size[i] - sim[i]); similars[i].set_id = offset + i; similars[i].similarity = jac; } }
b57640910f952cb5ffb0bb0583521bcea9f2f313.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "caffe2/operators/batch_moments_op.h" #include <hipcub/hipcub.hpp> #include "caffe2/core/context_gpu.h" namespace caffe2 { namespace { template <typename T> using BlockReduce = hipcub::BlockReduce<T, CAFFE_CUDA_NUM_THREADS>; template <typename T, StorageOrder kOrder> __global__ void BatchMomentsCUDAKernel( const int N, const int C, const int HxW, const T* X, T* mu, T* var) { const int outer_size = C; const int inner_size = N * HxW; __shared__ typename BlockReduce<T>::TempStorage m_storage; __shared__ typename BlockReduce<T>::TempStorage v_storage; for (int i = blockIdx.x; i < outer_size; i += gridDim.x) { T m_sum = 0; T v_sum = 0; for (int j = threadIdx.x; j < inner_size; j += blockDim.x) { const int index = kOrder == StorageOrder::NCHW ? (j / HxW * C + i) * HxW + j % HxW : j * outer_size + i; #if __CUDA_ARCH__ >= 350 m_sum += __ldg(X + index); v_sum += __ldg(X + index) * __ldg(X + index); #else m_sum += X[index]; v_sum += X[index] * X[index]; #endif } m_sum = BlockReduce<T>(m_storage).Reduce(m_sum, hipcub::Sum()); v_sum = BlockReduce<T>(v_storage).Reduce(v_sum, hipcub::Sum()); if (threadIdx.x == 0) { mu[i] = m_sum / static_cast<T>(N * HxW); var[i] = v_sum / static_cast<T>(N * HxW); } __syncthreads(); } } template <typename T, StorageOrder kOrder> __global__ void BatchMomentsGradientCUDAKernel( const int N, const int C, const int HxW, const T* dmu, const T* dvar, const T* X, T* dX) { const int size = N * C * HxW; const T scale = T(1) / static_cast<T>(N * HxW); CUDA_1D_KERNEL_LOOP(i, size) { const int i_mu = kOrder == StorageOrder::NCHW ? 
i / (HxW) % C : i % C; #if __CUDA_ARCH__ >= 350 dX[i] = (__ldg(dmu + i_mu) + __ldg(dvar + i_mu) * T(2) * __ldg(X + i)) * scale; #else dX[i] = (dmu[i_mu] + dvar[i_mu] * T(2) * X[i]) * scale; #endif } } } // namespace template <> bool BatchMomentsOp<float, CUDAContext>::ComputeBatchMomentsNCHW( const int N, const int C, const int HxW, const float* X, float* mu, float* var) { const int outer_size = N * HxW; hipLaunchKernelGGL(( BatchMomentsCUDAKernel<float, StorageOrder::NCHW>) , dim3(::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, C, HxW, X, mu, var); return true; } template <> bool BatchMomentsOp<float, CUDAContext>::ComputeBatchMomentsNHWC( const int N, const int C, const int HxW, const float* X, float* mu, float* var) { const int outer_size = N * HxW; hipLaunchKernelGGL(( BatchMomentsCUDAKernel<float, StorageOrder::NHWC>) , dim3(::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, C, HxW, X, mu, var); return true; } template <> bool BatchMomentsGradientOp<float, CUDAContext>:: ComputeBatchMomentsGradientNCHW( const int N, const int C, const int HxW, const float* dmu, const float* dvar, const float* X, float* dX) { const int size = N * C * HxW; hipLaunchKernelGGL(( BatchMomentsGradientCUDAKernel<float, StorageOrder::NCHW>) , dim3(CAFFE_GET_BLOCKS(size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, C, HxW, dmu, dvar, X, dX); return true; } template <> bool BatchMomentsGradientOp<float, CUDAContext>:: ComputeBatchMomentsGradientNHWC( const int N, const int C, const int HxW, const float* dmu, const float* dvar, const float* X, float* dX) { const int size = N * C * HxW; hipLaunchKernelGGL(( BatchMomentsGradientCUDAKernel<float, StorageOrder::NHWC>) , dim3(CAFFE_GET_BLOCKS(size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, C, HxW, dmu, dvar, X, dX); return true; } REGISTER_CUDA_OPERATOR(BatchMoments, BatchMomentsOp<float, 
CUDAContext>); REGISTER_CUDA_OPERATOR( BatchMomentsGradient, BatchMomentsGradientOp<float, CUDAContext>); } // namespace caffe2
b57640910f952cb5ffb0bb0583521bcea9f2f313.cu
#include "caffe2/operators/batch_moments_op.h" #include <cub/block/block_reduce.cuh> #include "caffe2/core/context_gpu.h" namespace caffe2 { namespace { template <typename T> using BlockReduce = cub::BlockReduce<T, CAFFE_CUDA_NUM_THREADS>; template <typename T, StorageOrder kOrder> __global__ void BatchMomentsCUDAKernel( const int N, const int C, const int HxW, const T* X, T* mu, T* var) { const int outer_size = C; const int inner_size = N * HxW; __shared__ typename BlockReduce<T>::TempStorage m_storage; __shared__ typename BlockReduce<T>::TempStorage v_storage; for (int i = blockIdx.x; i < outer_size; i += gridDim.x) { T m_sum = 0; T v_sum = 0; for (int j = threadIdx.x; j < inner_size; j += blockDim.x) { const int index = kOrder == StorageOrder::NCHW ? (j / HxW * C + i) * HxW + j % HxW : j * outer_size + i; #if __CUDA_ARCH__ >= 350 m_sum += __ldg(X + index); v_sum += __ldg(X + index) * __ldg(X + index); #else m_sum += X[index]; v_sum += X[index] * X[index]; #endif } m_sum = BlockReduce<T>(m_storage).Reduce(m_sum, cub::Sum()); v_sum = BlockReduce<T>(v_storage).Reduce(v_sum, cub::Sum()); if (threadIdx.x == 0) { mu[i] = m_sum / static_cast<T>(N * HxW); var[i] = v_sum / static_cast<T>(N * HxW); } __syncthreads(); } } template <typename T, StorageOrder kOrder> __global__ void BatchMomentsGradientCUDAKernel( const int N, const int C, const int HxW, const T* dmu, const T* dvar, const T* X, T* dX) { const int size = N * C * HxW; const T scale = T(1) / static_cast<T>(N * HxW); CUDA_1D_KERNEL_LOOP(i, size) { const int i_mu = kOrder == StorageOrder::NCHW ? 
i / (HxW) % C : i % C; #if __CUDA_ARCH__ >= 350 dX[i] = (__ldg(dmu + i_mu) + __ldg(dvar + i_mu) * T(2) * __ldg(X + i)) * scale; #else dX[i] = (dmu[i_mu] + dvar[i_mu] * T(2) * X[i]) * scale; #endif } } } // namespace template <> bool BatchMomentsOp<float, CUDAContext>::ComputeBatchMomentsNCHW( const int N, const int C, const int HxW, const float* X, float* mu, float* var) { const int outer_size = N * HxW; BatchMomentsCUDAKernel<float, StorageOrder::NCHW> <<<std::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(N, C, HxW, X, mu, var); return true; } template <> bool BatchMomentsOp<float, CUDAContext>::ComputeBatchMomentsNHWC( const int N, const int C, const int HxW, const float* X, float* mu, float* var) { const int outer_size = N * HxW; BatchMomentsCUDAKernel<float, StorageOrder::NHWC> <<<std::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(N, C, HxW, X, mu, var); return true; } template <> bool BatchMomentsGradientOp<float, CUDAContext>:: ComputeBatchMomentsGradientNCHW( const int N, const int C, const int HxW, const float* dmu, const float* dvar, const float* X, float* dX) { const int size = N * C * HxW; BatchMomentsGradientCUDAKernel<float, StorageOrder::NCHW> <<<CAFFE_GET_BLOCKS(size), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(N, C, HxW, dmu, dvar, X, dX); return true; } template <> bool BatchMomentsGradientOp<float, CUDAContext>:: ComputeBatchMomentsGradientNHWC( const int N, const int C, const int HxW, const float* dmu, const float* dvar, const float* X, float* dX) { const int size = N * C * HxW; BatchMomentsGradientCUDAKernel<float, StorageOrder::NHWC> <<<CAFFE_GET_BLOCKS(size), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(N, C, HxW, dmu, dvar, X, dX); return true; } REGISTER_CUDA_OPERATOR(BatchMoments, BatchMomentsOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR( BatchMomentsGradient, BatchMomentsGradientOp<float, CUDAContext>); } // namespace 
caffe2
0efa7e4a7407a47f043c18a1ec809844297ab28f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cstdio> #include <cstdlib> #include "SyncedMemory.h" #define CHECK {\ auto e = hipDeviceSynchronize();\ if (e != hipSuccess) {\ printf("At " __FILE__ ":%d, %s\n", __LINE__, hipGetErrorString(e));\ abort();\ }\ } const int W = 40; const int H = 12; __global__ void Draw(char *frame) { // TODO: draw more complex things here // Do not just submit the original file provided by the TA! const int y = blockIdx.y * blockDim.y + threadIdx.y; const int x = blockIdx.x * blockDim.x + threadIdx.x; if (y < H and x < W) { char c; if (x == W-1) { c = y == H-1 ? '\0' : '\n'; } else if (y == 0 or y == H-1 or x == 0 or x == W-2 or x==(W-2)/2 or y==(H-1)/2-1 or y==(H-1)/2+2) { c = ':'; } else { c = '_'; } frame[y*W+x] = c; } } int main(int argc, char **argv) { MemoryBuffer<char> frame(W*H); auto frame_smem = frame.CreateSync(W*H); CHECK; hipLaunchKernelGGL(( Draw), dim3(dim3((W-1)/16+1,(H-1)/12+1)), dim3(dim3(16,12)), 0, 0, frame_smem.get_gpu_wo()); CHECK; puts(frame_smem.get_cpu_ro()); CHECK; return 0; }
0efa7e4a7407a47f043c18a1ec809844297ab28f.cu
#include <cstdio> #include <cstdlib> #include "SyncedMemory.h" #define CHECK {\ auto e = cudaDeviceSynchronize();\ if (e != cudaSuccess) {\ printf("At " __FILE__ ":%d, %s\n", __LINE__, cudaGetErrorString(e));\ abort();\ }\ } const int W = 40; const int H = 12; __global__ void Draw(char *frame) { // TODO: draw more complex things here // Do not just submit the original file provided by the TA! const int y = blockIdx.y * blockDim.y + threadIdx.y; const int x = blockIdx.x * blockDim.x + threadIdx.x; if (y < H and x < W) { char c; if (x == W-1) { c = y == H-1 ? '\0' : '\n'; } else if (y == 0 or y == H-1 or x == 0 or x == W-2 or x==(W-2)/2 or y==(H-1)/2-1 or y==(H-1)/2+2) { c = ':'; } else { c = '_'; } frame[y*W+x] = c; } } int main(int argc, char **argv) { MemoryBuffer<char> frame(W*H); auto frame_smem = frame.CreateSync(W*H); CHECK; Draw<<<dim3((W-1)/16+1,(H-1)/12+1), dim3(16,12)>>>(frame_smem.get_gpu_wo()); CHECK; puts(frame_smem.get_cpu_ro()); CHECK; return 0; }
e6900b7259e271344e8ad2c64a76a6f358d74bd4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2014 @generated from zbajac_csr.cu normal z -> s, Wed Sep 17 15:08:43 2014 */ #include "common_magma.h" #include "magmasparse_s.h" #include "magma.h" #define PRECISION_s #define BLOCKSIZE 256 __global__ void magma_sbajac_csr_ls_kernel(int localiters, int n, float *valD, magma_index_t *rowD, magma_index_t *colD, float *valR, magma_index_t *rowR, magma_index_t *colR, const float * __restrict__ b, float *x ){ int ind_diag = blockIdx.x*blockDim.x; int index = blockIdx.x*blockDim.x+threadIdx.x; int i, j, start, end; if(index<n){ start=rowR[index]; end =rowR[index+1]; float zero = MAGMA_S_MAKE(0.0, 0.0); float bl, tmp = zero, v = zero; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif #pragma unroll for( i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; start=rowD[index]; end =rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; v = bl - v; /* add more local iterations */ __shared__ float local_x[ BLOCKSIZE ]; local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]); __syncthreads(); #pragma unroll for( j=0; j<localiters; j++ ) { tmp = zero; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * local_x[ colD[i] - ind_diag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } x[index] = local_x[threadIdx.x]; } } __global__ void magma_sbajac_csr_kernel( int n, float *valD, magma_index_t *rowD, magma_index_t *colD, float *valR, magma_index_t *rowR, magma_index_t *colR, float *b, float *x ){ int index = blockIdx.x*blockDim.x+threadIdx.x; int i, start, end; if(index<n){ float zero = MAGMA_S_MAKE(0.0, 0.0); float bl, tmp = zero, v = zero; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); 
#else bl = b[index]; #endif start=rowR[index]; end =rowR[index+1]; #pragma unroll for( i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; v = bl - v; start=rowD[index]; end =rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; x[index] = x[index] + ( v - tmp ) / (valD[start]); } } /** Purpose ------- This routine is a block-asynchronous Jacobi iteration performing s local Jacobi-updates within the block. Input format is two CSR matrices, one containing the diagonal blocks, one containing the rest. Arguments --------- @param localiters magma_int_t number of local Jacobi-like updates @param D magma_s_sparse_matrix input matrix with diagonal blocks @param R magma_s_sparse_matrix input matrix with non-diagonal parts @param b magma_s_vector RHS @param x magma_s_vector* iterate/solution @ingroup magmasparse_sgegpuk ********************************************************************/ extern "C" magma_int_t magma_sbajac_csr( magma_int_t localiters, magma_s_sparse_matrix D, magma_s_sparse_matrix R, magma_s_vector b, magma_s_vector *x ){ int blocksize1 = BLOCKSIZE; int blocksize2 = 1; int dimgrid1 = ( D.num_rows + blocksize1 -1 ) / blocksize1; int dimgrid2 = 1; int dimgrid3 = 1; dim3 grid( dimgrid1, dimgrid2, dimgrid3 ); dim3 block( blocksize1, blocksize2, 1 ); if( R.nnz > 0 ){ if( localiters == 1 ) hipLaunchKernelGGL(( magma_sbajac_csr_kernel), dim3(grid), dim3(block), 0, magma_stream , D.num_rows, D.val, D.row, D.col, R.val, R.row, R.col, b.val, x->val ); else hipLaunchKernelGGL(( magma_sbajac_csr_ls_kernel), dim3(grid), dim3(block), 0, magma_stream , localiters, D.num_rows, D.val, D.row, D.col, R.val, R.row, R.col, b.val, x->val ); } else{ printf("error: all elements in diagonal block.\n"); } return MAGMA_SUCCESS; }
e6900b7259e271344e8ad2c64a76a6f358d74bd4.cu
/* -- MAGMA (version 1.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2014 @generated from zbajac_csr.cu normal z -> s, Wed Sep 17 15:08:43 2014 */ #include "common_magma.h" #include "magmasparse_s.h" #include "magma.h" #define PRECISION_s #define BLOCKSIZE 256 __global__ void magma_sbajac_csr_ls_kernel(int localiters, int n, float *valD, magma_index_t *rowD, magma_index_t *colD, float *valR, magma_index_t *rowR, magma_index_t *colR, const float * __restrict__ b, float *x ){ int ind_diag = blockIdx.x*blockDim.x; int index = blockIdx.x*blockDim.x+threadIdx.x; int i, j, start, end; if(index<n){ start=rowR[index]; end =rowR[index+1]; float zero = MAGMA_S_MAKE(0.0, 0.0); float bl, tmp = zero, v = zero; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif #pragma unroll for( i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; start=rowD[index]; end =rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; v = bl - v; /* add more local iterations */ __shared__ float local_x[ BLOCKSIZE ]; local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]); __syncthreads(); #pragma unroll for( j=0; j<localiters; j++ ) { tmp = zero; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * local_x[ colD[i] - ind_diag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } x[index] = local_x[threadIdx.x]; } } __global__ void magma_sbajac_csr_kernel( int n, float *valD, magma_index_t *rowD, magma_index_t *colD, float *valR, magma_index_t *rowR, magma_index_t *colR, float *b, float *x ){ int index = blockIdx.x*blockDim.x+threadIdx.x; int i, start, end; if(index<n){ float zero = MAGMA_S_MAKE(0.0, 0.0); float bl, tmp = zero, v = zero; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif start=rowR[index]; end =rowR[index+1]; #pragma unroll for( 
i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; v = bl - v; start=rowD[index]; end =rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; x[index] = x[index] + ( v - tmp ) / (valD[start]); } } /** Purpose ------- This routine is a block-asynchronous Jacobi iteration performing s local Jacobi-updates within the block. Input format is two CSR matrices, one containing the diagonal blocks, one containing the rest. Arguments --------- @param localiters magma_int_t number of local Jacobi-like updates @param D magma_s_sparse_matrix input matrix with diagonal blocks @param R magma_s_sparse_matrix input matrix with non-diagonal parts @param b magma_s_vector RHS @param x magma_s_vector* iterate/solution @ingroup magmasparse_sgegpuk ********************************************************************/ extern "C" magma_int_t magma_sbajac_csr( magma_int_t localiters, magma_s_sparse_matrix D, magma_s_sparse_matrix R, magma_s_vector b, magma_s_vector *x ){ int blocksize1 = BLOCKSIZE; int blocksize2 = 1; int dimgrid1 = ( D.num_rows + blocksize1 -1 ) / blocksize1; int dimgrid2 = 1; int dimgrid3 = 1; dim3 grid( dimgrid1, dimgrid2, dimgrid3 ); dim3 block( blocksize1, blocksize2, 1 ); if( R.nnz > 0 ){ if( localiters == 1 ) magma_sbajac_csr_kernel<<< grid, block, 0, magma_stream >>> ( D.num_rows, D.val, D.row, D.col, R.val, R.row, R.col, b.val, x->val ); else magma_sbajac_csr_ls_kernel<<< grid, block, 0, magma_stream >>> ( localiters, D.num_rows, D.val, D.row, D.col, R.val, R.row, R.col, b.val, x->val ); } else{ printf("error: all elements in diagonal block.\n"); } return MAGMA_SUCCESS; }
a6ec439b1601677bffce15a978185daa0c545127.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #define N 64 __global__ void matrixMulGPU( int * a, int * b, int * c ) { int val = 0; int row = blockIdx.x * blockDim.x + threadIdx.x; int col = blockIdx.y * blockDim.y + threadIdx.y; if (row < N && col < N) { for ( int k = 0; k < N; ++k ) val += a[row * N + k] * b[k * N + col]; c[row * N + col] = val; } } /* * This CPU function already works, and will run to create a solution matrix * against which to verify your work building out the matrixMulGPU kernel. */ void matrixMulCPU( int * a, int * b, int * c ) { int val = 0; for( int row = 0; row < N; ++row ) for( int col = 0; col < N; ++col ) { val = 0; for ( int k = 0; k < N; ++k ) val += a[row * N + k] * b[k * N + col]; c[row * N + col] = val; } } int main() { int *a, *b, *c_cpu, *c_gpu; // Allocate a solution matrix for both the CPU and the GPU operations int size = N * N * sizeof (int); // Number of bytes of an N x N matrix // Allocate memory hipMallocManaged (&a, size); hipMallocManaged (&b, size); hipMallocManaged (&c_cpu, size); hipMallocManaged (&c_gpu, size); // Initialize memory; create 2D matrices for( int row = 0; row < N; ++row ) for( int col = 0; col < N; ++col ) { a[row*N + col] = row; b[row*N + col] = col+2; c_cpu[row*N + col] = 0; c_gpu[row*N + col] = 0; } /* * Assign `threads_per_block` and `number_of_blocks` 2D values * that can be used in matrixMulGPU above. 
*/ dim3 threads_per_block (16, 16, 1); // A 16 x 16 block threads dim3 number_of_blocks ((N / threads_per_block.x) + 1, (N / threads_per_block.y) + 1, 1); hipLaunchKernelGGL(( matrixMulGPU) , dim3(number_of_blocks), dim3(threads_per_block) , 0, 0, a, b, c_gpu ); hipDeviceSynchronize(); // Call the CPU version to check our work matrixMulCPU( a, b, c_cpu ); // Compare the two answers to make sure they are equal bool error = false; for( int row = 0; row < N && !error; ++row ) for( int col = 0; col < N && !error; ++col ) if (c_cpu[row * N + col] != c_gpu[row * N + col]) { printf("FOUND ERROR at c[%d][%d]\n", row, col); error = true; break; } if (!error) printf("Success!\n"); // Free all our allocated memory hipFree(a); hipFree(b); hipFree( c_cpu ); hipFree( c_gpu ); }
a6ec439b1601677bffce15a978185daa0c545127.cu
#include <stdio.h> #define N 64 __global__ void matrixMulGPU( int * a, int * b, int * c ) { int val = 0; int row = blockIdx.x * blockDim.x + threadIdx.x; int col = blockIdx.y * blockDim.y + threadIdx.y; if (row < N && col < N) { for ( int k = 0; k < N; ++k ) val += a[row * N + k] * b[k * N + col]; c[row * N + col] = val; } } /* * This CPU function already works, and will run to create a solution matrix * against which to verify your work building out the matrixMulGPU kernel. */ void matrixMulCPU( int * a, int * b, int * c ) { int val = 0; for( int row = 0; row < N; ++row ) for( int col = 0; col < N; ++col ) { val = 0; for ( int k = 0; k < N; ++k ) val += a[row * N + k] * b[k * N + col]; c[row * N + col] = val; } } int main() { int *a, *b, *c_cpu, *c_gpu; // Allocate a solution matrix for both the CPU and the GPU operations int size = N * N * sizeof (int); // Number of bytes of an N x N matrix // Allocate memory cudaMallocManaged (&a, size); cudaMallocManaged (&b, size); cudaMallocManaged (&c_cpu, size); cudaMallocManaged (&c_gpu, size); // Initialize memory; create 2D matrices for( int row = 0; row < N; ++row ) for( int col = 0; col < N; ++col ) { a[row*N + col] = row; b[row*N + col] = col+2; c_cpu[row*N + col] = 0; c_gpu[row*N + col] = 0; } /* * Assign `threads_per_block` and `number_of_blocks` 2D values * that can be used in matrixMulGPU above. 
*/ dim3 threads_per_block (16, 16, 1); // A 16 x 16 block threads dim3 number_of_blocks ((N / threads_per_block.x) + 1, (N / threads_per_block.y) + 1, 1); matrixMulGPU <<< number_of_blocks, threads_per_block >>> ( a, b, c_gpu ); cudaDeviceSynchronize(); // Call the CPU version to check our work matrixMulCPU( a, b, c_cpu ); // Compare the two answers to make sure they are equal bool error = false; for( int row = 0; row < N && !error; ++row ) for( int col = 0; col < N && !error; ++col ) if (c_cpu[row * N + col] != c_gpu[row * N + col]) { printf("FOUND ERROR at c[%d][%d]\n", row, col); error = true; break; } if (!error) printf("Success!\n"); // Free all our allocated memory cudaFree(a); cudaFree(b); cudaFree( c_cpu ); cudaFree( c_gpu ); }
4abe78ee94ec41b0708bf83cd2e6ade2bbf2089c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
// #include <optix.h> #include "optixPathTracer.h" #include "random.h" #include <sutil/vec_math.h> #include <cuda/helpers.h> #define TWO_PI 6.2831853071795864769252867665590057683943f #define EPSILON 0.00001f extern "C" { __constant__ Params params; } //------------------------------------------------------------------------------ // // // //------------------------------------------------------------------------------ struct RadiancePRD { // TODO: move some state directly into payload registers? float3 emitted; float3 radiance; float3 attenuation; float3 origin; float3 direction; unsigned int seed; int countEmitted; int done; int pad; bool hitLight; }; struct Onb { __forceinline__ __device__ Onb(const float3& normal) { m_normal = normal; if( fabs(m_normal.x) > fabs(m_normal.z) ) { m_binormal.x = -m_normal.y; m_binormal.y = m_normal.x; m_binormal.z = 0; } else { m_binormal.x = 0; m_binormal.y = -m_normal.z; m_binormal.z = m_normal.y; } m_binormal = normalize(m_binormal); m_tangent = cross( m_binormal, m_normal ); } __forceinline__ __device__ void inverse_transform(float3& p) const { p = p.x*m_tangent + p.y*m_binormal + p.z*m_normal; p = normalize(p); } __forceinline__ __device__ void reflect_ray(float3& p) const { p = reflect(p, m_normal); } __forceinline__ __device__ float3 refract_ray(const float eta, float3& p, float3& n) const { float k = 1.f - eta * eta * (1.f - dot(n, p) * dot(n, p)); if (k < 0.f) return make_float3(0.f); return eta * p + (eta * dot(n, p) - sqrtf(k)) * n; } __forceinline__ __device__ void compute_fresnel_direction(const float u1, const float ior, float3& p) const { float cosine = dot(p, m_normal); if (cosine > 1.f) { cosine = 1.f; } else if (cosine < -1.f) { cosine = -1.f; } float3 n = m_normal; float reflect_prob; float etaI = 1.f; float etaT = ior; if (cosine < 0) { cosine = -cosine; } else { float temp = etaI; etaI = etaT; etaT = temp; n = -n; } float eta = etaI / etaT; float3 refractDir = refract_ray(eta, p, n); if (length(refractDir) == 
0.f) { reflect_prob = 1.f; } else { float R0 = (etaI - etaT) / (etaI + etaT); R0 *= R0; reflect_prob = R0 + (1.f - R0) * pow(1.f - cosine, 5.f); } float prob = u1; if (prob < reflect_prob) { reflect_ray(p); } else { p = refractDir; } } float3 m_tangent; float3 m_binormal; float3 m_normal; }; //------------------------------------------------------------------------------ // // // //------------------------------------------------------------------------------ static __forceinline__ __device__ void* unpackPointer( unsigned int i0, unsigned int i1 ) { const unsigned long long uptr = static_cast<unsigned long long>( i0 ) << 32 | i1; void* ptr = reinterpret_cast<void*>( uptr ); return ptr; } static __forceinline__ __device__ void packPointer( void* ptr, unsigned int& i0, unsigned int& i1 ) { const unsigned long long uptr = reinterpret_cast<unsigned long long>( ptr ); i0 = uptr >> 32; i1 = uptr & 0x00000000ffffffff; } static __forceinline__ __device__ RadiancePRD* getPRD() { const unsigned int u0 = optixGetPayload_0(); const unsigned int u1 = optixGetPayload_1(); return reinterpret_cast<RadiancePRD*>( unpackPointer( u0, u1 ) ); } static __forceinline__ __device__ void setPayloadOcclusion( bool occluded ) { optixSetPayload_0( static_cast<unsigned int>( occluded ) ); } static __forceinline__ __device__ void cosine_sample_hemisphere(const float u1, const float u2, float3& p) { // Uniformly sample disk. const float r = sqrtf( u1 ); const float phi = 2.0f*M_PIf * u2; p.x = r * cosf( phi ); p.y = r * sinf( phi ); // Project up to hemisphere. 
p.z = sqrtf( fmaxf( 0.0f, 1.0f - p.x*p.x - p.y*p.y ) ); } static __forceinline__ __device__ void glossy_lobe_sample(const float u1, const float u2, const float spec_exp, float3& p) { float theta = acos(pow(u1, 1.f / (spec_exp + 1.f))); float phi = TWO_PI * u2; p = make_float3(cos(phi) * sin(theta), sin(phi) * sin(theta), cos(theta)); } /* Compute new ray direction based on material */ static __forceinline__ __device__ void computeNewDirection(const float u1, const float u2, const float ior, const float spec_exp, float3& p, const Material& m, const float3& normal) { Onb onb(normal); switch (m) { case DIFFUSE: cosine_sample_hemisphere(u1, u2, p); onb.inverse_transform(p); // transform the new ray direction to tangent space break; case MIRROR: onb.reflect_ray(p); break; case FRESNEL: onb.compute_fresnel_direction(u1, ior, p); break; case GLOSSY: glossy_lobe_sample(u1, u2, spec_exp, p); onb.inverse_transform(p); // transform the new ray direction to tangent space break; default: break; } } static __forceinline__ __device__ void traceRadiance( OptixTraversableHandle handle, float3 ray_origin, float3 ray_direction, float tmin, float tmax, RadiancePRD* prd ) { // TODO: deduce stride from num ray-types passed in params unsigned int u0, u1; packPointer( prd, u0, u1 ); optixTrace( handle, ray_origin, ray_direction, tmin, tmax, 0.0f, // rayTime OptixVisibilityMask( 1 ), OPTIX_RAY_FLAG_NONE, RAY_TYPE_RADIANCE, // SBT offset RAY_TYPE_COUNT, // SBT stride RAY_TYPE_RADIANCE, // missSBTIndex u0, u1 ); } static __forceinline__ __device__ bool traceOcclusion( OptixTraversableHandle handle, float3 ray_origin, float3 ray_direction, float tmin, float tmax ) { unsigned int occluded = 0u; optixTrace( handle, ray_origin, ray_direction, tmin, tmax, 0.0f, // rayTime OptixVisibilityMask( 1 ), OPTIX_RAY_FLAG_TERMINATE_ON_FIRST_HIT, RAY_TYPE_OCCLUSION, // SBT offset RAY_TYPE_COUNT, // SBT stride RAY_TYPE_OCCLUSION, // missSBTIndex occluded ); return occluded; } 
//------------------------------------------------------------------------------ // // // //------------------------------------------------------------------------------ extern "C" __global__ void __raygen__rg() { const int w = params.width; const int h = params.height; const float3 eye = params.eye; const float3 U = params.U; const float3 V = params.V; const float3 W = params.W; const uint3 idx = optixGetLaunchIndex(); const int subframe_index = params.subframe_index; unsigned int seed = tea<4>( idx.y*w + idx.x, subframe_index ); float3 result = make_float3( 0.0f ); int i = params.samples_per_launch; do { // The center of each pixel is at fraction (0.5,0.5) const float2 subpixel_jitter = make_float2( rnd( seed ), rnd( seed ) ); const float2 d = 2.0f * make_float2( ( static_cast<float>( idx.x ) + subpixel_jitter.x ) / static_cast<float>( w ), ( static_cast<float>( idx.y ) + subpixel_jitter.y ) / static_cast<float>( h ) ) - 1.0f; float3 ray_direction = normalize(d.x*U + d.y*V + W); float3 ray_origin = eye; RadiancePRD prd; prd.emitted = make_float3(0.f); prd.radiance = make_float3(0.f); prd.attenuation = make_float3(1.f); prd.countEmitted = true; prd.done = false; prd.seed = seed; prd.hitLight = false; int depth = 0; for( ;; ) { traceRadiance( params.handle, ray_origin, ray_direction, 0.01f, // tmin // TODO: smarter offset 1e16f, // tmax &prd ); result += prd.emitted; result += prd.radiance * prd.attenuation; if( depth >= params.depth || prd.hitLight) // Stop tracing if a certain depth is reached or a light source is hit break; // If the ray did not intersect anything we want to set the total ray accumulation to zero if (prd.done) { result = make_float3(0.f); break; } ray_origin = prd.origin; ray_direction = prd.direction; // Apply Russian Roulette if (depth > 2) { // find max component of attenuation float maxComp = 0.f; if (prd.attenuation.x > prd.attenuation.y) { if (prd.attenuation.x > prd.attenuation.z) maxComp = prd.attenuation.x; else maxComp = 
prd.attenuation.z; } else { if (prd.attenuation.y > prd.attenuation.z) maxComp = prd.attenuation.y; else maxComp = prd.attenuation.z; } float r = rnd(prd.seed); if (r > maxComp) break; prd.attenuation /= maxComp; } ++depth; } } while( --i ); const uint3 launch_index = optixGetLaunchIndex(); const unsigned int image_index = launch_index.y * params.width + launch_index.x; float3 accum_color = result / static_cast<float>( params.samples_per_launch ); if( subframe_index > 0 ) { const float a = 1.0f / static_cast<float>( subframe_index+1 ); const float3 accum_color_prev = make_float3( params.accum_buffer[ image_index ]); accum_color = lerp( accum_color_prev, accum_color, a ); } params.accum_buffer[ image_index ] = make_float4( accum_color, 1.0f); params.frame_buffer[ image_index ] = make_color ( accum_color ); } extern "C" __global__ void __miss__radiance() { MissData* rt_data = reinterpret_cast<MissData*>( optixGetSbtDataPointer() ); RadiancePRD* prd = getPRD(); prd->radiance = make_float3( rt_data->bg_color ); prd->done = true; } extern "C" __global__ void __closesthit__occlusion() { setPayloadOcclusion( true ); } extern "C" __global__ void __closesthit__radiance() { HitGroupData* rt_data = (HitGroupData*)optixGetSbtDataPointer(); const int prim_idx = optixGetPrimitiveIndex(); const float3 ray_dir = optixGetWorldRayDirection(); const int vert_idx_offset = prim_idx*3; const Material mat = rt_data->mat; // material const float3 P = optixGetWorldRayOrigin() + optixGetRayTmax() * ray_dir; // this is the intersection point! 
const float3 v0 = make_float3(rt_data->vertices[vert_idx_offset + 0]); const float3 v1 = make_float3(rt_data->vertices[vert_idx_offset + 1]); const float3 v2 = make_float3(rt_data->vertices[vert_idx_offset + 2]); const float3 N_0 = normalize(cross(v1 - v0, v2 - v0)); const float3 N = faceforward( N_0, -ray_dir, N_0 ); RadiancePRD* prd = getPRD(); if( prd->countEmitted ) prd->emitted = rt_data->emission_color; else prd->emitted = make_float3( 0.0f ); // Return if a light source is hit if (mat == EMISSIVE) { prd->hitLight = true; prd->radiance += rt_data->emission_color; return; } unsigned int seed = prd->seed; { const float z1 = rnd(seed); const float z2 = rnd(seed); float3 w_in = make_float3(ray_dir.x, ray_dir.y, ray_dir.z); computeNewDirection(z1, z2, rt_data->ior, rt_data->spec_exp, w_in, mat, N); prd->direction = w_in; prd->origin = P + prd->direction * EPSILON; // Update attenuation with brdf sample if (mat == GLOSSY || mat == MIRROR || mat == FRESNEL) { prd->attenuation *= (rt_data->specular_color); } else { prd->attenuation *= (rt_data->diffuse_color); } prd->countEmitted = false; } const float z1 = rnd(seed); const float z2 = rnd(seed); prd->seed = seed; // Choose a random light to sample from // if there is no light in the scene return if (params.num_lights == 0) return; Light light = params.lights[lcg(seed) % params.num_lights]; if (light.shape == POINT_LIGHT) { const float dist = length(light.corner - P); if (dist <= 0.01f) { // too close to point light -> consider this as intersection with the point light prd->hitLight = true; prd->radiance += rt_data->emission_color; return; } const float3 L = normalize(light.corner - P); float nDl = dot(N, L); float weight = 0.f; // Check occlusion if (nDl > 0.f) { const bool occluded = traceOcclusion( params.handle, P, L, 0.01f, // tmin dist - 0.01f // tmax ); // If the point light is not occluded, add emission / distance squared to radiance // With scenes of only point lights we expect sharp shadows if (!occluded) { 
const float dist_2 = dist * dist; if (dist_2 > 0.f) { weight = nDl / dist_2; } } } prd->radiance += (light.emission * weight); } else if (light.shape == SPOT_LIGHT) { const float dist = length(light.corner - P); if (dist <= 0.01f) { // too close to spot light -> consider this as intersection with the spot light prd->hitLight = true; prd->radiance += rt_data->emission_color; return; } float3 L = normalize(light.corner - P); float nDl = dot(N, L); float weight = 0.f; // Check occlusion if (nDl > 0.f) { const bool occluded = traceOcclusion( params.handle, P, L, 0.01f, // tmin dist - 0.01f // tmax ); // If the point light is not occluded, add emission / distance squared to radiance // With scenes of only point lights we expect sharp shadows if (!occluded) { const float dist_2 = dist * dist; if (dist_2 > 0.f) { // Compute falloff float falloff = 0.f; float cos_angle = dot(normalize(P - light.corner), light.normal); if (cos_angle < light.width) return; else if (cos_angle > light.falloff_start) { falloff = 1.f; } else { if (light.falloff_start - light.width != 0.f) { float delta = (cos_angle - light.width) / (light.falloff_start - light.width); falloff = delta * delta * delta * delta; } } weight = nDl * falloff / dist_2; } } } prd->radiance += (light.emission * weight); } else { const float3 light_pos = light.corner + light.v1 * z1 + light.v2 * z2; // Calculate properties of light sample (for area based pdf) const float Ldist = length(light_pos - P); const float3 L = normalize(light_pos - P); const float nDl = dot(N, L); const float LnDl = -dot(light.normal, L); float weight = 0.0f; if (nDl > 0.0f && LnDl > 0.0f) { const bool occluded = traceOcclusion( params.handle, P, L, 0.01f, // tmin Ldist - 0.01f // tmax ); if (!occluded) { const float A = length(cross(light.v1, light.v2)); weight = nDl * LnDl * A / (M_PIf * Ldist * Ldist); } } prd->radiance += (light.emission * weight); } }
4abe78ee94ec41b0708bf83cd2e6ade2bbf2089c.cu
// // Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
// #include <optix.h> #include "optixPathTracer.h" #include "random.h" #include <sutil/vec_math.h> #include <cuda/helpers.h> #define TWO_PI 6.2831853071795864769252867665590057683943f #define EPSILON 0.00001f extern "C" { __constant__ Params params; } //------------------------------------------------------------------------------ // // // //------------------------------------------------------------------------------ struct RadiancePRD { // TODO: move some state directly into payload registers? float3 emitted; float3 radiance; float3 attenuation; float3 origin; float3 direction; unsigned int seed; int countEmitted; int done; int pad; bool hitLight; }; struct Onb { __forceinline__ __device__ Onb(const float3& normal) { m_normal = normal; if( fabs(m_normal.x) > fabs(m_normal.z) ) { m_binormal.x = -m_normal.y; m_binormal.y = m_normal.x; m_binormal.z = 0; } else { m_binormal.x = 0; m_binormal.y = -m_normal.z; m_binormal.z = m_normal.y; } m_binormal = normalize(m_binormal); m_tangent = cross( m_binormal, m_normal ); } __forceinline__ __device__ void inverse_transform(float3& p) const { p = p.x*m_tangent + p.y*m_binormal + p.z*m_normal; p = normalize(p); } __forceinline__ __device__ void reflect_ray(float3& p) const { p = reflect(p, m_normal); } __forceinline__ __device__ float3 refract_ray(const float eta, float3& p, float3& n) const { float k = 1.f - eta * eta * (1.f - dot(n, p) * dot(n, p)); if (k < 0.f) return make_float3(0.f); return eta * p + (eta * dot(n, p) - sqrtf(k)) * n; } __forceinline__ __device__ void compute_fresnel_direction(const float u1, const float ior, float3& p) const { float cosine = dot(p, m_normal); if (cosine > 1.f) { cosine = 1.f; } else if (cosine < -1.f) { cosine = -1.f; } float3 n = m_normal; float reflect_prob; float etaI = 1.f; float etaT = ior; if (cosine < 0) { cosine = -cosine; } else { float temp = etaI; etaI = etaT; etaT = temp; n = -n; } float eta = etaI / etaT; float3 refractDir = refract_ray(eta, p, n); if (length(refractDir) == 
0.f) { reflect_prob = 1.f; } else { float R0 = (etaI - etaT) / (etaI + etaT); R0 *= R0; reflect_prob = R0 + (1.f - R0) * pow(1.f - cosine, 5.f); } float prob = u1; if (prob < reflect_prob) { reflect_ray(p); } else { p = refractDir; } } float3 m_tangent; float3 m_binormal; float3 m_normal; }; //------------------------------------------------------------------------------ // // // //------------------------------------------------------------------------------ static __forceinline__ __device__ void* unpackPointer( unsigned int i0, unsigned int i1 ) { const unsigned long long uptr = static_cast<unsigned long long>( i0 ) << 32 | i1; void* ptr = reinterpret_cast<void*>( uptr ); return ptr; } static __forceinline__ __device__ void packPointer( void* ptr, unsigned int& i0, unsigned int& i1 ) { const unsigned long long uptr = reinterpret_cast<unsigned long long>( ptr ); i0 = uptr >> 32; i1 = uptr & 0x00000000ffffffff; } static __forceinline__ __device__ RadiancePRD* getPRD() { const unsigned int u0 = optixGetPayload_0(); const unsigned int u1 = optixGetPayload_1(); return reinterpret_cast<RadiancePRD*>( unpackPointer( u0, u1 ) ); } static __forceinline__ __device__ void setPayloadOcclusion( bool occluded ) { optixSetPayload_0( static_cast<unsigned int>( occluded ) ); } static __forceinline__ __device__ void cosine_sample_hemisphere(const float u1, const float u2, float3& p) { // Uniformly sample disk. const float r = sqrtf( u1 ); const float phi = 2.0f*M_PIf * u2; p.x = r * cosf( phi ); p.y = r * sinf( phi ); // Project up to hemisphere. 
p.z = sqrtf( fmaxf( 0.0f, 1.0f - p.x*p.x - p.y*p.y ) ); } static __forceinline__ __device__ void glossy_lobe_sample(const float u1, const float u2, const float spec_exp, float3& p) { float theta = acos(pow(u1, 1.f / (spec_exp + 1.f))); float phi = TWO_PI * u2; p = make_float3(cos(phi) * sin(theta), sin(phi) * sin(theta), cos(theta)); } /* Compute new ray direction based on material */ static __forceinline__ __device__ void computeNewDirection(const float u1, const float u2, const float ior, const float spec_exp, float3& p, const Material& m, const float3& normal) { Onb onb(normal); switch (m) { case DIFFUSE: cosine_sample_hemisphere(u1, u2, p); onb.inverse_transform(p); // transform the new ray direction to tangent space break; case MIRROR: onb.reflect_ray(p); break; case FRESNEL: onb.compute_fresnel_direction(u1, ior, p); break; case GLOSSY: glossy_lobe_sample(u1, u2, spec_exp, p); onb.inverse_transform(p); // transform the new ray direction to tangent space break; default: break; } } static __forceinline__ __device__ void traceRadiance( OptixTraversableHandle handle, float3 ray_origin, float3 ray_direction, float tmin, float tmax, RadiancePRD* prd ) { // TODO: deduce stride from num ray-types passed in params unsigned int u0, u1; packPointer( prd, u0, u1 ); optixTrace( handle, ray_origin, ray_direction, tmin, tmax, 0.0f, // rayTime OptixVisibilityMask( 1 ), OPTIX_RAY_FLAG_NONE, RAY_TYPE_RADIANCE, // SBT offset RAY_TYPE_COUNT, // SBT stride RAY_TYPE_RADIANCE, // missSBTIndex u0, u1 ); } static __forceinline__ __device__ bool traceOcclusion( OptixTraversableHandle handle, float3 ray_origin, float3 ray_direction, float tmin, float tmax ) { unsigned int occluded = 0u; optixTrace( handle, ray_origin, ray_direction, tmin, tmax, 0.0f, // rayTime OptixVisibilityMask( 1 ), OPTIX_RAY_FLAG_TERMINATE_ON_FIRST_HIT, RAY_TYPE_OCCLUSION, // SBT offset RAY_TYPE_COUNT, // SBT stride RAY_TYPE_OCCLUSION, // missSBTIndex occluded ); return occluded; } 
//------------------------------------------------------------------------------ // // // //------------------------------------------------------------------------------ extern "C" __global__ void __raygen__rg() { const int w = params.width; const int h = params.height; const float3 eye = params.eye; const float3 U = params.U; const float3 V = params.V; const float3 W = params.W; const uint3 idx = optixGetLaunchIndex(); const int subframe_index = params.subframe_index; unsigned int seed = tea<4>( idx.y*w + idx.x, subframe_index ); float3 result = make_float3( 0.0f ); int i = params.samples_per_launch; do { // The center of each pixel is at fraction (0.5,0.5) const float2 subpixel_jitter = make_float2( rnd( seed ), rnd( seed ) ); const float2 d = 2.0f * make_float2( ( static_cast<float>( idx.x ) + subpixel_jitter.x ) / static_cast<float>( w ), ( static_cast<float>( idx.y ) + subpixel_jitter.y ) / static_cast<float>( h ) ) - 1.0f; float3 ray_direction = normalize(d.x*U + d.y*V + W); float3 ray_origin = eye; RadiancePRD prd; prd.emitted = make_float3(0.f); prd.radiance = make_float3(0.f); prd.attenuation = make_float3(1.f); prd.countEmitted = true; prd.done = false; prd.seed = seed; prd.hitLight = false; int depth = 0; for( ;; ) { traceRadiance( params.handle, ray_origin, ray_direction, 0.01f, // tmin // TODO: smarter offset 1e16f, // tmax &prd ); result += prd.emitted; result += prd.radiance * prd.attenuation; if( depth >= params.depth || prd.hitLight) // Stop tracing if a certain depth is reached or a light source is hit break; // If the ray did not intersect anything we want to set the total ray accumulation to zero if (prd.done) { result = make_float3(0.f); break; } ray_origin = prd.origin; ray_direction = prd.direction; // Apply Russian Roulette if (depth > 2) { // find max component of attenuation float maxComp = 0.f; if (prd.attenuation.x > prd.attenuation.y) { if (prd.attenuation.x > prd.attenuation.z) maxComp = prd.attenuation.x; else maxComp = 
prd.attenuation.z; } else { if (prd.attenuation.y > prd.attenuation.z) maxComp = prd.attenuation.y; else maxComp = prd.attenuation.z; } float r = rnd(prd.seed); if (r > maxComp) break; prd.attenuation /= maxComp; } ++depth; } } while( --i ); const uint3 launch_index = optixGetLaunchIndex(); const unsigned int image_index = launch_index.y * params.width + launch_index.x; float3 accum_color = result / static_cast<float>( params.samples_per_launch ); if( subframe_index > 0 ) { const float a = 1.0f / static_cast<float>( subframe_index+1 ); const float3 accum_color_prev = make_float3( params.accum_buffer[ image_index ]); accum_color = lerp( accum_color_prev, accum_color, a ); } params.accum_buffer[ image_index ] = make_float4( accum_color, 1.0f); params.frame_buffer[ image_index ] = make_color ( accum_color ); } extern "C" __global__ void __miss__radiance() { MissData* rt_data = reinterpret_cast<MissData*>( optixGetSbtDataPointer() ); RadiancePRD* prd = getPRD(); prd->radiance = make_float3( rt_data->bg_color ); prd->done = true; } extern "C" __global__ void __closesthit__occlusion() { setPayloadOcclusion( true ); } extern "C" __global__ void __closesthit__radiance() { HitGroupData* rt_data = (HitGroupData*)optixGetSbtDataPointer(); const int prim_idx = optixGetPrimitiveIndex(); const float3 ray_dir = optixGetWorldRayDirection(); const int vert_idx_offset = prim_idx*3; const Material mat = rt_data->mat; // material const float3 P = optixGetWorldRayOrigin() + optixGetRayTmax() * ray_dir; // this is the intersection point! 
const float3 v0 = make_float3(rt_data->vertices[vert_idx_offset + 0]); const float3 v1 = make_float3(rt_data->vertices[vert_idx_offset + 1]); const float3 v2 = make_float3(rt_data->vertices[vert_idx_offset + 2]); const float3 N_0 = normalize(cross(v1 - v0, v2 - v0)); const float3 N = faceforward( N_0, -ray_dir, N_0 ); RadiancePRD* prd = getPRD(); if( prd->countEmitted ) prd->emitted = rt_data->emission_color; else prd->emitted = make_float3( 0.0f ); // Return if a light source is hit if (mat == EMISSIVE) { prd->hitLight = true; prd->radiance += rt_data->emission_color; return; } unsigned int seed = prd->seed; { const float z1 = rnd(seed); const float z2 = rnd(seed); float3 w_in = make_float3(ray_dir.x, ray_dir.y, ray_dir.z); computeNewDirection(z1, z2, rt_data->ior, rt_data->spec_exp, w_in, mat, N); prd->direction = w_in; prd->origin = P + prd->direction * EPSILON; // Update attenuation with brdf sample if (mat == GLOSSY || mat == MIRROR || mat == FRESNEL) { prd->attenuation *= (rt_data->specular_color); } else { prd->attenuation *= (rt_data->diffuse_color); } prd->countEmitted = false; } const float z1 = rnd(seed); const float z2 = rnd(seed); prd->seed = seed; // Choose a random light to sample from // if there is no light in the scene return if (params.num_lights == 0) return; Light light = params.lights[lcg(seed) % params.num_lights]; if (light.shape == POINT_LIGHT) { const float dist = length(light.corner - P); if (dist <= 0.01f) { // too close to point light -> consider this as intersection with the point light prd->hitLight = true; prd->radiance += rt_data->emission_color; return; } const float3 L = normalize(light.corner - P); float nDl = dot(N, L); float weight = 0.f; // Check occlusion if (nDl > 0.f) { const bool occluded = traceOcclusion( params.handle, P, L, 0.01f, // tmin dist - 0.01f // tmax ); // If the point light is not occluded, add emission / distance squared to radiance // With scenes of only point lights we expect sharp shadows if (!occluded) { 
const float dist_2 = dist * dist; if (dist_2 > 0.f) { weight = nDl / dist_2; } } } prd->radiance += (light.emission * weight); } else if (light.shape == SPOT_LIGHT) { const float dist = length(light.corner - P); if (dist <= 0.01f) { // too close to spot light -> consider this as intersection with the spot light prd->hitLight = true; prd->radiance += rt_data->emission_color; return; } float3 L = normalize(light.corner - P); float nDl = dot(N, L); float weight = 0.f; // Check occlusion if (nDl > 0.f) { const bool occluded = traceOcclusion( params.handle, P, L, 0.01f, // tmin dist - 0.01f // tmax ); // If the point light is not occluded, add emission / distance squared to radiance // With scenes of only point lights we expect sharp shadows if (!occluded) { const float dist_2 = dist * dist; if (dist_2 > 0.f) { // Compute falloff float falloff = 0.f; float cos_angle = dot(normalize(P - light.corner), light.normal); if (cos_angle < light.width) return; else if (cos_angle > light.falloff_start) { falloff = 1.f; } else { if (light.falloff_start - light.width != 0.f) { float delta = (cos_angle - light.width) / (light.falloff_start - light.width); falloff = delta * delta * delta * delta; } } weight = nDl * falloff / dist_2; } } } prd->radiance += (light.emission * weight); } else { const float3 light_pos = light.corner + light.v1 * z1 + light.v2 * z2; // Calculate properties of light sample (for area based pdf) const float Ldist = length(light_pos - P); const float3 L = normalize(light_pos - P); const float nDl = dot(N, L); const float LnDl = -dot(light.normal, L); float weight = 0.0f; if (nDl > 0.0f && LnDl > 0.0f) { const bool occluded = traceOcclusion( params.handle, P, L, 0.01f, // tmin Ldist - 0.01f // tmax ); if (!occluded) { const float A = length(cross(light.v1, light.v2)); weight = nDl * LnDl * A / (M_PIf * Ldist * Ldist); } } prd->radiance += (light.emission * weight); } }
b0ae6bbc3a4831ac05c54acc75ac3bb73cc4b653.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include "cutil_inline.h" #include "Mandelbrot_kernel.h" #include "Mandelbrot_kernel.cu" // The Mandelbrot CUDA GPU thread function /* Version using software scheduling of thread blocks. The idea here is to launch of fixed number of worker blocks to fill the machine, and have each block loop over the available work until it is all done. We use a counter in global memory to keep track of which blocks have been completed. The counter is incremented atomically by each worker block. This method can achieve higher performance when blocks take a wide range of different times to complete. */ __device__ unsigned int blockCounter; // global counter, initialized to zero before kernel launch template<class T> __global__ void Mandelbrot0_sm13(uchar4 *dst, const int imageW, const int imageH, const int crunch, const T xOff, const T yOff, const T xJP, const T yJP, const T scale, const uchar4 colors, const int frame, const int animationFrame, const int gridWidth, const int numBlocks, const bool isJ) { __shared__ unsigned int blockIndex; __shared__ unsigned int blockX, blockY; if ((threadIdx.x==0) && (threadIdx.y==0)) { // get block to process blockIndex = atomicAdd(&blockCounter, 1); blockX = blockIndex % gridWidth; // note: this is slow, but only called once per block here blockY = blockIndex / gridWidth; } __syncthreads(); // loop until all blocks completed while(blockIndex < numBlocks) { // process this block const int ix = blockDim.x * blockX + threadIdx.x; const int iy = blockDim.y * blockY + threadIdx.y; if ((ix < imageW) && (iy < imageH)) { // Calculate the location const T xPos = (T)ix * scale + xOff; const T yPos = (T)iy * scale + yOff; // Calculate the Mandelbrot index for the current location int m = CalcMandelbrot<T>(xPos, yPos, xJP, yJP, crunch, isJ); // int m = blockIdx.x; // uncomment to see scheduling order m = m > 0 ? 
crunch - m : 0; // Convert the Mandelbrot index into a color uchar4 color; if (m) { m += animationFrame; color.x = m * colors.x; color.y = m * colors.y; color.z = m * colors.z; } else { color.x = 0; color.y = 0; color.z = 0; } // Output the pixel int pixel = imageW * iy + ix; if (frame == 0) { color.w = 0; dst[pixel] = color; } else { int frame1 = frame + 1; int frame2 = frame1 / 2; dst[pixel].x = (dst[pixel].x * frame + color.x + frame2) / frame1; dst[pixel].y = (dst[pixel].y * frame + color.y + frame2) / frame1; dst[pixel].z = (dst[pixel].z * frame + color.z + frame2) / frame1; } } __syncthreads(); if ((threadIdx.x==0) && (threadIdx.y==0)) { // get block to process blockIndex = atomicAdd(&blockCounter, 1); blockX = blockIndex % gridWidth; // note: this is slow, but only called once per block here blockY = blockIndex / gridWidth; } __syncthreads(); } } // Mandelbrot0 // The Mandelbrot CUDA GPU thread function (double single version) __global__ void MandelbrotDS0_sm13(uchar4 *dst, const int imageW, const int imageH, const int crunch, const float xOff0, const float xOff1, const float yOff0, const float yOff1, const float xJP, const float yJP, const float scale, const uchar4 colors, const int frame, const int animationFrame, const int gridWidth, const int numBlocks, const bool isJ) { __shared__ unsigned int blockIndex; __shared__ unsigned int blockX, blockY; if ((threadIdx.x==0) && (threadIdx.y==0)) { // get block to process blockIndex = atomicAdd(&blockCounter, 1); blockX = blockIndex % gridWidth; // note: this is slow, but only called once per block here blockY = blockIndex / gridWidth; } __syncthreads(); // loop until all blocks completed while(blockIndex < numBlocks) { // process this block const int ix = blockDim.x * blockX + threadIdx.x; const int iy = blockDim.y * blockY + threadIdx.y; if ((ix < imageW) && (iy < imageH)) { // Calculate the location float xPos0 = (float)ix * scale; float xPos1 = 0.0f; float yPos0 = (float)iy * scale; float yPos1 = 0.0f; 
dsadd(xPos0, xPos1, xPos0, xPos1, xOff0, xOff1); dsadd(yPos0, yPos1, yPos0, yPos1, yOff0, yOff1); // Calculate the Mandelbrot index for the current location int m = CalcMandelbrotDS(xPos0, xPos1, yPos0, yPos1, xJP, yJP, crunch, isJ); m = m > 0 ? crunch - m : 0; // Convert the Mandelbrot index into a color uchar4 color; if (m) { m += animationFrame; color.x = m * colors.x; color.y = m * colors.y; color.z = m * colors.z; } else { color.x = 0; color.y = 0; color.z = 0; } // Output the pixel int pixel = imageW * iy + ix; if (frame == 0) { color.w = 0; dst[pixel] = color; } else { int frame1 = frame + 1; int frame2 = frame1 / 2; dst[pixel].x = (dst[pixel].x * frame + color.x + frame2) / frame1; dst[pixel].y = (dst[pixel].y * frame + color.y + frame2) / frame1; dst[pixel].z = (dst[pixel].z * frame + color.z + frame2) / frame1; } } __syncthreads(); if ((threadIdx.x==0) && (threadIdx.y==0)) { // get block to process blockIndex = atomicAdd(&blockCounter, 1); blockX = blockIndex % gridWidth; // note: this is slow, but only called once per block here blockY = blockIndex / gridWidth; } __syncthreads(); } } // MandelbrotDS0 // The Mandelbrot secondary AA pass CUDA GPU thread function template<class T> __global__ void Mandelbrot1_sm13(uchar4 *dst, const int imageW, const int imageH, const int crunch, const T xOff, const T yOff, const T xJP, const T yJP, const T scale, const uchar4 colors, const int frame, const int animationFrame, const int gridWidth, const int numBlocks, const bool isJ) { __shared__ unsigned int blockIndex; __shared__ unsigned int blockX, blockY; if ((threadIdx.x==0) && (threadIdx.y==0)) { // get block to process blockIndex = atomicAdd(&blockCounter, 1); blockX = blockIndex % gridWidth; // note: this is slow, but only called once per block here blockY = blockIndex / gridWidth; } __syncthreads(); // loop until all blocks completed while(blockIndex < numBlocks) { // process this block const int ix = blockDim.x * blockX + threadIdx.x; const int iy = blockDim.y * 
blockY + threadIdx.y; if ((ix < imageW) && (iy < imageH)) { // Get the current pixel color int pixel = imageW * iy + ix; uchar4 pixelColor = dst[pixel]; int count = 0; // Search for pixels out of tolerance surrounding the current pixel if (ix > 0) count += CheckColors(pixelColor, dst[pixel - 1]); if (ix + 1 < imageW) count += CheckColors(pixelColor, dst[pixel + 1]); if (iy > 0) count += CheckColors(pixelColor, dst[pixel - imageW]); if (iy + 1 < imageH) count += CheckColors(pixelColor, dst[pixel + imageW]); if (count) { // Calculate the location const T xPos = (T)ix * scale + xOff; const T yPos = (T)iy * scale + yOff; // Calculate the Mandelbrot index for the current location int m = CalcMandelbrot(xPos, yPos, xJP, yJP, crunch, isJ); m = m > 0 ? crunch - m : 0; // Convert the Mandelbrot index into a color uchar4 color; if (m) { m += animationFrame; color.x = m * colors.x; color.y = m * colors.y; color.z = m * colors.z; } else { color.x = 0; color.y = 0; color.z = 0; } // Output the pixel int frame1 = frame + 1; int frame2 = frame1 / 2; dst[pixel].x = (pixelColor.x * frame + color.x + frame2) / frame1; dst[pixel].y = (pixelColor.y * frame + color.y + frame2) / frame1; dst[pixel].z = (pixelColor.z * frame + color.z + frame2) / frame1; } } __syncthreads(); if ((threadIdx.x==0) && (threadIdx.y==0)) { // get block to process blockIndex = atomicAdd(&blockCounter, 1); blockX = blockIndex % gridWidth; // note: this is slow, but only called once per block here blockY = blockIndex / gridWidth; } __syncthreads(); } } // Mandelbrot1 // The Mandelbrot secondary AA pass CUDA GPU thread function (double single version) __global__ void MandelbrotDS1_sm13(uchar4 *dst, const int imageW, const int imageH, const int crunch, const float xOff0, const float xOff1, const float yOff0, const float yOff1, const float xJP, const float yJP, const float scale, const uchar4 colors, const int frame, const int animationFrame, const int gridWidth, const int numBlocks, const bool isJ) { __shared__ 
unsigned int blockIndex; __shared__ unsigned int blockX, blockY; if ((threadIdx.x==0) && (threadIdx.y==0)) { // get block to process blockIndex = atomicAdd(&blockCounter, 1); blockX = blockIndex % gridWidth; // note: this is slow, but only called once per block here blockY = blockIndex / gridWidth; } __syncthreads(); // loop until all blocks completed while(blockIndex < numBlocks) { if (blockIndex >= numBlocks) break; // finish // process this block const int ix = blockDim.x * blockX + threadIdx.x; const int iy = blockDim.y * blockY + threadIdx.y; if ((ix < imageW) && (iy < imageH)) { // Get the current pixel color int pixel = imageW * iy + ix; uchar4 pixelColor = dst[pixel]; int count = 0; // Search for pixels out of tolerance surrounding the current pixel if (ix > 0) count += CheckColors(pixelColor, dst[pixel - 1]); if (ix + 1 < imageW) count += CheckColors(pixelColor, dst[pixel + 1]); if (iy > 0) count += CheckColors(pixelColor, dst[pixel - imageW]); if (iy + 1 < imageH) count += CheckColors(pixelColor, dst[pixel + imageW]); if (count) { // Calculate the location float xPos0 = (float)ix * scale; float xPos1 = 0.0f; float yPos0 = (float)iy * scale; float yPos1 = 0.0f; dsadd(xPos0, xPos1, xPos0, xPos1, xOff0, xOff1); dsadd(yPos0, yPos1, yPos0, yPos1, yOff0, yOff1); // Calculate the Mandelbrot index for the current location int m = CalcMandelbrotDS(xPos0, xPos1, yPos0, yPos1, xJP, yJP, crunch, isJ); m = m > 0 ? 
crunch - m : 0; // Convert the Mandelbrot index into a color uchar4 color; if (m) { m += animationFrame; color.x = m * colors.x; color.y = m * colors.y; color.z = m * colors.z; } else { color.x = 0; color.y = 0; color.z = 0; } // Output the pixel int frame1 = frame + 1; int frame2 = frame1 / 2; dst[pixel].x = (pixelColor.x * frame + color.x + frame2) / frame1; dst[pixel].y = (pixelColor.y * frame + color.y + frame2) / frame1; dst[pixel].z = (pixelColor.z * frame + color.z + frame2) / frame1; } } __syncthreads(); if ((threadIdx.x==0) && (threadIdx.y==0)) { // get block to process blockIndex = atomicAdd(&blockCounter, 1); blockX = blockIndex % gridWidth; // note: this is slow, but only called once per block here blockY = blockIndex / gridWidth; } __syncthreads(); } } // MandelbrotDS1 // The host CPU Mandebrot thread spawner void RunMandelbrot0_sm13(uchar4 *dst, const int imageW, const int imageH, const int crunch, const double xOff, const double yOff, const double xjp, const double yjp, const double scale, const uchar4 colors, const int frame, const int animationFrame, const int mode, const int numSMs, const bool isJ) { dim3 threads(BLOCKDIM_X, BLOCKDIM_Y); dim3 grid(iDivUp(imageW, BLOCKDIM_X), iDivUp(imageH, BLOCKDIM_Y)); // zero block counter unsigned int hBlockCounter = 0; cutilSafeCall( hipMemcpyToSymbol(blockCounter, &hBlockCounter, sizeof(unsigned int), 0, hipMemcpyHostToDevice ) ); int numWorkerBlocks = numSMs; switch(mode) { default: case 0: hipLaunchKernelGGL(( Mandelbrot0_sm13<float>), dim3(numWorkerBlocks), dim3(threads), 0, 0, dst, imageW, imageH, crunch, (float)xOff, (float)yOff, (float)xjp, (float)yjp, (float)scale, colors, frame, animationFrame, grid.x, grid.x*grid.y, isJ); break; case 1: float x0, x1, y0, y1; dsdeq(x0, x1, xOff); dsdeq(y0, y1, yOff); hipLaunchKernelGGL(( MandelbrotDS0_sm13), dim3(numWorkerBlocks), dim3(threads), 0, 0, dst, imageW, imageH, crunch, x0, x1, y0, y1, xjp, yjp, (float)scale, colors, frame, animationFrame, grid.x, 
grid.x*grid.y, isJ); break; case 2: hipLaunchKernelGGL(( Mandelbrot0_sm13<double>), dim3(numWorkerBlocks), dim3(threads), 0, 0, dst, imageW, imageH, crunch, xOff, yOff, xjp, yjp, scale, colors, frame, animationFrame, grid.x, grid.x*grid.y, isJ); break; } cutilCheckMsg("Mandelbrot0_sm13 kernel execution failed.\n"); } // RunMandelbrot0 // The host CPU Mandebrot thread spawner void RunMandelbrot1_sm13(uchar4 *dst, const int imageW, const int imageH, const int crunch, const double xOff, const double yOff, const double xjp, const double yjp, const double scale, const uchar4 colors, const int frame, const int animationFrame, const int mode, const int numSMs, const bool isJ) { dim3 threads(BLOCKDIM_X, BLOCKDIM_Y); dim3 grid(iDivUp(imageW, BLOCKDIM_X), iDivUp(imageH, BLOCKDIM_Y)); // zero block counter unsigned int hBlockCounter = 0; cutilSafeCall( hipMemcpyToSymbol(blockCounter, &hBlockCounter, sizeof(unsigned int), 0, hipMemcpyHostToDevice ) ); int numWorkerBlocks = numSMs; switch(mode) { default: case 0: hipLaunchKernelGGL(( Mandelbrot1_sm13<float>), dim3(numWorkerBlocks), dim3(threads), 0, 0, dst, imageW, imageH, crunch, (float)xOff, (float)yOff, (float)xjp, (float)yjp, (float)scale, colors, frame, animationFrame, grid.x, grid.x*grid.y, isJ); break; case 1: float x0, x1, y0, y1; dsdeq(x0, x1, xOff); dsdeq(y0, y1, yOff); hipLaunchKernelGGL(( MandelbrotDS1_sm13), dim3(numWorkerBlocks), dim3(threads), 0, 0, dst, imageW, imageH, crunch, x0, x1, y0, y1, xjp, yjp, (float)scale, colors, frame, animationFrame, grid.x, grid.x*grid.y, isJ); break; case 2: hipLaunchKernelGGL(( Mandelbrot1_sm13<double>), dim3(numWorkerBlocks), dim3(threads), 0, 0, dst, imageW, imageH, crunch, xOff, yOff, xjp, yjp, scale, colors, frame, animationFrame, grid.x, grid.x*grid.y, isJ); break; } cutilCheckMsg("Mandelbrot1_sm13 kernel execution failed.\n"); } // RunMandelbrot1 // check if we're running in emulation mode int inEmulationMode() { #if 1 return 1; #else return 0; #endif } // inEmulationMode
b0ae6bbc3a4831ac05c54acc75ac3bb73cc4b653.cu
#include <stdio.h> #include "cutil_inline.h" #include "Mandelbrot_kernel.h" #include "Mandelbrot_kernel.cu" // The Mandelbrot CUDA GPU thread function /* Version using software scheduling of thread blocks. The idea here is to launch of fixed number of worker blocks to fill the machine, and have each block loop over the available work until it is all done. We use a counter in global memory to keep track of which blocks have been completed. The counter is incremented atomically by each worker block. This method can achieve higher performance when blocks take a wide range of different times to complete. */ __device__ unsigned int blockCounter; // global counter, initialized to zero before kernel launch template<class T> __global__ void Mandelbrot0_sm13(uchar4 *dst, const int imageW, const int imageH, const int crunch, const T xOff, const T yOff, const T xJP, const T yJP, const T scale, const uchar4 colors, const int frame, const int animationFrame, const int gridWidth, const int numBlocks, const bool isJ) { __shared__ unsigned int blockIndex; __shared__ unsigned int blockX, blockY; if ((threadIdx.x==0) && (threadIdx.y==0)) { // get block to process blockIndex = atomicAdd(&blockCounter, 1); blockX = blockIndex % gridWidth; // note: this is slow, but only called once per block here blockY = blockIndex / gridWidth; } __syncthreads(); // loop until all blocks completed while(blockIndex < numBlocks) { // process this block const int ix = blockDim.x * blockX + threadIdx.x; const int iy = blockDim.y * blockY + threadIdx.y; if ((ix < imageW) && (iy < imageH)) { // Calculate the location const T xPos = (T)ix * scale + xOff; const T yPos = (T)iy * scale + yOff; // Calculate the Mandelbrot index for the current location int m = CalcMandelbrot<T>(xPos, yPos, xJP, yJP, crunch, isJ); // int m = blockIdx.x; // uncomment to see scheduling order m = m > 0 ? 
crunch - m : 0; // Convert the Mandelbrot index into a color uchar4 color; if (m) { m += animationFrame; color.x = m * colors.x; color.y = m * colors.y; color.z = m * colors.z; } else { color.x = 0; color.y = 0; color.z = 0; } // Output the pixel int pixel = imageW * iy + ix; if (frame == 0) { color.w = 0; dst[pixel] = color; } else { int frame1 = frame + 1; int frame2 = frame1 / 2; dst[pixel].x = (dst[pixel].x * frame + color.x + frame2) / frame1; dst[pixel].y = (dst[pixel].y * frame + color.y + frame2) / frame1; dst[pixel].z = (dst[pixel].z * frame + color.z + frame2) / frame1; } } __syncthreads(); if ((threadIdx.x==0) && (threadIdx.y==0)) { // get block to process blockIndex = atomicAdd(&blockCounter, 1); blockX = blockIndex % gridWidth; // note: this is slow, but only called once per block here blockY = blockIndex / gridWidth; } __syncthreads(); } } // Mandelbrot0 // The Mandelbrot CUDA GPU thread function (double single version) __global__ void MandelbrotDS0_sm13(uchar4 *dst, const int imageW, const int imageH, const int crunch, const float xOff0, const float xOff1, const float yOff0, const float yOff1, const float xJP, const float yJP, const float scale, const uchar4 colors, const int frame, const int animationFrame, const int gridWidth, const int numBlocks, const bool isJ) { __shared__ unsigned int blockIndex; __shared__ unsigned int blockX, blockY; if ((threadIdx.x==0) && (threadIdx.y==0)) { // get block to process blockIndex = atomicAdd(&blockCounter, 1); blockX = blockIndex % gridWidth; // note: this is slow, but only called once per block here blockY = blockIndex / gridWidth; } __syncthreads(); // loop until all blocks completed while(blockIndex < numBlocks) { // process this block const int ix = blockDim.x * blockX + threadIdx.x; const int iy = blockDim.y * blockY + threadIdx.y; if ((ix < imageW) && (iy < imageH)) { // Calculate the location float xPos0 = (float)ix * scale; float xPos1 = 0.0f; float yPos0 = (float)iy * scale; float yPos1 = 0.0f; 
dsadd(xPos0, xPos1, xPos0, xPos1, xOff0, xOff1); dsadd(yPos0, yPos1, yPos0, yPos1, yOff0, yOff1); // Calculate the Mandelbrot index for the current location int m = CalcMandelbrotDS(xPos0, xPos1, yPos0, yPos1, xJP, yJP, crunch, isJ); m = m > 0 ? crunch - m : 0; // Convert the Mandelbrot index into a color uchar4 color; if (m) { m += animationFrame; color.x = m * colors.x; color.y = m * colors.y; color.z = m * colors.z; } else { color.x = 0; color.y = 0; color.z = 0; } // Output the pixel int pixel = imageW * iy + ix; if (frame == 0) { color.w = 0; dst[pixel] = color; } else { int frame1 = frame + 1; int frame2 = frame1 / 2; dst[pixel].x = (dst[pixel].x * frame + color.x + frame2) / frame1; dst[pixel].y = (dst[pixel].y * frame + color.y + frame2) / frame1; dst[pixel].z = (dst[pixel].z * frame + color.z + frame2) / frame1; } } __syncthreads(); if ((threadIdx.x==0) && (threadIdx.y==0)) { // get block to process blockIndex = atomicAdd(&blockCounter, 1); blockX = blockIndex % gridWidth; // note: this is slow, but only called once per block here blockY = blockIndex / gridWidth; } __syncthreads(); } } // MandelbrotDS0 // The Mandelbrot secondary AA pass CUDA GPU thread function template<class T> __global__ void Mandelbrot1_sm13(uchar4 *dst, const int imageW, const int imageH, const int crunch, const T xOff, const T yOff, const T xJP, const T yJP, const T scale, const uchar4 colors, const int frame, const int animationFrame, const int gridWidth, const int numBlocks, const bool isJ) { __shared__ unsigned int blockIndex; __shared__ unsigned int blockX, blockY; if ((threadIdx.x==0) && (threadIdx.y==0)) { // get block to process blockIndex = atomicAdd(&blockCounter, 1); blockX = blockIndex % gridWidth; // note: this is slow, but only called once per block here blockY = blockIndex / gridWidth; } __syncthreads(); // loop until all blocks completed while(blockIndex < numBlocks) { // process this block const int ix = blockDim.x * blockX + threadIdx.x; const int iy = blockDim.y * 
blockY + threadIdx.y; if ((ix < imageW) && (iy < imageH)) { // Get the current pixel color int pixel = imageW * iy + ix; uchar4 pixelColor = dst[pixel]; int count = 0; // Search for pixels out of tolerance surrounding the current pixel if (ix > 0) count += CheckColors(pixelColor, dst[pixel - 1]); if (ix + 1 < imageW) count += CheckColors(pixelColor, dst[pixel + 1]); if (iy > 0) count += CheckColors(pixelColor, dst[pixel - imageW]); if (iy + 1 < imageH) count += CheckColors(pixelColor, dst[pixel + imageW]); if (count) { // Calculate the location const T xPos = (T)ix * scale + xOff; const T yPos = (T)iy * scale + yOff; // Calculate the Mandelbrot index for the current location int m = CalcMandelbrot(xPos, yPos, xJP, yJP, crunch, isJ); m = m > 0 ? crunch - m : 0; // Convert the Mandelbrot index into a color uchar4 color; if (m) { m += animationFrame; color.x = m * colors.x; color.y = m * colors.y; color.z = m * colors.z; } else { color.x = 0; color.y = 0; color.z = 0; } // Output the pixel int frame1 = frame + 1; int frame2 = frame1 / 2; dst[pixel].x = (pixelColor.x * frame + color.x + frame2) / frame1; dst[pixel].y = (pixelColor.y * frame + color.y + frame2) / frame1; dst[pixel].z = (pixelColor.z * frame + color.z + frame2) / frame1; } } __syncthreads(); if ((threadIdx.x==0) && (threadIdx.y==0)) { // get block to process blockIndex = atomicAdd(&blockCounter, 1); blockX = blockIndex % gridWidth; // note: this is slow, but only called once per block here blockY = blockIndex / gridWidth; } __syncthreads(); } } // Mandelbrot1 // The Mandelbrot secondary AA pass CUDA GPU thread function (double single version) __global__ void MandelbrotDS1_sm13(uchar4 *dst, const int imageW, const int imageH, const int crunch, const float xOff0, const float xOff1, const float yOff0, const float yOff1, const float xJP, const float yJP, const float scale, const uchar4 colors, const int frame, const int animationFrame, const int gridWidth, const int numBlocks, const bool isJ) { __shared__ 
unsigned int blockIndex; __shared__ unsigned int blockX, blockY; if ((threadIdx.x==0) && (threadIdx.y==0)) { // get block to process blockIndex = atomicAdd(&blockCounter, 1); blockX = blockIndex % gridWidth; // note: this is slow, but only called once per block here blockY = blockIndex / gridWidth; } __syncthreads(); // loop until all blocks completed while(blockIndex < numBlocks) { if (blockIndex >= numBlocks) break; // finish // process this block const int ix = blockDim.x * blockX + threadIdx.x; const int iy = blockDim.y * blockY + threadIdx.y; if ((ix < imageW) && (iy < imageH)) { // Get the current pixel color int pixel = imageW * iy + ix; uchar4 pixelColor = dst[pixel]; int count = 0; // Search for pixels out of tolerance surrounding the current pixel if (ix > 0) count += CheckColors(pixelColor, dst[pixel - 1]); if (ix + 1 < imageW) count += CheckColors(pixelColor, dst[pixel + 1]); if (iy > 0) count += CheckColors(pixelColor, dst[pixel - imageW]); if (iy + 1 < imageH) count += CheckColors(pixelColor, dst[pixel + imageW]); if (count) { // Calculate the location float xPos0 = (float)ix * scale; float xPos1 = 0.0f; float yPos0 = (float)iy * scale; float yPos1 = 0.0f; dsadd(xPos0, xPos1, xPos0, xPos1, xOff0, xOff1); dsadd(yPos0, yPos1, yPos0, yPos1, yOff0, yOff1); // Calculate the Mandelbrot index for the current location int m = CalcMandelbrotDS(xPos0, xPos1, yPos0, yPos1, xJP, yJP, crunch, isJ); m = m > 0 ? 
crunch - m : 0; // Convert the Mandelbrot index into a color uchar4 color; if (m) { m += animationFrame; color.x = m * colors.x; color.y = m * colors.y; color.z = m * colors.z; } else { color.x = 0; color.y = 0; color.z = 0; } // Output the pixel int frame1 = frame + 1; int frame2 = frame1 / 2; dst[pixel].x = (pixelColor.x * frame + color.x + frame2) / frame1; dst[pixel].y = (pixelColor.y * frame + color.y + frame2) / frame1; dst[pixel].z = (pixelColor.z * frame + color.z + frame2) / frame1; } } __syncthreads(); if ((threadIdx.x==0) && (threadIdx.y==0)) { // get block to process blockIndex = atomicAdd(&blockCounter, 1); blockX = blockIndex % gridWidth; // note: this is slow, but only called once per block here blockY = blockIndex / gridWidth; } __syncthreads(); } } // MandelbrotDS1 // The host CPU Mandebrot thread spawner void RunMandelbrot0_sm13(uchar4 *dst, const int imageW, const int imageH, const int crunch, const double xOff, const double yOff, const double xjp, const double yjp, const double scale, const uchar4 colors, const int frame, const int animationFrame, const int mode, const int numSMs, const bool isJ) { dim3 threads(BLOCKDIM_X, BLOCKDIM_Y); dim3 grid(iDivUp(imageW, BLOCKDIM_X), iDivUp(imageH, BLOCKDIM_Y)); // zero block counter unsigned int hBlockCounter = 0; cutilSafeCall( cudaMemcpyToSymbol(blockCounter, &hBlockCounter, sizeof(unsigned int), 0, cudaMemcpyHostToDevice ) ); int numWorkerBlocks = numSMs; switch(mode) { default: case 0: Mandelbrot0_sm13<float><<<numWorkerBlocks, threads>>>(dst, imageW, imageH, crunch, (float)xOff, (float)yOff, (float)xjp, (float)yjp, (float)scale, colors, frame, animationFrame, grid.x, grid.x*grid.y, isJ); break; case 1: float x0, x1, y0, y1; dsdeq(x0, x1, xOff); dsdeq(y0, y1, yOff); MandelbrotDS0_sm13<<<numWorkerBlocks, threads>>>(dst, imageW, imageH, crunch, x0, x1, y0, y1, xjp, yjp, (float)scale, colors, frame, animationFrame, grid.x, grid.x*grid.y, isJ); break; case 2: Mandelbrot0_sm13<double><<<numWorkerBlocks, 
threads>>>(dst, imageW, imageH, crunch, xOff, yOff, xjp, yjp, scale, colors, frame, animationFrame, grid.x, grid.x*grid.y, isJ); break; } cutilCheckMsg("Mandelbrot0_sm13 kernel execution failed.\n"); } // RunMandelbrot0 // The host CPU Mandebrot thread spawner void RunMandelbrot1_sm13(uchar4 *dst, const int imageW, const int imageH, const int crunch, const double xOff, const double yOff, const double xjp, const double yjp, const double scale, const uchar4 colors, const int frame, const int animationFrame, const int mode, const int numSMs, const bool isJ) { dim3 threads(BLOCKDIM_X, BLOCKDIM_Y); dim3 grid(iDivUp(imageW, BLOCKDIM_X), iDivUp(imageH, BLOCKDIM_Y)); // zero block counter unsigned int hBlockCounter = 0; cutilSafeCall( cudaMemcpyToSymbol(blockCounter, &hBlockCounter, sizeof(unsigned int), 0, cudaMemcpyHostToDevice ) ); int numWorkerBlocks = numSMs; switch(mode) { default: case 0: Mandelbrot1_sm13<float><<<numWorkerBlocks, threads>>>(dst, imageW, imageH, crunch, (float)xOff, (float)yOff, (float)xjp, (float)yjp, (float)scale, colors, frame, animationFrame, grid.x, grid.x*grid.y, isJ); break; case 1: float x0, x1, y0, y1; dsdeq(x0, x1, xOff); dsdeq(y0, y1, yOff); MandelbrotDS1_sm13<<<numWorkerBlocks, threads>>>(dst, imageW, imageH, crunch, x0, x1, y0, y1, xjp, yjp, (float)scale, colors, frame, animationFrame, grid.x, grid.x*grid.y, isJ); break; case 2: Mandelbrot1_sm13<double><<<numWorkerBlocks, threads>>>(dst, imageW, imageH, crunch, xOff, yOff, xjp, yjp, scale, colors, frame, animationFrame, grid.x, grid.x*grid.y, isJ); break; } cutilCheckMsg("Mandelbrot1_sm13 kernel execution failed.\n"); } // RunMandelbrot1 // check if we're running in emulation mode int inEmulationMode() { #if 1 return 1; #else return 0; #endif } // inEmulationMode
fca7acb8a156d1fbe1920b444fd705bd55cbffb5.hip
// !!! This is a file automatically generated by hipify!!! #include <assert.h> #include <hip/hip_runtime.h> #include <iostream> #include <hiprand/hiprand.h> #include "utils/const.h" #include "utils/metrics.cuh" #include "utils/hostbuffer.cuh" #include "utils/devbuffer.cuh" #include "utils/utils.cuh" #include "utils/timer.h" #include "utils/cnv.h" using namespace std; static real dot(real *x, real *y, int dim) { real s = 0; for (int i = 0; i < dim; ++i, ++x, ++y) { s += *x * *y; } return s; } void sim_host(real *mat, int nvec, int dim, real *sim) { real *x = mat; for (int i = 0; i < nvec; ++i, x += dim) { real *y = x; for (int j = i; j < nvec; ++j, y += dim) { sim[i*nvec+j] = sim[j*nvec+i] = dot(x, y, dim); } } } void norm_host(real *mat, int nvec, int dim) { for (int i = 0; i < nvec; ++i) { real norm = 0; for (int j = 0; j < dim; ++j) { auto r = mat[dim * i + j]; norm += r * r; } norm = sqrt(norm); for (int j = 0; j < dim; ++j) { mat[dim * i + j] /= norm; } } } template<int X, int Y> __global__ void sim_ker1(real *mat, int shx, int shy, real *sim) { assert(blockDim.x == X && blockDim.y == Y); if (blockIdx.x * X > (blockIdx.y + 1) * Y) return; __shared__ real left[X][WARP + 1], right[Y][WARP + 1]; real sum = 0.0f; const int tidx = threadIdx.y * X + threadIdx.x; const int warpidx = tidx / WARP, localidx = tidx % WARP; const int effx = min(blockDim.x, shx - blockDim.x * blockIdx.x); const int effy = min(blockDim.y, shx - blockDim.y * blockIdx.y); int j; real *xoff = &mat[X * blockIdx.x * shy + localidx]; real *yoff = &mat[Y * blockIdx.y * shy + localidx]; for (int i = 0; i < shy; i += WARP, xoff += WARP, yoff += WARP) { if (warpidx < (X * Y) / WARP && i + localidx < shy) { for (j = warpidx; j < effx; j += (X * Y) / WARP) { left[j][localidx] = xoff[shy * j]; } for (j = warpidx; j < effy; j += (X * Y) / WARP) { right[j][localidx] = yoff[shy * j]; } } __syncthreads(); if (threadIdx.x < effx && threadIdx.y < effy) { for (j = 0; j < WARP && i + j < shy; ++j) { sum += 
left[threadIdx.x][j] * right[threadIdx.y][j]; } } __syncthreads(); } if (threadIdx.x < effx && threadIdx.y < effy) { const int xidx = blockDim.x * blockIdx.x + threadIdx.x; const int yidx = blockDim.y * blockIdx.y + threadIdx.y; sim[xidx * shx + yidx] = sim[yidx * shx + xidx] = sum; } } int main(int argc, char **argv) { Timer tim; CNV A(argv[1]); dim3 sh(A.nvec, A.dim); int len = sh.x * sh.y; int slen = sh.x * sh.x; HostBuffer<real> Shost(slen); norm_host(A.data, sh.x, sh.y); sim_host(A.data, sh.x, sh.y, Shost); cout << "Host <Met>: " << host::stats(Shost, sh.x) << '\n'; DevBuffer<real> Adev(len); check(hipMemcpy(Adev, A.data, len * sizeof(real), hipMemcpyHostToDevice)); DevBuffer<real> S1(slen); HostBuffer<real> S1copy(slen); dim3 block(16, 16); dim3 grid(sh.x / block.x + 1, sh.x / block.y + 1); for (int i = 0; i < 50; ++i) { tim.enter(); hipLaunchKernelGGL(( sim_ker1<16, 16>), dim3(grid), dim3(block), 0, 0, Adev, sh.x, sh.y, S1); check(hipMemcpy(S1copy, S1, slen * sizeof(real), hipMemcpyDeviceToHost)); tim.leave(); } cout << "Ker #1 <Time>: " << tim.stats() << '\n'; cout << "Ker #1 <Met>: " << host::stats(S1copy, sh.x) << '\n'; cout << "Host <-> Ker #1: " << host::corr(S1copy, Shost, slen) << '\n'; return 0; }
fca7acb8a156d1fbe1920b444fd705bd55cbffb5.cu
#include <assert.h> #include <cuda_runtime.h> #include <iostream> #include <curand.h> #include "utils/const.h" #include "utils/metrics.cuh" #include "utils/hostbuffer.cuh" #include "utils/devbuffer.cuh" #include "utils/utils.cuh" #include "utils/timer.h" #include "utils/cnv.h" using namespace std; static real dot(real *x, real *y, int dim) { real s = 0; for (int i = 0; i < dim; ++i, ++x, ++y) { s += *x * *y; } return s; } void sim_host(real *mat, int nvec, int dim, real *sim) { real *x = mat; for (int i = 0; i < nvec; ++i, x += dim) { real *y = x; for (int j = i; j < nvec; ++j, y += dim) { sim[i*nvec+j] = sim[j*nvec+i] = dot(x, y, dim); } } } void norm_host(real *mat, int nvec, int dim) { for (int i = 0; i < nvec; ++i) { real norm = 0; for (int j = 0; j < dim; ++j) { auto r = mat[dim * i + j]; norm += r * r; } norm = sqrt(norm); for (int j = 0; j < dim; ++j) { mat[dim * i + j] /= norm; } } } template<int X, int Y> __global__ void sim_ker1(real *mat, int shx, int shy, real *sim) { assert(blockDim.x == X && blockDim.y == Y); if (blockIdx.x * X > (blockIdx.y + 1) * Y) return; __shared__ real left[X][WARP + 1], right[Y][WARP + 1]; real sum = 0.0f; const int tidx = threadIdx.y * X + threadIdx.x; const int warpidx = tidx / WARP, localidx = tidx % WARP; const int effx = min(blockDim.x, shx - blockDim.x * blockIdx.x); const int effy = min(blockDim.y, shx - blockDim.y * blockIdx.y); int j; real *xoff = &mat[X * blockIdx.x * shy + localidx]; real *yoff = &mat[Y * blockIdx.y * shy + localidx]; for (int i = 0; i < shy; i += WARP, xoff += WARP, yoff += WARP) { if (warpidx < (X * Y) / WARP && i + localidx < shy) { for (j = warpidx; j < effx; j += (X * Y) / WARP) { left[j][localidx] = xoff[shy * j]; } for (j = warpidx; j < effy; j += (X * Y) / WARP) { right[j][localidx] = yoff[shy * j]; } } __syncthreads(); if (threadIdx.x < effx && threadIdx.y < effy) { for (j = 0; j < WARP && i + j < shy; ++j) { sum += left[threadIdx.x][j] * right[threadIdx.y][j]; } } __syncthreads(); } if 
(threadIdx.x < effx && threadIdx.y < effy) { const int xidx = blockDim.x * blockIdx.x + threadIdx.x; const int yidx = blockDim.y * blockIdx.y + threadIdx.y; sim[xidx * shx + yidx] = sim[yidx * shx + xidx] = sum; } } int main(int argc, char **argv) { Timer tim; CNV A(argv[1]); dim3 sh(A.nvec, A.dim); int len = sh.x * sh.y; int slen = sh.x * sh.x; HostBuffer<real> Shost(slen); norm_host(A.data, sh.x, sh.y); sim_host(A.data, sh.x, sh.y, Shost); cout << "Host <Met>: " << host::stats(Shost, sh.x) << '\n'; DevBuffer<real> Adev(len); check(cudaMemcpy(Adev, A.data, len * sizeof(real), cudaMemcpyHostToDevice)); DevBuffer<real> S1(slen); HostBuffer<real> S1copy(slen); dim3 block(16, 16); dim3 grid(sh.x / block.x + 1, sh.x / block.y + 1); for (int i = 0; i < 50; ++i) { tim.enter(); sim_ker1<16, 16><<<grid, block>>>(Adev, sh.x, sh.y, S1); check(cudaMemcpy(S1copy, S1, slen * sizeof(real), cudaMemcpyDeviceToHost)); tim.leave(); } cout << "Ker #1 <Time>: " << tim.stats() << '\n'; cout << "Ker #1 <Met>: " << host::stats(S1copy, sh.x) << '\n'; cout << "Host <-> Ker #1: " << host::corr(S1copy, Shost, slen) << '\n'; return 0; }
393d393385e912b9aba2dcd02194e051a349c235.hip
// !!! This is a file automatically generated by hipify!!! // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/fluid/framework/fleet/heter_ps/graph_gpu_ps_table.h" #include "paddle/fluid/framework/fleet/heter_ps/graph_gpu_wrapper.h" #include "paddle/fluid/framework/fleet/heter_ps/heter_resource.h" namespace paddle { namespace framework { #ifdef PADDLE_WITH_HETERPS std::shared_ptr<GraphGpuWrapper> GraphGpuWrapper::s_instance_(nullptr); void GraphGpuWrapper::set_device(std::vector<int> ids) { for (auto device_id : ids) { device_id_mapping.push_back(device_id); } } std::vector<std::vector<int64_t>> GraphGpuWrapper::get_all_id(int type, int idx, int slice_num) { return ((GpuPsGraphTable *)graph_table) ->cpu_graph_table->get_all_id(type, idx, slice_num); } void GraphGpuWrapper::set_up_types(std::vector<std::string> &edge_types, std::vector<std::string> &node_types) { id_to_edge = edge_types; for (size_t table_id = 0; table_id < edge_types.size(); table_id++) { int res = edge_to_id.size(); edge_to_id[edge_types[table_id]] = res; } id_to_feature = node_types; for (size_t table_id = 0; table_id < node_types.size(); table_id++) { int res = feature_to_id.size(); feature_to_id[node_types[table_id]] = res; } table_feat_mapping.resize(node_types.size()); this->table_feat_conf_feat_name.resize(node_types.size()); this->table_feat_conf_feat_dtype.resize(node_types.size()); 
this->table_feat_conf_feat_shape.resize(node_types.size()); } void GraphGpuWrapper::make_partitions(int idx, int64_t byte_size, int device_len) { ((GpuPsGraphTable *)graph_table) ->cpu_graph_table->make_partitions(idx, byte_size, device_len); } int32_t GraphGpuWrapper::load_next_partition(int idx) { return ((GpuPsGraphTable *)graph_table) ->cpu_graph_table->load_next_partition(idx); } void GraphGpuWrapper::set_search_level(int level) { ((GpuPsGraphTable *)graph_table)->cpu_graph_table->set_search_level(level); } std::vector<int64_t> GraphGpuWrapper::get_partition(int idx, int num) { return ((GpuPsGraphTable *)graph_table) ->cpu_graph_table->get_partition(idx, num); } int32_t GraphGpuWrapper::get_partition_num(int idx) { return ((GpuPsGraphTable *)graph_table) ->cpu_graph_table->get_partition_num(idx); } void GraphGpuWrapper::make_complementary_graph(int idx, int64_t byte_size) { ((GpuPsGraphTable *)graph_table) ->cpu_graph_table->make_complementary_graph(idx, byte_size); } void GraphGpuWrapper::load_edge_file(std::string name, std::string filepath, bool reverse) { // 'e' means load edge std::string params = "e"; if (reverse) { // 'e<' means load edges from $2 to $1 params += "<" + name; } else { // 'e>' means load edges from $1 to $2 params += ">" + name; } if (edge_to_id.find(name) != edge_to_id.end()) { ((GpuPsGraphTable *)graph_table) ->cpu_graph_table->Load(std::string(filepath), params); } } void GraphGpuWrapper::load_node_file(std::string name, std::string filepath) { // 'n' means load nodes and 'node_type' follows std::string params = "n" + name; if (feature_to_id.find(name) != feature_to_id.end()) { ((GpuPsGraphTable *)graph_table) ->cpu_graph_table->Load(std::string(filepath), params); } } void GraphGpuWrapper::add_table_feat_conf(std::string table_name, std::string feat_name, std::string feat_dtype, int feat_shape) { if (feature_to_id.find(table_name) != feature_to_id.end()) { int idx = feature_to_id[table_name]; if 
(table_feat_mapping[idx].find(feat_name) == table_feat_mapping[idx].end()) { int res = (int)table_feat_mapping[idx].size(); table_feat_mapping[idx][feat_name] = res; } int feat_idx = table_feat_mapping[idx][feat_name]; VLOG(0) << "table_name " << table_name << " mapping id " << idx; VLOG(0) << " feat name " << feat_name << " feat id" << feat_idx; if (feat_idx < table_feat_conf_feat_name[idx].size()) { // overide table_feat_conf_feat_name[idx][feat_idx] = feat_name; table_feat_conf_feat_dtype[idx][feat_idx] = feat_dtype; table_feat_conf_feat_shape[idx][feat_idx] = feat_shape; } else { // new table_feat_conf_feat_name[idx].push_back(feat_name); table_feat_conf_feat_dtype[idx].push_back(feat_dtype); table_feat_conf_feat_shape[idx].push_back(feat_shape); } } VLOG(0) << "add conf over"; } void GraphGpuWrapper::init_search_level(int level) { search_level = level; } void GraphGpuWrapper::init_service() { table_proto.set_task_pool_size(24); table_proto.set_search_level(search_level); table_proto.set_table_name("cpu_graph_table"); table_proto.set_use_cache(false); for (int i = 0; i < id_to_edge.size(); i++) table_proto.add_edge_types(id_to_edge[i]); for (int i = 0; i < id_to_feature.size(); i++) { table_proto.add_node_types(id_to_feature[i]); auto feat_node = id_to_feature[i]; ::paddle::distributed::GraphFeature *g_f = table_proto.add_graph_feature(); for (int x = 0; x < table_feat_conf_feat_name[i].size(); x++) { g_f->add_name(table_feat_conf_feat_name[i][x]); g_f->add_dtype(table_feat_conf_feat_dtype[i][x]); g_f->add_shape(table_feat_conf_feat_shape[i][x]); } } std::shared_ptr<HeterPsResource> resource = std::make_shared<HeterPsResource>(device_id_mapping); resource->enable_p2p(); GpuPsGraphTable *g = new GpuPsGraphTable(resource, 1); g->init_cpu_table(table_proto); graph_table = (char *)g; } void GraphGpuWrapper::upload_batch(int idx, std::vector<std::vector<int64_t>> &ids) { GpuPsGraphTable *g = (GpuPsGraphTable *)graph_table; // 
std::vector<paddle::framework::GpuPsCommGraph> vec; for (int i = 0; i < ids.size(); i++) { // vec.push_back(g->cpu_graph_table->make_gpu_ps_graph(idx, ids[i])); GpuPsCommGraph sub_graph = g->cpu_graph_table->make_gpu_ps_graph(idx, ids[i]); g->build_graph_on_single_gpu(sub_graph, i); sub_graph.release_on_cpu(); VLOG(0) << "sub graph on gpu " << i << " is built"; } // g->build_graph_from_cpu(vec); } // void GraphGpuWrapper::test() { // int64_t cpu_key[3] = {0, 1, 2}; // void *key; // platform::CUDADeviceGuard guard(0); // hipMalloc((void **)&key, 3 * sizeof(int64_t)); // hipMemcpy(key, cpu_key, 3 * sizeof(int64_t), hipMemcpyHostToDevice); // auto neighbor_sample_res = // ((GpuPsGraphTable *)graph_table) // ->graph_neighbor_sample(0, (int64_t *)key, 2, 3); // int64_t *res = new int64_t[7]; // hipMemcpy(res, neighbor_sample_res.val, 3 * 2 * sizeof(int64_t), // hipMemcpyDeviceToHost); // int *actual_sample_size = new int[3]; // hipMemcpy(actual_sample_size, neighbor_sample_res.actual_sample_size, // 3 * sizeof(int), // hipMemcpyDeviceToHost); // 3, 1, 3 // //{0,9} or {9,0} is expected for key 0 // //{0,2} or {2,0} is expected for key 1 // //{1,3} or {3,1} is expected for key 2 // for (int i = 0; i < 3; i++) { // VLOG(0) << "actual sample size for " << i << " is " // << actual_sample_size[i]; // for (int j = 0; j < actual_sample_size[i]; j++) { // VLOG(0) << "sampled an neighbor for node" << i << " : " << res[i * 2 + // j]; // } // } // } NeighborSampleResult GraphGpuWrapper::graph_neighbor_sample_v3( NeighborSampleQuery q, bool cpu_switch) { return ((GpuPsGraphTable *)graph_table) ->graph_neighbor_sample_v3(q, cpu_switch); } // this function is contributed by Liwb5 std::vector<int64_t> GraphGpuWrapper::graph_neighbor_sample( int gpu_id, std::vector<int64_t> &key, int sample_size) { int64_t *cuda_key; platform::CUDADeviceGuard guard(gpu_id); hipMalloc(&cuda_key, key.size() * sizeof(int64_t)); hipMemcpy(cuda_key, key.data(), key.size() * sizeof(int64_t), 
hipMemcpyHostToDevice); auto neighbor_sample_res = ((GpuPsGraphTable *)graph_table) ->graph_neighbor_sample(gpu_id, cuda_key, sample_size, key.size()); int *actual_sample_size = new int[key.size()]; hipMemcpy(actual_sample_size, neighbor_sample_res.actual_sample_size, key.size() * sizeof(int), hipMemcpyDeviceToHost); // 3, 1, 3 int cumsum = 0; for (int i = 0; i < key.size(); i++) { cumsum += actual_sample_size[i]; } std::vector<int64_t> cpu_key, res; cpu_key.resize(key.size() * sample_size); hipMemcpy(cpu_key.data(), neighbor_sample_res.val, key.size() * sample_size * sizeof(int64_t), hipMemcpyDeviceToHost); for (int i = 0; i < key.size(); i++) { for (int j = 0; j < actual_sample_size[i]; j++) { res.push_back(key[i]); res.push_back(cpu_key[i * sample_size + j]); } } /* for(int i = 0;i < res.size();i ++) { */ /* VLOG(0) << i << " " << res[i]; */ /* } */ delete[] actual_sample_size; hipFree(cuda_key); return res; } void GraphGpuWrapper::init_sample_status() { ((GpuPsGraphTable *)graph_table)->init_sample_status(); } void GraphGpuWrapper::free_sample_status() { ((GpuPsGraphTable *)graph_table)->free_sample_status(); } NodeQueryResult GraphGpuWrapper::query_node_list(int gpu_id, int start, int query_size) { return ((GpuPsGraphTable *)graph_table) ->query_node_list(gpu_id, start, query_size); } void GraphGpuWrapper::load_node_weight(int type_id, int idx, std::string path) { return ((GpuPsGraphTable *)graph_table) ->cpu_graph_table->load_node_weight(type_id, idx, path); } void GraphGpuWrapper::export_partition_files(int idx, std::string file_path) { return ((GpuPsGraphTable *)graph_table) ->cpu_graph_table->export_partition_files(idx, file_path); } #endif } };
393d393385e912b9aba2dcd02194e051a349c235.cu
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/fluid/framework/fleet/heter_ps/graph_gpu_ps_table.h" #include "paddle/fluid/framework/fleet/heter_ps/graph_gpu_wrapper.h" #include "paddle/fluid/framework/fleet/heter_ps/heter_resource.h" namespace paddle { namespace framework { #ifdef PADDLE_WITH_HETERPS std::shared_ptr<GraphGpuWrapper> GraphGpuWrapper::s_instance_(nullptr); void GraphGpuWrapper::set_device(std::vector<int> ids) { for (auto device_id : ids) { device_id_mapping.push_back(device_id); } } std::vector<std::vector<int64_t>> GraphGpuWrapper::get_all_id(int type, int idx, int slice_num) { return ((GpuPsGraphTable *)graph_table) ->cpu_graph_table->get_all_id(type, idx, slice_num); } void GraphGpuWrapper::set_up_types(std::vector<std::string> &edge_types, std::vector<std::string> &node_types) { id_to_edge = edge_types; for (size_t table_id = 0; table_id < edge_types.size(); table_id++) { int res = edge_to_id.size(); edge_to_id[edge_types[table_id]] = res; } id_to_feature = node_types; for (size_t table_id = 0; table_id < node_types.size(); table_id++) { int res = feature_to_id.size(); feature_to_id[node_types[table_id]] = res; } table_feat_mapping.resize(node_types.size()); this->table_feat_conf_feat_name.resize(node_types.size()); this->table_feat_conf_feat_dtype.resize(node_types.size()); this->table_feat_conf_feat_shape.resize(node_types.size()); } void 
GraphGpuWrapper::make_partitions(int idx, int64_t byte_size, int device_len) { ((GpuPsGraphTable *)graph_table) ->cpu_graph_table->make_partitions(idx, byte_size, device_len); } int32_t GraphGpuWrapper::load_next_partition(int idx) { return ((GpuPsGraphTable *)graph_table) ->cpu_graph_table->load_next_partition(idx); } void GraphGpuWrapper::set_search_level(int level) { ((GpuPsGraphTable *)graph_table)->cpu_graph_table->set_search_level(level); } std::vector<int64_t> GraphGpuWrapper::get_partition(int idx, int num) { return ((GpuPsGraphTable *)graph_table) ->cpu_graph_table->get_partition(idx, num); } int32_t GraphGpuWrapper::get_partition_num(int idx) { return ((GpuPsGraphTable *)graph_table) ->cpu_graph_table->get_partition_num(idx); } void GraphGpuWrapper::make_complementary_graph(int idx, int64_t byte_size) { ((GpuPsGraphTable *)graph_table) ->cpu_graph_table->make_complementary_graph(idx, byte_size); } void GraphGpuWrapper::load_edge_file(std::string name, std::string filepath, bool reverse) { // 'e' means load edge std::string params = "e"; if (reverse) { // 'e<' means load edges from $2 to $1 params += "<" + name; } else { // 'e>' means load edges from $1 to $2 params += ">" + name; } if (edge_to_id.find(name) != edge_to_id.end()) { ((GpuPsGraphTable *)graph_table) ->cpu_graph_table->Load(std::string(filepath), params); } } void GraphGpuWrapper::load_node_file(std::string name, std::string filepath) { // 'n' means load nodes and 'node_type' follows std::string params = "n" + name; if (feature_to_id.find(name) != feature_to_id.end()) { ((GpuPsGraphTable *)graph_table) ->cpu_graph_table->Load(std::string(filepath), params); } } void GraphGpuWrapper::add_table_feat_conf(std::string table_name, std::string feat_name, std::string feat_dtype, int feat_shape) { if (feature_to_id.find(table_name) != feature_to_id.end()) { int idx = feature_to_id[table_name]; if (table_feat_mapping[idx].find(feat_name) == table_feat_mapping[idx].end()) { int res = 
(int)table_feat_mapping[idx].size(); table_feat_mapping[idx][feat_name] = res; } int feat_idx = table_feat_mapping[idx][feat_name]; VLOG(0) << "table_name " << table_name << " mapping id " << idx; VLOG(0) << " feat name " << feat_name << " feat id" << feat_idx; if (feat_idx < table_feat_conf_feat_name[idx].size()) { // overide table_feat_conf_feat_name[idx][feat_idx] = feat_name; table_feat_conf_feat_dtype[idx][feat_idx] = feat_dtype; table_feat_conf_feat_shape[idx][feat_idx] = feat_shape; } else { // new table_feat_conf_feat_name[idx].push_back(feat_name); table_feat_conf_feat_dtype[idx].push_back(feat_dtype); table_feat_conf_feat_shape[idx].push_back(feat_shape); } } VLOG(0) << "add conf over"; } void GraphGpuWrapper::init_search_level(int level) { search_level = level; } void GraphGpuWrapper::init_service() { table_proto.set_task_pool_size(24); table_proto.set_search_level(search_level); table_proto.set_table_name("cpu_graph_table"); table_proto.set_use_cache(false); for (int i = 0; i < id_to_edge.size(); i++) table_proto.add_edge_types(id_to_edge[i]); for (int i = 0; i < id_to_feature.size(); i++) { table_proto.add_node_types(id_to_feature[i]); auto feat_node = id_to_feature[i]; ::paddle::distributed::GraphFeature *g_f = table_proto.add_graph_feature(); for (int x = 0; x < table_feat_conf_feat_name[i].size(); x++) { g_f->add_name(table_feat_conf_feat_name[i][x]); g_f->add_dtype(table_feat_conf_feat_dtype[i][x]); g_f->add_shape(table_feat_conf_feat_shape[i][x]); } } std::shared_ptr<HeterPsResource> resource = std::make_shared<HeterPsResource>(device_id_mapping); resource->enable_p2p(); GpuPsGraphTable *g = new GpuPsGraphTable(resource, 1); g->init_cpu_table(table_proto); graph_table = (char *)g; } void GraphGpuWrapper::upload_batch(int idx, std::vector<std::vector<int64_t>> &ids) { GpuPsGraphTable *g = (GpuPsGraphTable *)graph_table; // std::vector<paddle::framework::GpuPsCommGraph> vec; for (int i = 0; i < ids.size(); i++) { // 
vec.push_back(g->cpu_graph_table->make_gpu_ps_graph(idx, ids[i])); GpuPsCommGraph sub_graph = g->cpu_graph_table->make_gpu_ps_graph(idx, ids[i]); g->build_graph_on_single_gpu(sub_graph, i); sub_graph.release_on_cpu(); VLOG(0) << "sub graph on gpu " << i << " is built"; } // g->build_graph_from_cpu(vec); } // void GraphGpuWrapper::test() { // int64_t cpu_key[3] = {0, 1, 2}; // void *key; // platform::CUDADeviceGuard guard(0); // cudaMalloc((void **)&key, 3 * sizeof(int64_t)); // cudaMemcpy(key, cpu_key, 3 * sizeof(int64_t), cudaMemcpyHostToDevice); // auto neighbor_sample_res = // ((GpuPsGraphTable *)graph_table) // ->graph_neighbor_sample(0, (int64_t *)key, 2, 3); // int64_t *res = new int64_t[7]; // cudaMemcpy(res, neighbor_sample_res.val, 3 * 2 * sizeof(int64_t), // cudaMemcpyDeviceToHost); // int *actual_sample_size = new int[3]; // cudaMemcpy(actual_sample_size, neighbor_sample_res.actual_sample_size, // 3 * sizeof(int), // cudaMemcpyDeviceToHost); // 3, 1, 3 // //{0,9} or {9,0} is expected for key 0 // //{0,2} or {2,0} is expected for key 1 // //{1,3} or {3,1} is expected for key 2 // for (int i = 0; i < 3; i++) { // VLOG(0) << "actual sample size for " << i << " is " // << actual_sample_size[i]; // for (int j = 0; j < actual_sample_size[i]; j++) { // VLOG(0) << "sampled an neighbor for node" << i << " : " << res[i * 2 + // j]; // } // } // } NeighborSampleResult GraphGpuWrapper::graph_neighbor_sample_v3( NeighborSampleQuery q, bool cpu_switch) { return ((GpuPsGraphTable *)graph_table) ->graph_neighbor_sample_v3(q, cpu_switch); } // this function is contributed by Liwb5 std::vector<int64_t> GraphGpuWrapper::graph_neighbor_sample( int gpu_id, std::vector<int64_t> &key, int sample_size) { int64_t *cuda_key; platform::CUDADeviceGuard guard(gpu_id); cudaMalloc(&cuda_key, key.size() * sizeof(int64_t)); cudaMemcpy(cuda_key, key.data(), key.size() * sizeof(int64_t), cudaMemcpyHostToDevice); auto neighbor_sample_res = ((GpuPsGraphTable *)graph_table) 
->graph_neighbor_sample(gpu_id, cuda_key, sample_size, key.size()); int *actual_sample_size = new int[key.size()]; cudaMemcpy(actual_sample_size, neighbor_sample_res.actual_sample_size, key.size() * sizeof(int), cudaMemcpyDeviceToHost); // 3, 1, 3 int cumsum = 0; for (int i = 0; i < key.size(); i++) { cumsum += actual_sample_size[i]; } std::vector<int64_t> cpu_key, res; cpu_key.resize(key.size() * sample_size); cudaMemcpy(cpu_key.data(), neighbor_sample_res.val, key.size() * sample_size * sizeof(int64_t), cudaMemcpyDeviceToHost); for (int i = 0; i < key.size(); i++) { for (int j = 0; j < actual_sample_size[i]; j++) { res.push_back(key[i]); res.push_back(cpu_key[i * sample_size + j]); } } /* for(int i = 0;i < res.size();i ++) { */ /* VLOG(0) << i << " " << res[i]; */ /* } */ delete[] actual_sample_size; cudaFree(cuda_key); return res; } void GraphGpuWrapper::init_sample_status() { ((GpuPsGraphTable *)graph_table)->init_sample_status(); } void GraphGpuWrapper::free_sample_status() { ((GpuPsGraphTable *)graph_table)->free_sample_status(); } NodeQueryResult GraphGpuWrapper::query_node_list(int gpu_id, int start, int query_size) { return ((GpuPsGraphTable *)graph_table) ->query_node_list(gpu_id, start, query_size); } void GraphGpuWrapper::load_node_weight(int type_id, int idx, std::string path) { return ((GpuPsGraphTable *)graph_table) ->cpu_graph_table->load_node_weight(type_id, idx, path); } void GraphGpuWrapper::export_partition_files(int idx, std::string file_path) { return ((GpuPsGraphTable *)graph_table) ->cpu_graph_table->export_partition_files(idx, file_path); } #endif } };
a37706bec1fa9a60f3ce8d585849624dcb282ac4.hip
// !!! This is a file automatically generated by hipify!!! #include <ATen/native/hip/Reduce.cuh> #include <c10/util/ArrayRef.h> #include <iostream> namespace at { namespace native { static inline std::ostream& operator<<(std::ostream& out, dim3 dim) { if (dim.y == 1 && dim.z == 1) { out << dim.x; } else { out << "[" << dim.x << "," << dim.y << "," << dim.z << "]"; } return out; } std::ostream& operator<<(std::ostream& out, const ReduceConfig& config) { out << "ReduceConfig("; out << "element_size_bytes=" << config.element_size_bytes << ", "; out << "num_inputs=" << config.num_inputs << ", "; out << "num_outputs=" << config.num_outputs << ", "; out << "step_input=" << config.step_input << ", "; out << "step_output=" << config.step_output << ", "; out << "ctas_per_output=" << config.ctas_per_output << ", "; out << "input_mult=["; for (int i = 0; i < 3; i++) { if (i != 0) { out << ","; } out << config.input_mult[i]; } out << "], "; out << "output_mult=["; for (int i = 0; i < 2; i++) { if (i != 0) { out << ","; } out << config.output_mult[i]; } out << "], "; out << "values_per_thread=" << config.values_per_thread() << ", "; out << "block=" << config.block() << ", "; out << "grid=" << config.grid() << ", "; out << "global_memory_size=" << config.global_memory_size(); out << ")"; return out; } }} // namespace at::native
a37706bec1fa9a60f3ce8d585849624dcb282ac4.cu
#include <ATen/native/cuda/Reduce.cuh> #include <c10/util/ArrayRef.h> #include <iostream> namespace at { namespace native { static inline std::ostream& operator<<(std::ostream& out, dim3 dim) { if (dim.y == 1 && dim.z == 1) { out << dim.x; } else { out << "[" << dim.x << "," << dim.y << "," << dim.z << "]"; } return out; } std::ostream& operator<<(std::ostream& out, const ReduceConfig& config) { out << "ReduceConfig("; out << "element_size_bytes=" << config.element_size_bytes << ", "; out << "num_inputs=" << config.num_inputs << ", "; out << "num_outputs=" << config.num_outputs << ", "; out << "step_input=" << config.step_input << ", "; out << "step_output=" << config.step_output << ", "; out << "ctas_per_output=" << config.ctas_per_output << ", "; out << "input_mult=["; for (int i = 0; i < 3; i++) { if (i != 0) { out << ","; } out << config.input_mult[i]; } out << "], "; out << "output_mult=["; for (int i = 0; i < 2; i++) { if (i != 0) { out << ","; } out << config.output_mult[i]; } out << "], "; out << "values_per_thread=" << config.values_per_thread() << ", "; out << "block=" << config.block() << ", "; out << "grid=" << config.grid() << ", "; out << "global_memory_size=" << config.global_memory_size(); out << ")"; return out; } }} // namespace at::native
e92c4e3e1e6b86e033bdfbad2685d27b231a58b1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file multibox_target.cu * \brief MultiBoxTarget op * \author Joshua Zhang */ #include "./multibox_target-inl.h" #include <mshadow/cuda/tensor_gpu-inl.cuh> #define MULTIBOX_TARGET_CUDA_CHECK(condition) \ /* Code block avoids redefinition of hipError_t error */ \ do { \ hipError_t error = condition; \ CHECK_EQ(error, hipSuccess) << " " << hipGetErrorString(error); \ } while (0) namespace mshadow { namespace cuda { template<typename DType> __global__ void InitGroundTruthFlags(DType *gt_flags, const DType *labels, const int num_batches, const int num_labels, const int label_width) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= num_batches * num_labels) return; int b = index / num_labels; int l = index % num_labels; if (*(labels + b * num_labels * label_width + l * label_width) == -1.f) { *(gt_flags + b * num_labels + l) = 0; } else { *(gt_flags + b * num_labels + l) = 1; } } template<typename DType> __global__ void FindBestMatches(DType *best_matches, DType *gt_flags, DType *anchor_flags, const DType *overlaps, const int num_anchors, const int num_labels) { int nbatch = blockIdx.x; 
gt_flags += nbatch * num_labels; overlaps += nbatch * num_anchors * num_labels; best_matches += nbatch * num_anchors; anchor_flags += nbatch * num_anchors; const int num_threads = kMaxThreadsPerBlock; __shared__ int max_indices_y[kMaxThreadsPerBlock]; __shared__ int max_indices_x[kMaxThreadsPerBlock]; __shared__ float max_values[kMaxThreadsPerBlock]; while (1) { // check if all done. bool finished = true; for (int i = 0; i < num_labels; ++i) { if (gt_flags[i] > .5) { finished = false; break; } } if (finished) break; // all done. // finding max indices in different threads int max_x = -1; int max_y = -1; DType max_value = 1e-6; // start with very small overlap for (int i = threadIdx.x; i < num_anchors; i += num_threads) { if (anchor_flags[i] > .5) continue; for (int j = 0; j < num_labels; ++j) { if (gt_flags[j] > .5) { DType temp = overlaps[i * num_labels + j]; if (temp > max_value) { max_x = j; max_y = i; max_value = temp; } } } } max_indices_x[threadIdx.x] = max_x; max_indices_y[threadIdx.x] = max_y; max_values[threadIdx.x] = max_value; __syncthreads(); if (threadIdx.x == 0) { // merge results and assign best match int max_x = -1; int max_y = -1; DType max_value = -1; for (int k = 0; k < num_threads; ++k) { if (max_indices_y[k] < 0 || max_indices_x[k] < 0) continue; float temp = max_values[k]; if (temp > max_value) { max_x = max_indices_x[k]; max_y = max_indices_y[k]; max_value = temp; } } if (max_x >= 0 && max_y >= 0) { best_matches[max_y] = max_x; // mark flags as visited gt_flags[max_x] = 0.f; anchor_flags[max_y] = 1.f; } else { // no more good matches for (int i = 0; i < num_labels; ++i) { gt_flags[i] = 0.f; } } } __syncthreads(); } } template<typename DType> __global__ void FindGoodMatches(DType *best_matches, DType *anchor_flags, const DType *overlaps, const int num_anchors, const int num_labels, const float overlap_threshold) { int nbatch = blockIdx.x; overlaps += nbatch * num_anchors * num_labels; best_matches += nbatch * num_anchors; anchor_flags += 
nbatch * num_anchors; const int num_threads = kMaxThreadsPerBlock; for (int i = threadIdx.x; i < num_anchors; i += num_threads) { if (anchor_flags[i] < 0) { int idx = -1; float max_value = -1.f; for (int j = 0; j < num_labels; ++j) { DType temp = overlaps[i * num_labels + j]; if (temp > max_value) { max_value = temp; idx = j; } } if (max_value > overlap_threshold && (idx >= 0)) { best_matches[i] = idx; anchor_flags[i] = 0.9f; } } } } template<typename DType> __global__ void UseAllNegatives(DType *anchor_flags, const int num) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= num) return; if (anchor_flags[idx] < 0.5) { anchor_flags[idx] = 0; // regard all non-positive as negatives } } template<typename DType> __global__ void NegativeMining(const DType *overlaps, const DType *cls_preds, DType *anchor_flags, DType *buffer, const float negative_mining_ratio, const float negative_mining_thresh, const int minimum_negative_samples, const int num_anchors, const int num_labels, const int num_classes) { int nbatch = blockIdx.x; overlaps += nbatch * num_anchors * num_labels; cls_preds += nbatch * num_classes * num_anchors; anchor_flags += nbatch * num_anchors; buffer += nbatch * num_anchors * 3; const int num_threads = kMaxThreadsPerBlock; int num_positive; __shared__ int num_negative; if (threadIdx.x == 0) { num_positive = 0; for (int i = 0; i < num_anchors; ++i) { if (anchor_flags[i] > .5) { ++num_positive; } } num_negative = num_positive * negative_mining_ratio; if (num_negative < minimum_negative_samples) { num_negative = minimum_negative_samples; } if (num_negative > (num_anchors - num_positive)) { num_negative = num_anchors - num_positive; } } __syncthreads(); if (num_negative < 1) return; for (int i = threadIdx.x; i < num_anchors; i += num_threads) { buffer[i] = -1.f; if (anchor_flags[i] < 0) { // compute max class prediction score DType max_val = cls_preds[i]; for (int j = 1; j < num_classes; ++j) { DType temp = cls_preds[i + num_anchors * j]; if (temp > 
max_val) max_val = temp; } DType sum = 0.f; for (int j = 0; j < num_classes; ++j) { DType temp = cls_preds[i + num_anchors * j]; sum += exp(temp - max_val); } DType prob = exp(cls_preds[i] - max_val) / sum; DType max_iou = -1.f; for (int j = 0; j < num_labels; ++j) { DType temp = overlaps[i * num_labels + j]; if (temp > max_iou) max_iou = temp; } if (max_iou < negative_mining_thresh) { // only do it for anchors with iou < thresh buffer[i] = -prob; // -log(x) actually, but value does not matter } } } __syncthreads(); // descend merge sorting for negative mining DType *index_src = buffer + num_anchors; DType *index_dst = buffer + num_anchors * 2; DType *src = index_src; DType *dst = index_dst; for (int i = threadIdx.x; i < num_anchors; i += num_threads) { index_src[i] = i; } __syncthreads(); for (int width = 2; width < (num_anchors << 1); width <<= 1) { int slices = (num_anchors - 1) / (num_threads * width) + 1; int start = width * threadIdx.x * slices; for (int slice = 0; slice < slices; ++slice) { if (start >= num_anchors) break; int middle = start + (width >> 1); if (num_anchors < middle) middle = num_anchors; int end = start + width; if (num_anchors < end) end = num_anchors; int i = start; int j = middle; for (int k = start; k < end; ++k) { int idx_i = static_cast<int>(src[i]); int idx_j = static_cast<int>(src[j]); if (i < middle && (j >= end || buffer[idx_i] > buffer[idx_j])) { dst[k] = src[i]; ++i; } else { dst[k] = src[j]; ++j; } } start += width; } __syncthreads(); // swap src/dst src = src == index_src? index_dst : index_src; dst = dst == index_src? 
index_dst : index_src; } __syncthreads(); for (int i = threadIdx.x; i < num_negative; i += num_threads) { int idx = static_cast<int>(src[i]); if (anchor_flags[idx] < 0) { anchor_flags[idx] = 0; } } } template<typename DType> __global__ void AssignTrainigTargets(DType *loc_target, DType *loc_mask, DType *cls_target, DType *anchor_flags, DType *best_matches, DType *labels, DType *anchors, const int num_anchors, const int num_labels, const int label_width, const float vx, const float vy, const float vw, const float vh) { const int nbatch = blockIdx.x; loc_target += nbatch * num_anchors * 5; loc_mask += nbatch * num_anchors * 5; cls_target += nbatch * num_anchors; anchor_flags += nbatch * num_anchors; best_matches += nbatch * num_anchors; labels += nbatch * num_labels * label_width; const int num_threads = kMaxThreadsPerBlock; for (int i = threadIdx.x; i < num_anchors; i += num_threads) { if (anchor_flags[i] > 0.5) { // positive sample int offset_l = static_cast<int>(best_matches[i]) * label_width; cls_target[i] = labels[offset_l] + 1; // 0 reserved for background int offset = i * 4; int offset_t = i * 5; loc_mask[offset_t] = 1; loc_mask[offset_t + 1] = 1; loc_mask[offset_t + 2] = 1; loc_mask[offset_t + 3] = 1; loc_mask[offset_t + 4] = 1; // regression targets float al = anchors[offset]; float at = anchors[offset + 1]; float ar = anchors[offset + 2]; float ab = anchors[offset + 3]; float aw = ar - al; float ah = ab - at; float ax = (al + ar) * 0.5; float ay = (at + ab) * 0.5; float gl = labels[offset_l + 1]; float gt = labels[offset_l + 2]; float gr = labels[offset_l + 3]; float gb = labels[offset_l + 4]; float gz = labels[offset_l + 5]; float gw = gr - gl; float gh = gb - gt; float gx = (gl + gr) * 0.5; float gy = (gt + gb) * 0.5; loc_target[offset_t] = DType((gx - ax) / aw / vx); // xmin loc_target[offset_t + 1] = DType((gy - ay) / ah / vy); // ymin loc_target[offset_t + 2] = DType(log(gw / aw) / vw); // xmax loc_target[offset_t + 3] = DType(log(gh / ah) / vh); // 
ymax loc_target[offset_t + 4] = DType(gz) / 0.1; // dist } else if (anchor_flags[i] < 0.5 && anchor_flags[i] > -0.5) { // background cls_target[i] = 0; } } } } // namespace cuda template<typename DType> inline void MultiBoxTargetForward(const Tensor<gpu, 2, DType> &loc_target, const Tensor<gpu, 2, DType> &loc_mask, const Tensor<gpu, 2, DType> &cls_target, const Tensor<gpu, 2, DType> &anchors, const Tensor<gpu, 3, DType> &labels, const Tensor<gpu, 3, DType> &cls_preds, const Tensor<gpu, 4, DType> &temp_space, const float overlap_threshold, const float background_label, const float negative_mining_ratio, const float negative_mining_thresh, const int minimum_negative_samples, const nnvm::Tuple<float> &variances) { const int num_batches = labels.size(0); const int num_labels = labels.size(1); const int label_width = labels.size(2); const int num_anchors = anchors.size(0); const int num_classes = cls_preds.size(1); CHECK_GE(num_batches, 1); CHECK_GT(num_labels, 2); CHECK_GE(num_anchors, 1); CHECK_EQ(variances.ndim(), 4); // init ground-truth flags, by checking valid labels temp_space[1] = 0.f; DType *gt_flags = temp_space[1].dptr_; const int num_threads = cuda::kMaxThreadsPerBlock; dim3 init_thread_dim(num_threads); dim3 init_block_dim((num_batches * num_labels - 1) / num_threads + 1); cuda::CheckLaunchParam(init_block_dim, init_thread_dim, "MultiBoxTarget Init"); hipLaunchKernelGGL(( cuda::InitGroundTruthFlags<DType>), dim3(init_block_dim), dim3(init_thread_dim), 0, 0, gt_flags, labels.dptr_, num_batches, num_labels, label_width); MULTIBOX_TARGET_CUDA_CHECK(hipPeekAtLastError()); // compute best matches temp_space[2] = -1.f; temp_space[3] = -1.f; DType *anchor_flags = temp_space[2].dptr_; DType *best_matches = temp_space[3].dptr_; const DType *overlaps = temp_space[0].dptr_; cuda::CheckLaunchParam(num_batches, num_threads, "MultiBoxTarget Matching"); hipLaunchKernelGGL(( cuda::FindBestMatches<DType>), dim3(num_batches), dim3(num_threads), 0, 0, best_matches, gt_flags, 
anchor_flags, overlaps, num_anchors, num_labels); MULTIBOX_TARGET_CUDA_CHECK(hipPeekAtLastError()); // find good matches with overlap > threshold if (overlap_threshold > 0) { hipLaunchKernelGGL(( cuda::FindGoodMatches<DType>), dim3(num_batches), dim3(num_threads), 0, 0, best_matches, anchor_flags, overlaps, num_anchors, num_labels, overlap_threshold); MULTIBOX_TARGET_CUDA_CHECK(hipPeekAtLastError()); } // do negative mining or not if (negative_mining_ratio > 0) { CHECK_GT(negative_mining_thresh, 0); temp_space[4] = 0; DType *buffer = temp_space[4].dptr_; hipLaunchKernelGGL(( cuda::NegativeMining<DType>), dim3(num_batches), dim3(num_threads), 0, 0, overlaps, cls_preds.dptr_, anchor_flags, buffer, negative_mining_ratio, negative_mining_thresh, minimum_negative_samples, num_anchors, num_labels, num_classes); MULTIBOX_TARGET_CUDA_CHECK(hipPeekAtLastError()); } else { int num_blocks = (num_batches * num_anchors - 1) / num_threads + 1; cuda::CheckLaunchParam(num_blocks, num_threads, "MultiBoxTarget Negative"); hipLaunchKernelGGL(( cuda::UseAllNegatives<DType>), dim3(num_blocks), dim3(num_threads), 0, 0, anchor_flags, num_batches * num_anchors); MULTIBOX_TARGET_CUDA_CHECK(hipPeekAtLastError()); } hipLaunchKernelGGL(( cuda::AssignTrainigTargets<DType>), dim3(num_batches), dim3(num_threads), 0, 0, loc_target.dptr_, loc_mask.dptr_, cls_target.dptr_, anchor_flags, best_matches, labels.dptr_, anchors.dptr_, num_anchors, num_labels, label_width, variances[0], variances[1], variances[2], variances[3]); MULTIBOX_TARGET_CUDA_CHECK(hipPeekAtLastError()); } } // namespace mshadow namespace mxnet { namespace op { template<> Operator *CreateOp<gpu>(MultiBoxTargetParam param, int dtype) { Operator *op = NULL; MSHADOW_REAL_TYPE_SWITCH(dtype, DType, { op = new MultiBoxTargetOp<gpu, DType>(param); }); return op; } } // namespace op } // namespace mxnet
e92c4e3e1e6b86e033bdfbad2685d27b231a58b1.cu
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file multibox_target.cu * \brief MultiBoxTarget op * \author Joshua Zhang */ #include "./multibox_target-inl.h" #include <mshadow/cuda/tensor_gpu-inl.cuh> #define MULTIBOX_TARGET_CUDA_CHECK(condition) \ /* Code block avoids redefinition of cudaError_t error */ \ do { \ cudaError_t error = condition; \ CHECK_EQ(error, cudaSuccess) << " " << cudaGetErrorString(error); \ } while (0) namespace mshadow { namespace cuda { template<typename DType> __global__ void InitGroundTruthFlags(DType *gt_flags, const DType *labels, const int num_batches, const int num_labels, const int label_width) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= num_batches * num_labels) return; int b = index / num_labels; int l = index % num_labels; if (*(labels + b * num_labels * label_width + l * label_width) == -1.f) { *(gt_flags + b * num_labels + l) = 0; } else { *(gt_flags + b * num_labels + l) = 1; } } template<typename DType> __global__ void FindBestMatches(DType *best_matches, DType *gt_flags, DType *anchor_flags, const DType *overlaps, const int num_anchors, const int num_labels) { int nbatch = blockIdx.x; gt_flags += nbatch * num_labels; overlaps += nbatch * num_anchors * num_labels; 
best_matches += nbatch * num_anchors; anchor_flags += nbatch * num_anchors; const int num_threads = kMaxThreadsPerBlock; __shared__ int max_indices_y[kMaxThreadsPerBlock]; __shared__ int max_indices_x[kMaxThreadsPerBlock]; __shared__ float max_values[kMaxThreadsPerBlock]; while (1) { // check if all done. bool finished = true; for (int i = 0; i < num_labels; ++i) { if (gt_flags[i] > .5) { finished = false; break; } } if (finished) break; // all done. // finding max indices in different threads int max_x = -1; int max_y = -1; DType max_value = 1e-6; // start with very small overlap for (int i = threadIdx.x; i < num_anchors; i += num_threads) { if (anchor_flags[i] > .5) continue; for (int j = 0; j < num_labels; ++j) { if (gt_flags[j] > .5) { DType temp = overlaps[i * num_labels + j]; if (temp > max_value) { max_x = j; max_y = i; max_value = temp; } } } } max_indices_x[threadIdx.x] = max_x; max_indices_y[threadIdx.x] = max_y; max_values[threadIdx.x] = max_value; __syncthreads(); if (threadIdx.x == 0) { // merge results and assign best match int max_x = -1; int max_y = -1; DType max_value = -1; for (int k = 0; k < num_threads; ++k) { if (max_indices_y[k] < 0 || max_indices_x[k] < 0) continue; float temp = max_values[k]; if (temp > max_value) { max_x = max_indices_x[k]; max_y = max_indices_y[k]; max_value = temp; } } if (max_x >= 0 && max_y >= 0) { best_matches[max_y] = max_x; // mark flags as visited gt_flags[max_x] = 0.f; anchor_flags[max_y] = 1.f; } else { // no more good matches for (int i = 0; i < num_labels; ++i) { gt_flags[i] = 0.f; } } } __syncthreads(); } } template<typename DType> __global__ void FindGoodMatches(DType *best_matches, DType *anchor_flags, const DType *overlaps, const int num_anchors, const int num_labels, const float overlap_threshold) { int nbatch = blockIdx.x; overlaps += nbatch * num_anchors * num_labels; best_matches += nbatch * num_anchors; anchor_flags += nbatch * num_anchors; const int num_threads = kMaxThreadsPerBlock; for (int i = 
threadIdx.x; i < num_anchors; i += num_threads) { if (anchor_flags[i] < 0) { int idx = -1; float max_value = -1.f; for (int j = 0; j < num_labels; ++j) { DType temp = overlaps[i * num_labels + j]; if (temp > max_value) { max_value = temp; idx = j; } } if (max_value > overlap_threshold && (idx >= 0)) { best_matches[i] = idx; anchor_flags[i] = 0.9f; } } } } template<typename DType> __global__ void UseAllNegatives(DType *anchor_flags, const int num) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= num) return; if (anchor_flags[idx] < 0.5) { anchor_flags[idx] = 0; // regard all non-positive as negatives } } template<typename DType> __global__ void NegativeMining(const DType *overlaps, const DType *cls_preds, DType *anchor_flags, DType *buffer, const float negative_mining_ratio, const float negative_mining_thresh, const int minimum_negative_samples, const int num_anchors, const int num_labels, const int num_classes) { int nbatch = blockIdx.x; overlaps += nbatch * num_anchors * num_labels; cls_preds += nbatch * num_classes * num_anchors; anchor_flags += nbatch * num_anchors; buffer += nbatch * num_anchors * 3; const int num_threads = kMaxThreadsPerBlock; int num_positive; __shared__ int num_negative; if (threadIdx.x == 0) { num_positive = 0; for (int i = 0; i < num_anchors; ++i) { if (anchor_flags[i] > .5) { ++num_positive; } } num_negative = num_positive * negative_mining_ratio; if (num_negative < minimum_negative_samples) { num_negative = minimum_negative_samples; } if (num_negative > (num_anchors - num_positive)) { num_negative = num_anchors - num_positive; } } __syncthreads(); if (num_negative < 1) return; for (int i = threadIdx.x; i < num_anchors; i += num_threads) { buffer[i] = -1.f; if (anchor_flags[i] < 0) { // compute max class prediction score DType max_val = cls_preds[i]; for (int j = 1; j < num_classes; ++j) { DType temp = cls_preds[i + num_anchors * j]; if (temp > max_val) max_val = temp; } DType sum = 0.f; for (int j = 0; j < num_classes; ++j) 
{ DType temp = cls_preds[i + num_anchors * j]; sum += exp(temp - max_val); } DType prob = exp(cls_preds[i] - max_val) / sum; DType max_iou = -1.f; for (int j = 0; j < num_labels; ++j) { DType temp = overlaps[i * num_labels + j]; if (temp > max_iou) max_iou = temp; } if (max_iou < negative_mining_thresh) { // only do it for anchors with iou < thresh buffer[i] = -prob; // -log(x) actually, but value does not matter } } } __syncthreads(); // descend merge sorting for negative mining DType *index_src = buffer + num_anchors; DType *index_dst = buffer + num_anchors * 2; DType *src = index_src; DType *dst = index_dst; for (int i = threadIdx.x; i < num_anchors; i += num_threads) { index_src[i] = i; } __syncthreads(); for (int width = 2; width < (num_anchors << 1); width <<= 1) { int slices = (num_anchors - 1) / (num_threads * width) + 1; int start = width * threadIdx.x * slices; for (int slice = 0; slice < slices; ++slice) { if (start >= num_anchors) break; int middle = start + (width >> 1); if (num_anchors < middle) middle = num_anchors; int end = start + width; if (num_anchors < end) end = num_anchors; int i = start; int j = middle; for (int k = start; k < end; ++k) { int idx_i = static_cast<int>(src[i]); int idx_j = static_cast<int>(src[j]); if (i < middle && (j >= end || buffer[idx_i] > buffer[idx_j])) { dst[k] = src[i]; ++i; } else { dst[k] = src[j]; ++j; } } start += width; } __syncthreads(); // swap src/dst src = src == index_src? index_dst : index_src; dst = dst == index_src? 
index_dst : index_src; } __syncthreads(); for (int i = threadIdx.x; i < num_negative; i += num_threads) { int idx = static_cast<int>(src[i]); if (anchor_flags[idx] < 0) { anchor_flags[idx] = 0; } } } template<typename DType> __global__ void AssignTrainigTargets(DType *loc_target, DType *loc_mask, DType *cls_target, DType *anchor_flags, DType *best_matches, DType *labels, DType *anchors, const int num_anchors, const int num_labels, const int label_width, const float vx, const float vy, const float vw, const float vh) { const int nbatch = blockIdx.x; loc_target += nbatch * num_anchors * 5; loc_mask += nbatch * num_anchors * 5; cls_target += nbatch * num_anchors; anchor_flags += nbatch * num_anchors; best_matches += nbatch * num_anchors; labels += nbatch * num_labels * label_width; const int num_threads = kMaxThreadsPerBlock; for (int i = threadIdx.x; i < num_anchors; i += num_threads) { if (anchor_flags[i] > 0.5) { // positive sample int offset_l = static_cast<int>(best_matches[i]) * label_width; cls_target[i] = labels[offset_l] + 1; // 0 reserved for background int offset = i * 4; int offset_t = i * 5; loc_mask[offset_t] = 1; loc_mask[offset_t + 1] = 1; loc_mask[offset_t + 2] = 1; loc_mask[offset_t + 3] = 1; loc_mask[offset_t + 4] = 1; // regression targets float al = anchors[offset]; float at = anchors[offset + 1]; float ar = anchors[offset + 2]; float ab = anchors[offset + 3]; float aw = ar - al; float ah = ab - at; float ax = (al + ar) * 0.5; float ay = (at + ab) * 0.5; float gl = labels[offset_l + 1]; float gt = labels[offset_l + 2]; float gr = labels[offset_l + 3]; float gb = labels[offset_l + 4]; float gz = labels[offset_l + 5]; float gw = gr - gl; float gh = gb - gt; float gx = (gl + gr) * 0.5; float gy = (gt + gb) * 0.5; loc_target[offset_t] = DType((gx - ax) / aw / vx); // xmin loc_target[offset_t + 1] = DType((gy - ay) / ah / vy); // ymin loc_target[offset_t + 2] = DType(log(gw / aw) / vw); // xmax loc_target[offset_t + 3] = DType(log(gh / ah) / vh); // 
ymax loc_target[offset_t + 4] = DType(gz) / 0.1; // dist } else if (anchor_flags[i] < 0.5 && anchor_flags[i] > -0.5) { // background cls_target[i] = 0; } } } } // namespace cuda template<typename DType> inline void MultiBoxTargetForward(const Tensor<gpu, 2, DType> &loc_target, const Tensor<gpu, 2, DType> &loc_mask, const Tensor<gpu, 2, DType> &cls_target, const Tensor<gpu, 2, DType> &anchors, const Tensor<gpu, 3, DType> &labels, const Tensor<gpu, 3, DType> &cls_preds, const Tensor<gpu, 4, DType> &temp_space, const float overlap_threshold, const float background_label, const float negative_mining_ratio, const float negative_mining_thresh, const int minimum_negative_samples, const nnvm::Tuple<float> &variances) { const int num_batches = labels.size(0); const int num_labels = labels.size(1); const int label_width = labels.size(2); const int num_anchors = anchors.size(0); const int num_classes = cls_preds.size(1); CHECK_GE(num_batches, 1); CHECK_GT(num_labels, 2); CHECK_GE(num_anchors, 1); CHECK_EQ(variances.ndim(), 4); // init ground-truth flags, by checking valid labels temp_space[1] = 0.f; DType *gt_flags = temp_space[1].dptr_; const int num_threads = cuda::kMaxThreadsPerBlock; dim3 init_thread_dim(num_threads); dim3 init_block_dim((num_batches * num_labels - 1) / num_threads + 1); cuda::CheckLaunchParam(init_block_dim, init_thread_dim, "MultiBoxTarget Init"); cuda::InitGroundTruthFlags<DType><<<init_block_dim, init_thread_dim>>>( gt_flags, labels.dptr_, num_batches, num_labels, label_width); MULTIBOX_TARGET_CUDA_CHECK(cudaPeekAtLastError()); // compute best matches temp_space[2] = -1.f; temp_space[3] = -1.f; DType *anchor_flags = temp_space[2].dptr_; DType *best_matches = temp_space[3].dptr_; const DType *overlaps = temp_space[0].dptr_; cuda::CheckLaunchParam(num_batches, num_threads, "MultiBoxTarget Matching"); cuda::FindBestMatches<DType><<<num_batches, num_threads>>>(best_matches, gt_flags, anchor_flags, overlaps, num_anchors, num_labels); 
MULTIBOX_TARGET_CUDA_CHECK(cudaPeekAtLastError()); // find good matches with overlap > threshold if (overlap_threshold > 0) { cuda::FindGoodMatches<DType><<<num_batches, num_threads>>>(best_matches, anchor_flags, overlaps, num_anchors, num_labels, overlap_threshold); MULTIBOX_TARGET_CUDA_CHECK(cudaPeekAtLastError()); } // do negative mining or not if (negative_mining_ratio > 0) { CHECK_GT(negative_mining_thresh, 0); temp_space[4] = 0; DType *buffer = temp_space[4].dptr_; cuda::NegativeMining<DType><<<num_batches, num_threads>>>(overlaps, cls_preds.dptr_, anchor_flags, buffer, negative_mining_ratio, negative_mining_thresh, minimum_negative_samples, num_anchors, num_labels, num_classes); MULTIBOX_TARGET_CUDA_CHECK(cudaPeekAtLastError()); } else { int num_blocks = (num_batches * num_anchors - 1) / num_threads + 1; cuda::CheckLaunchParam(num_blocks, num_threads, "MultiBoxTarget Negative"); cuda::UseAllNegatives<DType><<<num_blocks, num_threads>>>(anchor_flags, num_batches * num_anchors); MULTIBOX_TARGET_CUDA_CHECK(cudaPeekAtLastError()); } cuda::AssignTrainigTargets<DType><<<num_batches, num_threads>>>( loc_target.dptr_, loc_mask.dptr_, cls_target.dptr_, anchor_flags, best_matches, labels.dptr_, anchors.dptr_, num_anchors, num_labels, label_width, variances[0], variances[1], variances[2], variances[3]); MULTIBOX_TARGET_CUDA_CHECK(cudaPeekAtLastError()); } } // namespace mshadow namespace mxnet { namespace op { template<> Operator *CreateOp<gpu>(MultiBoxTargetParam param, int dtype) { Operator *op = NULL; MSHADOW_REAL_TYPE_SWITCH(dtype, DType, { op = new MultiBoxTargetOp<gpu, DType>(param); }); return op; } } // namespace op } // namespace mxnet
d7d7a25fa83f8bb37a91b17bbdfa95e8f85553ea.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <chrono> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> #include "Ising2D.hpp" #include "mykernel.hpp" using namespace std; int main(void){ int xgrid = 32; int ygrid = 32; const chrono::system_clock::time_point start = chrono::system_clock::now(); Ising2D tmp; // setup tmp.hostInit(); tmp.setDim(xgrid,ygrid,(ROW + xgrid -1)/xgrid ,(COL + ygrid -1 ) / ygrid); tmp.devInit(); tmp.devInfo(); // test tmp.energyDtoH(); //tmp.writeGraph("test.png"); //tmp.printEnergy(); // main tmp.deviceRun(); //tmp.hostRun(); // End tmp.devEnd(); tmp.hostEnd(); const chrono::system_clock::time_point end = chrono::system_clock::now(); const auto ttime = chrono::duration_cast<chrono::milliseconds>(end - start); printf("total::%10ldms\n",ttime.count()); return 0; }
d7d7a25fa83f8bb37a91b17bbdfa95e8f85553ea.cu
#include <iostream> #include <chrono> #include <cuda.h> #include <cuda_runtime.h> #include <curand.h> #include <curand_kernel.h> #include "Ising2D.hpp" #include "mykernel.hpp" using namespace std; int main(void){ int xgrid = 32; int ygrid = 32; const chrono::system_clock::time_point start = chrono::system_clock::now(); Ising2D tmp; // setup tmp.hostInit(); tmp.setDim(xgrid,ygrid,(ROW + xgrid -1)/xgrid ,(COL + ygrid -1 ) / ygrid); tmp.devInit(); tmp.devInfo(); // test tmp.energyDtoH(); //tmp.writeGraph("test.png"); //tmp.printEnergy(); // main tmp.deviceRun(); //tmp.hostRun(); // End tmp.devEnd(); tmp.hostEnd(); const chrono::system_clock::time_point end = chrono::system_clock::now(); const auto ttime = chrono::duration_cast<chrono::milliseconds>(end - start); printf("total::%10ldms\n",ttime.count()); return 0; }
9d4ce4745a32b0288312a6b52b8010497b474ea2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * * tex1dfetch_host.cu * * Microdemo to illustrate how to texture from host memory. * * Build with: nvcc -I ../chLib <options> tex1dfetch_host.cu * Requires: No minimum SM requirement. * * Copyright (c) 2011-2012, Archaea Software, LLC. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
* */ #include <stdio.h> #include <chError.h> #define NUM_FLOATS 16 texture<float, 1, hipReadModeElementType> tex1; __global__ void TexReadout( float *out, size_t N ) { for ( size_t i = blockIdx.x*blockDim.x + threadIdx.x; i < N; i += gridDim.x*blockDim.x ) { out[i] = tex1Dfetch( tex1, i ); } } void PrintTex( float *host, size_t N ) { float *device; hipError_t status; memset( host, 0, N*sizeof(float) ); cuda(HostGetDevicePointer( (void **) &device, host, 0 )); hipLaunchKernelGGL(( TexReadout), dim3(2),dim3(384), 0, 0, device, N ); cuda(ThreadSynchronize()); for ( int i = 0; i < N; i++ ) { printf( "%.2f ", host[i] ); } printf( "\n" ); Error:; } int main( int argc, char *argv[] ) { int ret = 1; float *p = 0; float *finHost; float *finDevice; float *foutHost; float *foutDevice; hipError_t status; // hipChannelFormatDesc desc; cuda(SetDeviceFlags(hipDeviceMapHost)); cuda(Malloc( (void **) &p, NUM_FLOATS*sizeof(float)) ); cuda(HostAlloc( (void **) &finHost, NUM_FLOATS*sizeof(float), hipHostMallocMapped)); cuda(HostGetDevicePointer( (void **) &finDevice, finHost, 0 )); cuda(HostAlloc( (void **) &foutHost, NUM_FLOATS*sizeof(float), hipHostMallocMapped)); cuda(HostGetDevicePointer( (void **) &foutDevice, foutHost, 0 )); for ( int i = 0; i < NUM_FLOATS; i++ ) { finHost[i] = (float) i; } { size_t offset; cuda(BindTexture( &offset, tex1, finDevice, NUM_FLOATS*sizeof(float)) ); } PrintTex( foutHost, NUM_FLOATS ); ret = 0; Error: hipFree( p ); return ret; }
9d4ce4745a32b0288312a6b52b8010497b474ea2.cu
/* * * tex1dfetch_host.cu * * Microdemo to illustrate how to texture from host memory. * * Build with: nvcc -I ../chLib <options> tex1dfetch_host.cu * Requires: No minimum SM requirement. * * Copyright (c) 2011-2012, Archaea Software, LLC. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
* */ #include <stdio.h> #include <chError.h> #define NUM_FLOATS 16 texture<float, 1, cudaReadModeElementType> tex1; __global__ void TexReadout( float *out, size_t N ) { for ( size_t i = blockIdx.x*blockDim.x + threadIdx.x; i < N; i += gridDim.x*blockDim.x ) { out[i] = tex1Dfetch( tex1, i ); } } void PrintTex( float *host, size_t N ) { float *device; cudaError_t status; memset( host, 0, N*sizeof(float) ); cuda(HostGetDevicePointer( (void **) &device, host, 0 )); TexReadout<<<2,384>>>( device, N ); cuda(ThreadSynchronize()); for ( int i = 0; i < N; i++ ) { printf( "%.2f ", host[i] ); } printf( "\n" ); Error:; } int main( int argc, char *argv[] ) { int ret = 1; float *p = 0; float *finHost; float *finDevice; float *foutHost; float *foutDevice; cudaError_t status; // cudaChannelFormatDesc desc; cuda(SetDeviceFlags(cudaDeviceMapHost)); cuda(Malloc( (void **) &p, NUM_FLOATS*sizeof(float)) ); cuda(HostAlloc( (void **) &finHost, NUM_FLOATS*sizeof(float), cudaHostAllocMapped)); cuda(HostGetDevicePointer( (void **) &finDevice, finHost, 0 )); cuda(HostAlloc( (void **) &foutHost, NUM_FLOATS*sizeof(float), cudaHostAllocMapped)); cuda(HostGetDevicePointer( (void **) &foutDevice, foutHost, 0 )); for ( int i = 0; i < NUM_FLOATS; i++ ) { finHost[i] = (float) i; } { size_t offset; cuda(BindTexture( &offset, tex1, finDevice, NUM_FLOATS*sizeof(float)) ); } PrintTex( foutHost, NUM_FLOATS ); ret = 0; Error: cudaFree( p ); return ret; }
b2c4496e51ea4a46b974ff2173e4c921f502c6c3.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <stdio.h> /* Naive kernel for transposing a rectangular host array. */ #define CHECK(call) \ { \ const hipError_t error = call; \ if (error != hipSuccess) \ { \ fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__); \ fprintf(stderr, "code: %d, reason: %s\n", error, \ hipGetErrorString(error)); \ exit(1); \ } \ } void initialData(float *in, const int size) { // initialise matrix for (int i = 0; i < size; i++) { in[i] = (float)(rand() & 0xFF) / 10.0f; } return; } void printData(float *in, const int size) { // print matrix for (int i = 0; i < size; i++) { printf("%3.0f ", in[i]); } printf("\n"); return; } void checkResult(float *hostRef, float *gpuRef, int rows, int cols) { // check that transposed matrix is correct double epsilon = 1.0E-8; bool match = 1; for (int i = 0; i < rows; i++) { for (int j = 0; j < cols; j++) { int index = i*cols + j; if (abs(hostRef[index] - gpuRef[index]) > epsilon) { match = 0; printf("different on (%d, %d) (offset=%d) element in transposed matrix: host %f gpu %f\n", i, j, index, hostRef[index], gpuRef[index]); break; } } if (!match) break; } if (!match) printf("Arrays do not match.\n\n"); } void transposeHost(float *out, float *in, const int nrows, const int ncols) { // transpose using CPU for (int iy = 0; iy < ncols; ++iy) { for (int ix = 0; ix < nrows; ++ix) { out[ix * ncols + iy] = in[iy * nrows + ix]; } } } __global__ void justcopy(float *out, float *in, const int nrows, const int ncols) { // routine to copy data from one matrix to another -- no transposition done // get matrix coordinate (ix,iy) unsigned int ix = blockIdx.x * blockDim.x + threadIdx.x; unsigned int iy = blockIdx.y * blockDim.y + threadIdx.y; // copy data as is with boundary test if (ix < nrows && iy < ncols) { out[ix * ncols + iy] = in[ix * ncols + iy]; } } __global__ void naivetranspose(float *out, float *in, const int nrows, const int ncols) { // naive routine to 
transpose a matrix -- no optimisations considered // get matrix coordinate (ix,iy) unsigned int ix = blockIdx.x * blockDim.x + threadIdx.x; unsigned int iy = blockIdx.y * blockDim.y + threadIdx.y; // transpose with boundary test if (ix < nrows && iy < ncols) { out[ix * ncols + iy] = in[iy * nrows + ix]; } } int main(int argc, char **argv) { // set up device int dev = 0; hipDeviceProp_t deviceProp; CHECK(hipGetDeviceProperties(&deviceProp, dev)); printf("%s starting transpose at ", argv[0]); printf("device %d: %s ", dev, deviceProp.name); CHECK(hipSetDevice(dev)); // initialise CUDA timing float milli; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); bool iprint = 0; // set up array size 2048 int nrows = 1 << 11; int ncols = 1 << 11; int blockx = 16; int blocky = 16; // interpret command line arguments if present if (argc > 1) iprint = atoi(argv[1]); if (argc > 2) blockx = atoi(argv[2]); if (argc > 3) blocky = atoi(argv[3]); if (argc > 4) nrows = atoi(argv[4]); if (argc > 5) ncols = atoi(argv[5]); printf(" with matrix nrows %d ncols %d\n", nrows, ncols); size_t ncells = nrows * ncols; size_t nBytes = ncells * sizeof(float); // execution configuration dim3 block (blockx, blocky); dim3 grid ((nrows + block.x - 1) / block.x, (ncols + block.y - 1) / block.y); // allocate host memory float *h_A = (float *)malloc(nBytes); float *hostRef = (float *)malloc(nBytes); float *gpuRef = (float *)malloc(nBytes); // initialize host array initialData(h_A, nrows * ncols); // transpose at host side transposeHost(hostRef, h_A, nrows, ncols); // allocate device memory float *d_A, *d_C; CHECK(hipMalloc((float**)&d_A, nBytes)); CHECK(hipMalloc((float**)&d_C, nBytes)); // copy data from host to device CHECK(hipMemcpy(d_A, h_A, nBytes, hipMemcpyHostToDevice)); // execute justcopy kernel CHECK(hipMemset(d_C, 0, nBytes)); memset(gpuRef, 0, nBytes); hipEventRecord(start); // start timing hipLaunchKernelGGL(( justcopy), dim3(grid), dim3(block), 0, 0, d_C, d_A, nrows, 
ncols); CHECK(hipDeviceSynchronize()); hipEventRecord(stop); hipEventSynchronize(stop); hipEventElapsedTime(&milli, start, stop); // stop timing actual kernel execution CHECK(hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost)); if(iprint) printData(gpuRef, nrows * ncols); float ibnd = 2 * ncells * sizeof(float) / (1024.0 * 1024.0 * 1024.0) / (milli/1000); // convert bytes and millisec to GB/sec // ibnd = 2 * ncells * sizeof(float) / 1e9 / milli/1000; printf("justcopy kernel elapsed %f msec <<< grid (%d,%d) block (%d,%d)>>> effective bandwidth %f GB/s\n", milli, grid.x, grid.y, block.x, block.y, ibnd); // execute naive transpose kernel CHECK(hipMemset(d_C, 0, nBytes)); memset(gpuRef, 0, nBytes); hipEventRecord(start); // start timing hipLaunchKernelGGL(( naivetranspose), dim3(grid), dim3(block), 0, 0, d_C, d_A, nrows, ncols); CHECK(hipDeviceSynchronize()); hipEventRecord(stop); hipEventSynchronize(stop); hipEventElapsedTime(&milli, start, stop); // stop timing actual kernel execution CHECK(hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost)); if(iprint) printData(gpuRef, ncells); checkResult(hostRef, gpuRef, ncols, nrows); ibnd = 2 * ncells * sizeof(float) / (1024.0 * 1024.0 * 1024.0) / (milli/1000); printf("naive transpose elapsed %f msec <<< grid (%d,%d) block (%d,%d)>>> effective bandwidth %f GB/s\n", milli, grid.x, grid.y, block.x, block.y, ibnd); // free host and device memory CHECK(hipFree(d_A)); CHECK(hipFree(d_C)); free(h_A); free(hostRef); free(gpuRef); // reset device CHECK(hipDeviceReset()); return EXIT_SUCCESS; }
b2c4496e51ea4a46b974ff2173e4c921f502c6c3.cu
#include <cuda_runtime.h> #include <stdio.h> /* Naive kernel for transposing a rectangular host array. */ #define CHECK(call) \ { \ const cudaError_t error = call; \ if (error != cudaSuccess) \ { \ fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__); \ fprintf(stderr, "code: %d, reason: %s\n", error, \ cudaGetErrorString(error)); \ exit(1); \ } \ } void initialData(float *in, const int size) { // initialise matrix for (int i = 0; i < size; i++) { in[i] = (float)(rand() & 0xFF) / 10.0f; } return; } void printData(float *in, const int size) { // print matrix for (int i = 0; i < size; i++) { printf("%3.0f ", in[i]); } printf("\n"); return; } void checkResult(float *hostRef, float *gpuRef, int rows, int cols) { // check that transposed matrix is correct double epsilon = 1.0E-8; bool match = 1; for (int i = 0; i < rows; i++) { for (int j = 0; j < cols; j++) { int index = i*cols + j; if (abs(hostRef[index] - gpuRef[index]) > epsilon) { match = 0; printf("different on (%d, %d) (offset=%d) element in transposed matrix: host %f gpu %f\n", i, j, index, hostRef[index], gpuRef[index]); break; } } if (!match) break; } if (!match) printf("Arrays do not match.\n\n"); } void transposeHost(float *out, float *in, const int nrows, const int ncols) { // transpose using CPU for (int iy = 0; iy < ncols; ++iy) { for (int ix = 0; ix < nrows; ++ix) { out[ix * ncols + iy] = in[iy * nrows + ix]; } } } __global__ void justcopy(float *out, float *in, const int nrows, const int ncols) { // routine to copy data from one matrix to another -- no transposition done // get matrix coordinate (ix,iy) unsigned int ix = blockIdx.x * blockDim.x + threadIdx.x; unsigned int iy = blockIdx.y * blockDim.y + threadIdx.y; // copy data as is with boundary test if (ix < nrows && iy < ncols) { out[ix * ncols + iy] = in[ix * ncols + iy]; } } __global__ void naivetranspose(float *out, float *in, const int nrows, const int ncols) { // naive routine to transpose a matrix -- no optimisations considered // get matrix 
coordinate (ix,iy) unsigned int ix = blockIdx.x * blockDim.x + threadIdx.x; unsigned int iy = blockIdx.y * blockDim.y + threadIdx.y; // transpose with boundary test if (ix < nrows && iy < ncols) { out[ix * ncols + iy] = in[iy * nrows + ix]; } } int main(int argc, char **argv) { // set up device int dev = 0; cudaDeviceProp deviceProp; CHECK(cudaGetDeviceProperties(&deviceProp, dev)); printf("%s starting transpose at ", argv[0]); printf("device %d: %s ", dev, deviceProp.name); CHECK(cudaSetDevice(dev)); // initialise CUDA timing float milli; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); bool iprint = 0; // set up array size 2048 int nrows = 1 << 11; int ncols = 1 << 11; int blockx = 16; int blocky = 16; // interpret command line arguments if present if (argc > 1) iprint = atoi(argv[1]); if (argc > 2) blockx = atoi(argv[2]); if (argc > 3) blocky = atoi(argv[3]); if (argc > 4) nrows = atoi(argv[4]); if (argc > 5) ncols = atoi(argv[5]); printf(" with matrix nrows %d ncols %d\n", nrows, ncols); size_t ncells = nrows * ncols; size_t nBytes = ncells * sizeof(float); // execution configuration dim3 block (blockx, blocky); dim3 grid ((nrows + block.x - 1) / block.x, (ncols + block.y - 1) / block.y); // allocate host memory float *h_A = (float *)malloc(nBytes); float *hostRef = (float *)malloc(nBytes); float *gpuRef = (float *)malloc(nBytes); // initialize host array initialData(h_A, nrows * ncols); // transpose at host side transposeHost(hostRef, h_A, nrows, ncols); // allocate device memory float *d_A, *d_C; CHECK(cudaMalloc((float**)&d_A, nBytes)); CHECK(cudaMalloc((float**)&d_C, nBytes)); // copy data from host to device CHECK(cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice)); // execute justcopy kernel CHECK(cudaMemset(d_C, 0, nBytes)); memset(gpuRef, 0, nBytes); cudaEventRecord(start); // start timing justcopy<<<grid, block>>>(d_C, d_A, nrows, ncols); CHECK(cudaDeviceSynchronize()); cudaEventRecord(stop); cudaEventSynchronize(stop); 
cudaEventElapsedTime(&milli, start, stop); // stop timing actual kernel execution CHECK(cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost)); if(iprint) printData(gpuRef, nrows * ncols); float ibnd = 2 * ncells * sizeof(float) / (1024.0 * 1024.0 * 1024.0) / (milli/1000); // convert bytes and millisec to GB/sec // ibnd = 2 * ncells * sizeof(float) / 1e9 / milli/1000; printf("justcopy kernel elapsed %f msec <<< grid (%d,%d) block (%d,%d)>>> effective bandwidth %f GB/s\n", milli, grid.x, grid.y, block.x, block.y, ibnd); // execute naive transpose kernel CHECK(cudaMemset(d_C, 0, nBytes)); memset(gpuRef, 0, nBytes); cudaEventRecord(start); // start timing naivetranspose<<<grid, block>>>(d_C, d_A, nrows, ncols); CHECK(cudaDeviceSynchronize()); cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&milli, start, stop); // stop timing actual kernel execution CHECK(cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost)); if(iprint) printData(gpuRef, ncells); checkResult(hostRef, gpuRef, ncols, nrows); ibnd = 2 * ncells * sizeof(float) / (1024.0 * 1024.0 * 1024.0) / (milli/1000); printf("naive transpose elapsed %f msec <<< grid (%d,%d) block (%d,%d)>>> effective bandwidth %f GB/s\n", milli, grid.x, grid.y, block.x, block.y, ibnd); // free host and device memory CHECK(cudaFree(d_A)); CHECK(cudaFree(d_C)); free(h_A); free(hostRef); free(gpuRef); // reset device CHECK(cudaDeviceReset()); return EXIT_SUCCESS; }
7cfe3046a3bed461bd97f4e9a3a459e5addc5eba.hip
// !!! This is a file automatically generated by hipify!!! #include <ATen/native/UnaryOps.h> #include <limits> #include <ATen/AccumulateType.h> #include <ATen/Context.h> #include <ATen/Dispatch.h> #include <ATen/native/DispatchStub.h> #include <ATen/native/Math.h> #include <ATen/native/TensorFactories.h> #include <ATen/native/TensorIterator.h> #include <ATen/native/hip/Loops.cuh> #include <ATen/native/hip/Math.cuh> #include <ATen/NumericUtils.h> #include <c10/hip/HIPMathCompat.h> #include <ATen/NumericUtils.h> #include <c10/util/complex.h> namespace at { namespace native { void bitwise_not_kernel_cuda(TensorIterator& iter) { if (iter.dtype() == ScalarType::Bool) { gpu_kernel(iter, []GPU_LAMBDA(bool a) { return !a; }); } else { AT_DISPATCH_INTEGRAL_TYPES(iter.dtype(), "bitwise_not_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { return ~a; }); }); } } void exp_kernel_cuda(TensorIterator& iter) { AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.common_dtype(), "exp_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { return ::exp(a); }); }); } void expm1_kernel_cuda(TensorIterator& iter) { AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.common_dtype(), "expm1_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { return ::expm1(a); }); }); } // We manually overload rsqrt because std::rsqrt does not work with complex types. 
template<typename scalar_t> __host__ __device__ static inline scalar_t rsqrt_wrapper(scalar_t v) { return ::rsqrt(v); } template<typename T> __host__ __device__ static inline c10::complex<T> rsqrt_wrapper(c10::complex<T> v) { const c10::complex<T> one = c10::complex<T>(1.0, 0); // std::sqrt for c10::complex is overloaded in c10/util/complex_math.h return one / ::sqrt(v); } void rsqrt_kernel_cuda(TensorIterator& iter) { AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(ScalarType::Half, iter.common_dtype(), "rsqrt_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { // In CUDA, ::rsqrt is overloaded for float and at::Half here is implicitly cast to float. return rsqrt_wrapper(a); }); }); } void sqrt_kernel_cuda(TensorIterator& iter) { AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "sqrt_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { return ::sqrt(a); }); }); } void clamp_kernel_cuda(TensorIterator& iter, const Scalar& min_value, const Scalar& max_value) { AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.dtype(), "clamp_cuda", [&]() { auto lower = min_value.to<scalar_t>(); auto upper = max_value.to<scalar_t>(); gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t v) -> scalar_t { // Propagate nan, which doesn't propagate automatically for ROCm if (_isnan(v)) { return v; } else { return ::min(::max(v, lower), upper); } }); }); } void clamp_min_kernel_cuda(TensorIterator& iter, const Scalar& min_value) { AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.dtype(), "clamp_min_cuda", [&]() { auto lower = min_value.to<scalar_t>(); gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t v) -> scalar_t { // Propagate nan, which doesn't propagate automatically for ROCm if (_isnan(v)) { return v; } else { return ::max(v, lower); } }); }); } void clamp_max_kernel_cuda(TensorIterator& iter, const Scalar& max_value) { AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.dtype(), "clamp_max_cuda", [&]() { auto upper = 
max_value.to<scalar_t>(); gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t v) -> scalar_t { // Propagate nan, which doesn't propagate automatically for ROCm if (_isnan(v)) { return v; } else { return ::min(v, upper); } }); }); } void nan_to_num_kernel_cuda( TensorIterator& iter, c10::optional<double> nan, c10::optional<double> pos_inf, c10::optional<double> neg_inf) { AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "nan_to_num_cuda", [&]() { scalar_t nan_replacement = static_cast<scalar_t>(nan.value_or(0.)); scalar_t pos_inf_replacement = pos_inf.has_value() ? static_cast<scalar_t>(pos_inf.value()) : std::numeric_limits<scalar_t>::max(); scalar_t neg_inf_replacement = neg_inf.has_value() ? static_cast<scalar_t>(neg_inf.value()) : std::numeric_limits<scalar_t>::lowest(); gpu_kernel(iter, [=] GPU_LAMBDA(scalar_t a) -> scalar_t { return ( at::_isnan(a) ? nan_replacement : (a == std::numeric_limits<scalar_t>::infinity() ? pos_inf_replacement : (a == -std::numeric_limits<scalar_t>::infinity() ? neg_inf_replacement : a))); }); }); } void frexp_kernel_cuda(TensorIterator& iter) { #ifdef __HIP_PLATFORM_HCC__ // Reference: https://rocmdocs.amd.com/en/latest/ROCm_API_References/HIP-MATH.html // https://github.com/ROCm-Developer-Tools/HIP/issues/2169 // ROCm does not support frexp function yet TORCH_CHECK(false, "torch.frexp() is not implemented on ROCm platform."); #else AT_DISPATCH_FLOATING_TYPES_AND(ScalarType::Half, // The iter.dtype() here is the dtype of mantissa output. // It's a floating point type and must be the same as the input's dtype. 
iter.dtype(), "frexp_cuda", [&]() { gpu_kernel_multiple_outputs(iter, [=] GPU_LAMBDA (scalar_t a) -> thrust::tuple<scalar_t, int32_t> { int32_t exponent; scalar_t mantissa = ::frexp(a, &exponent); return {mantissa, exponent}; }); }); #endif } REGISTER_DISPATCH(bitwise_not_stub, &bitwise_not_kernel_cuda); REGISTER_DISPATCH(exp_stub, &exp_kernel_cuda); REGISTER_DISPATCH(expm1_stub, &expm1_kernel_cuda); REGISTER_DISPATCH(rsqrt_stub, &rsqrt_kernel_cuda); REGISTER_DISPATCH(sqrt_stub, &sqrt_kernel_cuda); REGISTER_DISPATCH(clamp_stub, &clamp_kernel_cuda); REGISTER_DISPATCH(clamp_min_stub, &clamp_min_kernel_cuda); REGISTER_DISPATCH(clamp_max_stub, &clamp_max_kernel_cuda); REGISTER_DISPATCH(nan_to_num_stub, &nan_to_num_kernel_cuda); REGISTER_DISPATCH(frexp_stub, &frexp_kernel_cuda); } // namespace native } // namespace at
7cfe3046a3bed461bd97f4e9a3a459e5addc5eba.cu
#include <ATen/native/UnaryOps.h> #include <limits> #include <ATen/AccumulateType.h> #include <ATen/Context.h> #include <ATen/Dispatch.h> #include <ATen/native/DispatchStub.h> #include <ATen/native/Math.h> #include <ATen/native/TensorFactories.h> #include <ATen/native/TensorIterator.h> #include <ATen/native/cuda/Loops.cuh> #include <ATen/native/cuda/Math.cuh> #include <ATen/NumericUtils.h> #include <c10/cuda/CUDAMathCompat.h> #include <ATen/NumericUtils.h> #include <c10/util/complex.h> namespace at { namespace native { void bitwise_not_kernel_cuda(TensorIterator& iter) { if (iter.dtype() == ScalarType::Bool) { gpu_kernel(iter, []GPU_LAMBDA(bool a) { return !a; }); } else { AT_DISPATCH_INTEGRAL_TYPES(iter.dtype(), "bitwise_not_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { return ~a; }); }); } } void exp_kernel_cuda(TensorIterator& iter) { AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.common_dtype(), "exp_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { return ::exp(a); }); }); } void expm1_kernel_cuda(TensorIterator& iter) { AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.common_dtype(), "expm1_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { return ::expm1(a); }); }); } // We manually overload rsqrt because std::rsqrt does not work with complex types. 
template<typename scalar_t> __host__ __device__ static inline scalar_t rsqrt_wrapper(scalar_t v) { return ::rsqrt(v); } template<typename T> __host__ __device__ static inline c10::complex<T> rsqrt_wrapper(c10::complex<T> v) { const c10::complex<T> one = c10::complex<T>(1.0, 0); // std::sqrt for c10::complex is overloaded in c10/util/complex_math.h return one / ::sqrt(v); } void rsqrt_kernel_cuda(TensorIterator& iter) { AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(ScalarType::Half, iter.common_dtype(), "rsqrt_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { // In CUDA, ::rsqrt is overloaded for float and at::Half here is implicitly cast to float. return rsqrt_wrapper(a); }); }); } void sqrt_kernel_cuda(TensorIterator& iter) { AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "sqrt_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { return ::sqrt(a); }); }); } void clamp_kernel_cuda(TensorIterator& iter, const Scalar& min_value, const Scalar& max_value) { AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.dtype(), "clamp_cuda", [&]() { auto lower = min_value.to<scalar_t>(); auto upper = max_value.to<scalar_t>(); gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t v) -> scalar_t { // Propagate nan, which doesn't propagate automatically for ROCm if (_isnan(v)) { return v; } else { return ::min(::max(v, lower), upper); } }); }); } void clamp_min_kernel_cuda(TensorIterator& iter, const Scalar& min_value) { AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.dtype(), "clamp_min_cuda", [&]() { auto lower = min_value.to<scalar_t>(); gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t v) -> scalar_t { // Propagate nan, which doesn't propagate automatically for ROCm if (_isnan(v)) { return v; } else { return ::max(v, lower); } }); }); } void clamp_max_kernel_cuda(TensorIterator& iter, const Scalar& max_value) { AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.dtype(), "clamp_max_cuda", [&]() { auto upper = 
max_value.to<scalar_t>(); gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t v) -> scalar_t { // Propagate nan, which doesn't propagate automatically for ROCm if (_isnan(v)) { return v; } else { return ::min(v, upper); } }); }); } void nan_to_num_kernel_cuda( TensorIterator& iter, c10::optional<double> nan, c10::optional<double> pos_inf, c10::optional<double> neg_inf) { AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "nan_to_num_cuda", [&]() { scalar_t nan_replacement = static_cast<scalar_t>(nan.value_or(0.)); scalar_t pos_inf_replacement = pos_inf.has_value() ? static_cast<scalar_t>(pos_inf.value()) : std::numeric_limits<scalar_t>::max(); scalar_t neg_inf_replacement = neg_inf.has_value() ? static_cast<scalar_t>(neg_inf.value()) : std::numeric_limits<scalar_t>::lowest(); gpu_kernel(iter, [=] GPU_LAMBDA(scalar_t a) -> scalar_t { return ( at::_isnan(a) ? nan_replacement : (a == std::numeric_limits<scalar_t>::infinity() ? pos_inf_replacement : (a == -std::numeric_limits<scalar_t>::infinity() ? neg_inf_replacement : a))); }); }); } void frexp_kernel_cuda(TensorIterator& iter) { #ifdef __HIP_PLATFORM_HCC__ // Reference: https://rocmdocs.amd.com/en/latest/ROCm_API_References/HIP-MATH.html // https://github.com/ROCm-Developer-Tools/HIP/issues/2169 // ROCm does not support frexp function yet TORCH_CHECK(false, "torch.frexp() is not implemented on ROCm platform."); #else AT_DISPATCH_FLOATING_TYPES_AND(ScalarType::Half, // The iter.dtype() here is the dtype of mantissa output. // It's a floating point type and must be the same as the input's dtype. 
iter.dtype(), "frexp_cuda", [&]() { gpu_kernel_multiple_outputs(iter, [=] GPU_LAMBDA (scalar_t a) -> thrust::tuple<scalar_t, int32_t> { int32_t exponent; scalar_t mantissa = std::frexp(a, &exponent); return {mantissa, exponent}; }); }); #endif } REGISTER_DISPATCH(bitwise_not_stub, &bitwise_not_kernel_cuda); REGISTER_DISPATCH(exp_stub, &exp_kernel_cuda); REGISTER_DISPATCH(expm1_stub, &expm1_kernel_cuda); REGISTER_DISPATCH(rsqrt_stub, &rsqrt_kernel_cuda); REGISTER_DISPATCH(sqrt_stub, &sqrt_kernel_cuda); REGISTER_DISPATCH(clamp_stub, &clamp_kernel_cuda); REGISTER_DISPATCH(clamp_min_stub, &clamp_min_kernel_cuda); REGISTER_DISPATCH(clamp_max_stub, &clamp_max_kernel_cuda); REGISTER_DISPATCH(nan_to_num_stub, &nan_to_num_kernel_cuda); REGISTER_DISPATCH(frexp_stub, &frexp_kernel_cuda); } // namespace native } // namespace at
2a6dba2251bd8ce050379378322bd8a04678413a.hip
// !!! This is a file automatically generated by hipify!!! #include <vector> #include "cuda/dcn_v2_im2col_cuda.h" #include <ATen/ATen.h> #include <ATen/hip/HIPContext.h> #include <THH/THH.h> #include <THH/THHAtomics.cuh> #include <THH/THHDeviceUtils.cuh> extern THCState *state; // author: Charles Shang // https://github.com/torch/cunn/blob/master/lib/THCUNN/generic/SpatialConvolutionMM.cu at::Tensor dcn_v2_cuda_forward(const at::Tensor &input, const at::Tensor &weight, const at::Tensor &bias, const at::Tensor &offset, const at::Tensor &mask, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int dilation_h, const int dilation_w, const int deformable_group) { // THCAssertSameGPU(THCudaTensor_checkGPU(state, 5, input, weight, bias, offset, mask)); AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor"); AT_ASSERTM(weight.type().is_cuda(), "weight must be a CUDA tensor"); AT_ASSERTM(bias.type().is_cuda(), "bias must be a CUDA tensor"); AT_ASSERTM(offset.type().is_cuda(), "offset must be a CUDA tensor"); AT_ASSERTM(mask.type().is_cuda(), "mask must be a CUDA tensor"); const int batch = input.size(0); const int channels = input.size(1); const int height = input.size(2); const int width = input.size(3); const int channels_out = weight.size(0); const int channels_kernel = weight.size(1); const int kernel_h_ = weight.size(2); const int kernel_w_ = weight.size(3); // printf("Kernels: %d %d %d %d\n", kernel_h_, kernel_w_, kernel_w, kernel_h); // printf("Channels: %d %d\n", channels, channels_kernel); // printf("Channels: %d %d\n", channels_out, channels_kernel); AT_ASSERTM(kernel_h_ == kernel_h && kernel_w_ == kernel_w, "Input shape and kernel shape wont match: (%d x %d vs %d x %d).", kernel_h_, kernel_w, kernel_h_, kernel_w_); AT_ASSERTM(channels == channels_kernel, "Input shape and kernel channels wont match: (%d vs %d).", channels, channels_kernel); const int height_out = (height + 2 * pad_h - 
(dilation_h * (kernel_h - 1) + 1)) / stride_h + 1; const int width_out = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1; auto ones = at::ones({height_out, width_out}, input.options()); auto columns = at::empty({channels * kernel_h * kernel_w, 1 * height_out * width_out}, input.options()); auto output = at::empty({batch, channels_out, height_out, width_out}, input.options()); using scalar_t = float; for (int b = 0; b < batch; b++) { auto input_n = input.select(0, b); auto offset_n = offset.select(0, b); auto mask_n = mask.select(0, b); auto output_n = output.select(0, b); // Do Bias first: // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) // (N x 1) (1 x M) long m_ = channels_out; long n_ = height_out * width_out; long k_ = 1; THCudaBlas_Sgemm(state, 't', 'n', n_, m_, k_, 1.0f, ones.contiguous().data<scalar_t>(), k_, bias.contiguous().data<scalar_t>(), k_, 0.0f, output_n.data<scalar_t>(), n_); modulated_deformable_im2col_cuda(THCState_getCurrentStream(state), input_n.data<scalar_t>(), offset_n.data<scalar_t>(), mask_n.data<scalar_t>(), 1, channels, height, width, height_out, width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, deformable_group, columns.data<scalar_t>()); //(k * m) x (m * n) // Y = WC long m = channels_out; long n = height_out * width_out; long k = channels * kernel_h * kernel_w; THCudaBlas_Sgemm(state, 'n', 'n', n, m, k, 1.0f, columns.data<scalar_t>(), n, weight.data<scalar_t>(), k, 1.0f, output_n.data<scalar_t>(), n); } return output; } std::vector<at::Tensor> dcn_v2_cuda_backward(const at::Tensor &input, const at::Tensor &weight, const at::Tensor &bias, const at::Tensor &offset, const at::Tensor &mask, const at::Tensor &grad_output, int kernel_h, int kernel_w, int stride_h, int stride_w, int pad_h, int pad_w, int dilation_h, int dilation_w, int deformable_group) { THArgCheck(input.is_contiguous(), 1, "input tensor has to be contiguous"); 
THArgCheck(weight.is_contiguous(), 2, "weight tensor has to be contiguous"); AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor"); AT_ASSERTM(weight.type().is_cuda(), "weight must be a CUDA tensor"); AT_ASSERTM(bias.type().is_cuda(), "bias must be a CUDA tensor"); AT_ASSERTM(offset.type().is_cuda(), "offset must be a CUDA tensor"); AT_ASSERTM(mask.type().is_cuda(), "mask must be a CUDA tensor"); const int batch = input.size(0); const int channels = input.size(1); const int height = input.size(2); const int width = input.size(3); const int channels_out = weight.size(0); const int channels_kernel = weight.size(1); const int kernel_h_ = weight.size(2); const int kernel_w_ = weight.size(3); AT_ASSERTM(kernel_h_ == kernel_h && kernel_w_ == kernel_w, "Input shape and kernel shape wont match: (%d x %d vs %d x %d).", kernel_h_, kernel_w, kernel_h_, kernel_w_); AT_ASSERTM(channels == channels_kernel, "Input shape and kernel channels wont match: (%d vs %d).", channels, channels_kernel); const int height_out = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1; const int width_out = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1; auto ones = at::ones({height_out, width_out}, input.options()); auto columns = at::empty({channels * kernel_h * kernel_w, 1 * height_out * width_out}, input.options()); auto output = at::empty({batch, channels_out, height_out, width_out}, input.options()); auto grad_input = at::zeros_like(input); auto grad_weight = at::zeros_like(weight); auto grad_bias = at::zeros_like(bias); auto grad_offset = at::zeros_like(offset); auto grad_mask = at::zeros_like(mask); using scalar_t = float; for (int b = 0; b < batch; b++) { auto input_n = input.select(0, b); auto offset_n = offset.select(0, b); auto mask_n = mask.select(0, b); auto grad_output_n = grad_output.select(0, b); auto grad_input_n = grad_input.select(0, b); auto grad_offset_n = grad_offset.select(0, b); auto grad_mask_n = grad_mask.select(0, 
b); long m = channels * kernel_h * kernel_w; long n = height_out * width_out; long k = channels_out; THCudaBlas_Sgemm(state, 'n', 't', n, m, k, 1.0f, grad_output_n.data<scalar_t>(), n, weight.data<scalar_t>(), m, 0.0f, columns.data<scalar_t>(), n); // gradient w.r.t. input coordinate data modulated_deformable_col2im_coord_cuda(THCState_getCurrentStream(state), columns.data<scalar_t>(), input_n.data<scalar_t>(), offset_n.data<scalar_t>(), mask_n.data<scalar_t>(), 1, channels, height, width, height_out, width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, deformable_group, grad_offset_n.data<scalar_t>(), grad_mask_n.data<scalar_t>()); // gradient w.r.t. input data modulated_deformable_col2im_cuda(THCState_getCurrentStream(state), columns.data<scalar_t>(), offset_n.data<scalar_t>(), mask_n.data<scalar_t>(), 1, channels, height, width, height_out, width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, deformable_group, grad_input_n.data<scalar_t>()); // gradient w.r.t. weight, dWeight should accumulate across the batch and group modulated_deformable_im2col_cuda(THCState_getCurrentStream(state), input_n.data<scalar_t>(), offset_n.data<scalar_t>(), mask_n.data<scalar_t>(), 1, channels, height, width, height_out, width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, deformable_group, columns.data<scalar_t>()); long m_ = channels_out; long n_ = channels * kernel_h * kernel_w; long k_ = height_out * width_out; THCudaBlas_Sgemm(state, 't', 'n', n_, m_, k_, 1.0f, columns.data<scalar_t>(), k_, grad_output_n.data<scalar_t>(), k_, 1.0f, grad_weight.data<scalar_t>(), n_); // gradient w.r.t. bias // long m_ = channels_out; // long k__ = height_out * width_out; THCudaBlas_Sgemv(state, 't', k_, m_, 1.0f, grad_output_n.data<scalar_t>(), k_, ones.data<scalar_t>(), 1, 1.0f, grad_bias.data<scalar_t>(), 1); } return { grad_input, grad_offset, grad_mask, grad_weight, grad_bias }; }
2a6dba2251bd8ce050379378322bd8a04678413a.cu
#include <vector> #include "cuda/dcn_v2_im2col_cuda.h" #include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include <THC/THC.h> #include <THC/THCAtomics.cuh> #include <THC/THCDeviceUtils.cuh> extern THCState *state; // author: Charles Shang // https://github.com/torch/cunn/blob/master/lib/THCUNN/generic/SpatialConvolutionMM.cu at::Tensor dcn_v2_cuda_forward(const at::Tensor &input, const at::Tensor &weight, const at::Tensor &bias, const at::Tensor &offset, const at::Tensor &mask, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int dilation_h, const int dilation_w, const int deformable_group) { // THCAssertSameGPU(THCudaTensor_checkGPU(state, 5, input, weight, bias, offset, mask)); AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor"); AT_ASSERTM(weight.type().is_cuda(), "weight must be a CUDA tensor"); AT_ASSERTM(bias.type().is_cuda(), "bias must be a CUDA tensor"); AT_ASSERTM(offset.type().is_cuda(), "offset must be a CUDA tensor"); AT_ASSERTM(mask.type().is_cuda(), "mask must be a CUDA tensor"); const int batch = input.size(0); const int channels = input.size(1); const int height = input.size(2); const int width = input.size(3); const int channels_out = weight.size(0); const int channels_kernel = weight.size(1); const int kernel_h_ = weight.size(2); const int kernel_w_ = weight.size(3); // printf("Kernels: %d %d %d %d\n", kernel_h_, kernel_w_, kernel_w, kernel_h); // printf("Channels: %d %d\n", channels, channels_kernel); // printf("Channels: %d %d\n", channels_out, channels_kernel); AT_ASSERTM(kernel_h_ == kernel_h && kernel_w_ == kernel_w, "Input shape and kernel shape wont match: (%d x %d vs %d x %d).", kernel_h_, kernel_w, kernel_h_, kernel_w_); AT_ASSERTM(channels == channels_kernel, "Input shape and kernel channels wont match: (%d vs %d).", channels, channels_kernel); const int height_out = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1; const 
int width_out = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1; auto ones = at::ones({height_out, width_out}, input.options()); auto columns = at::empty({channels * kernel_h * kernel_w, 1 * height_out * width_out}, input.options()); auto output = at::empty({batch, channels_out, height_out, width_out}, input.options()); using scalar_t = float; for (int b = 0; b < batch; b++) { auto input_n = input.select(0, b); auto offset_n = offset.select(0, b); auto mask_n = mask.select(0, b); auto output_n = output.select(0, b); // Do Bias first: // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) // (N x 1) (1 x M) long m_ = channels_out; long n_ = height_out * width_out; long k_ = 1; THCudaBlas_Sgemm(state, 't', 'n', n_, m_, k_, 1.0f, ones.contiguous().data<scalar_t>(), k_, bias.contiguous().data<scalar_t>(), k_, 0.0f, output_n.data<scalar_t>(), n_); modulated_deformable_im2col_cuda(THCState_getCurrentStream(state), input_n.data<scalar_t>(), offset_n.data<scalar_t>(), mask_n.data<scalar_t>(), 1, channels, height, width, height_out, width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, deformable_group, columns.data<scalar_t>()); //(k * m) x (m * n) // Y = WC long m = channels_out; long n = height_out * width_out; long k = channels * kernel_h * kernel_w; THCudaBlas_Sgemm(state, 'n', 'n', n, m, k, 1.0f, columns.data<scalar_t>(), n, weight.data<scalar_t>(), k, 1.0f, output_n.data<scalar_t>(), n); } return output; } std::vector<at::Tensor> dcn_v2_cuda_backward(const at::Tensor &input, const at::Tensor &weight, const at::Tensor &bias, const at::Tensor &offset, const at::Tensor &mask, const at::Tensor &grad_output, int kernel_h, int kernel_w, int stride_h, int stride_w, int pad_h, int pad_w, int dilation_h, int dilation_w, int deformable_group) { THArgCheck(input.is_contiguous(), 1, "input tensor has to be contiguous"); THArgCheck(weight.is_contiguous(), 2, "weight tensor has 
to be contiguous"); AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor"); AT_ASSERTM(weight.type().is_cuda(), "weight must be a CUDA tensor"); AT_ASSERTM(bias.type().is_cuda(), "bias must be a CUDA tensor"); AT_ASSERTM(offset.type().is_cuda(), "offset must be a CUDA tensor"); AT_ASSERTM(mask.type().is_cuda(), "mask must be a CUDA tensor"); const int batch = input.size(0); const int channels = input.size(1); const int height = input.size(2); const int width = input.size(3); const int channels_out = weight.size(0); const int channels_kernel = weight.size(1); const int kernel_h_ = weight.size(2); const int kernel_w_ = weight.size(3); AT_ASSERTM(kernel_h_ == kernel_h && kernel_w_ == kernel_w, "Input shape and kernel shape wont match: (%d x %d vs %d x %d).", kernel_h_, kernel_w, kernel_h_, kernel_w_); AT_ASSERTM(channels == channels_kernel, "Input shape and kernel channels wont match: (%d vs %d).", channels, channels_kernel); const int height_out = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1; const int width_out = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1; auto ones = at::ones({height_out, width_out}, input.options()); auto columns = at::empty({channels * kernel_h * kernel_w, 1 * height_out * width_out}, input.options()); auto output = at::empty({batch, channels_out, height_out, width_out}, input.options()); auto grad_input = at::zeros_like(input); auto grad_weight = at::zeros_like(weight); auto grad_bias = at::zeros_like(bias); auto grad_offset = at::zeros_like(offset); auto grad_mask = at::zeros_like(mask); using scalar_t = float; for (int b = 0; b < batch; b++) { auto input_n = input.select(0, b); auto offset_n = offset.select(0, b); auto mask_n = mask.select(0, b); auto grad_output_n = grad_output.select(0, b); auto grad_input_n = grad_input.select(0, b); auto grad_offset_n = grad_offset.select(0, b); auto grad_mask_n = grad_mask.select(0, b); long m = channels * kernel_h * kernel_w; long n = 
height_out * width_out; long k = channels_out; THCudaBlas_Sgemm(state, 'n', 't', n, m, k, 1.0f, grad_output_n.data<scalar_t>(), n, weight.data<scalar_t>(), m, 0.0f, columns.data<scalar_t>(), n); // gradient w.r.t. input coordinate data modulated_deformable_col2im_coord_cuda(THCState_getCurrentStream(state), columns.data<scalar_t>(), input_n.data<scalar_t>(), offset_n.data<scalar_t>(), mask_n.data<scalar_t>(), 1, channels, height, width, height_out, width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, deformable_group, grad_offset_n.data<scalar_t>(), grad_mask_n.data<scalar_t>()); // gradient w.r.t. input data modulated_deformable_col2im_cuda(THCState_getCurrentStream(state), columns.data<scalar_t>(), offset_n.data<scalar_t>(), mask_n.data<scalar_t>(), 1, channels, height, width, height_out, width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, deformable_group, grad_input_n.data<scalar_t>()); // gradient w.r.t. weight, dWeight should accumulate across the batch and group modulated_deformable_im2col_cuda(THCState_getCurrentStream(state), input_n.data<scalar_t>(), offset_n.data<scalar_t>(), mask_n.data<scalar_t>(), 1, channels, height, width, height_out, width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, deformable_group, columns.data<scalar_t>()); long m_ = channels_out; long n_ = channels * kernel_h * kernel_w; long k_ = height_out * width_out; THCudaBlas_Sgemm(state, 't', 'n', n_, m_, k_, 1.0f, columns.data<scalar_t>(), k_, grad_output_n.data<scalar_t>(), k_, 1.0f, grad_weight.data<scalar_t>(), n_); // gradient w.r.t. bias // long m_ = channels_out; // long k__ = height_out * width_out; THCudaBlas_Sgemv(state, 't', k_, m_, 1.0f, grad_output_n.data<scalar_t>(), k_, ones.data<scalar_t>(), 1, 1.0f, grad_bias.data<scalar_t>(), 1); } return { grad_input, grad_offset, grad_mask, grad_weight, grad_bias }; }
2baa57c20d93f059cdb3830cb9bcf34ea913d50c.hip
// !!! This is a file automatically generated by hipify!!! //#include "cudacommon.h" #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #include <math.h> #include <stdlib.h> #include <string.h> #include <cassert> #include <iostream> #include <vector> #include "../benchmark_common.h" #include "Scan.h" #include "scan_kernel.h" using namespace std; // **************************************************************************** // Function: addBenchmarkSpecOptions // // Purpose: // Add benchmark specific options parsing // // Arguments: // op: the options parser / parameter database // // Returns: nothing // // Programmer: Kyle Spafford // Creation: August 13, 2009 // // Modifications: // // **************************************************************************** // void addBenchmarkSpecOptions(OptionParser &op) //{ // op.addOption("iterations", OPT_INT, "256", "specify scan iterations"); //} // **************************************************************************** // Function: RunBenchmark // // Purpose: // Executes the scan (parallel prefix sum) benchmark // // Arguments: // resultDB: results from the benchmark are stored in this db // op: the options parser / parameter database // // Returns: nothing // // Programmer: Kyle Spafford // Creation: August 13, 2009 // // Modifications: // 5/18/2011 - KS - Changing to a non-recursive algorithm // **************************************************************************** int main_scan(hipStream_t stream_app, pthread_mutex_t* mutexapp, bool flag) { int device; hipGetDevice(&device); hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, device); cout << "Running single precision test" << endl; pthread_mutex_unlock(mutexapp); RunTest<float, float4>(stream_app, mutexapp, flag); // Test to see if this device supports double precision if ((deviceProp.major == 1 && deviceProp.minor >= 3) || (deviceProp.major >= 2)) { cout << "Running double precision test" << endl; RunTest<double, 
double4>(stream_app, mutexapp, flag); } else { cout << "Skipping double precision test" << endl; // char atts[1024] = "DP_Not_Supported"; // resultDB requires neg entry for every possible result /* int passes = op.getOptionInt("passes"); for (int k = 0; k < passes; k++) { resultDB.AddResult("Scan-DP" , atts, "GB/s", FLT_MAX); resultDB.AddResult("Scan-DP_PCIe" , atts, "GB/s", FLT_MAX); resultDB.AddResult("Scan-DP_Parity" , atts, "GB/s", FLT_MAX); }*/ } return 0; } template <class T, class vecT> void RunTest(hipStream_t stream_app, pthread_mutex_t* mutexapp, bool flag) { int probSizes[4] = {1, 8, 32, 64}; int size = probSizes[0]; // Convert to MiB size = (size * 1024 * 1024) / sizeof(T); // create input data on CPU unsigned int bytes = size * sizeof(T); // Allocate Host Memory T* h_idata; T* reference; T* h_odata; CUDA_SAFE_CALL(hipHostMalloc((void**)&h_idata, bytes)); CUDA_SAFE_CALL(hipHostMalloc((void**)&reference, bytes)); CUDA_SAFE_CALL(hipHostMalloc((void**)&h_odata, bytes)); // Initialize host memory cout << "Initializing host memory." << endl; for (int i = 0; i < size; i++) { h_idata[i] = i % 3; // Fill with some pattern h_odata[i] = i % 3; } // Thread configuration // Note: changing this may require updating the kernel calls below int num_blocks = 64; int num_threads = 256; int smem_size = sizeof(T) * num_threads; // Allocate device memory T *d_idata, *d_odata, *d_block_sums; CUDA_SAFE_CALL(hipMalloc((void**)&d_idata, bytes)); CUDA_SAFE_CALL(hipMalloc((void**)&d_odata, bytes)); CUDA_SAFE_CALL(hipMalloc((void**)&d_block_sums, num_blocks * sizeof(T))); // Copy data to GPU cout << "Copying data to device." 
<< endl; // hipEvent_t start, stop; // CUDA_SAFE_CALL(hipEventCreate(&start)); // CUDA_SAFE_CALL(hipEventCreate(&stop)); // CUDA_SAFE_CALL(hipEventRecord(start, 0)); CUDA_SAFE_CALL(hipMemcpyAsync(d_idata, h_idata, bytes, hipMemcpyHostToDevice, stream_app)); // hipEventRecord(stop, 0); // CUDA_SAFE_CALL(hipEventSynchronize(stop)); // Get elapsed time float transferTime = 0.0f; // hipEventElapsedTime(&transferTime, start, stop); transferTime *= 1.e-3; // int passes = op.getOptionInt("passes"); // int iters = op.getOptionInt("iterations"); int passes = 10; int iters = 256; cout << "Running benchmark with size " << size << endl; for (int k = 0; k < passes; k++) { float totalScanTime = 0.0f; // CUDA_SAFE_CALL(hipEventRecord(start, 0)); for (int j = 0; j < iters; j++) { // For scan, we use a reduce-then-scan approach // Each thread block gets an equal portion of the // input array, and computes the sum. pthread_mutex_lock(mutexapp); hipLaunchKernelGGL(( reduce<T, 256>), dim3(num_blocks), dim3(num_threads), smem_size, stream_app, d_idata, d_block_sums, size); // Next, a top-level exclusive scan is performed on the array // of block sums hipLaunchKernelGGL(( scan_single_block<T, 256>), dim3(1), dim3(num_threads), smem_size * 2, stream_app, d_block_sums, num_blocks); // Finally, a bottom-level scan is performed by each block // that is seeded with the scanned value in block sums hipLaunchKernelGGL(( bottom_scan<T, vecT>), dim3(num_blocks), dim3(num_threads), 2 * smem_size, stream_app, d_idata, d_odata, d_block_sums, size); pthread_mutex_unlock(mutexapp); } if (flag) cutilSafeCall(hipStreamSynchronize(stream_app)); // CUDA_SAFE_CALL(hipEventRecord(stop, 0)); // CUDA_SAFE_CALL(hipEventSynchronize(stop)); // hipEventElapsedTime(&totalScanTime, start, stop); float oTransferTime = 0.0f; // CUDA_SAFE_CALL(hipEventRecord(start, 0)); CUDA_SAFE_CALL(hipMemcpyAsync(h_odata, d_odata, bytes, hipMemcpyDeviceToHost, stream_app)); // CUDA_SAFE_CALL(hipEventRecord(stop, 0)); // 
CUDA_SAFE_CALL(hipEventSynchronize(stop)); // hipEventElapsedTime(&oTransferTime, start, stop); // Only add output transfer time once if (k == 0) { transferTime += oTransferTime; } // If results aren't correct, don't report perf numbers if (!scanCPU<T>(h_idata, reference, h_odata, size)) { return; } char atts[1024]; double avgTime = (totalScanTime / (double)iters); avgTime *= 1.e-3; sprintf(atts, "%ditems", size); // double gb = (double)(size * sizeof(T)) / (1000. * 1000. * 1000.); // resultDB.AddResult(testName, atts, "GB/s", gb / avgTime); // resultDB.AddResult(testName+"_PCIe", atts, "GB/s",gb / (avgTime + // transferTime)); // resultDB.AddResult(testName+"_Parity", atts, "N",transferTime / avgTime); } /*CUDA_SAFE_CALL(hipFree(d_idata)); CUDA_SAFE_CALL(hipFree(d_odata)); CUDA_SAFE_CALL(hipFree(d_block_sums)); CUDA_SAFE_CALL(hipHostFree(h_idata)); CUDA_SAFE_CALL(hipHostFree(h_odata)); CUDA_SAFE_CALL(hipHostFree(reference));*/ // CUDA_SAFE_CALL(hipEventDestroy(start)); // CUDA_SAFE_CALL(hipEventDestroy(stop)); } // **************************************************************************** // Function: scanCPU // // Purpose: // Simple cpu scan routine to verify device results // // Arguments: // data : the input data // reference : space for the cpu solution // dev_result : result from the device // size : number of elements // // Returns: nothing, prints relevant info to stdout // // Programmer: Kyle Spafford // Creation: August 13, 2009 // // Modifications: // // **************************************************************************** template <class T> bool scanCPU(T* data, T* reference, T* dev_result, const size_t size) { bool passed = true; T last = 0.0f; for (unsigned int i = 0; i < size; ++i) { reference[i] = data[i] + last; last = reference[i]; } for (unsigned int i = 0; i < size; ++i) { if (reference[i] != dev_result[i]) { #ifdef VERBOSE_OUTPUT cout << "Mismatch at i: " << i << " ref: " << reference[i] << " dev: " << dev_result[i] << endl; #endif 
passed = false; } } cout << "Test "; if (passed) cout << "Passed" << endl; else cout << "---FAILED---" << endl; return passed; }
2baa57c20d93f059cdb3830cb9bcf34ea913d50c.cu
//#include "cudacommon.h" #include <cuda.h> #include <cuda_runtime_api.h> #include <math.h> #include <stdlib.h> #include <string.h> #include <cassert> #include <iostream> #include <vector> #include "../benchmark_common.h" #include "Scan.h" #include "scan_kernel.h" using namespace std; // **************************************************************************** // Function: addBenchmarkSpecOptions // // Purpose: // Add benchmark specific options parsing // // Arguments: // op: the options parser / parameter database // // Returns: nothing // // Programmer: Kyle Spafford // Creation: August 13, 2009 // // Modifications: // // **************************************************************************** // void addBenchmarkSpecOptions(OptionParser &op) //{ // op.addOption("iterations", OPT_INT, "256", "specify scan iterations"); //} // **************************************************************************** // Function: RunBenchmark // // Purpose: // Executes the scan (parallel prefix sum) benchmark // // Arguments: // resultDB: results from the benchmark are stored in this db // op: the options parser / parameter database // // Returns: nothing // // Programmer: Kyle Spafford // Creation: August 13, 2009 // // Modifications: // 5/18/2011 - KS - Changing to a non-recursive algorithm // **************************************************************************** int main_scan(cudaStream_t stream_app, pthread_mutex_t* mutexapp, bool flag) { int device; cudaGetDevice(&device); cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, device); cout << "Running single precision test" << endl; pthread_mutex_unlock(mutexapp); RunTest<float, float4>(stream_app, mutexapp, flag); // Test to see if this device supports double precision if ((deviceProp.major == 1 && deviceProp.minor >= 3) || (deviceProp.major >= 2)) { cout << "Running double precision test" << endl; RunTest<double, double4>(stream_app, mutexapp, flag); } else { cout << "Skipping double precision test" 
<< endl; // char atts[1024] = "DP_Not_Supported"; // resultDB requires neg entry for every possible result /* int passes = op.getOptionInt("passes"); for (int k = 0; k < passes; k++) { resultDB.AddResult("Scan-DP" , atts, "GB/s", FLT_MAX); resultDB.AddResult("Scan-DP_PCIe" , atts, "GB/s", FLT_MAX); resultDB.AddResult("Scan-DP_Parity" , atts, "GB/s", FLT_MAX); }*/ } return 0; } template <class T, class vecT> void RunTest(cudaStream_t stream_app, pthread_mutex_t* mutexapp, bool flag) { int probSizes[4] = {1, 8, 32, 64}; int size = probSizes[0]; // Convert to MiB size = (size * 1024 * 1024) / sizeof(T); // create input data on CPU unsigned int bytes = size * sizeof(T); // Allocate Host Memory T* h_idata; T* reference; T* h_odata; CUDA_SAFE_CALL(cudaMallocHost((void**)&h_idata, bytes)); CUDA_SAFE_CALL(cudaMallocHost((void**)&reference, bytes)); CUDA_SAFE_CALL(cudaMallocHost((void**)&h_odata, bytes)); // Initialize host memory cout << "Initializing host memory." << endl; for (int i = 0; i < size; i++) { h_idata[i] = i % 3; // Fill with some pattern h_odata[i] = i % 3; } // Thread configuration // Note: changing this may require updating the kernel calls below int num_blocks = 64; int num_threads = 256; int smem_size = sizeof(T) * num_threads; // Allocate device memory T *d_idata, *d_odata, *d_block_sums; CUDA_SAFE_CALL(cudaMalloc((void**)&d_idata, bytes)); CUDA_SAFE_CALL(cudaMalloc((void**)&d_odata, bytes)); CUDA_SAFE_CALL(cudaMalloc((void**)&d_block_sums, num_blocks * sizeof(T))); // Copy data to GPU cout << "Copying data to device." 
<< endl; // cudaEvent_t start, stop; // CUDA_SAFE_CALL(cudaEventCreate(&start)); // CUDA_SAFE_CALL(cudaEventCreate(&stop)); // CUDA_SAFE_CALL(cudaEventRecord(start, 0)); CUDA_SAFE_CALL(cudaMemcpyAsync(d_idata, h_idata, bytes, cudaMemcpyHostToDevice, stream_app)); // cudaEventRecord(stop, 0); // CUDA_SAFE_CALL(cudaEventSynchronize(stop)); // Get elapsed time float transferTime = 0.0f; // cudaEventElapsedTime(&transferTime, start, stop); transferTime *= 1.e-3; // int passes = op.getOptionInt("passes"); // int iters = op.getOptionInt("iterations"); int passes = 10; int iters = 256; cout << "Running benchmark with size " << size << endl; for (int k = 0; k < passes; k++) { float totalScanTime = 0.0f; // CUDA_SAFE_CALL(cudaEventRecord(start, 0)); for (int j = 0; j < iters; j++) { // For scan, we use a reduce-then-scan approach // Each thread block gets an equal portion of the // input array, and computes the sum. pthread_mutex_lock(mutexapp); reduce<T, 256><<<num_blocks, num_threads, smem_size, stream_app>>>( d_idata, d_block_sums, size); // Next, a top-level exclusive scan is performed on the array // of block sums scan_single_block<T, 256><<<1, num_threads, smem_size * 2, stream_app>>>( d_block_sums, num_blocks); // Finally, a bottom-level scan is performed by each block // that is seeded with the scanned value in block sums bottom_scan<T, vecT><<<num_blocks, num_threads, 2 * smem_size, stream_app>>>( d_idata, d_odata, d_block_sums, size); pthread_mutex_unlock(mutexapp); } if (flag) cutilSafeCall(cudaStreamSynchronize(stream_app)); // CUDA_SAFE_CALL(cudaEventRecord(stop, 0)); // CUDA_SAFE_CALL(cudaEventSynchronize(stop)); // cudaEventElapsedTime(&totalScanTime, start, stop); float oTransferTime = 0.0f; // CUDA_SAFE_CALL(cudaEventRecord(start, 0)); CUDA_SAFE_CALL(cudaMemcpyAsync(h_odata, d_odata, bytes, cudaMemcpyDeviceToHost, stream_app)); // CUDA_SAFE_CALL(cudaEventRecord(stop, 0)); // CUDA_SAFE_CALL(cudaEventSynchronize(stop)); // cudaEventElapsedTime(&oTransferTime, 
start, stop); // Only add output transfer time once if (k == 0) { transferTime += oTransferTime; } // If results aren't correct, don't report perf numbers if (!scanCPU<T>(h_idata, reference, h_odata, size)) { return; } char atts[1024]; double avgTime = (totalScanTime / (double)iters); avgTime *= 1.e-3; sprintf(atts, "%ditems", size); // double gb = (double)(size * sizeof(T)) / (1000. * 1000. * 1000.); // resultDB.AddResult(testName, atts, "GB/s", gb / avgTime); // resultDB.AddResult(testName+"_PCIe", atts, "GB/s",gb / (avgTime + // transferTime)); // resultDB.AddResult(testName+"_Parity", atts, "N",transferTime / avgTime); } /*CUDA_SAFE_CALL(cudaFree(d_idata)); CUDA_SAFE_CALL(cudaFree(d_odata)); CUDA_SAFE_CALL(cudaFree(d_block_sums)); CUDA_SAFE_CALL(cudaFreeHost(h_idata)); CUDA_SAFE_CALL(cudaFreeHost(h_odata)); CUDA_SAFE_CALL(cudaFreeHost(reference));*/ // CUDA_SAFE_CALL(cudaEventDestroy(start)); // CUDA_SAFE_CALL(cudaEventDestroy(stop)); } // **************************************************************************** // Function: scanCPU // // Purpose: // Simple cpu scan routine to verify device results // // Arguments: // data : the input data // reference : space for the cpu solution // dev_result : result from the device // size : number of elements // // Returns: nothing, prints relevant info to stdout // // Programmer: Kyle Spafford // Creation: August 13, 2009 // // Modifications: // // **************************************************************************** template <class T> bool scanCPU(T* data, T* reference, T* dev_result, const size_t size) { bool passed = true; T last = 0.0f; for (unsigned int i = 0; i < size; ++i) { reference[i] = data[i] + last; last = reference[i]; } for (unsigned int i = 0; i < size; ++i) { if (reference[i] != dev_result[i]) { #ifdef VERBOSE_OUTPUT cout << "Mismatch at i: " << i << " ref: " << reference[i] << " dev: " << dev_result[i] << endl; #endif passed = false; } } cout << "Test "; if (passed) cout << "Passed" << endl; 
else cout << "---FAILED---" << endl; return passed; }
ca0b3f6abf250eba708427ae5d227f462b1942d9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // TODO(ataei): reduce the apparent redundancy of all the code below. #include <cfloat> #include "caffe2/core/context_gpu.h" #include "caffe2/operators/pool_op.h" namespace caffe2 { namespace { class AveragePool {}; class MaxPool {}; } // namespace namespace { template <typename T> __global__ void Average1DPoolForwardNCHW( const int nthreads, const T* bottom_data, const int num, const int channels, const int height, const int pooled_height, const int kernel_h, const int stride_h, const int pad_t, T* top_data) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int n = index; int ph = n % pooled_height; n /= pooled_height; int c = n % channels; n /= channels; int hstart = ph * stride_h - pad_t; int hend = min(hstart + kernel_h, height); hstart = max(hstart, 0); top_data[index] = 0; int bottom_offset = (n * channels + c) * height; for (int h = hstart; h < hend; ++h) { top_data[index] += bottom_data[bottom_offset + h]; } top_data[index] /= (hend - hstart); } } template <typename T> __global__ void Average2DPoolForwardNCHW( const int nthreads, const T* bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_t, const int pad_l, T* top_data) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int n = index; int pw = n % pooled_width; n /= pooled_width; int ph = n % pooled_height; n /= pooled_height; int c = n % channels; n /= channels; int hstart = ph * stride_h - pad_t; int wstart = pw * stride_w - pad_l; int hend = min(hstart + kernel_h, height); int wend = min(wstart + kernel_w, width); hstart = max(hstart, 0); wstart = max(wstart, 0); top_data[index] = 0; int bottom_offset = (n * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { top_data[index] += bottom_data[bottom_offset + h * 
width + w]; } } top_data[index] /= (hend - hstart) * (wend - wstart); } } template <typename T> __global__ void Average3DPoolForwardNCHW( const int nthreads, const T* bottom_data, const int num, const int channels, const int height, const int width, const int depth, const int pooled_height, const int pooled_width, const int pooled_depth, const int kernel_h, const int kernel_w, const int kernel_d, const int stride_h, const int stride_w, const int stride_d, const int pad_t, const int pad_l, const int pad_f, T* top_data) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int n = index; int pd = n % pooled_depth; n /= pooled_depth; int pw = n % pooled_width; n /= pooled_width; int ph = n % pooled_height; n /= pooled_height; int c = n % channels; n /= channels; int hstart = ph * stride_h - pad_t; int wstart = pw * stride_w - pad_l; int dstart = pd * stride_d - pad_f; int hend = min(hstart + kernel_h, height); int wend = min(wstart + kernel_w, width); int dend = min(dstart + kernel_d, depth); hstart = max(hstart, 0); wstart = max(wstart, 0); dstart = max(dstart, 0); top_data[index] = 0; int bottom_offset = (n * channels + c) * height * width * depth; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { for (int d = dstart; d < dend; ++d) { const int input_index = bottom_offset + h * width * depth + w * depth + d; top_data[index] += bottom_data[input_index]; } } } top_data[index] /= (hend - hstart) * (wend - wstart) * (dend - dstart); } } template <typename T> __global__ void Average1DPoolForwardNHWC( const int nthreads, const T* bottom_data, const int num, const int height, const int channels, const int pooled_height, const int kernel_h, const int stride_h, const int pad_t, T* top_data) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int c = index % channels; int ph = (index / channels) % pooled_height; int n = index / channels / pooled_height; int hstart = ph * stride_h - pad_t; int hend = min(hstart + kernel_h, height); hstart = max(hstart, 0); T output = 0; int 
bottom_offset = n * height * channels + c; for (int h = hstart; h < hend; ++h) { output += bottom_data[bottom_offset + h * channels]; } int pool_size = (hend - hstart); top_data[index] = output / pool_size; } } template <typename T> __global__ void Average2DPoolForwardNHWC( const int nthreads, const T* bottom_data, const int num, const int height, const int width, const int channels, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_t, const int pad_l, T* top_data) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int c = index % channels; int pw = (index / channels) % pooled_width; int ph = (index / channels / pooled_width) % pooled_height; int n = index / channels / pooled_width / pooled_height; int hstart = ph * stride_h - pad_t; int wstart = pw * stride_w - pad_l; int hend = min(hstart + kernel_h, height); int wend = min(wstart + kernel_w, width); hstart = max(hstart, 0); wstart = max(wstart, 0); T output = 0; int bottom_offset = n * height * width * channels + c; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { output += bottom_data[bottom_offset + (h * width + w) * channels]; } } int pool_size = (hend - hstart) * (wend - wstart); top_data[index] = output / pool_size; } } template <typename T> __global__ void Average3DPoolForwardNHWC( const int nthreads, const T* bottom_data, const int num, const int height, const int width, const int depth, const int channels, const int pooled_height, const int pooled_width, const int pooled_depth, const int kernel_h, const int kernel_w, const int kernel_d, const int stride_h, const int stride_w, const int stride_d, const int pad_t, const int pad_l, const int pad_f, T* top_data) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int c = index % channels; int pd = (index / channels) % pooled_depth; int pw = (index / channels / pooled_depth) % pooled_width; int ph = (index / channels / pooled_depth / pooled_width) % 
pooled_height; int n = index / channels / pooled_depth / pooled_width / pooled_height; int hstart = ph * stride_h - pad_t; int wstart = pw * stride_w - pad_l; int dstart = pd * stride_d - pad_f; int hend = min(hstart + kernel_h, height); int wend = min(wstart + kernel_w, width); int dend = min(dstart + kernel_d, depth); hstart = max(hstart, 0); wstart = max(wstart, 0); dstart = max(dstart, 0); T output = 0; int bottom_offset = n * height * width * depth * channels + c; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { for (int d = dstart; d < dend; ++d) { const int bottom_index = bottom_offset + (h * depth * width + w * depth + d) * channels; output += bottom_data[bottom_index]; } } } int pool_size = (hend - hstart) * (wend - wstart) * (dend - dstart); top_data[index] = output / pool_size; } } template <typename T> __global__ void Ave1DPoolBackwardNCHW( const int nthreads, const T* const top_diff, const int num, const int channels, const int height, const int pooled_height, const int kernel_h, const int stride_h, const int pad_t, T* const bottom_diff) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int h = index % height + pad_t; const int c = (index / height) % channels; const int n = index / height / channels; const int phstart = (h < kernel_h) ? 
0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); T gradient = 0; const T* const top_diff_slice = top_diff + (n * channels + c) * pooled_height; for (int ph = phstart; ph < phend; ++ph) { // figure out the pooling size int hstart = ph * stride_h - pad_t; int hend = min(hstart + kernel_h, height); hstart = max(hstart, 0); int pool_size = (hend - hstart); gradient += top_diff_slice[ph] / pool_size; } bottom_diff[index] = gradient; } } template <typename T> __global__ void Ave2DPoolBackwardNCHW( const int nthreads, const T* const top_diff, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_t, const int pad_l, T* const bottom_diff) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int w = index % width + pad_l; const int h = (index / width) % height + pad_t; const int c = (index / width / height) % channels; const int n = index / width / height / channels; const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); const int pwstart = (w < kernel_w) ? 
0 : (w - kernel_w) / stride_w + 1; const int pwend = min(w / stride_w + 1, pooled_width); T gradient = 0; const T* const top_diff_slice = top_diff + (n * channels + c) * pooled_height * pooled_width; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { // figure out the pooling size int hstart = ph * stride_h - pad_t; int wstart = pw * stride_w - pad_l; int hend = min(hstart + kernel_h, height); int wend = min(wstart + kernel_w, width); hstart = max(hstart, 0); wstart = max(wstart, 0); int pool_size = (hend - hstart) * (wend - wstart); gradient += top_diff_slice[ph * pooled_width + pw] / pool_size; } } bottom_diff[index] = gradient; } } template <typename T> __global__ void Ave3DPoolBackwardNCHW( const int nthreads, const T* const top_diff, const int num, const int channels, const int height, const int width, const int depth, const int pooled_height, const int pooled_width, const int pooled_depth, const int kernel_h, const int kernel_w, const int kernel_d, const int stride_h, const int stride_w, const int stride_d, const int pad_t, const int pad_l, const int pad_f, T* const bottom_diff) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int d = index % depth + pad_f; const int w = (index / depth) % width + pad_l; const int h = (index / depth / width) % height + pad_t; const int c = (index / depth / width / height) % channels; const int n = index / depth / width / height / channels; const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; const int pwend = min(w / stride_w + 1, pooled_width); const int pdstart = (d < kernel_d) ? 
0 : (d - kernel_d) / stride_d + 1; const int pdend = min(d / stride_d + 1, pooled_depth); T gradient = 0; const T* const top_diff_slice = top_diff + (n * channels + c) * pooled_height * pooled_width * pooled_depth; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { for (int pd = pdstart; pd < pdend; ++pd) { // figure out the pooling size int hstart = ph * stride_h - pad_t; int wstart = pw * stride_w - pad_l; int dstart = pd * stride_d - pad_f; int hend = min(hstart + kernel_h, height); int wend = min(wstart + kernel_w, width); int dend = min(dstart + kernel_d, depth); hstart = max(hstart, 0); wstart = max(wstart, 0); dstart = max(dstart, 0); int pool_size = (hend - hstart) * (wend - wstart) * (dend - dstart); const int pooled_index = ph * pooled_depth * pooled_width + pooled_depth * pw + pd; gradient += top_diff_slice[pooled_index] / pool_size; } } } bottom_diff[index] = gradient; } } template <typename T> __global__ void Ave1DPoolBackwardNHWC( const int nthreads, const T* const top_diff, const int num, const int height, const int channels, const int pooled_height, const int kernel_h, const int stride_h, const int pad_t, T* const bottom_diff) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int c = index % channels; const int h = (index / channels) % height + pad_t; const int n = index / channels / height; const int phstart = (h < kernel_h) ? 
0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); T gradient = 0; const T* const top_diff_slice = top_diff + n * pooled_height * channels + c; for (int ph = phstart; ph < phend; ++ph) { // figure out the pooling size int hstart = ph * stride_h - pad_t; int hend = min(hstart + kernel_h, height); hstart = max(hstart, 0); int pool_size = (hend - hstart); gradient += top_diff_slice[ph * channels] / pool_size; } bottom_diff[index] = gradient; } } template <typename T> __global__ void Ave2DPoolBackwardNHWC( const int nthreads, const T* const top_diff, const int num, const int height, const int width, const int channels, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_t, const int pad_l, T* const bottom_diff) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int c = index % channels; const int w = index / channels % width + pad_l; const int h = (index / channels / width) % height + pad_t; const int n = index / channels / width / height; const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); const int pwstart = (w < kernel_w) ? 
0 : (w - kernel_w) / stride_w + 1; const int pwend = min(w / stride_w + 1, pooled_width); T gradient = 0; const T* const top_diff_slice = top_diff + n * pooled_height * pooled_width * channels + c; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { // figure out the pooling size int hstart = ph * stride_h - pad_t; int wstart = pw * stride_w - pad_l; int hend = min(hstart + kernel_h, height); int wend = min(wstart + kernel_w, width); hstart = max(hstart, 0); wstart = max(wstart, 0); int pool_size = (hend - hstart) * (wend - wstart); gradient += top_diff_slice[(ph * pooled_width + pw) * channels] / pool_size; } } bottom_diff[index] = gradient; } } template <typename T> __global__ void Ave3DPoolBackwardNHWC( const int nthreads, const T* const top_diff, const int num, const int height, const int width, const int depth, const int channels, const int pooled_height, const int pooled_width, const int pooled_depth, const int kernel_h, const int kernel_w, const int kernel_d, const int stride_h, const int stride_w, const int stride_d, const int pad_t, const int pad_l, const int pad_f, T* const bottom_diff) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int c = index % channels; const int d = index / channels % depth + pad_f; const int w = (index / channels / depth) % width + pad_l; const int h = (index / channels / depth / width) % height + pad_t; const int n = index / channels / depth / width / height; const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; const int pwend = min(w / stride_w + 1, pooled_width); const int pdstart = (d < kernel_d) ? 
0 : (d - kernel_d) / stride_d + 1; const int pdend = min(d / stride_d + 1, pooled_depth); T gradient = 0; const T* const top_diff_slice = top_diff + n * pooled_height * pooled_width * pooled_depth * channels + c; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { for (int pd = pdstart; pd < pdend; ++pd) { // figure out the pooling size int hstart = ph * stride_h - pad_t; int wstart = pw * stride_w - pad_l; int dstart = pd * stride_d - pad_f; int hend = min(hstart + kernel_h, height); int wend = min(wstart + kernel_w, width); int dend = min(dstart + kernel_d, depth); hstart = max(hstart, 0); wstart = max(wstart, 0); dstart = max(dstart, 0); int pool_size = (hend - hstart) * (wend - wstart) * (dend - dstart); const int pooled_index = (ph * pooled_depth * pooled_width + pw * pooled_depth + pd) * channels; gradient += top_diff_slice[pooled_index] / pool_size; } } } bottom_diff[index] = gradient; } } } // namespace template <> bool PoolOp<float, CUDAContext, AveragePool>::RunOnDeviceWithOrderNCHW() { auto& X = Input(0); auto* Y = Output(0); ConvPoolOpBase<CUDAContext>::SetOutputSize(X, Y, X.dim32(1)); int output_size = Y->size(); switch (kernel_.size()) { case 1: hipLaunchKernelGGL(( Average1DPoolForwardNCHW<float>) , dim3(CAFFE_GET_BLOCKS(output_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), output_size, X.data<float>(), X.dim32(0), X.dim32(1), X.dim32(2), Y->dim32(2), kernel_h(), stride_h(), pad_t(), Y->template mutable_data<float>()); break; case 2: hipLaunchKernelGGL(( Average2DPoolForwardNCHW<float>) , dim3(CAFFE_GET_BLOCKS(output_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), output_size, X.data<float>(), X.dim32(0), X.dim32(1), X.dim32(2), X.dim32(3), Y->dim32(2), Y->dim32(3), kernel_h(), kernel_w(), stride_h(), stride_w(), pad_t(), pad_l(), Y->template mutable_data<float>()); break; case 3: hipLaunchKernelGGL(( Average3DPoolForwardNCHW<float>) , dim3(CAFFE_GET_BLOCKS(output_size)), 
dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), output_size, X.data<float>(), X.dim32(0), X.dim32(1), X.dim32(2), X.dim32(3), X.dim32(4), Y->dim32(2), Y->dim32(3), Y->dim32(4), kernel_h(), kernel_w(), kernel_[2], stride_h(), stride_w(), stride_[2], pad_t(), pad_l(), pads_[2], Y->template mutable_data<float>()); break; default: CAFFE_THROW("Unsupported pooling size : ", kernel_.size()); } return true; } template <> bool PoolOp<float, CUDAContext, AveragePool>::RunOnDeviceWithOrderNHWC() { auto& X = Input(0); auto* Y = Output(0); ConvPoolOpBase<CUDAContext>::SetOutputSize(X, Y, X.dim32(X.ndim() - 1)); int output_size = Y->size(); switch (kernel_.size()) { case 1: hipLaunchKernelGGL(( Average1DPoolForwardNHWC<float>) , dim3(CAFFE_GET_BLOCKS(output_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), output_size, X.data<float>(), X.dim32(0), X.dim32(1), X.dim32(2), Y->dim32(1), kernel_h(), stride_h(), pad_t(), Y->template mutable_data<float>()); break; case 2: hipLaunchKernelGGL(( Average2DPoolForwardNHWC<float>) , dim3(CAFFE_GET_BLOCKS(output_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), output_size, X.data<float>(), X.dim32(0), X.dim32(1), X.dim32(2), X.dim32(3), Y->dim32(1), Y->dim32(2), kernel_h(), kernel_w(), stride_h(), stride_w(), pad_t(), pad_l(), Y->template mutable_data<float>()); break; case 3: hipLaunchKernelGGL(( Average3DPoolForwardNHWC<float>) , dim3(CAFFE_GET_BLOCKS(output_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), output_size, X.data<float>(), X.dim32(0), X.dim32(1), X.dim32(2), X.dim32(3), X.dim32(4), Y->dim32(1), Y->dim32(2), Y->dim32(3), kernel_h(), kernel_w(), kernel_[2], stride_h(), stride_w(), stride_[2], pad_t(), pad_l(), pads_[2], Y->template mutable_data<float>()); break; default: CAFFE_THROW("Unsupported pooling size : ", kernel_.size()); } return true; } template <> bool PoolGradientOp<float, CUDAContext, AveragePool>:: RunOnDeviceWithOrderNCHW() { auto& X = Input(0); auto& dY = 
Input(2); CAFFE_ENFORCE_EQ(dY.dim32(1), X.dim32(1)); auto* dX = Output(0); dX->ResizeLike(X); vector<int> dims(X.dims().begin() + 2, X.dims().end()); ConvPoolOpBase<CUDAContext>::ComputePads(dims); switch (kernel_.size()) { case 1: hipLaunchKernelGGL(( Ave1DPoolBackwardNCHW<float>) , dim3(CAFFE_GET_BLOCKS(X.size())), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), X.size(), dY.data<float>(), X.dim32(0), X.dim32(1), X.dim32(2), dY.dim32(2), kernel_h(), stride_h(), pad_t(), dX->template mutable_data<float>()); break; case 2: hipLaunchKernelGGL(( Ave2DPoolBackwardNCHW<float>) , dim3(CAFFE_GET_BLOCKS(X.size())), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), X.size(), dY.data<float>(), X.dim32(0), X.dim32(1), X.dim32(2), X.dim32(3), dY.dim32(2), dY.dim32(3), kernel_h(), kernel_w(), stride_h(), stride_w(), pad_t(), pad_l(), dX->template mutable_data<float>()); break; case 3: hipLaunchKernelGGL(( Ave3DPoolBackwardNCHW<float>) , dim3(CAFFE_GET_BLOCKS(X.size())), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), X.size(), dY.data<float>(), X.dim32(0), X.dim32(1), X.dim32(2), X.dim32(3), X.dim32(4), dY.dim32(2), dY.dim32(3), dY.dim32(4), kernel_h(), kernel_w(), kernel_[2], stride_h(), stride_w(), stride_[2], pad_t(), pad_l(), pads_[2], dX->template mutable_data<float>()); break; default: CAFFE_THROW("Unsupported pooling size : ", kernel_.size()); } return true; } template <> bool PoolGradientOp<float, CUDAContext, AveragePool>:: RunOnDeviceWithOrderNHWC() { auto& X = Input(0); auto& dY = Input(2); CAFFE_ENFORCE_EQ(X.ndim(), dY.ndim()); CAFFE_ENFORCE_EQ(X.dim32(X.ndim() - 1), dY.dim32(dY.ndim() - 1)); auto* dX = Output(0); dX->ResizeLike(X); vector<int> dims(X.dims().begin() + 1, X.dims().end() - 1); ConvPoolOpBase<CUDAContext>::ComputePads(dims); switch (kernel_.size()) { case 1: hipLaunchKernelGGL(( Ave1DPoolBackwardNHWC<float>) , dim3(CAFFE_GET_BLOCKS(X.size())), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), X.size(), dY.data<float>(), 
X.dim32(0), X.dim32(1), X.dim32(2), dY.dim32(1), kernel_h(), stride_h(), pad_t(), dX->template mutable_data<float>()); break; case 2: hipLaunchKernelGGL(( Ave2DPoolBackwardNHWC<float>) , dim3(CAFFE_GET_BLOCKS(X.size())), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), X.size(), dY.data<float>(), X.dim32(0), X.dim32(1), X.dim32(2), X.dim32(3), dY.dim32(1), dY.dim32(2), kernel_h(), kernel_w(), stride_h(), stride_w(), pad_t(), pad_l(), dX->template mutable_data<float>()); break; case 3: hipLaunchKernelGGL(( Ave3DPoolBackwardNHWC<float>) , dim3(CAFFE_GET_BLOCKS(X.size())), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), X.size(), dY.data<float>(), X.dim32(0), X.dim32(1), X.dim32(2), X.dim32(3), X.dim32(4), dY.dim32(1), dY.dim32(2), dY.dim32(3), kernel_h(), kernel_w(), kernel_[2], stride_h(), stride_w(), stride_[2], pad_t(), pad_l(), pads_[2], dX->template mutable_data<float>()); break; default: CAFFE_THROW("Unsupported pooling size : ", kernel_.size()); } return true; } namespace { template <typename T> __global__ void MaxPool1DForwardNCHW( const int nthreads, const T* bottom_data, const int channels, const int height, const int pooled_height, const int kernel_h, const int stride_h, const int pad_t, T* top_data) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int ph = index % pooled_height; int c = (index / pooled_height) % channels; int n = index / pooled_height / channels; int hstart = ph * stride_h - pad_t; int hend = min(hstart + kernel_h, height); hstart = max(hstart, 0); T maxval = -FLT_MAX; const T* bdata_offset = bottom_data + n * channels * height; for (int h = hstart; h < hend; ++h) { int idx = c * height + h; if (bdata_offset[idx] > maxval) { maxval = bdata_offset[idx]; } } top_data[index] = maxval; } } template <typename T> __global__ void MaxPool2DForwardNCHW( const int nthreads, const T* bottom_data, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, 
const int stride_h, const int stride_w, const int pad_t, const int pad_l, T* top_data) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; int hstart = ph * stride_h - pad_t; int wstart = pw * stride_w - pad_l; int hend = min(hstart + kernel_h, height); int wend = min(wstart + kernel_w, width); hstart = max(hstart, 0); wstart = max(wstart, 0); T maxval = -FLT_MAX; const T* bdata_offset = bottom_data + n * channels * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { int idx = c * height * width + h * width + w; if (bdata_offset[idx] > maxval) { maxval = bdata_offset[idx]; } } } top_data[index] = maxval; } } template <typename T> __global__ void MaxPool3DForwardNCHW( const int nthreads, const T* bottom_data, const int channels, const int height, const int width, const int depth, const int pooled_height, const int pooled_width, const int pooled_depth, const int kernel_h, const int kernel_w, const int kernel_d, const int stride_h, const int stride_w, const int stride_d, const int pad_t, const int pad_l, const int pad_f, T* top_data) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int pd = index % pooled_depth; int pw = (index / pooled_depth) % pooled_width; int ph = (index / pooled_depth / pooled_width) % pooled_height; int c = (index / pooled_depth / pooled_width / pooled_height) % channels; int n = index / pooled_depth / pooled_width / pooled_height / channels; int hstart = ph * stride_h - pad_t; int wstart = pw * stride_w - pad_l; int hend = min(hstart + kernel_h, height); int wend = min(wstart + kernel_w, width); int dstart = pd * stride_d - pad_f; int dend = min(dstart + kernel_d, depth); hstart = max(hstart, 0); wstart = max(wstart, 0); dstart = max(dstart, 0); T maxval = -FLT_MAX; const T* bdata_offset = bottom_data + n * channels * height * 
width * depth; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { for (int d = dstart; d < dend; ++d) { int idx = ((c * height + h) * width + w) * depth + d; if (bdata_offset[idx] > maxval) { maxval = bdata_offset[idx]; } } } } top_data[index] = maxval; } } template <typename T> __global__ void MaxPool1DForwardNHWC( const int nthreads, const T* bottom_data, const int height, const int channels, const int pooled_height, const int kernel_h, const int stride_h, const int pad_t, T* top_data) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int n = index; int c = n % channels; n /= channels; int hstart = (n % pooled_height) * stride_h - pad_t; n /= pooled_height; int hend = min(hstart + kernel_h, height); hstart = max(hstart, 0); T maxval = -FLT_MAX; const T* bdata_offset = bottom_data + n * height * channels; for (int h = hstart; h < hend; ++h) { int idx = h * channels + c; if (bdata_offset[idx] > maxval) { maxval = bdata_offset[idx]; } } top_data[index] = maxval; } } template <typename T> __global__ void MaxPool2DForwardNHWC( const int nthreads, const T* bottom_data, const int height, const int width, const int channels, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_t, const int pad_l, T* top_data) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int n = index; int c = n % channels; n /= channels; int wstart = (n % pooled_width) * stride_w - pad_l; n /= pooled_width; int hstart = (n % pooled_height) * stride_h - pad_t; n /= pooled_height; int hend = min(hstart + kernel_h, height); int wend = min(wstart + kernel_w, width); hstart = max(hstart, 0); wstart = max(wstart, 0); T maxval = -FLT_MAX; const T* bdata_offset = bottom_data + n * height * width * channels; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { int idx = (h * width + w) * channels + c; if (bdata_offset[idx] > maxval) { maxval = bdata_offset[idx]; } } } top_data[index] = 
maxval; } } template <typename T> __global__ void MaxPool3DForwardNHWC( const int nthreads, const T* bottom_data, const int height, const int width, const int depth, const int channels, const int pooled_height, const int pooled_width, const int pooled_depth, const int kernel_h, const int kernel_w, const int kernel_d, const int stride_h, const int stride_w, const int stride_d, const int pad_t, const int pad_l, const int pad_f, T* top_data) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int n = index; int c = n % channels; n /= channels; int dstart = (n % pooled_depth) * stride_d - pad_f; n /= pooled_depth; int wstart = (n % pooled_width) * stride_w - pad_l; n /= pooled_width; int hstart = (n % pooled_height) * stride_h - pad_t; n /= pooled_height; int hend = min(hstart + kernel_h, height); int wend = min(wstart + kernel_w, width); int dend = min(dstart + kernel_d, depth); hstart = max(hstart, 0); wstart = max(wstart, 0); dstart = max(dstart, 0); T maxval = -FLT_MAX; const T* bdata_offset = bottom_data + n * height * width * depth * channels; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { for (int d = dstart; d < dend; ++d) { int idx = ((h * width + w) * depth + d) * channels + c; if (bdata_offset[idx] > maxval) { maxval = bdata_offset[idx]; } } } } top_data[index] = maxval; } } template <typename T> __global__ void MaxPool1DBackwardNCHW( const int nthreads, const T* const bottom_data, const T* const top_data, const T* const top_diff, const int num, const int channels, const int height, const int pooled_height, const int kernel_h, const int stride_h, const int pad_t, T* const bottom_diff) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int h = index % height + pad_t; const int c = (index / height) % channels; const int n = index / height / channels; const int phstart = (h < kernel_h) ? 
0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); const int top_offset = (n * channels + c) * pooled_height; bottom_diff[index] = 0; for (int ph = phstart; ph < phend; ++ph) { int top_local_offset = top_offset + ph; if (bottom_data[index] == top_data[top_local_offset]) { bottom_diff[index] += top_diff[top_local_offset]; } } } } template <typename T> __global__ void MaxPool2DBackwardNCHW( const int nthreads, const T* const bottom_data, const T* const top_data, const T* const top_diff, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_t, const int pad_l, T* const bottom_diff) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int w = index % width + pad_l; const int h = (index / width) % height + pad_t; const int c = (index / width / height) % channels; const int n = index / width / height / channels; const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); const int pwstart = (w < kernel_w) ? 
0 : (w - kernel_w) / stride_w + 1; const int pwend = min(w / stride_w + 1, pooled_width); const int top_offset = (n * channels + c) * pooled_height * pooled_width; bottom_diff[index] = 0; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { int top_local_offset = top_offset + ph * pooled_width + pw; if (bottom_data[index] == top_data[top_local_offset]) { bottom_diff[index] += top_diff[top_local_offset]; } } } } } template <typename T> __global__ void MaxPool3DBackwardNCHW( const int nthreads, const T* const bottom_data, const T* const top_data, const T* const top_diff, const int num, const int channels, const int height, const int width, const int depth, const int pooled_height, const int pooled_width, const int pooled_depth, const int kernel_h, const int kernel_w, const int kernel_d, const int stride_h, const int stride_w, const int stride_d, const int pad_t, const int pad_l, const int pad_f, T* const bottom_diff) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int d = index % depth + pad_f; const int w = (index / depth) % width + pad_l; const int h = (index / depth / width) % height + pad_t; const int c = (index / depth / width / height) % channels; const int n = index / depth / width / height / channels; const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; const int pwend = min(w / stride_w + 1, pooled_width); const int pdstart = (d < kernel_d) ? 
0 : (d - kernel_d) / stride_d + 1; const int pdend = min(d / stride_d + 1, pooled_depth); const int top_offset = (n * channels + c) * pooled_height * pooled_width * pooled_depth; bottom_diff[index] = 0; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { for (int pd = pdstart; pd < pdend; ++pd) { int top_local_offset = top_offset + (ph * pooled_width + pw) * pooled_depth + pd; if (bottom_data[index] == top_data[top_local_offset]) { bottom_diff[index] += top_diff[top_local_offset]; } } } } } } template <typename T> __global__ void MaxPool1DBackwardNHWC( const int nthreads, const T* const bottom_data, const T* const top_data, const T* const top_diff, const int num, const int height, const int channels, const int pooled_height, const int kernel_h, const int stride_h, const int pad_t, T* const bottom_diff) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int c = index % channels; const int h = (index / channels) % height + pad_t; const int n = index / channels / height; const int phstart = (h < kernel_h) ? 
0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); const int top_offset = n * pooled_height * channels + c; bottom_diff[index] = 0; for (int ph = phstart; ph < phend; ++ph) { int top_local_offset = top_offset + ph * channels; if (bottom_data[index] == top_data[top_local_offset]) { bottom_diff[index] += top_diff[top_local_offset]; } } } } template <typename T> __global__ void MaxPool2DBackwardNHWC( const int nthreads, const T* const bottom_data, const T* const top_data, const T* const top_diff, const int num, const int height, const int width, const int channels, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_t, const int pad_l, T* const bottom_diff) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int c = index % channels; const int w = index / channels % width + pad_l; const int h = (index / channels / width) % height + pad_t; const int n = index / channels / width / height; const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); const int pwstart = (w < kernel_w) ? 
0 : (w - kernel_w) / stride_w + 1; const int pwend = min(w / stride_w + 1, pooled_width); const int top_offset = n * pooled_height * pooled_width * channels + c; bottom_diff[index] = 0; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { int top_local_offset = top_offset + (ph * pooled_width + pw) * channels; if (bottom_data[index] == top_data[top_local_offset]) { bottom_diff[index] += top_diff[top_local_offset]; } } } } } template <typename T> __global__ void MaxPool3DBackwardNHWC( const int nthreads, const T* const bottom_data, const T* const top_data, const T* const top_diff, const int num, const int height, const int width, const int depth, const int channels, const int pooled_height, const int pooled_width, const int pooled_depth, const int kernel_h, const int kernel_w, const int kernel_d, const int stride_h, const int stride_w, const int stride_d, const int pad_t, const int pad_l, const int pad_f, T* const bottom_diff) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int c = index % channels; const int d = index / channels % depth + pad_f; const int w = (index / depth / channels) % width + pad_l; const int h = (index / channels / depth / width) % height + pad_t; const int n = index / channels / depth / width / height; const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; const int pwend = min(w / stride_w + 1, pooled_width); const int pdstart = (d < kernel_d) ? 
0 : (d - kernel_d) / stride_d + 1; const int pdend = min(d / stride_d + 1, pooled_depth); const int top_offset = n * pooled_height * pooled_width * pooled_depth * channels + c; bottom_diff[index] = 0; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { for (int pd = pdstart; pd < pdend; ++pd) { int top_local_offset = top_offset + ((ph * pooled_width + pw) * pooled_depth + d) * channels; if (bottom_data[index] == top_data[top_local_offset]) { bottom_diff[index] += top_diff[top_local_offset]; } } } } } } } // namespace template <> bool PoolOp<float, CUDAContext, MaxPool>::RunOnDeviceWithOrderNCHW() { auto& X = Input(0); auto* Y = Output(0); ConvPoolOpBase<CUDAContext>::SetOutputSize(X, Y, X.dim32(1)); int output_size = Y->size(); switch (kernel_.size()) { case 1: hipLaunchKernelGGL(( MaxPool1DForwardNCHW<float>) , dim3(CAFFE_GET_BLOCKS(output_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), output_size, X.data<float>(), X.dim32(1), X.dim32(2), Y->dim32(2), kernel_h(), stride_h(), pad_t(), Y->template mutable_data<float>()); break; case 2: hipLaunchKernelGGL(( MaxPool2DForwardNCHW<float>) , dim3(CAFFE_GET_BLOCKS(output_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), output_size, X.data<float>(), X.dim32(1), X.dim32(2), X.dim32(3), Y->dim32(2), Y->dim32(3), kernel_h(), kernel_w(), stride_h(), stride_w(), pad_t(), pad_l(), Y->template mutable_data<float>()); break; case 3: hipLaunchKernelGGL(( MaxPool3DForwardNCHW<float>) , dim3(CAFFE_GET_BLOCKS(output_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), output_size, X.data<float>(), X.dim32(1), X.dim32(2), X.dim32(3), X.dim32(4), Y->dim32(2), Y->dim32(3), Y->dim32(4), kernel_h(), kernel_w(), kernel_[2], stride_h(), stride_w(), stride_[2], pad_t(), pad_l(), pads_[2], Y->template mutable_data<float>()); break; default: CAFFE_THROW("Unsupported pooling size : ", kernel_.size()); } return true; } template <> bool PoolOp<float, CUDAContext, 
MaxPool>::RunOnDeviceWithOrderNHWC() { auto& X = Input(0); auto* Y = Output(0); ConvPoolOpBase<CUDAContext>::SetOutputSize(X, Y, X.dim32(X.ndim() - 1)); int output_size = Y->size(); switch (kernel_.size()) { case 1: hipLaunchKernelGGL(( MaxPool1DForwardNHWC<float>) , dim3(CAFFE_GET_BLOCKS(output_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), output_size, X.data<float>(), X.dim32(1), X.dim32(2), Y->dim32(1), kernel_h(), stride_h(), pad_t(), Y->template mutable_data<float>()); break; case 2: hipLaunchKernelGGL(( MaxPool2DForwardNHWC<float>) , dim3(CAFFE_GET_BLOCKS(output_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), output_size, X.data<float>(), X.dim32(1), X.dim32(2), X.dim32(3), Y->dim32(1), Y->dim32(2), kernel_h(), kernel_w(), stride_h(), stride_w(), pad_t(), pad_l(), Y->template mutable_data<float>()); break; case 3: hipLaunchKernelGGL(( MaxPool3DForwardNHWC<float>) , dim3(CAFFE_GET_BLOCKS(output_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), output_size, X.data<float>(), X.dim32(1), X.dim32(2), X.dim32(3), X.dim32(4), Y->dim32(1), Y->dim32(2), Y->dim32(3), kernel_h(), kernel_w(), kernel_[2], stride_h(), stride_w(), stride_[2], pad_t(), pad_l(), pads_[2], Y->template mutable_data<float>()); break; default: CAFFE_THROW("Unsupported pooling size : ", kernel_.size()); } return true; } template <> bool PoolGradientOp<float, CUDAContext, MaxPool>::RunOnDeviceWithOrderNCHW() { auto& X = Input(0); auto& Y = Input(1); auto& dY = Input(2); CAFFE_ENFORCE_EQ(dY.ndim(), X.ndim()); auto* dX = Output(0); dX->ResizeLike(X); vector<int> dims(X.dims().begin() + 2, X.dims().end()); ConvPoolOpBase<CUDAContext>::ComputePads(dims); switch (kernel_.size()) { case 1: hipLaunchKernelGGL(( MaxPool1DBackwardNCHW<float>) , dim3(CAFFE_GET_BLOCKS(X.size())), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), X.size(), X.data<float>(), Y.data<float>(), dY.data<float>(), X.dim32(0), X.dim32(1), X.dim32(2), dY.dim32(2), kernel_h(), 
stride_h(), pad_t(), dX->template mutable_data<float>()); break; case 2: hipLaunchKernelGGL(( MaxPool2DBackwardNCHW<float>) , dim3(CAFFE_GET_BLOCKS(X.size())), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), X.size(), X.data<float>(), Y.data<float>(), dY.data<float>(), X.dim32(0), X.dim32(1), X.dim32(2), X.dim32(3), dY.dim32(2), dY.dim32(3), kernel_h(), kernel_w(), stride_h(), stride_w(), pad_t(), pad_l(), dX->template mutable_data<float>()); break; case 3: hipLaunchKernelGGL(( MaxPool3DBackwardNCHW<float>) , dim3(CAFFE_GET_BLOCKS(X.size())), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), X.size(), X.data<float>(), Y.data<float>(), dY.data<float>(), X.dim32(0), X.dim32(1), X.dim32(2), X.dim32(3), X.dim32(4), dY.dim32(2), dY.dim32(3), dY.dim32(4), kernel_h(), kernel_w(), kernel_[2], stride_h(), stride_w(), stride_[2], pad_t(), pad_l(), pads_[2], dX->template mutable_data<float>()); break; default: CAFFE_THROW("Unsupported pooling size : ", kernel_.size()); } return true; } template <> bool PoolGradientOp<float, CUDAContext, MaxPool>::RunOnDeviceWithOrderNHWC() { auto& X = Input(0); auto& Y = Input(1); auto& dY = Input(2); CAFFE_ENFORCE_EQ(dY.ndim(), X.ndim()); auto* dX = Output(0); dX->ResizeLike(X); vector<int> dims(X.dims().begin() + 1, X.dims().end() - 1); ConvPoolOpBase<CUDAContext>::ComputePads(dims); switch (kernel_.size()) { case 1: hipLaunchKernelGGL(( MaxPool1DBackwardNHWC<float>) , dim3(CAFFE_GET_BLOCKS(X.size())), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), X.size(), X.data<float>(), Y.data<float>(), dY.data<float>(), X.dim32(0), X.dim32(1), X.dim32(2), dY.dim32(1), kernel_h(), stride_h(), pad_t(), dX->template mutable_data<float>()); break; case 2: hipLaunchKernelGGL(( MaxPool2DBackwardNHWC<float>) , dim3(CAFFE_GET_BLOCKS(X.size())), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), X.size(), X.data<float>(), Y.data<float>(), dY.data<float>(), X.dim32(0), X.dim32(1), X.dim32(2), X.dim32(3), dY.dim32(1), dY.dim32(2), 
kernel_h(), kernel_w(), stride_h(), stride_w(), pad_t(), pad_l(), dX->template mutable_data<float>()); break; case 3: hipLaunchKernelGGL(( MaxPool3DBackwardNHWC<float>) , dim3(CAFFE_GET_BLOCKS(X.size())), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), X.size(), X.data<float>(), Y.data<float>(), dY.data<float>(), X.dim32(0), X.dim32(1), X.dim32(2), X.dim32(3), X.dim32(4), dY.dim32(1), dY.dim32(2), dY.dim32(3), kernel_h(), kernel_w(), kernel_[2], stride_h(), stride_w(), stride_[2], pad_t(), pad_l(), pads_[2], dX->template mutable_data<float>()); break; default: CAFFE_THROW("Unsupported pooling size : ", kernel_.size()); } return true; } REGISTER_CUDA_OPERATOR(AveragePool, PoolOp<float, CUDAContext, AveragePool>); REGISTER_CUDA_OPERATOR(AveragePoolGradient, PoolGradientOp<float, CUDAContext, AveragePool>); REGISTER_CUDA_OPERATOR(AveragePool1D, PoolOp<float, CUDAContext, AveragePool>); REGISTER_CUDA_OPERATOR( AveragePool1DGradient, PoolGradientOp<float, CUDAContext, AveragePool>); REGISTER_CUDA_OPERATOR(AveragePool2D, PoolOp<float, CUDAContext, AveragePool>); REGISTER_CUDA_OPERATOR( AveragePool2DGradient, PoolGradientOp<float, CUDAContext, AveragePool>); REGISTER_CUDA_OPERATOR(AveragePool3D, PoolOp<float, CUDAContext, AveragePool>); REGISTER_CUDA_OPERATOR( AveragePool3DGradient, PoolGradientOp<float, CUDAContext, AveragePool>); REGISTER_CUDA_OPERATOR(MaxPool, PoolOp<float, CUDAContext, MaxPool>); REGISTER_CUDA_OPERATOR(MaxPoolGradient, PoolGradientOp<float, CUDAContext, MaxPool>); REGISTER_CUDA_OPERATOR(MaxPool1D, PoolOp<float, CUDAContext, MaxPool>); REGISTER_CUDA_OPERATOR( MaxPool1DGradient, PoolGradientOp<float, CUDAContext, MaxPool>); REGISTER_CUDA_OPERATOR(MaxPool2D, PoolOp<float, CUDAContext, MaxPool>); REGISTER_CUDA_OPERATOR( MaxPool2DGradient, PoolGradientOp<float, CUDAContext, MaxPool>); REGISTER_CUDA_OPERATOR(MaxPool3D, PoolOp<float, CUDAContext, MaxPool>); REGISTER_CUDA_OPERATOR( MaxPool3DGradient, PoolGradientOp<float, CUDAContext, MaxPool>); } // 
namespace caffe2
ca0b3f6abf250eba708427ae5d227f462b1942d9.cu
// TODO(ataei): reduce the apparent redundancy of all the code below. #include <cfloat> #include "caffe2/core/context_gpu.h" #include "caffe2/operators/pool_op.h" namespace caffe2 { namespace { class AveragePool {}; class MaxPool {}; } // namespace namespace { template <typename T> __global__ void Average1DPoolForwardNCHW( const int nthreads, const T* bottom_data, const int num, const int channels, const int height, const int pooled_height, const int kernel_h, const int stride_h, const int pad_t, T* top_data) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int n = index; int ph = n % pooled_height; n /= pooled_height; int c = n % channels; n /= channels; int hstart = ph * stride_h - pad_t; int hend = min(hstart + kernel_h, height); hstart = max(hstart, 0); top_data[index] = 0; int bottom_offset = (n * channels + c) * height; for (int h = hstart; h < hend; ++h) { top_data[index] += bottom_data[bottom_offset + h]; } top_data[index] /= (hend - hstart); } } template <typename T> __global__ void Average2DPoolForwardNCHW( const int nthreads, const T* bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_t, const int pad_l, T* top_data) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int n = index; int pw = n % pooled_width; n /= pooled_width; int ph = n % pooled_height; n /= pooled_height; int c = n % channels; n /= channels; int hstart = ph * stride_h - pad_t; int wstart = pw * stride_w - pad_l; int hend = min(hstart + kernel_h, height); int wend = min(wstart + kernel_w, width); hstart = max(hstart, 0); wstart = max(wstart, 0); top_data[index] = 0; int bottom_offset = (n * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { top_data[index] += bottom_data[bottom_offset + h * width + w]; } } top_data[index] /= (hend - hstart) * (wend - wstart); } } template 
<typename T> __global__ void Average3DPoolForwardNCHW( const int nthreads, const T* bottom_data, const int num, const int channels, const int height, const int width, const int depth, const int pooled_height, const int pooled_width, const int pooled_depth, const int kernel_h, const int kernel_w, const int kernel_d, const int stride_h, const int stride_w, const int stride_d, const int pad_t, const int pad_l, const int pad_f, T* top_data) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int n = index; int pd = n % pooled_depth; n /= pooled_depth; int pw = n % pooled_width; n /= pooled_width; int ph = n % pooled_height; n /= pooled_height; int c = n % channels; n /= channels; int hstart = ph * stride_h - pad_t; int wstart = pw * stride_w - pad_l; int dstart = pd * stride_d - pad_f; int hend = min(hstart + kernel_h, height); int wend = min(wstart + kernel_w, width); int dend = min(dstart + kernel_d, depth); hstart = max(hstart, 0); wstart = max(wstart, 0); dstart = max(dstart, 0); top_data[index] = 0; int bottom_offset = (n * channels + c) * height * width * depth; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { for (int d = dstart; d < dend; ++d) { const int input_index = bottom_offset + h * width * depth + w * depth + d; top_data[index] += bottom_data[input_index]; } } } top_data[index] /= (hend - hstart) * (wend - wstart) * (dend - dstart); } } template <typename T> __global__ void Average1DPoolForwardNHWC( const int nthreads, const T* bottom_data, const int num, const int height, const int channels, const int pooled_height, const int kernel_h, const int stride_h, const int pad_t, T* top_data) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int c = index % channels; int ph = (index / channels) % pooled_height; int n = index / channels / pooled_height; int hstart = ph * stride_h - pad_t; int hend = min(hstart + kernel_h, height); hstart = max(hstart, 0); T output = 0; int bottom_offset = n * height * channels + c; for (int h = hstart; h < hend; ++h) { 
output += bottom_data[bottom_offset + h * channels]; } int pool_size = (hend - hstart); top_data[index] = output / pool_size; } } template <typename T> __global__ void Average2DPoolForwardNHWC( const int nthreads, const T* bottom_data, const int num, const int height, const int width, const int channels, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_t, const int pad_l, T* top_data) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int c = index % channels; int pw = (index / channels) % pooled_width; int ph = (index / channels / pooled_width) % pooled_height; int n = index / channels / pooled_width / pooled_height; int hstart = ph * stride_h - pad_t; int wstart = pw * stride_w - pad_l; int hend = min(hstart + kernel_h, height); int wend = min(wstart + kernel_w, width); hstart = max(hstart, 0); wstart = max(wstart, 0); T output = 0; int bottom_offset = n * height * width * channels + c; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { output += bottom_data[bottom_offset + (h * width + w) * channels]; } } int pool_size = (hend - hstart) * (wend - wstart); top_data[index] = output / pool_size; } } template <typename T> __global__ void Average3DPoolForwardNHWC( const int nthreads, const T* bottom_data, const int num, const int height, const int width, const int depth, const int channels, const int pooled_height, const int pooled_width, const int pooled_depth, const int kernel_h, const int kernel_w, const int kernel_d, const int stride_h, const int stride_w, const int stride_d, const int pad_t, const int pad_l, const int pad_f, T* top_data) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int c = index % channels; int pd = (index / channels) % pooled_depth; int pw = (index / channels / pooled_depth) % pooled_width; int ph = (index / channels / pooled_depth / pooled_width) % pooled_height; int n = index / channels / pooled_depth / pooled_width / pooled_height; int 
hstart = ph * stride_h - pad_t; int wstart = pw * stride_w - pad_l; int dstart = pd * stride_d - pad_f; int hend = min(hstart + kernel_h, height); int wend = min(wstart + kernel_w, width); int dend = min(dstart + kernel_d, depth); hstart = max(hstart, 0); wstart = max(wstart, 0); dstart = max(dstart, 0); T output = 0; int bottom_offset = n * height * width * depth * channels + c; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { for (int d = dstart; d < dend; ++d) { const int bottom_index = bottom_offset + (h * depth * width + w * depth + d) * channels; output += bottom_data[bottom_index]; } } } int pool_size = (hend - hstart) * (wend - wstart) * (dend - dstart); top_data[index] = output / pool_size; } } template <typename T> __global__ void Ave1DPoolBackwardNCHW( const int nthreads, const T* const top_diff, const int num, const int channels, const int height, const int pooled_height, const int kernel_h, const int stride_h, const int pad_t, T* const bottom_diff) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int h = index % height + pad_t; const int c = (index / height) % channels; const int n = index / height / channels; const int phstart = (h < kernel_h) ? 
0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); T gradient = 0; const T* const top_diff_slice = top_diff + (n * channels + c) * pooled_height; for (int ph = phstart; ph < phend; ++ph) { // figure out the pooling size int hstart = ph * stride_h - pad_t; int hend = min(hstart + kernel_h, height); hstart = max(hstart, 0); int pool_size = (hend - hstart); gradient += top_diff_slice[ph] / pool_size; } bottom_diff[index] = gradient; } } template <typename T> __global__ void Ave2DPoolBackwardNCHW( const int nthreads, const T* const top_diff, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_t, const int pad_l, T* const bottom_diff) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int w = index % width + pad_l; const int h = (index / width) % height + pad_t; const int c = (index / width / height) % channels; const int n = index / width / height / channels; const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); const int pwstart = (w < kernel_w) ? 
0 : (w - kernel_w) / stride_w + 1; const int pwend = min(w / stride_w + 1, pooled_width); T gradient = 0; const T* const top_diff_slice = top_diff + (n * channels + c) * pooled_height * pooled_width; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { // figure out the pooling size int hstart = ph * stride_h - pad_t; int wstart = pw * stride_w - pad_l; int hend = min(hstart + kernel_h, height); int wend = min(wstart + kernel_w, width); hstart = max(hstart, 0); wstart = max(wstart, 0); int pool_size = (hend - hstart) * (wend - wstart); gradient += top_diff_slice[ph * pooled_width + pw] / pool_size; } } bottom_diff[index] = gradient; } } template <typename T> __global__ void Ave3DPoolBackwardNCHW( const int nthreads, const T* const top_diff, const int num, const int channels, const int height, const int width, const int depth, const int pooled_height, const int pooled_width, const int pooled_depth, const int kernel_h, const int kernel_w, const int kernel_d, const int stride_h, const int stride_w, const int stride_d, const int pad_t, const int pad_l, const int pad_f, T* const bottom_diff) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int d = index % depth + pad_f; const int w = (index / depth) % width + pad_l; const int h = (index / depth / width) % height + pad_t; const int c = (index / depth / width / height) % channels; const int n = index / depth / width / height / channels; const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; const int pwend = min(w / stride_w + 1, pooled_width); const int pdstart = (d < kernel_d) ? 
0 : (d - kernel_d) / stride_d + 1; const int pdend = min(d / stride_d + 1, pooled_depth); T gradient = 0; const T* const top_diff_slice = top_diff + (n * channels + c) * pooled_height * pooled_width * pooled_depth; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { for (int pd = pdstart; pd < pdend; ++pd) { // figure out the pooling size int hstart = ph * stride_h - pad_t; int wstart = pw * stride_w - pad_l; int dstart = pd * stride_d - pad_f; int hend = min(hstart + kernel_h, height); int wend = min(wstart + kernel_w, width); int dend = min(dstart + kernel_d, depth); hstart = max(hstart, 0); wstart = max(wstart, 0); dstart = max(dstart, 0); int pool_size = (hend - hstart) * (wend - wstart) * (dend - dstart); const int pooled_index = ph * pooled_depth * pooled_width + pooled_depth * pw + pd; gradient += top_diff_slice[pooled_index] / pool_size; } } } bottom_diff[index] = gradient; } } template <typename T> __global__ void Ave1DPoolBackwardNHWC( const int nthreads, const T* const top_diff, const int num, const int height, const int channels, const int pooled_height, const int kernel_h, const int stride_h, const int pad_t, T* const bottom_diff) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int c = index % channels; const int h = (index / channels) % height + pad_t; const int n = index / channels / height; const int phstart = (h < kernel_h) ? 
0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); T gradient = 0; const T* const top_diff_slice = top_diff + n * pooled_height * channels + c; for (int ph = phstart; ph < phend; ++ph) { // figure out the pooling size int hstart = ph * stride_h - pad_t; int hend = min(hstart + kernel_h, height); hstart = max(hstart, 0); int pool_size = (hend - hstart); gradient += top_diff_slice[ph * channels] / pool_size; } bottom_diff[index] = gradient; } } template <typename T> __global__ void Ave2DPoolBackwardNHWC( const int nthreads, const T* const top_diff, const int num, const int height, const int width, const int channels, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_t, const int pad_l, T* const bottom_diff) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int c = index % channels; const int w = index / channels % width + pad_l; const int h = (index / channels / width) % height + pad_t; const int n = index / channels / width / height; const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); const int pwstart = (w < kernel_w) ? 
0 : (w - kernel_w) / stride_w + 1; const int pwend = min(w / stride_w + 1, pooled_width); T gradient = 0; const T* const top_diff_slice = top_diff + n * pooled_height * pooled_width * channels + c; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { // figure out the pooling size int hstart = ph * stride_h - pad_t; int wstart = pw * stride_w - pad_l; int hend = min(hstart + kernel_h, height); int wend = min(wstart + kernel_w, width); hstart = max(hstart, 0); wstart = max(wstart, 0); int pool_size = (hend - hstart) * (wend - wstart); gradient += top_diff_slice[(ph * pooled_width + pw) * channels] / pool_size; } } bottom_diff[index] = gradient; } } template <typename T> __global__ void Ave3DPoolBackwardNHWC( const int nthreads, const T* const top_diff, const int num, const int height, const int width, const int depth, const int channels, const int pooled_height, const int pooled_width, const int pooled_depth, const int kernel_h, const int kernel_w, const int kernel_d, const int stride_h, const int stride_w, const int stride_d, const int pad_t, const int pad_l, const int pad_f, T* const bottom_diff) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int c = index % channels; const int d = index / channels % depth + pad_f; const int w = (index / channels / depth) % width + pad_l; const int h = (index / channels / depth / width) % height + pad_t; const int n = index / channels / depth / width / height; const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; const int pwend = min(w / stride_w + 1, pooled_width); const int pdstart = (d < kernel_d) ? 
0 : (d - kernel_d) / stride_d + 1; const int pdend = min(d / stride_d + 1, pooled_depth); T gradient = 0; const T* const top_diff_slice = top_diff + n * pooled_height * pooled_width * pooled_depth * channels + c; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { for (int pd = pdstart; pd < pdend; ++pd) { // figure out the pooling size int hstart = ph * stride_h - pad_t; int wstart = pw * stride_w - pad_l; int dstart = pd * stride_d - pad_f; int hend = min(hstart + kernel_h, height); int wend = min(wstart + kernel_w, width); int dend = min(dstart + kernel_d, depth); hstart = max(hstart, 0); wstart = max(wstart, 0); dstart = max(dstart, 0); int pool_size = (hend - hstart) * (wend - wstart) * (dend - dstart); const int pooled_index = (ph * pooled_depth * pooled_width + pw * pooled_depth + pd) * channels; gradient += top_diff_slice[pooled_index] / pool_size; } } } bottom_diff[index] = gradient; } } } // namespace template <> bool PoolOp<float, CUDAContext, AveragePool>::RunOnDeviceWithOrderNCHW() { auto& X = Input(0); auto* Y = Output(0); ConvPoolOpBase<CUDAContext>::SetOutputSize(X, Y, X.dim32(1)); int output_size = Y->size(); switch (kernel_.size()) { case 1: Average1DPoolForwardNCHW<float> <<<CAFFE_GET_BLOCKS(output_size), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( output_size, X.data<float>(), X.dim32(0), X.dim32(1), X.dim32(2), Y->dim32(2), kernel_h(), stride_h(), pad_t(), Y->template mutable_data<float>()); break; case 2: Average2DPoolForwardNCHW<float> <<<CAFFE_GET_BLOCKS(output_size), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( output_size, X.data<float>(), X.dim32(0), X.dim32(1), X.dim32(2), X.dim32(3), Y->dim32(2), Y->dim32(3), kernel_h(), kernel_w(), stride_h(), stride_w(), pad_t(), pad_l(), Y->template mutable_data<float>()); break; case 3: Average3DPoolForwardNCHW<float> <<<CAFFE_GET_BLOCKS(output_size), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( output_size, X.data<float>(), X.dim32(0), 
X.dim32(1), X.dim32(2), X.dim32(3), X.dim32(4), Y->dim32(2), Y->dim32(3), Y->dim32(4), kernel_h(), kernel_w(), kernel_[2], stride_h(), stride_w(), stride_[2], pad_t(), pad_l(), pads_[2], Y->template mutable_data<float>()); break; default: CAFFE_THROW("Unsupported pooling size : ", kernel_.size()); } return true; } template <> bool PoolOp<float, CUDAContext, AveragePool>::RunOnDeviceWithOrderNHWC() { auto& X = Input(0); auto* Y = Output(0); ConvPoolOpBase<CUDAContext>::SetOutputSize(X, Y, X.dim32(X.ndim() - 1)); int output_size = Y->size(); switch (kernel_.size()) { case 1: Average1DPoolForwardNHWC<float> <<<CAFFE_GET_BLOCKS(output_size), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( output_size, X.data<float>(), X.dim32(0), X.dim32(1), X.dim32(2), Y->dim32(1), kernel_h(), stride_h(), pad_t(), Y->template mutable_data<float>()); break; case 2: Average2DPoolForwardNHWC<float> <<<CAFFE_GET_BLOCKS(output_size), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( output_size, X.data<float>(), X.dim32(0), X.dim32(1), X.dim32(2), X.dim32(3), Y->dim32(1), Y->dim32(2), kernel_h(), kernel_w(), stride_h(), stride_w(), pad_t(), pad_l(), Y->template mutable_data<float>()); break; case 3: Average3DPoolForwardNHWC<float> <<<CAFFE_GET_BLOCKS(output_size), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( output_size, X.data<float>(), X.dim32(0), X.dim32(1), X.dim32(2), X.dim32(3), X.dim32(4), Y->dim32(1), Y->dim32(2), Y->dim32(3), kernel_h(), kernel_w(), kernel_[2], stride_h(), stride_w(), stride_[2], pad_t(), pad_l(), pads_[2], Y->template mutable_data<float>()); break; default: CAFFE_THROW("Unsupported pooling size : ", kernel_.size()); } return true; } template <> bool PoolGradientOp<float, CUDAContext, AveragePool>:: RunOnDeviceWithOrderNCHW() { auto& X = Input(0); auto& dY = Input(2); CAFFE_ENFORCE_EQ(dY.dim32(1), X.dim32(1)); auto* dX = Output(0); dX->ResizeLike(X); vector<int> dims(X.dims().begin() + 2, X.dims().end()); 
ConvPoolOpBase<CUDAContext>::ComputePads(dims); switch (kernel_.size()) { case 1: Ave1DPoolBackwardNCHW<float> <<<CAFFE_GET_BLOCKS(X.size()), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( X.size(), dY.data<float>(), X.dim32(0), X.dim32(1), X.dim32(2), dY.dim32(2), kernel_h(), stride_h(), pad_t(), dX->template mutable_data<float>()); break; case 2: Ave2DPoolBackwardNCHW<float> <<<CAFFE_GET_BLOCKS(X.size()), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( X.size(), dY.data<float>(), X.dim32(0), X.dim32(1), X.dim32(2), X.dim32(3), dY.dim32(2), dY.dim32(3), kernel_h(), kernel_w(), stride_h(), stride_w(), pad_t(), pad_l(), dX->template mutable_data<float>()); break; case 3: Ave3DPoolBackwardNCHW<float> <<<CAFFE_GET_BLOCKS(X.size()), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( X.size(), dY.data<float>(), X.dim32(0), X.dim32(1), X.dim32(2), X.dim32(3), X.dim32(4), dY.dim32(2), dY.dim32(3), dY.dim32(4), kernel_h(), kernel_w(), kernel_[2], stride_h(), stride_w(), stride_[2], pad_t(), pad_l(), pads_[2], dX->template mutable_data<float>()); break; default: CAFFE_THROW("Unsupported pooling size : ", kernel_.size()); } return true; } template <> bool PoolGradientOp<float, CUDAContext, AveragePool>:: RunOnDeviceWithOrderNHWC() { auto& X = Input(0); auto& dY = Input(2); CAFFE_ENFORCE_EQ(X.ndim(), dY.ndim()); CAFFE_ENFORCE_EQ(X.dim32(X.ndim() - 1), dY.dim32(dY.ndim() - 1)); auto* dX = Output(0); dX->ResizeLike(X); vector<int> dims(X.dims().begin() + 1, X.dims().end() - 1); ConvPoolOpBase<CUDAContext>::ComputePads(dims); switch (kernel_.size()) { case 1: Ave1DPoolBackwardNHWC<float> <<<CAFFE_GET_BLOCKS(X.size()), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( X.size(), dY.data<float>(), X.dim32(0), X.dim32(1), X.dim32(2), dY.dim32(1), kernel_h(), stride_h(), pad_t(), dX->template mutable_data<float>()); break; case 2: Ave2DPoolBackwardNHWC<float> <<<CAFFE_GET_BLOCKS(X.size()), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( X.size(), 
dY.data<float>(), X.dim32(0), X.dim32(1), X.dim32(2), X.dim32(3), dY.dim32(1), dY.dim32(2), kernel_h(), kernel_w(), stride_h(), stride_w(), pad_t(), pad_l(), dX->template mutable_data<float>()); break; case 3: Ave3DPoolBackwardNHWC<float> <<<CAFFE_GET_BLOCKS(X.size()), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( X.size(), dY.data<float>(), X.dim32(0), X.dim32(1), X.dim32(2), X.dim32(3), X.dim32(4), dY.dim32(1), dY.dim32(2), dY.dim32(3), kernel_h(), kernel_w(), kernel_[2], stride_h(), stride_w(), stride_[2], pad_t(), pad_l(), pads_[2], dX->template mutable_data<float>()); break; default: CAFFE_THROW("Unsupported pooling size : ", kernel_.size()); } return true; } namespace { template <typename T> __global__ void MaxPool1DForwardNCHW( const int nthreads, const T* bottom_data, const int channels, const int height, const int pooled_height, const int kernel_h, const int stride_h, const int pad_t, T* top_data) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int ph = index % pooled_height; int c = (index / pooled_height) % channels; int n = index / pooled_height / channels; int hstart = ph * stride_h - pad_t; int hend = min(hstart + kernel_h, height); hstart = max(hstart, 0); T maxval = -FLT_MAX; const T* bdata_offset = bottom_data + n * channels * height; for (int h = hstart; h < hend; ++h) { int idx = c * height + h; if (bdata_offset[idx] > maxval) { maxval = bdata_offset[idx]; } } top_data[index] = maxval; } } template <typename T> __global__ void MaxPool2DForwardNCHW( const int nthreads, const T* bottom_data, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_t, const int pad_l, T* top_data) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / 
channels; int hstart = ph * stride_h - pad_t; int wstart = pw * stride_w - pad_l; int hend = min(hstart + kernel_h, height); int wend = min(wstart + kernel_w, width); hstart = max(hstart, 0); wstart = max(wstart, 0); T maxval = -FLT_MAX; const T* bdata_offset = bottom_data + n * channels * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { int idx = c * height * width + h * width + w; if (bdata_offset[idx] > maxval) { maxval = bdata_offset[idx]; } } } top_data[index] = maxval; } } template <typename T> __global__ void MaxPool3DForwardNCHW( const int nthreads, const T* bottom_data, const int channels, const int height, const int width, const int depth, const int pooled_height, const int pooled_width, const int pooled_depth, const int kernel_h, const int kernel_w, const int kernel_d, const int stride_h, const int stride_w, const int stride_d, const int pad_t, const int pad_l, const int pad_f, T* top_data) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int pd = index % pooled_depth; int pw = (index / pooled_depth) % pooled_width; int ph = (index / pooled_depth / pooled_width) % pooled_height; int c = (index / pooled_depth / pooled_width / pooled_height) % channels; int n = index / pooled_depth / pooled_width / pooled_height / channels; int hstart = ph * stride_h - pad_t; int wstart = pw * stride_w - pad_l; int hend = min(hstart + kernel_h, height); int wend = min(wstart + kernel_w, width); int dstart = pd * stride_d - pad_f; int dend = min(dstart + kernel_d, depth); hstart = max(hstart, 0); wstart = max(wstart, 0); dstart = max(dstart, 0); T maxval = -FLT_MAX; const T* bdata_offset = bottom_data + n * channels * height * width * depth; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { for (int d = dstart; d < dend; ++d) { int idx = ((c * height + h) * width + w) * depth + d; if (bdata_offset[idx] > maxval) { maxval = bdata_offset[idx]; } } } } top_data[index] = maxval; } } template <typename T> __global__ 
void MaxPool1DForwardNHWC( const int nthreads, const T* bottom_data, const int height, const int channels, const int pooled_height, const int kernel_h, const int stride_h, const int pad_t, T* top_data) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int n = index; int c = n % channels; n /= channels; int hstart = (n % pooled_height) * stride_h - pad_t; n /= pooled_height; int hend = min(hstart + kernel_h, height); hstart = max(hstart, 0); T maxval = -FLT_MAX; const T* bdata_offset = bottom_data + n * height * channels; for (int h = hstart; h < hend; ++h) { int idx = h * channels + c; if (bdata_offset[idx] > maxval) { maxval = bdata_offset[idx]; } } top_data[index] = maxval; } } template <typename T> __global__ void MaxPool2DForwardNHWC( const int nthreads, const T* bottom_data, const int height, const int width, const int channels, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_t, const int pad_l, T* top_data) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int n = index; int c = n % channels; n /= channels; int wstart = (n % pooled_width) * stride_w - pad_l; n /= pooled_width; int hstart = (n % pooled_height) * stride_h - pad_t; n /= pooled_height; int hend = min(hstart + kernel_h, height); int wend = min(wstart + kernel_w, width); hstart = max(hstart, 0); wstart = max(wstart, 0); T maxval = -FLT_MAX; const T* bdata_offset = bottom_data + n * height * width * channels; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { int idx = (h * width + w) * channels + c; if (bdata_offset[idx] > maxval) { maxval = bdata_offset[idx]; } } } top_data[index] = maxval; } } template <typename T> __global__ void MaxPool3DForwardNHWC( const int nthreads, const T* bottom_data, const int height, const int width, const int depth, const int channels, const int pooled_height, const int pooled_width, const int pooled_depth, const int kernel_h, const int kernel_w, const int kernel_d, 
const int stride_h, const int stride_w, const int stride_d, const int pad_t, const int pad_l, const int pad_f, T* top_data) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int n = index; int c = n % channels; n /= channels; int dstart = (n % pooled_depth) * stride_d - pad_f; n /= pooled_depth; int wstart = (n % pooled_width) * stride_w - pad_l; n /= pooled_width; int hstart = (n % pooled_height) * stride_h - pad_t; n /= pooled_height; int hend = min(hstart + kernel_h, height); int wend = min(wstart + kernel_w, width); int dend = min(dstart + kernel_d, depth); hstart = max(hstart, 0); wstart = max(wstart, 0); dstart = max(dstart, 0); T maxval = -FLT_MAX; const T* bdata_offset = bottom_data + n * height * width * depth * channels; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { for (int d = dstart; d < dend; ++d) { int idx = ((h * width + w) * depth + d) * channels + c; if (bdata_offset[idx] > maxval) { maxval = bdata_offset[idx]; } } } } top_data[index] = maxval; } } template <typename T> __global__ void MaxPool1DBackwardNCHW( const int nthreads, const T* const bottom_data, const T* const top_data, const T* const top_diff, const int num, const int channels, const int height, const int pooled_height, const int kernel_h, const int stride_h, const int pad_t, T* const bottom_diff) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int h = index % height + pad_t; const int c = (index / height) % channels; const int n = index / height / channels; const int phstart = (h < kernel_h) ? 
0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); const int top_offset = (n * channels + c) * pooled_height; bottom_diff[index] = 0; for (int ph = phstart; ph < phend; ++ph) { int top_local_offset = top_offset + ph; if (bottom_data[index] == top_data[top_local_offset]) { bottom_diff[index] += top_diff[top_local_offset]; } } } } template <typename T> __global__ void MaxPool2DBackwardNCHW( const int nthreads, const T* const bottom_data, const T* const top_data, const T* const top_diff, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_t, const int pad_l, T* const bottom_diff) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int w = index % width + pad_l; const int h = (index / width) % height + pad_t; const int c = (index / width / height) % channels; const int n = index / width / height / channels; const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); const int pwstart = (w < kernel_w) ? 
0 : (w - kernel_w) / stride_w + 1; const int pwend = min(w / stride_w + 1, pooled_width); const int top_offset = (n * channels + c) * pooled_height * pooled_width; bottom_diff[index] = 0; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { int top_local_offset = top_offset + ph * pooled_width + pw; if (bottom_data[index] == top_data[top_local_offset]) { bottom_diff[index] += top_diff[top_local_offset]; } } } } } template <typename T> __global__ void MaxPool3DBackwardNCHW( const int nthreads, const T* const bottom_data, const T* const top_data, const T* const top_diff, const int num, const int channels, const int height, const int width, const int depth, const int pooled_height, const int pooled_width, const int pooled_depth, const int kernel_h, const int kernel_w, const int kernel_d, const int stride_h, const int stride_w, const int stride_d, const int pad_t, const int pad_l, const int pad_f, T* const bottom_diff) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int d = index % depth + pad_f; const int w = (index / depth) % width + pad_l; const int h = (index / depth / width) % height + pad_t; const int c = (index / depth / width / height) % channels; const int n = index / depth / width / height / channels; const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; const int pwend = min(w / stride_w + 1, pooled_width); const int pdstart = (d < kernel_d) ? 
0 : (d - kernel_d) / stride_d + 1; const int pdend = min(d / stride_d + 1, pooled_depth); const int top_offset = (n * channels + c) * pooled_height * pooled_width * pooled_depth; bottom_diff[index] = 0; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { for (int pd = pdstart; pd < pdend; ++pd) { int top_local_offset = top_offset + (ph * pooled_width + pw) * pooled_depth + pd; if (bottom_data[index] == top_data[top_local_offset]) { bottom_diff[index] += top_diff[top_local_offset]; } } } } } } template <typename T> __global__ void MaxPool1DBackwardNHWC( const int nthreads, const T* const bottom_data, const T* const top_data, const T* const top_diff, const int num, const int height, const int channels, const int pooled_height, const int kernel_h, const int stride_h, const int pad_t, T* const bottom_diff) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int c = index % channels; const int h = (index / channels) % height + pad_t; const int n = index / channels / height; const int phstart = (h < kernel_h) ? 
0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); const int top_offset = n * pooled_height * channels + c; bottom_diff[index] = 0; for (int ph = phstart; ph < phend; ++ph) { int top_local_offset = top_offset + ph * channels; if (bottom_data[index] == top_data[top_local_offset]) { bottom_diff[index] += top_diff[top_local_offset]; } } } } template <typename T> __global__ void MaxPool2DBackwardNHWC( const int nthreads, const T* const bottom_data, const T* const top_data, const T* const top_diff, const int num, const int height, const int width, const int channels, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_t, const int pad_l, T* const bottom_diff) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int c = index % channels; const int w = index / channels % width + pad_l; const int h = (index / channels / width) % height + pad_t; const int n = index / channels / width / height; const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); const int pwstart = (w < kernel_w) ? 
0 : (w - kernel_w) / stride_w + 1; const int pwend = min(w / stride_w + 1, pooled_width); const int top_offset = n * pooled_height * pooled_width * channels + c; bottom_diff[index] = 0; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { int top_local_offset = top_offset + (ph * pooled_width + pw) * channels; if (bottom_data[index] == top_data[top_local_offset]) { bottom_diff[index] += top_diff[top_local_offset]; } } } } } template <typename T> __global__ void MaxPool3DBackwardNHWC( const int nthreads, const T* const bottom_data, const T* const top_data, const T* const top_diff, const int num, const int height, const int width, const int depth, const int channels, const int pooled_height, const int pooled_width, const int pooled_depth, const int kernel_h, const int kernel_w, const int kernel_d, const int stride_h, const int stride_w, const int stride_d, const int pad_t, const int pad_l, const int pad_f, T* const bottom_diff) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int c = index % channels; const int d = index / channels % depth + pad_f; const int w = (index / depth / channels) % width + pad_l; const int h = (index / channels / depth / width) % height + pad_t; const int n = index / channels / depth / width / height; const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; const int pwend = min(w / stride_w + 1, pooled_width); const int pdstart = (d < kernel_d) ? 
0 : (d - kernel_d) / stride_d + 1; const int pdend = min(d / stride_d + 1, pooled_depth); const int top_offset = n * pooled_height * pooled_width * pooled_depth * channels + c; bottom_diff[index] = 0; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { for (int pd = pdstart; pd < pdend; ++pd) { int top_local_offset = top_offset + ((ph * pooled_width + pw) * pooled_depth + d) * channels; if (bottom_data[index] == top_data[top_local_offset]) { bottom_diff[index] += top_diff[top_local_offset]; } } } } } } } // namespace template <> bool PoolOp<float, CUDAContext, MaxPool>::RunOnDeviceWithOrderNCHW() { auto& X = Input(0); auto* Y = Output(0); ConvPoolOpBase<CUDAContext>::SetOutputSize(X, Y, X.dim32(1)); int output_size = Y->size(); switch (kernel_.size()) { case 1: MaxPool1DForwardNCHW<float> <<<CAFFE_GET_BLOCKS(output_size), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( output_size, X.data<float>(), X.dim32(1), X.dim32(2), Y->dim32(2), kernel_h(), stride_h(), pad_t(), Y->template mutable_data<float>()); break; case 2: MaxPool2DForwardNCHW<float> <<<CAFFE_GET_BLOCKS(output_size), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( output_size, X.data<float>(), X.dim32(1), X.dim32(2), X.dim32(3), Y->dim32(2), Y->dim32(3), kernel_h(), kernel_w(), stride_h(), stride_w(), pad_t(), pad_l(), Y->template mutable_data<float>()); break; case 3: MaxPool3DForwardNCHW<float> <<<CAFFE_GET_BLOCKS(output_size), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( output_size, X.data<float>(), X.dim32(1), X.dim32(2), X.dim32(3), X.dim32(4), Y->dim32(2), Y->dim32(3), Y->dim32(4), kernel_h(), kernel_w(), kernel_[2], stride_h(), stride_w(), stride_[2], pad_t(), pad_l(), pads_[2], Y->template mutable_data<float>()); break; default: CAFFE_THROW("Unsupported pooling size : ", kernel_.size()); } return true; } template <> bool PoolOp<float, CUDAContext, MaxPool>::RunOnDeviceWithOrderNHWC() { auto& X = Input(0); auto* Y = Output(0); 
ConvPoolOpBase<CUDAContext>::SetOutputSize(X, Y, X.dim32(X.ndim() - 1)); int output_size = Y->size(); switch (kernel_.size()) { case 1: MaxPool1DForwardNHWC<float> <<<CAFFE_GET_BLOCKS(output_size), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( output_size, X.data<float>(), X.dim32(1), X.dim32(2), Y->dim32(1), kernel_h(), stride_h(), pad_t(), Y->template mutable_data<float>()); break; case 2: MaxPool2DForwardNHWC<float> <<<CAFFE_GET_BLOCKS(output_size), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( output_size, X.data<float>(), X.dim32(1), X.dim32(2), X.dim32(3), Y->dim32(1), Y->dim32(2), kernel_h(), kernel_w(), stride_h(), stride_w(), pad_t(), pad_l(), Y->template mutable_data<float>()); break; case 3: MaxPool3DForwardNHWC<float> <<<CAFFE_GET_BLOCKS(output_size), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( output_size, X.data<float>(), X.dim32(1), X.dim32(2), X.dim32(3), X.dim32(4), Y->dim32(1), Y->dim32(2), Y->dim32(3), kernel_h(), kernel_w(), kernel_[2], stride_h(), stride_w(), stride_[2], pad_t(), pad_l(), pads_[2], Y->template mutable_data<float>()); break; default: CAFFE_THROW("Unsupported pooling size : ", kernel_.size()); } return true; } template <> bool PoolGradientOp<float, CUDAContext, MaxPool>::RunOnDeviceWithOrderNCHW() { auto& X = Input(0); auto& Y = Input(1); auto& dY = Input(2); CAFFE_ENFORCE_EQ(dY.ndim(), X.ndim()); auto* dX = Output(0); dX->ResizeLike(X); vector<int> dims(X.dims().begin() + 2, X.dims().end()); ConvPoolOpBase<CUDAContext>::ComputePads(dims); switch (kernel_.size()) { case 1: MaxPool1DBackwardNCHW<float> <<<CAFFE_GET_BLOCKS(X.size()), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( X.size(), X.data<float>(), Y.data<float>(), dY.data<float>(), X.dim32(0), X.dim32(1), X.dim32(2), dY.dim32(2), kernel_h(), stride_h(), pad_t(), dX->template mutable_data<float>()); break; case 2: MaxPool2DBackwardNCHW<float> <<<CAFFE_GET_BLOCKS(X.size()), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( X.size(), 
X.data<float>(), Y.data<float>(), dY.data<float>(), X.dim32(0), X.dim32(1), X.dim32(2), X.dim32(3), dY.dim32(2), dY.dim32(3), kernel_h(), kernel_w(), stride_h(), stride_w(), pad_t(), pad_l(), dX->template mutable_data<float>()); break; case 3: MaxPool3DBackwardNCHW<float> <<<CAFFE_GET_BLOCKS(X.size()), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( X.size(), X.data<float>(), Y.data<float>(), dY.data<float>(), X.dim32(0), X.dim32(1), X.dim32(2), X.dim32(3), X.dim32(4), dY.dim32(2), dY.dim32(3), dY.dim32(4), kernel_h(), kernel_w(), kernel_[2], stride_h(), stride_w(), stride_[2], pad_t(), pad_l(), pads_[2], dX->template mutable_data<float>()); break; default: CAFFE_THROW("Unsupported pooling size : ", kernel_.size()); } return true; } template <> bool PoolGradientOp<float, CUDAContext, MaxPool>::RunOnDeviceWithOrderNHWC() { auto& X = Input(0); auto& Y = Input(1); auto& dY = Input(2); CAFFE_ENFORCE_EQ(dY.ndim(), X.ndim()); auto* dX = Output(0); dX->ResizeLike(X); vector<int> dims(X.dims().begin() + 1, X.dims().end() - 1); ConvPoolOpBase<CUDAContext>::ComputePads(dims); switch (kernel_.size()) { case 1: MaxPool1DBackwardNHWC<float> <<<CAFFE_GET_BLOCKS(X.size()), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( X.size(), X.data<float>(), Y.data<float>(), dY.data<float>(), X.dim32(0), X.dim32(1), X.dim32(2), dY.dim32(1), kernel_h(), stride_h(), pad_t(), dX->template mutable_data<float>()); break; case 2: MaxPool2DBackwardNHWC<float> <<<CAFFE_GET_BLOCKS(X.size()), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( X.size(), X.data<float>(), Y.data<float>(), dY.data<float>(), X.dim32(0), X.dim32(1), X.dim32(2), X.dim32(3), dY.dim32(1), dY.dim32(2), kernel_h(), kernel_w(), stride_h(), stride_w(), pad_t(), pad_l(), dX->template mutable_data<float>()); break; case 3: MaxPool3DBackwardNHWC<float> <<<CAFFE_GET_BLOCKS(X.size()), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( X.size(), X.data<float>(), Y.data<float>(), dY.data<float>(), X.dim32(0), 
X.dim32(1), X.dim32(2), X.dim32(3), X.dim32(4), dY.dim32(1), dY.dim32(2), dY.dim32(3), kernel_h(), kernel_w(), kernel_[2], stride_h(), stride_w(), stride_[2], pad_t(), pad_l(), pads_[2], dX->template mutable_data<float>()); break; default: CAFFE_THROW("Unsupported pooling size : ", kernel_.size()); } return true; } REGISTER_CUDA_OPERATOR(AveragePool, PoolOp<float, CUDAContext, AveragePool>); REGISTER_CUDA_OPERATOR(AveragePoolGradient, PoolGradientOp<float, CUDAContext, AveragePool>); REGISTER_CUDA_OPERATOR(AveragePool1D, PoolOp<float, CUDAContext, AveragePool>); REGISTER_CUDA_OPERATOR( AveragePool1DGradient, PoolGradientOp<float, CUDAContext, AveragePool>); REGISTER_CUDA_OPERATOR(AveragePool2D, PoolOp<float, CUDAContext, AveragePool>); REGISTER_CUDA_OPERATOR( AveragePool2DGradient, PoolGradientOp<float, CUDAContext, AveragePool>); REGISTER_CUDA_OPERATOR(AveragePool3D, PoolOp<float, CUDAContext, AveragePool>); REGISTER_CUDA_OPERATOR( AveragePool3DGradient, PoolGradientOp<float, CUDAContext, AveragePool>); REGISTER_CUDA_OPERATOR(MaxPool, PoolOp<float, CUDAContext, MaxPool>); REGISTER_CUDA_OPERATOR(MaxPoolGradient, PoolGradientOp<float, CUDAContext, MaxPool>); REGISTER_CUDA_OPERATOR(MaxPool1D, PoolOp<float, CUDAContext, MaxPool>); REGISTER_CUDA_OPERATOR( MaxPool1DGradient, PoolGradientOp<float, CUDAContext, MaxPool>); REGISTER_CUDA_OPERATOR(MaxPool2D, PoolOp<float, CUDAContext, MaxPool>); REGISTER_CUDA_OPERATOR( MaxPool2DGradient, PoolGradientOp<float, CUDAContext, MaxPool>); REGISTER_CUDA_OPERATOR(MaxPool3D, PoolOp<float, CUDAContext, MaxPool>); REGISTER_CUDA_OPERATOR( MaxPool3DGradient, PoolGradientOp<float, CUDAContext, MaxPool>); } // namespace caffe2
2eee0de1b43aa61f212ea560b7e8cea2db8d3d2b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2021-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <quantiles/tdigest/tdigest_util.cuh> #include <cudf/column/column_factories.hpp> #include <cudf/detail/iterator.cuh> #include <cudf/detail/tdigest/tdigest.hpp> #include <cudf/detail/utilities/cuda.cuh> #include <cudf/detail/valid_if.cuh> #include <cudf/lists/lists_column_view.hpp> #include <cudf/types.hpp> #include <cudf/utilities/default_stream.hpp> #include <rmm/cuda_stream_view.hpp> #include <rmm/exec_policy.hpp> #include <thrust/advance.h> #include <thrust/binary_search.h> #include <thrust/distance.h> #include <thrust/execution_policy.h> #include <thrust/fill.h> #include <thrust/functional.h> #include <thrust/iterator/constant_iterator.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/iterator/transform_iterator.h> #include <thrust/reduce.h> #include <thrust/scan.h> using namespace cudf::tdigest; namespace cudf { namespace tdigest { namespace detail { // https://developer.nvidia.com/blog/lerp-faster-cuda/ template <typename T> __device__ inline T lerp(T v0, T v1, T t) { return fma(t, v1, fma(-t, v0, v0)); } struct centroid { double mean; double weight; }; struct make_centroid { double const* means; double const* weights; __device__ centroid operator()(size_type i) { return {means[i], weights[i]}; } }; // kernel for computing percentiles on input tdigest (mean, 
weight) centroid data. template <typename CentroidIter> __global__ void compute_percentiles_kernel(device_span<size_type const> tdigest_offsets, column_device_view percentiles, CentroidIter centroids_, double const* min_, double const* max_, double const* cumulative_weight_, double* output) { auto const tid = cudf::detail::grid_1d::global_thread_id(); auto const num_tdigests = tdigest_offsets.size() - 1; auto const tdigest_index = tid / percentiles.size(); if (tdigest_index >= num_tdigests) { return; } auto const pindex = tid % percentiles.size(); // size of the digest we're querying auto const tdigest_size = tdigest_offsets[tdigest_index + 1] - tdigest_offsets[tdigest_index]; // no work to do. values will be set to null if (tdigest_size == 0 || !percentiles.is_valid(pindex)) { return; } output[tid] = [&]() { double const percentage = percentiles.element<double>(pindex); double const* cumulative_weight = cumulative_weight_ + tdigest_offsets[tdigest_index]; // centroids for this particular tdigest CentroidIter centroids = centroids_ + tdigest_offsets[tdigest_index]; // min and max for the digest double const* min_val = min_ + tdigest_index; double const* max_val = max_ + tdigest_index; double const total_weight = cumulative_weight[tdigest_size - 1]; // The following Arrow code serves as a basis for this computation // https://github.com/apache/arrow/blob/master/cpp/src/arrow/util/tdigest.cc#L280 double const weighted_q = percentage * total_weight; if (weighted_q <= 1) { return *min_val; } else if (weighted_q >= total_weight - 1) { return *max_val; } // determine what centroid this weighted quantile falls within. size_type const centroid_index = static_cast<size_type>(thrust::distance( cumulative_weight, thrust::lower_bound( thrust::seq, cumulative_weight, cumulative_weight + tdigest_size, weighted_q))); centroid c = centroids[centroid_index]; // diff == how far from the "center" of the centroid we are, // in unit weights. 
// visually: // // centroid of weight 7 // C <-- center of the centroid // |-------| // | | | // X Y Z // X has a diff of -2 (2 units to the left of the center of the centroid) // Y has a diff of 0 (directly in the middle of the centroid) // Z has a diff of 3 (3 units to the right of the center of the centroid) double const diff = weighted_q + c.weight / 2 - cumulative_weight[centroid_index]; // if we're completely within a centroid of weight 1, just return that. if (c.weight == 1 && std::abs(diff) < 0.5) { return c.mean; } // otherwise, interpolate between two centroids. // get the two centroids we want to interpolate between auto const look_left = diff < 0; auto const [lhs, rhs] = [&]() { if (look_left) { // if we're at the first centroid, "left" of us is the min value auto const first_centroid = centroid_index == 0; auto const lhs = first_centroid ? centroid{*min_val, 0} : centroids[centroid_index - 1]; auto const rhs = c; return std::pair<centroid, centroid>{lhs, rhs}; } else { // if we're at the last centroid, "right" of us is the max value auto const last_centroid = (centroid_index == tdigest_size - 1); auto const lhs = c; auto const rhs = last_centroid ? centroid{*max_val, 0} : centroids[centroid_index + 1]; return std::pair<centroid, centroid>{lhs, rhs}; } }(); // compute interpolation value t // total interpolation range. the total range of "space" between the lhs and rhs centroids. auto const tip = lhs.weight / 2 + rhs.weight / 2; // if we're looking left, diff is negative, so shift it so that we are interpolating // from lhs -> rhs. auto const t = (look_left) ? (diff + tip) / tip : diff / tip; // interpolate return lerp(lhs.mean, rhs.mean, t); }(); } /** * @brief Calculate approximate percentiles on a provided tdigest column. * * Produces a LIST column where each row `i` represents output from querying the * corresponding tdigest of from row `i` in `input`. 
The length of each output list * is the number of percentiles specified in `percentiles` * * @param input tdigest input data. One tdigest per row. * @param percentiles Desired percentiles in range [0, 1]. * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate the returned column's device * memory * * @returns Column of doubles containing requested percentile values. */ std::unique_ptr<column> compute_approx_percentiles(tdigest_column_view const& input, column_view const& percentiles, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { tdigest_column_view tdv(input); // offsets, representing the size of each tdigest auto offsets = tdv.centroids().offsets(); // compute summed weights auto weight = tdv.weights(); auto cumulative_weights = cudf::make_fixed_width_column(data_type{type_id::FLOAT64}, weight.size(), mask_state::UNALLOCATED, stream, rmm::mr::get_current_device_resource()); auto keys = cudf::detail::make_counting_transform_iterator( 0, [offsets_begin = offsets.begin<size_type>(), offsets_end = offsets.end<size_type>()] __device__(size_type i) { return thrust::distance( offsets_begin, thrust::prev(thrust::upper_bound(thrust::seq, offsets_begin, offsets_end, i))); }); thrust::inclusive_scan_by_key(rmm::exec_policy(stream), keys, keys + weight.size(), weight.begin<double>(), cumulative_weights->mutable_view().begin<double>()); auto percentiles_cdv = column_device_view::create(percentiles, stream); // leaf is a column of size input.size() * percentiles.size() auto const num_output_values = input.size() * percentiles.size(); // null percentiles become null results. auto [null_mask, null_count] = [&]() { return percentiles.null_count() != 0 ? 
cudf::detail::valid_if( thrust::make_counting_iterator<size_type>(0), thrust::make_counting_iterator<size_type>(0) + num_output_values, [percentiles = *percentiles_cdv] __device__(size_type i) { return percentiles.is_valid(i % percentiles.size()); }, stream, mr) : std::pair<rmm::device_buffer, size_type>{rmm::device_buffer{}, 0}; }(); auto result = cudf::make_fixed_width_column( data_type{type_id::FLOAT64}, num_output_values, std::move(null_mask), null_count, stream, mr); auto centroids = cudf::detail::make_counting_transform_iterator( 0, make_centroid{tdv.means().begin<double>(), tdv.weights().begin<double>()}); constexpr size_type block_size = 256; cudf::detail::grid_1d const grid(percentiles.size() * input.size(), block_size); hipLaunchKernelGGL(( compute_percentiles_kernel), dim3(grid.num_blocks), dim3(block_size), 0, stream.value(), {offsets.begin<size_type>(), static_cast<size_t>(offsets.size())}, *percentiles_cdv, centroids, tdv.min_begin(), tdv.max_begin(), cumulative_weights->view().begin<double>(), result->mutable_view().begin<double>()); return result; } std::unique_ptr<column> make_tdigest_column(size_type num_rows, std::unique_ptr<column>&& centroid_means, std::unique_ptr<column>&& centroid_weights, std::unique_ptr<column>&& tdigest_offsets, std::unique_ptr<column>&& min_values, std::unique_ptr<column>&& max_values, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_EXPECTS(tdigest_offsets->size() == num_rows + 1, "Encountered unexpected offset count in make_tdigest_column"); CUDF_EXPECTS(centroid_means->size() == centroid_weights->size(), "Encountered unexpected centroid size mismatch in make_tdigest_column"); CUDF_EXPECTS(min_values->size() == num_rows, "Encountered unexpected min value count in make_tdigest_column"); CUDF_EXPECTS(max_values->size() == num_rows, "Encountered unexpected max value count in make_tdigest_column"); // inner struct column auto const centroids_size = centroid_means->size(); 
std::vector<std::unique_ptr<column>> inner_children; inner_children.push_back(std::move(centroid_means)); inner_children.push_back(std::move(centroid_weights)); auto tdigest_data = cudf::make_structs_column(centroids_size, std::move(inner_children), 0, {}, stream, mr); // grouped into lists auto tdigest = cudf::make_lists_column( num_rows, std::move(tdigest_offsets), std::move(tdigest_data), 0, {}, stream, mr); // create the final column std::vector<std::unique_ptr<column>> children; children.push_back(std::move(tdigest)); children.push_back(std::move(min_values)); children.push_back(std::move(max_values)); return make_structs_column(num_rows, std::move(children), 0, {}, stream, mr); } std::unique_ptr<column> make_empty_tdigest_column(rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { auto offsets = cudf::make_fixed_width_column( data_type(type_id::INT32), 2, mask_state::UNALLOCATED, stream, mr); thrust::fill(rmm::exec_policy(stream), offsets->mutable_view().begin<size_type>(), offsets->mutable_view().end<size_type>(), 0); auto min_col = cudf::make_numeric_column(data_type(type_id::FLOAT64), 1, mask_state::UNALLOCATED, stream, mr); thrust::fill(rmm::exec_policy(stream), min_col->mutable_view().begin<double>(), min_col->mutable_view().end<double>(), 0); auto max_col = cudf::make_numeric_column(data_type(type_id::FLOAT64), 1, mask_state::UNALLOCATED, stream, mr); thrust::fill(rmm::exec_policy(stream), max_col->mutable_view().begin<double>(), max_col->mutable_view().end<double>(), 0); return make_tdigest_column(1, make_empty_column(type_id::FLOAT64), make_empty_column(type_id::FLOAT64), std::move(offsets), std::move(min_col), std::move(max_col), stream, mr); } /** * @brief Create an empty tdigest scalar. * * An empty tdigest scalar is a struct_scalar that contains a single row of length 0 * * @param stream CUDA stream used for device memory operations and kernel launches. 
* @param mr Device memory resource used to allocate the returned column's device memory. * * @returns An empty tdigest scalar. */ std::unique_ptr<scalar> make_empty_tdigest_scalar(rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { auto contents = make_empty_tdigest_column(stream, mr)->release(); return std::make_unique<struct_scalar>( std::move(*std::make_unique<table>(std::move(contents.children))), true, stream, mr); } } // namespace detail std::unique_ptr<column> percentile_approx(tdigest_column_view const& input, column_view const& percentiles, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { tdigest_column_view tdv(input); CUDF_EXPECTS(percentiles.type().id() == type_id::FLOAT64, "percentile_approx expects float64 percentile inputs"); // output is a list column with each row containing percentiles.size() percentile values auto offsets = cudf::make_fixed_width_column( data_type{type_id::INT32}, input.size() + 1, mask_state::UNALLOCATED, stream, mr); auto const all_empty_rows = thrust::count_if(rmm::exec_policy(stream), detail::size_begin(input), detail::size_begin(input) + input.size(), [] __device__(auto const x) { return x == 0; }) == input.size(); auto row_size_iter = thrust::make_constant_iterator(all_empty_rows ? 
0 : percentiles.size()); thrust::exclusive_scan(rmm::exec_policy(stream), row_size_iter, row_size_iter + input.size() + 1, offsets->mutable_view().begin<size_type>()); if (percentiles.size() == 0 || all_empty_rows) { return cudf::make_lists_column( input.size(), std::move(offsets), cudf::make_empty_column(type_id::FLOAT64), input.size(), cudf::detail::create_null_mask( input.size(), mask_state::ALL_NULL, rmm::cuda_stream_view(stream), mr), stream, mr); } // if any of the input digests are empty, nullify the corresponding output rows (values will be // uninitialized) auto [bitmask, null_count] = [stream, mr, &tdv]() { auto tdigest_is_empty = thrust::make_transform_iterator( detail::size_begin(tdv), [] __device__(size_type tdigest_size) -> size_type { return tdigest_size == 0; }); auto const null_count = thrust::reduce(rmm::exec_policy(stream), tdigest_is_empty, tdigest_is_empty + tdv.size(), 0); if (null_count == 0) { return std::pair<rmm::device_buffer, size_type>{rmm::device_buffer{}, null_count}; } return cudf::detail::valid_if( tdigest_is_empty, tdigest_is_empty + tdv.size(), thrust::logical_not{}, stream, mr); }(); return cudf::make_lists_column(input.size(), std::move(offsets), detail::compute_approx_percentiles(input, percentiles, stream, mr), null_count, std::move(bitmask), stream, mr); } } // namespace tdigest std::unique_ptr<column> percentile_approx(tdigest_column_view const& input, column_view const& percentiles, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return tdigest::percentile_approx(input, percentiles, cudf::get_default_stream(), mr); } } // namespace cudf
2eee0de1b43aa61f212ea560b7e8cea2db8d3d2b.cu
/* * Copyright (c) 2021-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <quantiles/tdigest/tdigest_util.cuh> #include <cudf/column/column_factories.hpp> #include <cudf/detail/iterator.cuh> #include <cudf/detail/tdigest/tdigest.hpp> #include <cudf/detail/utilities/cuda.cuh> #include <cudf/detail/valid_if.cuh> #include <cudf/lists/lists_column_view.hpp> #include <cudf/types.hpp> #include <cudf/utilities/default_stream.hpp> #include <rmm/cuda_stream_view.hpp> #include <rmm/exec_policy.hpp> #include <thrust/advance.h> #include <thrust/binary_search.h> #include <thrust/distance.h> #include <thrust/execution_policy.h> #include <thrust/fill.h> #include <thrust/functional.h> #include <thrust/iterator/constant_iterator.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/iterator/transform_iterator.h> #include <thrust/reduce.h> #include <thrust/scan.h> using namespace cudf::tdigest; namespace cudf { namespace tdigest { namespace detail { // https://developer.nvidia.com/blog/lerp-faster-cuda/ template <typename T> __device__ inline T lerp(T v0, T v1, T t) { return fma(t, v1, fma(-t, v0, v0)); } struct centroid { double mean; double weight; }; struct make_centroid { double const* means; double const* weights; __device__ centroid operator()(size_type i) { return {means[i], weights[i]}; } }; // kernel for computing percentiles on input tdigest (mean, weight) centroid data. 
template <typename CentroidIter> __global__ void compute_percentiles_kernel(device_span<size_type const> tdigest_offsets, column_device_view percentiles, CentroidIter centroids_, double const* min_, double const* max_, double const* cumulative_weight_, double* output) { auto const tid = cudf::detail::grid_1d::global_thread_id(); auto const num_tdigests = tdigest_offsets.size() - 1; auto const tdigest_index = tid / percentiles.size(); if (tdigest_index >= num_tdigests) { return; } auto const pindex = tid % percentiles.size(); // size of the digest we're querying auto const tdigest_size = tdigest_offsets[tdigest_index + 1] - tdigest_offsets[tdigest_index]; // no work to do. values will be set to null if (tdigest_size == 0 || !percentiles.is_valid(pindex)) { return; } output[tid] = [&]() { double const percentage = percentiles.element<double>(pindex); double const* cumulative_weight = cumulative_weight_ + tdigest_offsets[tdigest_index]; // centroids for this particular tdigest CentroidIter centroids = centroids_ + tdigest_offsets[tdigest_index]; // min and max for the digest double const* min_val = min_ + tdigest_index; double const* max_val = max_ + tdigest_index; double const total_weight = cumulative_weight[tdigest_size - 1]; // The following Arrow code serves as a basis for this computation // https://github.com/apache/arrow/blob/master/cpp/src/arrow/util/tdigest.cc#L280 double const weighted_q = percentage * total_weight; if (weighted_q <= 1) { return *min_val; } else if (weighted_q >= total_weight - 1) { return *max_val; } // determine what centroid this weighted quantile falls within. size_type const centroid_index = static_cast<size_type>(thrust::distance( cumulative_weight, thrust::lower_bound( thrust::seq, cumulative_weight, cumulative_weight + tdigest_size, weighted_q))); centroid c = centroids[centroid_index]; // diff == how far from the "center" of the centroid we are, // in unit weights. 
// visually: // // centroid of weight 7 // C <-- center of the centroid // |-------| // | | | // X Y Z // X has a diff of -2 (2 units to the left of the center of the centroid) // Y has a diff of 0 (directly in the middle of the centroid) // Z has a diff of 3 (3 units to the right of the center of the centroid) double const diff = weighted_q + c.weight / 2 - cumulative_weight[centroid_index]; // if we're completely within a centroid of weight 1, just return that. if (c.weight == 1 && std::abs(diff) < 0.5) { return c.mean; } // otherwise, interpolate between two centroids. // get the two centroids we want to interpolate between auto const look_left = diff < 0; auto const [lhs, rhs] = [&]() { if (look_left) { // if we're at the first centroid, "left" of us is the min value auto const first_centroid = centroid_index == 0; auto const lhs = first_centroid ? centroid{*min_val, 0} : centroids[centroid_index - 1]; auto const rhs = c; return std::pair<centroid, centroid>{lhs, rhs}; } else { // if we're at the last centroid, "right" of us is the max value auto const last_centroid = (centroid_index == tdigest_size - 1); auto const lhs = c; auto const rhs = last_centroid ? centroid{*max_val, 0} : centroids[centroid_index + 1]; return std::pair<centroid, centroid>{lhs, rhs}; } }(); // compute interpolation value t // total interpolation range. the total range of "space" between the lhs and rhs centroids. auto const tip = lhs.weight / 2 + rhs.weight / 2; // if we're looking left, diff is negative, so shift it so that we are interpolating // from lhs -> rhs. auto const t = (look_left) ? (diff + tip) / tip : diff / tip; // interpolate return lerp(lhs.mean, rhs.mean, t); }(); } /** * @brief Calculate approximate percentiles on a provided tdigest column. * * Produces a LIST column where each row `i` represents output from querying the * corresponding tdigest of from row `i` in `input`. 
The length of each output list * is the number of percentiles specified in `percentiles` * * @param input tdigest input data. One tdigest per row. * @param percentiles Desired percentiles in range [0, 1]. * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate the returned column's device * memory * * @returns Column of doubles containing requested percentile values. */ std::unique_ptr<column> compute_approx_percentiles(tdigest_column_view const& input, column_view const& percentiles, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { tdigest_column_view tdv(input); // offsets, representing the size of each tdigest auto offsets = tdv.centroids().offsets(); // compute summed weights auto weight = tdv.weights(); auto cumulative_weights = cudf::make_fixed_width_column(data_type{type_id::FLOAT64}, weight.size(), mask_state::UNALLOCATED, stream, rmm::mr::get_current_device_resource()); auto keys = cudf::detail::make_counting_transform_iterator( 0, [offsets_begin = offsets.begin<size_type>(), offsets_end = offsets.end<size_type>()] __device__(size_type i) { return thrust::distance( offsets_begin, thrust::prev(thrust::upper_bound(thrust::seq, offsets_begin, offsets_end, i))); }); thrust::inclusive_scan_by_key(rmm::exec_policy(stream), keys, keys + weight.size(), weight.begin<double>(), cumulative_weights->mutable_view().begin<double>()); auto percentiles_cdv = column_device_view::create(percentiles, stream); // leaf is a column of size input.size() * percentiles.size() auto const num_output_values = input.size() * percentiles.size(); // null percentiles become null results. auto [null_mask, null_count] = [&]() { return percentiles.null_count() != 0 ? 
cudf::detail::valid_if( thrust::make_counting_iterator<size_type>(0), thrust::make_counting_iterator<size_type>(0) + num_output_values, [percentiles = *percentiles_cdv] __device__(size_type i) { return percentiles.is_valid(i % percentiles.size()); }, stream, mr) : std::pair<rmm::device_buffer, size_type>{rmm::device_buffer{}, 0}; }(); auto result = cudf::make_fixed_width_column( data_type{type_id::FLOAT64}, num_output_values, std::move(null_mask), null_count, stream, mr); auto centroids = cudf::detail::make_counting_transform_iterator( 0, make_centroid{tdv.means().begin<double>(), tdv.weights().begin<double>()}); constexpr size_type block_size = 256; cudf::detail::grid_1d const grid(percentiles.size() * input.size(), block_size); compute_percentiles_kernel<<<grid.num_blocks, block_size, 0, stream.value()>>>( {offsets.begin<size_type>(), static_cast<size_t>(offsets.size())}, *percentiles_cdv, centroids, tdv.min_begin(), tdv.max_begin(), cumulative_weights->view().begin<double>(), result->mutable_view().begin<double>()); return result; } std::unique_ptr<column> make_tdigest_column(size_type num_rows, std::unique_ptr<column>&& centroid_means, std::unique_ptr<column>&& centroid_weights, std::unique_ptr<column>&& tdigest_offsets, std::unique_ptr<column>&& min_values, std::unique_ptr<column>&& max_values, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_EXPECTS(tdigest_offsets->size() == num_rows + 1, "Encountered unexpected offset count in make_tdigest_column"); CUDF_EXPECTS(centroid_means->size() == centroid_weights->size(), "Encountered unexpected centroid size mismatch in make_tdigest_column"); CUDF_EXPECTS(min_values->size() == num_rows, "Encountered unexpected min value count in make_tdigest_column"); CUDF_EXPECTS(max_values->size() == num_rows, "Encountered unexpected max value count in make_tdigest_column"); // inner struct column auto const centroids_size = centroid_means->size(); std::vector<std::unique_ptr<column>> inner_children; 
inner_children.push_back(std::move(centroid_means)); inner_children.push_back(std::move(centroid_weights)); auto tdigest_data = cudf::make_structs_column(centroids_size, std::move(inner_children), 0, {}, stream, mr); // grouped into lists auto tdigest = cudf::make_lists_column( num_rows, std::move(tdigest_offsets), std::move(tdigest_data), 0, {}, stream, mr); // create the final column std::vector<std::unique_ptr<column>> children; children.push_back(std::move(tdigest)); children.push_back(std::move(min_values)); children.push_back(std::move(max_values)); return make_structs_column(num_rows, std::move(children), 0, {}, stream, mr); } std::unique_ptr<column> make_empty_tdigest_column(rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { auto offsets = cudf::make_fixed_width_column( data_type(type_id::INT32), 2, mask_state::UNALLOCATED, stream, mr); thrust::fill(rmm::exec_policy(stream), offsets->mutable_view().begin<size_type>(), offsets->mutable_view().end<size_type>(), 0); auto min_col = cudf::make_numeric_column(data_type(type_id::FLOAT64), 1, mask_state::UNALLOCATED, stream, mr); thrust::fill(rmm::exec_policy(stream), min_col->mutable_view().begin<double>(), min_col->mutable_view().end<double>(), 0); auto max_col = cudf::make_numeric_column(data_type(type_id::FLOAT64), 1, mask_state::UNALLOCATED, stream, mr); thrust::fill(rmm::exec_policy(stream), max_col->mutable_view().begin<double>(), max_col->mutable_view().end<double>(), 0); return make_tdigest_column(1, make_empty_column(type_id::FLOAT64), make_empty_column(type_id::FLOAT64), std::move(offsets), std::move(min_col), std::move(max_col), stream, mr); } /** * @brief Create an empty tdigest scalar. * * An empty tdigest scalar is a struct_scalar that contains a single row of length 0 * * @param stream CUDA stream used for device memory operations and kernel launches. * @param mr Device memory resource used to allocate the returned column's device memory. * * @returns An empty tdigest scalar. 
*/ std::unique_ptr<scalar> make_empty_tdigest_scalar(rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { auto contents = make_empty_tdigest_column(stream, mr)->release(); return std::make_unique<struct_scalar>( std::move(*std::make_unique<table>(std::move(contents.children))), true, stream, mr); } } // namespace detail std::unique_ptr<column> percentile_approx(tdigest_column_view const& input, column_view const& percentiles, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { tdigest_column_view tdv(input); CUDF_EXPECTS(percentiles.type().id() == type_id::FLOAT64, "percentile_approx expects float64 percentile inputs"); // output is a list column with each row containing percentiles.size() percentile values auto offsets = cudf::make_fixed_width_column( data_type{type_id::INT32}, input.size() + 1, mask_state::UNALLOCATED, stream, mr); auto const all_empty_rows = thrust::count_if(rmm::exec_policy(stream), detail::size_begin(input), detail::size_begin(input) + input.size(), [] __device__(auto const x) { return x == 0; }) == input.size(); auto row_size_iter = thrust::make_constant_iterator(all_empty_rows ? 
0 : percentiles.size()); thrust::exclusive_scan(rmm::exec_policy(stream), row_size_iter, row_size_iter + input.size() + 1, offsets->mutable_view().begin<size_type>()); if (percentiles.size() == 0 || all_empty_rows) { return cudf::make_lists_column( input.size(), std::move(offsets), cudf::make_empty_column(type_id::FLOAT64), input.size(), cudf::detail::create_null_mask( input.size(), mask_state::ALL_NULL, rmm::cuda_stream_view(stream), mr), stream, mr); } // if any of the input digests are empty, nullify the corresponding output rows (values will be // uninitialized) auto [bitmask, null_count] = [stream, mr, &tdv]() { auto tdigest_is_empty = thrust::make_transform_iterator( detail::size_begin(tdv), [] __device__(size_type tdigest_size) -> size_type { return tdigest_size == 0; }); auto const null_count = thrust::reduce(rmm::exec_policy(stream), tdigest_is_empty, tdigest_is_empty + tdv.size(), 0); if (null_count == 0) { return std::pair<rmm::device_buffer, size_type>{rmm::device_buffer{}, null_count}; } return cudf::detail::valid_if( tdigest_is_empty, tdigest_is_empty + tdv.size(), thrust::logical_not{}, stream, mr); }(); return cudf::make_lists_column(input.size(), std::move(offsets), detail::compute_approx_percentiles(input, percentiles, stream, mr), null_count, std::move(bitmask), stream, mr); } } // namespace tdigest std::unique_ptr<column> percentile_approx(tdigest_column_view const& input, column_view const& percentiles, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return tdigest::percentile_approx(input, percentiles, cudf::get_default_stream(), mr); } } // namespace cudf
36105ae6e4b72adb5caa7fff13944b7249b1bff5.hip
// !!! This is a file automatically generated by hipify!!! /*************************************************************************************************** * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without *modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright notice, *this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright *notice, this list of conditions and the following disclaimer in the *documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the names of its *contributors may be used to endorse or promote products derived from this *software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" *AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE *IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE *DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY DIRECT, *INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, *DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY *OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TOR (INCLUDING *NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, *EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /** This example shows how to run convolution kernels using functions and data structures provided by CUTLASS using tensor cores; which we run on a NVIDIA Turing GPU. 
Writing a single high performance convolution kernel is hard but do-able. Whereas writing high performance kernels at scale which works for multiple problem sizes with good abstractions is really hard. CUTLASS solves this problem by providing simplified abstractions to compose multiple sections of implicit gemm kernel. When used properly, the kernels can hit peak performance of GPU easily. CUTLASS divides a kernel into hierarchical composable sections. Which means, at each thread, warp and thread-block level, they compute on their own tile-size with higher level of tile sizes being composed from lower level ones. Multiple thread-tiles (tile size each thread computes) can be used to form warp-tiles (tile size each warp computes) and multiple warp tiles can be used to compute threadblock-tile (tile size computed by a threadblock). In thie example, we split variable initialization into 1. Setting up data properties : describes how tensors are laid out in the memory and how the kernel can view them (logical to physical mapping) 2. Setting up computation properties : describes how the above set tensors will be used to compute output of convolution. First, we setup the data types of the input tensor A, weights' tensor B and output tensor C along with alpha, beta as the equation for convolution is C = alpha * Conv(A, B) + beta * C. In CUTLASS, the kernels first compute Conv(A, B) and leave the rest of the computation to end of the kernel as alpha * X + beta * C is a simple element-wise operation on X (Conv(A, B)) and C. We call this as epilogue of kernel. Hence, we setup data types for alpha and beta to be equal to ElementComputeEpilogue = float. We want to use MMA instructions on Turing and they support 4-bit signed integer. But int4b_t is not fully supported by Nvidia software stack, so CUTLASS introduces cutlass::int4b_t. We use the data type for elements in input tensor A and B as cutlass::int4b_t. 
We convey this to CUTLASS kernel by initializing template variables ElementAccumulator (int32_t), ElementComputeEpilogue (float), ElementInputA (cutlass::int4b_t), ElementInputB (cutlass::int4b_t), ElementOutput (int32_t). Communicating just the data type is not enough. As the data is laid out linearly in memory, we have to convey the layout of tensors. We do that by initializing template variables LayoutInputA, LayoutInputB and LayoutOutput to TensorNHWC cutlass variable. Next, we setup rules to comptue alpha * X + beta * C which is called epilogue of the kernel. We initialize template variable EpilogueOp, which takes the data type of output ElementOutput (int32_t), the number of elements per vector memory access (32), data type of accumulator (int32_t) and data type of computation of linear combination (alpha * X + beta * C). Now that we setup the properties of data, we have to setup properties of computation. Second, we create template variables of tile sizes for thread-block, warp and mma-op to 128x128x128, 64x64x128, 8x8x32 (MxNxK) respectively. When passed to instantiate CUTLASS Implicit GEMM kernel, it internally deduces the amount of threads needed per thread-block, amount of shared memory, storing data in bank-conflict free manner, and ton of other variables required to compose, intialize and launch a high performance Implicit GEMM kernel. This is the beauty of CUTLASS, it relieves developer from understanding and coding complicated hardware optimizations which can easily go wrong. CUTLASS also supports multiple MMA pipelines in a threadblock. What are MMA pipelines? MMA pipelines constitute the whole process of loading input data from global memory to shared memory, loading data from shared memory to registers, doing matrix multiplication, store to global memory. The below flow sequence shows a typical mma pipeline. 
tensor in global memory -> registers -> tile in shared memory -> registers -> mma -> registers -> output to global memory The problem with single pipeline is, each stage is synchronous which means, each stage has to wait until the previous finished executing. There are stages in the pipeline which do not have fixed latency, for example, the loads from global memory and shared memory. Therefore, we can add one more pipeline with a phase shift in mma kernel to hide latency from global and shared memory loads. Finally, the pipeline in a kernel looks like (1) tensor in global memory -> (2) registers -> (3) tile in shared memory -> (4) registers -> (5) mma -> (6) registers -> (7) output to global memory (1) <null> -> (2) <null> -> (3) tensor in global memory -> (4) registers -> (5) tile in shared memory -> (6) registers -> (7) mma -> (8) registers -> (9) output to global memory This way, you can hide the second global memory load latency by doing computation on already loaded input data. There are few more template variables initialized such as, which threadblock tile of output matrix is done which threadblock launched on an SM, CUDA SM architecture of GPU you want to run on. These are all put together to create a template variable which describes CUTLASS Implicit GEMM kernel using cutlass::conv::device::ImplicitGemm template. The next step is to intialize physical data, instantiate and initialize CUTLASS kernel and run it. We use CUTLASS utilities to initialize, fill, compare tensors as they are simple and doesn't come in the way of learning CUTLASS. Once all the tensors are initialized and filled with data, create arguments tuple to launch CUTLASS kernel which takes problem size (N = 1, H = 64, W = 64, C = 128), filter size (K = 64, R = 3, S = 3, C = 128 ), padding, strides, dilation, tensors, alpha, beta and the important one, split k-dimension factor. Along with that, we query CUTLASS if any scratch-space memory required by the kernel we instantiated. 
If yes, we create it and pass it along with other arguments created to intialize CUTLASS kernel then, the kernel is launched. In this example, we later on launch a reference convolution kernel (from CUTLASS utilities) to compare if the output from CUTLASS kernel is same as the reference implicit GEMM kernel. */ #include <iostream> #include <sstream> #include "cutlass/cutlass.h" #include "cutlass/gemm/device/gemm.h" #include "cutlass/conv/kernel/default_conv2d_fprop.h" #include "cutlass/conv/device/implicit_gemm_convolution.h" #include "cutlass/util/command_line.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/tensor_view_io.h" #include "cutlass/util/reference/device/gemm.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/tensor_copy.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/util/reference/host/convolution.h" #include "cutlass/util/tensor_view_io.h" #include "helper.h" // The code section below describes datatype for input, output tensors and // computation between elements using ElementAccumulator = int32_t; // Data type of accumulator using ElementComputeEpilogue = float; // Data type of epilogue computation (alpha, beta) using ElementInputA = cutlass::int4b_t; // Data type of elements in input tensor using ElementInputB = cutlass::int4b_t; // Data type of elements in input tensor using ElementOutput = cutlass::int4b_t; // Data type of elements in output tensor using LayoutInputA = cutlass::layout::TensorNHWC; using LayoutInputB = cutlass::layout::TensorNHWC; using LayoutOutput = cutlass::layout::TensorNHWC; // This code section describes whether you want to use tensor cores or regular // SIMT cores on GPU SM using MMAOp = cutlass::arch::OpClassTensorOp; // This code section describes CUDA SM architecture number using SmArch = cutlass::arch::Sm75; // This code section describes the tile size a thread block will compute using ThreadblockShape = cutlass::gemm::GemmShape<128, 
128, 128>; // Threadblock tile shape // This code section describes tile size a warp will compute using WarpShape = cutlass::gemm::GemmShape<64, 64, 128>; // Warp tile shape // This code section describes the size of MMA op using InstructionShape = cutlass::gemm::GemmShape<8, 8, 32>; // TensorCore instruction shape // This code section describes how threadblocks are scheduled on GPU using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>; // Number of pipelines you want to use constexpr int NumStages = 2; // This code section describes the epilogue part of the kernel, we use default // value using EpilogueOp = cutlass::epilogue::thread::LinearCombinationClamp< ElementOutput, // Data type of output matrix. 8, // The number of elements per vectorized. // memory access. This becomes the vector width of // math instructions in the epilogue too. ElementAccumulator, // Data type of accumulator ElementComputeEpilogue>; // Data type for alpha/beta in linear // combination using Conv2dFpropKernel = typename cutlass::conv::kernel::DefaultConv2dFprop< ElementInputA, LayoutInputA, ElementInputB, LayoutInputB, ElementOutput, LayoutOutput, ElementAccumulator, MMAOp, SmArch, ThreadblockShape, WarpShape, InstructionShape, EpilogueOp, SwizzleThreadBlock, NumStages, cutlass::arch::OpMultiplyAddSaturate, cutlass::conv::IteratorAlgorithm::kAnalytic>::Kernel; using ImplicitGemm = cutlass::conv::device::ImplicitGemmConvolution<Conv2dFpropKernel>; ///////////////////////////////////////////////////////////////////////////////////////////////// // Command line options parsing struct Options { bool help; cutlass::Tensor4DCoord input_size; cutlass::Tensor4DCoord filter_size; cutlass::Tensor4DCoord padding; cutlass::MatrixCoord conv_stride; cutlass::MatrixCoord dilation; bool reference_check; bool measure_performance; int iterations; bool save_workspace; ElementComputeEpilogue alpha; ElementComputeEpilogue beta; bool benchmark; std::string tag; Options() : 
help(false), input_size(1, 32, 32, 32), filter_size(32, 3, 3, 32), padding(1, 1, 1, 1), conv_stride(1, 1), dilation(1, 1), reference_check(false), measure_performance(true), iterations(20), save_workspace(false), alpha(1), beta(0), benchmark(false) {} // Verify the problem size is compatible with the CUTLASS Convolution // implementation. bool valid() { // // CUTLASS attempts to load 128b vectors of int4b_t elements. // Consequently, all pointers, strides, and tensor extents must be // divisible by 32 elements. // int const kAlignment = 32; if ((input_size.c() % kAlignment) || (filter_size.n() % kAlignment)) { // misaligned tensors return false; } // Invalid padding if ((padding.h() != filter_size.h() / 2) || (padding.w() != filter_size.w() / 2)) { return false; } return true; } /// Updates input and filter sizes void update(cutlass::Tensor4DCoord input_size, cutlass::Tensor4DCoord filter_size) { this->input_size = input_size; this->filter_size = filter_size; padding.n() = filter_size.h() / 2; padding.h() = filter_size.h() / 2; padding.w() = filter_size.w() / 2; padding.c() = filter_size.w() / 2; } // Parses the command line void parse(int argc, char const** args) { cutlass::CommandLine cmd(argc, args); if (cmd.check_cmd_line_flag("help")) { help = true; } if (cmd.check_cmd_line_flag("ref-check")) { reference_check = true; } if (cmd.check_cmd_line_flag("perf-check")) { measure_performance = true; } if (cmd.check_cmd_line_flag("save-workspace")) { save_workspace = true; } if (cmd.check_cmd_line_flag("benchmark")) { benchmark = true; } cmd.get_cmd_line_argument("n", input_size.n()); cmd.get_cmd_line_argument("h", input_size.h()); cmd.get_cmd_line_argument("w", input_size.w()); cmd.get_cmd_line_argument("c", input_size.c()); cmd.get_cmd_line_argument("k", filter_size.n()); cmd.get_cmd_line_argument("r", filter_size.h()); cmd.get_cmd_line_argument("s", filter_size.w()); filter_size.c() = input_size.c(); cmd.get_cmd_line_argument("alpha", alpha); 
cmd.get_cmd_line_argument("beta", beta); cmd.get_cmd_line_argument("iterations", iterations); cmd.get_cmd_line_argument("tag", tag); if (filter_size.h() == 3 && filter_size.w() == 3) { padding = {1, 1, 1, 1}; } else { filter_size.h() = 1; filter_size.w() = 1; padding = {0, 0, 0, 0}; } } /// Prints the usage statement. std::ostream& print_usage(std::ostream& out) const { out << "09_turing_tensorop_conv2dfprop example\n\n" << " This example uses Turing's Tensor Core operators on int4 " "data types to compute\n" << " forward convolution on tensors of layout NHWC.\n\n" << "Options:\n\n" << " --help If specified, displays this usage " "statement.\n\n" << " --n <int> Input tensor extent N\n" << " --h <int> Input tensor extent H\n" << " --w <int> Input tensor extent W\n" << " --c <int> Input tensor extent C\n" << " --k <int> Filter extent K\n" << " --r <int> Filter extent R\n" << " --s <int> Filter extent S\n\n" << " --alpha <float> Epilogue scalar alpha\n" << " --beta <float> Epilogue scalar beta\n\n" << " --ref-check If set (true), reference check on the " "host is computed\n" << " --perf-check If set (true), performance is " "measured.\n" << " --benchmark If set (true), performance benchmarking " "on several layers and batch-size.\n" << " --iterations <int> Number of profiling iterations to " "perform.\n" << " --save-workspace If set, workspace is written to a text " "file.\n" << " --tag <string> String to replicate across the first " "column in the results table\n"; out << "\n\nExamples:\n\n" << "$ " "./examples/09_turing_tensorop_conv2dfprop/" "09_turing_tensorop_conv2dfprop --n=32 --h=224 --w=224 --c=128 " "--k=256 --r=1 --s=1\n\n" << "$ " "./examples/09_turing_tensorop_conv2dfprop/" "09_turing_tensorop_conv2dfprop --n=1 --h=224 --w=224 --c=32 " "--k=32 --r=3 --s=3 --ref-check\n\n"; return out; } /// Computes the output tensor size (NPQK) cutlass::Tensor4DCoord output_size() const { return cutlass::Tensor4DCoord( input_size.n(), (input_size.h() + padding.n() + 
padding.h() - filter_size.h()) / conv_stride.row() + 1, (input_size.w() + padding.w() + padding.c() - filter_size.w()) / conv_stride.column() + 1, filter_size.n()); } /// Compute performance in GFLOP/s double gflops(double runtime_s) const { // Number of multiply-adds = NPQK * CRS int64_t fmas = output_size().product() * int64_t(filter_size.h() * filter_size.w() * filter_size.c()); // Two flops per multiply-add return 2.0 * double(fmas) / double(1.0e9) / runtime_s; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// struct Result { double runtime_ms; double gflops; cutlass::Status status; cutlass::Status reference_check; hipError_t error; Result() : runtime_ms(0), gflops(0), status(cutlass::Status::kSuccess), reference_check(cutlass::Status::kInvalid), error(hipSuccess) {} static std::ostream& print_header(std::ostream& out, Options const& options) { if (!options.tag.empty()) { out << "Name,"; } out << "Layer,N,H,W,C,K,R,S,Runtime,GFLOPs"; return out; } std::ostream& print(std::ostream& out, int idx, Options const& options) { if (!options.tag.empty()) { out << options.tag << ","; } out << "conv_" << idx << "," << options.input_size.n() << "," << options.input_size.h() << "," << options.input_size.w() << "," << options.input_size.c() << "," << options.filter_size.n() << "," << options.filter_size.h() << "," << options.filter_size.w() << "," << runtime_ms << "," << gflops; return out; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Runs one benchmark Result profile_convolution(Options const& options) { Result result; // // Allocate host-device tensors using the CUTLASS Utilities. 
// cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a( options.input_size); cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b( options.filter_size); cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c( options.output_size()); cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_c( options.output_size()); // // Initialize tensors // // Fill tensor A on host with uniform-distribution random data cutlass::reference::host::TensorFillRandomUniform( tensor_a.host_view(), 1, ElementInputA(7), ElementInputA(-8), 0); // Fill tensor B on host with uniform-distribution random data cutlass::reference::host::TensorFillRandomUniform( tensor_b.host_view(), 1, ElementInputB(7), ElementInputB(-8), 0); // Fill tensor C on host with zeros cutlass::reference::host::TensorFill(tensor_c.host_view()); // Fill tensor C for reference on host with zeros cutlass::reference::host::TensorFill(tensor_ref_c.host_view()); // Copy data from host to GPU tensor_a.sync_device(); tensor_b.sync_device(); tensor_c.sync_device(); tensor_ref_c.sync_device(); // // Define arguments for CUTLASS Convolution // // mode (kCrossCorrelation or kConvolution) cutlass::conv::Mode mode = cutlass::conv::Mode::kCrossCorrelation; // Split K dimension into 1 partitions int split_k_slices = 1; cutlass::conv::Conv2dProblemSize problem_size( options.input_size, options.filter_size, options.padding, options.conv_stride, options.dilation, options.output_size(), mode, split_k_slices); typename ImplicitGemm::Arguments arguments{ problem_size, tensor_a.device_ref(), tensor_b.device_ref(), tensor_c.device_ref(), tensor_c.device_ref(), {options.alpha, options.beta}, }; // // Initialize CUTLASS Convolution // ImplicitGemm implicit_gemm_op; size_t workspace_size = implicit_gemm_op.get_workspace_size(arguments); // Allocate workspace memory cutlass::device_memory::allocation<uint8_t> workspace(workspace_size); result.status = implicit_gemm_op.initialize(arguments, workspace.get()); 
CUTLASS_CHECK(result.status); // // Launch initialized CUTLASS kernel // result.status = implicit_gemm_op(); CUTLASS_CHECK(result.status); // // Optional reference check // if (options.reference_check) { std::cout << "Verification on host...\n"; // Compute with reference implementation cutlass::reference::host::Conv2dFprop< ElementInputA, LayoutInputA, ElementInputB, LayoutInputB, ElementOutput, LayoutOutput, ElementComputeEpilogue, ElementAccumulator, cutlass::NumericConverterClamp<ElementOutput, ElementComputeEpilogue> >( problem_size, tensor_a.host_ref(), tensor_b.host_ref(), tensor_c.host_ref(), tensor_ref_c.host_ref(), options.alpha, options.beta); // Check if output from CUTLASS kernel and reference kernel are equal or // not tensor_c.sync_host(); bool passed = cutlass::reference::host::TensorEquals( tensor_c.host_view(), tensor_ref_c.host_view()); if (!passed) { result.reference_check = cutlass::Status::kErrorInternal; std::cout << "ERROR - results miscompared.\n"; } else { result.reference_check = cutlass::Status::kSuccess; std::cout << "Passed.\n"; } } else { result.reference_check = cutlass::Status::kInvalid; } if (options.save_workspace) { std::stringstream ss; ss << "09_tensor_conv_workspace_conv2dfprop_" << options.input_size.n() << "x" << options.input_size.h() << "x" << options.input_size.w() << "x" << options.input_size.c() << "_" << options.filter_size.n() << "x" << options.filter_size.h() << "x" << options.filter_size.w() << "x" << options.filter_size.c() << ".dat"; std::ofstream output_workspace(ss.str()); output_workspace << "Input = \n" << tensor_a.host_view() << "\n\n" << "Filters = \n" << tensor_b.host_view() << "\n\n"; if (options.reference_check) { output_workspace << "Reference = \n" << tensor_ref_c.host_view() << "\n\n"; } output_workspace << "Computed = \n" << tensor_c.host_view() << std::endl; std::cout << "Results written to '" << ss.str() << "'." 
<< std::endl; } // // Performance measurement // if (options.measure_performance) { hipEvent_t events[2]; for (auto& event : events) { result.error = hipEventCreate(&event); if (result.error != hipSuccess) { std::cerr << "hipEventCreate() failed: " << hipGetErrorString(result.error) << std::endl; return result; } } // Record an event at the start of a series of convolution operations. result.error = hipEventRecord(events[0]); if (result.error != hipSuccess) { std::cerr << "hipEventRecord() failed: " << hipGetErrorString(result.error) << std::endl; return result; } // Launch a sequence of implicit GEMM operations on the device for (int iteration = 0; iteration < options.iterations; ++iteration) { result.status = implicit_gemm_op(); CUTLASS_CHECK(result.status); } // Record an event when the convolutions have been launched. result.error = hipEventRecord(events[1]); if (result.error != hipSuccess) { std::cerr << "hipEventRecord() failed: " << hipGetErrorString(result.error) << std::endl; return result; } // Wait for work on the device to complete. result.error = hipEventSynchronize(events[1]); if (result.error != hipSuccess) { std::cerr << "hipEventSynchronize() failed: " << hipGetErrorString(result.error) << std::endl; return result; } // Measure elapsed runtime float runtime_ms = 0; result.error = hipEventElapsedTime(&runtime_ms, events[0], events[1]); if (result.error != hipSuccess) { std::cerr << "cudaEventElapsed() failed: " << hipGetErrorString(result.error) << std::endl; return result; } // Print average runtime and GFLOPs. 
result.runtime_ms = double(runtime_ms) / double(options.iterations); result.gflops = options.gflops(result.runtime_ms / 1000.0); // Cleanup for (auto event : events) { (void)hipEventDestroy(event); } } return result; } ///////////////////////////////////////////////////////////////////////////////////////////////// int main(int argc, char const** args) { // Turing Tensor Core operations exposed with mma.sync are first available // in CUDA 10.2. // // CUTLASS must be compiled with CUDA 10.2 Toolkit to run these examples. if (!(__CUDACC_VER_MAJOR__ > 10 || (__CUDACC_VER_MAJOR__ == 10 && __CUDACC_VER_MINOR__ >= 2))) { std::cerr << "Turing Tensor Core operations must be compiled with CUDA " "10.2 Toolkit or later." << std::endl; return 0; } hipDeviceProp_t props; CUDA_CHECK(hipGetDeviceProperties(&props, 0)); if (!(props.major > 7 || (props.major == 7 && props.minor >= 5))) { std::cerr << "Turing Tensor Ops must be run on a machine with compute " "capability at least 75." << std::endl; return 0; } Options options; options.parse(argc, args); if (options.help) { options.print_usage(std::cout) << std::endl; return 0; } if (options.benchmark) { // Benchmark several layers int batch_sizes[] = {1, 32, 64, 128, 256, 512}; struct Benchmark { int h, w, c, k, r, s; } layers[] = { {56, 56, 64, 256, 1, 1}, {56, 56, 64, 64, 1, 1}, {56, 56, 64, 64, 3, 3}, {56, 56, 256, 64, 1, 1}, {56, 56, 256, 512, 1, 1}, {56, 56, 256, 128, 1, 1}, {28, 28, 128, 128, 3, 3}, {28, 28, 128, 512, 1, 1}, {28, 28, 512, 128, 1, 1}, {28, 28, 512, 1024, 1, 1}, {28, 28, 512, 256, 1, 1}, {14, 14, 256, 256, 3, 3}, {14, 14, 256, 1024, 1, 1}, {14, 14, 1024, 256, 1, 1}, {14, 14, 1024, 2048, 1, 1}, {14, 14, 1024, 512, 1, 1}, {7, 7, 512, 512, 3, 3}, }; Result::print_header(std::cout, options) << std::endl; int idx = 1; for (auto const& layer : layers) { for (auto N : batch_sizes) { options.update({N, layer.h, layer.w, layer.c}, {layer.k, layer.r, layer.s, layer.c}); Result result = profile_convolution(options); 
result.print(std::cout, idx, options) << std::endl; } ++idx; } } else { // Execute one problem size if (!options.valid()) { std::cerr << "Invalid problem." << std::endl; return -1; } Result result = profile_convolution(options); Result::print_header(std::cout, options) << std::endl; result.print(std::cout, 1, options) << std::endl; } return 0; } /////////////////////////////////////////////////////////////////////////////////////////////////
36105ae6e4b72adb5caa7fff13944b7249b1bff5.cu
/*************************************************************************************************** * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without *modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright notice, *this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright *notice, this list of conditions and the following disclaimer in the *documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the names of its *contributors may be used to endorse or promote products derived from this *software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" *AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE *IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE *DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY DIRECT, *INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, *DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY *OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TOR (INCLUDING *NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, *EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /** This example shows how to run convolution kernels using functions and data structures provided by CUTLASS using tensor cores; which we run on a NVIDIA Turing GPU. Writing a single high performance convolution kernel is hard but do-able. 
Whereas writing high performance kernels at scale which works for multiple problem sizes with good abstractions is really hard. CUTLASS solves this problem by providing simplified abstractions to compose multiple sections of implicit gemm kernel. When used properly, the kernels can hit peak performance of GPU easily. CUTLASS divides a kernel into hierarchical composable sections. Which means, at each thread, warp and thread-block level, they compute on their own tile-size with higher level of tile sizes being composed from lower level ones. Multiple thread-tiles (tile size each thread computes) can be used to form warp-tiles (tile size each warp computes) and multiple warp tiles can be used to compute threadblock-tile (tile size computed by a threadblock). In thie example, we split variable initialization into 1. Setting up data properties : describes how tensors are laid out in the memory and how the kernel can view them (logical to physical mapping) 2. Setting up computation properties : describes how the above set tensors will be used to compute output of convolution. First, we setup the data types of the input tensor A, weights' tensor B and output tensor C along with alpha, beta as the equation for convolution is C = alpha * Conv(A, B) + beta * C. In CUTLASS, the kernels first compute Conv(A, B) and leave the rest of the computation to end of the kernel as alpha * X + beta * C is a simple element-wise operation on X (Conv(A, B)) and C. We call this as epilogue of kernel. Hence, we setup data types for alpha and beta to be equal to ElementComputeEpilogue = float. We want to use MMA instructions on Turing and they support 4-bit signed integer. But int4b_t is not fully supported by Nvidia software stack, so CUTLASS introduces cutlass::int4b_t. We use the data type for elements in input tensor A and B as cutlass::int4b_t. 
We convey this to CUTLASS kernel by initializing template variables ElementAccumulator (int32_t), ElementComputeEpilogue (float), ElementInputA (cutlass::int4b_t), ElementInputB (cutlass::int4b_t), ElementOutput (int32_t). Communicating just the data type is not enough. As the data is laid out linearly in memory, we have to convey the layout of tensors. We do that by initializing template variables LayoutInputA, LayoutInputB and LayoutOutput to TensorNHWC cutlass variable. Next, we setup rules to comptue alpha * X + beta * C which is called epilogue of the kernel. We initialize template variable EpilogueOp, which takes the data type of output ElementOutput (int32_t), the number of elements per vector memory access (32), data type of accumulator (int32_t) and data type of computation of linear combination (alpha * X + beta * C). Now that we setup the properties of data, we have to setup properties of computation. Second, we create template variables of tile sizes for thread-block, warp and mma-op to 128x128x128, 64x64x128, 8x8x32 (MxNxK) respectively. When passed to instantiate CUTLASS Implicit GEMM kernel, it internally deduces the amount of threads needed per thread-block, amount of shared memory, storing data in bank-conflict free manner, and ton of other variables required to compose, intialize and launch a high performance Implicit GEMM kernel. This is the beauty of CUTLASS, it relieves developer from understanding and coding complicated hardware optimizations which can easily go wrong. CUTLASS also supports multiple MMA pipelines in a threadblock. What are MMA pipelines? MMA pipelines constitute the whole process of loading input data from global memory to shared memory, loading data from shared memory to registers, doing matrix multiplication, store to global memory. The below flow sequence shows a typical mma pipeline. 
tensor in global memory -> registers -> tile in shared memory -> registers -> mma -> registers -> output to global memory The problem with single pipeline is, each stage is synchronous which means, each stage has to wait until the previous finished executing. There are stages in the pipeline which do not have fixed latency, for example, the loads from global memory and shared memory. Therefore, we can add one more pipeline with a phase shift in mma kernel to hide latency from global and shared memory loads. Finally, the pipeline in a kernel looks like (1) tensor in global memory -> (2) registers -> (3) tile in shared memory -> (4) registers -> (5) mma -> (6) registers -> (7) output to global memory (1) <null> -> (2) <null> -> (3) tensor in global memory -> (4) registers -> (5) tile in shared memory -> (6) registers -> (7) mma -> (8) registers -> (9) output to global memory This way, you can hide the second global memory load latency by doing computation on already loaded input data. There are few more template variables initialized such as, which threadblock tile of output matrix is done which threadblock launched on an SM, CUDA SM architecture of GPU you want to run on. These are all put together to create a template variable which describes CUTLASS Implicit GEMM kernel using cutlass::conv::device::ImplicitGemm template. The next step is to intialize physical data, instantiate and initialize CUTLASS kernel and run it. We use CUTLASS utilities to initialize, fill, compare tensors as they are simple and doesn't come in the way of learning CUTLASS. Once all the tensors are initialized and filled with data, create arguments tuple to launch CUTLASS kernel which takes problem size (N = 1, H = 64, W = 64, C = 128), filter size (K = 64, R = 3, S = 3, C = 128 ), padding, strides, dilation, tensors, alpha, beta and the important one, split k-dimension factor. Along with that, we query CUTLASS if any scratch-space memory required by the kernel we instantiated. 
If yes, we create it and pass it along with other arguments created to intialize CUTLASS kernel then, the kernel is launched. In this example, we later on launch a reference convolution kernel (from CUTLASS utilities) to compare if the output from CUTLASS kernel is same as the reference implicit GEMM kernel. */ #include <iostream> #include <sstream> #include "cutlass/cutlass.h" #include "cutlass/gemm/device/gemm.h" #include "cutlass/conv/kernel/default_conv2d_fprop.h" #include "cutlass/conv/device/implicit_gemm_convolution.h" #include "cutlass/util/command_line.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/tensor_view_io.h" #include "cutlass/util/reference/device/gemm.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/tensor_copy.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/util/reference/host/convolution.h" #include "cutlass/util/tensor_view_io.h" #include "helper.h" // The code section below describes datatype for input, output tensors and // computation between elements using ElementAccumulator = int32_t; // Data type of accumulator using ElementComputeEpilogue = float; // Data type of epilogue computation (alpha, beta) using ElementInputA = cutlass::int4b_t; // Data type of elements in input tensor using ElementInputB = cutlass::int4b_t; // Data type of elements in input tensor using ElementOutput = cutlass::int4b_t; // Data type of elements in output tensor using LayoutInputA = cutlass::layout::TensorNHWC; using LayoutInputB = cutlass::layout::TensorNHWC; using LayoutOutput = cutlass::layout::TensorNHWC; // This code section describes whether you want to use tensor cores or regular // SIMT cores on GPU SM using MMAOp = cutlass::arch::OpClassTensorOp; // This code section describes CUDA SM architecture number using SmArch = cutlass::arch::Sm75; // This code section describes the tile size a thread block will compute using ThreadblockShape = cutlass::gemm::GemmShape<128, 
128, 128>; // Threadblock tile shape // This code section describes tile size a warp will compute using WarpShape = cutlass::gemm::GemmShape<64, 64, 128>; // Warp tile shape // This code section describes the size of MMA op using InstructionShape = cutlass::gemm::GemmShape<8, 8, 32>; // TensorCore instruction shape // This code section describes how threadblocks are scheduled on GPU using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>; // Number of pipelines you want to use constexpr int NumStages = 2; // This code section describes the epilogue part of the kernel, we use default // value using EpilogueOp = cutlass::epilogue::thread::LinearCombinationClamp< ElementOutput, // Data type of output matrix. 8, // The number of elements per vectorized. // memory access. This becomes the vector width of // math instructions in the epilogue too. ElementAccumulator, // Data type of accumulator ElementComputeEpilogue>; // Data type for alpha/beta in linear // combination using Conv2dFpropKernel = typename cutlass::conv::kernel::DefaultConv2dFprop< ElementInputA, LayoutInputA, ElementInputB, LayoutInputB, ElementOutput, LayoutOutput, ElementAccumulator, MMAOp, SmArch, ThreadblockShape, WarpShape, InstructionShape, EpilogueOp, SwizzleThreadBlock, NumStages, cutlass::arch::OpMultiplyAddSaturate, cutlass::conv::IteratorAlgorithm::kAnalytic>::Kernel; using ImplicitGemm = cutlass::conv::device::ImplicitGemmConvolution<Conv2dFpropKernel>; ///////////////////////////////////////////////////////////////////////////////////////////////// // Command line options parsing struct Options { bool help; cutlass::Tensor4DCoord input_size; cutlass::Tensor4DCoord filter_size; cutlass::Tensor4DCoord padding; cutlass::MatrixCoord conv_stride; cutlass::MatrixCoord dilation; bool reference_check; bool measure_performance; int iterations; bool save_workspace; ElementComputeEpilogue alpha; ElementComputeEpilogue beta; bool benchmark; std::string tag; Options() : 
help(false), input_size(1, 32, 32, 32), filter_size(32, 3, 3, 32), padding(1, 1, 1, 1), conv_stride(1, 1), dilation(1, 1), reference_check(false), measure_performance(true), iterations(20), save_workspace(false), alpha(1), beta(0), benchmark(false) {} // Verify the problem size is compatible with the CUTLASS Convolution // implementation. bool valid() { // // CUTLASS attempts to load 128b vectors of int4b_t elements. // Consequently, all pointers, strides, and tensor extents must be // divisible by 32 elements. // int const kAlignment = 32; if ((input_size.c() % kAlignment) || (filter_size.n() % kAlignment)) { // misaligned tensors return false; } // Invalid padding if ((padding.h() != filter_size.h() / 2) || (padding.w() != filter_size.w() / 2)) { return false; } return true; } /// Updates input and filter sizes void update(cutlass::Tensor4DCoord input_size, cutlass::Tensor4DCoord filter_size) { this->input_size = input_size; this->filter_size = filter_size; padding.n() = filter_size.h() / 2; padding.h() = filter_size.h() / 2; padding.w() = filter_size.w() / 2; padding.c() = filter_size.w() / 2; } // Parses the command line void parse(int argc, char const** args) { cutlass::CommandLine cmd(argc, args); if (cmd.check_cmd_line_flag("help")) { help = true; } if (cmd.check_cmd_line_flag("ref-check")) { reference_check = true; } if (cmd.check_cmd_line_flag("perf-check")) { measure_performance = true; } if (cmd.check_cmd_line_flag("save-workspace")) { save_workspace = true; } if (cmd.check_cmd_line_flag("benchmark")) { benchmark = true; } cmd.get_cmd_line_argument("n", input_size.n()); cmd.get_cmd_line_argument("h", input_size.h()); cmd.get_cmd_line_argument("w", input_size.w()); cmd.get_cmd_line_argument("c", input_size.c()); cmd.get_cmd_line_argument("k", filter_size.n()); cmd.get_cmd_line_argument("r", filter_size.h()); cmd.get_cmd_line_argument("s", filter_size.w()); filter_size.c() = input_size.c(); cmd.get_cmd_line_argument("alpha", alpha); 
cmd.get_cmd_line_argument("beta", beta); cmd.get_cmd_line_argument("iterations", iterations); cmd.get_cmd_line_argument("tag", tag); if (filter_size.h() == 3 && filter_size.w() == 3) { padding = {1, 1, 1, 1}; } else { filter_size.h() = 1; filter_size.w() = 1; padding = {0, 0, 0, 0}; } } /// Prints the usage statement. std::ostream& print_usage(std::ostream& out) const { out << "09_turing_tensorop_conv2dfprop example\n\n" << " This example uses Turing's Tensor Core operators on int4 " "data types to compute\n" << " forward convolution on tensors of layout NHWC.\n\n" << "Options:\n\n" << " --help If specified, displays this usage " "statement.\n\n" << " --n <int> Input tensor extent N\n" << " --h <int> Input tensor extent H\n" << " --w <int> Input tensor extent W\n" << " --c <int> Input tensor extent C\n" << " --k <int> Filter extent K\n" << " --r <int> Filter extent R\n" << " --s <int> Filter extent S\n\n" << " --alpha <float> Epilogue scalar alpha\n" << " --beta <float> Epilogue scalar beta\n\n" << " --ref-check If set (true), reference check on the " "host is computed\n" << " --perf-check If set (true), performance is " "measured.\n" << " --benchmark If set (true), performance benchmarking " "on several layers and batch-size.\n" << " --iterations <int> Number of profiling iterations to " "perform.\n" << " --save-workspace If set, workspace is written to a text " "file.\n" << " --tag <string> String to replicate across the first " "column in the results table\n"; out << "\n\nExamples:\n\n" << "$ " "./examples/09_turing_tensorop_conv2dfprop/" "09_turing_tensorop_conv2dfprop --n=32 --h=224 --w=224 --c=128 " "--k=256 --r=1 --s=1\n\n" << "$ " "./examples/09_turing_tensorop_conv2dfprop/" "09_turing_tensorop_conv2dfprop --n=1 --h=224 --w=224 --c=32 " "--k=32 --r=3 --s=3 --ref-check\n\n"; return out; } /// Computes the output tensor size (NPQK) cutlass::Tensor4DCoord output_size() const { return cutlass::Tensor4DCoord( input_size.n(), (input_size.h() + padding.n() + 
padding.h() - filter_size.h()) / conv_stride.row() + 1, (input_size.w() + padding.w() + padding.c() - filter_size.w()) / conv_stride.column() + 1, filter_size.n()); } /// Compute performance in GFLOP/s double gflops(double runtime_s) const { // Number of multiply-adds = NPQK * CRS int64_t fmas = output_size().product() * int64_t(filter_size.h() * filter_size.w() * filter_size.c()); // Two flops per multiply-add return 2.0 * double(fmas) / double(1.0e9) / runtime_s; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// struct Result { double runtime_ms; double gflops; cutlass::Status status; cutlass::Status reference_check; cudaError_t error; Result() : runtime_ms(0), gflops(0), status(cutlass::Status::kSuccess), reference_check(cutlass::Status::kInvalid), error(cudaSuccess) {} static std::ostream& print_header(std::ostream& out, Options const& options) { if (!options.tag.empty()) { out << "Name,"; } out << "Layer,N,H,W,C,K,R,S,Runtime,GFLOPs"; return out; } std::ostream& print(std::ostream& out, int idx, Options const& options) { if (!options.tag.empty()) { out << options.tag << ","; } out << "conv_" << idx << "," << options.input_size.n() << "," << options.input_size.h() << "," << options.input_size.w() << "," << options.input_size.c() << "," << options.filter_size.n() << "," << options.filter_size.h() << "," << options.filter_size.w() << "," << runtime_ms << "," << gflops; return out; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Runs one benchmark Result profile_convolution(Options const& options) { Result result; // // Allocate host-device tensors using the CUTLASS Utilities. 
// cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a( options.input_size); cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b( options.filter_size); cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c( options.output_size()); cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_c( options.output_size()); // // Initialize tensors // // Fill tensor A on host with uniform-distribution random data cutlass::reference::host::TensorFillRandomUniform( tensor_a.host_view(), 1, ElementInputA(7), ElementInputA(-8), 0); // Fill tensor B on host with uniform-distribution random data cutlass::reference::host::TensorFillRandomUniform( tensor_b.host_view(), 1, ElementInputB(7), ElementInputB(-8), 0); // Fill tensor C on host with zeros cutlass::reference::host::TensorFill(tensor_c.host_view()); // Fill tensor C for reference on host with zeros cutlass::reference::host::TensorFill(tensor_ref_c.host_view()); // Copy data from host to GPU tensor_a.sync_device(); tensor_b.sync_device(); tensor_c.sync_device(); tensor_ref_c.sync_device(); // // Define arguments for CUTLASS Convolution // // mode (kCrossCorrelation or kConvolution) cutlass::conv::Mode mode = cutlass::conv::Mode::kCrossCorrelation; // Split K dimension into 1 partitions int split_k_slices = 1; cutlass::conv::Conv2dProblemSize problem_size( options.input_size, options.filter_size, options.padding, options.conv_stride, options.dilation, options.output_size(), mode, split_k_slices); typename ImplicitGemm::Arguments arguments{ problem_size, tensor_a.device_ref(), tensor_b.device_ref(), tensor_c.device_ref(), tensor_c.device_ref(), {options.alpha, options.beta}, }; // // Initialize CUTLASS Convolution // ImplicitGemm implicit_gemm_op; size_t workspace_size = implicit_gemm_op.get_workspace_size(arguments); // Allocate workspace memory cutlass::device_memory::allocation<uint8_t> workspace(workspace_size); result.status = implicit_gemm_op.initialize(arguments, workspace.get()); 
CUTLASS_CHECK(result.status); // // Launch initialized CUTLASS kernel // result.status = implicit_gemm_op(); CUTLASS_CHECK(result.status); // // Optional reference check // if (options.reference_check) { std::cout << "Verification on host...\n"; // Compute with reference implementation cutlass::reference::host::Conv2dFprop< ElementInputA, LayoutInputA, ElementInputB, LayoutInputB, ElementOutput, LayoutOutput, ElementComputeEpilogue, ElementAccumulator, cutlass::NumericConverterClamp<ElementOutput, ElementComputeEpilogue> >( problem_size, tensor_a.host_ref(), tensor_b.host_ref(), tensor_c.host_ref(), tensor_ref_c.host_ref(), options.alpha, options.beta); // Check if output from CUTLASS kernel and reference kernel are equal or // not tensor_c.sync_host(); bool passed = cutlass::reference::host::TensorEquals( tensor_c.host_view(), tensor_ref_c.host_view()); if (!passed) { result.reference_check = cutlass::Status::kErrorInternal; std::cout << "ERROR - results miscompared.\n"; } else { result.reference_check = cutlass::Status::kSuccess; std::cout << "Passed.\n"; } } else { result.reference_check = cutlass::Status::kInvalid; } if (options.save_workspace) { std::stringstream ss; ss << "09_tensor_conv_workspace_conv2dfprop_" << options.input_size.n() << "x" << options.input_size.h() << "x" << options.input_size.w() << "x" << options.input_size.c() << "_" << options.filter_size.n() << "x" << options.filter_size.h() << "x" << options.filter_size.w() << "x" << options.filter_size.c() << ".dat"; std::ofstream output_workspace(ss.str()); output_workspace << "Input = \n" << tensor_a.host_view() << "\n\n" << "Filters = \n" << tensor_b.host_view() << "\n\n"; if (options.reference_check) { output_workspace << "Reference = \n" << tensor_ref_c.host_view() << "\n\n"; } output_workspace << "Computed = \n" << tensor_c.host_view() << std::endl; std::cout << "Results written to '" << ss.str() << "'." 
<< std::endl; } // // Performance measurement // if (options.measure_performance) { cudaEvent_t events[2]; for (auto& event : events) { result.error = cudaEventCreate(&event); if (result.error != cudaSuccess) { std::cerr << "cudaEventCreate() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } } // Record an event at the start of a series of convolution operations. result.error = cudaEventRecord(events[0]); if (result.error != cudaSuccess) { std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } // Launch a sequence of implicit GEMM operations on the device for (int iteration = 0; iteration < options.iterations; ++iteration) { result.status = implicit_gemm_op(); CUTLASS_CHECK(result.status); } // Record an event when the convolutions have been launched. result.error = cudaEventRecord(events[1]); if (result.error != cudaSuccess) { std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } // Wait for work on the device to complete. result.error = cudaEventSynchronize(events[1]); if (result.error != cudaSuccess) { std::cerr << "cudaEventSynchronize() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } // Measure elapsed runtime float runtime_ms = 0; result.error = cudaEventElapsedTime(&runtime_ms, events[0], events[1]); if (result.error != cudaSuccess) { std::cerr << "cudaEventElapsed() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } // Print average runtime and GFLOPs. 
result.runtime_ms = double(runtime_ms) / double(options.iterations); result.gflops = options.gflops(result.runtime_ms / 1000.0); // Cleanup for (auto event : events) { (void)cudaEventDestroy(event); } } return result; } ///////////////////////////////////////////////////////////////////////////////////////////////// int main(int argc, char const** args) { // Turing Tensor Core operations exposed with mma.sync are first available // in CUDA 10.2. // // CUTLASS must be compiled with CUDA 10.2 Toolkit to run these examples. if (!(__CUDACC_VER_MAJOR__ > 10 || (__CUDACC_VER_MAJOR__ == 10 && __CUDACC_VER_MINOR__ >= 2))) { std::cerr << "Turing Tensor Core operations must be compiled with CUDA " "10.2 Toolkit or later." << std::endl; return 0; } cudaDeviceProp props; CUDA_CHECK(cudaGetDeviceProperties(&props, 0)); if (!(props.major > 7 || (props.major == 7 && props.minor >= 5))) { std::cerr << "Turing Tensor Ops must be run on a machine with compute " "capability at least 75." << std::endl; return 0; } Options options; options.parse(argc, args); if (options.help) { options.print_usage(std::cout) << std::endl; return 0; } if (options.benchmark) { // Benchmark several layers int batch_sizes[] = {1, 32, 64, 128, 256, 512}; struct Benchmark { int h, w, c, k, r, s; } layers[] = { {56, 56, 64, 256, 1, 1}, {56, 56, 64, 64, 1, 1}, {56, 56, 64, 64, 3, 3}, {56, 56, 256, 64, 1, 1}, {56, 56, 256, 512, 1, 1}, {56, 56, 256, 128, 1, 1}, {28, 28, 128, 128, 3, 3}, {28, 28, 128, 512, 1, 1}, {28, 28, 512, 128, 1, 1}, {28, 28, 512, 1024, 1, 1}, {28, 28, 512, 256, 1, 1}, {14, 14, 256, 256, 3, 3}, {14, 14, 256, 1024, 1, 1}, {14, 14, 1024, 256, 1, 1}, {14, 14, 1024, 2048, 1, 1}, {14, 14, 1024, 512, 1, 1}, {7, 7, 512, 512, 3, 3}, }; Result::print_header(std::cout, options) << std::endl; int idx = 1; for (auto const& layer : layers) { for (auto N : batch_sizes) { options.update({N, layer.h, layer.w, layer.c}, {layer.k, layer.r, layer.s, layer.c}); Result result = profile_convolution(options); 
result.print(std::cout, idx, options) << std::endl; } ++idx; } } else { // Execute one problem size if (!options.valid()) { std::cerr << "Invalid problem." << std::endl; return -1; } Result result = profile_convolution(options); Result::print_header(std::cout, options) << std::endl; result.print(std::cout, 1, options) << std::endl; } return 0; } /////////////////////////////////////////////////////////////////////////////////////////////////
b5767fd86f984085b9e4d2c37a039edf5a36ac43.hip
// !!! This is a file automatically generated by hipify!!! // Copyright (c) 2009-2017 The Regents of the University of Michigan // This file is part of the HOOMD-blue project, released under the BSD 3-Clause License. /*! \file DriverTersoffGPU.cu \brief Defines the driver functions for computing all types of three-body forces on the GPU */ #include "DriverTersoffGPU.cuh" #include "EvaluatorTersoff.h" hipError_t gpu_compute_tersoff_forces(const tersoff_args_t& pair_args, const tersoff_params *d_params) { return gpu_compute_triplet_forces<EvaluatorTersoff>(pair_args, d_params); } hipError_t gpu_compute_sq_density_forces(const tersoff_args_t& pair_args, const Scalar2 *d_params) { return gpu_compute_triplet_forces<EvaluatorSquareDensity>(pair_args, d_params); }
b5767fd86f984085b9e4d2c37a039edf5a36ac43.cu
// Copyright (c) 2009-2017 The Regents of the University of Michigan // This file is part of the HOOMD-blue project, released under the BSD 3-Clause License. /*! \file DriverTersoffGPU.cu \brief Defines the driver functions for computing all types of three-body forces on the GPU */ #include "DriverTersoffGPU.cuh" #include "EvaluatorTersoff.h" cudaError_t gpu_compute_tersoff_forces(const tersoff_args_t& pair_args, const tersoff_params *d_params) { return gpu_compute_triplet_forces<EvaluatorTersoff>(pair_args, d_params); } cudaError_t gpu_compute_sq_density_forces(const tersoff_args_t& pair_args, const Scalar2 *d_params) { return gpu_compute_triplet_forces<EvaluatorSquareDensity>(pair_args, d_params); }
48b8d07fb621c48bce426151b65e22f0263b8548.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*========================================================================= Program: Robarts Visualization Toolkit Module: CUDA_imagevote.cu Copyright (c) John SH Baxter, Robarts Research Institute This software is distributed WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the above copyright notice for more information. =========================================================================*/ /** @file CUDA_imagevote.cu * * @brief Implementation file with definitions of GPU kernels used predominantly in performing a voting * operation to merge probabilistic labellings * * @author John Stuart Haberl Baxter (Dr. Peters' Lab (VASST) at Robarts Research Institute) * * @note August 27th 2013 - Documentation first compiled. * */ #include "CUDA_commonKernels.h" #include "CUDA_imagevote.h" #include "vtkCudaCommon.h" template<typename IT, typename OT> __global__ void CUDA_CIV_kernMinWithMap(IT* inputBuffer, IT* currentMax, OT* outputBuffer, OT newMapVal, int size) { int idx = CUDASTDOFFSET; IT inputValue = inputBuffer[idx]; IT previValue = currentMax[idx]; OT previMap = outputBuffer[idx]; OT newMap = (inputValue >= previValue) ? newMapVal: previMap; IT newVal = (inputValue >= previValue) ? 
inputValue: previValue; if( idx < size ) { currentMax[idx] = newVal; outputBuffer[idx] = newMap; } } template void CUDA_CIV_COMPUTE<double,double>( double** inputBuffers, int inputNum, double* outputBuffer, double* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<long,double>( long** inputBuffers, int inputNum, double* outputBuffer, double* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned long,double>( unsigned long** inputBuffers, int inputNum, double* outputBuffer, double* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<long long,double>( long long** inputBuffers, int inputNum, double* outputBuffer, double* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned long long,double>( unsigned long long** inputBuffers, int inputNum, double* outputBuffer, double* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<int,double>( int** inputBuffers, int inputNum, double* outputBuffer, double* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned int,double>( unsigned int** inputBuffers, int inputNum, double* outputBuffer, double* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<short,double>( short** inputBuffers, int inputNum, double* outputBuffer, double* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned short,double>( unsigned short** inputBuffers, int inputNum, double* outputBuffer, double* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<char,double>( char** inputBuffers, int inputNum, double* outputBuffer, double* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned char,double>( unsigned char** inputBuffers, int inputNum, double* outputBuffer, double* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<signed char,double>( signed char** inputBuffers, int inputNum, double* outputBuffer, double* map, int size, hipStream_t* stream); 
template void CUDA_CIV_COMPUTE<float,double>( float** inputBuffers, int inputNum, double* outputBuffer, double* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<double,long>( double** inputBuffers, int inputNum, long* outputBuffer, long* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<long,long>( long** inputBuffers, int inputNum, long* outputBuffer, long* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned long,long>( unsigned long** inputBuffers, int inputNum, long* outputBuffer, long* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<long long,long>( long long** inputBuffers, int inputNum, long* outputBuffer, long* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned long long,long>( unsigned long long** inputBuffers, int inputNum, long* outputBuffer, long* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<int,long>( int** inputBuffers, int inputNum, long* outputBuffer, long* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned int,long>( unsigned int** inputBuffers, int inputNum, long* outputBuffer, long* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<short,long>( short** inputBuffers, int inputNum, long* outputBuffer, long* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned short,long>( unsigned short** inputBuffers, int inputNum, long* outputBuffer, long* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<char,long>( char** inputBuffers, int inputNum, long* outputBuffer, long* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned char,long>( unsigned char** inputBuffers, int inputNum, long* outputBuffer, long* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<signed char,long>( signed char** inputBuffers, int inputNum, long* outputBuffer, long* map, int size, hipStream_t* stream); template void 
CUDA_CIV_COMPUTE<float,long>( float** inputBuffers, int inputNum, long* outputBuffer, long* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<double,unsigned long>( double** inputBuffers, int inputNum, unsigned long* outputBuffer, unsigned long* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<long,unsigned long>( long** inputBuffers, int inputNum, unsigned long* outputBuffer, unsigned long* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned long,unsigned long>( unsigned long** inputBuffers, int inputNum, unsigned long* outputBuffer, unsigned long* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<long long,unsigned long>( long long** inputBuffers, int inputNum, unsigned long* outputBuffer, unsigned long* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned long long,unsigned long>( unsigned long long** inputBuffers, int inputNum, unsigned long* outputBuffer, unsigned long* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<int,unsigned long>( int** inputBuffers, int inputNum, unsigned long* outputBuffer, unsigned long* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned int,unsigned long>( unsigned int** inputBuffers, int inputNum, unsigned long* outputBuffer, unsigned long* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<short,unsigned long>( short** inputBuffers, int inputNum, unsigned long* outputBuffer, unsigned long* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned short,unsigned long>( unsigned short** inputBuffers, int inputNum, unsigned long* outputBuffer, unsigned long* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<char,unsigned long>( char** inputBuffers, int inputNum, unsigned long* outputBuffer, unsigned long* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned char,unsigned long>( unsigned char** inputBuffers, int 
inputNum, unsigned long* outputBuffer, unsigned long* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<signed char,unsigned long>( signed char** inputBuffers, int inputNum, unsigned long* outputBuffer, unsigned long* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<float,unsigned long>( float** inputBuffers, int inputNum, unsigned long* outputBuffer, unsigned long* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<double,long long>( double** inputBuffers, int inputNum, long long* outputBuffer, long long* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<long,long long>( long** inputBuffers, int inputNum, long long* outputBuffer, long long* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned long,long long>( unsigned long** inputBuffers, int inputNum, long long* outputBuffer, long long* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<long long,long long>( long long** inputBuffers, int inputNum, long long* outputBuffer, long long* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned long long,long long>( unsigned long long** inputBuffers, int inputNum, long long* outputBuffer, long long* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<int,long long>( int** inputBuffers, int inputNum, long long* outputBuffer, long long* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned int,long long>( unsigned int** inputBuffers, int inputNum, long long* outputBuffer, long long* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<short,long long>( short** inputBuffers, int inputNum, long long* outputBuffer, long long* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned short,long long>( unsigned short** inputBuffers, int inputNum, long long* outputBuffer, long long* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<char,long long>( char** 
inputBuffers, int inputNum, long long* outputBuffer, long long* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned char,long long>( unsigned char** inputBuffers, int inputNum, long long* outputBuffer, long long* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<signed char,long long>( signed char** inputBuffers, int inputNum, long long* outputBuffer, long long* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<float,long long>( float** inputBuffers, int inputNum, long long* outputBuffer, long long* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<double,unsigned long long>( double** inputBuffers, int inputNum, unsigned long long* outputBuffer, unsigned long long* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<long,unsigned long long>( long** inputBuffers, int inputNum, unsigned long long* outputBuffer, unsigned long long* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned long,unsigned long long>( unsigned long** inputBuffers, int inputNum, unsigned long long* outputBuffer, unsigned long long* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<long long,unsigned long long>( long long** inputBuffers, int inputNum, unsigned long long* outputBuffer, unsigned long long* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned long long,unsigned long long>( unsigned long long** inputBuffers, int inputNum, unsigned long long* outputBuffer, unsigned long long* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<int,unsigned long long>( int** inputBuffers, int inputNum, unsigned long long* outputBuffer, unsigned long long* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned int,unsigned long long>( unsigned int** inputBuffers, int inputNum, unsigned long long* outputBuffer, unsigned long long* map, int size, hipStream_t* stream); template void 
CUDA_CIV_COMPUTE<short,unsigned long long>( short** inputBuffers, int inputNum, unsigned long long* outputBuffer, unsigned long long* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned short,unsigned long long>( unsigned short** inputBuffers, int inputNum, unsigned long long* outputBuffer, unsigned long long* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<char,unsigned long long>( char** inputBuffers, int inputNum, unsigned long long* outputBuffer, unsigned long long* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned char,unsigned long long>( unsigned char** inputBuffers, int inputNum, unsigned long long* outputBuffer, unsigned long long* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<signed char,unsigned long long>( signed char** inputBuffers, int inputNum, unsigned long long* outputBuffer, unsigned long long* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<float,unsigned long long>( float** inputBuffers, int inputNum, unsigned long long* outputBuffer, unsigned long long* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<double,int>( double** inputBuffers, int inputNum, int* outputBuffer, int* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<long,int>( long** inputBuffers, int inputNum, int* outputBuffer, int* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned long,int>( unsigned long** inputBuffers, int inputNum, int* outputBuffer, int* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<long long,int>( long long** inputBuffers, int inputNum, int* outputBuffer, int* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned long long,int>( unsigned long long** inputBuffers, int inputNum, int* outputBuffer, int* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<int,int>( int** inputBuffers, int inputNum, int* outputBuffer, int* map, int 
size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned int,int>( unsigned int** inputBuffers, int inputNum, int* outputBuffer, int* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<short,int>( short** inputBuffers, int inputNum, int* outputBuffer, int* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned short,int>( unsigned short** inputBuffers, int inputNum, int* outputBuffer, int* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<char,int>( char** inputBuffers, int inputNum, int* outputBuffer, int* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned char,int>( unsigned char** inputBuffers, int inputNum, int* outputBuffer, int* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<signed char,int>( signed char** inputBuffers, int inputNum, int* outputBuffer, int* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<float,int>( float** inputBuffers, int inputNum, int* outputBuffer, int* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<double,unsigned int>( double** inputBuffers, int inputNum, unsigned int* outputBuffer, unsigned int* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<long,unsigned int>( long** inputBuffers, int inputNum, unsigned int* outputBuffer, unsigned int* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned long,unsigned int>( unsigned long** inputBuffers, int inputNum, unsigned int* outputBuffer, unsigned int* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<long long,unsigned int>( long long** inputBuffers, int inputNum, unsigned int* outputBuffer, unsigned int* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned long long,unsigned int>( unsigned long long** inputBuffers, int inputNum, unsigned int* outputBuffer, unsigned int* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<int,unsigned 
int>( int** inputBuffers, int inputNum, unsigned int* outputBuffer, unsigned int* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned int,unsigned int>( unsigned int** inputBuffers, int inputNum, unsigned int* outputBuffer, unsigned int* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<short,unsigned int>( short** inputBuffers, int inputNum, unsigned int* outputBuffer, unsigned int* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned short,unsigned int>( unsigned short** inputBuffers, int inputNum, unsigned int* outputBuffer, unsigned int* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<char,unsigned int>( char** inputBuffers, int inputNum, unsigned int* outputBuffer, unsigned int* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned char,unsigned int>( unsigned char** inputBuffers, int inputNum, unsigned int* outputBuffer, unsigned int* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<signed char,unsigned int>( signed char** inputBuffers, int inputNum, unsigned int* outputBuffer, unsigned int* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<float,unsigned int>( float** inputBuffers, int inputNum, unsigned int* outputBuffer, unsigned int* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<double,short>( double** inputBuffers, int inputNum, short* outputBuffer, short* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<long,short>( long** inputBuffers, int inputNum, short* outputBuffer, short* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned long,short>( unsigned long** inputBuffers, int inputNum, short* outputBuffer, short* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<long long,short>( long long** inputBuffers, int inputNum, short* outputBuffer, short* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned long 
long,short>( unsigned long long** inputBuffers, int inputNum, short* outputBuffer, short* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<int,short>( int** inputBuffers, int inputNum, short* outputBuffer, short* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned int,short>( unsigned int** inputBuffers, int inputNum, short* outputBuffer, short* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<short,short>( short** inputBuffers, int inputNum, short* outputBuffer, short* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned short,short>( unsigned short** inputBuffers, int inputNum, short* outputBuffer, short* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<char,short>( char** inputBuffers, int inputNum, short* outputBuffer, short* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned char,short>( unsigned char** inputBuffers, int inputNum, short* outputBuffer, short* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<signed char,short>( signed char** inputBuffers, int inputNum, short* outputBuffer, short* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<float,short>( float** inputBuffers, int inputNum, short* outputBuffer, short* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<double,unsigned short>( double** inputBuffers, int inputNum, unsigned short* outputBuffer, unsigned short* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<long,unsigned short>( long** inputBuffers, int inputNum, unsigned short* outputBuffer, unsigned short* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned long,unsigned short>( unsigned long** inputBuffers, int inputNum, unsigned short* outputBuffer, unsigned short* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<long long,unsigned short>( long long** inputBuffers, int inputNum, unsigned short* 
outputBuffer, unsigned short* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned long long,unsigned short>( unsigned long long** inputBuffers, int inputNum, unsigned short* outputBuffer, unsigned short* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<int,unsigned short>( int** inputBuffers, int inputNum, unsigned short* outputBuffer, unsigned short* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned int,unsigned short>( unsigned int** inputBuffers, int inputNum, unsigned short* outputBuffer, unsigned short* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<short,unsigned short>( short** inputBuffers, int inputNum, unsigned short* outputBuffer, unsigned short* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned short,unsigned short>( unsigned short** inputBuffers, int inputNum, unsigned short* outputBuffer, unsigned short* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<char,unsigned short>( char** inputBuffers, int inputNum, unsigned short* outputBuffer, unsigned short* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned char,unsigned short>( unsigned char** inputBuffers, int inputNum, unsigned short* outputBuffer, unsigned short* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<signed char,unsigned short>( signed char** inputBuffers, int inputNum, unsigned short* outputBuffer, unsigned short* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<float,unsigned short>( float** inputBuffers, int inputNum, unsigned short* outputBuffer, unsigned short* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<double,char>( double** inputBuffers, int inputNum, char* outputBuffer, char* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<long,char>( long** inputBuffers, int inputNum, char* outputBuffer, char* map, int size, hipStream_t* stream); template 
void CUDA_CIV_COMPUTE<unsigned long,char>( unsigned long** inputBuffers, int inputNum, char* outputBuffer, char* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<long long,char>( long long** inputBuffers, int inputNum, char* outputBuffer, char* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned long long,char>( unsigned long long** inputBuffers, int inputNum, char* outputBuffer, char* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<int,char>( int** inputBuffers, int inputNum, char* outputBuffer, char* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned int,char>( unsigned int** inputBuffers, int inputNum, char* outputBuffer, char* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<short,char>( short** inputBuffers, int inputNum, char* outputBuffer, char* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned short,char>( unsigned short** inputBuffers, int inputNum, char* outputBuffer, char* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<char,char>( char** inputBuffers, int inputNum, char* outputBuffer, char* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned char,char>( unsigned char** inputBuffers, int inputNum, char* outputBuffer, char* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<signed char,char>( signed char** inputBuffers, int inputNum, char* outputBuffer, char* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<float,char>( float** inputBuffers, int inputNum, char* outputBuffer, char* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<double,unsigned char>( double** inputBuffers, int inputNum, unsigned char* outputBuffer, unsigned char* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<long,unsigned char>( long** inputBuffers, int inputNum, unsigned char* outputBuffer, unsigned char* map, int size, hipStream_t* 
stream); template void CUDA_CIV_COMPUTE<unsigned long,unsigned char>( unsigned long** inputBuffers, int inputNum, unsigned char* outputBuffer, unsigned char* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<long long,unsigned char>( long long** inputBuffers, int inputNum, unsigned char* outputBuffer, unsigned char* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned long long,unsigned char>( unsigned long long** inputBuffers, int inputNum, unsigned char* outputBuffer, unsigned char* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<int,unsigned char>( int** inputBuffers, int inputNum, unsigned char* outputBuffer, unsigned char* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned int,unsigned char>( unsigned int** inputBuffers, int inputNum, unsigned char* outputBuffer, unsigned char* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<short,unsigned char>( short** inputBuffers, int inputNum, unsigned char* outputBuffer, unsigned char* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned short,unsigned char>( unsigned short** inputBuffers, int inputNum, unsigned char* outputBuffer, unsigned char* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<char,unsigned char>( char** inputBuffers, int inputNum, unsigned char* outputBuffer, unsigned char* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned char,unsigned char>( unsigned char** inputBuffers, int inputNum, unsigned char* outputBuffer, unsigned char* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<signed char,unsigned char>( signed char** inputBuffers, int inputNum, unsigned char* outputBuffer, unsigned char* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<float,unsigned char>( float** inputBuffers, int inputNum, unsigned char* outputBuffer, unsigned char* map, int size, hipStream_t* stream); template void 
CUDA_CIV_COMPUTE<double,signed char>( double** inputBuffers, int inputNum, signed char* outputBuffer, signed char* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<long,signed char>( long** inputBuffers, int inputNum, signed char* outputBuffer, signed char* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned long,signed char>( unsigned long** inputBuffers, int inputNum, signed char* outputBuffer, signed char* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<long long,signed char>( long long** inputBuffers, int inputNum, signed char* outputBuffer, signed char* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned long long,signed char>( unsigned long long** inputBuffers, int inputNum, signed char* outputBuffer, signed char* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<int,signed char>( int** inputBuffers, int inputNum, signed char* outputBuffer, signed char* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned int,signed char>( unsigned int** inputBuffers, int inputNum, signed char* outputBuffer, signed char* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<short,signed char>( short** inputBuffers, int inputNum, signed char* outputBuffer, signed char* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned short,signed char>( unsigned short** inputBuffers, int inputNum, signed char* outputBuffer, signed char* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<char,signed char>( char** inputBuffers, int inputNum, signed char* outputBuffer, signed char* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned char,signed char>( unsigned char** inputBuffers, int inputNum, signed char* outputBuffer, signed char* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<signed char,signed char>( signed char** inputBuffers, int inputNum, signed char* 
outputBuffer, signed char* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<float,signed char>( float** inputBuffers, int inputNum, signed char* outputBuffer, signed char* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<double,float>( double** inputBuffers, int inputNum, float* outputBuffer, float* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<long,float>( long** inputBuffers, int inputNum, float* outputBuffer, float* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned long,float>( unsigned long** inputBuffers, int inputNum, float* outputBuffer, float* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<long long,float>( long long** inputBuffers, int inputNum, float* outputBuffer, float* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned long long,float>( unsigned long long** inputBuffers, int inputNum, float* outputBuffer, float* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<int,float>( int** inputBuffers, int inputNum, float* outputBuffer, float* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned int,float>( unsigned int** inputBuffers, int inputNum, float* outputBuffer, float* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<short,float>( short** inputBuffers, int inputNum, float* outputBuffer, float* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned short,float>( unsigned short** inputBuffers, int inputNum, float* outputBuffer, float* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<char,float>( char** inputBuffers, int inputNum, float* outputBuffer, float* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned char,float>( unsigned char** inputBuffers, int inputNum, float* outputBuffer, float* map, int size, hipStream_t* stream); template void CUDA_CIV_COMPUTE<signed char,float>( signed char** 
inputBuffers, int inputNum, float* outputBuffer, float* map, int size, hipStream_t* stream);
template void CUDA_CIV_COMPUTE<float,float>( float** inputBuffers, int inputNum, float* outputBuffer, float* map, int size, hipStream_t* stream);

// Host-side driver for the image-vote operation.
// Streams each of the inputNum host images to the GPU one at a time and compares it
// voxel-wise against a running maximum image; wherever the new image wins (>=), the
// per-voxel output label is overwritten with map[i]. The result is copied back to host.
//
//  inputBuffers - host array of inputNum pointers to host image buffers, `size` voxels each
//  outputBuffer - host buffer (`size` voxels) receiving the winning label per voxel
//  map          - host array of inputNum label values, one per input image
//  size         - number of voxels per image
//  stream       - HIP stream used for all copies and kernel launches (synchronized before return)
template<typename IT, typename OT>
void CUDA_CIV_COMPUTE( IT** inputBuffers, int inputNum, OT* outputBuffer, OT* map, int size, hipStream_t* stream)
{
  //nothing to vote on - avoid reading inputBuffers[0] out of bounds below
  if( inputNum <= 0 || size <= 0 )
  {
    return;
  }

  dim3 threads(NUMTHREADS,1,1);
  dim3 grid = GetGrid(size);

  //allocate GPU input, output, and maximum value buffers
  IT* gpuMaxBuffer = 0;
  IT* gpuInBuffer = 0;
  OT* gpuOutBuffer = 0;
  hipMalloc( &gpuMaxBuffer, sizeof(IT)*size );
  hipMalloc( &gpuInBuffer, sizeof(IT)*size );
  hipMalloc( &gpuOutBuffer, sizeof(OT)*size );

  //if any allocation failed, release whatever did allocate rather than
  //launching kernels on null device pointers
  if( !gpuMaxBuffer || !gpuInBuffer || !gpuOutBuffer )
  {
    hipFree(gpuMaxBuffer);
    hipFree(gpuInBuffer);
    hipFree(gpuOutBuffer);
    return;
  }

  //initialize max buffer with the first input; the first voting pass then labels
  //every voxel (input[0] >= max(=input[0]) holds everywhere)
  hipMemcpyAsync( gpuMaxBuffer, inputBuffers[0], sizeof(IT)*size, hipMemcpyHostToDevice, *stream);
  //zero the output label buffer so the kernel's read of the previous label on the
  //first pass never touches uninitialized device memory
  hipMemsetAsync( gpuOutBuffer, 0, sizeof(OT)*size, *stream );

  for(int i = 0; i < inputNum; i++)
  {
    //copy current input in
    hipMemcpyAsync( gpuInBuffer, inputBuffers[i], sizeof(IT)*size, hipMemcpyHostToDevice, *stream);
    //perform kernel - keep the larger of (current input, running max) and record its label
    hipLaunchKernelGGL(( CUDA_CIV_kernMinWithMap<IT,OT>), dim3(grid),dim3(threads),0,*stream, gpuInBuffer, gpuMaxBuffer, gpuOutBuffer, map[i], size);
  }

  //copy output back
  hipMemcpyAsync( outputBuffer, gpuOutBuffer, sizeof(OT)*size, hipMemcpyDeviceToHost, *stream);

  //sync everything so the host-side outputBuffer is valid on return
  hipStreamSynchronize(*stream);

  //deallocate buffers
  hipFree(gpuMaxBuffer);
  hipFree(gpuInBuffer);
  hipFree(gpuOutBuffer);
}
48b8d07fb621c48bce426151b65e22f0263b8548.cu
/*=========================================================================

  Program:   Robarts Visualization Toolkit
  Module:    CUDA_imagevote.cu

  Copyright (c) John SH Baxter, Robarts Research Institute

  This software is distributed WITHOUT ANY WARRANTY; without even
  the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
  PURPOSE.  See the above copyright notice for more information.

=========================================================================*/

/** @file CUDA_imagevote.cu
 *
 *  @brief Implementation file with definitions of GPU kernels used predominantly in performing a voting
 *      operation to merge probabilistic labellings
 *
 *  @author John Stuart Haberl Baxter (Dr. Peters' Lab (VASST) at Robarts Research Institute)
 *
 *  @note August 27th 2013 - Documentation first compiled.
 *
 */

#include "CUDA_commonKernels.h"
#include "CUDA_imagevote.h"
#include "vtkCudaCommon.h"

// One voting step: compares this input image against the running per-voxel maximum
// and, wherever the input wins (>=), records newMapVal as the voxel's label.
// (Despite the "Min" in the name, the code keeps the MAXIMUM of the two values.)
//
//  inputBuffer  - current candidate image (size elements)
//  currentMax   - running maximum image, updated in place
//  outputBuffer - label map, read and updated in place (caller must initialize it
//                 before the first pass)
//  newMapVal    - label value associated with inputBuffer
//  size         - number of voxels; threads with idx >= size do nothing
template<typename IT, typename OT>
__global__ void CUDA_CIV_kernMinWithMap(IT* inputBuffer, IT* currentMax, OT* outputBuffer, OT newMapVal, int size)
{
  int idx = CUDASTDOFFSET;
  // guard ALL buffer accesses: the grid is rounded up to a multiple of the block
  // size, so trailing threads must not read past the end of the buffers (the
  // previous version guarded only the writes, leaving the reads out of bounds)
  if( idx < size )
  {
    IT inputValue = inputBuffer[idx];
    IT previValue = currentMax[idx];
    OT previMap = outputBuffer[idx];
    currentMax[idx]   = (inputValue >= previValue) ? inputValue : previValue;
    outputBuffer[idx] = (inputValue >= previValue) ? newMapVal  : previMap;
  }
}

template void CUDA_CIV_COMPUTE<double,double>( double** inputBuffers, int inputNum, double* outputBuffer, double* map, int size, cudaStream_t* stream);
template void CUDA_CIV_COMPUTE<long,double>( long** inputBuffers, int inputNum, double* outputBuffer, double* map, int size, cudaStream_t* stream);
template void CUDA_CIV_COMPUTE<unsigned long,double>( unsigned long** inputBuffers, int inputNum, double* outputBuffer, double* map, int size, cudaStream_t* stream);
template void CUDA_CIV_COMPUTE<long long,double>( long long** inputBuffers, int inputNum, double* outputBuffer, double* map, int size, cudaStream_t* stream);
template void CUDA_CIV_COMPUTE<unsigned long long,double>( unsigned long long** inputBuffers, int inputNum, double* outputBuffer, double* map, int size, cudaStream_t* stream);
template void CUDA_CIV_COMPUTE<int,double>( int** inputBuffers, int inputNum, double* outputBuffer, double* map, int size, cudaStream_t* stream);
template void CUDA_CIV_COMPUTE<unsigned int,double>( unsigned int** inputBuffers, int inputNum, double* outputBuffer, double* map, int size, cudaStream_t* stream);
template void CUDA_CIV_COMPUTE<short,double>( short** inputBuffers, int inputNum, double* outputBuffer, double* map, int size, cudaStream_t* stream);
template void CUDA_CIV_COMPUTE<unsigned short,double>( unsigned short** inputBuffers, int inputNum, double* outputBuffer, double* map, int size, cudaStream_t* stream);
template void CUDA_CIV_COMPUTE<char,double>( char** inputBuffers, int inputNum, double* outputBuffer, double* map, int size, cudaStream_t* stream);
template void CUDA_CIV_COMPUTE<unsigned char,double>( unsigned char** inputBuffers, int inputNum, double* outputBuffer, double* map, int size, cudaStream_t* stream);
template void CUDA_CIV_COMPUTE<signed char,double>( signed char** inputBuffers, int inputNum, double* outputBuffer, double* map, int size, cudaStream_t*
stream); template void CUDA_CIV_COMPUTE<float,double>( float** inputBuffers, int inputNum, double* outputBuffer, double* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<double,long>( double** inputBuffers, int inputNum, long* outputBuffer, long* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<long,long>( long** inputBuffers, int inputNum, long* outputBuffer, long* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned long,long>( unsigned long** inputBuffers, int inputNum, long* outputBuffer, long* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<long long,long>( long long** inputBuffers, int inputNum, long* outputBuffer, long* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned long long,long>( unsigned long long** inputBuffers, int inputNum, long* outputBuffer, long* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<int,long>( int** inputBuffers, int inputNum, long* outputBuffer, long* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned int,long>( unsigned int** inputBuffers, int inputNum, long* outputBuffer, long* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<short,long>( short** inputBuffers, int inputNum, long* outputBuffer, long* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned short,long>( unsigned short** inputBuffers, int inputNum, long* outputBuffer, long* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<char,long>( char** inputBuffers, int inputNum, long* outputBuffer, long* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned char,long>( unsigned char** inputBuffers, int inputNum, long* outputBuffer, long* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<signed char,long>( signed char** inputBuffers, int inputNum, long* outputBuffer, long* map, int size, cudaStream_t* stream); template 
void CUDA_CIV_COMPUTE<float,long>( float** inputBuffers, int inputNum, long* outputBuffer, long* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<double,unsigned long>( double** inputBuffers, int inputNum, unsigned long* outputBuffer, unsigned long* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<long,unsigned long>( long** inputBuffers, int inputNum, unsigned long* outputBuffer, unsigned long* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned long,unsigned long>( unsigned long** inputBuffers, int inputNum, unsigned long* outputBuffer, unsigned long* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<long long,unsigned long>( long long** inputBuffers, int inputNum, unsigned long* outputBuffer, unsigned long* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned long long,unsigned long>( unsigned long long** inputBuffers, int inputNum, unsigned long* outputBuffer, unsigned long* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<int,unsigned long>( int** inputBuffers, int inputNum, unsigned long* outputBuffer, unsigned long* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned int,unsigned long>( unsigned int** inputBuffers, int inputNum, unsigned long* outputBuffer, unsigned long* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<short,unsigned long>( short** inputBuffers, int inputNum, unsigned long* outputBuffer, unsigned long* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned short,unsigned long>( unsigned short** inputBuffers, int inputNum, unsigned long* outputBuffer, unsigned long* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<char,unsigned long>( char** inputBuffers, int inputNum, unsigned long* outputBuffer, unsigned long* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned char,unsigned long>( unsigned char** 
inputBuffers, int inputNum, unsigned long* outputBuffer, unsigned long* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<signed char,unsigned long>( signed char** inputBuffers, int inputNum, unsigned long* outputBuffer, unsigned long* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<float,unsigned long>( float** inputBuffers, int inputNum, unsigned long* outputBuffer, unsigned long* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<double,long long>( double** inputBuffers, int inputNum, long long* outputBuffer, long long* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<long,long long>( long** inputBuffers, int inputNum, long long* outputBuffer, long long* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned long,long long>( unsigned long** inputBuffers, int inputNum, long long* outputBuffer, long long* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<long long,long long>( long long** inputBuffers, int inputNum, long long* outputBuffer, long long* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned long long,long long>( unsigned long long** inputBuffers, int inputNum, long long* outputBuffer, long long* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<int,long long>( int** inputBuffers, int inputNum, long long* outputBuffer, long long* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned int,long long>( unsigned int** inputBuffers, int inputNum, long long* outputBuffer, long long* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<short,long long>( short** inputBuffers, int inputNum, long long* outputBuffer, long long* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned short,long long>( unsigned short** inputBuffers, int inputNum, long long* outputBuffer, long long* map, int size, cudaStream_t* stream); template void 
CUDA_CIV_COMPUTE<char,long long>( char** inputBuffers, int inputNum, long long* outputBuffer, long long* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned char,long long>( unsigned char** inputBuffers, int inputNum, long long* outputBuffer, long long* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<signed char,long long>( signed char** inputBuffers, int inputNum, long long* outputBuffer, long long* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<float,long long>( float** inputBuffers, int inputNum, long long* outputBuffer, long long* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<double,unsigned long long>( double** inputBuffers, int inputNum, unsigned long long* outputBuffer, unsigned long long* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<long,unsigned long long>( long** inputBuffers, int inputNum, unsigned long long* outputBuffer, unsigned long long* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned long,unsigned long long>( unsigned long** inputBuffers, int inputNum, unsigned long long* outputBuffer, unsigned long long* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<long long,unsigned long long>( long long** inputBuffers, int inputNum, unsigned long long* outputBuffer, unsigned long long* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned long long,unsigned long long>( unsigned long long** inputBuffers, int inputNum, unsigned long long* outputBuffer, unsigned long long* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<int,unsigned long long>( int** inputBuffers, int inputNum, unsigned long long* outputBuffer, unsigned long long* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned int,unsigned long long>( unsigned int** inputBuffers, int inputNum, unsigned long long* outputBuffer, unsigned long long* map, int size, 
cudaStream_t* stream); template void CUDA_CIV_COMPUTE<short,unsigned long long>( short** inputBuffers, int inputNum, unsigned long long* outputBuffer, unsigned long long* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned short,unsigned long long>( unsigned short** inputBuffers, int inputNum, unsigned long long* outputBuffer, unsigned long long* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<char,unsigned long long>( char** inputBuffers, int inputNum, unsigned long long* outputBuffer, unsigned long long* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned char,unsigned long long>( unsigned char** inputBuffers, int inputNum, unsigned long long* outputBuffer, unsigned long long* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<signed char,unsigned long long>( signed char** inputBuffers, int inputNum, unsigned long long* outputBuffer, unsigned long long* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<float,unsigned long long>( float** inputBuffers, int inputNum, unsigned long long* outputBuffer, unsigned long long* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<double,int>( double** inputBuffers, int inputNum, int* outputBuffer, int* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<long,int>( long** inputBuffers, int inputNum, int* outputBuffer, int* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned long,int>( unsigned long** inputBuffers, int inputNum, int* outputBuffer, int* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<long long,int>( long long** inputBuffers, int inputNum, int* outputBuffer, int* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned long long,int>( unsigned long long** inputBuffers, int inputNum, int* outputBuffer, int* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<int,int>( int** inputBuffers, 
int inputNum, int* outputBuffer, int* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned int,int>( unsigned int** inputBuffers, int inputNum, int* outputBuffer, int* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<short,int>( short** inputBuffers, int inputNum, int* outputBuffer, int* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned short,int>( unsigned short** inputBuffers, int inputNum, int* outputBuffer, int* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<char,int>( char** inputBuffers, int inputNum, int* outputBuffer, int* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned char,int>( unsigned char** inputBuffers, int inputNum, int* outputBuffer, int* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<signed char,int>( signed char** inputBuffers, int inputNum, int* outputBuffer, int* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<float,int>( float** inputBuffers, int inputNum, int* outputBuffer, int* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<double,unsigned int>( double** inputBuffers, int inputNum, unsigned int* outputBuffer, unsigned int* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<long,unsigned int>( long** inputBuffers, int inputNum, unsigned int* outputBuffer, unsigned int* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned long,unsigned int>( unsigned long** inputBuffers, int inputNum, unsigned int* outputBuffer, unsigned int* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<long long,unsigned int>( long long** inputBuffers, int inputNum, unsigned int* outputBuffer, unsigned int* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned long long,unsigned int>( unsigned long long** inputBuffers, int inputNum, unsigned int* outputBuffer, unsigned int* map, int size, 
cudaStream_t* stream); template void CUDA_CIV_COMPUTE<int,unsigned int>( int** inputBuffers, int inputNum, unsigned int* outputBuffer, unsigned int* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned int,unsigned int>( unsigned int** inputBuffers, int inputNum, unsigned int* outputBuffer, unsigned int* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<short,unsigned int>( short** inputBuffers, int inputNum, unsigned int* outputBuffer, unsigned int* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned short,unsigned int>( unsigned short** inputBuffers, int inputNum, unsigned int* outputBuffer, unsigned int* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<char,unsigned int>( char** inputBuffers, int inputNum, unsigned int* outputBuffer, unsigned int* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned char,unsigned int>( unsigned char** inputBuffers, int inputNum, unsigned int* outputBuffer, unsigned int* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<signed char,unsigned int>( signed char** inputBuffers, int inputNum, unsigned int* outputBuffer, unsigned int* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<float,unsigned int>( float** inputBuffers, int inputNum, unsigned int* outputBuffer, unsigned int* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<double,short>( double** inputBuffers, int inputNum, short* outputBuffer, short* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<long,short>( long** inputBuffers, int inputNum, short* outputBuffer, short* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned long,short>( unsigned long** inputBuffers, int inputNum, short* outputBuffer, short* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<long long,short>( long long** inputBuffers, int inputNum, short* outputBuffer, short* map, 
int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned long long,short>( unsigned long long** inputBuffers, int inputNum, short* outputBuffer, short* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<int,short>( int** inputBuffers, int inputNum, short* outputBuffer, short* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned int,short>( unsigned int** inputBuffers, int inputNum, short* outputBuffer, short* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<short,short>( short** inputBuffers, int inputNum, short* outputBuffer, short* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned short,short>( unsigned short** inputBuffers, int inputNum, short* outputBuffer, short* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<char,short>( char** inputBuffers, int inputNum, short* outputBuffer, short* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned char,short>( unsigned char** inputBuffers, int inputNum, short* outputBuffer, short* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<signed char,short>( signed char** inputBuffers, int inputNum, short* outputBuffer, short* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<float,short>( float** inputBuffers, int inputNum, short* outputBuffer, short* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<double,unsigned short>( double** inputBuffers, int inputNum, unsigned short* outputBuffer, unsigned short* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<long,unsigned short>( long** inputBuffers, int inputNum, unsigned short* outputBuffer, unsigned short* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned long,unsigned short>( unsigned long** inputBuffers, int inputNum, unsigned short* outputBuffer, unsigned short* map, int size, cudaStream_t* stream); template void 
CUDA_CIV_COMPUTE<long long,unsigned short>( long long** inputBuffers, int inputNum, unsigned short* outputBuffer, unsigned short* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned long long,unsigned short>( unsigned long long** inputBuffers, int inputNum, unsigned short* outputBuffer, unsigned short* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<int,unsigned short>( int** inputBuffers, int inputNum, unsigned short* outputBuffer, unsigned short* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned int,unsigned short>( unsigned int** inputBuffers, int inputNum, unsigned short* outputBuffer, unsigned short* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<short,unsigned short>( short** inputBuffers, int inputNum, unsigned short* outputBuffer, unsigned short* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned short,unsigned short>( unsigned short** inputBuffers, int inputNum, unsigned short* outputBuffer, unsigned short* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<char,unsigned short>( char** inputBuffers, int inputNum, unsigned short* outputBuffer, unsigned short* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned char,unsigned short>( unsigned char** inputBuffers, int inputNum, unsigned short* outputBuffer, unsigned short* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<signed char,unsigned short>( signed char** inputBuffers, int inputNum, unsigned short* outputBuffer, unsigned short* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<float,unsigned short>( float** inputBuffers, int inputNum, unsigned short* outputBuffer, unsigned short* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<double,char>( double** inputBuffers, int inputNum, char* outputBuffer, char* map, int size, cudaStream_t* stream); template void 
CUDA_CIV_COMPUTE<long,char>( long** inputBuffers, int inputNum, char* outputBuffer, char* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned long,char>( unsigned long** inputBuffers, int inputNum, char* outputBuffer, char* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<long long,char>( long long** inputBuffers, int inputNum, char* outputBuffer, char* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned long long,char>( unsigned long long** inputBuffers, int inputNum, char* outputBuffer, char* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<int,char>( int** inputBuffers, int inputNum, char* outputBuffer, char* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned int,char>( unsigned int** inputBuffers, int inputNum, char* outputBuffer, char* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<short,char>( short** inputBuffers, int inputNum, char* outputBuffer, char* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned short,char>( unsigned short** inputBuffers, int inputNum, char* outputBuffer, char* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<char,char>( char** inputBuffers, int inputNum, char* outputBuffer, char* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned char,char>( unsigned char** inputBuffers, int inputNum, char* outputBuffer, char* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<signed char,char>( signed char** inputBuffers, int inputNum, char* outputBuffer, char* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<float,char>( float** inputBuffers, int inputNum, char* outputBuffer, char* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<double,unsigned char>( double** inputBuffers, int inputNum, unsigned char* outputBuffer, unsigned char* map, int size, cudaStream_t* stream); template 
void CUDA_CIV_COMPUTE<long,unsigned char>( long** inputBuffers, int inputNum, unsigned char* outputBuffer, unsigned char* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned long,unsigned char>( unsigned long** inputBuffers, int inputNum, unsigned char* outputBuffer, unsigned char* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<long long,unsigned char>( long long** inputBuffers, int inputNum, unsigned char* outputBuffer, unsigned char* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned long long,unsigned char>( unsigned long long** inputBuffers, int inputNum, unsigned char* outputBuffer, unsigned char* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<int,unsigned char>( int** inputBuffers, int inputNum, unsigned char* outputBuffer, unsigned char* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned int,unsigned char>( unsigned int** inputBuffers, int inputNum, unsigned char* outputBuffer, unsigned char* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<short,unsigned char>( short** inputBuffers, int inputNum, unsigned char* outputBuffer, unsigned char* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned short,unsigned char>( unsigned short** inputBuffers, int inputNum, unsigned char* outputBuffer, unsigned char* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<char,unsigned char>( char** inputBuffers, int inputNum, unsigned char* outputBuffer, unsigned char* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned char,unsigned char>( unsigned char** inputBuffers, int inputNum, unsigned char* outputBuffer, unsigned char* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<signed char,unsigned char>( signed char** inputBuffers, int inputNum, unsigned char* outputBuffer, unsigned char* map, int size, cudaStream_t* stream); template void 
CUDA_CIV_COMPUTE<float,unsigned char>( float** inputBuffers, int inputNum, unsigned char* outputBuffer, unsigned char* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<double,signed char>( double** inputBuffers, int inputNum, signed char* outputBuffer, signed char* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<long,signed char>( long** inputBuffers, int inputNum, signed char* outputBuffer, signed char* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned long,signed char>( unsigned long** inputBuffers, int inputNum, signed char* outputBuffer, signed char* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<long long,signed char>( long long** inputBuffers, int inputNum, signed char* outputBuffer, signed char* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned long long,signed char>( unsigned long long** inputBuffers, int inputNum, signed char* outputBuffer, signed char* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<int,signed char>( int** inputBuffers, int inputNum, signed char* outputBuffer, signed char* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned int,signed char>( unsigned int** inputBuffers, int inputNum, signed char* outputBuffer, signed char* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<short,signed char>( short** inputBuffers, int inputNum, signed char* outputBuffer, signed char* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned short,signed char>( unsigned short** inputBuffers, int inputNum, signed char* outputBuffer, signed char* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<char,signed char>( char** inputBuffers, int inputNum, signed char* outputBuffer, signed char* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned char,signed char>( unsigned char** inputBuffers, int inputNum, signed char* 
outputBuffer, signed char* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<signed char,signed char>( signed char** inputBuffers, int inputNum, signed char* outputBuffer, signed char* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<float,signed char>( float** inputBuffers, int inputNum, signed char* outputBuffer, signed char* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<double,float>( double** inputBuffers, int inputNum, float* outputBuffer, float* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<long,float>( long** inputBuffers, int inputNum, float* outputBuffer, float* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned long,float>( unsigned long** inputBuffers, int inputNum, float* outputBuffer, float* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<long long,float>( long long** inputBuffers, int inputNum, float* outputBuffer, float* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned long long,float>( unsigned long long** inputBuffers, int inputNum, float* outputBuffer, float* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<int,float>( int** inputBuffers, int inputNum, float* outputBuffer, float* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned int,float>( unsigned int** inputBuffers, int inputNum, float* outputBuffer, float* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<short,float>( short** inputBuffers, int inputNum, float* outputBuffer, float* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned short,float>( unsigned short** inputBuffers, int inputNum, float* outputBuffer, float* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<char,float>( char** inputBuffers, int inputNum, float* outputBuffer, float* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<unsigned 
char,float>( unsigned char** inputBuffers, int inputNum, float* outputBuffer, float* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<signed char,float>( signed char** inputBuffers, int inputNum, float* outputBuffer, float* map, int size, cudaStream_t* stream); template void CUDA_CIV_COMPUTE<float,float>( float** inputBuffers, int inputNum, float* outputBuffer, float* map, int size, cudaStream_t* stream); template<typename IT, typename OT> void CUDA_CIV_COMPUTE( IT** inputBuffers, int inputNum, OT* outputBuffer, OT* map, int size, cudaStream_t* stream) { dim3 threads(NUMTHREADS,1,1); dim3 grid = GetGrid(size); //allocate GPU output buffer and maximum value buffer IT* gpuMaxBuffer = 0; IT* gpuInBuffer = 0; OT* gpuOutBuffer = 0; cudaMalloc( &gpuMaxBuffer, sizeof(IT)*size ); cudaMalloc( &gpuInBuffer, sizeof(IT)*size ); cudaMalloc( &gpuOutBuffer, sizeof(OT)*size ); //initialize max buffer cudaMemcpyAsync( gpuMaxBuffer, inputBuffers[0], sizeof(IT)*size, cudaMemcpyHostToDevice, *stream); for(int i = 0; i < inputNum; i++) { //copy current input in cudaMemcpyAsync( gpuInBuffer, inputBuffers[i], sizeof(IT)*size, cudaMemcpyHostToDevice, *stream); //perform kernel CUDA_CIV_kernMinWithMap<IT,OT><<<grid,threads,0,*stream>>>(gpuInBuffer, gpuMaxBuffer, gpuOutBuffer, map[i], size); } //copy output back cudaMemcpyAsync( outputBuffer, gpuOutBuffer, sizeof(OT)*size, cudaMemcpyDeviceToHost, *stream); //sync everything cudaStreamSynchronize(*stream); //deallocate buffers cudaFree(gpuMaxBuffer); cudaFree(gpuInBuffer); cudaFree(gpuOutBuffer); }
229934fbc51e60cda5159960a77d50ba6ebec7ee.hip
// !!! This is a file automatically generated by hipify!!! #include <cstdlib> #include <cstdio> #include "testproc.cuh" #include "kernels_hip.cuh" #define NAME(proc) #proc static int runScenario(int scenario, dim3 domain, bool verify) { printf("----------------------------------------------\n"); printf(" Test scenario %d\n\n", scenario); switch (scenario) { case 0: { runTest(RatesVersion1, domain, verify, NAME(RatesVersion1)); runTest(RatesVersion2, domain, verify, NAME(RatesVersion2)); runTest(RatesVersion3<256>, domain, verify, NAME(RatesVersion3<256>)); } break; case 1: { runTest(RatesVersion1, domain, verify, NAME(RatesVersion1)); } break; case 2: { runTest(RatesVersion2, domain, verify, NAME(RatesVersion2)); } break; case 3: { runTest(RatesVersion3<256>, domain, verify, NAME(RatesVersion3<256>)); } break; default: { printf(" Unknown test scenario! Exiting...\n"); return EXIT_FAILURE; } } return EXIT_SUCCESS; } int main(int argc, char** argv) { Configuration config = {0}; if (parseCommandline(argc, argv, config) > 0) { return EXIT_FAILURE; } int devID = 0; hipError_t error; hipDeviceProp_t deviceProp; error = hipGetDevice(&devID); if (error != hipSuccess) { printf("hipGetDevice returned error code %d, line(%d)\n", error, __LINE__); } error = hipGetDeviceProperties(&deviceProp, devID); if (deviceProp.computeMode == hipComputeModeProhibited) { fprintf(stderr, "Error: device is running in <Compute Mode Prohibited>, no threads can use ::hipSetDevice().\n"); exit(EXIT_SUCCESS); } if (error != hipSuccess) { printf("hipGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__); } else { printf("\nGPUDevice %d: %s\nCompute cap: %d.%d\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor); } return runScenario(config.scenario, config.domain, config.verification); }
229934fbc51e60cda5159960a77d50ba6ebec7ee.cu
#include <cstdlib> #include <cstdio> #include "testproc.cuh" #include "kernels.cuh" #define NAME(proc) #proc static int runScenario(int scenario, dim3 domain, bool verify) { printf("----------------------------------------------\n"); printf(" Test scenario %d\n\n", scenario); switch (scenario) { case 0: { runTest(RatesVersion1, domain, verify, NAME(RatesVersion1)); runTest(RatesVersion2, domain, verify, NAME(RatesVersion2)); runTest(RatesVersion3<256>, domain, verify, NAME(RatesVersion3<256>)); } break; case 1: { runTest(RatesVersion1, domain, verify, NAME(RatesVersion1)); } break; case 2: { runTest(RatesVersion2, domain, verify, NAME(RatesVersion2)); } break; case 3: { runTest(RatesVersion3<256>, domain, verify, NAME(RatesVersion3<256>)); } break; default: { printf(" Unknown test scenario! Exiting...\n"); return EXIT_FAILURE; } } return EXIT_SUCCESS; } int main(int argc, char** argv) { Configuration config = {0}; if (parseCommandline(argc, argv, config) > 0) { return EXIT_FAILURE; } int devID = 0; cudaError_t error; cudaDeviceProp deviceProp; error = cudaGetDevice(&devID); if (error != cudaSuccess) { printf("cudaGetDevice returned error code %d, line(%d)\n", error, __LINE__); } error = cudaGetDeviceProperties(&deviceProp, devID); if (deviceProp.computeMode == cudaComputeModeProhibited) { fprintf(stderr, "Error: device is running in <Compute Mode Prohibited>, no threads can use ::cudaSetDevice().\n"); exit(EXIT_SUCCESS); } if (error != cudaSuccess) { printf("cudaGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__); } else { printf("\nGPUDevice %d: %s\nCompute cap: %d.%d\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor); } return runScenario(config.scenario, config.domain, config.verification); }
1fa830fc32b3f10b0d07cf328c4c403c4d0ebbf4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "utils.h" #include <stdio.h> /**************************************************************************************** *** Matrix Transpose *** *** https://devblogs.nvidia.com/parallelforall/efficient-matrix-transpose-cuda-cc/ *** *** *** ****************************************************************************************/ __global__ void transpose(float *A,float *B,int n,int m) { int x = threadIdx.x, y = threadIdx.y; int row = blockIdx.x*blockDim.x + threadIdx.x; int col = blockIdx.y*blockDim.y + threadIdx.y; __shared__ float tile[BLOCK_SIZE_32][BLOCK_SIZE_32+1]; if(row<n&&col<m){ tile[y][x] = A[row*m+col]; __syncthreads(); B[row+col*n] = tile[y][x]; } } /**************************************************************************************** *** Prefix sum *** *** http://http.developer.nvidia.com/GPUGems3/gpugems3_ch39.html *** *** *** ****************************************************************************************/ template<typename T> __global__ void scan(T* input, float * output, float *aux, int len,int i,int M) { // put input into shared memory __shared__ float temp[BLOCK_SIZE_32*2]; unsigned int t = threadIdx.x, start = 2 * blockIdx.x * BLOCK_SIZE_32; if (start + t < len) temp[t] = (float)input[start + t + i*M]; else temp[t] = 0; if (start + BLOCK_SIZE_32 + t < len) temp[BLOCK_SIZE_32 + t] = (float) input[start + BLOCK_SIZE_32 + t + i*M]; else temp[BLOCK_SIZE_32 + t] = 0; __syncthreads(); // Reduction int stride; for (stride = 1; stride <= BLOCK_SIZE_32; stride <<= 1) { int index = (t + 1) * stride * 2 - 1; if (index < 2 * BLOCK_SIZE_32) temp[index] += temp[index - stride]; __syncthreads(); } // Post reduction for (stride = BLOCK_SIZE_32 >> 1; stride; stride >>= 1) { int index = (t + 1) * stride * 2 - 1; if (index + stride < 2 * BLOCK_SIZE_32) temp[index + stride] += temp[index]; __syncthreads(); } if (start + t < len) output[start + t +i*M] = 
temp[t]; if (start + BLOCK_SIZE_32 + t < len) output[start + BLOCK_SIZE_32 + t +i*M] = temp[BLOCK_SIZE_32 + t]; if (aux && t == 0) aux[blockIdx.x] = temp[2 * BLOCK_SIZE_32 - 1]; } __global__ void fixup(float* input, float *aux, int len,int i,int M) { unsigned int t = threadIdx.x, start = 2 * blockIdx.x * BLOCK_SIZE_32; if (blockIdx.x) { if (start + t < len) input[start + t +i*M] += aux[blockIdx.x - 1]; if (start + BLOCK_SIZE_32 + t < len) input[start + BLOCK_SIZE_32 + t +i*M] += aux[blockIdx.x - 1]; } } void compute_integral_image(unsigned char* const d_rgbaImage, float* const d_integralImage, size_t numRows, size_t numCols){ float *deviceAuxArray, *deviceAuxScannedArray,*d_temp; unsigned int N = numRows,M= numCols; unsigned int numElements = N*M; checkCudaErrors(hipMalloc(&deviceAuxArray, (BLOCK_SIZE_32 << 1) * sizeof(float))); checkCudaErrors(hipMalloc(&deviceAuxScannedArray, (BLOCK_SIZE_32 << 1) * sizeof(float))); checkCudaErrors(hipMalloc(&d_temp, numElements * sizeof(float))); hipMemset(d_integralImage, 0, numElements*sizeof(float)); const int blocksX = numRows/BLOCK_SIZE_32+1; const int blocksY = numCols/BLOCK_SIZE_32+1; const dim3 dimblock(BLOCK_SIZE_32,BLOCK_SIZE_32); const dim3 dimgrid(blocksX , blocksY); const dim3 dimgrid2(blocksY , blocksX); int numBlocks = ceil((float)numElements/(BLOCK_SIZE_32*2)); dim3 scanGrid(numBlocks, 1, 1); dim3 scanBlock(BLOCK_SIZE_32, 1, 1); //kernal scan forn(i,N){ hipLaunchKernelGGL(( scan<unsigned char>), dim3(scanGrid), dim3(scanBlock), 0, 0, d_rgbaImage, d_integralImage, deviceAuxArray, M,i,M); hipDeviceSynchronize(); hipLaunchKernelGGL(( scan<float>), dim3(dim3(1,1,1)), dim3(scanBlock), 0, 0, deviceAuxArray, deviceAuxScannedArray, NULL, BLOCK_SIZE_32 << 1,i,0); hipDeviceSynchronize(); hipLaunchKernelGGL(( fixup), dim3(scanGrid), dim3(scanBlock), 0, 0, d_integralImage, deviceAuxScannedArray, M,i,M); } //Transpose hipLaunchKernelGGL(( transpose), dim3(dimgrid),dim3(dimblock), 0, 0, d_integralImage,d_temp,N,M); //scan 
forn(i,M){ hipLaunchKernelGGL(( scan<unsigned char>), dim3(scanGrid), dim3(scanBlock), 0, 0, d_rgbaImage, d_temp, deviceAuxArray, N,i,N); hipDeviceSynchronize(); hipLaunchKernelGGL(( scan<float>), dim3(dim3(1,1,1)), dim3(scanBlock), 0, 0, deviceAuxArray, deviceAuxScannedArray, NULL, BLOCK_SIZE_32 << 1,i,0); hipDeviceSynchronize(); hipLaunchKernelGGL(( fixup), dim3(scanGrid), dim3(scanBlock), 0, 0, d_temp, deviceAuxScannedArray, N,i,N); } //transpose hipLaunchKernelGGL(( transpose), dim3(dimgrid2),dim3(dimblock), 0, 0, d_temp,d_integralImage,M,N); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); }
1fa830fc32b3f10b0d07cf328c4c403c4d0ebbf4.cu
#include "utils.h" #include <stdio.h> /**************************************************************************************** *** Matrix Transpose *** *** https://devblogs.nvidia.com/parallelforall/efficient-matrix-transpose-cuda-cc/ *** *** *** ****************************************************************************************/ __global__ void transpose(float *A,float *B,int n,int m) { int x = threadIdx.x, y = threadIdx.y; int row = blockIdx.x*blockDim.x + threadIdx.x; int col = blockIdx.y*blockDim.y + threadIdx.y; __shared__ float tile[BLOCK_SIZE_32][BLOCK_SIZE_32+1]; if(row<n&&col<m){ tile[y][x] = A[row*m+col]; __syncthreads(); B[row+col*n] = tile[y][x]; } } /**************************************************************************************** *** Prefix sum *** *** http://http.developer.nvidia.com/GPUGems3/gpugems3_ch39.html *** *** *** ****************************************************************************************/ template<typename T> __global__ void scan(T* input, float * output, float *aux, int len,int i,int M) { // put input into shared memory __shared__ float temp[BLOCK_SIZE_32*2]; unsigned int t = threadIdx.x, start = 2 * blockIdx.x * BLOCK_SIZE_32; if (start + t < len) temp[t] = (float)input[start + t + i*M]; else temp[t] = 0; if (start + BLOCK_SIZE_32 + t < len) temp[BLOCK_SIZE_32 + t] = (float) input[start + BLOCK_SIZE_32 + t + i*M]; else temp[BLOCK_SIZE_32 + t] = 0; __syncthreads(); // Reduction int stride; for (stride = 1; stride <= BLOCK_SIZE_32; stride <<= 1) { int index = (t + 1) * stride * 2 - 1; if (index < 2 * BLOCK_SIZE_32) temp[index] += temp[index - stride]; __syncthreads(); } // Post reduction for (stride = BLOCK_SIZE_32 >> 1; stride; stride >>= 1) { int index = (t + 1) * stride * 2 - 1; if (index + stride < 2 * BLOCK_SIZE_32) temp[index + stride] += temp[index]; __syncthreads(); } if (start + t < len) output[start + t +i*M] = temp[t]; if (start + BLOCK_SIZE_32 + t < len) output[start + BLOCK_SIZE_32 + t +i*M] = 
temp[BLOCK_SIZE_32 + t]; if (aux && t == 0) aux[blockIdx.x] = temp[2 * BLOCK_SIZE_32 - 1]; } __global__ void fixup(float* input, float *aux, int len,int i,int M) { unsigned int t = threadIdx.x, start = 2 * blockIdx.x * BLOCK_SIZE_32; if (blockIdx.x) { if (start + t < len) input[start + t +i*M] += aux[blockIdx.x - 1]; if (start + BLOCK_SIZE_32 + t < len) input[start + BLOCK_SIZE_32 + t +i*M] += aux[blockIdx.x - 1]; } } void compute_integral_image(unsigned char* const d_rgbaImage, float* const d_integralImage, size_t numRows, size_t numCols){ float *deviceAuxArray, *deviceAuxScannedArray,*d_temp; unsigned int N = numRows,M= numCols; unsigned int numElements = N*M; checkCudaErrors(cudaMalloc(&deviceAuxArray, (BLOCK_SIZE_32 << 1) * sizeof(float))); checkCudaErrors(cudaMalloc(&deviceAuxScannedArray, (BLOCK_SIZE_32 << 1) * sizeof(float))); checkCudaErrors(cudaMalloc(&d_temp, numElements * sizeof(float))); cudaMemset(d_integralImage, 0, numElements*sizeof(float)); const int blocksX = numRows/BLOCK_SIZE_32+1; const int blocksY = numCols/BLOCK_SIZE_32+1; const dim3 dimblock(BLOCK_SIZE_32,BLOCK_SIZE_32); const dim3 dimgrid(blocksX , blocksY); const dim3 dimgrid2(blocksY , blocksX); int numBlocks = ceil((float)numElements/(BLOCK_SIZE_32*2)); dim3 scanGrid(numBlocks, 1, 1); dim3 scanBlock(BLOCK_SIZE_32, 1, 1); //kernal scan forn(i,N){ scan<unsigned char><<<scanGrid, scanBlock>>>(d_rgbaImage, d_integralImage, deviceAuxArray, M,i,M); cudaDeviceSynchronize(); scan<float><<<dim3(1,1,1), scanBlock>>>(deviceAuxArray, deviceAuxScannedArray, NULL, BLOCK_SIZE_32 << 1,i,0); cudaDeviceSynchronize(); fixup<<<scanGrid, scanBlock>>>(d_integralImage, deviceAuxScannedArray, M,i,M); } //Transpose transpose<<<dimgrid,dimblock>>>(d_integralImage,d_temp,N,M); //scan forn(i,M){ scan<unsigned char><<<scanGrid, scanBlock>>>(d_rgbaImage, d_temp, deviceAuxArray, N,i,N); cudaDeviceSynchronize(); scan<float><<<dim3(1,1,1), scanBlock>>>(deviceAuxArray, deviceAuxScannedArray, NULL, BLOCK_SIZE_32 << 
1,i,0); cudaDeviceSynchronize(); fixup<<<scanGrid, scanBlock>>>(d_temp, deviceAuxScannedArray, N,i,N); } //transpose transpose<<<dimgrid2,dimblock>>>(d_temp,d_integralImage,M,N); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); }
ca309870d27a22754e37314e9de77da90e801fd0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <assert.h> #include "math.h" #include "hemi.h" #include "common.h" #include "cuda_constant.cuh" #include <float.h> __device__ double d_u_function(double t, double x, double y) { return c_b * y * (1. - y) * (M_PI / 2. + atan(-x)); } __device__ double d_v_function(double t, double x, double y) { return atan( (x - c_lb) * (x - c_rb) * (1. + t) / 10. * (y - c_ub) * (y - c_bb)); } __device__ double d_itemOfInteg_1SpecType( double Py, double Qy, // double Gx, double Hx, // double a, double b ) { double integ; integ = (Hx - a)*(Hx - a) - (Gx - a)*(Gx - a); integ = integ * ( (Qy - b)*(Qy - b) - (Py - b)*(Py - b) ); return integ / 4.; } __device__ double d_analytSolut(double t, double x, double y ) { return 1.1 + sin( t * x * y); } __device__ double d_itemOfInteg_2SpecType( double Py, double Qy, // double alpha, // double a, double b, double betta ) { double buf_D, integ; // Computing... buf_D = (Qy - alpha) * (a*Qy + b - betta) * (a*Qy + b - betta) * (a*Qy + b - betta); buf_D = buf_D - (Py - alpha) * (a*Py + b - betta) * (a*Py + b - betta) * (a*Py + b - betta); integ = buf_D / (3. * a); buf_D = (a*Qy + b - betta) * (a*Qy + b - betta) * (a*Qy + b - betta) * (a*Qy + b - betta); buf_D = buf_D - (a*Py + b - betta) * (a*Py + b - betta) * (a*Py + b - betta) * (a*Py + b - betta); return integ - buf_D / (12. *a *a); } __device__ double d_integUnderLeftTr_OneCell( double Py, double Qy, // double a_SL, double b_SL, double Hx, int iCurrTL, // - Index of current time layer. // int * indCurSqOx, // - Index of current square by Ox axis. int * indCurSqOy, // - Index of current square by Oy axis. 
double * rhoInPrevTL_asV ) { double integ = 0; double buf_D, bufInteg_D; double rho[2][2]; double t = c_tau * (iCurrTL - 1.); double x, y; if( (indCurSqOx[0] >=0) && (indCurSqOx[1] <= c_x_length) ) { if( (indCurSqOy[0] >=0) && (indCurSqOy[1] <=c_y_length) ) { rho[0][0] = rhoInPrevTL_asV[ ((c_x_length +1)*indCurSqOy[0] + indCurSqOx[0]) ]; rho[0][1] = rhoInPrevTL_asV[ ((c_x_length +1)*indCurSqOy[1] + indCurSqOx[0]) ]; rho[1][0] = rhoInPrevTL_asV[ ((c_x_length +1)*indCurSqOy[0] + indCurSqOx[1]) ]; rho[1][1] = rhoInPrevTL_asV[ ((c_x_length +1)*indCurSqOy[1] + indCurSqOx[1]) ]; } } if( (indCurSqOx[0] < 0) || (indCurSqOx[1] > c_x_length) || (indCurSqOy[0] < 0) || (indCurSqOy[1] > c_y_length) ) { x = indCurSqOx[0] * c_h; y = indCurSqOy[0] * c_h; rho[0][0] = d_analytSolut(t, x, y ); x = indCurSqOx[0] * c_h; y = indCurSqOy[1] * c_h; rho[0][1] = d_analytSolut(t, x, y ); x = indCurSqOx[1] * c_h; y = indCurSqOy[0] * c_h; rho[1][0] = d_analytSolut(t, x, y ); x = indCurSqOx[1] * c_h; y = indCurSqOy[1] * c_h; rho[1][1] = d_analytSolut(t, x, y ); } // 1. buf_D = (Qy - c_h * indCurSqOy[1]) * (Qy - c_h * indCurSqOy[1]) - (Py - c_h * indCurSqOy[1]) * (Py - c_h * indCurSqOy[1]); if( (indCurSqOx[1] >= 0) && (indCurSqOy[1] >= 0) ) { buf_D = buf_D * (Hx - c_h * indCurSqOx[1]) * (Hx - c_h * indCurSqOx[1]) /4.; bufInteg_D = d_itemOfInteg_2SpecType( Py, Qy, c_h * indCurSqOy[1], a_SL, b_SL, c_h * indCurSqOx[1] ); } else { buf_D = buf_D * (Hx - c_h * indCurSqOx[1] ) * (Hx - c_h * indCurSqOx[1] ) /4.; bufInteg_D = d_itemOfInteg_2SpecType( Py, Qy, c_h * indCurSqOy[1], a_SL, b_SL, c_h * indCurSqOx[1] ); } buf_D = buf_D - bufInteg_D /2.; integ = buf_D * rho[0][0] /c_h /c_h; // 2. buf_D = (Qy - c_h * indCurSqOy[1]) * (Qy - c_h * indCurSqOy[1]) - (Py - c_h * indCurSqOy[1]) * (Py - c_h * indCurSqOy[1]); if( (indCurSqOx[0] >= 0) && (indCurSqOy[1] >= 0) ) { buf_D = -1. 
* buf_D * (Hx - c_h * indCurSqOx[0]) * (Hx - c_h * indCurSqOx[0]) /4.; bufInteg_D = d_itemOfInteg_2SpecType( Py, Qy, c_h * indCurSqOy[1], a_SL, b_SL, c_h * indCurSqOx[0] ); } else { buf_D = -1. * buf_D * (Hx - c_h * indCurSqOx[0] ) * (Hx - c_h * indCurSqOx[0] ) /4.; bufInteg_D = d_itemOfInteg_2SpecType( Py, Qy, c_h * indCurSqOy[1], a_SL, b_SL, c_h * indCurSqOx[0] ); } buf_D = buf_D + bufInteg_D /2.; integ = integ + buf_D * rho[1][0] /c_h /c_h; // 3. buf_D = (Qy - c_h * indCurSqOy[0]) * (Qy - c_h * indCurSqOy[0]) - (Py - c_h * indCurSqOy[0]) * (Py - c_h * indCurSqOy[0]); if( (indCurSqOx[1] >= 0) && (indCurSqOy[0] >= 0) ) { buf_D = -1. * buf_D * (Hx - c_h * indCurSqOx[1]) * (Hx - c_h * indCurSqOx[1]) /4.; bufInteg_D = d_itemOfInteg_2SpecType( Py, Qy, c_h * indCurSqOy[0], a_SL, b_SL, c_h * indCurSqOx[1] ); } else { buf_D = -1. * buf_D * (Hx - c_h * indCurSqOx[1] ) * (Hx - c_h * indCurSqOx[1] ) /4.; bufInteg_D = d_itemOfInteg_2SpecType( Py, Qy, c_h * indCurSqOy[0], a_SL, b_SL, c_h * indCurSqOx[1] ); } buf_D = buf_D + bufInteg_D /2.; integ = integ + buf_D * rho[0][1] /c_h /c_h; // 4. buf_D = (Qy - c_h * indCurSqOy[0]) * (Qy - c_h * indCurSqOy[0]) - (Py - c_h * indCurSqOy[0]) * (Py - c_h * indCurSqOy[0]); if( (indCurSqOx[0] >= 0) && (indCurSqOy[0] >= 0) ) { buf_D = buf_D * (Hx - c_h * indCurSqOx[0]) * (Hx - c_h * indCurSqOx[0]) /4.; bufInteg_D = d_itemOfInteg_2SpecType( Py, Qy, c_h * indCurSqOy[0], a_SL, b_SL, c_h * indCurSqOx[0] ); } else { buf_D = buf_D * (Hx - c_h * indCurSqOx[0] ) * (Hx - c_h * indCurSqOx[0] ) /4.; bufInteg_D = d_itemOfInteg_2SpecType( Py, Qy, c_h * indCurSqOy[0], a_SL, b_SL, c_h * indCurSqOx[0] ); } buf_D = buf_D - bufInteg_D /2.; integ += buf_D * rho[1][1] /c_h /c_h; return integ; } __device__ double d_integUnderRightTr_OneCell( double Py, double Qy, // double a_SL, double b_SL, double Gx, int iCurrTL, // - Index of current time layer. // int * indCurSqOx, // - Index of current square by Ox axis. 
int * indCurSqOy, // - Index of current square by Oy axis. double * rhoInPrevTL_asV ) { return -1. * d_integUnderLeftTr_OneCell( Py, Qy, // a_SL, b_SL, Gx, // - double Hx, iCurrTL, // - Index of current time layer. // indCurSqOx, // - Index of current square by Ox axis. indCurSqOy, // - Index of current square by Oy axis. // rhoInPrevTL_asV ); } __device__ double d_integUnderRectAng_OneCell( double Py, double Qy, // double Gx, double Hx, int iCurrTL, // - Index of current time layer. // int * indCurSqOx, // - Index of current square by Ox axis. int * indCurSqOy, // - Index of current square by Oy axis. double * rhoInPrevTL_asV ) { double integ = 0; double buf_D; double rho[2][2]; double t = c_tau * (iCurrTL -1.); double x, y; if( (indCurSqOx[0] >=0) && (indCurSqOy[0] >=0) ) { rho[0][0] = rhoInPrevTL_asV[ ((c_x_length +1)*indCurSqOy[0] + indCurSqOx[0]) ]; rho[0][1] = rhoInPrevTL_asV[ ((c_x_length +1)*indCurSqOy[1] + indCurSqOx[0]) ]; rho[1][0] = rhoInPrevTL_asV[ ((c_x_length +1)*indCurSqOy[0] + indCurSqOx[1]) ]; rho[1][1] = rhoInPrevTL_asV[ ((c_x_length +1)*indCurSqOy[1] + indCurSqOx[1]) ]; } else { x = indCurSqOx[0] * c_h; y = indCurSqOy[0] * c_h; rho[0][0] = d_analytSolut(t, x, y ); x = indCurSqOx[0] * c_h; y = indCurSqOy[1] * c_h; rho[0][1] = d_analytSolut(t, x, y ); x = indCurSqOx[1] * c_h; y = indCurSqOy[0] * c_h; rho[1][0] = d_analytSolut(t, x, y ); x = indCurSqOx[1] * c_h; y = indCurSqOy[1] * c_h; rho[1][1] = d_analytSolut(t, x, y ); } if( (indCurSqOx[1] >= 0) && (indCurSqOy[1] >= 0) ) { buf_D = d_itemOfInteg_1SpecType( Py, Qy, Gx, Hx, c_h * indCurSqOx[1], c_h * indCurSqOy[1] ); } else { buf_D = d_itemOfInteg_1SpecType( Py, Qy, Gx, Hx, c_h *indCurSqOx[1] , c_h * indCurSqOy[1] ); } buf_D = buf_D /c_h /c_h; integ = buf_D * rho[0][0]; // rhoInPrevTL[ indCurSqOx[0] ][ indCurSqOy[0] ]; if( (indCurSqOx[0] >= 0) && (indCurSqOy[1] >= 0) ) { buf_D = d_itemOfInteg_1SpecType( Py, Qy, Gx, Hx, c_h *indCurSqOx[0] , c_h * indCurSqOy[1] ); } else { buf_D = 
d_itemOfInteg_1SpecType( Py, Qy, Gx, Hx, c_h * indCurSqOx[0] , c_h * indCurSqOy[1] ); } buf_D = buf_D /c_h /c_h; integ = integ - buf_D * rho[1][0]; // rhoInPrevTL[ indCurSqOx[1] ][ indCurSqOy[0] ]; if( (indCurSqOx[1] >= 0) && (indCurSqOy[0] >= 0) ) { buf_D = d_itemOfInteg_1SpecType( Py, Qy, Gx, Hx, c_h * indCurSqOx[1] , c_h * indCurSqOy[0] ); } else { buf_D = d_itemOfInteg_1SpecType( Py, Qy, Gx, Hx, c_h * indCurSqOx[1] , c_h * indCurSqOy[0] ); } buf_D = buf_D /c_h /c_h; integ = integ - buf_D * rho[0][1]; // rhoInPrevTL[ indCurSqOx[0] ][ indCurSqOy[1] ]; if( (indCurSqOx[0] >= 0) && (indCurSqOy[0] >= 0) ) { buf_D = d_itemOfInteg_1SpecType( Py, Qy, Gx, Hx, c_h *indCurSqOx[0], c_h * indCurSqOy[0] ); } else { buf_D = d_itemOfInteg_1SpecType( Py, Qy, Gx, Hx, c_h * indCurSqOx[0] , c_h * indCurSqOy[0] ); } buf_D = buf_D /c_h /c_h; return integ + buf_D * rho[1][1]; // rhoInPrevTL[ indCurSqOx[1] ][ indCurSqOy[1] ]; } __device__ double d_integOfChan_SLRightSd( int iCurrTL, // - Index of current time layer. // double *bv, int wTrPCI, // - Where travel point current (botton vertex) is. double *uv, int wTrPNI, // - Where travel point next (upper vertex) is. // int * indCurSqOx, // - Index by OX axis where bv and uv are. // double lb, int * indLB, // - Left boundary by Ox. Index by OX axis where lb is. // int * indCurSqOy, // - Index of current square by Oy axis. double * rhoInPrevTL_asV ) { double mv[2]; // - Middle and right vertices. int wMvI; // - Where middle vertex is. int indCurSqOxToCh[2]; // - Indices of current square by Ox axis to be changed. Under which we want to integrate. double h = c_h; double a_SL, b_SL; // - Coefficients of slant line: x = a_SL *y + b_SL. double Gx, Hx; // - Left boundary for each integration. double integ = 0.; double buf_D; int j; // Let's compute helpful values. 
if( uv[0] <= bv[0] ) { mv[0] = uv[0]; mv[1] = uv[1]; wMvI = wTrPNI; } if( uv[0] > bv[0] ) { mv[0] = bv[0]; mv[1] = bv[1]; wMvI = wTrPCI; } if( ( fabs(uv[1] - bv[1]) ) <= 1.e-12 ) { // Computation is impossible. Too smale values. Let's return some approximate value. // buf_D = (uv[1] - bv[1]) * ((uv[0] + bv[0]) /2. - lb) * rhoInPrevTL[ indCurSqOx[0] ][ indCurSqOy[0] ]; return fabs(uv[1] - bv[1]); // fabs(uv[1] - bv[1]); } // First step: from "lb" to "mas OX[ indCurSqOx[0] ]" by iteration. // integ += fabs( mv[0] - lb) * fabs(uv[1] - bv[1]); indCurSqOxToCh[0] = indLB[0]; indCurSqOxToCh[1] = indCurSqOxToCh[0] +1; for( j = indLB[0]; j< indCurSqOx[0]; j++ ) { // If this is first cell we should integrate under rectangle only. if( indCurSqOxToCh[0] >= 0 ) { Gx = c_h * indCurSqOxToCh[0]; Hx = c_h * indCurSqOxToCh[1]; } if( indCurSqOxToCh[0] < 0 ) { Gx = h * indCurSqOxToCh[0]; Hx = h * indCurSqOxToCh[1]; } if( j == indLB[0] ) { Gx = lb; } buf_D = d_integUnderRectAng_OneCell( bv[1], // - double Py, uv[1], // - double Qy, // Gx, // - double Gx, Hx, // - double Hx, // iCurrTL, // - Index of current time layer. // indCurSqOxToCh, // - Index of current square by Ox axis. indCurSqOy, // - Index of current square by Oy axis. rhoInPrevTL_asV ); integ += buf_D; indCurSqOxToCh[0] += 1; indCurSqOxToCh[1] = indCurSqOxToCh[0] +1; } // Integration. Second step: under [ indCurSqOx[0]; indCurSqOx[1] ] square. // A. Under rectangle. if( wMvI == 1 ) { if( indCurSqOx[0] == indLB[0] ) { Gx = lb; } if( indCurSqOx[0] > indLB[0] ) { if( indCurSqOx[0] >= 0) { Gx = c_h * indCurSqOx[0]; } if( indCurSqOx[0] < 0) { Gx = h * indCurSqOx[0]; } } buf_D = d_integUnderRectAng_OneCell( bv[1], // - double Py, uv[1], // - double Qy, // Gx, // - double Gx, mv[0], // - double Hx, // iCurrTL, // - Index of current time layer. // indCurSqOx, // - Index of current square by Ox axis. indCurSqOy, // - Index of current square by Oy axis. rhoInPrevTL_asV ); integ += buf_D; } // B. Under triangle. 
if( ( fabs(uv[1] - bv[1]) ) > 1.e-12 ) { // integ += fabs(uv[1] - bv[1]) * (rv[0] - mv[0]) /2.; // Coefficients of slant line: x = a_SL *y + b_SL. a_SL = (uv[0] - bv[0]) / (uv[1] - bv[1]); b_SL = bv[0] - a_SL * bv[1]; // Integration under one cell triangle. if( fabs( a_SL ) > 1.e-12 ) { buf_D = d_integUnderRightTr_OneCell( bv[1], // - double Py, uv[1], // - double Qy, // a_SL, b_SL, mv[0], // - double Gx, // iCurrTL, // - Index of current time layer. // indCurSqOx, // - Index of current square by Ox axis. indCurSqOy, // - Index of current square by Oy axis. rhoInPrevTL_asV ); integ += buf_D; } } return integ; } __device__ double d_integOfChan_SLLeftSd( int iCurrTL, // - Index of current time layer. // double *bv, int wTrPCI, // - Where travel point current (botton vertex) is. double *uv, int wTrPNI, // - Where travel point next (upper vertex) is. // int * indCurSqOx, // - Index by OX axis where bv and uv are. // double rb, int * indRB, // - Right boundary by Ox. Index by OX axis where rb is. // int * indCurSqOy, // - Index of current square by Oy axis. double * rhoInPrevTL_asV ) { double mv[2]; // - Left and middle vertices. int wMvI; // - Where middle vertex is. int indCurSqOxToCh[2]; // - Indices of current square by Ox axis to be changed. Under which we want to integrate. double h = c_h; double a_SL, b_SL; // - Coefficients of slant line: x = a_SL *y + b_SL. double Gx, Hx; // - Left and right boundary for each integration. double integ = 0.; double buf_D; int j; // Let's compute helpful values. if( uv[0] <= bv[0] ) { mv[0] = bv[0]; mv[1] = bv[1]; wMvI = wTrPCI; } if( uv[0] > bv[0] ) { mv[0] = uv[0]; mv[1] = uv[1]; wMvI = wTrPNI; } if( ( fabs(uv[1] - bv[1]) ) <= 1.e-12 ) { // Computation is impossible. Too smale values. Let's return some approximate value. // buf_D = (uv[1] - bv[1]) * (rb - (uv[0] + bv[0]) /2.) * rhoInPrevTL[ indCurSqOx[0] ][ indCurSqOy[0] ]; return fabs(uv[1] - bv[1]); // fabs(uv[1] - bv[1]); } // Integration. 
First step: under [ indCurSqOx[0]; indCurSqOx[1] ] square. // A. Under triangle. if( ( fabs(uv[1] - bv[1]) ) > 1.e-12 ) { // Coefficients of slant line: x = a_SL *y + b_SL. a_SL = (uv[0] - bv[0]) / (uv[1] - bv[1]); b_SL = bv[0] - a_SL * bv[1]; // Integration under one cell triangle. if( fabs( a_SL ) > 1.e-12 ) { buf_D = d_integUnderLeftTr_OneCell( bv[1], // - double Py, uv[1], // - double Qy, // a_SL, b_SL, mv[0], // - double Hx, // iCurrTL, // - Index of current time layer. // indCurSqOx, // - Index of current square by Ox axis. indCurSqOy, // - Index of current square by Oy axis. rhoInPrevTL_asV ); integ += buf_D; } } // B. Under rectangle. Need to be cheking. if( wMvI == 1 ) { if( indCurSqOx[0] == indRB[0] ) { Hx = rb; } if( indCurSqOx[0] < indRB[0] ) { if( indCurSqOx[1] >= 0) { Hx = c_h * indCurSqOx[1] ; } if( indCurSqOx[1] < 0) { Hx = h * indCurSqOx[1]; } } buf_D = d_integUnderRectAng_OneCell( bv[1], // - double Py, uv[1], // - double Qy, // mv[0], // - double Gx, Hx, // - double Hx, // iCurrTL, // - Index of current time layer. // indCurSqOx, // - Index of current square by Ox axis. indCurSqOy, // - Index of current square by Oy axis. rhoInPrevTL_asV ); integ += buf_D; } // Second step: from "mas OX[ indCurSqOx[1] ]" to "rb" by iteration. indCurSqOxToCh[0] = indCurSqOx[0] +1; indCurSqOxToCh[1] = indCurSqOxToCh[0] +1; for( j = indCurSqOx[0] +1; j< indRB[0] +1; j++ ) { // If this is first cell we should integrate under triangle only. if( indCurSqOxToCh[1] > 0 ) { Gx = c_h * indCurSqOxToCh[0] ; Hx = c_h * indCurSqOxToCh[1]; } if( indCurSqOxToCh[1] <= 0 ) { Gx = h * indCurSqOxToCh[0]; Hx = h * indCurSqOxToCh[1]; } if( j == indRB[0] ) { Hx = rb; } buf_D = d_integUnderRectAng_OneCell( bv[1], // - double Py, uv[1], // - double Qy, // Gx, // - double Gx, Hx, // - double Hx, // iCurrTL, // - Index of current time layer. // indCurSqOxToCh, // - Index of current square by Ox axis. indCurSqOy, // - Index of current square by Oy axis. 
rhoInPrevTL_asV ); integ += buf_D; indCurSqOxToCh[0] += 1; indCurSqOxToCh[1] = indCurSqOxToCh[0] +1; } return integ; } __device__ double d_integUnderRigAngTr_BottLeft( int iCurrTL, // - Index of current time layer. // double *bv, double *uv, double * rhoInPrevTL_asV ) { double trPC[2]; // - Travel point current; int wTrPCI = 0; // - Where travel point current is? double trPN[2]; // - Travel point next; int wTrPNI = 0; // - Where travel point next is? double ang; // - Angle of slant line. Should be greater zero. int indCurSqOx[2], indCurSqOy[2]; // - Index of current square by Ox and Oy axes. int indRB[2]; // - Index of right boundary. double distOx, distOy; // - Distance to near Ox and Oy straight lines. bool isTrDone = false; // - Is travel done. double integOfBottTr = 0.; // - Value which we are computing. double buf_D; // Initial data. trPC[0] = bv[0]; trPC[1] = bv[1]; if( ( fabs(bv[0] - uv[0]) ) < 1.e-12 ) { // This triangle has very small width. I guess further computation isn't correct. return fabs(bv[0] - uv[0]); } ang = (uv[1] - bv[1]) / (bv[0] - uv[0]); if( fabs(ang) < 1.e-12 ) { // This triangle has very small height. I guess further computation isn't correct. return fabs(ang); } indCurSqOx[0] = (int)( (trPC[0] - 1.e-14) /c_h); // - If trPC[0] is in grid edge I want it will be between in the left side of indCurSqOx[1]. if( (trPC[0] - 1.e-14) <= 0 ) { indCurSqOx[0] -= 1; // - The case when "trPC[0]" ia negative. } indCurSqOx[1] = indCurSqOx[0] +1; // - It's important only in rare case then trPC is in grid edge. indRB[0] = indCurSqOx[0]; indRB[1] = indRB[0] +1; indCurSqOy[0] = (int)( (trPC[1] + 1.e-14) /c_h); // - If trPC[1] is in grid edge I want it will be between indCurSqOx[0] and indCurSqOx[1]. if( (trPC[1] + 1.e-14) <= 0 ) { indCurSqOy[0] -= 1; // - The case when "trPC[0]" ia negative. } indCurSqOy[1] = indCurSqOy[0] +1; // - It's important only in rare case then trPC is in grid edge. 
if( indCurSqOx[0] >= 0) { distOx = trPC[0] - c_h * indCurSqOx[0] ; } if( indCurSqOx[0] < 0 ) { distOx = fabs( trPC[0] - c_h * indCurSqOx[0] ); } if( indCurSqOy[1] >= 0 ) { distOy = c_h * indCurSqOy[1] - trPC[1]; } if( indCurSqOy[1] < 0 ) { distOy = fabs( c_h * indCurSqOy[1] - trPC[1] ); } do { // a. First case. if( (distOy /distOx) <= ang ) { // Across with straight line parallel Ox axis. wTrPNI = 1; if( indCurSqOy[1] >= 0) { trPN[1] = c_h * indCurSqOy[1]; } if( indCurSqOy[1] < 0) { trPN[1] = c_h * indCurSqOy[1]; } trPN[0] = bv[0] - (trPN[1] - bv[1]) /ang; } // b. Second case. if( (distOy /distOx) > ang ) { // Across with straight line parallel Oy axis. wTrPNI = 2; if( indCurSqOx[0] >= 0 ) { trPN[0] = c_h * indCurSqOx[0]; } if( indCurSqOx[0] < 0 ) { trPN[0] = c_h * indCurSqOx[0]; } trPN[1] = bv[1] - ang * (trPN[0] - bv[0]); } // c. Cheking. if( trPN[0] < (uv[0] + 1.e-14) ) { trPN[0] = uv[0]; trPN[1] = uv[1]; isTrDone = true; wTrPNI = 0; } // d. Integration. buf_D = d_integOfChan_SLLeftSd( iCurrTL, // - Index of current time layer. // trPC, wTrPCI, // - double *bv, trPN, wTrPNI, // - double *uv, // indCurSqOx, // - Indices where trPC and trPN are. // bv[0], indRB, // - double rb = Right boundary by Ox. // indCurSqOy, // - Index of current square by Oy axis. rhoInPrevTL_asV ); integOfBottTr = integOfBottTr + buf_D; // e. Updating. if( isTrDone == false ) { // We will compute more. We need to redefine some values. 
wTrPCI = wTrPNI; trPC[0] = trPN[0]; trPC[1] = trPN[1]; if( wTrPNI == 1) { indCurSqOy[0] += 1; indCurSqOy[1] += 1; } if( wTrPNI == 2) { indCurSqOx[0] -= 1; indCurSqOx[1] -= 1; } if( indCurSqOx[0] >= 0) { distOx = trPC[0] - c_h * indCurSqOx[0] ; } if( indCurSqOx[0] < 0) { distOx = fabs( trPC[0] - c_h * indCurSqOx[0] ); } if( indCurSqOy[1] >= 0 ) { distOy = c_h * indCurSqOy[1] - trPC[1]; } if( indCurSqOy[1] < 0 ) { distOy = fabs( c_h * indCurSqOy[1] - trPC[1] ); } } } while( !isTrDone ); return integOfBottTr; } __device__ double d_integUnderRigAngTr_BottRight( int iCurrTL, // - Index of current time layer. // double *bv, double *uv, double * rhoInPrevTL_asV ) { double trPC[2]; // - Travel point current; int wTrPCI = 0; // - Where travel point current is? double trPN[2]; // - Travel point next; int wTrPNI = 0; // - Where travel point next is? double ang; // - Angle of slant line. Should be greater zero. int indCurSqOx[2], indCurSqOy[2]; // - Index of current square by Ox and Oy axes. int indLB[2]; // - Index of left boundary. double distOx, distOy; // - Distance to near Ox and Oy straight lines. bool isTrDone = false; // - Is travel done. double integOfBottTr = 0.; // - Value which we are computing. double buf_D; trPC[0] = bv[0]; trPC[1] = bv[1]; if( ( fabs(bv[0] - uv[0]) ) < 1.e-12 ) return fabs(bv[0] - uv[0]); ang = (uv[1] - bv[1]) / (uv[0] - bv[0]); if( fabs(ang) < 1.e-12 ) return fabs(ang); indCurSqOx[0] = (int)( (trPC[0] + 1.e-14) /c_h); // - If trPC[0] is in grid edge I want it will be between in the right side. if( (trPC[0] + 1.e-14) <= 0 ) indCurSqOx[0] -= 1; // - The case when "trPC[0]" ia negative. indCurSqOx[1] = indCurSqOx[0] +1; // - It's important only in rare case then trPC is in grid edge. indLB[0] = indCurSqOx[0]; indLB[1] = indLB[0] +1; indCurSqOy[0] = (int)( (trPC[1] + 1.e-14) /c_h); // - If trPC[1] is in grid edge I want it will be in the upper side. if( (trPC[1] + 1.e-14) <= 0 ) { indCurSqOy[0] -= 1; // - The case when "trPC[0]" ia negative. 
} indCurSqOy[1] = indCurSqOy[0] +1; // - It's important only in rare case then trPC is in grid edge. if( indCurSqOx[1] >=0 ) { distOx = fabs( c_h * indCurSqOx[1] - trPC[0] ); } if( indCurSqOx[1] < 0 ) { distOx = fabs( c_h * indCurSqOx[1] - trPC[0] ); } if( indCurSqOy[1] >=0 ) { distOy = fabs( c_h * indCurSqOy[1] - trPC[1] ); } if( indCurSqOy[1] < 0 ) { distOy = fabs( c_h * indCurSqOy[1] - trPC[1] ); } do { // a. First case. if( (distOy /distOx) <= ang ) { // Across with straight line parallel Ox axis. wTrPNI = 1; if( indCurSqOy[1] >=0 ) { trPN[1] = c_h * indCurSqOy[1]; } if( indCurSqOy[1] < 0 ) { trPN[1] = c_h * indCurSqOy[1]; } trPN[0] = bv[0] + (trPN[1] - bv[1]) /ang; } // b. Second case. if( (distOy /distOx) > ang ) { // Across with straight line parallel Oy axis. wTrPNI = 2; if( indCurSqOx[1] >= 0 ) { trPN[0] = c_h * indCurSqOx[1]; } if( indCurSqOx[1] < 0 ) { trPN[0] = c_h * indCurSqOx[1]; } trPN[1] = bv[1] + ang * (trPN[0] - bv[0]); } // c. Cheking. if( trPN[0] > (uv[0] - 1.e-14) ) { // - Without "fabs"!!! trPN[0] = uv[0]; trPN[1] = uv[1]; isTrDone = true; wTrPNI = 0; } // d. Integration. buf_D = d_integOfChan_SLRightSd( iCurrTL, // - Index of current time layer. // trPC, wTrPCI, // - double *bv, trPN, wTrPNI, // - double *uv, // indCurSqOx, // - Indices where trPC and trPN are. // bv[0], indLB, // - double lb = Left boundary by Ox. // indCurSqOy, // - Index of current square by Oy axis. rhoInPrevTL_asV ); integOfBottTr = integOfBottTr + buf_D; // e. Updating. if( isTrDone == false ) { // We will compute more. We need to redefine some values. 
wTrPCI = wTrPNI; trPC[0] = trPN[0]; trPC[1] = trPN[1]; if( wTrPNI == 1) { indCurSqOy[0] += 1; indCurSqOy[1] += 1; } if( wTrPNI == 2) { indCurSqOx[0] += 1; indCurSqOx[1] += 1; } if( indCurSqOx[1] >=0 ) { distOx = fabs( c_h * indCurSqOx[1] - trPC[0] ); } if( indCurSqOx[1] < 0 ) { distOx = fabs( c_h * indCurSqOx[1] - trPC[0] ); } if( indCurSqOy[1] >=0 ) { distOy = fabs( c_h * indCurSqOy[1] - trPC[1] ); } if( indCurSqOy[1] < 0 ) { distOy = fabs( c_h * indCurSqOy[1] - trPC[1] ); } } } while( !isTrDone ); return integOfBottTr; } __device__ double d_integUnderBottTr( int iCurrTL, // - Index of current time layer. // double * LvBt, // - Left, Right and Botton vertices of Botton triangle. double * RvBt, // - Left, Right and Botton vertices of Botton triangle. double * BvBt, // - Left, Right and Botton vertices of Botton triangle. double * rhoInPrevTL_asV, int ii, int jj ) // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! { double integOfBottTr; double buf_D; // Three ways are possible. // 1. if( BvBt[0] <= LvBt[0] ) { buf_D = d_integUnderRigAngTr_BottRight( iCurrTL, // BvBt, RvBt, rhoInPrevTL_asV ); integOfBottTr = buf_D; buf_D = d_integUnderRigAngTr_BottRight( iCurrTL, // BvBt, LvBt, rhoInPrevTL_asV ); integOfBottTr = integOfBottTr - buf_D; // printf("Bv<Lv: i= %d, j= %d res= %le",ii,jj,integOfBottTr); // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! return integOfBottTr; } // 2. if( (BvBt[0] > LvBt[0]) && (BvBt[0] < RvBt[0]) ) { buf_D = d_integUnderRigAngTr_BottLeft( iCurrTL, // BvBt, LvBt, rhoInPrevTL_asV ); integOfBottTr = buf_D; buf_D = d_integUnderRigAngTr_BottRight( iCurrTL, // BvBt, RvBt, rhoInPrevTL_asV ); integOfBottTr = integOfBottTr + buf_D; // printf("Bv>Lv & Bv<Rv: i= %d, j= %d res= %le",ii,jj,integOfBottTr); // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! return integOfBottTr; } // 3. 
if( BvBt[0] >= RvBt[0] ) { buf_D = d_integUnderRigAngTr_BottLeft( iCurrTL, // BvBt, LvBt, rhoInPrevTL_asV ); integOfBottTr = buf_D; buf_D = d_integUnderRigAngTr_BottLeft( iCurrTL, // BvBt, RvBt, rhoInPrevTL_asV ); integOfBottTr = integOfBottTr - buf_D; // printf("Bv>Rv: i= %d, j= %d res= %le",ii,jj,integOfBottTr); // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! return integOfBottTr; } return integOfBottTr; } __device__ double d_integUnderRigAngTr_UppLeft( int iCurrTL, // - Index of current time layer. // double *bv, double *uv, double * rhoInPrevTL_asV ) { // return ( fabs( (uv[1] - bv[1]) * (bv[0] - uv[0]) /2.) ); double trPC[2]; // - Travel point current; int wTrPCI = 0; // - Where travel point current is? double trPN[2]; // - Travel point next; int wTrPNI = 0; // - Where travel point next is? double ang; // - Angle of slant line. Should be greater zero. int indCurSqOx[2], indCurSqOy[2]; // - Index of current square by Ox and Oy axes. int indRB[2]; // - Index of right boundary. double distOx, distOy; // - Distance to near Ox and Oy straight lines. bool isTrDone = false; // - Is travel done. double integOfUppTr = 0.; // - Value which we are computing. double buf_D; // Initial data. trPC[0] = bv[0]; trPC[1] = bv[1]; if( ( fabs(bv[0] - uv[0]) ) < 1.e-12 ) return fabs(bv[0] - uv[0]); ang = (uv[1] - bv[1]) / (uv[0] - bv[0]); if( fabs(ang) < 1.e-12 ) return fabs(ang); // The follow equations are quite important. indCurSqOx[0] = (int)( (trPC[0] + 1.e-14) /c_h); // - If trPC[0] is in grid edge I want it will be in the right side. if( (trPC[0] + 1.e-14) <= 0 ) { indCurSqOx[0] -= 1; // - The case when "trPC[0]" ia negative. } indCurSqOx[1] = indCurSqOx[0] +1; // - It's important only in rare case then trPC is in grid edge. indCurSqOy[0] = (int)( (trPC[1] + 1.e-14) /c_h); // - If trPC[1] is in grid edge I want it will be in the upper square. if( (trPC[1] + 1.e-14) <= 0 ) { indCurSqOy[0] -= 1; // - The case when "trPC[0]" ia negative. 
} indCurSqOy[1] = indCurSqOy[0] +1; indRB[0] = (int)( (uv[0] - 1.e-14) /c_h); // - If uv[0] is in grid edge I want it will be in the left side. if( (uv[0] - 1.e-14) <= 0 ) { indRB[0] -= 1; // - The case when "trPC[0]" ia negative. } indRB[1] = indRB[0] +1; if( indCurSqOx[1] >= 0) { distOx = c_h * indCurSqOx[1] - trPC[0]; } if( indCurSqOx[1] < 0) { distOx = fabs( c_h * indCurSqOx[1] - trPC[0] ); } if( indCurSqOy[1] >= 0 ) { distOy = c_h * indCurSqOy[1] - trPC[1]; } if( indCurSqOy[1] < 0 ) { distOy = fabs( c_h * indCurSqOy[1] - trPC[1] ); } do { // a. First case. if( (distOy /distOx) <= ang ) { // Across with straight line parallel Ox axis. wTrPNI = 1; if( indCurSqOy[1] >= 0 ) { trPN[1] = c_h * indCurSqOy[1]; } if( indCurSqOy[1] < 0 ) { trPN[1] = c_h * indCurSqOy[1]; } trPN[0] = bv[0] + (trPN[1] - bv[1]) /ang; } // b. Second case. if( (distOy /distOx) > ang ) { // Across with straight line parallel Oy axis. wTrPNI = 2; if( indCurSqOx[1] >= 0 ) { trPN[0] = c_h * indCurSqOx[1]; } if( indCurSqOx[1] < 0 ) { trPN[0] = c_h * indCurSqOx[1]; } trPN[1] = bv[1] + ang * (trPN[0] - bv[0]); } // c. Cheking. if( trPN[0] > (uv[0] - 1.e-14) ) { trPN[0] = uv[0]; trPN[1] = uv[1]; isTrDone = true; wTrPNI = 0; } // d. Integration. buf_D = d_integOfChan_SLLeftSd( iCurrTL, // - Index of current time layer. // trPC, wTrPCI, // - double *bv, trPN, wTrPNI, // - double *uv, // indCurSqOx, // - Indices where trPC and trPN are. // uv[0], indRB, // - double rb = Right boundary by Ox. // indCurSqOy, // - Index of current square by Oy axis. rhoInPrevTL_asV ); integOfUppTr = integOfUppTr + buf_D; // e. Updating. if( isTrDone == false ) { // We will compute more. We need to redefine some values. 
wTrPCI = wTrPNI; trPC[0] = trPN[0]; trPC[1] = trPN[1]; if( wTrPNI == 1) { indCurSqOy[0] += 1; indCurSqOy[1] += 1; } if( wTrPNI == 2) { indCurSqOx[0] += 1; indCurSqOx[1] += 1; } if( indCurSqOx[1] >= 0) { distOx = fabs( c_h * indCurSqOx[1] - trPC[0] ); } if( indCurSqOx[1] < 0) { distOx = fabs( c_h * indCurSqOx[1] - trPC[0] ); } if( indCurSqOy[1] >= 0 ) { distOy = fabs( c_h * indCurSqOy[1] - trPC[1] ); } if( indCurSqOy[1] < 0 ) { distOy = fabs( c_h * indCurSqOy[1] - trPC[1] ); } } } while( !isTrDone ); return integOfUppTr; } __device__ double d_integUnderRigAngTr_UppRight( int iCurrTL, // - Index of current time layer. // double *bv, double *uv, double * rhoInPrevTL_asV ) { // return ( fabs( (uv[1] - bv[1]) * (bv[0] - uv[0]) /2.) ); double trPC[2]; // - Travel point current; int wTrPCI = 0; // - Where travel point current is? double trPN[2]; // - Travel point next; int wTrPNI = 0; // - Where travel point next is? double ang; // - Angle of slant line. Should be greater zero. int indCurSqOx[2], indCurSqOy[2]; // - Index of current square by Ox and Oy axes. int indLB[2]; // - Index of left boundary. double distOx, distOy; // - Distance to near Ox and Oy straight lines. bool isTrDone = false; // - Is travel done. double integOfUppTr = 0.; // - Value which we are computing. double buf_D; // Initial data. trPC[0] = bv[0]; trPC[1] = bv[1]; if( ( fabs(bv[0] - uv[0]) ) < 1.e-12 ) { // This triangle has very small width. I guess further computation isn't correct. return fabs(bv[0] - uv[0]); } ang = (uv[1] - bv[1]) / (bv[0] - uv[0]); if( fabs(ang) < 1.e-12 ) { // This triangle has very small height. I guess further computation isn't correct. return fabs(ang); } indCurSqOx[0] = (int)( (trPC[0] - 1.e-14) /c_h); // - If trPC[0] is in grid edge I want it will be between in the left side. if( (trPC[0] - 1.e-14) <= 0 ) { indCurSqOx[0] -= 1; // - The case when "trPC[0]" ia negative. } indCurSqOx[1] = indCurSqOx[0] +1; // - It's important only in rare case then trPC is in grid edge. 
indLB[0] = (int)( (uv[0] + 1.e-14) /c_h); if( (uv[0] + 1.e-14) <=0 ) { indLB[0] -= 1; // - The case when "trPC[0]" ia negative. } indLB[1] = indLB[0] +1; indCurSqOy[0] = (int)( (trPC[1] + 1.e-14) /c_h); // - If trPC[1] is in grid edge I want it will be in the upper side. if( (trPC[1] + 1.e-14) <= 0 ) { indCurSqOy[0] -= 1; // - The case when "trPC[0]" ia negative. } indCurSqOy[1] = indCurSqOy[0] +1; // - It's important only in rare case then trPC is in grid edge. if( indCurSqOx[0] >= 0 ) { distOx = fabs( trPC[0] - c_h * indCurSqOx[0] ); } if( indCurSqOx[0] < 0 ) { distOx = fabs( trPC[0] - c_h * indCurSqOx[0] ); } if( indCurSqOy[1] >= 0 ) { distOy = fabs( c_h * indCurSqOy[1] - trPC[1] ); } if( indCurSqOy[1] < 0 ) { distOy = fabs( c_h * indCurSqOy[1] - trPC[1] ); } do { // a. First case. if( (distOy /distOx) <= ang ) { // Across with straight line parallel Ox axis. wTrPNI = 1; if( indCurSqOy[1] >= 0 ) { trPN[1] = c_h * indCurSqOy[1]; } if( indCurSqOy[1] < 0 ) { trPN[1] = c_h * indCurSqOy[1]; } trPN[0] = bv[0] - (trPN[1] - bv[1]) /ang; } // b. Second case. if( (distOy /distOx) > ang ) { // Across with straight line parallel Oy axis. wTrPNI = 2; if( indCurSqOx[0] >= 0 ) { trPN[0] = c_h * indCurSqOx[0]; } if( indCurSqOx[0] < 0 ) { trPN[0] = c_h * indCurSqOx[0]; } trPN[1] = bv[1] - ang * (trPN[0] - bv[0]); } // c. Cheking. if( trPN[0] < (uv[0] + 1.e-14) ) { trPN[0] = uv[0]; trPN[1] = uv[1]; isTrDone = true; wTrPNI = 0; } // d. Integration. buf_D = d_integOfChan_SLRightSd( iCurrTL, // - Index of current time layer. // trPC, wTrPCI, // - double *bv, trPN, wTrPNI, // - double *uv, // indCurSqOx, // - Indices where trPC and trPN are. // uv[0], indLB, // - double lb = Left boundary by Ox. // indCurSqOy, // - Index of current square by Oy axis. rhoInPrevTL_asV ); integOfUppTr = integOfUppTr + buf_D; // e. Updating. if( isTrDone == false ) { // We will compute more. We need to redefine some values. 
wTrPCI = wTrPNI; trPC[0] = trPN[0]; trPC[1] = trPN[1]; if( wTrPNI == 1) { indCurSqOy[0] += 1; indCurSqOy[1] += 1; } if( wTrPNI == 2) { indCurSqOx[0] -= 1; indCurSqOx[1] -= 1; } if( indCurSqOx[0] >= 0 ) { distOx = fabs( trPC[0] - c_h * indCurSqOx[0] ); } if( indCurSqOx[0] < 0 ) { distOx = fabs( trPC[0] - c_h * indCurSqOx[0] ); } if( indCurSqOy[1] >= 0 ) { distOy = fabs( c_h * indCurSqOy[1] - trPC[1] ); } if( indCurSqOy[1] < 0 ) { distOy = fabs( c_h * indCurSqOy[1] - trPC[1] ); } } } while(!isTrDone); return integOfUppTr; } __device__ double d_integUnderUpperTr( int iCurrTL, // - Index of current time layer. // double * LvUt, // - Left, Right and Upper vertices of Upper triangle. double * RvUt, // - Left, Right and Upper vertices of Upper triangle. double * UvUt, // - Left, Right and Upper vertices of Upper triangle. double * rhoInPrevTL_asV) { double integOfUppTr; double buf_D; // Three ways are possible. // 1. if( UvUt[0] <= LvUt[0] ) { buf_D = d_integUnderRigAngTr_UppRight( iCurrTL, // RvUt, UvUt, rhoInPrevTL_asV ); integOfUppTr = buf_D; buf_D = d_integUnderRigAngTr_UppRight( iCurrTL, // LvUt, UvUt, rhoInPrevTL_asV ); integOfUppTr = integOfUppTr - buf_D; return integOfUppTr; } // 2. if( (UvUt[0] > LvUt[0]) && (UvUt[0] < RvUt[0]) ) { buf_D = d_integUnderRigAngTr_UppLeft( iCurrTL, // LvUt, UvUt, rhoInPrevTL_asV ); integOfUppTr = buf_D; buf_D = d_integUnderRigAngTr_UppRight( iCurrTL, // RvUt, UvUt, rhoInPrevTL_asV ); integOfUppTr = integOfUppTr + buf_D; return integOfUppTr; } // 3. if( UvUt[0] >= RvUt[0] ) { buf_D = d_integUnderRigAngTr_UppLeft( iCurrTL, // LvUt, UvUt, rhoInPrevTL_asV ); integOfUppTr = buf_D; buf_D = d_integUnderRigAngTr_UppLeft( iCurrTL, // RvUt, UvUt, rhoInPrevTL_asV ); integOfUppTr = integOfUppTr - buf_D; return integOfUppTr; } return integOfUppTr; } __device__ double d_integUnderUnunifTr( int iCurrTL, // - Index of current time layer. // double * firVer, // - First vertex of triangle. double * secVer, // - Second vertex of triangle. 
double * thiVer, // - Third vertex of triangle. double * rhoInPrevTL_asV, int ii, int jj ) //!!!!!!!!!!!!!!!!!!! { double bv[2], mv[2], uv[2]; // - Botton, middle and upper vertices of triangle. bool isFirVUsed = false; bool isSecVUsed = false; bool isThiVUsed = false; bool is1VUsed, is2VUsed, is3VUsed; double a_LC, b_LC, c_LC; // - Coefficients of line betweeen "bv" and "uv" vertices. double ap[2]; // - Across point of line through "bv" to "uv" and "y == mv[1]" double LvBt[2], RvBt[2], BvBt[2]; // - Left, Right and Botton vertices of Botton triangle. double integOfBottTr; // - Item of integral under Botton triangle. double LvUt[2], RvUt[2], UvUt[2]; // - Left, Right and Upper vertices of Upper triangle. double integOfUppTr; // - Item of integral under Upper triangle. double integ = 0.; // - Item which I'm computing. // 1. I need to understand which vertex is botton, middle and upper. bv[1] = firVer[1]; bv[0] = firVer[0]; isFirVUsed = true; if( bv[1] > secVer[1] ) { bv[1] = secVer[1]; bv[0] = secVer[0]; isFirVUsed = false; isSecVUsed = true; } if( bv[1] > thiVer[1] ) { bv[1] = thiVer[1]; bv[0] = thiVer[0]; isFirVUsed = false; isSecVUsed = false; isThiVUsed = true; } uv[1] = 0; // - The minimum possible value. is1VUsed = false; is2VUsed = false; is3VUsed = false; if( (uv[1] < firVer[1]) && (isFirVUsed == false) ) { uv[1] = firVer[1]; uv[0] = firVer[0]; is1VUsed = true; } if( (uv[1] < secVer[1]) && (isSecVUsed == false) ) { uv[1] = secVer[1]; uv[0] = secVer[0]; is2VUsed = true; is1VUsed = false; } if( (uv[1] < thiVer[1]) && (isThiVUsed == false) ) { uv[1] = thiVer[1]; uv[0] = thiVer[0]; is3VUsed = true; is2VUsed = false; is1VUsed = false; } // Dangerous. if( (isFirVUsed == false) && (is1VUsed == false) ) { mv[1] = firVer[1]; mv[0] = firVer[0]; } if( (isSecVUsed == false) && (is2VUsed == false) ) { mv[1] = secVer[1]; mv[0] = secVer[0]; } if( (isThiVUsed == false) && (is3VUsed == false) ) { mv[1] = thiVer[1]; mv[0] = thiVer[0]; } // 2. I want to compute across point. 
// 2.a Let's compute line coefficients betweeen "bv" and "uv" vertices. // a_LC * x + b_LC * y = c_LC. a_LC = uv[1] - bv[1]; b_LC = bv[0] - uv[0]; c_LC = (bv[0] - uv[0])*bv[1] + (uv[1] - bv[1])*bv[0]; // 2.b Across point. ap[1] = mv[1]; if( fabs(a_LC) < 1.e-12 ) { // This triangle has very small height. I guess further computation isn't correct. return 1.e-12; } ap[0] = (c_LC - b_LC * ap[1]) /a_LC; // printf("i= %d, j= %d : ap[0]= %le mv[0]= %le \n",ii,jj, ap[0], mv[0]); // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! // 3. There the middle vertex relativly straight line is? Two ways are possible. if( mv[0] < ap[0] ) { // Left, Right and Botton vertices of Botton triangle. LvBt[0] = mv[0]; LvBt[1] = mv[1]; RvBt[0] = ap[0]; RvBt[1] = ap[1]; BvBt[0] = bv[0]; BvBt[1] = bv[1]; integOfBottTr = d_integUnderBottTr( iCurrTL, // - Index of current time layer. // LvBt, RvBt, BvBt, // - Left, Right and Botton vertices of Botton triangle. // rhoInPrevTL_asV, ii, jj ); // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! integ = integOfBottTr; // Left, Right and Upper vertices of Upper triangle. LvUt[0] = mv[0]; LvUt[1] = mv[1]; RvUt[0] = ap[0]; RvUt[1] = ap[1]; UvUt[0] = uv[0]; UvUt[1] = uv[1]; integOfUppTr = d_integUnderUpperTr( iCurrTL, // - Index of current time layer. // LvUt, RvUt, UvUt, // - Left, Right and Botton vertices of Upper triangle. // rhoInPrevTL_asV); integ = integ + integOfUppTr; return integ; } if( mv[0] >= ap[0] ) { // Left, Right and Botton vertices of Botton triangle. LvBt[0] = ap[0]; LvBt[1] = ap[1]; RvBt[0] = mv[0]; RvBt[1] = mv[1]; BvBt[0] = bv[0]; BvBt[1] = bv[1]; integOfBottTr = d_integUnderBottTr( iCurrTL, // - Index of current time layer. // LvBt, RvBt, BvBt, // - Left, Right and Botton vertices of Botton triangle. // rhoInPrevTL_asV, ii, jj ); // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! integ = integOfBottTr; // Left, Right and Upper vertices of Upper triangle. 
LvUt[0] = ap[0]; LvUt[1] = ap[1]; RvUt[0] = mv[0]; RvUt[1] = mv[1]; UvUt[0] = uv[0]; UvUt[1] = uv[1]; integOfUppTr = d_integUnderUpperTr( iCurrTL, // - Index of current time layer. // LvUt, RvUt, UvUt, // - Left, Right and Botton vertices of Upper triangle. rhoInPrevTL_asV ); return integ + integOfUppTr; } return integ; } __device__ double d_f_function(const int current_tl, const int i, const int j) { double x = c_h * i ; double y = c_h * j ; double arg_v = (x - c_lb) * (x - c_rb) * (1.+c_tau*current_tl) /10. * (y - c_ub) * (y - c_bb); double rho, dRhoDT, dRhoDX, dRhoDY; double u, duDX; double v, dvDY; rho = d_analytSolut(c_tau*current_tl, x, y ); dRhoDT = x * y * cos( c_tau*current_tl*x*y ); dRhoDX = c_tau*current_tl * y * cos( c_tau*current_tl*x*y ); dRhoDY = c_tau*current_tl * x * cos( c_tau*current_tl*x*y ); u = d_u_function(c_tau*current_tl, x, y ); duDX = -c_b * y * (1.-y) / ( 1. + x * x ); v = d_v_function(c_tau*current_tl, x, y ); dvDY = (x - c_lb) * (x - c_rb) * (1.+c_tau*current_tl) /10. * (y - c_bb + y - c_ub); dvDY = dvDY / ( 1. + arg_v * arg_v ); double res = dRhoDT + rho * duDX + u * dRhoDX + rho * dvDY + v * dRhoDY; return res; } __device__ double space_volume_in_prev_tl(double* prev_result, int current_tl, int i, int j) { double first1[2]; double second1[2]; double third1[2]; double first2[2]; double second2[2]; double third2[2]; double x, y; double c_tau_to_current_tl = (1. + current_tl * c_tau) / 10.; // A x = (c_h*(i - 1) + c_h*i) / 2.; y = (c_h*(j - 1) + c_h*j) / 2.; first1[0] = first2[0] = x - c_tau_b * y * (1. - y) * (c_pi_half + atan(-x)); first1[1] = first2[1] = y - c_tau * atan((x - c_lb) * (x - c_rb) * c_tau_to_current_tl * (y - c_ub) * (y - c_bb)); // B x = (c_h*(i + 1) + c_h*i) / 2.; second1[0] = x - c_tau_b * y * (1. - y) * (c_pi_half + atan(-x)); second1[1] = y - c_tau * atan((x - c_lb) * (x - c_rb) * c_tau_to_current_tl * (y - c_ub) * (y - c_bb)); // C y = (c_h*(j + 1) + c_h*j) / 2.; third1[0] = third2[0] = x - c_tau_b * y * (1. 
- y) * (c_pi_half + atan(-x)); third1[1] = third2[1] = y - c_tau * atan((x - c_lb) * (x - c_rb) * c_tau_to_current_tl * (y - c_ub) * (y - c_bb)); // D x = (c_h*(i - 1) + c_h*i) / 2.; second2[0] = x - c_tau_b * y * (1. - y) * (c_pi_half + atan(-x)); second2[1] = y - c_tau * atan((x - c_lb) * (x - c_rb) * c_tau_to_current_tl * (y - c_ub) * (y - c_bb)); double buf_D = d_integUnderUnunifTr( current_tl, first1, second1, third1, prev_result, i, j); return buf_D + d_integUnderUnunifTr( current_tl, first2, second2, third2, prev_result, i, j ); } __global__ void kernel_diff(double *diff, double *result, int tl, double tau) { for (int opt = blockIdx.x * blockDim.x + threadIdx.x; opt < c_n; opt += blockDim.x * gridDim.x) { /*if (opt == 1) { for (int i = 0; i < 11; i++) { for(int j = 0; j < 11 ; j++) { printf("%le ", result[i*11 + j]); } printf("\n"); } }*/ int i = opt % (c_x_length + 1); int j = opt / (c_y_length + 1); double f = 0; diff [opt] = f; // if (i > 0 && j > 0 && j != c_x_length && i != c_x_length) // { f = d_analytSolut(tl*tau, i*c_h, j*c_h); // f += c_tau * d_f_function(tl, i, j); diff [opt] = fabs(result[opt] - f); /* if (i == 1 && j == 1) { printf("tl = %d\n", tl); printf("f = %le result[opt] = %le diff[opt] = %le opt = %d\n", f, result[opt], diff[opt], opt); } */ // } } } __global__ void kernel(double* prev_result, double* result, int current_tl) { for (int opt = blockIdx.x * blockDim.x + threadIdx.x; opt < c_n; opt += blockDim.x * gridDim.x) { int i = opt % (c_x_length + 1); int j = opt / (c_y_length + 1); // if (j == 0) // bottom bound { result[ opt ] = 1.1 + sin( c_tau_to_h * current_tl * j * c_bb ); } else if (i == 0) // left bound { result[ opt ] = 1.1 + sin( c_tau_to_h * current_tl * i * c_lb ); } else if (j == c_y_length) // upper bound { result[ opt ] = 1.1 + sin( c_tau_to_h * current_tl * i * c_ub ); } else if (i == c_x_length) // right bound { result[ opt ] = 1.1 + sin( c_tau_to_h * current_tl * j * c_rb ); } else if (i > 0 && j > 0 && j != c_x_length 
&& i != c_x_length)
        {
            // Interior node: transport the density from the previous time
            // layer along the characteristics, normalize by the cell area,
            // and add the source term.
            // FIX: the original computed space_volume_in_prev_tl() twice and
            // discarded the first (expensive) result — compute it once.
            // Division order /c_h /c_h is kept to preserve rounding behavior.
            double t = space_volume_in_prev_tl(prev_result, current_tl, i, j) / c_h;
            t = t / c_h;
            result[ opt ] = t;
            result[ opt ] += c_tau * d_f_function(current_tl, i, j);
        }
    }
}

// Allocates and fills the host-side density of the initial time layer with
// the constant 1.1 (matches d_analytSolut at t = 0, where sin(0) = 0).
// Caller owns the returned buffer (released with delete[] in solve_at_gpu).
double* init_rho(ComputeParameters *p)
{
    double *rhoInPrevTL_asV;
    rhoInPrevTL_asV = new double [ p->size ];
    // Initial data of rho.
    for( int k = 0; k <= p->x_size; k++ )
    {
        for( int j = 0; j <= p->y_size; j++ )
        {
            rhoInPrevTL_asV[ (p->x_size+1)*k + j ] = 1.1;
        }
    }
    return rhoInPrevTL_asV;
}

// Host driver for the GPU solver.
//  - tl1 == true  : compute a single time layer into p->result.
//  - tl1 == false : march p->t_count - 1 layers, ping-ponging two buffers.
//  - compute_diff : additionally fill p->diff with |result - analytic|.
// Returns the elapsed GPU time in milliseconds (event-based timing).
float solve_at_gpu(ComputeParameters *p, bool tl1, bool compute_diff)
{
    assert(p != NULL);
    assert(p->result != NULL);
    const int gridSize = 256;
    const int blockSize = 512;
    //const int gridSize = 1;
    //const int blockSize = 1;
    size_t n(0);
    int temp_i(0);
    double temp_d(0);
    double *result = NULL, *prev_result = NULL, *d_diff = NULL;
    n = p->get_real_matrix_size();
    int size = sizeof(double)*n;
    double *rhoInPrevTL_asV = init_rho(p);
    hipEvent_t start, stop;
    float time;
    hipEventCreate(&start);
    hipEventCreate(&stop);

    // Upload the scalar problem parameters to __constant__ memory.
    hipMemcpyToSymbol(c_tau, &p->tau, sizeof(double));
    hipMemcpyToSymbol(c_lb, &p->lb, sizeof(double));
    hipMemcpyToSymbol(c_b, &p->b, sizeof(double));
    hipMemcpyToSymbol(c_rb, &p->rb, sizeof(double));
    hipMemcpyToSymbol(c_bb, &p->bb, sizeof(double));
    hipMemcpyToSymbol(c_ub, &p->ub, sizeof(double));
    hipMemcpyToSymbol(c_n, &n, sizeof(int));
    temp_i = p->x_size;
    hipMemcpyToSymbol(c_x_length, &temp_i, sizeof(int));
    temp_i = p->y_size;
    hipMemcpyToSymbol(c_y_length, &temp_i, sizeof(int));
    temp_d = 1. / (p->x_size);
    hipMemcpyToSymbol(c_h, &temp_d, sizeof(double));
    temp_d = p->tau / (p->x_size);
    hipMemcpyToSymbol(c_tau_to_h, &temp_d, sizeof(double));
    temp_d = p->b * p->tau;
    hipMemcpyToSymbol(c_tau_b, &temp_d, sizeof(double));
    temp_d = M_PI / 2.;
    hipMemcpyToSymbol(c_pi_half, &temp_d, sizeof(double));

    checkCuda(hipMalloc((void**)&(result), size) );
    checkCuda(hipMalloc((void**)&(prev_result), size) );
    if (compute_diff)
    {
        checkCuda(hipMalloc((void**)&d_diff, size));
    }
    hipMemcpy(prev_result, rhoInPrevTL_asV, size, hipMemcpyHostToDevice);
    hipEventRecord(start, 0);
    if (tl1 == true)
    {
        hipLaunchKernelGGL(( kernel), dim3(gridSize), dim3(blockSize), 0, 0, prev_result, result, 1);
        hipMemcpy(p->result, result, size, hipMemcpyDeviceToHost);
    }
    else
    {
        // Ping-pong between the two buffers, advancing two layers per pass
        // so the final answer always lands in prev_result.
        // NOTE(review): when (t_count - 1) is odd the loop advances one
        // layer past tempTl — confirm the intended parity of t_count.
        int tl = 0;
        int tempTl = p->t_count - 1;
        while(tl < tempTl)
        {
            hipLaunchKernelGGL(( kernel), dim3(gridSize), dim3(blockSize), 0, 0, prev_result, result, tl + 1);
            hipLaunchKernelGGL(( kernel), dim3(gridSize), dim3(blockSize), 0, 0, result, prev_result, tl + 2);
            tl += 2;
        }
        hipMemcpy(p->result, prev_result, size, hipMemcpyDeviceToHost);
    }
    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);
    hipEventElapsedTime(&time, start, stop);
    if (compute_diff)
    {
        int ttt = tl1 ? 1 : p->t_count;
        printf("[gpu] compute diff t = %d\n", ttt);
        if (tl1)
        {
            hipLaunchKernelGGL(( kernel_diff), dim3(1), dim3(1), 0, 0, d_diff, result, ttt, p->tau);
        }
        else
        {
            hipLaunchKernelGGL(( kernel_diff), dim3(1), dim3(1), 0, 0, d_diff, prev_result, ttt, p->tau);
        }
        hipMemcpy(p->diff, d_diff, size, hipMemcpyDeviceToHost);
    }
    // FIX: release every device allocation and the timing events BEFORE
    // hipDeviceReset() — the original freed d_diff after the reset, i.e.
    // into an already-destroyed context, and leaked both events.
    hipFree(result);
    hipFree(prev_result);
    if (compute_diff)
    {
        hipFree(d_diff);
    }
    hipEventDestroy(start);
    hipEventDestroy(stop);
    hipDeviceReset();
    delete[] rhoInPrevTL_asV;
    return time;
}
// ca309870d27a22754e37314e9de77da90e801fd0.cu  (stray filename line — commented out so the translation unit stays parseable)
#include "cuda.h"
#include "cuda_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include "math.h"
#include "hemi.h"
#include "common.h"
#include "cuda_constant.cuh"
#include <float.h>

// Velocity component u(t, x, y) of the advection field.
// Reads the __constant__ coefficient c_b (uploaded in solve_at_gpu).
// NOTE(review): 't' is unused here — u looks stationary in time; confirm intended.
__device__ double d_u_function(double t, double x, double y) {
    return c_b * y * (1. - y) * (M_PI / 2. + atan(-x));
}

// Velocity component v(t, x, y); built from the domain bounds
// c_lb, c_rb, c_ub, c_bb (__constant__, uploaded in solve_at_gpu).
__device__ double d_v_function(double t, double x, double y) {
    return atan( (x - c_lb) * (x - c_rb) * (1. + t) / 10. * (y - c_ub) * (y - c_bb));
}

// Closed-form item of the integral of a bilinear basis product over the
// rectangle [Gx,Hx] x [Py,Qy], centered at (a, b):
//   ((Hx-a)^2 - (Gx-a)^2) * ((Qy-b)^2 - (Py-b)^2) / 4.
__device__ double d_itemOfInteg_1SpecType(
    double Py, double Qy, //
    double Gx, double Hx, //
    double a, double b )
{
    double integ;
    integ = (Hx - a)*(Hx - a) - (Gx - a)*(Gx - a);
    integ = integ * ( (Qy - b)*(Qy - b) - (Py - b)*(Py - b) );
    return integ / 4.;
}

// Analytic solution rho(t, x, y) = 1.1 + sin(t*x*y).
// Also supplies boundary values in kernel() and the reference in kernel_diff().
__device__ double d_analytSolut(double t, double x, double y )
{
    return 1.1 + sin( t * x * y);
}

// Closed-form item of the integral along the slant line x = a*y + b,
// for y in [Py,Qy], shifted by (alpha, betta).
// NOTE(review): divides by 'a' and 'a*a' — callers must guarantee a != 0
// (guarded upstream by fabs(a_SL) > 1.e-12 before this is reached).
__device__ double d_itemOfInteg_2SpecType(
    double Py, double Qy, //
    double alpha, //
    double a, double b, double betta )
{
    double buf_D, integ;
    // Computing...
    buf_D = (Qy - alpha) * (a*Qy + b - betta) * (a*Qy + b - betta) * (a*Qy + b - betta);
    buf_D = buf_D - (Py - alpha) * (a*Py + b - betta) * (a*Py + b - betta) * (a*Py + b - betta);
    integ = buf_D / (3. * a);
    buf_D = (a*Qy + b - betta) * (a*Qy + b - betta) * (a*Qy + b - betta) * (a*Qy + b - betta);
    buf_D = buf_D - (a*Py + b - betta) * (a*Py + b - betta) * (a*Py + b - betta) * (a*Py + b - betta);
    return integ - buf_D / (12. *a *a);
}

// Integral of the previous-layer density under the left right-angle triangle
// bounded by the slant line x = a_SL*y + b_SL and the vertical x = Hx,
// restricted to one grid cell (indCurSqOx, indCurSqOy).
// (Signature continues on the next source line.)
__device__ double d_integUnderLeftTr_OneCell(
    double Py, double Qy, //
    double a_SL, double b_SL, double Hx,
    int iCurrTL, // - Index of current time layer.
    //
    int * indCurSqOx, // - Index of current square by Ox axis.
    int * indCurSqOy, // - Index of current square by Oy axis.
double * rhoInPrevTL_asV ) { double integ = 0; double buf_D, bufInteg_D; double rho[2][2]; double t = c_tau * (iCurrTL - 1.); double x, y; if( (indCurSqOx[0] >=0) && (indCurSqOx[1] <= c_x_length) ) { if( (indCurSqOy[0] >=0) && (indCurSqOy[1] <=c_y_length) ) { rho[0][0] = rhoInPrevTL_asV[ ((c_x_length +1)*indCurSqOy[0] + indCurSqOx[0]) ]; rho[0][1] = rhoInPrevTL_asV[ ((c_x_length +1)*indCurSqOy[1] + indCurSqOx[0]) ]; rho[1][0] = rhoInPrevTL_asV[ ((c_x_length +1)*indCurSqOy[0] + indCurSqOx[1]) ]; rho[1][1] = rhoInPrevTL_asV[ ((c_x_length +1)*indCurSqOy[1] + indCurSqOx[1]) ]; } } if( (indCurSqOx[0] < 0) || (indCurSqOx[1] > c_x_length) || (indCurSqOy[0] < 0) || (indCurSqOy[1] > c_y_length) ) { x = indCurSqOx[0] * c_h; y = indCurSqOy[0] * c_h; rho[0][0] = d_analytSolut(t, x, y ); x = indCurSqOx[0] * c_h; y = indCurSqOy[1] * c_h; rho[0][1] = d_analytSolut(t, x, y ); x = indCurSqOx[1] * c_h; y = indCurSqOy[0] * c_h; rho[1][0] = d_analytSolut(t, x, y ); x = indCurSqOx[1] * c_h; y = indCurSqOy[1] * c_h; rho[1][1] = d_analytSolut(t, x, y ); } // 1. buf_D = (Qy - c_h * indCurSqOy[1]) * (Qy - c_h * indCurSqOy[1]) - (Py - c_h * indCurSqOy[1]) * (Py - c_h * indCurSqOy[1]); if( (indCurSqOx[1] >= 0) && (indCurSqOy[1] >= 0) ) { buf_D = buf_D * (Hx - c_h * indCurSqOx[1]) * (Hx - c_h * indCurSqOx[1]) /4.; bufInteg_D = d_itemOfInteg_2SpecType( Py, Qy, c_h * indCurSqOy[1], a_SL, b_SL, c_h * indCurSqOx[1] ); } else { buf_D = buf_D * (Hx - c_h * indCurSqOx[1] ) * (Hx - c_h * indCurSqOx[1] ) /4.; bufInteg_D = d_itemOfInteg_2SpecType( Py, Qy, c_h * indCurSqOy[1], a_SL, b_SL, c_h * indCurSqOx[1] ); } buf_D = buf_D - bufInteg_D /2.; integ = buf_D * rho[0][0] /c_h /c_h; // 2. buf_D = (Qy - c_h * indCurSqOy[1]) * (Qy - c_h * indCurSqOy[1]) - (Py - c_h * indCurSqOy[1]) * (Py - c_h * indCurSqOy[1]); if( (indCurSqOx[0] >= 0) && (indCurSqOy[1] >= 0) ) { buf_D = -1. 
* buf_D * (Hx - c_h * indCurSqOx[0]) * (Hx - c_h * indCurSqOx[0]) /4.; bufInteg_D = d_itemOfInteg_2SpecType( Py, Qy, c_h * indCurSqOy[1], a_SL, b_SL, c_h * indCurSqOx[0] ); } else { buf_D = -1. * buf_D * (Hx - c_h * indCurSqOx[0] ) * (Hx - c_h * indCurSqOx[0] ) /4.; bufInteg_D = d_itemOfInteg_2SpecType( Py, Qy, c_h * indCurSqOy[1], a_SL, b_SL, c_h * indCurSqOx[0] ); } buf_D = buf_D + bufInteg_D /2.; integ = integ + buf_D * rho[1][0] /c_h /c_h; // 3. buf_D = (Qy - c_h * indCurSqOy[0]) * (Qy - c_h * indCurSqOy[0]) - (Py - c_h * indCurSqOy[0]) * (Py - c_h * indCurSqOy[0]); if( (indCurSqOx[1] >= 0) && (indCurSqOy[0] >= 0) ) { buf_D = -1. * buf_D * (Hx - c_h * indCurSqOx[1]) * (Hx - c_h * indCurSqOx[1]) /4.; bufInteg_D = d_itemOfInteg_2SpecType( Py, Qy, c_h * indCurSqOy[0], a_SL, b_SL, c_h * indCurSqOx[1] ); } else { buf_D = -1. * buf_D * (Hx - c_h * indCurSqOx[1] ) * (Hx - c_h * indCurSqOx[1] ) /4.; bufInteg_D = d_itemOfInteg_2SpecType( Py, Qy, c_h * indCurSqOy[0], a_SL, b_SL, c_h * indCurSqOx[1] ); } buf_D = buf_D + bufInteg_D /2.; integ = integ + buf_D * rho[0][1] /c_h /c_h; // 4. buf_D = (Qy - c_h * indCurSqOy[0]) * (Qy - c_h * indCurSqOy[0]) - (Py - c_h * indCurSqOy[0]) * (Py - c_h * indCurSqOy[0]); if( (indCurSqOx[0] >= 0) && (indCurSqOy[0] >= 0) ) { buf_D = buf_D * (Hx - c_h * indCurSqOx[0]) * (Hx - c_h * indCurSqOx[0]) /4.; bufInteg_D = d_itemOfInteg_2SpecType( Py, Qy, c_h * indCurSqOy[0], a_SL, b_SL, c_h * indCurSqOx[0] ); } else { buf_D = buf_D * (Hx - c_h * indCurSqOx[0] ) * (Hx - c_h * indCurSqOx[0] ) /4.; bufInteg_D = d_itemOfInteg_2SpecType( Py, Qy, c_h * indCurSqOy[0], a_SL, b_SL, c_h * indCurSqOx[0] ); } buf_D = buf_D - bufInteg_D /2.; integ += buf_D * rho[1][1] /c_h /c_h; return integ; } __device__ double d_integUnderRightTr_OneCell( double Py, double Qy, // double a_SL, double b_SL, double Gx, int iCurrTL, // - Index of current time layer. // int * indCurSqOx, // - Index of current square by Ox axis. 
int * indCurSqOy, // - Index of current square by Oy axis. double * rhoInPrevTL_asV ) { return -1. * d_integUnderLeftTr_OneCell( Py, Qy, // a_SL, b_SL, Gx, // - double Hx, iCurrTL, // - Index of current time layer. // indCurSqOx, // - Index of current square by Ox axis. indCurSqOy, // - Index of current square by Oy axis. // rhoInPrevTL_asV ); } __device__ double d_integUnderRectAng_OneCell( double Py, double Qy, // double Gx, double Hx, int iCurrTL, // - Index of current time layer. // int * indCurSqOx, // - Index of current square by Ox axis. int * indCurSqOy, // - Index of current square by Oy axis. double * rhoInPrevTL_asV ) { double integ = 0; double buf_D; double rho[2][2]; double t = c_tau * (iCurrTL -1.); double x, y; if( (indCurSqOx[0] >=0) && (indCurSqOy[0] >=0) ) { rho[0][0] = rhoInPrevTL_asV[ ((c_x_length +1)*indCurSqOy[0] + indCurSqOx[0]) ]; rho[0][1] = rhoInPrevTL_asV[ ((c_x_length +1)*indCurSqOy[1] + indCurSqOx[0]) ]; rho[1][0] = rhoInPrevTL_asV[ ((c_x_length +1)*indCurSqOy[0] + indCurSqOx[1]) ]; rho[1][1] = rhoInPrevTL_asV[ ((c_x_length +1)*indCurSqOy[1] + indCurSqOx[1]) ]; } else { x = indCurSqOx[0] * c_h; y = indCurSqOy[0] * c_h; rho[0][0] = d_analytSolut(t, x, y ); x = indCurSqOx[0] * c_h; y = indCurSqOy[1] * c_h; rho[0][1] = d_analytSolut(t, x, y ); x = indCurSqOx[1] * c_h; y = indCurSqOy[0] * c_h; rho[1][0] = d_analytSolut(t, x, y ); x = indCurSqOx[1] * c_h; y = indCurSqOy[1] * c_h; rho[1][1] = d_analytSolut(t, x, y ); } if( (indCurSqOx[1] >= 0) && (indCurSqOy[1] >= 0) ) { buf_D = d_itemOfInteg_1SpecType( Py, Qy, Gx, Hx, c_h * indCurSqOx[1], c_h * indCurSqOy[1] ); } else { buf_D = d_itemOfInteg_1SpecType( Py, Qy, Gx, Hx, c_h *indCurSqOx[1] , c_h * indCurSqOy[1] ); } buf_D = buf_D /c_h /c_h; integ = buf_D * rho[0][0]; // rhoInPrevTL[ indCurSqOx[0] ][ indCurSqOy[0] ]; if( (indCurSqOx[0] >= 0) && (indCurSqOy[1] >= 0) ) { buf_D = d_itemOfInteg_1SpecType( Py, Qy, Gx, Hx, c_h *indCurSqOx[0] , c_h * indCurSqOy[1] ); } else { buf_D = 
d_itemOfInteg_1SpecType( Py, Qy, Gx, Hx, c_h * indCurSqOx[0] , c_h * indCurSqOy[1] ); } buf_D = buf_D /c_h /c_h; integ = integ - buf_D * rho[1][0]; // rhoInPrevTL[ indCurSqOx[1] ][ indCurSqOy[0] ]; if( (indCurSqOx[1] >= 0) && (indCurSqOy[0] >= 0) ) { buf_D = d_itemOfInteg_1SpecType( Py, Qy, Gx, Hx, c_h * indCurSqOx[1] , c_h * indCurSqOy[0] ); } else { buf_D = d_itemOfInteg_1SpecType( Py, Qy, Gx, Hx, c_h * indCurSqOx[1] , c_h * indCurSqOy[0] ); } buf_D = buf_D /c_h /c_h; integ = integ - buf_D * rho[0][1]; // rhoInPrevTL[ indCurSqOx[0] ][ indCurSqOy[1] ]; if( (indCurSqOx[0] >= 0) && (indCurSqOy[0] >= 0) ) { buf_D = d_itemOfInteg_1SpecType( Py, Qy, Gx, Hx, c_h *indCurSqOx[0], c_h * indCurSqOy[0] ); } else { buf_D = d_itemOfInteg_1SpecType( Py, Qy, Gx, Hx, c_h * indCurSqOx[0] , c_h * indCurSqOy[0] ); } buf_D = buf_D /c_h /c_h; return integ + buf_D * rho[1][1]; // rhoInPrevTL[ indCurSqOx[1] ][ indCurSqOy[1] ]; } __device__ double d_integOfChan_SLRightSd( int iCurrTL, // - Index of current time layer. // double *bv, int wTrPCI, // - Where travel point current (botton vertex) is. double *uv, int wTrPNI, // - Where travel point next (upper vertex) is. // int * indCurSqOx, // - Index by OX axis where bv and uv are. // double lb, int * indLB, // - Left boundary by Ox. Index by OX axis where lb is. // int * indCurSqOy, // - Index of current square by Oy axis. double * rhoInPrevTL_asV ) { double mv[2]; // - Middle and right vertices. int wMvI; // - Where middle vertex is. int indCurSqOxToCh[2]; // - Indices of current square by Ox axis to be changed. Under which we want to integrate. double h = c_h; double a_SL, b_SL; // - Coefficients of slant line: x = a_SL *y + b_SL. double Gx, Hx; // - Left boundary for each integration. double integ = 0.; double buf_D; int j; // Let's compute helpful values. 
if( uv[0] <= bv[0] ) { mv[0] = uv[0]; mv[1] = uv[1]; wMvI = wTrPNI; } if( uv[0] > bv[0] ) { mv[0] = bv[0]; mv[1] = bv[1]; wMvI = wTrPCI; } if( ( fabs(uv[1] - bv[1]) ) <= 1.e-12 ) { // Computation is impossible. Too smale values. Let's return some approximate value. // buf_D = (uv[1] - bv[1]) * ((uv[0] + bv[0]) /2. - lb) * rhoInPrevTL[ indCurSqOx[0] ][ indCurSqOy[0] ]; return fabs(uv[1] - bv[1]); // fabs(uv[1] - bv[1]); } // First step: from "lb" to "mas OX[ indCurSqOx[0] ]" by iteration. // integ += fabs( mv[0] - lb) * fabs(uv[1] - bv[1]); indCurSqOxToCh[0] = indLB[0]; indCurSqOxToCh[1] = indCurSqOxToCh[0] +1; for( j = indLB[0]; j< indCurSqOx[0]; j++ ) { // If this is first cell we should integrate under rectangle only. if( indCurSqOxToCh[0] >= 0 ) { Gx = c_h * indCurSqOxToCh[0]; Hx = c_h * indCurSqOxToCh[1]; } if( indCurSqOxToCh[0] < 0 ) { Gx = h * indCurSqOxToCh[0]; Hx = h * indCurSqOxToCh[1]; } if( j == indLB[0] ) { Gx = lb; } buf_D = d_integUnderRectAng_OneCell( bv[1], // - double Py, uv[1], // - double Qy, // Gx, // - double Gx, Hx, // - double Hx, // iCurrTL, // - Index of current time layer. // indCurSqOxToCh, // - Index of current square by Ox axis. indCurSqOy, // - Index of current square by Oy axis. rhoInPrevTL_asV ); integ += buf_D; indCurSqOxToCh[0] += 1; indCurSqOxToCh[1] = indCurSqOxToCh[0] +1; } // Integration. Second step: under [ indCurSqOx[0]; indCurSqOx[1] ] square. // A. Under rectangle. if( wMvI == 1 ) { if( indCurSqOx[0] == indLB[0] ) { Gx = lb; } if( indCurSqOx[0] > indLB[0] ) { if( indCurSqOx[0] >= 0) { Gx = c_h * indCurSqOx[0]; } if( indCurSqOx[0] < 0) { Gx = h * indCurSqOx[0]; } } buf_D = d_integUnderRectAng_OneCell( bv[1], // - double Py, uv[1], // - double Qy, // Gx, // - double Gx, mv[0], // - double Hx, // iCurrTL, // - Index of current time layer. // indCurSqOx, // - Index of current square by Ox axis. indCurSqOy, // - Index of current square by Oy axis. rhoInPrevTL_asV ); integ += buf_D; } // B. Under triangle. 
if( ( fabs(uv[1] - bv[1]) ) > 1.e-12 ) { // integ += fabs(uv[1] - bv[1]) * (rv[0] - mv[0]) /2.; // Coefficients of slant line: x = a_SL *y + b_SL. a_SL = (uv[0] - bv[0]) / (uv[1] - bv[1]); b_SL = bv[0] - a_SL * bv[1]; // Integration under one cell triangle. if( fabs( a_SL ) > 1.e-12 ) { buf_D = d_integUnderRightTr_OneCell( bv[1], // - double Py, uv[1], // - double Qy, // a_SL, b_SL, mv[0], // - double Gx, // iCurrTL, // - Index of current time layer. // indCurSqOx, // - Index of current square by Ox axis. indCurSqOy, // - Index of current square by Oy axis. rhoInPrevTL_asV ); integ += buf_D; } } return integ; } __device__ double d_integOfChan_SLLeftSd( int iCurrTL, // - Index of current time layer. // double *bv, int wTrPCI, // - Where travel point current (botton vertex) is. double *uv, int wTrPNI, // - Where travel point next (upper vertex) is. // int * indCurSqOx, // - Index by OX axis where bv and uv are. // double rb, int * indRB, // - Right boundary by Ox. Index by OX axis where rb is. // int * indCurSqOy, // - Index of current square by Oy axis. double * rhoInPrevTL_asV ) { double mv[2]; // - Left and middle vertices. int wMvI; // - Where middle vertex is. int indCurSqOxToCh[2]; // - Indices of current square by Ox axis to be changed. Under which we want to integrate. double h = c_h; double a_SL, b_SL; // - Coefficients of slant line: x = a_SL *y + b_SL. double Gx, Hx; // - Left and right boundary for each integration. double integ = 0.; double buf_D; int j; // Let's compute helpful values. if( uv[0] <= bv[0] ) { mv[0] = bv[0]; mv[1] = bv[1]; wMvI = wTrPCI; } if( uv[0] > bv[0] ) { mv[0] = uv[0]; mv[1] = uv[1]; wMvI = wTrPNI; } if( ( fabs(uv[1] - bv[1]) ) <= 1.e-12 ) { // Computation is impossible. Too smale values. Let's return some approximate value. // buf_D = (uv[1] - bv[1]) * (rb - (uv[0] + bv[0]) /2.) * rhoInPrevTL[ indCurSqOx[0] ][ indCurSqOy[0] ]; return fabs(uv[1] - bv[1]); // fabs(uv[1] - bv[1]); } // Integration. 
First step: under [ indCurSqOx[0]; indCurSqOx[1] ] square. // A. Under triangle. if( ( fabs(uv[1] - bv[1]) ) > 1.e-12 ) { // Coefficients of slant line: x = a_SL *y + b_SL. a_SL = (uv[0] - bv[0]) / (uv[1] - bv[1]); b_SL = bv[0] - a_SL * bv[1]; // Integration under one cell triangle. if( fabs( a_SL ) > 1.e-12 ) { buf_D = d_integUnderLeftTr_OneCell( bv[1], // - double Py, uv[1], // - double Qy, // a_SL, b_SL, mv[0], // - double Hx, // iCurrTL, // - Index of current time layer. // indCurSqOx, // - Index of current square by Ox axis. indCurSqOy, // - Index of current square by Oy axis. rhoInPrevTL_asV ); integ += buf_D; } } // B. Under rectangle. Need to be cheking. if( wMvI == 1 ) { if( indCurSqOx[0] == indRB[0] ) { Hx = rb; } if( indCurSqOx[0] < indRB[0] ) { if( indCurSqOx[1] >= 0) { Hx = c_h * indCurSqOx[1] ; } if( indCurSqOx[1] < 0) { Hx = h * indCurSqOx[1]; } } buf_D = d_integUnderRectAng_OneCell( bv[1], // - double Py, uv[1], // - double Qy, // mv[0], // - double Gx, Hx, // - double Hx, // iCurrTL, // - Index of current time layer. // indCurSqOx, // - Index of current square by Ox axis. indCurSqOy, // - Index of current square by Oy axis. rhoInPrevTL_asV ); integ += buf_D; } // Second step: from "mas OX[ indCurSqOx[1] ]" to "rb" by iteration. indCurSqOxToCh[0] = indCurSqOx[0] +1; indCurSqOxToCh[1] = indCurSqOxToCh[0] +1; for( j = indCurSqOx[0] +1; j< indRB[0] +1; j++ ) { // If this is first cell we should integrate under triangle only. if( indCurSqOxToCh[1] > 0 ) { Gx = c_h * indCurSqOxToCh[0] ; Hx = c_h * indCurSqOxToCh[1]; } if( indCurSqOxToCh[1] <= 0 ) { Gx = h * indCurSqOxToCh[0]; Hx = h * indCurSqOxToCh[1]; } if( j == indRB[0] ) { Hx = rb; } buf_D = d_integUnderRectAng_OneCell( bv[1], // - double Py, uv[1], // - double Qy, // Gx, // - double Gx, Hx, // - double Hx, // iCurrTL, // - Index of current time layer. // indCurSqOxToCh, // - Index of current square by Ox axis. indCurSqOy, // - Index of current square by Oy axis. 
rhoInPrevTL_asV ); integ += buf_D; indCurSqOxToCh[0] += 1; indCurSqOxToCh[1] = indCurSqOxToCh[0] +1; } return integ; } __device__ double d_integUnderRigAngTr_BottLeft( int iCurrTL, // - Index of current time layer. // double *bv, double *uv, double * rhoInPrevTL_asV ) { double trPC[2]; // - Travel point current; int wTrPCI = 0; // - Where travel point current is? double trPN[2]; // - Travel point next; int wTrPNI = 0; // - Where travel point next is? double ang; // - Angle of slant line. Should be greater zero. int indCurSqOx[2], indCurSqOy[2]; // - Index of current square by Ox and Oy axes. int indRB[2]; // - Index of right boundary. double distOx, distOy; // - Distance to near Ox and Oy straight lines. bool isTrDone = false; // - Is travel done. double integOfBottTr = 0.; // - Value which we are computing. double buf_D; // Initial data. trPC[0] = bv[0]; trPC[1] = bv[1]; if( ( fabs(bv[0] - uv[0]) ) < 1.e-12 ) { // This triangle has very small width. I guess further computation isn't correct. return fabs(bv[0] - uv[0]); } ang = (uv[1] - bv[1]) / (bv[0] - uv[0]); if( fabs(ang) < 1.e-12 ) { // This triangle has very small height. I guess further computation isn't correct. return fabs(ang); } indCurSqOx[0] = (int)( (trPC[0] - 1.e-14) /c_h); // - If trPC[0] is in grid edge I want it will be between in the left side of indCurSqOx[1]. if( (trPC[0] - 1.e-14) <= 0 ) { indCurSqOx[0] -= 1; // - The case when "trPC[0]" ia negative. } indCurSqOx[1] = indCurSqOx[0] +1; // - It's important only in rare case then trPC is in grid edge. indRB[0] = indCurSqOx[0]; indRB[1] = indRB[0] +1; indCurSqOy[0] = (int)( (trPC[1] + 1.e-14) /c_h); // - If trPC[1] is in grid edge I want it will be between indCurSqOx[0] and indCurSqOx[1]. if( (trPC[1] + 1.e-14) <= 0 ) { indCurSqOy[0] -= 1; // - The case when "trPC[0]" ia negative. } indCurSqOy[1] = indCurSqOy[0] +1; // - It's important only in rare case then trPC is in grid edge. 
if( indCurSqOx[0] >= 0) { distOx = trPC[0] - c_h * indCurSqOx[0] ; } if( indCurSqOx[0] < 0 ) { distOx = fabs( trPC[0] - c_h * indCurSqOx[0] ); } if( indCurSqOy[1] >= 0 ) { distOy = c_h * indCurSqOy[1] - trPC[1]; } if( indCurSqOy[1] < 0 ) { distOy = fabs( c_h * indCurSqOy[1] - trPC[1] ); } do { // a. First case. if( (distOy /distOx) <= ang ) { // Across with straight line parallel Ox axis. wTrPNI = 1; if( indCurSqOy[1] >= 0) { trPN[1] = c_h * indCurSqOy[1]; } if( indCurSqOy[1] < 0) { trPN[1] = c_h * indCurSqOy[1]; } trPN[0] = bv[0] - (trPN[1] - bv[1]) /ang; } // b. Second case. if( (distOy /distOx) > ang ) { // Across with straight line parallel Oy axis. wTrPNI = 2; if( indCurSqOx[0] >= 0 ) { trPN[0] = c_h * indCurSqOx[0]; } if( indCurSqOx[0] < 0 ) { trPN[0] = c_h * indCurSqOx[0]; } trPN[1] = bv[1] - ang * (trPN[0] - bv[0]); } // c. Cheking. if( trPN[0] < (uv[0] + 1.e-14) ) { trPN[0] = uv[0]; trPN[1] = uv[1]; isTrDone = true; wTrPNI = 0; } // d. Integration. buf_D = d_integOfChan_SLLeftSd( iCurrTL, // - Index of current time layer. // trPC, wTrPCI, // - double *bv, trPN, wTrPNI, // - double *uv, // indCurSqOx, // - Indices where trPC and trPN are. // bv[0], indRB, // - double rb = Right boundary by Ox. // indCurSqOy, // - Index of current square by Oy axis. rhoInPrevTL_asV ); integOfBottTr = integOfBottTr + buf_D; // e. Updating. if( isTrDone == false ) { // We will compute more. We need to redefine some values. 
wTrPCI = wTrPNI; trPC[0] = trPN[0]; trPC[1] = trPN[1]; if( wTrPNI == 1) { indCurSqOy[0] += 1; indCurSqOy[1] += 1; } if( wTrPNI == 2) { indCurSqOx[0] -= 1; indCurSqOx[1] -= 1; } if( indCurSqOx[0] >= 0) { distOx = trPC[0] - c_h * indCurSqOx[0] ; } if( indCurSqOx[0] < 0) { distOx = fabs( trPC[0] - c_h * indCurSqOx[0] ); } if( indCurSqOy[1] >= 0 ) { distOy = c_h * indCurSqOy[1] - trPC[1]; } if( indCurSqOy[1] < 0 ) { distOy = fabs( c_h * indCurSqOy[1] - trPC[1] ); } } } while( !isTrDone ); return integOfBottTr; } __device__ double d_integUnderRigAngTr_BottRight( int iCurrTL, // - Index of current time layer. // double *bv, double *uv, double * rhoInPrevTL_asV ) { double trPC[2]; // - Travel point current; int wTrPCI = 0; // - Where travel point current is? double trPN[2]; // - Travel point next; int wTrPNI = 0; // - Where travel point next is? double ang; // - Angle of slant line. Should be greater zero. int indCurSqOx[2], indCurSqOy[2]; // - Index of current square by Ox and Oy axes. int indLB[2]; // - Index of left boundary. double distOx, distOy; // - Distance to near Ox and Oy straight lines. bool isTrDone = false; // - Is travel done. double integOfBottTr = 0.; // - Value which we are computing. double buf_D; trPC[0] = bv[0]; trPC[1] = bv[1]; if( ( fabs(bv[0] - uv[0]) ) < 1.e-12 ) return fabs(bv[0] - uv[0]); ang = (uv[1] - bv[1]) / (uv[0] - bv[0]); if( fabs(ang) < 1.e-12 ) return fabs(ang); indCurSqOx[0] = (int)( (trPC[0] + 1.e-14) /c_h); // - If trPC[0] is in grid edge I want it will be between in the right side. if( (trPC[0] + 1.e-14) <= 0 ) indCurSqOx[0] -= 1; // - The case when "trPC[0]" ia negative. indCurSqOx[1] = indCurSqOx[0] +1; // - It's important only in rare case then trPC is in grid edge. indLB[0] = indCurSqOx[0]; indLB[1] = indLB[0] +1; indCurSqOy[0] = (int)( (trPC[1] + 1.e-14) /c_h); // - If trPC[1] is in grid edge I want it will be in the upper side. if( (trPC[1] + 1.e-14) <= 0 ) { indCurSqOy[0] -= 1; // - The case when "trPC[0]" ia negative. 
} indCurSqOy[1] = indCurSqOy[0] +1; // - It's important only in rare case then trPC is in grid edge. if( indCurSqOx[1] >=0 ) { distOx = fabs( c_h * indCurSqOx[1] - trPC[0] ); } if( indCurSqOx[1] < 0 ) { distOx = fabs( c_h * indCurSqOx[1] - trPC[0] ); } if( indCurSqOy[1] >=0 ) { distOy = fabs( c_h * indCurSqOy[1] - trPC[1] ); } if( indCurSqOy[1] < 0 ) { distOy = fabs( c_h * indCurSqOy[1] - trPC[1] ); } do { // a. First case. if( (distOy /distOx) <= ang ) { // Across with straight line parallel Ox axis. wTrPNI = 1; if( indCurSqOy[1] >=0 ) { trPN[1] = c_h * indCurSqOy[1]; } if( indCurSqOy[1] < 0 ) { trPN[1] = c_h * indCurSqOy[1]; } trPN[0] = bv[0] + (trPN[1] - bv[1]) /ang; } // b. Second case. if( (distOy /distOx) > ang ) { // Across with straight line parallel Oy axis. wTrPNI = 2; if( indCurSqOx[1] >= 0 ) { trPN[0] = c_h * indCurSqOx[1]; } if( indCurSqOx[1] < 0 ) { trPN[0] = c_h * indCurSqOx[1]; } trPN[1] = bv[1] + ang * (trPN[0] - bv[0]); } // c. Cheking. if( trPN[0] > (uv[0] - 1.e-14) ) { // - Without "fabs"!!! trPN[0] = uv[0]; trPN[1] = uv[1]; isTrDone = true; wTrPNI = 0; } // d. Integration. buf_D = d_integOfChan_SLRightSd( iCurrTL, // - Index of current time layer. // trPC, wTrPCI, // - double *bv, trPN, wTrPNI, // - double *uv, // indCurSqOx, // - Indices where trPC and trPN are. // bv[0], indLB, // - double lb = Left boundary by Ox. // indCurSqOy, // - Index of current square by Oy axis. rhoInPrevTL_asV ); integOfBottTr = integOfBottTr + buf_D; // e. Updating. if( isTrDone == false ) { // We will compute more. We need to redefine some values. 
wTrPCI = wTrPNI; trPC[0] = trPN[0]; trPC[1] = trPN[1]; if( wTrPNI == 1) { indCurSqOy[0] += 1; indCurSqOy[1] += 1; } if( wTrPNI == 2) { indCurSqOx[0] += 1; indCurSqOx[1] += 1; } if( indCurSqOx[1] >=0 ) { distOx = fabs( c_h * indCurSqOx[1] - trPC[0] ); } if( indCurSqOx[1] < 0 ) { distOx = fabs( c_h * indCurSqOx[1] - trPC[0] ); } if( indCurSqOy[1] >=0 ) { distOy = fabs( c_h * indCurSqOy[1] - trPC[1] ); } if( indCurSqOy[1] < 0 ) { distOy = fabs( c_h * indCurSqOy[1] - trPC[1] ); } } } while( !isTrDone ); return integOfBottTr; } __device__ double d_integUnderBottTr( int iCurrTL, // - Index of current time layer. // double * LvBt, // - Left, Right and Botton vertices of Botton triangle. double * RvBt, // - Left, Right and Botton vertices of Botton triangle. double * BvBt, // - Left, Right and Botton vertices of Botton triangle. double * rhoInPrevTL_asV, int ii, int jj ) // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! { double integOfBottTr; double buf_D; // Three ways are possible. // 1. if( BvBt[0] <= LvBt[0] ) { buf_D = d_integUnderRigAngTr_BottRight( iCurrTL, // BvBt, RvBt, rhoInPrevTL_asV ); integOfBottTr = buf_D; buf_D = d_integUnderRigAngTr_BottRight( iCurrTL, // BvBt, LvBt, rhoInPrevTL_asV ); integOfBottTr = integOfBottTr - buf_D; // printf("Bv<Lv: i= %d, j= %d res= %le",ii,jj,integOfBottTr); // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! return integOfBottTr; } // 2. if( (BvBt[0] > LvBt[0]) && (BvBt[0] < RvBt[0]) ) { buf_D = d_integUnderRigAngTr_BottLeft( iCurrTL, // BvBt, LvBt, rhoInPrevTL_asV ); integOfBottTr = buf_D; buf_D = d_integUnderRigAngTr_BottRight( iCurrTL, // BvBt, RvBt, rhoInPrevTL_asV ); integOfBottTr = integOfBottTr + buf_D; // printf("Bv>Lv & Bv<Rv: i= %d, j= %d res= %le",ii,jj,integOfBottTr); // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! return integOfBottTr; } // 3. 
if( BvBt[0] >= RvBt[0] ) { buf_D = d_integUnderRigAngTr_BottLeft( iCurrTL, // BvBt, LvBt, rhoInPrevTL_asV ); integOfBottTr = buf_D; buf_D = d_integUnderRigAngTr_BottLeft( iCurrTL, // BvBt, RvBt, rhoInPrevTL_asV ); integOfBottTr = integOfBottTr - buf_D; // printf("Bv>Rv: i= %d, j= %d res= %le",ii,jj,integOfBottTr); // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! return integOfBottTr; } return integOfBottTr; } __device__ double d_integUnderRigAngTr_UppLeft( int iCurrTL, // - Index of current time layer. // double *bv, double *uv, double * rhoInPrevTL_asV ) { // return ( fabs( (uv[1] - bv[1]) * (bv[0] - uv[0]) /2.) ); double trPC[2]; // - Travel point current; int wTrPCI = 0; // - Where travel point current is? double trPN[2]; // - Travel point next; int wTrPNI = 0; // - Where travel point next is? double ang; // - Angle of slant line. Should be greater zero. int indCurSqOx[2], indCurSqOy[2]; // - Index of current square by Ox and Oy axes. int indRB[2]; // - Index of right boundary. double distOx, distOy; // - Distance to near Ox and Oy straight lines. bool isTrDone = false; // - Is travel done. double integOfUppTr = 0.; // - Value which we are computing. double buf_D; // Initial data. trPC[0] = bv[0]; trPC[1] = bv[1]; if( ( fabs(bv[0] - uv[0]) ) < 1.e-12 ) return fabs(bv[0] - uv[0]); ang = (uv[1] - bv[1]) / (uv[0] - bv[0]); if( fabs(ang) < 1.e-12 ) return fabs(ang); // The follow equations are quite important. indCurSqOx[0] = (int)( (trPC[0] + 1.e-14) /c_h); // - If trPC[0] is in grid edge I want it will be in the right side. if( (trPC[0] + 1.e-14) <= 0 ) { indCurSqOx[0] -= 1; // - The case when "trPC[0]" ia negative. } indCurSqOx[1] = indCurSqOx[0] +1; // - It's important only in rare case then trPC is in grid edge. indCurSqOy[0] = (int)( (trPC[1] + 1.e-14) /c_h); // - If trPC[1] is in grid edge I want it will be in the upper square. if( (trPC[1] + 1.e-14) <= 0 ) { indCurSqOy[0] -= 1; // - The case when "trPC[0]" ia negative. 
} indCurSqOy[1] = indCurSqOy[0] +1; indRB[0] = (int)( (uv[0] - 1.e-14) /c_h); // - If uv[0] is in grid edge I want it will be in the left side. if( (uv[0] - 1.e-14) <= 0 ) { indRB[0] -= 1; // - The case when "trPC[0]" ia negative. } indRB[1] = indRB[0] +1; if( indCurSqOx[1] >= 0) { distOx = c_h * indCurSqOx[1] - trPC[0]; } if( indCurSqOx[1] < 0) { distOx = fabs( c_h * indCurSqOx[1] - trPC[0] ); } if( indCurSqOy[1] >= 0 ) { distOy = c_h * indCurSqOy[1] - trPC[1]; } if( indCurSqOy[1] < 0 ) { distOy = fabs( c_h * indCurSqOy[1] - trPC[1] ); } do { // a. First case. if( (distOy /distOx) <= ang ) { // Across with straight line parallel Ox axis. wTrPNI = 1; if( indCurSqOy[1] >= 0 ) { trPN[1] = c_h * indCurSqOy[1]; } if( indCurSqOy[1] < 0 ) { trPN[1] = c_h * indCurSqOy[1]; } trPN[0] = bv[0] + (trPN[1] - bv[1]) /ang; } // b. Second case. if( (distOy /distOx) > ang ) { // Across with straight line parallel Oy axis. wTrPNI = 2; if( indCurSqOx[1] >= 0 ) { trPN[0] = c_h * indCurSqOx[1]; } if( indCurSqOx[1] < 0 ) { trPN[0] = c_h * indCurSqOx[1]; } trPN[1] = bv[1] + ang * (trPN[0] - bv[0]); } // c. Cheking. if( trPN[0] > (uv[0] - 1.e-14) ) { trPN[0] = uv[0]; trPN[1] = uv[1]; isTrDone = true; wTrPNI = 0; } // d. Integration. buf_D = d_integOfChan_SLLeftSd( iCurrTL, // - Index of current time layer. // trPC, wTrPCI, // - double *bv, trPN, wTrPNI, // - double *uv, // indCurSqOx, // - Indices where trPC and trPN are. // uv[0], indRB, // - double rb = Right boundary by Ox. // indCurSqOy, // - Index of current square by Oy axis. rhoInPrevTL_asV ); integOfUppTr = integOfUppTr + buf_D; // e. Updating. if( isTrDone == false ) { // We will compute more. We need to redefine some values. 
wTrPCI = wTrPNI; trPC[0] = trPN[0]; trPC[1] = trPN[1]; if( wTrPNI == 1) { indCurSqOy[0] += 1; indCurSqOy[1] += 1; } if( wTrPNI == 2) { indCurSqOx[0] += 1; indCurSqOx[1] += 1; } if( indCurSqOx[1] >= 0) { distOx = fabs( c_h * indCurSqOx[1] - trPC[0] ); } if( indCurSqOx[1] < 0) { distOx = fabs( c_h * indCurSqOx[1] - trPC[0] ); } if( indCurSqOy[1] >= 0 ) { distOy = fabs( c_h * indCurSqOy[1] - trPC[1] ); } if( indCurSqOy[1] < 0 ) { distOy = fabs( c_h * indCurSqOy[1] - trPC[1] ); } } } while( !isTrDone ); return integOfUppTr; } __device__ double d_integUnderRigAngTr_UppRight( int iCurrTL, // - Index of current time layer. // double *bv, double *uv, double * rhoInPrevTL_asV ) { // return ( fabs( (uv[1] - bv[1]) * (bv[0] - uv[0]) /2.) ); double trPC[2]; // - Travel point current; int wTrPCI = 0; // - Where travel point current is? double trPN[2]; // - Travel point next; int wTrPNI = 0; // - Where travel point next is? double ang; // - Angle of slant line. Should be greater zero. int indCurSqOx[2], indCurSqOy[2]; // - Index of current square by Ox and Oy axes. int indLB[2]; // - Index of left boundary. double distOx, distOy; // - Distance to near Ox and Oy straight lines. bool isTrDone = false; // - Is travel done. double integOfUppTr = 0.; // - Value which we are computing. double buf_D; // Initial data. trPC[0] = bv[0]; trPC[1] = bv[1]; if( ( fabs(bv[0] - uv[0]) ) < 1.e-12 ) { // This triangle has very small width. I guess further computation isn't correct. return fabs(bv[0] - uv[0]); } ang = (uv[1] - bv[1]) / (bv[0] - uv[0]); if( fabs(ang) < 1.e-12 ) { // This triangle has very small height. I guess further computation isn't correct. return fabs(ang); } indCurSqOx[0] = (int)( (trPC[0] - 1.e-14) /c_h); // - If trPC[0] is in grid edge I want it will be between in the left side. if( (trPC[0] - 1.e-14) <= 0 ) { indCurSqOx[0] -= 1; // - The case when "trPC[0]" ia negative. } indCurSqOx[1] = indCurSqOx[0] +1; // - It's important only in rare case then trPC is in grid edge. 
indLB[0] = (int)( (uv[0] + 1.e-14) /c_h); if( (uv[0] + 1.e-14) <=0 ) { indLB[0] -= 1; // - The case when "trPC[0]" ia negative. } indLB[1] = indLB[0] +1; indCurSqOy[0] = (int)( (trPC[1] + 1.e-14) /c_h); // - If trPC[1] is in grid edge I want it will be in the upper side. if( (trPC[1] + 1.e-14) <= 0 ) { indCurSqOy[0] -= 1; // - The case when "trPC[0]" ia negative. } indCurSqOy[1] = indCurSqOy[0] +1; // - It's important only in rare case then trPC is in grid edge. if( indCurSqOx[0] >= 0 ) { distOx = fabs( trPC[0] - c_h * indCurSqOx[0] ); } if( indCurSqOx[0] < 0 ) { distOx = fabs( trPC[0] - c_h * indCurSqOx[0] ); } if( indCurSqOy[1] >= 0 ) { distOy = fabs( c_h * indCurSqOy[1] - trPC[1] ); } if( indCurSqOy[1] < 0 ) { distOy = fabs( c_h * indCurSqOy[1] - trPC[1] ); } do { // a. First case. if( (distOy /distOx) <= ang ) { // Across with straight line parallel Ox axis. wTrPNI = 1; if( indCurSqOy[1] >= 0 ) { trPN[1] = c_h * indCurSqOy[1]; } if( indCurSqOy[1] < 0 ) { trPN[1] = c_h * indCurSqOy[1]; } trPN[0] = bv[0] - (trPN[1] - bv[1]) /ang; } // b. Second case. if( (distOy /distOx) > ang ) { // Across with straight line parallel Oy axis. wTrPNI = 2; if( indCurSqOx[0] >= 0 ) { trPN[0] = c_h * indCurSqOx[0]; } if( indCurSqOx[0] < 0 ) { trPN[0] = c_h * indCurSqOx[0]; } trPN[1] = bv[1] - ang * (trPN[0] - bv[0]); } // c. Cheking. if( trPN[0] < (uv[0] + 1.e-14) ) { trPN[0] = uv[0]; trPN[1] = uv[1]; isTrDone = true; wTrPNI = 0; } // d. Integration. buf_D = d_integOfChan_SLRightSd( iCurrTL, // - Index of current time layer. // trPC, wTrPCI, // - double *bv, trPN, wTrPNI, // - double *uv, // indCurSqOx, // - Indices where trPC and trPN are. // uv[0], indLB, // - double lb = Left boundary by Ox. // indCurSqOy, // - Index of current square by Oy axis. rhoInPrevTL_asV ); integOfUppTr = integOfUppTr + buf_D; // e. Updating. if( isTrDone == false ) { // We will compute more. We need to redefine some values. 
wTrPCI = wTrPNI; trPC[0] = trPN[0]; trPC[1] = trPN[1]; if( wTrPNI == 1) { indCurSqOy[0] += 1; indCurSqOy[1] += 1; } if( wTrPNI == 2) { indCurSqOx[0] -= 1; indCurSqOx[1] -= 1; } if( indCurSqOx[0] >= 0 ) { distOx = fabs( trPC[0] - c_h * indCurSqOx[0] ); } if( indCurSqOx[0] < 0 ) { distOx = fabs( trPC[0] - c_h * indCurSqOx[0] ); } if( indCurSqOy[1] >= 0 ) { distOy = fabs( c_h * indCurSqOy[1] - trPC[1] ); } if( indCurSqOy[1] < 0 ) { distOy = fabs( c_h * indCurSqOy[1] - trPC[1] ); } } } while(!isTrDone); return integOfUppTr; } __device__ double d_integUnderUpperTr( int iCurrTL, // - Index of current time layer. // double * LvUt, // - Left, Right and Upper vertices of Upper triangle. double * RvUt, // - Left, Right and Upper vertices of Upper triangle. double * UvUt, // - Left, Right and Upper vertices of Upper triangle. double * rhoInPrevTL_asV) { double integOfUppTr; double buf_D; // Three ways are possible. // 1. if( UvUt[0] <= LvUt[0] ) { buf_D = d_integUnderRigAngTr_UppRight( iCurrTL, // RvUt, UvUt, rhoInPrevTL_asV ); integOfUppTr = buf_D; buf_D = d_integUnderRigAngTr_UppRight( iCurrTL, // LvUt, UvUt, rhoInPrevTL_asV ); integOfUppTr = integOfUppTr - buf_D; return integOfUppTr; } // 2. if( (UvUt[0] > LvUt[0]) && (UvUt[0] < RvUt[0]) ) { buf_D = d_integUnderRigAngTr_UppLeft( iCurrTL, // LvUt, UvUt, rhoInPrevTL_asV ); integOfUppTr = buf_D; buf_D = d_integUnderRigAngTr_UppRight( iCurrTL, // RvUt, UvUt, rhoInPrevTL_asV ); integOfUppTr = integOfUppTr + buf_D; return integOfUppTr; } // 3. if( UvUt[0] >= RvUt[0] ) { buf_D = d_integUnderRigAngTr_UppLeft( iCurrTL, // LvUt, UvUt, rhoInPrevTL_asV ); integOfUppTr = buf_D; buf_D = d_integUnderRigAngTr_UppLeft( iCurrTL, // RvUt, UvUt, rhoInPrevTL_asV ); integOfUppTr = integOfUppTr - buf_D; return integOfUppTr; } return integOfUppTr; } __device__ double d_integUnderUnunifTr( int iCurrTL, // - Index of current time layer. // double * firVer, // - First vertex of triangle. double * secVer, // - Second vertex of triangle. 
double * thiVer, // - Third vertex of triangle. double * rhoInPrevTL_asV, int ii, int jj ) //!!!!!!!!!!!!!!!!!!! { double bv[2], mv[2], uv[2]; // - Botton, middle and upper vertices of triangle. bool isFirVUsed = false; bool isSecVUsed = false; bool isThiVUsed = false; bool is1VUsed, is2VUsed, is3VUsed; double a_LC, b_LC, c_LC; // - Coefficients of line betweeen "bv" and "uv" vertices. double ap[2]; // - Across point of line through "bv" to "uv" and "y == mv[1]" double LvBt[2], RvBt[2], BvBt[2]; // - Left, Right and Botton vertices of Botton triangle. double integOfBottTr; // - Item of integral under Botton triangle. double LvUt[2], RvUt[2], UvUt[2]; // - Left, Right and Upper vertices of Upper triangle. double integOfUppTr; // - Item of integral under Upper triangle. double integ = 0.; // - Item which I'm computing. // 1. I need to understand which vertex is botton, middle and upper. bv[1] = firVer[1]; bv[0] = firVer[0]; isFirVUsed = true; if( bv[1] > secVer[1] ) { bv[1] = secVer[1]; bv[0] = secVer[0]; isFirVUsed = false; isSecVUsed = true; } if( bv[1] > thiVer[1] ) { bv[1] = thiVer[1]; bv[0] = thiVer[0]; isFirVUsed = false; isSecVUsed = false; isThiVUsed = true; } uv[1] = 0; // - The minimum possible value. is1VUsed = false; is2VUsed = false; is3VUsed = false; if( (uv[1] < firVer[1]) && (isFirVUsed == false) ) { uv[1] = firVer[1]; uv[0] = firVer[0]; is1VUsed = true; } if( (uv[1] < secVer[1]) && (isSecVUsed == false) ) { uv[1] = secVer[1]; uv[0] = secVer[0]; is2VUsed = true; is1VUsed = false; } if( (uv[1] < thiVer[1]) && (isThiVUsed == false) ) { uv[1] = thiVer[1]; uv[0] = thiVer[0]; is3VUsed = true; is2VUsed = false; is1VUsed = false; } // Dangerous. if( (isFirVUsed == false) && (is1VUsed == false) ) { mv[1] = firVer[1]; mv[0] = firVer[0]; } if( (isSecVUsed == false) && (is2VUsed == false) ) { mv[1] = secVer[1]; mv[0] = secVer[0]; } if( (isThiVUsed == false) && (is3VUsed == false) ) { mv[1] = thiVer[1]; mv[0] = thiVer[0]; } // 2. I want to compute across point. 
// 2.a Let's compute line coefficients betweeen "bv" and "uv" vertices. // a_LC * x + b_LC * y = c_LC. a_LC = uv[1] - bv[1]; b_LC = bv[0] - uv[0]; c_LC = (bv[0] - uv[0])*bv[1] + (uv[1] - bv[1])*bv[0]; // 2.b Across point. ap[1] = mv[1]; if( fabs(a_LC) < 1.e-12 ) { // This triangle has very small height. I guess further computation isn't correct. return 1.e-12; } ap[0] = (c_LC - b_LC * ap[1]) /a_LC; // printf("i= %d, j= %d : ap[0]= %le mv[0]= %le \n",ii,jj, ap[0], mv[0]); // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! // 3. There the middle vertex relativly straight line is? Two ways are possible. if( mv[0] < ap[0] ) { // Left, Right and Botton vertices of Botton triangle. LvBt[0] = mv[0]; LvBt[1] = mv[1]; RvBt[0] = ap[0]; RvBt[1] = ap[1]; BvBt[0] = bv[0]; BvBt[1] = bv[1]; integOfBottTr = d_integUnderBottTr( iCurrTL, // - Index of current time layer. // LvBt, RvBt, BvBt, // - Left, Right and Botton vertices of Botton triangle. // rhoInPrevTL_asV, ii, jj ); // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! integ = integOfBottTr; // Left, Right and Upper vertices of Upper triangle. LvUt[0] = mv[0]; LvUt[1] = mv[1]; RvUt[0] = ap[0]; RvUt[1] = ap[1]; UvUt[0] = uv[0]; UvUt[1] = uv[1]; integOfUppTr = d_integUnderUpperTr( iCurrTL, // - Index of current time layer. // LvUt, RvUt, UvUt, // - Left, Right and Botton vertices of Upper triangle. // rhoInPrevTL_asV); integ = integ + integOfUppTr; return integ; } if( mv[0] >= ap[0] ) { // Left, Right and Botton vertices of Botton triangle. LvBt[0] = ap[0]; LvBt[1] = ap[1]; RvBt[0] = mv[0]; RvBt[1] = mv[1]; BvBt[0] = bv[0]; BvBt[1] = bv[1]; integOfBottTr = d_integUnderBottTr( iCurrTL, // - Index of current time layer. // LvBt, RvBt, BvBt, // - Left, Right and Botton vertices of Botton triangle. // rhoInPrevTL_asV, ii, jj ); // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! integ = integOfBottTr; // Left, Right and Upper vertices of Upper triangle. 
LvUt[0] = ap[0]; LvUt[1] = ap[1]; RvUt[0] = mv[0]; RvUt[1] = mv[1]; UvUt[0] = uv[0]; UvUt[1] = uv[1]; integOfUppTr = d_integUnderUpperTr( iCurrTL, // - Index of current time layer. // LvUt, RvUt, UvUt, // - Left, Right and Botton vertices of Upper triangle. rhoInPrevTL_asV ); return integ + integOfUppTr; } return integ; } __device__ double d_f_function(const int current_tl, const int i, const int j) { double x = c_h * i ; double y = c_h * j ; double arg_v = (x - c_lb) * (x - c_rb) * (1.+c_tau*current_tl) /10. * (y - c_ub) * (y - c_bb); double rho, dRhoDT, dRhoDX, dRhoDY; double u, duDX; double v, dvDY; rho = d_analytSolut(c_tau*current_tl, x, y ); dRhoDT = x * y * cos( c_tau*current_tl*x*y ); dRhoDX = c_tau*current_tl * y * cos( c_tau*current_tl*x*y ); dRhoDY = c_tau*current_tl * x * cos( c_tau*current_tl*x*y ); u = d_u_function(c_tau*current_tl, x, y ); duDX = -c_b * y * (1.-y) / ( 1. + x * x ); v = d_v_function(c_tau*current_tl, x, y ); dvDY = (x - c_lb) * (x - c_rb) * (1.+c_tau*current_tl) /10. * (y - c_bb + y - c_ub); dvDY = dvDY / ( 1. + arg_v * arg_v ); double res = dRhoDT + rho * duDX + u * dRhoDX + rho * dvDY + v * dRhoDY; return res; } __device__ double space_volume_in_prev_tl(double* prev_result, int current_tl, int i, int j) { double first1[2]; double second1[2]; double third1[2]; double first2[2]; double second2[2]; double third2[2]; double x, y; double c_tau_to_current_tl = (1. + current_tl * c_tau) / 10.; // A x = (c_h*(i - 1) + c_h*i) / 2.; y = (c_h*(j - 1) + c_h*j) / 2.; first1[0] = first2[0] = x - c_tau_b * y * (1. - y) * (c_pi_half + atan(-x)); first1[1] = first2[1] = y - c_tau * atan((x - c_lb) * (x - c_rb) * c_tau_to_current_tl * (y - c_ub) * (y - c_bb)); // B x = (c_h*(i + 1) + c_h*i) / 2.; second1[0] = x - c_tau_b * y * (1. - y) * (c_pi_half + atan(-x)); second1[1] = y - c_tau * atan((x - c_lb) * (x - c_rb) * c_tau_to_current_tl * (y - c_ub) * (y - c_bb)); // C y = (c_h*(j + 1) + c_h*j) / 2.; third1[0] = third2[0] = x - c_tau_b * y * (1. 
- y) * (c_pi_half + atan(-x)); third1[1] = third2[1] = y - c_tau * atan((x - c_lb) * (x - c_rb) * c_tau_to_current_tl * (y - c_ub) * (y - c_bb)); // D x = (c_h*(i - 1) + c_h*i) / 2.; second2[0] = x - c_tau_b * y * (1. - y) * (c_pi_half + atan(-x)); second2[1] = y - c_tau * atan((x - c_lb) * (x - c_rb) * c_tau_to_current_tl * (y - c_ub) * (y - c_bb)); double buf_D = d_integUnderUnunifTr( current_tl, first1, second1, third1, prev_result, i, j); return buf_D + d_integUnderUnunifTr( current_tl, first2, second2, third2, prev_result, i, j ); } __global__ void kernel_diff(double *diff, double *result, int tl, double tau) { for (int opt = blockIdx.x * blockDim.x + threadIdx.x; opt < c_n; opt += blockDim.x * gridDim.x) { /*if (opt == 1) { for (int i = 0; i < 11; i++) { for(int j = 0; j < 11 ; j++) { printf("%le ", result[i*11 + j]); } printf("\n"); } }*/ int i = opt % (c_x_length + 1); int j = opt / (c_y_length + 1); double f = 0; diff [opt] = f; // if (i > 0 && j > 0 && j != c_x_length && i != c_x_length) // { f = d_analytSolut(tl*tau, i*c_h, j*c_h); // f += c_tau * d_f_function(tl, i, j); diff [opt] = fabs(result[opt] - f); /* if (i == 1 && j == 1) { printf("tl = %d\n", tl); printf("f = %le result[opt] = %le diff[opt] = %le opt = %d\n", f, result[opt], diff[opt], opt); } */ // } } } __global__ void kernel(double* prev_result, double* result, int current_tl) { for (int opt = blockIdx.x * blockDim.x + threadIdx.x; opt < c_n; opt += blockDim.x * gridDim.x) { int i = opt % (c_x_length + 1); int j = opt / (c_y_length + 1); // расчет границы if (j == 0) // bottom bound { result[ opt ] = 1.1 + sin( c_tau_to_h * current_tl * j * c_bb ); } else if (i == 0) // left bound { result[ opt ] = 1.1 + sin( c_tau_to_h * current_tl * i * c_lb ); } else if (j == c_y_length) // upper bound { result[ opt ] = 1.1 + sin( c_tau_to_h * current_tl * i * c_ub ); } else if (i == c_x_length) // right bound { result[ opt ] = 1.1 + sin( c_tau_to_h * current_tl * j * c_rb ); } else if (i > 0 && j > 0 && 
j != c_x_length && i != c_x_length) { result[ opt ] = space_volume_in_prev_tl(prev_result, current_tl, i, j); /*if (opt == 22) { printf("[gpu] result = %le\n", result[opt]); }*/ double t = space_volume_in_prev_tl(prev_result, current_tl, i, j) / c_h; t = t / c_h; result[ opt ] = t; result[ opt ] += c_tau * d_f_function(current_tl, i, j); } } } double* init_rho(ComputeParameters *p) { double *rhoInPrevTL_asV; rhoInPrevTL_asV = new double [ p->size ]; // Initial data of rho. for( int k = 0; k <= p->x_size; k++ ) { for( int j = 0; j <= p->y_size; j++ ) { rhoInPrevTL_asV[ (p->x_size+1)*k + j ] = 1.1; } } return rhoInPrevTL_asV; } float solve_at_gpu(ComputeParameters *p, bool tl1, bool compute_diff) { assert(p != NULL); assert(p->result != NULL); const int gridSize = 256; const int blockSize = 512; //const int gridSize = 1; //const int blockSize = 1; size_t n(0); int temp_i(0); double temp_d(0); double *result = NULL, *prev_result = NULL, *d_diff = NULL; n = p->get_real_matrix_size(); int size = sizeof(double)*n; double *rhoInPrevTL_asV = init_rho(p); cudaEvent_t start, stop; float time; cudaEventCreate(&start); cudaEventCreate(&stop); cudaMemcpyToSymbol(c_tau, &p->tau, sizeof(double)); cudaMemcpyToSymbol(c_lb, &p->lb, sizeof(double)); cudaMemcpyToSymbol(c_b, &p->b, sizeof(double)); cudaMemcpyToSymbol(c_rb, &p->rb, sizeof(double)); cudaMemcpyToSymbol(c_bb, &p->bb, sizeof(double)); cudaMemcpyToSymbol(c_ub, &p->ub, sizeof(double)); cudaMemcpyToSymbol(c_n, &n, sizeof(int)); temp_i = p->x_size; cudaMemcpyToSymbol(c_x_length, &temp_i, sizeof(int)); temp_i = p->y_size; cudaMemcpyToSymbol(c_y_length, &temp_i, sizeof(int)); temp_d = 1. 
/ (p->x_size); cudaMemcpyToSymbol(c_h, &temp_d, sizeof(double)); temp_d = p->tau / (p->x_size); cudaMemcpyToSymbol(c_tau_to_h, &temp_d, sizeof(double)); temp_d = p->b * p->tau; cudaMemcpyToSymbol(c_tau_b, &temp_d, sizeof(double)); temp_d = M_PI / 2.; cudaMemcpyToSymbol(c_pi_half, &temp_d, sizeof(double)); checkCuda(cudaMalloc((void**)&(result), size) ); checkCuda(cudaMalloc((void**)&(prev_result), size) ); if (compute_diff) { checkCuda(cudaMalloc((void**)&d_diff, size)); } cudaMemcpy(prev_result, rhoInPrevTL_asV, size, cudaMemcpyHostToDevice); cudaEventRecord(start, 0); if (tl1 == true) { kernel<<<gridSize, blockSize>>>(prev_result, result, 1); cudaMemcpy(p->result, result, size, cudaMemcpyDeviceToHost); } else { int tl = 0; int tempTl = p->t_count - 1; while(tl < tempTl) { kernel<<<gridSize, blockSize>>>(prev_result, result, tl + 1); kernel<<<gridSize, blockSize>>>(result, prev_result, tl + 2); tl += 2; } cudaMemcpy(p->result, prev_result, size, cudaMemcpyDeviceToHost); } cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); if (compute_diff) { int ttt = tl1 ? 1 : p->t_count; printf("[gpu] compute diff t = %d\n", ttt); if (tl1) { kernel_diff<<<1, 1>>>(d_diff, result, ttt, p->tau); } else { kernel_diff<<<1, 1>>>(d_diff, prev_result, ttt, p->tau); } cudaMemcpy(p->diff, d_diff, size, cudaMemcpyDeviceToHost); } cudaFree(result); cudaFree(prev_result); cudaDeviceReset(); delete[] rhoInPrevTL_asV; if (compute_diff) { cudaFree(d_diff); } return time; }
99d66ce72959475449d4e91849f69aa44e49be29.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author Yurii Shyrma (iuriish@yahoo.com), created on 19.04.2018 // @author raver119@gmail.com // #include <system/op_boilerplate.h> #include <ops/declarable/helpers/activations.h> #include <helpers/ShapeUtils.h> #include <numeric> #include <helpers/PointersManager.h> #include <helpers/ConstantTadHelper.h> namespace sd { namespace ops { namespace helpers { /////////////////////////////////////////////////////////////////// template<typename X, typename Y> __global__ void preluCuda(const void *vx, const Nd4jLong *xShapeInfo, const void *vy, const Nd4jLong *yShapeInfo, void *vz) { const auto x = reinterpret_cast<const X*>(vx); const auto y = reinterpret_cast<const Y*>(vy); auto z = reinterpret_cast<X*>(vz); __shared__ Nd4jLong xzLen; __shared__ int xzRank, yRank; if (threadIdx.x == 0) { xzLen = shape::length(xShapeInfo); xzRank = shape::rank(xShapeInfo); yRank = shape::rank(yShapeInfo); } __syncthreads(); const auto tid = blockIdx.x * blockDim.x + threadIdx.x; int coords[MAX_RANK]; for (int i = tid; i < xzLen; i += blockDim.x * gridDim.x) { shape::index2coords(i, xShapeInfo, coords); const auto xzOffset = shape::getOffset(xShapeInfo, coords); 
const auto xVal = x[xzOffset]; if(xVal < 0) { for (uint j = 0; j < yRank; ++j) if(yShapeInfo[j + 1] == 1) coords[j + 1] = 0; z[xzOffset] = xVal * y[shape::getOffset(yShapeInfo, coords + 1)]; } else z[xzOffset] = xVal; } } /////////////////////////////////////////////////////////////////// template<typename X, typename Y> linkage void preluCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream, const void *vx, const Nd4jLong *xShapeInfo, const void *vy, const Nd4jLong *yShapeInfo, void *vz) { hipLaunchKernelGGL(( preluCuda<X, Y>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, vx, xShapeInfo, vy, yShapeInfo, vz); } /////////////////////////////////////////////////////////////////// void prelu(sd::LaunchContext * context, const NDArray& input, const NDArray& alpha, NDArray& output) { PointersManager manager(context, "prelu"); const int threadsPerBlock = 256; const int blocksPerGrid = 512; const int sharedMem = 512; const auto xType = input.dataType(); const auto yType = alpha.dataType(); NDArray::prepareSpecialUse({&output}, {&input, &alpha}); BUILD_SINGLE_SELECTOR_TWICE(xType, preluCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), input.specialBuffer(), input.specialShapeInfo(), alpha.specialBuffer(), alpha.specialShapeInfo(), output.specialBuffer()), FLOAT_TYPES); NDArray::registerSpecialUse({&output}, {&input, &alpha}); manager.synchronize(); } /////////////////////////////////////////////////////////////////// template<typename X, typename Y> __global__ linkage void preluBPCuda(const void *vIn, const Nd4jLong *inShapeInfo, const void *vAlpha, const Nd4jLong *alphaShapeInfo, const void *vdLdO, const Nd4jLong *dLdOShapeInfo, void *vdLdI, const Nd4jLong *dLdIShapeInfo, void *vdLdA, const Nd4jLong *dLdAShapeInfo) { const auto in = reinterpret_cast<const X*>(vIn); const auto alpha = reinterpret_cast<const Y*>(vAlpha); const auto dLdO = reinterpret_cast<const 
Y*>(vdLdO); auto dLdI = reinterpret_cast<Y*>(vdLdI); auto dLdA = reinterpret_cast<Y*>(vdLdA); __shared__ Nd4jLong inLen, totalThreads; __shared__ int inRank, alphaRank; if (threadIdx.x == 0) { inLen = shape::length(inShapeInfo); totalThreads = gridDim.x * blockDim.x; inRank = shape::rank(inShapeInfo); alphaRank = shape::rank(alphaShapeInfo); } __syncthreads(); const auto tid = blockIdx.x * blockDim.x + threadIdx.x; int coords[MAX_RANK]; for (int i = tid; i < inLen; i += totalThreads) { shape::index2coords(i, inShapeInfo, coords); const auto inOffset = shape::getOffset(inShapeInfo, coords); const auto dLdOOffset = shape::getOffset(dLdOShapeInfo, coords); const auto dLdIOffset = shape::getOffset(dLdIShapeInfo, coords); const auto xVal = in[inOffset]; const auto grO = dLdO[dLdOOffset]; if(xVal < 0) { for (uint j = 0; j < alphaRank; ++j) if(alphaShapeInfo[j + 1] == 1) coords[j + 1] = 0; const auto alphaOffset = shape::getOffset(alphaShapeInfo, coords + 1); const auto dLdAOffset = shape::getOffset(dLdAShapeInfo, coords + 1); dLdI[dLdIOffset] = grO * alpha[alphaOffset]; sd::math::atomics::nd4j_atomicAdd<Y>(&dLdA[dLdAOffset], static_cast<Y>(grO * xVal)); } else dLdI[dLdIOffset] = grO; } } ////////////////////////////////////////////////////////////////////////// template<typename X, typename Y> __host__ linkage void preluBPCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream, const void *vIn, const Nd4jLong *inShapeInfo, const void *vAlpha, const Nd4jLong *alphaShapeInfo, const void *vdLdO, const Nd4jLong *dLdOShapeInfo, void *vdLdI, const Nd4jLong *dLdIShapeInfo, void *vdLdA, const Nd4jLong *dLdAShapeInfo) { hipLaunchKernelGGL(( preluBPCuda<X, Y>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, vIn, inShapeInfo, vAlpha, alphaShapeInfo, vdLdO, dLdOShapeInfo, vdLdI, dLdIShapeInfo, vdLdA, dLdAShapeInfo); } ////////////////////////////////////////////////////////////////////////// void 
preluBP(sd::LaunchContext* context, const NDArray& input, const NDArray& alpha, const NDArray& dLdO, NDArray& dLdI, NDArray& dLdA) { dLdA.nullify(); PointersManager manager(context, "preluBP"); const int threadsPerBlock = 256; const int blocksPerGrid = 512; const int sharedMem = 512; const auto xType = input.dataType(); const auto zType = alpha.dataType(); NDArray::prepareSpecialUse({&dLdI, &dLdA}, {&input, &alpha, &dLdO}); BUILD_SINGLE_SELECTOR_TWICE(xType, preluBPCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), input.specialBuffer(), input.specialShapeInfo(), alpha.specialBuffer(), alpha.specialShapeInfo(), dLdO.specialBuffer(), dLdO.specialShapeInfo(), dLdI.specialBuffer(), dLdI.specialShapeInfo(), dLdA.specialBuffer(), dLdA.specialShapeInfo()), FLOAT_TYPES); NDArray::registerSpecialUse({&dLdI, &dLdA}, {&input, &alpha, &dLdO}); manager.synchronize(); } /////////////////////////////////////////////////////////////////// template<typename T> __device__ void softMaxForVectorCuda(const void *vx, const Nd4jLong *xShapeInfo, void *vz, const Nd4jLong *zShapeInfo) { // logic of this kernel is based on assumption gridDim = 1 const auto x = reinterpret_cast<const T*>(vx); auto z = reinterpret_cast<T*>(vz); __shared__ Nd4jLong len; __shared__ int numOfIters; __shared__ T shmem[CUDA_BLOCK_SIZE]; if (threadIdx.x == 0) { len = shape::length(xShapeInfo); numOfIters = (len + blockDim.x - 1) / blockDim.x; // ceil (len / blockDim.x) } __syncthreads(); T temp = -DataTypeUtils::max<T>(); // set start value to compare with at first iteration, FIXME: what if T is unsigned ?? // ************ evaluate max element in input array x ************ // for (int i = 0; i < numOfIters; ++i) { const Nd4jLong elemIdx = i * blockDim.x + threadIdx.x; if(elemIdx < len) { const Nd4jLong xOffset = shape::getIndexOffset(elemIdx, xShapeInfo); shmem[threadIdx.x] = (threadIdx.x != 0) ? 
x[xOffset] : sd::math::nd4j_max<T>(x[xOffset], temp); // take into account max element evaluated on previous iteration and stored in temp } else shmem[threadIdx.x] = -DataTypeUtils::max<T>(); // FIXME: what if T is unsigned ?? __syncthreads(); for (int s = blockDim.x / 2; s > 0; s /= 2) { if(threadIdx.x < s) shmem[threadIdx.x] = sd::math::nd4j_max<T>(shmem[threadIdx.x], shmem[threadIdx.x + s]); __syncthreads(); } temp = shmem[0]; // save max value calculated at current iteration } const T max = temp; temp = 0; // ************ evaluate value of exp(x[offset] - max) per each element, store it to shared memory shmem ************ // // at the same evaluate sum of exponents, sum will be stored in shmem[0] for (int i = 0; i < numOfIters; ++i) { const Nd4jLong elemIdx = i * blockDim.x + threadIdx.x; if(elemIdx < len) { const Nd4jLong xOffset = shape::getIndexOffset(elemIdx, xShapeInfo); const Nd4jLong zOffset = shape::getIndexOffset(elemIdx, zShapeInfo); z[zOffset] = sd::math::nd4j_exp<T, T>(x[xOffset] - max); shmem[threadIdx.x] = (threadIdx.x != 0) ? 
z[zOffset] : (z[zOffset] + temp); // take into account sum element evaluated on previous iteration and stored in temp } else shmem[threadIdx.x] = 0; __syncthreads(); for (int s = blockDim.x / 2; s > 0; s /= 2) { if(threadIdx.x < s) shmem[threadIdx.x] += shmem[threadIdx.x + s]; __syncthreads(); } temp = shmem[0]; // save sum calculated at current iteration } // ************ evaluate z[offset] / sum ************ // for (int i = 0; i < numOfIters; ++i) { const Nd4jLong elemIdx = i * blockDim.x + threadIdx.x; if(elemIdx >= len) continue; const Nd4jLong zOffset = shape::getIndexOffset(elemIdx, zShapeInfo); z[zOffset] /= shmem[0]; } } template<typename T> __global__ void softMaxForVectorCudaGlobal(const void *vx, const Nd4jLong *xShapeInfo, void *vz, const Nd4jLong *zShapeInfo) { softMaxForVectorCuda<T>(vx, xShapeInfo, vz, zShapeInfo); } /////////////////////////////////////////////////////////////////// template <typename T> linkage void softMaxForVectorCudaLauncher(const hipStream_t* stream, const void *vx, const Nd4jLong *xShapeInfo, void *vz, const Nd4jLong *zShapeInfo) { hipLaunchKernelGGL(( softMaxForVectorCudaGlobal<T>), dim3(1), dim3(CUDA_BLOCK_SIZE), 1024, *stream, vx, xShapeInfo, vz, zShapeInfo); } /////////////////////////////////////////////////////////////////// template<typename T> __global__ static void softMaxCuda(const void* vx, const Nd4jLong *xTadShapeInfo, const Nd4jLong *xOffsets, void* vz, const Nd4jLong *zTadShapeInfo, const Nd4jLong *zOffsets) { const auto x = reinterpret_cast<const T*>(vx); auto z = reinterpret_cast<T*>(vz); const auto* xTad = x + xOffsets[blockIdx.x]; auto* zTad = z + zOffsets[blockIdx.x]; softMaxForVectorCuda<T>(xTad, xTadShapeInfo, zTad, zTadShapeInfo); } /////////////////////////////////////////////////////////////////// template<typename T> static void softMaxCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream, const void* vx, const Nd4jLong *xTadShapeInfo, const 
Nd4jLong *xOffsets, void* vz, const Nd4jLong *zTadShapeInfo, const Nd4jLong *zOffsets) { hipLaunchKernelGGL(( softMaxCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, vx, xTadShapeInfo, xOffsets, vz, zTadShapeInfo, zOffsets); } ////////////////////////////////////////////////////////////////////////// void softmax(sd::LaunchContext * context, const NDArray& input, NDArray& output, const int dimension) { if(!input.isActualOnDeviceSide()) input.syncToDevice(); const int rank = input.rankOf(); PointersManager manager(context, "helpers::softmax"); if(input.isVector()) { if(rank == 1 || input.sizeAt(dimension) != 1) { NDArray::prepareSpecialUse({&output}, {&input}); BUILD_SINGLE_SELECTOR(input.dataType(), softMaxForVectorCudaLauncher, (context->getCudaStream(), input.specialBuffer(), input.specialShapeInfo(), output.specialBuffer(), output.specialShapeInfo()), FLOAT_TYPES); NDArray::registerSpecialUse({&output}, {&input}); } else output = 1.; } else { auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(input.shapeInfo(), {dimension}); auto packZ = sd::ConstantTadHelper::getInstance().tadForDimensions(output.shapeInfo(), {dimension}); const int threadsPerBlock = CUDA_BLOCK_SIZE; const int blocksPerGrid = packZ.numberOfTads(); const int sharedMem = 1024; NDArray::prepareSpecialUse({&output}, {&input}); BUILD_SINGLE_SELECTOR(input.dataType(), softMaxCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), input.specialBuffer(), packX.specialShapeInfo(), packX.specialOffsets(), output.specialBuffer(), packZ.specialShapeInfo(), packZ.specialOffsets()), FLOAT_TYPES); NDArray::registerSpecialUse({&output}, {&input}); // auto maxAlongDim = const_cast<NDArray&>(input).reduceAlongDimension(reduce::Max, {dimension}, true); // (input - maxAlongDim).applyTransform(transform::Exp, &output); // output contains exponents temporarily // auto sumAlongDim = output.reduceAlongDimension(reduce::Sum, {dimension}, true); // 
output /= sumAlongDim; // input.tickReadDevice(); } manager.synchronize(); output.tickWriteDevice(); } /////////////////////////////////////////////////////////////////// template<typename T> __global__ void logSoftMaxForVectorCuda(const void *vx, const Nd4jLong *xzShapeInfo, void *vz) { // logic of this kernel is based on assumption gridDim = 1 const auto x = reinterpret_cast<const T*>(vx); auto z = reinterpret_cast<T*>(vz); __shared__ Nd4jLong len; __shared__ int numOfIters; __shared__ T shmem[CUDA_BLOCK_SIZE]; if (threadIdx.x == 0) { len = shape::length(xzShapeInfo); numOfIters = (len + blockDim.x - 1) / blockDim.x; // ceil (len / blockDim.x) } __syncthreads(); T temp = -DataTypeUtils::max<T>(); // set start value to compare with at first iteration, FIXME: what if T is unsigned ?? // ************ evaluate max element in input array x ************ // for (int i = 0; i < numOfIters; ++i) { const Nd4jLong elemIdx = i * blockDim.x + threadIdx.x; if(elemIdx < len) { const Nd4jLong offset = shape::getIndexOffset(elemIdx, xzShapeInfo); shmem[threadIdx.x] = (threadIdx.x != 0) ? x[offset] : sd::math::nd4j_max<T>(x[offset], temp); // take into account max element evaluated on previous iteration and stored in temp } else shmem[threadIdx.x] = -DataTypeUtils::max<T>(); // FIXME: what if T is unsigned ?? 
__syncthreads(); for (int s = blockDim.x / 2; s > 0; s /= 2) { if(threadIdx.x < s) shmem[threadIdx.x] = sd::math::nd4j_max<T>(shmem[threadIdx.x], shmem[threadIdx.x + s]); __syncthreads(); } temp = shmem[0]; // save max value calculated at current iteration } const T max = temp; temp = 0; // ************ evaluate value of exp(x[offset] - max) per each element, store it to shared memory shmem ************ // // at the same time evaluate sum of exponents, sum will be stored in shmem[0] for (int i = 0; i < numOfIters; ++i) { const Nd4jLong elemIdx = i * blockDim.x + threadIdx.x; if(elemIdx < len) { const Nd4jLong offset = shape::getIndexOffset(elemIdx, xzShapeInfo); z[offset] = sd::math::nd4j_exp<T, T>(x[offset] - max); shmem[threadIdx.x] = (threadIdx.x != 0) ? z[offset] : (z[offset] + temp); // take into account sum element evaluated on previous iteration and stored in temp } else shmem[threadIdx.x] = 0; __syncthreads(); for (int s = blockDim.x / 2; s > 0; s /= 2) { if(threadIdx.x < s) shmem[threadIdx.x] += shmem[threadIdx.x + s]; __syncthreads(); } temp = shmem[0]; // save sum calculated at current iteration } // ************ evaluate log(z[offset] / sum) ************ // for (int i = 0; i < numOfIters; ++i) { const Nd4jLong elemIdx = i * blockDim.x + threadIdx.x; if(elemIdx >= len) continue; const Nd4jLong offset = shape::getIndexOffset(elemIdx, xzShapeInfo); z[offset] = sd::math::nd4j_log<T,T>(z[offset] / shmem[0]); } } /////////////////////////////////////////////////////////////////// template <typename T> linkage void logSoftMaxForVectorCudaLauncher(const hipStream_t* stream, const void *vx, const Nd4jLong *xzShapeInfo, void *vz) { hipLaunchKernelGGL(( logSoftMaxForVectorCuda<T>), dim3(1), dim3(CUDA_BLOCK_SIZE), 1024, *stream, vx, xzShapeInfo, vz); } ////////////////////////////////////////////////////////////////////////// void logSoftmax(sd::LaunchContext * context, const NDArray& input, NDArray& output, const int dimension) { if(!input.isActualOnDeviceSide()) 
input.syncToDevice(); const int rank = input.rankOf(); if(input.isVector()) { if(rank == 1 || input.sizeAt(dimension) != 1) { BUILD_SINGLE_SELECTOR(input.dataType(), logSoftMaxForVectorCudaLauncher, (context->getCudaStream(), input.specialBuffer(), input.specialShapeInfo(), output.specialBuffer()), FLOAT_TYPES); input.tickReadDevice(); } else output = 0.; } else { auto maxAlongDim = const_cast<NDArray&>(input).reduceAlongDimension(reduce::Max, {dimension}, true); (input - maxAlongDim).applyTransform(transform::Exp, output); // output contains exponents temporarily auto sumAlongDim = output.reduceAlongDimension(reduce::Sum, {dimension}, true); output /= sumAlongDim; output.applyTransform(transform::Log, output); input.tickReadDevice(); } PointersManager manager(context, "helpers::logSoftmax"); manager.synchronize(); output.tickWriteDevice(); } /////////////////////////////////////////////////////////////////// template<typename T> __global__ linkage void softMaxDerivForVectorCuda(const void *vx, const Nd4jLong *xzShapeInfo, void *vz) { // logic of this kernel is based on assumption gridDim = 1 const auto x = reinterpret_cast<const T*>(vx); auto z = reinterpret_cast<T*>(vz); __shared__ Nd4jLong len; __shared__ int numOfIters; __shared__ T shmem[CUDA_BLOCK_SIZE]; if (threadIdx.x == 0) { len = shape::length(xzShapeInfo); numOfIters = (len + blockDim.x - 1) / blockDim.x; // ceil (len / blockDim.x) } __syncthreads(); T temp = -DataTypeUtils::max<T>(); // set start value to compare with at first iteration, FIXME: what if T is unsigned ?? // ************ evaluate max element in input array x ************ // for (int i = 0; i < numOfIters; ++i) { const Nd4jLong elemIdx = i * blockDim.x + threadIdx.x; if(elemIdx < len) { const Nd4jLong offset = shape::getIndexOffset(elemIdx, xzShapeInfo); shmem[threadIdx.x] = (threadIdx.x != 0) ? 
x[offset] : sd::math::nd4j_max<T>(x[offset], temp); // take into account max element evaluated on previous iteration and stored in temp } else shmem[threadIdx.x] = -DataTypeUtils::max<T>(); // FIXME: what if T is unsigned ?? __syncthreads(); for (int s = blockDim.x / 2; s > 0; s /= 2) { if(threadIdx.x < s) shmem[threadIdx.x] = sd::math::nd4j_max<T>(shmem[threadIdx.x], shmem[threadIdx.x + s]); __syncthreads(); } temp = shmem[0]; // save max value calculated at current iteration } const T max = temp; temp = 0; // ************ evaluate value of exp(x[offset] - max) per each element, store it to shared memory shmem ************ // // at the same evaluate sum of exponents, sum will be stored in shmem[0] for (int i = 0; i < numOfIters; ++i) { const Nd4jLong elemIdx = i * blockDim.x + threadIdx.x; if(elemIdx < len) { const Nd4jLong offset = shape::getIndexOffset(elemIdx, xzShapeInfo); z[offset] = sd::math::nd4j_exp<T, T>(x[offset] - max); shmem[threadIdx.x] = (threadIdx.x != 0) ? z[offset] : (z[offset] + temp); // take into account sum element evaluated on previous iteration and stored in temp } else shmem[threadIdx.x] = 0; __syncthreads(); for (int s = blockDim.x / 2; s > 0; s /= 2) { if(threadIdx.x < s) shmem[threadIdx.x] += shmem[threadIdx.x + s]; __syncthreads(); } temp = shmem[0]; // save sum calculated at current iteration } // ************ evaluate (z[offset] / sum) and derivative z[offset] = z[offset] * (1 - z[offset]) ************ // for (int i = 0; i < numOfIters; ++i) { const Nd4jLong elemIdx = i * blockDim.x + threadIdx.x; if(elemIdx >= len) continue; const Nd4jLong offset = shape::getIndexOffset(elemIdx, xzShapeInfo); z[offset] /= shmem[0]; z[offset] *= (1.f - z[offset]); // derivative } } /////////////////////////////////////////////////////////////////// template <typename T> linkage void softMaxDerivForVectorCudaLauncher(const hipStream_t* stream, const void *vx, const Nd4jLong *xzShapeInfo, void *vz) { hipLaunchKernelGGL(( softMaxDerivForVectorCuda<T>), 
dim3(1), dim3(CUDA_BLOCK_SIZE), 1024, *stream, vx, xzShapeInfo, vz); } /////////////////////////////////////////////////////////////////// void softmaxDerivative(sd::LaunchContext * context, const NDArray& input, NDArray& output, const int dimension) { if(!input.isActualOnDeviceSide()) input.syncToDevice(); const int rank = input.rankOf(); int temp; if(shape::isCommonVector(input.shapeInfo(), temp)) { BUILD_SINGLE_SELECTOR(input.dataType(), softMaxDerivForVectorCudaLauncher, (context->getCudaStream(), input.specialBuffer(), input.specialShapeInfo(), output.specialBuffer()), FLOAT_TYPES); input.tickReadDevice(); } else { auto maxAlongDim = const_cast<NDArray&>(input).reduceAlongDimension(reduce::Max, {dimension}, true); (input - maxAlongDim).applyTransform(transform::Exp, output); // output contains exponents temporarily auto sumAlongDim = output.reduceAlongDimension(reduce::Sum, {dimension}, true); output /= sumAlongDim; output *= (1.f - output); // derivative input.tickReadDevice(); } PointersManager manager(context, "helpers::softmaxDerivative"); manager.synchronize(); output.tickWriteDevice(); } template <typename T> linkage void thresholdRelu_(NDArray const& input, double threshold, NDArray& output) { auto routine = LAMBDA_T(_x, threshold) { return _x > (T)threshold ? 
_x: (T)0.f; }; const_cast<NDArray&>(input).applyLambda(routine, output); } void thresholdRelu(sd::LaunchContext * context, NDArray const& input, double threshold, NDArray& output) { BUILD_SINGLE_SELECTOR(input.dataType(), thresholdRelu_, (input, threshold, output), FLOAT_TYPES); } template <typename T> linkage void thresholdReluDerivative_(NDArray* input, double theta, NDArray* dLdO, NDArray* output) { auto derivative = LAMBDA_TT(_x, grO, theta) {if (_x > theta) return grO; else return static_cast<T>(0); }; input->applyPairwiseLambda(*dLdO, derivative, *output); } void thresholdReluDerivative(sd::LaunchContext * context, NDArray* input, double threshold, NDArray* dLdO, NDArray* output) { BUILD_SINGLE_SELECTOR(input->dataType(), thresholdReluDerivative_, (input, threshold, dLdO, output), FLOAT_TYPES); } } } }
99d66ce72959475449d4e91849f69aa44e49be29.cu
/******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author Yurii Shyrma (iuriish@yahoo.com), created on 19.04.2018 // @author raver119@gmail.com // #include <system/op_boilerplate.h> #include <ops/declarable/helpers/activations.h> #include <helpers/ShapeUtils.h> #include <numeric> #include <helpers/PointersManager.h> #include <helpers/ConstantTadHelper.h> namespace sd { namespace ops { namespace helpers { /////////////////////////////////////////////////////////////////// template<typename X, typename Y> __global__ void preluCuda(const void *vx, const Nd4jLong *xShapeInfo, const void *vy, const Nd4jLong *yShapeInfo, void *vz) { const auto x = reinterpret_cast<const X*>(vx); const auto y = reinterpret_cast<const Y*>(vy); auto z = reinterpret_cast<X*>(vz); __shared__ Nd4jLong xzLen; __shared__ int xzRank, yRank; if (threadIdx.x == 0) { xzLen = shape::length(xShapeInfo); xzRank = shape::rank(xShapeInfo); yRank = shape::rank(yShapeInfo); } __syncthreads(); const auto tid = blockIdx.x * blockDim.x + threadIdx.x; int coords[MAX_RANK]; for (int i = tid; i < xzLen; i += blockDim.x * gridDim.x) { shape::index2coords(i, xShapeInfo, coords); const auto xzOffset = shape::getOffset(xShapeInfo, coords); const auto xVal = x[xzOffset]; if(xVal < 0) { for (uint j = 0; j < yRank; ++j) 
if(yShapeInfo[j + 1] == 1) coords[j + 1] = 0; z[xzOffset] = xVal * y[shape::getOffset(yShapeInfo, coords + 1)]; } else z[xzOffset] = xVal; } } /////////////////////////////////////////////////////////////////// template<typename X, typename Y> linkage void preluCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream, const void *vx, const Nd4jLong *xShapeInfo, const void *vy, const Nd4jLong *yShapeInfo, void *vz) { preluCuda<X, Y><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(vx, xShapeInfo, vy, yShapeInfo, vz); } /////////////////////////////////////////////////////////////////// void prelu(sd::LaunchContext * context, const NDArray& input, const NDArray& alpha, NDArray& output) { PointersManager manager(context, "prelu"); const int threadsPerBlock = 256; const int blocksPerGrid = 512; const int sharedMem = 512; const auto xType = input.dataType(); const auto yType = alpha.dataType(); NDArray::prepareSpecialUse({&output}, {&input, &alpha}); BUILD_SINGLE_SELECTOR_TWICE(xType, preluCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), input.specialBuffer(), input.specialShapeInfo(), alpha.specialBuffer(), alpha.specialShapeInfo(), output.specialBuffer()), FLOAT_TYPES); NDArray::registerSpecialUse({&output}, {&input, &alpha}); manager.synchronize(); } /////////////////////////////////////////////////////////////////// template<typename X, typename Y> __global__ linkage void preluBPCuda(const void *vIn, const Nd4jLong *inShapeInfo, const void *vAlpha, const Nd4jLong *alphaShapeInfo, const void *vdLdO, const Nd4jLong *dLdOShapeInfo, void *vdLdI, const Nd4jLong *dLdIShapeInfo, void *vdLdA, const Nd4jLong *dLdAShapeInfo) { const auto in = reinterpret_cast<const X*>(vIn); const auto alpha = reinterpret_cast<const Y*>(vAlpha); const auto dLdO = reinterpret_cast<const Y*>(vdLdO); auto dLdI = reinterpret_cast<Y*>(vdLdI); auto dLdA = reinterpret_cast<Y*>(vdLdA); __shared__ 
Nd4jLong inLen, totalThreads; __shared__ int inRank, alphaRank; if (threadIdx.x == 0) { inLen = shape::length(inShapeInfo); totalThreads = gridDim.x * blockDim.x; inRank = shape::rank(inShapeInfo); alphaRank = shape::rank(alphaShapeInfo); } __syncthreads(); const auto tid = blockIdx.x * blockDim.x + threadIdx.x; int coords[MAX_RANK]; for (int i = tid; i < inLen; i += totalThreads) { shape::index2coords(i, inShapeInfo, coords); const auto inOffset = shape::getOffset(inShapeInfo, coords); const auto dLdOOffset = shape::getOffset(dLdOShapeInfo, coords); const auto dLdIOffset = shape::getOffset(dLdIShapeInfo, coords); const auto xVal = in[inOffset]; const auto grO = dLdO[dLdOOffset]; if(xVal < 0) { for (uint j = 0; j < alphaRank; ++j) if(alphaShapeInfo[j + 1] == 1) coords[j + 1] = 0; const auto alphaOffset = shape::getOffset(alphaShapeInfo, coords + 1); const auto dLdAOffset = shape::getOffset(dLdAShapeInfo, coords + 1); dLdI[dLdIOffset] = grO * alpha[alphaOffset]; sd::math::atomics::nd4j_atomicAdd<Y>(&dLdA[dLdAOffset], static_cast<Y>(grO * xVal)); } else dLdI[dLdIOffset] = grO; } } ////////////////////////////////////////////////////////////////////////// template<typename X, typename Y> __host__ linkage void preluBPCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream, const void *vIn, const Nd4jLong *inShapeInfo, const void *vAlpha, const Nd4jLong *alphaShapeInfo, const void *vdLdO, const Nd4jLong *dLdOShapeInfo, void *vdLdI, const Nd4jLong *dLdIShapeInfo, void *vdLdA, const Nd4jLong *dLdAShapeInfo) { preluBPCuda<X, Y><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(vIn, inShapeInfo, vAlpha, alphaShapeInfo, vdLdO, dLdOShapeInfo, vdLdI, dLdIShapeInfo, vdLdA, dLdAShapeInfo); } ////////////////////////////////////////////////////////////////////////// void preluBP(sd::LaunchContext* context, const NDArray& input, const NDArray& alpha, const NDArray& dLdO, NDArray& dLdI, NDArray& dLdA) { dLdA.nullify(); 
PointersManager manager(context, "preluBP"); const int threadsPerBlock = 256; const int blocksPerGrid = 512; const int sharedMem = 512; const auto xType = input.dataType(); const auto zType = alpha.dataType(); NDArray::prepareSpecialUse({&dLdI, &dLdA}, {&input, &alpha, &dLdO}); BUILD_SINGLE_SELECTOR_TWICE(xType, preluBPCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), input.specialBuffer(), input.specialShapeInfo(), alpha.specialBuffer(), alpha.specialShapeInfo(), dLdO.specialBuffer(), dLdO.specialShapeInfo(), dLdI.specialBuffer(), dLdI.specialShapeInfo(), dLdA.specialBuffer(), dLdA.specialShapeInfo()), FLOAT_TYPES); NDArray::registerSpecialUse({&dLdI, &dLdA}, {&input, &alpha, &dLdO}); manager.synchronize(); } /////////////////////////////////////////////////////////////////// template<typename T> __device__ void softMaxForVectorCuda(const void *vx, const Nd4jLong *xShapeInfo, void *vz, const Nd4jLong *zShapeInfo) { // logic of this kernel is based on assumption gridDim = 1 const auto x = reinterpret_cast<const T*>(vx); auto z = reinterpret_cast<T*>(vz); __shared__ Nd4jLong len; __shared__ int numOfIters; __shared__ T shmem[CUDA_BLOCK_SIZE]; if (threadIdx.x == 0) { len = shape::length(xShapeInfo); numOfIters = (len + blockDim.x - 1) / blockDim.x; // ceil (len / blockDim.x) } __syncthreads(); T temp = -DataTypeUtils::max<T>(); // set start value to compare with at first iteration, FIXME: what if T is unsigned ?? // ************ evaluate max element in input array x ************ // for (int i = 0; i < numOfIters; ++i) { const Nd4jLong elemIdx = i * blockDim.x + threadIdx.x; if(elemIdx < len) { const Nd4jLong xOffset = shape::getIndexOffset(elemIdx, xShapeInfo); shmem[threadIdx.x] = (threadIdx.x != 0) ? x[xOffset] : sd::math::nd4j_max<T>(x[xOffset], temp); // take into account max element evaluated on previous iteration and stored in temp } else shmem[threadIdx.x] = -DataTypeUtils::max<T>(); // FIXME: what if T is unsigned ?? 
__syncthreads(); for (int s = blockDim.x / 2; s > 0; s /= 2) { if(threadIdx.x < s) shmem[threadIdx.x] = sd::math::nd4j_max<T>(shmem[threadIdx.x], shmem[threadIdx.x + s]); __syncthreads(); } temp = shmem[0]; // save max value calculated at current iteration } const T max = temp; temp = 0; // ************ evaluate value of exp(x[offset] - max) per each element, store it to shared memory shmem ************ // // at the same evaluate sum of exponents, sum will be stored in shmem[0] for (int i = 0; i < numOfIters; ++i) { const Nd4jLong elemIdx = i * blockDim.x + threadIdx.x; if(elemIdx < len) { const Nd4jLong xOffset = shape::getIndexOffset(elemIdx, xShapeInfo); const Nd4jLong zOffset = shape::getIndexOffset(elemIdx, zShapeInfo); z[zOffset] = sd::math::nd4j_exp<T, T>(x[xOffset] - max); shmem[threadIdx.x] = (threadIdx.x != 0) ? z[zOffset] : (z[zOffset] + temp); // take into account sum element evaluated on previous iteration and stored in temp } else shmem[threadIdx.x] = 0; __syncthreads(); for (int s = blockDim.x / 2; s > 0; s /= 2) { if(threadIdx.x < s) shmem[threadIdx.x] += shmem[threadIdx.x + s]; __syncthreads(); } temp = shmem[0]; // save sum calculated at current iteration } // ************ evaluate z[offset] / sum ************ // for (int i = 0; i < numOfIters; ++i) { const Nd4jLong elemIdx = i * blockDim.x + threadIdx.x; if(elemIdx >= len) continue; const Nd4jLong zOffset = shape::getIndexOffset(elemIdx, zShapeInfo); z[zOffset] /= shmem[0]; } } template<typename T> __global__ void softMaxForVectorCudaGlobal(const void *vx, const Nd4jLong *xShapeInfo, void *vz, const Nd4jLong *zShapeInfo) { softMaxForVectorCuda<T>(vx, xShapeInfo, vz, zShapeInfo); } /////////////////////////////////////////////////////////////////// template <typename T> linkage void softMaxForVectorCudaLauncher(const cudaStream_t* stream, const void *vx, const Nd4jLong *xShapeInfo, void *vz, const Nd4jLong *zShapeInfo) { softMaxForVectorCudaGlobal<T><<<1, CUDA_BLOCK_SIZE, 1024, *stream>>>(vx, 
xShapeInfo, vz, zShapeInfo); } /////////////////////////////////////////////////////////////////// template<typename T> __global__ static void softMaxCuda(const void* vx, const Nd4jLong *xTadShapeInfo, const Nd4jLong *xOffsets, void* vz, const Nd4jLong *zTadShapeInfo, const Nd4jLong *zOffsets) { const auto x = reinterpret_cast<const T*>(vx); auto z = reinterpret_cast<T*>(vz); const auto* xTad = x + xOffsets[blockIdx.x]; auto* zTad = z + zOffsets[blockIdx.x]; softMaxForVectorCuda<T>(xTad, xTadShapeInfo, zTad, zTadShapeInfo); } /////////////////////////////////////////////////////////////////// template<typename T> static void softMaxCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream, const void* vx, const Nd4jLong *xTadShapeInfo, const Nd4jLong *xOffsets, void* vz, const Nd4jLong *zTadShapeInfo, const Nd4jLong *zOffsets) { softMaxCuda<T><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(vx, xTadShapeInfo, xOffsets, vz, zTadShapeInfo, zOffsets); } ////////////////////////////////////////////////////////////////////////// void softmax(sd::LaunchContext * context, const NDArray& input, NDArray& output, const int dimension) { if(!input.isActualOnDeviceSide()) input.syncToDevice(); const int rank = input.rankOf(); PointersManager manager(context, "helpers::softmax"); if(input.isVector()) { if(rank == 1 || input.sizeAt(dimension) != 1) { NDArray::prepareSpecialUse({&output}, {&input}); BUILD_SINGLE_SELECTOR(input.dataType(), softMaxForVectorCudaLauncher, (context->getCudaStream(), input.specialBuffer(), input.specialShapeInfo(), output.specialBuffer(), output.specialShapeInfo()), FLOAT_TYPES); NDArray::registerSpecialUse({&output}, {&input}); } else output = 1.; } else { auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(input.shapeInfo(), {dimension}); auto packZ = sd::ConstantTadHelper::getInstance().tadForDimensions(output.shapeInfo(), {dimension}); const int threadsPerBlock = 
CUDA_BLOCK_SIZE; const int blocksPerGrid = packZ.numberOfTads(); const int sharedMem = 1024; NDArray::prepareSpecialUse({&output}, {&input}); BUILD_SINGLE_SELECTOR(input.dataType(), softMaxCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), input.specialBuffer(), packX.specialShapeInfo(), packX.specialOffsets(), output.specialBuffer(), packZ.specialShapeInfo(), packZ.specialOffsets()), FLOAT_TYPES); NDArray::registerSpecialUse({&output}, {&input}); // auto maxAlongDim = const_cast<NDArray&>(input).reduceAlongDimension(reduce::Max, {dimension}, true); // (input - maxAlongDim).applyTransform(transform::Exp, &output); // output contains exponents temporarily // auto sumAlongDim = output.reduceAlongDimension(reduce::Sum, {dimension}, true); // output /= sumAlongDim; // input.tickReadDevice(); } manager.synchronize(); output.tickWriteDevice(); } /////////////////////////////////////////////////////////////////// template<typename T> __global__ void logSoftMaxForVectorCuda(const void *vx, const Nd4jLong *xzShapeInfo, void *vz) { // logic of this kernel is based on assumption gridDim = 1 const auto x = reinterpret_cast<const T*>(vx); auto z = reinterpret_cast<T*>(vz); __shared__ Nd4jLong len; __shared__ int numOfIters; __shared__ T shmem[CUDA_BLOCK_SIZE]; if (threadIdx.x == 0) { len = shape::length(xzShapeInfo); numOfIters = (len + blockDim.x - 1) / blockDim.x; // ceil (len / blockDim.x) } __syncthreads(); T temp = -DataTypeUtils::max<T>(); // set start value to compare with at first iteration, FIXME: what if T is unsigned ?? // ************ evaluate max element in input array x ************ // for (int i = 0; i < numOfIters; ++i) { const Nd4jLong elemIdx = i * blockDim.x + threadIdx.x; if(elemIdx < len) { const Nd4jLong offset = shape::getIndexOffset(elemIdx, xzShapeInfo); shmem[threadIdx.x] = (threadIdx.x != 0) ? 
x[offset] : sd::math::nd4j_max<T>(x[offset], temp); // take into account max element evaluated on previous iteration and stored in temp } else shmem[threadIdx.x] = -DataTypeUtils::max<T>(); // FIXME: what if T is unsigned ?? __syncthreads(); for (int s = blockDim.x / 2; s > 0; s /= 2) { if(threadIdx.x < s) shmem[threadIdx.x] = sd::math::nd4j_max<T>(shmem[threadIdx.x], shmem[threadIdx.x + s]); __syncthreads(); } temp = shmem[0]; // save max value calculated at current iteration } const T max = temp; temp = 0; // ************ evaluate value of exp(x[offset] - max) per each element, store it to shared memory shmem ************ // // at the same time evaluate sum of exponents, sum will be stored in shmem[0] for (int i = 0; i < numOfIters; ++i) { const Nd4jLong elemIdx = i * blockDim.x + threadIdx.x; if(elemIdx < len) { const Nd4jLong offset = shape::getIndexOffset(elemIdx, xzShapeInfo); z[offset] = sd::math::nd4j_exp<T, T>(x[offset] - max); shmem[threadIdx.x] = (threadIdx.x != 0) ? z[offset] : (z[offset] + temp); // take into account sum element evaluated on previous iteration and stored in temp } else shmem[threadIdx.x] = 0; __syncthreads(); for (int s = blockDim.x / 2; s > 0; s /= 2) { if(threadIdx.x < s) shmem[threadIdx.x] += shmem[threadIdx.x + s]; __syncthreads(); } temp = shmem[0]; // save sum calculated at current iteration } // ************ evaluate log(z[offset] / sum) ************ // for (int i = 0; i < numOfIters; ++i) { const Nd4jLong elemIdx = i * blockDim.x + threadIdx.x; if(elemIdx >= len) continue; const Nd4jLong offset = shape::getIndexOffset(elemIdx, xzShapeInfo); z[offset] = sd::math::nd4j_log<T,T>(z[offset] / shmem[0]); } } /////////////////////////////////////////////////////////////////// template <typename T> linkage void logSoftMaxForVectorCudaLauncher(const cudaStream_t* stream, const void *vx, const Nd4jLong *xzShapeInfo, void *vz) { logSoftMaxForVectorCuda<T><<<1, CUDA_BLOCK_SIZE, 1024, *stream>>>(vx, xzShapeInfo, vz); } 
////////////////////////////////////////////////////////////////////////// void logSoftmax(sd::LaunchContext * context, const NDArray& input, NDArray& output, const int dimension) { if(!input.isActualOnDeviceSide()) input.syncToDevice(); const int rank = input.rankOf(); if(input.isVector()) { if(rank == 1 || input.sizeAt(dimension) != 1) { BUILD_SINGLE_SELECTOR(input.dataType(), logSoftMaxForVectorCudaLauncher, (context->getCudaStream(), input.specialBuffer(), input.specialShapeInfo(), output.specialBuffer()), FLOAT_TYPES); input.tickReadDevice(); } else output = 0.; } else { auto maxAlongDim = const_cast<NDArray&>(input).reduceAlongDimension(reduce::Max, {dimension}, true); (input - maxAlongDim).applyTransform(transform::Exp, output); // output contains exponents temporarily auto sumAlongDim = output.reduceAlongDimension(reduce::Sum, {dimension}, true); output /= sumAlongDim; output.applyTransform(transform::Log, output); input.tickReadDevice(); } PointersManager manager(context, "helpers::logSoftmax"); manager.synchronize(); output.tickWriteDevice(); } /////////////////////////////////////////////////////////////////// template<typename T> __global__ linkage void softMaxDerivForVectorCuda(const void *vx, const Nd4jLong *xzShapeInfo, void *vz) { // logic of this kernel is based on assumption gridDim = 1 const auto x = reinterpret_cast<const T*>(vx); auto z = reinterpret_cast<T*>(vz); __shared__ Nd4jLong len; __shared__ int numOfIters; __shared__ T shmem[CUDA_BLOCK_SIZE]; if (threadIdx.x == 0) { len = shape::length(xzShapeInfo); numOfIters = (len + blockDim.x - 1) / blockDim.x; // ceil (len / blockDim.x) } __syncthreads(); T temp = -DataTypeUtils::max<T>(); // set start value to compare with at first iteration, FIXME: what if T is unsigned ?? 
// ************ evaluate max element in input array x ************ // for (int i = 0; i < numOfIters; ++i) { const Nd4jLong elemIdx = i * blockDim.x + threadIdx.x; if(elemIdx < len) { const Nd4jLong offset = shape::getIndexOffset(elemIdx, xzShapeInfo); shmem[threadIdx.x] = (threadIdx.x != 0) ? x[offset] : sd::math::nd4j_max<T>(x[offset], temp); // take into account max element evaluated on previous iteration and stored in temp } else shmem[threadIdx.x] = -DataTypeUtils::max<T>(); // FIXME: what if T is unsigned ?? __syncthreads(); for (int s = blockDim.x / 2; s > 0; s /= 2) { if(threadIdx.x < s) shmem[threadIdx.x] = sd::math::nd4j_max<T>(shmem[threadIdx.x], shmem[threadIdx.x + s]); __syncthreads(); } temp = shmem[0]; // save max value calculated at current iteration } const T max = temp; temp = 0; // ************ evaluate value of exp(x[offset] - max) per each element, store it to shared memory shmem ************ // // at the same evaluate sum of exponents, sum will be stored in shmem[0] for (int i = 0; i < numOfIters; ++i) { const Nd4jLong elemIdx = i * blockDim.x + threadIdx.x; if(elemIdx < len) { const Nd4jLong offset = shape::getIndexOffset(elemIdx, xzShapeInfo); z[offset] = sd::math::nd4j_exp<T, T>(x[offset] - max); shmem[threadIdx.x] = (threadIdx.x != 0) ? 
z[offset] : (z[offset] + temp); // take into account sum element evaluated on previous iteration and stored in temp } else shmem[threadIdx.x] = 0; __syncthreads(); for (int s = blockDim.x / 2; s > 0; s /= 2) { if(threadIdx.x < s) shmem[threadIdx.x] += shmem[threadIdx.x + s]; __syncthreads(); } temp = shmem[0]; // save sum calculated at current iteration } // ************ evaluate (z[offset] / sum) and derivative z[offset] = z[offset] * (1 - z[offset]) ************ // for (int i = 0; i < numOfIters; ++i) { const Nd4jLong elemIdx = i * blockDim.x + threadIdx.x; if(elemIdx >= len) continue; const Nd4jLong offset = shape::getIndexOffset(elemIdx, xzShapeInfo); z[offset] /= shmem[0]; z[offset] *= (1.f - z[offset]); // derivative } } /////////////////////////////////////////////////////////////////// template <typename T> linkage void softMaxDerivForVectorCudaLauncher(const cudaStream_t* stream, const void *vx, const Nd4jLong *xzShapeInfo, void *vz) { softMaxDerivForVectorCuda<T><<<1, CUDA_BLOCK_SIZE, 1024, *stream>>>(vx, xzShapeInfo, vz); } /////////////////////////////////////////////////////////////////// void softmaxDerivative(sd::LaunchContext * context, const NDArray& input, NDArray& output, const int dimension) { if(!input.isActualOnDeviceSide()) input.syncToDevice(); const int rank = input.rankOf(); int temp; if(shape::isCommonVector(input.shapeInfo(), temp)) { BUILD_SINGLE_SELECTOR(input.dataType(), softMaxDerivForVectorCudaLauncher, (context->getCudaStream(), input.specialBuffer(), input.specialShapeInfo(), output.specialBuffer()), FLOAT_TYPES); input.tickReadDevice(); } else { auto maxAlongDim = const_cast<NDArray&>(input).reduceAlongDimension(reduce::Max, {dimension}, true); (input - maxAlongDim).applyTransform(transform::Exp, output); // output contains exponents temporarily auto sumAlongDim = output.reduceAlongDimension(reduce::Sum, {dimension}, true); output /= sumAlongDim; output *= (1.f - output); // derivative input.tickReadDevice(); } PointersManager 
manager(context, "helpers::softmaxDerivative"); manager.synchronize(); output.tickWriteDevice(); } template <typename T> linkage void thresholdRelu_(NDArray const& input, double threshold, NDArray& output) { auto routine = LAMBDA_T(_x, threshold) { return _x > (T)threshold ? _x: (T)0.f; }; const_cast<NDArray&>(input).applyLambda(routine, output); } void thresholdRelu(sd::LaunchContext * context, NDArray const& input, double threshold, NDArray& output) { BUILD_SINGLE_SELECTOR(input.dataType(), thresholdRelu_, (input, threshold, output), FLOAT_TYPES); } template <typename T> linkage void thresholdReluDerivative_(NDArray* input, double theta, NDArray* dLdO, NDArray* output) { auto derivative = LAMBDA_TT(_x, grO, theta) {if (_x > theta) return grO; else return static_cast<T>(0); }; input->applyPairwiseLambda(*dLdO, derivative, *output); } void thresholdReluDerivative(sd::LaunchContext * context, NDArray* input, double threshold, NDArray* dLdO, NDArray* output) { BUILD_SINGLE_SELECTOR(input->dataType(), thresholdReluDerivative_, (input, threshold, dLdO, output), FLOAT_TYPES); } } } }
be3dff9a759d2cd0c8ea03106ae5974f06eabec8.hip
// !!! This is a file automatically generated by hipify!!! /********************************************************************** Copyright 2013 Advanced Micro Devices, Inc. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
********************************************************************/ #include <chrono> #include <cmath> #include <cstdlib> #include <iostream> #include <hip/hip_runtime.h> #include "urng.h" #include "kernel.hip" int main(int argc, char** argv) { if (argc != 5) { printf("Usage: %s <path to file> <blockSizeX> <blockSizeY> <repeat>\n", argv[0]); return 1; } const char* filePath = argv[1]; const int blockSizeX = atoi(argv[2]); const int blockSizeY = atoi(argv[3]); const int iterations = atoi(argv[4]); // load input bitmap image SDKBitMap inputBitmap; inputBitmap.load(filePath); if(!inputBitmap.isLoaded()) { std::cout << "Failed to load input image!"; return -1; } // get width and height of input image int height = inputBitmap.getHeight(); int width = inputBitmap.getWidth(); size_t imageSize = height * width * sizeof(uchar4); std::cout << "Image " << filePath; std::cout << " height: " << height; std::cout << " width: " << width << std::endl; // allocate memory for input & output image data uchar4* inputImageData = (uchar4*)malloc(imageSize); // allocate memory for output image data uchar4* outputImageData = (uchar4*)malloc(imageSize); // initializa the Image data to NULL memset(outputImageData, 0, imageSize); // get the pointer to pixel data uchar4 *pixelData = inputBitmap.getPixels(); if(pixelData == NULL) { std::cout << "Failed to read pixel Data!"; free(inputImageData); free(outputImageData); return -1; } // Copy pixel data into inputImageData memcpy(inputImageData, pixelData, imageSize); // allocate memory for verification output uchar4 *verificationOutput = (uchar4*)malloc(imageSize); // initialize the data to NULL memset(verificationOutput, 0, imageSize); const int factor = FACTOR; uchar4 *inputImageBuffer; hipMalloc((void**)&inputImageBuffer, imageSize); hipMemcpy(inputImageBuffer, inputImageData, imageSize, hipMemcpyHostToDevice); uchar4 *outputImageBuffer; hipMalloc((void**)&outputImageBuffer, imageSize); dim3 grid (height * width / (blockSizeY * blockSizeX)); 
dim3 block (blockSizeY * blockSizeX); // maximum work-group size is 256 std::cout << "Executing kernel for " << iterations << " iterations" <<std::endl; std::cout << "-------------------------------------------" << std::endl; hipDeviceSynchronize(); auto start = std::chrono::steady_clock::now(); for(int i = 0; i < iterations; i++) { hipLaunchKernelGGL(( noise_uniform), dim3(grid), dim3(block), 0, 0, inputImageBuffer, outputImageBuffer, factor); } hipDeviceSynchronize(); auto end = std::chrono::steady_clock::now(); auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); std::cout << "Average kernel execution time: " << (time * 1e-3f) / iterations << " (us)\n"; hipMemcpy(outputImageData, outputImageBuffer, imageSize, hipMemcpyDeviceToHost); hipFree(inputImageBuffer); hipFree(outputImageBuffer); // verify float mean = 0; for(int i = 0; i < (int)(width * height); i++) { mean += outputImageData[i].x - inputImageData[i].x; mean += outputImageData[i].y - inputImageData[i].y; mean += outputImageData[i].z - inputImageData[i].z; mean += outputImageData[i].w - inputImageData[i].w; } mean /= (imageSize * factor); std::cout << "The averaged mean of the image: " << mean << std::endl; if(fabs(mean) < 1.0) { std::cout << "PASS\n" << std::endl; } else { std::cout << "FAIL\n" << std::endl; } #ifdef DUMP // copy output image data back to original pixel data memcpy(pixelData, outputImageData, imageSize); // write the output bmp file if(!inputBitmap.write(OUTPUT_IMAGE)) std::cout << "Failed to write output image!"; else std::cout << "Write output image!"; #endif // release program resources (input memory etc.) free(inputImageData); free(outputImageData); free(verificationOutput); return 0; }
be3dff9a759d2cd0c8ea03106ae5974f06eabec8.cu
/********************************************************************** Copyright ©2013 Advanced Micro Devices, Inc. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: • Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. • Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
********************************************************************/ #include <chrono> #include <cmath> #include <cstdlib> #include <iostream> #include <cuda.h> #include "urng.h" #include "kernel.cu" int main(int argc, char** argv) { if (argc != 5) { printf("Usage: %s <path to file> <blockSizeX> <blockSizeY> <repeat>\n", argv[0]); return 1; } const char* filePath = argv[1]; const int blockSizeX = atoi(argv[2]); const int blockSizeY = atoi(argv[3]); const int iterations = atoi(argv[4]); // load input bitmap image SDKBitMap inputBitmap; inputBitmap.load(filePath); if(!inputBitmap.isLoaded()) { std::cout << "Failed to load input image!"; return -1; } // get width and height of input image int height = inputBitmap.getHeight(); int width = inputBitmap.getWidth(); size_t imageSize = height * width * sizeof(uchar4); std::cout << "Image " << filePath; std::cout << " height: " << height; std::cout << " width: " << width << std::endl; // allocate memory for input & output image data uchar4* inputImageData = (uchar4*)malloc(imageSize); // allocate memory for output image data uchar4* outputImageData = (uchar4*)malloc(imageSize); // initializa the Image data to NULL memset(outputImageData, 0, imageSize); // get the pointer to pixel data uchar4 *pixelData = inputBitmap.getPixels(); if(pixelData == NULL) { std::cout << "Failed to read pixel Data!"; free(inputImageData); free(outputImageData); return -1; } // Copy pixel data into inputImageData memcpy(inputImageData, pixelData, imageSize); // allocate memory for verification output uchar4 *verificationOutput = (uchar4*)malloc(imageSize); // initialize the data to NULL memset(verificationOutput, 0, imageSize); const int factor = FACTOR; uchar4 *inputImageBuffer; cudaMalloc((void**)&inputImageBuffer, imageSize); cudaMemcpy(inputImageBuffer, inputImageData, imageSize, cudaMemcpyHostToDevice); uchar4 *outputImageBuffer; cudaMalloc((void**)&outputImageBuffer, imageSize); dim3 grid (height * width / (blockSizeY * blockSizeX)); dim3 
block (blockSizeY * blockSizeX); // maximum work-group size is 256 std::cout << "Executing kernel for " << iterations << " iterations" <<std::endl; std::cout << "-------------------------------------------" << std::endl; cudaDeviceSynchronize(); auto start = std::chrono::steady_clock::now(); for(int i = 0; i < iterations; i++) { noise_uniform<<<grid, block>>>(inputImageBuffer, outputImageBuffer, factor); } cudaDeviceSynchronize(); auto end = std::chrono::steady_clock::now(); auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); std::cout << "Average kernel execution time: " << (time * 1e-3f) / iterations << " (us)\n"; cudaMemcpy(outputImageData, outputImageBuffer, imageSize, cudaMemcpyDeviceToHost); cudaFree(inputImageBuffer); cudaFree(outputImageBuffer); // verify float mean = 0; for(int i = 0; i < (int)(width * height); i++) { mean += outputImageData[i].x - inputImageData[i].x; mean += outputImageData[i].y - inputImageData[i].y; mean += outputImageData[i].z - inputImageData[i].z; mean += outputImageData[i].w - inputImageData[i].w; } mean /= (imageSize * factor); std::cout << "The averaged mean of the image: " << mean << std::endl; if(fabs(mean) < 1.0) { std::cout << "PASS\n" << std::endl; } else { std::cout << "FAIL\n" << std::endl; } #ifdef DUMP // copy output image data back to original pixel data memcpy(pixelData, outputImageData, imageSize); // write the output bmp file if(!inputBitmap.write(OUTPUT_IMAGE)) std::cout << "Failed to write output image!"; else std::cout << "Write output image!"; #endif // release program resources (input memory etc.) free(inputImageData); free(outputImageData); free(verificationOutput); return 0; }
5f59a3b5421cb4164273dbfbb9a0804c07906c84.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /// LSU EE X70X-X (Fall 2019), GPU Programming // /// CUDA code for computing intersections and time-stepping physics model. // $Id:$ /// Purpose // // Demonstrate Several Graphical and Simulation Techniques. // This file contains GPU/cuda code. // See demo-x-collide.cc for main program. #include <gp/cuda-util-kernel.h> #include "k-main.cuh" /// /// Variables Read or Written By With Host Code /// /// Ball Information Structure // // This is in soa (structure of arrays) form, rather than // in the programmer-friendly aos (array of structure) form. // In soa form it is easier for multiple thread to read contiguous // blocks of data. // __constant__ CUDA_Ball_X balls_x; /// /// Ball Contact (tact) Pair Information /// /// Balls needed by block. // // This array identifies those balls that will be used by each block // during each contact pass. When a thread starts balls are placed in // shared memory, then contact between a pair of balls is tested for // and resolved. // __constant__ int *block_balls_needed; /// Shared memory array holding balls updated cooperating threads in a block. 
#undef USE_STRUCT #ifdef USE_STRUCT extern __shared__ CUDA_Phys_W sm_balls[]; #else extern __shared__ float3 sm_balls[]; __shared__ uchar4 sm_balls_misc[300]; #endif /// Pairs of Balls to Check // __constant__ SM_Idx2 *tacts_schedule; /// Box/Box Intersect // __constant__ XX_Pair *xx_pairs; __constant__ float4 *xx_sects_center; __constant__ float4 *xx_sects_dir; __constant__ float4 *xx_sects_debug; __constant__ float3 gravity_accel_dt; __constant__ float opt_bounce_loss, opt_bounce_loss_box; __constant__ float opt_friction_coeff, opt_friction_roll; __constant__ float opt_air_resistance; __constant__ bool opt_platform_curved; __constant__ float platform_xmin, platform_xmax; __constant__ float platform_zmin, platform_zmax; __constant__ float platform_xmid, platform_xrad; __constant__ float delta_t; __constant__ float elasticity_inv_dt; __constant__ bool opt_debug, opt_debug2; __constant__ CUDA_Wheel wheel; extern __shared__ float block_torque_dt[]; static __host__ void collect_symbols(); /// /// Useful Functions and Types /// typedef float3 pCoor; typedef float3 pVect; __device__ float3 make_float3(float4 f4){return make_float3(f4.x,f4.y,f4.z);} __device__ float3 m3(float4 a){ return make_float3(a); } __device__ float3 xyz(float4 a){ return m3(a); } __device__ float4 m4(float3 v, float w) { return make_float4(v.x,v.y,v.z,w); } __device__ pVect operator +(pVect a,pVect b) { return make_float3(a.x+b.x,a.y+b.y,a.z+b.z); } __device__ pVect operator -(pVect a,pVect b) { return make_float3(a.x-b.x,a.y-b.y,a.z-b.z); } __device__ pVect operator -(float4 a,float4 b) { return make_float3(a.x-b.x,a.y-b.y,a.z-b.z); } __device__ pVect operator -(pCoor a,float4 b) { return make_float3(a.x-b.x,a.y-b.y,a.z-b.z); } __device__ pVect operator *(float s, pVect v) {return make_float3(s*v.x,s*v.y,s*v.z);} __device__ float4 operator *(float s, float4 v) {return make_float4(s*v.x,s*v.y,s*v.z,s*v.w);} __device__ pVect operator *(pVect u, pVect v) {return 
make_float3(u.x*v.x,u.y*v.y,u.z*v.z);} __device__ pVect operator -(pVect v) { return make_float3(-v.x,-v.y,-v.z); } __device__ float3 operator -=(float3& a, pVect b) {a = a - b; return a;} __device__ float3 operator +=(float3& a, pVect b) {a = a + b; return a;} struct pNorm { pVect v; float mag_sq, magnitude; }; __device__ pVect operator *(float s, pNorm n) { return s * n.v;} // Make a Coordinate __device__ pCoor mc(float x, float y, float z){ return make_float3(x,y,z); } __device__ pCoor mc(float4 c){ return make_float3(c.x,c.y,c.z); } __device__ void set_f3(float3& a, float4 b){a.x = b.x; a.y = b.y; a.z = b.z;} __device__ void set_f4(float4& a, float3 b) {a.x = b.x; a.y = b.y; a.z = b.z; a.w = 1;} __device__ void set_f4(float4& a, float3 b, float c) {a.x = b.x; a.y = b.y; a.z = b.z; a.w = c;} // Make a Vector __device__ pVect mv(float x, float y, float z){ return make_float3(x,y,z); } __device__ pVect mv(float3 a, float3 b) { return b-a; } __device__ pVect mv(float a) { return make_float3(a,a,a); } __device__ float dot(float4 a, float4 b) { return a.x*b.x + a.y*b.y + a.z*b.z + a.w*b.w;} __device__ float dot(pVect a, pVect b){ return a.x*b.x + a.y*b.y + a.z*b.z;} __device__ float dot(pVect a, pNorm b){ return dot(a,b.v); } __device__ float dot(pNorm a, pVect b){ return dot(a.v,b); } __device__ float dot3(float4 a, float4 b){ return dot(m3(a),m3(b)); } __device__ float mag_sq(pVect v){ return dot(v,v); } __device__ float length(pVect a) {return sqrtf(mag_sq(a));} __device__ pVect normalize(pVect a) { return rsqrtf(mag_sq(a))*a; } // Make a Normal (a structure containing a normalized vector and length) __device__ pNorm mn(pVect v) { pNorm n; n.mag_sq = mag_sq(v); if ( n.mag_sq == 0 ) { n.magnitude = 0; n.v.x = n.v.y = n.v.z = 0; } else { n.magnitude = sqrtf(n.mag_sq); n.v = (1.0f/n.magnitude) * v; } return n; } __device__ pNorm mn(float4 a, float4 b) {return mn(b-a);} __device__ pNorm mn(pCoor a, pCoor b) {return mn(b-a);} __device__ pNorm mn(float x, float y, float 
z) {return mn(mv(x,y,z));} __device__ pNorm mn(float4 v4) { pNorm n; n.v = m3(v4); n.magnitude = v4.w; return n; } __device__ pNorm mn(float3 v3, float mag) { pNorm n; n.v = v3; n.magnitude = mag; return n; } // The unary - operator doesn't seem to work when used in an argument. __device__ pNorm operator -(pNorm n) { pNorm m; m.magnitude = n.magnitude; m.mag_sq = n.mag_sq; m.v = -n.v; return m; } struct pQuat { float w; pVect v; }; // Make Quaternion __device__ float4 mq(pNorm axis, float angle) { return m4( __sinf(angle/2) * axis.v, __cosf(angle/2) ); } __device__ float4 quat_normalize(float4 q) { float len_sq = dot(q,q); float norm_factor = 1.0f / sqrtf(len_sq); return norm_factor * q; } // Make float4 __device__ float4 m4(pQuat q){ return make_float4(q.v.x,q.v.y,q.v.z,q.w); } __device__ float4 m4(pNorm v, float w) { return m4(v.v,w); } __device__ pVect fabs(pVect v){ return mv(fabs(v.x),fabs(v.y),fabs(v.z)); } __device__ float min(pVect v){ return min(min(v.x,v.y),v.z); } __device__ float max(pVect v){ return max(max(v.x,v.y),v.z); } __device__ float sum(pVect v){ return v.x+v.y+v.z; } // Cross Product of Two Vectors __device__ float3 cross(float3 a, float3 b) { return make_float3 ( a.y * b.z - a.z * b.y, a.z * b.x - a.x * b.z, a.x * b.y - a.y * b.x ); } __device__ pVect cross(pVect a, pNorm b){ return cross(a,b.v); } __device__ pVect cross(pNorm a, pVect b){ return cross(a.v,b); } __device__ pVect crossf3(float4 a, float4 b) { return cross(m3(a),m3(b)); } // Cross Product of Vectors Between Coordinates __device__ float3 cross3(float3 a, float3 b, float3 c) { float3 ab = a - b; float3 cb = c - b; return cross(ab,cb); } __device__ pVect cross3(pVect a, pVect b, pNorm c) { return cross3(a,b,c.v); } __device__ float4 quat_mult(float4 a, float4 b) { float w = a.w * b.w - dot3(a,b); float3 v = a.w * m3(b) + b.w * m3(a) + crossf3(a,b); return make_float4(v.x,v.y,v.z,w); }; __device__ void pMatrix_set_rotation(pcMatrix3x3& m, pVect u, float theta) { const float 
cos_theta = __cosf(theta); const float sin_theta = sqrtf(1.0f - cos_theta * cos_theta ); m.r0.x = u.x * u.x + cos_theta * ( 1 - u.x * u.x ); m.r0.y = u.x * u.y * ( 1 - cos_theta ) - u.z * sin_theta; m.r0.z = u.z * u.x * ( 1 - cos_theta ) + u.y * sin_theta; m.r1.x = u.x * u.y * ( 1 - cos_theta ) + u.z * sin_theta; m.r1.y = u.y * u.y + cos_theta * ( 1 - u.y * u.y ); m.r1.z = u.y * u.z * ( 1 - cos_theta ) - u.x * sin_theta; m.r2.x = u.z * u.x * ( 1 - cos_theta ) - u.y * sin_theta; m.r2.y = u.y * u.z * ( 1 - cos_theta ) + u.x * sin_theta; m.r2.z = u.z * u.z + cos_theta * ( 1 - u.z * u.z ); } __device__ float3 operator *(pcMatrix3x3 m, float3 coor) { return make_float3(dot(m.r0,coor), dot(m.r1,coor), dot(m.r2,coor)); } // /// Ball Physics Functions // // See demo-x-collide.cc for details. __device__ pVect point_rot_vel(float3 omega, float r, pNorm direction) { /// Return velocity of point on surface of sphere of radius r. // return r * cross( omega, direction ); } __device__ float get_fdt_to_do(float r, float mass_inv) { return 2.5f * mass_inv / r; } __device__ float3 tan_force_dt (pNorm tact_dir, float3 force_dt, float fdt_to_do) { /// Change rotation rate due to force_dt at tact_dir in direction force_dir. // return cross(tact_dir, fdt_to_do * force_dt ); } /// /// Major Ball Physics Routines /// // A time step is computed using two kernels, pass_pairs and // pass_platform. The pass_pairs kernel, which might be launched // several times, handles collisions between balls. The pass_platform // kernel handles collision between balls and the platform, and also // updates position and orientation, and spins the wheel. __device__ bool tile_ball_collide (CUDA_Tile_W& tile, CUDA_Ball_W& ball, pCoor& tact_pos, pVect& tact_dir) { // If tile in contact with ball return true and write contact // point on tile to tact_pos and ball-center-to-tact-pos direction // to tact_dir. pVect tile_to_ball = mv(tile.pt_ll,ball.position); // Distance from tile's plane to the ball. 
const float dist = dot(tile_to_ball,tile.normal); const float radius = ball.radius; if ( fabs(dist) > radius ) return false; // The closest point on tile plane to the ball. pCoor pt_closest = ball.position - dist * tile.normal; // How far up the tile in the y direction the center of the ball sits const float dist_ht = dot(tile.norm_up,tile_to_ball); if ( dist_ht < -radius ) return false; if ( dist_ht > tile.height + radius ) return false; // How far up the tile in the x direction the center of the ball sits const float dist_wd = dot(tile.norm_rt,tile_to_ball); if ( dist_wd < -radius ) return false; if ( dist_wd > tile.width + radius ) return false; // If ball touching tile surface (not including an edge or corner) // then set up the pseudo ball for collision handling if ( dist_ht >= 0 && dist_ht <= tile.height && dist_wd >= 0 && dist_wd <= tile.width ) { tact_pos = pt_closest; tact_dir = dist > 0 ? -tile.normal : tile.normal; return true; } float3 pt_lr = tile.pt_ll + tile.width * tile.norm_rt; float3 pt_ul = tile.pt_ll + tile.height * tile.norm_up; float3 pt_ur = pt_lr + tile.height * tile.norm_up; // Test whether the ball is touching a corner if ( ( dist_ht < 0 || dist_ht > tile.height ) && ( dist_wd < 0 || dist_wd > tile.width) ) { pCoor ref_pt; // We need to place the pseudo ball based upon the vector from // ball position to the corner. First step is to figure out which // corner. if ( dist_ht < 0 && dist_wd < 0 ) { ref_pt = tile.pt_ll; } else if ( dist_ht < 0 && dist_wd > tile.width ) { ref_pt = pt_lr; } else if ( dist_ht > tile.height && dist_wd < 0 ) { ref_pt = pt_ul; } else { ref_pt = pt_ur; } tact_pos = ref_pt; tact_dir = normalize(mv(ball.position,ref_pt)); return true; } // Else the ball is touching an edge const bool tact_horiz = dist_ht < 0 || dist_ht > tile.height; const pVect corner_to_tact = tact_horiz ? dist_wd * tile.norm_rt : dist_ht * tile.norm_up; const pCoor ref_pt = tact_horiz ? ( dist_ht < 0 ? tile.pt_ll : pt_ul ) : ( dist_wd < 0 ? 
tile.pt_ll : pt_lr ); // Find the closest edge point of the tile to the ball tact_pos = ref_pt + corner_to_tact; tact_dir = normalize(mv(ball.position,tact_pos)); return true; } __device__ void wheel_collect_tile_force(CUDA_Tile_W& tile, pCoor tact, pVect delta_mo) { pVect to_center = mv(wheel.center,tact); // Formula below needs to be checked. const float torque_dt = dot(wheel.axis_dir,cross(to_center,delta_mo)); tile.torque += torque_dt; } /// /// Collision (Penetration) Detection and Resolution Routines /// // Used in both passes. __device__ bool penetration_balls_resolve (CUDA_Ball_W& ball1, CUDA_Ball_W& ball2, bool b2_real, Force_Types ft) { /// Update velocity and angular momentum for a pair of balls in contact. // Later, separate friction and other forces. if ( ft == FT_Friction ) return false; pVect zero_vec = mv(0,0,0); pNorm dist = mn(ball1.position,ball2.position); float3 v1 = ball1.velocity; float3 v2 = ball2.velocity; float3 omega1 = ball1.omega; float3 omega2 = ball2.omega; const float mass_inv1 = ball1.mass_inv; const float mass_inv2 = ball2.mass_inv; const float r1 = ball1.radius; const float r2 = ball2.radius; const float radii_sum = r1 + r2; if ( dist.magnitude >= radii_sum ) return false; /// WARNING: This doesn't work: somefunc(-dist); pNorm ndist = -dist; // Compute relative (approach) velocity. // pVect prev_appr_vel = ball1.prev_velocity - ball2.prev_velocity; const float prev_approach_speed = dot( prev_appr_vel, dist ); const float loss_factor = 1 - opt_bounce_loss; // Compute change in speed based on how close balls touching, ignoring // energy loss. // const float appr_force_dt_no_loss = ( radii_sum - dist.magnitude ) * ( radii_sum - dist.magnitude ) * elasticity_inv_dt; // Change in speed accounting for energy loss. Only applied when // balls separating. // const float appr_force_dt = prev_approach_speed > 0 ? 
appr_force_dt_no_loss : loss_factor * appr_force_dt_no_loss; const float appr_deltas_1 = appr_force_dt * mass_inv1; /// Update Linear Velocity // v1 -= appr_deltas_1 * dist; if ( b2_real ) v2 += appr_force_dt * mass_inv2 * dist; const float fdt_to_do_1 = get_fdt_to_do(r1,mass_inv1); const float fdt_to_do_2 = get_fdt_to_do(r2,mass_inv2); // Find speed on surface of balls at point of contact. // pVect tact1_rot_vel = point_rot_vel(omega1,r1,dist); pVect tact2_rot_vel = point_rot_vel(omega2,r2,ndist); // Find relative velocity of surfaces at point of contact // in the plane formed by their surfaces. // pVect tan_vel = prev_appr_vel - prev_approach_speed * dist; pNorm tact_vel_dir = mn(tact1_rot_vel - tact2_rot_vel + tan_vel); // Find change in velocity due to friction. // const float fric_force_dt_potential = appr_force_dt_no_loss * opt_friction_coeff; const float mass_inv_sum = b2_real ? mass_inv1 + mass_inv2 : mass_inv1; const float force_dt_limit = tact_vel_dir.magnitude / ( 3.5f * mass_inv_sum ); // If true, surfaces are not sliding or will stop sliding after // frictional forces applied. (If a ball surface isn't sliding // against another surface than it must be rolling.) // const bool will_roll = force_dt_limit <= fric_force_dt_potential; const float sliding_fric_force_dt = will_roll ? force_dt_limit : fric_force_dt_potential; const float dv_tolerance = 0.000001f; const float sliding_fric_dv_1 = sliding_fric_force_dt * mass_inv1; const float3 sliding_fric_fdt_vec = sliding_fric_force_dt * tact_vel_dir; if ( sliding_fric_dv_1 > dv_tolerance ) { // Apply tangential force (resulting in angular momentum change) and // linear force (resulting in velocity change). // omega1 += tan_force_dt(dist, sliding_fric_fdt_vec, -fdt_to_do_1); v1 -= sliding_fric_dv_1 * tact_vel_dir; } const float sliding_fric_dv_2 = sliding_fric_force_dt * mass_inv2; if ( b2_real && sliding_fric_dv_2 > dv_tolerance ) { // Apply frictional forces for ball 2. 
// omega2 += tan_force_dt(ndist, sliding_fric_fdt_vec, fdt_to_do_2); v2 += sliding_fric_dv_2 * tact_vel_dir;; } { /// Torque // // // Account for forces of surfaces twisting against each // other. (For example, if one ball is spinning on top of // another.) // const float appr_omega = dot(omega2,dist) - dot(omega1,dist); const float fdt_to_do_sum = b2_real ? fdt_to_do_1 + fdt_to_do_2 : fdt_to_do_1; const float fdt_limit = fabs(appr_omega) / fdt_to_do_sum; const bool rev = appr_omega < 0; const float fdt_raw = min(fdt_limit,fric_force_dt_potential); const pVect fdt_v = ( rev ? -fdt_raw : fdt_raw ) * dist; omega1 += fdt_to_do_1 * fdt_v; if ( b2_real ) omega2 -= fdt_to_do_2 * fdt_v; } ball1.velocity = v1; ball1.omega = omega1; if ( !b2_real ) return true; ball2.velocity = v2; ball2.omega = omega2; const bool skip_rolling_friction = true; if ( skip_rolling_friction ) return true; #if 0 { /// Rolling Friction // // The rolling friction model used here is ad-hoc. pVect tan_b12_vel = b2_real ? 
0.5f * tan_vel : zero_vec; const float torque_limit_sort_of = appr_force_dt_no_loss * sqrt( radii_sum - dist.mag_sq / radii_sum ); // * sqrt( ball1.radius - 0.25 * dist.mag_sq * r_inv ); pVect tact1_rot_vel = point_rot_vel(omega1,r1,dist); pVect tact1_roll_vel = tact1_rot_vel + tan_b12_vel; pNorm tact1_roll_vel_dir = mn(tact1_roll_vel); pVect lost_vel = zero_vec; const float rfric_loss_dv_1 = torque_limit_sort_of * 2.5f * mass_inv1 * ( tact1_roll_vel_dir.magnitude * opt_friction_roll / ( 1 + tact1_roll_vel_dir.magnitude * opt_friction_roll ) ); pVect lost_vel1 = min(tact1_roll_vel_dir.magnitude, rfric_loss_dv_1) * tact1_roll_vel_dir; lost_vel = -lost_vel1; if ( b2_real ) { pVect tact2_rot_vel = point_rot_vel(omega2,r2,ndist); pVect tact2_roll_vel = tact2_rot_vel - tan_b12_vel; pNorm tact2_roll_vel_dir = mn(tact2_roll_vel); const float rfric_loss_dv_2 = torque_limit_sort_of * 2.5f * mass_inv2 * ( tact2_roll_vel_dir.magnitude * opt_friction_roll / ( 1 + tact2_roll_vel_dir.magnitude * opt_friction_roll ) ); pVect lost_vel2 = min(tact2_roll_vel_dir.magnitude, rfric_loss_dv_2 ) * tact2_roll_vel_dir; lost_vel += lost_vel2; } omega1 += tan_force_dt(dist, 0.4f / mass_inv1 * lost_vel, fdt_to_do_1); if ( b2_real ) omega2 += tan_force_dt(dist, 0.4f / mass_inv2 * lost_vel, fdt_to_do_2); } return true; #endif } // // Generic operations used by box code. // __device__ float3 sign_mask(int idx, float3 v) { return make_float3 (idx & 4 ? v.x : -v.x, idx & 2 ? v.y : -v.y, idx & 1 ? v.z : -v.z ); } // Multiply transpose of matrix m by column vector v. __device__ float3 mm_transpose(pcMatrix3x3 m, float3 v) { return v.x * m.r0 + v.y * m.r1 + v.z * m.r2; } __device__ float set_min(float &a, float b) { if ( b < a ) a = b; return a; } __device__ float set_max(float &a, float b) { if ( b > a ) a = b; return a; } // Set matrix m to a rotation matrix based on quaternion q. 
__device__ void pMatrix_set_rotation(pcMatrix3x3& m, float4 q) { m.r0.x = 1.f - 2.f * q.y * q.y - 2.f * q.z * q.z; m.r0.y = 2.f * q.x * q.y - 2.f * q.w * q.z; m.r0.z = 2.f * q.x * q.z + 2.f * q.w * q.y; m.r1.x = 2.f * q.x * q.y + 2.f * q.w * q.z; m.r1.y = 1.f - 2.f * q.x * q.x - 2.f * q.z * q.z; m.r1.z = 2.f * q.y * q.z - 2.f * q.w * q.x; m.r2.x = 2.f * q.x * q.z - 2.f * q.w * q.y; m.r2.y = 2.f * q.y * q.z + 2.f * q.w * q.x; m.r2.z = 1.f - 2.f * q.x * q.x - 2.f * q.y * q.y; } // Set transpose of matrix m to a rotation matrix based on quaternion q. __device__ void pMatrix_set_rotation_transpose(pcMatrix3x3& m, float4 q) { m.r0.x = 1.f - 2.f * q.y * q.y - 2.f * q.z * q.z; m.r1.x = 2.f * q.x * q.y - 2.f * q.w * q.z; m.r2.x = 2.f * q.x * q.z + 2.f * q.w * q.y; m.r0.y = 2.f * q.x * q.y + 2.f * q.w * q.z; m.r1.y = 1.f - 2.f * q.x * q.x - 2.f * q.z * q.z; m.r2.y = 2.f * q.y * q.z - 2.f * q.w * q.x; m.r0.z = 2.f * q.x * q.z - 2.f * q.w * q.y; m.r1.z = 2.f * q.y * q.z + 2.f * q.w * q.x; m.r2.z = 1.f - 2.f * q.x * q.x - 2.f * q.y * q.y; } // // Box operations. // struct pLine { __device__ pLine() {}; __device__ pLine(pCoor s, pVect d, float l):start(s),dir(d),len(l){}; pCoor start; pVect dir; float len; }; __device__ int8_t get_edge_vtx_idx(int edge) { // Index: xyz (z is LSB). #if 1 const int axis = edge >> 2; const int mask = 0xc >> axis; const int face_vtx = edge & 3; const int box_vtx_check = ( face_vtx & mask ) + face_vtx; return box_vtx_check; #else static const int8_t bi[12] = { 0, 1, 2, 3, 0, 1, 4, 5, 0, 2, 4, 6 }; return bi[edge]; #endif } __device__ float3 box_get_vertices(CUDA_Box_W& box, int vertex) { return box.position + mm_transpose(box.rot_inv,sign_mask(vertex,box.to_111)); } __device__ float3 box_get_axis_norm(CUDA_Box_W& box, int axis) { return axis == 0 ? box.rot_inv.r0 : axis == 1 ? box.rot_inv.r1 : box.rot_inv.r2; } __device__ float3 box_get_face_norm(CUDA_Box_W& box, int face) { pVect norm_raw = box_get_axis_norm(box,face>>1); return face & 1 ? 
// (Tail of a function whose definition begins before this chunk of the
// file; left exactly as found.)
norm_raw : -norm_raw; }

///
/// Box Geometry and Rigid-Body Helpers
///

// Return the full edge length of the box along local axis 0, 1, or 2.
// to_111 holds the box's half-dimensions, hence the factor of 2.
__device__ float
box_get_axis_len(CUDA_Box_W& box, int axis)
{
  return 2.0f * ( axis == 0 ? box.to_111.x :
                  axis == 1 ? box.to_111.y :
                  box.to_111.z );
}

// Return the area of a box face perpendicular to local axis d.
// Half-dimension products are scaled by 4 to give a full-face area.
__device__ float
box_get_axis_area(CUDA_Box_W& box, int d)
{
  return 4 * ( d == 0 ? box.to_111.x * box.to_111.y :
               d == 1 ? box.to_111.z * box.to_111.x :
               box.to_111.y * box.to_111.z );
}

// Construct a pLine for the box edge with index edge. Bits above the
// low two select the axis; get_edge_vtx_idx maps the index to the
// edge's starting vertex.
__device__ pLine
box_get_edge(CUDA_Box_W& box, int edge)
{
  const int axis = edge >> 2;
  const int8_t box_vtx = get_edge_vtx_idx(edge);
  return pLine(box_get_vertices(box,box_vtx),
               box_get_axis_norm(box,axis),
               box_get_axis_len(box,axis));
}

// Set box.mi_vec, the per-axis factors used when computing moments of
// inertia, from the given half-dimension vector.
__device__ void
box_set_mi_vec(CUDA_Box_W& box,float3 to_111)
{
  pVect dsq = to_111 * to_111;
  float dsqs = dsq.x + dsq.y + dsq.z;
  // NOTE(review): divides by mass_inv, so this assumes a dynamic box
  // (mass_inv != 0) — confirm callers never reach here for static boxes.
  float mass_factor = 1.0f / ( box.mass_inv * 3.0f );
  box.mi_vec = mass_factor * ( mv(dsqs) - dsq );
}

// Convenience overload using the box's own stored half-dimensions.
__device__ void
box_set_mi_vec(CUDA_Box_W& box) { box_set_mi_vec(box,box.to_111); }

__device__ float box_get_moment_of_inertia_inv(CUDA_Box_W& box, pNorm axis);

// Velocity of the box material at world-space point pos: the
// translational velocity plus the rotational contribution omega x r.
__device__ float3
box_get_vel(CUDA_Box_W&box, float3 pos)
{
  pVect cent_to_pt = mv(box.position,pos);
  pVect rot_vel = cross(box.omega,cent_to_pt);
  return rot_vel + box.velocity;
}

// Refresh derived geometry (world-to-local rotation matrix and the
// moment-of-inertia vector) after orientation or size has changed.
__device__ void
box_geometry_update(CUDA_Box_W& box)
{
  pMatrix_set_rotation_transpose(box.rot_inv, box.orientation);
  box_set_mi_vec(box);
}

// Apply an impulse (force integrated over the time step) at world
// point tact, updating both linear and angular velocity. No-op for
// static boxes (mass_inv == 0).
__device__ void
box_apply_force_dt(CUDA_Box_W& box, float3 tact, float3 force)
{
  if ( box.mass_inv == 0 ) return;
  box.velocity += box.mass_inv * force;
  pVect cent_to_tact = mv(box.position,tact);
  pVect torque = cross(cent_to_tact,force);
  pNorm torqueN = mn(torque);
  float mi_inv = box_get_moment_of_inertia_inv(box,torqueN);
  box.omega += mi_inv * torque;
}

// Inverse moment of inertia of the box about the given world-space
// axis. Returns 0 for a static box or a degenerate (near-zero) axis.
__device__ float
box_get_moment_of_inertia_inv(CUDA_Box_W& box, pNorm axis)
{
  if ( axis.mag_sq < 1e-11f || box.mass_inv == 0 ) return 0;
  pVect tl = box.rot_inv * axis.v;  // Axis in box-local coordinates.
  pVect tls = tl * tl;
  float mi = dot(tls,box.mi_vec);
  return 1.0f / mi;
}

// Inverse moment of inertia for a force applied at point tact in
// direction dir (the torque axis is r x dir).
__device__ float
box_get_moment_of_inertia_inv(CUDA_Box_W& box, float3 tact, pNorm dir)
{
  pVect cent_to_tact = mv(box.position,tact);
  pNorm torque_axis = mn(cross(cent_to_tact,dir));
  return box_get_moment_of_inertia_inv(box,torque_axis);
}

// Apply a friction impulse of magnitude force_mag_dt along force_dir
// at contact point tact.
__device__ void
box_apply_force_fric_dt
(CUDA_Box_W& box, float3 tact, pNorm force_dir, float force_mag_dt)
{
  box_apply_force_dt(box,tact,force_mag_dt*force_dir);
}

// Return an intersection record marked as "no intersection found".
__device__ CUDA_SectTT
sect_init()
{
  CUDA_SectTT sect;
  sect.exists = false;
  return sect;
}

#include "k-boxes.h"

///
/// Pass Box/Box Intersect
///

__global__ void pass_xx_intersect(int xx_pairs_count);

// Host-side launcher for the box/box intersection kernel. One entry
// per candidate box pair; no shared memory needed.
__host__ void
pass_xx_intersect_launch(dim3 dg, dim3 db, int xx_pairs_count)
{
  const int shared_amt = 0;
  hipLaunchKernelGGL(( pass_xx_intersect), dim3(dg),dim3(db),shared_amt, 0,  xx_pairs_count );
}

// Apply the normal (spring-like) impulse pushing two interpenetrating
// boxes apart at contact point pos. The magnitude of sep_normal
// carries the raw penetration measure (scaled by 0.1 here).
__device__ void
penetration_boxes_resolve_force
(CUDA_Box_W& box1, CUDA_Box_W& box2, float3 pos, pNorm sep_normal)
{
  const float pen_dist = 0.1f * sep_normal.magnitude;

  pVect vel1 = box_get_vel(box1,pos);
  pVect vel2 = box_get_vel(box2,pos);
  pVect velto1 = vel2 - vel1;
  const float sep_vel = dot(velto1,sep_normal.v);

  const float loss_factor = 1 - opt_bounce_loss_box;
  const float force_dt_no_loss = elasticity_inv_dt * pen_dist;
  const bool separating = sep_vel >= 0;
  // Bounce energy loss is applied during the rebound (separating)
  // phase only, so a full collision loses the configured fraction.
  const float appr_force_dt = separating
    ? force_dt_no_loss * loss_factor : force_dt_no_loss;

  pVect sep_force = appr_force_dt * sep_normal.v;
  box_apply_force_dt(box1, pos, -sep_force );
  box_apply_force_dt(box2, pos, sep_force );
}

// Apply friction impulses for a box/box contact: a twisting-friction
// term about the contact normal, then sliding friction opposing the
// tangential relative velocity.
__device__ void
penetration_boxes_resolve_fric
(CUDA_Box_W& box1, CUDA_Box_W& box2, float3 pos, pNorm sep_normal)
{
  const float pen_dist = 0.1f * sep_normal.magnitude;
  const float force_dt_no_loss = elasticity_inv_dt * pen_dist;
  const float fric_force_dt_potential = force_dt_no_loss * opt_friction_coeff;

  /// Torque
  //
  //
  // Account for forces of surfaces twisting against each
  // other. (For example, if one box is spinning on top of
  // another.)
  //
  const float appr_omega =
    dot(box2.omega,sep_normal) - dot(box1.omega,sep_normal);
  {
    const float mi1_inv = box_get_moment_of_inertia_inv(box1,sep_normal);
    const float mi2_inv = box_get_moment_of_inertia_inv(box2,sep_normal);
    // Limit so friction can stop, but never reverse, the relative spin.
    const float fdt_limit = fabs(appr_omega) / ( mi1_inv + mi2_inv );
    const bool rev = appr_omega < 0;
    const float fdt_raw = min(fdt_limit,fric_force_dt_potential);
    const pVect fdt_v = ( rev ? -fdt_raw : fdt_raw ) * sep_normal;
    box1.omega += mi1_inv * fdt_v;
    box2.omega -= mi2_inv * fdt_v;
  }

  // Sliding friction: oppose the tangential component of the relative
  // velocity at the contact point.
  pVect vel1b = box_get_vel(box1,pos);
  pVect vel2b = box_get_vel(box2,pos);
  pVect velto1b = vel2b - vel1b;
  const float sep_velb = dot(velto1b,sep_normal);
  pNorm tan_vel = mn(velto1b - sep_velb * sep_normal);
  // Limit so friction can stop, but never reverse, the sliding motion.
  const float fdt_limit = 0.5f * tan_vel.magnitude /
    ( box1.mass_inv + box2.mass_inv
      + box_get_moment_of_inertia_inv(box1,pos,tan_vel)
      + box_get_moment_of_inertia_inv(box2,pos,tan_vel) );
  const float fric_force_dt = min(fdt_limit,fric_force_dt_potential);
  box_apply_force_fric_dt(box1,pos, tan_vel, fric_force_dt);
  box_apply_force_fric_dt(box2,pos, -tan_vel, fric_force_dt);
}

__device__ bool
penetration_boxes_resolve
(CUDA_Phys_W& phys1, CUDA_Phys_W& phys2, int tsidx, Force_Types ft)
{
  /// Update velocity and angular momentum for a pair of boxes in contact.
  //
  // The contact point and separation direction were computed by the
  // earlier box/box intersect pass; they are read here from
  // xx_sects_center / xx_sects_dir. A zero w component in the
  // direction entry means no contact was found.
  CUDA_Box_W& box1 = phys1.box;
  CUDA_Box_W& box2 = phys2.box;

  float4 dir_and_mag = xx_sects_dir[tsidx];
  if ( dir_and_mag.w == 0 ) return false;
  float4 center_and_um = xx_sects_center[tsidx];
  float3 center = m3(center_and_um);
  pNorm sep_normal = mn(dir_and_mag);

  if ( ft & FT_NonFriction )
    penetration_boxes_resolve_force(box1,box2,center,sep_normal);
  if ( ft & FT_Friction )
    penetration_boxes_resolve_fric(box1,box2,center,sep_normal);

  return true;
}

///
/// Pairs Pass
///

//
// Resolve ball collisions with each other.
__global__ void pass_pairs
(int prefetch_offset, int schedule_offset, int round_cnt,
 int max_balls_per_thread, int balls_per_block, Force_Types ft);

// Host-side launcher for the pairs pass. Shared memory holds the
// per-block working set: either an array of CUDA_Phys_W structs
// (USE_STRUCT) or a packed float3 array with 7 slots per object plus
// one spare (the "* 8" factor).
__host__ void pass_pairs_launch
(dim3 dg, dim3 db, int prefetch_offset, int schedule_offset,
 int round_cnt, int max_balls_per_thread, int balls_per_block,
 Force_Types ft)
{
#ifdef USE_STRUCT
  const int shared_amt = balls_per_block * sizeof(CUDA_Phys_W);
#else
  const int shared_amt = balls_per_block * sizeof(sm_balls[0]) * 8;
#endif
  hipLaunchKernelGGL(( pass_pairs), dim3(dg),dim3(db),shared_amt, 0, 
    ( prefetch_offset, schedule_offset, round_cnt,
      max_balls_per_thread, balls_per_block, ft );
}

#ifndef USE_STRUCT
// Slot indices into the packed shared-memory layout (sm_balls):
// each object occupies `factor` consecutive float3 entries.
struct SM_Offsets {
  int idx_pos;       // Center position.
  int idx_vel;       // Velocity.
  int idx_omega;     // Angular velocity.
  int idx_prev_vel;  // Velocity at the previous step.
  int idx_rad_etc;   // x: radius, y: mass_inv, z: orientation w (boxes).
  int idx_to_111;    // Box half-dimension vector.
  int idx_ori_xyz;   // Box orientation quaternion xyz.
  int factor;        // Stride, in float3 slots, between objects.
};

// Unpack the ball-level fields of object idx from shared memory into a
// local CUDA_Phys_W. (Box-only fields are filled in later by
// upgrade_sm_box only when needed.)
__device__ CUDA_Phys_W
get_sm_ball(SM_Offsets& smo, int idx)
{
  CUDA_Phys_W phys;
  const int sidx = idx * smo.factor;
  phys.box.velocity = sm_balls[smo.idx_vel+sidx];
  phys.box.prev_velocity = sm_balls[smo.idx_prev_vel+sidx];
  phys.box.position = sm_balls[smo.idx_pos+sidx];
  phys.box.omega = sm_balls[smo.idx_omega+sidx];
  phys.box.radius = sm_balls[smo.idx_rad_etc+sidx].x;
  phys.box.mass_inv = sm_balls[smo.idx_rad_etc+sidx].y;
  phys.read_only = phys.box.mass_inv == 0;
  return phys;
}

// Fill in the box-only derived state (rotation matrix, half dimensions,
// moment-of-inertia vector) for object idx from shared memory.
__device__ void
upgrade_sm_box(CUDA_Phys_W& phys, SM_Offsets& smo, int idx)
{
  const int sidx = idx * smo.factor;
  float4 ori;
  set_f4(ori,sm_balls[smo.idx_ori_xyz+sidx],
         sm_balls[smo.idx_rad_etc+sidx].z);
  pMatrix_set_rotation_transpose(phys.box.rot_inv,ori);
  float3 to_111 = sm_balls[smo.idx_to_111+sidx];
  phys.box.to_111 = to_111;
  box_set_mi_vec(phys.box);
}

// Write the mutable fields (velocity, omega) back to shared memory.
// NOTE: unlike get_sm_ball, sidx here is the already-scaled slot index
// (caller passes idx * smo.factor).
__device__ void
put_sm_phys(SM_Offsets& smo, int sidx, CUDA_Phys_W& phys)
{
  sm_balls[smo.idx_vel+sidx] = phys.ball.velocity;
  sm_balls[smo.idx_omega+sidx] = phys.ball.omega;
}
#endif

// Pairs-pass kernel: each block prefetches its working set of physical
// objects into shared memory, then over round_cnt rounds each thread
// resolves one scheduled contact pair, and finally writes updated
// velocities back to global memory. The schedule guarantees no two
// threads in a round touch the same object; __syncthreads separates
// rounds.
__global__ void pass_pairs
(int prefetch_offset, int schedule_offset, int round_cnt,
 int max_balls_per_thread, int balls_per_block, Force_Types ft)
{
  const int tid = threadIdx.x;

  // Initialize variables used to access the balls_needed and
  // tacts_schedule arrays.
  //
  const int si_block_size = blockIdx.x * max_balls_per_thread * blockDim.x;
  const int si_block_base = prefetch_offset + si_block_size + tid;
  const int sp_block_size = blockIdx.x * round_cnt * blockDim.x;
  const int sp_block_base = schedule_offset + sp_block_size + tid;

  /// Prefetch objects to shared memory.
  //
#ifdef USE_STRUCT
  for ( int i=0; i<max_balls_per_thread; i++ )
    {
      int idx = tid + i * blockDim.x;
      if ( idx >= balls_per_block ) continue;
      const int m_idx = block_balls_needed[ si_block_base + i * blockDim.x ];
      CUDA_Phys_W& phys = sm_balls[idx];
      CUDA_Ball_W& ball = phys.ball;
      CUDA_Box_W& box = phys.box;
      phys.m_idx = m_idx;
      if ( m_idx < 0 ) continue;
      int4 tact_counts = balls_x.tact_counts[m_idx];
      phys.pt_type = tact_counts.x;
      phys.contact_count = tact_counts.y;
      phys.debug_pair_calls = tact_counts.z;
      phys.part_of_wheel = bool(tact_counts.w & 2);  // Bit 1: wheel member.
      phys.read_only = tact_counts.w & 1;            // Bit 0: read-only.
      ball.velocity = xyz(balls_x.velocity[m_idx]);
      ball.prev_velocity = xyz(balls_x.prev_velocity[m_idx]);
      ball.position = xyz(balls_x.position[m_idx]);
      ball.omega = xyz(balls_x.omega[m_idx]);
      float4 ball_props = balls_x.ball_props[m_idx];
      ball.radius = ball_props.x;
      ball.mass_inv = ball_props.y;
      ball.pad1 = ball_props.z;
      ball.pad2 = ball_props.w;
      if ( phys.pt_type == PT_Box )
        {
          set_f3(box.to_111, balls_x.to_111[m_idx]);
          box.orientation = balls_x.orientation[m_idx];
          box_geometry_update(box);
        }
    }
#else
  SM_Offsets smo;
  smo.idx_pos = 0; smo.idx_vel = 1; smo.idx_omega = 2;
  smo.idx_prev_vel = 3; smo.idx_rad_etc = 4; smo.idx_to_111 = 5;
  smo.idx_ori_xyz = 6; smo.factor = 7;
  for ( int i=0; i<max_balls_per_thread; i++ )
    {
      int idx = tid + i * blockDim.x;
      if ( idx >= balls_per_block ) continue;
      const int m_idx = block_balls_needed[ si_block_base + i * blockDim.x ];
      if ( m_idx < 0 ) continue;
      int4 tact_counts = balls_x.tact_counts[m_idx];
      const int pt_type = tact_counts.x;
      sm_balls_misc[idx].x = tact_counts.x;  // pt_type
      sm_balls_misc[idx].y = tact_counts.y;  // contact count
      sm_balls_misc[idx].z = tact_counts.z;  // debug_pair_calls
      sm_balls_misc[idx].w = tact_counts.w;  // Part of wheel is bit 0x2
      const int sidx = idx * smo.factor;
      sm_balls[smo.idx_vel+sidx] = m3(balls_x.velocity[m_idx]);
      sm_balls[smo.idx_prev_vel+sidx] = m3(balls_x.prev_velocity[m_idx]);
      sm_balls[smo.idx_pos+sidx] = m3(balls_x.position[m_idx]);
      sm_balls[smo.idx_omega+sidx] = m3(balls_x.omega[m_idx]);
      float4 props =balls_x.ball_props[m_idx];
      sm_balls[smo.idx_rad_etc+sidx] = m3(props);
      if ( pt_type == PT_Box )
        {
          sm_balls[smo.idx_to_111+sidx] = m3(balls_x.to_111[m_idx]);
          const float4 orientation = balls_x.orientation[m_idx];
          sm_balls[smo.idx_ori_xyz+sidx] = m3(orientation);
          // Quaternion w is stashed in the unused rad_etc z slot.
          sm_balls[smo.idx_rad_etc+sidx].z = orientation.w;
        }
    }
#endif

  const pVect zero_vec = mv(0,0,0);

  /// Resolve Collisions
  //
  for ( int round=0; round<round_cnt; round++ )
    {
      const int tsidx = sp_block_base + round * blockDim.x;
      SM_Idx2 indices = tacts_schedule[ tsidx ];
      const int ix = indices.x;
      const int iy = indices.y;

      // Wait for all threads to reach this point (to avoid having
      // two threads operate on the same ball simultaneously).
      //
      __syncthreads();

      // Equal indices mark an idle (padding) schedule slot.
      if ( indices.x == indices.y ) continue;

#ifdef USE_STRUCT
      CUDA_Phys_W& physx = sm_balls[ix];
      CUDA_Phys_W& physy = sm_balls[iy];
      const unsigned char ptx = physx.pt_type;
      const unsigned char pty = physy.pt_type;
#else
      const int six = ix * smo.factor;
      const int siy = iy * smo.factor;
      CUDA_Phys_W physx = get_sm_ball(smo,ix);
      CUDA_Phys_W physy = get_sm_ball(smo,iy);
      const int ptx = sm_balls_misc[ix].x;
      const int pty = sm_balls_misc[iy].x;
#endif

      if ( ft & FT_NonFriction )
        {
#ifdef USE_STRUCT
          physx.debug_pair_calls++;
          physy.debug_pair_calls++;
#else
          sm_balls_misc[ix].z++;
          sm_balls_misc[iy].z++;
#endif
        }

      char rv;

      // Dispatch on the object types of the pair.
      if ( ptx == PT_Box && pty == PT_Box )
        {
#ifndef USE_STRUCT
          upgrade_sm_box(physx,smo,ix);
          upgrade_sm_box(physy,smo,iy);
#endif
          rv = penetration_boxes_resolve(physx,physy,tsidx,ft);
        }
      else if ( ptx == PT_Ball && pty == PT_Box )
        {
#ifndef USE_STRUCT
          upgrade_sm_box(physy,smo,iy);
#endif
          rv = penetration_box_ball_resolve(physy,physx,ft);
        }
      else if ( pty == PT_Ball )
        {
          CUDA_Ball_W& ballx = physx.ball;
          CUDA_Ball_W& bally = physy.ball;
          rv = penetration_balls_resolve(ballx,bally,true,ft);
        }
      else if ( pty == PT_Box )
        {
          // Note: Tile / Box collisions not yet handled.
          rv = 0;
        }
      else
        {
          // Ball / tile collision: model the tile contact as a
          // zero-velocity, infinite-mass phantom ball placed so it
          // touches the real ball at the tile contact point.
          CUDA_Ball_W& ballx = physx.ball;
          CUDA_Tile_W& tiley = physy.tile;
          pCoor tact_pos;
          pVect tact_dir;
          rv = tile_ball_collide(tiley, ballx, tact_pos, tact_dir);
          if ( !rv ) continue;
          CUDA_Ball_W pball;
          pball.radius = 1;
          pball.omega = pball.prev_velocity = pball.velocity = zero_vec;
          pball.position = tact_pos + tact_dir;
          pVect vbefore = physx.ball.velocity;
          penetration_balls_resolve(ballx, pball, false, ft);
          // Momentum transferred to the ball; fed to the wheel model.
          pVect delta_mo = ( 1.0f / ballx.mass_inv )
            * ( ballx.velocity - vbefore );
#ifdef USE_STRUCT
          const bool part_of_wheel = physy.part_of_wheel;
#else
          const bool part_of_wheel = sm_balls_misc[iy].w & 2;
#endif
          if ( part_of_wheel )
            {
              wheel_collect_tile_force(tiley, tact_pos, delta_mo);
              // Note: Need to fix this.
            }
#ifndef USE_STRUCT
          put_sm_phys(smo,six,physx);
          sm_balls_misc[ix].y += 1;
          continue;
#endif
        }

#ifdef USE_STRUCT
      physx.contact_count += rv;
      physy.contact_count += rv;
#else
      put_sm_phys(smo,six,physx);
      put_sm_phys(smo,siy,physy);
      sm_balls_misc[ix].y += rv;
      sm_balls_misc[iy].y += rv;
#endif
    }

  // Make sure all rounds are finished before writing results back.
  __syncthreads();

  /// Copy Ball Data to Memory
  //
  for ( int i=0; i<max_balls_per_thread; i++ )
    {
      int idx = tid + i * blockDim.x;
      if ( idx >= balls_per_block ) continue;
#ifdef USE_STRUCT
      CUDA_Phys_W& phys = sm_balls[idx];
      const int m_idx = phys.m_idx;
      if ( m_idx < 0 ) continue;
      if ( phys.read_only ) continue;
#else
      const int sidx = idx * smo.factor;
      const int m_idx = block_balls_needed[ si_block_base + i * blockDim.x ];
      if ( m_idx < 0 ) continue;
      const float mass_inv = sm_balls[smo.idx_rad_etc+sidx].y;
      const bool read_only = mass_inv == 0;
      if ( read_only ) continue;
#endif
#ifdef USE_STRUCT
      CUDA_Ball_W& ball = phys.ball;
      int4 tact_counts;
      tact_counts.x = phys.pt_type;
      tact_counts.y = phys.contact_count;
      tact_counts.z = phys.debug_pair_calls;
      // NOTE(review): this stores part_of_wheel as the whole w word,
      // which differs from the bit-1 encoding read at prefetch time
      // (w & 2) — dead code while USE_STRUCT is #undef'd, but confirm
      // before re-enabling.
      tact_counts.w = phys.part_of_wheel;
      balls_x.tact_counts[m_idx] = tact_counts;
      const char pt_type = phys.pt_type;
      set_f4(balls_x.velocity[m_idx], ball.velocity);
      if ( pt_type == PT_Tile ) continue;
      set_f4(balls_x.omega[m_idx], ball.omega);
#else
      balls_x.tact_counts[m_idx].y = sm_balls_misc[idx].y;
      balls_x.tact_counts[m_idx].z = sm_balls_misc[idx].z;
      const unsigned char pt_type = sm_balls_misc[idx].x;
      set_f4(balls_x.velocity[m_idx], sm_balls[smo.idx_vel+sidx]);
      if ( pt_type == PT_Tile ) continue;
      set_f4(balls_x.omega[m_idx], sm_balls[smo.idx_omega+sidx]);
#endif
    }
}

///
/// Platform Pass
///

//
// Resolve ball collisions with platform, also update ball position
// and orientation.
__device__ void platform_collision(CUDA_Phys_W& phys);
__device__ void platform_collision_box(CUDA_Phys_W& phys);
__global__ void pass_platform(int ball_count);
__device__ void pass_platform_ball(CUDA_Phys_W& phys, int idx);
__device__ void pass_platform_tile(CUDA_Phys_W& phys, int idx);
__device__ void pass_platform_box(CUDA_Phys_W& phys, int idx);

// Return launch attributes of the three kernels so the host can size
// grids/blocks; also copies the addresses of the file's __constant__
// symbols (collect_symbols) before first use.
__host__ hipError_t
cuda_get_attr_plat_pairs
(struct hipFuncAttributes *attr_platform,
 struct hipFuncAttributes *attr_pairs,
 struct hipFuncAttributes *attr_xx_intersect)
{
  collect_symbols();

  // Return attributes of CUDA functions. The code needs the
  // maximum number of threads.

  hipError_t e1 = hipFuncGetAttributes(attr_platform,pass_platform);
  if ( e1 ) return e1;
  hipError_t e2 = hipFuncGetAttributes(attr_pairs,pass_pairs);
  if ( e2 ) return e2;
  hipError_t e3 = hipFuncGetAttributes(attr_xx_intersect,pass_xx_intersect);
  return e3;
}

// Launcher for the platform pass: one thread per physical object.
// Shared memory (block_torque_dt) is sized to the block size rounded
// up to a power of two.
__host__ void
pass_platform_launch(dim3 dg, dim3 db, int ball_count)
{
  const int block_lg = 32 - __builtin_clz(db.x-1);
  const int shared_amt = sizeof(float) << block_lg;
  hipLaunchKernelGGL(( pass_platform), dim3(dg),dim3(db),shared_amt, 0,  ball_count );
}

__global__ void
pass_platform(int ball_count)
{
  /// Main CUDA routine for resolving collisions with platform and
  /// updating ball position and orientation.

  // One ball per thread.
  const int idx_base = blockIdx.x * blockDim.x;
  const int idx = idx_base + threadIdx.x;

  if ( idx >= ball_count ) return;

  CUDA_Phys_W phys;

  /// Copy ball data from memory to local variables.
  //
  //  Local variables hopefully will be in GPU registers, not
  //  slow local memory.
  //
  int4 tact_counts = balls_x.tact_counts[idx];
  phys.pt_type = tact_counts.x;
  phys.contact_count = tact_counts.y;
  // NOTE(review): bit 0 is tested here for part_of_wheel, whereas
  // pass_pairs tests bit 1 (w & 2) — confirm the intended encoding.
  phys.part_of_wheel = tact_counts.w & 1;

  if ( phys.pt_type == PT_Ball ) pass_platform_ball(phys, idx);
  else if ( phys.pt_type == PT_Box ) pass_platform_box(phys, idx);
  else pass_platform_tile(phys, idx);

  /// Copy other updated data to memory.
  //
  // The contact and debug-pair counts are shifted into high bits;
  // presumably decoded by the host after this pass — confirm against
  // the CPU-side reader.
  tact_counts.y = phys.contact_count << 8;
  tact_counts.z = tact_counts.z << 16;
  balls_x.tact_counts[idx] = tact_counts;
}

// Per-ball platform-pass work: gravity, platform collision, air
// resistance, then position/orientation integration.
__device__ void
pass_platform_ball(CUDA_Phys_W& phys, int idx)
{
  // One ball per thread.
  CUDA_Ball_W& ball = phys.ball;

  /// Copy ball data from memory to local variables.
  //
  //  Local variables hopefully will be in GPU registers, not
  //  slow local memory.
  //
  ball.prev_velocity = xyz(balls_x.prev_velocity[idx]);
  ball.velocity = xyz(balls_x.velocity[idx]) + gravity_accel_dt;
  set_f3(ball.position,balls_x.position[idx]);
  set_f3(ball.omega, balls_x.omega[idx]);
  float4 ball_props = balls_x.ball_props[idx];
  ball.radius = ball_props.x;
  ball.mass_inv = ball_props.y;

  /// Handle Ball/Platform Collision
  //
  if ( opt_platform_curved ) platform_collision(phys);

  /// Handle Air Resistance
  //
  // Drag force proportional to cross-sectional area; applied as an
  // exponential decay of speed over the time step.
  const float area = M_PI * ball.radius * ball.radius;
  pNorm force = mn( -area * opt_air_resistance * ball.velocity );
  const float v_change = exp( -force.magnitude * ball.mass_inv * delta_t );
  ball.velocity = v_change * ball.velocity;

  /// Update Position and Orientation
  //
  // Trapezoidal position update; orientation advanced by the rotation
  // quaternion for this step's spin.
  ball.position += 0.5f * delta_t * ( ball.prev_velocity + ball.velocity );
  pNorm axis = mn(ball.omega);
  balls_x.orientation[idx] =
    quat_normalize
    ( quat_mult ( mq( axis, delta_t * axis.magnitude ),
                  balls_x.orientation[idx] ));

  /// Copy other updated data to memory.
  //
  set_f4(balls_x.velocity[idx], ball.velocity);
  set_f4(balls_x.prev_velocity[idx], ball.velocity);
  set_f4(balls_x.omega[idx], ball.omega);
  set_f4(balls_x.position[idx], ball.position, ball.radius);
}

// Per-tile platform-pass work: tiles belonging to the wheel accumulate
// torque and are rotated about the wheel axis.
__device__ void
pass_platform_tile(CUDA_Phys_W& phys, int idx)
{
  if ( !phys.part_of_wheel ) return;

  const int tid = threadIdx.x;

  // Collected torque is stashed in the velocity z slot by the pairs
  // pass; read it out and clear it.
  float4 tile_props = balls_x.velocity[idx];
  float torque = tile_props.z;
  block_torque_dt[tid] = torque;
  tile_props.z = 0;
  balls_x.velocity[idx] = tile_props;

  float omega = wheel.omega[0];

  // Tile geometry is packed across several balls_x float4 slots.
  const float3 pt_ll = xyz(balls_x.position[idx]);
  const float3 norm_rt = xyz(balls_x.omega[idx]);
  const float3 norm_up = xyz(balls_x.prev_velocity[idx]);
  const float3 normal = xyz(balls_x.ball_props[idx]);

  float torque_sum = 0;
  // Assuming that all are on same warp. :-)
  // NOTE(review): reads other threads' block_torque_dt entries with no
  // barrier — relies on implicit warp synchrony, which is not
  // guaranteed under independent thread scheduling (Volta+ / recent
  // HIP targets); confirm or add a __syncwarp/__syncthreads.
  for ( int i=wheel.idx_start; i<wheel.idx_stop; i++ )
    torque_sum += block_torque_dt[i];

  omega -= torque_sum * wheel.moment_of_inertia_inv;

  // Dry friction on the wheel bearing: reduce |omega| by a fixed
  // amount per step, clamping at zero.
  const float friction_delta_omega =
    wheel.friction_torque * wheel.moment_of_inertia_inv * delta_t;

  if ( fabs(omega) <= friction_delta_omega ) omega = 0;
  else if ( omega > 0 ) omega -= friction_delta_omega;
  else omega += friction_delta_omega;

  // Rotate the tile's geometry about the wheel axis by this step's
  // rotation angle.
  const float delta_theta = omega * delta_t;

  pcMatrix3x3 rot;
  pMatrix_set_rotation(rot,wheel.axis_dir,delta_theta);

  const float3 rpt_ll = wheel.center + rot * ( pt_ll - wheel.center );
  const float3 rnorm_rt = rot * norm_rt;
  const float3 rnorm_up = rot * norm_up;
  const float3 rnormal = rot * normal;

  set_f4(balls_x.position[idx],rpt_ll);
  set_f4(balls_x.omega[idx], rnorm_rt);
  set_f4(balls_x.prev_velocity[idx], rnorm_up);
  set_f4(balls_x.ball_props[idx], rnormal);

  // One designated tile thread publishes the updated wheel speed.
  if ( idx == wheel.idx_start ) wheel.omega[0] = omega;
}

// Per-box platform-pass work: gravity, platform collision, linear and
// rotational air resistance, then position/orientation integration.
__device__ void
pass_platform_box(CUDA_Phys_W& phys, int idx)
{
  // One box per thread.
  CUDA_Box_W& box = phys.box;

  /// Copy data from memory to local variables.
  //
  //  Local variables hopefully will be in GPU registers, not
  //  slow local memory.
  //
  float4 box_props = balls_x.ball_props[idx];
  box.mass_inv = box_props.y;
  if ( box.mass_inv == 0 ) return;  // Read only.
  box.prev_velocity = xyz(balls_x.prev_velocity[idx]);
  box.velocity = xyz(balls_x.velocity[idx]) + gravity_accel_dt;
  set_f3(box.position,balls_x.position[idx]);
  set_f3(box.omega, balls_x.omega[idx]);
  set_f3(box.to_111, balls_x.to_111[idx]);
  box.orientation = balls_x.orientation[idx];
  box_geometry_update(box);

  /// Handle Ball/Platform Collision
  //
  if ( opt_platform_curved ) platform_collision_box(phys);

  /// Handle Air Resistance
  //
  // Sum drag over the three face orientations, weighted by face area
  // and the velocity component normal to each face.
  pVect force = mv(0,0,0);
  for ( int d=0; d<3; d++ )
    {
      const pVect face_normal = box_get_axis_norm(box,2-d);
      const float amt = dot( face_normal, box.velocity );
      const float area = box_get_axis_area(box,d);
      force += amt * area * face_normal;
    }
  pNorm force_dir = mn(force);
  const float v_dir = dot(force_dir,box.velocity);
  const float resistance = force_dir.magnitude * opt_air_resistance;
  const float v_change = expf(- resistance * box.mass_inv * delta_t );
  box.velocity -= v_dir * (1.0f - v_change ) * force_dir;

  {
    // Rotational air resistance: damp omega using per-axis drag
    // moments computed from the box dimensions.
    float3 lsq = box.to_111 * box.to_111;
    pVect amoment1 = mv( lsq.y + lsq.z, lsq.x + lsq.z, lsq.x + lsq.y );
    float3 omega = box.omega;
    pVect amoment = amoment1 * box.to_111;
    pVect omega_l = box.rot_inv * omega;  // Omega in box coordinates.
    const float torque = opt_air_resistance * dot(amoment,fabs(omega_l));
    // Note: despite the name, mi holds the INVERSE moment of inertia.
    const float mi = box_get_moment_of_inertia_inv(box,mn(omega));
    const float o_change = exp(- torque * mi * delta_t );
    box.omega = o_change * omega;
  }

  /// Update Position and Orientation
  //
  box.position += 0.5f * delta_t * ( box.prev_velocity + box.velocity );
  pNorm axis = mn(box.omega);
  balls_x.orientation[idx] =
    quat_normalize
    ( quat_mult ( mq( axis, delta_t * axis.magnitude ),
                  box.orientation ));

  /// Copy other updated data to memory.
  //
  set_f4(balls_x.velocity[idx], box.velocity);
  set_f4(balls_x.prev_velocity[idx], box.velocity);
  set_f4(balls_x.omega[idx], box.omega);
  set_f4(balls_x.position[idx], box.position, box_props.x);
}

// Resolve a box's collision with the curved platform: find penetrating
// vertices (and edge crossings at the platform's z edges), then apply
// normal and friction impulses at each contact.
__device__ void
platform_collision_box(CUDA_Phys_W& phys)
{
  CUDA_Box_W& box = phys.box;

  // Cheap bounding-sphere rejection tests first.
  float radius = length(box.to_111);
  if ( box.position.y - radius >= 0 ) return;
  if ( box.position.z + radius <= platform_zmin ) return;
  if ( box.position.z - radius >= platform_zmax ) return;
  float3 axis = mv(platform_xmid,0,box.position.z);
  pVect btoa = mv(box.position,axis);
  if ( dot(btoa,btoa) < (platform_xrad-radius)*(platform_xrad-radius) )
    return;

  box_geometry_update(box);

  int inside = 0;
  int outside_under = 0;
  float pen_dists[8];
  // NOTE(review): psects has room for 5 contacts but up to 8 vertex
  // contacts plus edge contacts can be generated below with no bound
  // check on ps_next — confirm the geometry limits this, or clamp.
  CUDA_SectTT psects[5];
  int ps_next = 0;
  float min_pd = 0;  // For vertices between ends.
  float max_pd = 0;

  // Find vertices that are under the platform.
  //
  for ( int v=0; v<8; v++ )
    {
      int v_bit = 1 << v;
      float3 pos = box_get_vertices(box,v);
      if ( pos.y > 0 ) { pen_dists[v] = 0; continue; }
      float3 axis = mc(platform_xmid,0,pos.z);
      pNorm tact_dir = mn(axis,pos);
      float pen_dist = tact_dir.magnitude - platform_xrad;
      pen_dists[v] = pen_dist;
      if ( pos.z < platform_zmin || pos.z > platform_zmax )
        {
          if ( pen_dist > 0 ) outside_under |= v_bit;
          continue;
        }
      set_min(min_pd,pen_dist);
      set_max(max_pd,pen_dist);
      if ( pen_dist > 1 ) continue;
      inside |= v_bit;
      if ( pen_dist <= 0 ) continue;
      CUDA_SectTT* sect = &psects[ps_next++];
      sect->start = pos;
      sect->dir = tact_dir.v;
      sect->pen_dist = pen_dist;
    }

  bool object_inside = max_pd < -min_pd;
  if ( !object_inside ) return;

  // Examine vertices that are off the edge of the platform (in the
  // z direction), to see if an adjoining edge intersects the platform
  // edge.
  //
  for ( int v=0; v<8; v++ )
    {
      int v_bit = 1 << v;
      if ( ! ( v_bit & outside_under ) ) continue;

      // Outside Vertex (beyond z_max or z_min).
      //
      pCoor pos = box_get_vertices(box,v);
      float pen_dist_out = pen_dists[v];
      float v_z = pos.z;
      float ref_z = v_z >= platform_zmax ? platform_zmax : platform_zmin;
      float outside_z_len = fabs(v_z - ref_z);

      // Look for adjoining vertices that are over the platform.
      //
      for ( int axis = 0; axis < 3; axis++ )
        {
          int vn = v ^ ( 1 << axis );  // Neighbor along this box axis.
          int vn_bit = 1 << vn;
          if ( ! ( inside & vn_bit ) ) continue;
          float pen_len = pen_dists[vn] - pen_dist_out;

          // Inside Vertex
          pCoor pos_in = box_get_vertices(box,vn);

          // Compute the contact point at penetration distance.
          //
          float z_len = fabs(v_z - pos_in.z);
          if ( z_len < 0.0001f ) continue;  // Edge parallel to z edge.
          float scale = outside_z_len / z_len;
          pVect to_inside = mv(pos,pos_in);
          pCoor tact = pos + scale * to_inside;
          float pen_tact = pen_dist_out + scale * pen_len;
          if ( pen_tact <= 0 ) continue;
          CUDA_SectTT* sect = &psects[ps_next++];
          sect->start = tact;
          sect->pen_dist = pen_tact;
          pNorm dir = mn(cross(to_inside,mv(-tact.y,tact.x,0)));
          sect->dir = pen_len >= 0
            ? normalize(mv(tact.x,tact.y,0)) : dir.v;
        }
    }

  //  if ( ps_next > 0 ) phys.contact_count++;

  // First loop: normal (spring) impulses at each contact.
  for ( int i=0; i<ps_next; i++ )
    {
      CUDA_SectTT *sect = &psects[i];
      pCoor pos = sect->start;
      pVect tact_dir = sect->dir;
      pNorm ctopos = mn(box.position,pos);  // NOTE(review): unused.
      pVect vel = box_get_vel(box,pos);
      float pen_dist = sect->pen_dist;
      float rad_vel = dot(vel,tact_dir);
      double loss_factor = 1 - opt_bounce_loss;
      float force_dt_no_loss = elasticity_inv_dt * pen_dist;
      // Cap the rebound impulse at the amount that stops the inward
      // motion; apply bounce loss on the rebound phase.
      float max_fdt_in = rad_vel / box.mass_inv;
      float appr_force_dt = rad_vel > 0 ?
        min(max_fdt_in,force_dt_no_loss) : force_dt_no_loss * loss_factor;
      box_apply_force_dt(box,pos, - appr_force_dt * tact_dir );
    }

  // Second loop: friction impulses, using velocities already updated
  // by the normal impulses above.
  for ( int i=0; i<ps_next; i++ )
    {
      CUDA_SectTT *sect = &psects[i];
      pCoor pos = sect->start;
      pVect tact_dir = sect->dir;
      float pen_dist = sect->pen_dist;
      float force_dt_no_loss = elasticity_inv_dt * pen_dist;
      pVect vel2 = box_get_vel(box,pos);
      float rad_vel2 = dot(vel2,tact_dir);
      pNorm tan_vel = mn( vel2 - rad_vel2 * tact_dir );
      float mi_inv = box_get_moment_of_inertia_inv(box,pos,tan_vel);
      // Limit so friction can stop, but not reverse, tangential motion.
      float fdt_limit = tan_vel.magnitude / ( box.mass_inv + mi_inv );
      float fric_force_dt_no_loss = force_dt_no_loss * opt_friction_coeff;
      float fric_force_dt = min(fdt_limit, fric_force_dt_no_loss);
      box_apply_force_fric_dt(box,pos, tan_vel, -fric_force_dt);
    }
}

__device__ void
platform_collision(CUDA_Phys_W& phys)
{
  /// Check if ball in contact with platform, if so apply forces.

  CUDA_Ball_W& ball = phys.ball;
  pCoor pos = ball.position;
  const float r = ball.radius;

  // Quick bounding-box rejection.
  bool collision_possible =
    pos.y < r
    && pos.x >= platform_xmin - r && pos.x <= platform_xmax + r
    && pos.z >= platform_zmin - r && pos.z <= platform_zmax + r;

  if ( !collision_possible ) return;

  CUDA_Ball_W pball;
  pCoor axis = mc(platform_xmid,0,pos.z);
  const float short_xrad = platform_xrad - r;
  const float short_xrad_sq = short_xrad * short_xrad;
  const float long_xrad = platform_xrad + r;
  const float long_xrad_sq = long_xrad * long_xrad;

  // Test for different ways ball can touch platform. If contact
  // is found find position of an artificial platform ball (pball)
  // that touches the real ball at the same place and angle as
  // the platform. This pball will be used for the ball-ball penetration
  // routine, penetration_balls_resolve.

  if ( pos.y > 0 )
    {
      // Possible contact with upper edge of platform.
      //
      pCoor tact =
        mc(pos.x > platform_xmid ? platform_xmax : platform_xmin, 0, pos.z);
      pNorm tact_dir = mn(pos,tact);
      if ( tact_dir.mag_sq >= r * r ) return;
      pball.position = tact + r * tact_dir;
    }
  else if ( pos.z > platform_zmax || pos.z < platform_zmin )
    {
      // Possible contact with side (curved) edges of platform.
      //
      pNorm ball_dir = mn(axis,pos);
      if ( ball_dir.mag_sq <= short_xrad_sq ) return;
      const float zedge =
        pos.z > platform_zmax ? platform_zmax : platform_zmin;
      pCoor axis_edge = mc(platform_xmid,0,zedge);
      pCoor tact = axis_edge + platform_xrad * ball_dir;
      pNorm tact_dir = mn(pos,tact);
      if ( tact_dir.mag_sq >= r * r ) return;
      pball.position = tact + r * tact_dir;
    }
  else
    {
      // Possible contact with surface of platform.
      //
      pNorm tact_dir = mn(axis,pos);
      if ( tact_dir.mag_sq <= short_xrad_sq
           || tact_dir.mag_sq >= long_xrad_sq ) return;
      pball.position = axis +
        ( platform_xrad +
          ( tact_dir.magnitude < platform_xrad ? r : -r ) ) * tact_dir;
    }

  // Finish initializing platform ball, and call routine to
  // resolve penetration.
  //
  pVect zero_vec = mv(0,0,0);
  pball.omega = zero_vec;
  pball.prev_velocity = pball.velocity = zero_vec;
  pball.radius = ball.radius;
  pball.mass_inv = ball.mass_inv;
  if ( penetration_balls_resolve(phys.ball,pball,false,FT_All) )
    phys.contact_count++;
}

/// Compute Phys Proximity Pairs

// Mapping from z-sort index to ball array index.
__constant__ int *z_sort_indices;

// Pre-computed z_max values.
__constant__ float *z_sort_z_max;

// Computed proximity values, sent to CPU.
__constant__ int64_t *cuda_prox;

// An array that can be used to pass values back to the CPU for
// use in debugging.
__constant__ float3 *pass_sched_debug;

// Texture references over the ball position and velocity arrays
// (legacy texture-reference API; bound in pass_sched_launch).
texture<float4> balls_pos_tex;
texture<float4> balls_vel_tex;

__global__ void pass_sched(int ball_count, float lifetime_delta_t);
__device__ float ball_min_z_get
(float3 position, float3 velocity, float radius, float lifetime_delta_t);

// Bind the position/velocity device arrays to textures and launch the
// proximity-scheduling kernel. Returns false if either binding yields
// a non-zero offset (unusable binding).
__host__ bool
pass_sched_launch
(dim3 dg, dim3 db, int ball_count, float lifetime_delta_t,
 void *pos_array_dev, void *vel_array_dev)
{
  size_t offset;
  const size_t size = ball_count * sizeof(float4);
  const hipChannelFormatDesc fd =
    hipCreateChannelDesc(32,32,32,32,hipChannelFormatKindFloat);
  hipBindTexture(&offset, balls_pos_tex, pos_array_dev, fd, size);
  if ( offset ) return false;
  hipBindTexture(&offset, balls_vel_tex, vel_array_dev, fd, size);
  if ( offset ) return false;
  hipLaunchKernelGGL(( pass_sched), dim3(dg),dim3(db), 0, 0, ball_count,lifetime_delta_t);
  return true;
}

__global__ void
pass_sched(int ball_count, float lifetime_delta_t)
{
  // Determine which balls that are in proximity to a ball. This
  // routine only works for balls, if a tile is found an I-give-up
  // value is returned, and the CPU will have to determine proximity.
  //
  // One object per thread, walking backwards through the z-sorted
  // order until no earlier object can possibly be close enough.

  const int idx_base = blockIdx.x * blockDim.x;

  // idx9 is an index into z-sorted arrays.
  const int idx9 = idx_base + threadIdx.x;

  if ( idx9 >= ball_count ) return;

  // bidx9 is an index into the balls arrays.
  const int bidx9 = z_sort_indices[idx9];

  // If bidx9 is negative then Phys at index bidx9 is not a ball,
  // so just return a give-up code 't' (tile).
  if ( bidx9 < 0 ) { cuda_prox[idx9] = ( 't' << 8 ) | 0xff; return; }

  // Fetch position, radius (packed in position vector), and velocity.
  //
  const float4 pos_rad9 = tex1Dfetch(balls_pos_tex,bidx9);
  const float3 pos9 = xyz(pos_rad9);
  const float radius9 = pos_rad9.w;
  const float4 vel9_pad = tex1Dfetch(balls_vel_tex,bidx9);
  const float3 vel9 = xyz(vel9_pad);
  const float z_min = ball_min_z_get(pos9,vel9,radius9,lifetime_delta_t);

  // Number of nearby balls.
  int proximity_cnt = 0;

  // Reason for giving up, 0 means we didn't give up (yet).
  char incomplete = 0;

  // The list of balls in proximity, packed into a single integer.
  Prox_Offsets offsets = 0;

  for ( int idx1 = idx9-1; !incomplete && idx1 >= 0; idx1-- )
    {
      const float z_max = z_sort_z_max[idx1];

      // Break if this and subsequent z-ordered balls could not
      // possibly be in proximity.
      if ( z_max < z_min ) break;

      const int bidx1 = z_sort_indices[idx1];

      // If there's a tile here give up.
      // (t is for tile)
      if ( bidx1 < 0 ) { incomplete = 't'; continue; }

      const float4 pos_rad = tex1Dfetch(balls_pos_tex,bidx1);
      const float3 pos1 = xyz(pos_rad);
      const float4 vel_pad1 = tex1Dfetch(balls_vel_tex,bidx1);
      const float3 vel1 = xyz(vel_pad1);
      const float radius1 = pos_rad.w;

      // Use the pNorm constructor to compute the distance between two
      // balls.
      pNorm dist = mn(pos1,pos9);

      // Balls are considered in proximity if they can be
      // this close over schedule lifetime.
      const float region_length_small = 1.11f * ( radius9 + radius1 );

      // Check if balls will be close enough over lifetime, assuming
      // worst-case relative motion toward each other.
      pVect delta_v = vel9 - vel1;
      const float delta_d = lifetime_delta_t * length(delta_v);
      const float dist2 = dist.magnitude - delta_d;
      if ( dist2 > region_length_small ) continue;

      // At this point the balls are considered in proximity, now
      // squeeze the value of bidx1 into eight bits by taking
      // the difference of z-sort indices, which should be close
      // together.
      const int offset = idx9 - idx1;

      // Ooops, exceeded the limit on the number of proximities.
      // (f is for full)
      if ( proximity_cnt >= cuda_prox_per_ball ) incomplete = 'f';

      // Ooops, the offset won't fit into 8 bits.
      // (o is for overflow)
      else if ( offset >= 255 ) incomplete = 'o';

      // Everything is fine, slide the offset on to the list.
      else offsets = ( offsets << 8 ) | offset;

      proximity_cnt++;
    }

  // If code could not compute all proximities replace offsets with
  // the error code.
  if ( incomplete ) offsets = ( incomplete << 8 ) | 0xff;

  cuda_prox[idx9] = offsets;
}

// Conservative lower bound on the object's sort key over the schedule
// lifetime, used to terminate the backwards scan above.
// NOTE(review): the bound includes position.x as well as position.z,
// which suggests the z-sort key is actually x+z (and the l1-norm speed
// bound `m` matches that) — confirm against the CPU-side sort before
// changing anything here.
__device__ float
ball_min_z_get
(float3 position, float3 velocity, float radius, float lifetime_delta_t)
{
  const float m =
    fabs(velocity.x) + fabs(velocity.y) + fabs(velocity.z);
  const float z_min =
    position.z + position.x - m * lifetime_delta_t - 2 * radius;
  return z_min;
}

// Copy the addresses of this file's __constant__ symbols to host-side
// bookkeeping (CU_SYM is a host-side macro). Must run before kernels
// are launched; called from cuda_get_attr_plat_pairs.
static __host__ void
collect_symbols()
{
  CU_SYM(balls_x);
  CU_SYM(block_balls_needed);
  CU_SYM(tacts_schedule);
  CU_SYM(xx_pairs);
  CU_SYM(xx_sects_center);
  CU_SYM(xx_sects_dir);
  CU_SYM(xx_sects_debug);
  CU_SYM(gravity_accel_dt);
  CU_SYM(opt_bounce_loss);
  CU_SYM(opt_bounce_loss_box);
  CU_SYM(opt_friction_coeff);
  CU_SYM(opt_friction_roll);
  CU_SYM(opt_air_resistance);
  CU_SYM(opt_platform_curved);
  CU_SYM(platform_xmin);
  CU_SYM(platform_xmax);
  CU_SYM(platform_zmin);
  CU_SYM(platform_zmax);
  CU_SYM(platform_xmid);
  CU_SYM(platform_xrad);
  CU_SYM(delta_t);
  CU_SYM(elasticity_inv_dt);
  CU_SYM(opt_debug);
  CU_SYM(opt_debug2);
  CU_SYM(wheel);
  CU_SYM(z_sort_indices);
  CU_SYM(z_sort_z_max);
  CU_SYM(cuda_prox);
  CU_SYM(pass_sched_debug);
}
5f59a3b5421cb4164273dbfbb9a0804c07906c84.cu
/// LSU EE X70X-X (Fall 2019), GPU Programming // /// CUDA code for computing intersections and time-stepping physics model. // $Id:$ /// Purpose // // Demonstrate Several Graphical and Simulation Techniques. // This file contains GPU/cuda code. // See demo-x-collide.cc for main program. #include <gp/cuda-util-kernel.h> #include "k-main.cuh" /// /// Variables Read or Written By With Host Code /// /// Ball Information Structure // // This is in soa (structure of arrays) form, rather than // in the programmer-friendly aos (array of structure) form. // In soa form it is easier for multiple thread to read contiguous // blocks of data. // __constant__ CUDA_Ball_X balls_x; /// /// Ball Contact (tact) Pair Information /// /// Balls needed by block. // // This array identifies those balls that will be used by each block // during each contact pass. When a thread starts balls are placed in // shared memory, then contact between a pair of balls is tested for // and resolved. // __constant__ int *block_balls_needed; /// Shared memory array holding balls updated cooperating threads in a block. 
#undef USE_STRUCT #ifdef USE_STRUCT extern __shared__ CUDA_Phys_W sm_balls[]; #else extern __shared__ float3 sm_balls[]; __shared__ uchar4 sm_balls_misc[300]; #endif /// Pairs of Balls to Check // __constant__ SM_Idx2 *tacts_schedule; /// Box/Box Intersect // __constant__ XX_Pair *xx_pairs; __constant__ float4 *xx_sects_center; __constant__ float4 *xx_sects_dir; __constant__ float4 *xx_sects_debug; __constant__ float3 gravity_accel_dt; __constant__ float opt_bounce_loss, opt_bounce_loss_box; __constant__ float opt_friction_coeff, opt_friction_roll; __constant__ float opt_air_resistance; __constant__ bool opt_platform_curved; __constant__ float platform_xmin, platform_xmax; __constant__ float platform_zmin, platform_zmax; __constant__ float platform_xmid, platform_xrad; __constant__ float delta_t; __constant__ float elasticity_inv_dt; __constant__ bool opt_debug, opt_debug2; __constant__ CUDA_Wheel wheel; extern __shared__ float block_torque_dt[]; static __host__ void collect_symbols(); /// /// Useful Functions and Types /// typedef float3 pCoor; typedef float3 pVect; __device__ float3 make_float3(float4 f4){return make_float3(f4.x,f4.y,f4.z);} __device__ float3 m3(float4 a){ return make_float3(a); } __device__ float3 xyz(float4 a){ return m3(a); } __device__ float4 m4(float3 v, float w) { return make_float4(v.x,v.y,v.z,w); } __device__ pVect operator +(pVect a,pVect b) { return make_float3(a.x+b.x,a.y+b.y,a.z+b.z); } __device__ pVect operator -(pVect a,pVect b) { return make_float3(a.x-b.x,a.y-b.y,a.z-b.z); } __device__ pVect operator -(float4 a,float4 b) { return make_float3(a.x-b.x,a.y-b.y,a.z-b.z); } __device__ pVect operator -(pCoor a,float4 b) { return make_float3(a.x-b.x,a.y-b.y,a.z-b.z); } __device__ pVect operator *(float s, pVect v) {return make_float3(s*v.x,s*v.y,s*v.z);} __device__ float4 operator *(float s, float4 v) {return make_float4(s*v.x,s*v.y,s*v.z,s*v.w);} __device__ pVect operator *(pVect u, pVect v) {return 
make_float3(u.x*v.x,u.y*v.y,u.z*v.z);} __device__ pVect operator -(pVect v) { return make_float3(-v.x,-v.y,-v.z); } __device__ float3 operator -=(float3& a, pVect b) {a = a - b; return a;} __device__ float3 operator +=(float3& a, pVect b) {a = a + b; return a;} struct pNorm { pVect v; float mag_sq, magnitude; }; __device__ pVect operator *(float s, pNorm n) { return s * n.v;} // Make a Coordinate __device__ pCoor mc(float x, float y, float z){ return make_float3(x,y,z); } __device__ pCoor mc(float4 c){ return make_float3(c.x,c.y,c.z); } __device__ void set_f3(float3& a, float4 b){a.x = b.x; a.y = b.y; a.z = b.z;} __device__ void set_f4(float4& a, float3 b) {a.x = b.x; a.y = b.y; a.z = b.z; a.w = 1;} __device__ void set_f4(float4& a, float3 b, float c) {a.x = b.x; a.y = b.y; a.z = b.z; a.w = c;} // Make a Vector __device__ pVect mv(float x, float y, float z){ return make_float3(x,y,z); } __device__ pVect mv(float3 a, float3 b) { return b-a; } __device__ pVect mv(float a) { return make_float3(a,a,a); } __device__ float dot(float4 a, float4 b) { return a.x*b.x + a.y*b.y + a.z*b.z + a.w*b.w;} __device__ float dot(pVect a, pVect b){ return a.x*b.x + a.y*b.y + a.z*b.z;} __device__ float dot(pVect a, pNorm b){ return dot(a,b.v); } __device__ float dot(pNorm a, pVect b){ return dot(a.v,b); } __device__ float dot3(float4 a, float4 b){ return dot(m3(a),m3(b)); } __device__ float mag_sq(pVect v){ return dot(v,v); } __device__ float length(pVect a) {return sqrtf(mag_sq(a));} __device__ pVect normalize(pVect a) { return rsqrtf(mag_sq(a))*a; } // Make a Normal (a structure containing a normalized vector and length) __device__ pNorm mn(pVect v) { pNorm n; n.mag_sq = mag_sq(v); if ( n.mag_sq == 0 ) { n.magnitude = 0; n.v.x = n.v.y = n.v.z = 0; } else { n.magnitude = sqrtf(n.mag_sq); n.v = (1.0f/n.magnitude) * v; } return n; } __device__ pNorm mn(float4 a, float4 b) {return mn(b-a);} __device__ pNorm mn(pCoor a, pCoor b) {return mn(b-a);} __device__ pNorm mn(float x, float y, float 
z) {return mn(mv(x,y,z));} __device__ pNorm mn(float4 v4) { pNorm n; n.v = m3(v4); n.magnitude = v4.w; return n; } __device__ pNorm mn(float3 v3, float mag) { pNorm n; n.v = v3; n.magnitude = mag; return n; } // The unary - operator doesn't seem to work when used in an argument. __device__ pNorm operator -(pNorm n) { pNorm m; m.magnitude = n.magnitude; m.mag_sq = n.mag_sq; m.v = -n.v; return m; } struct pQuat { float w; pVect v; }; // Make Quaternion __device__ float4 mq(pNorm axis, float angle) { return m4( __sinf(angle/2) * axis.v, __cosf(angle/2) ); } __device__ float4 quat_normalize(float4 q) { float len_sq = dot(q,q); float norm_factor = 1.0f / sqrtf(len_sq); return norm_factor * q; } // Make float4 __device__ float4 m4(pQuat q){ return make_float4(q.v.x,q.v.y,q.v.z,q.w); } __device__ float4 m4(pNorm v, float w) { return m4(v.v,w); } __device__ pVect fabs(pVect v){ return mv(fabs(v.x),fabs(v.y),fabs(v.z)); } __device__ float min(pVect v){ return min(min(v.x,v.y),v.z); } __device__ float max(pVect v){ return max(max(v.x,v.y),v.z); } __device__ float sum(pVect v){ return v.x+v.y+v.z; } // Cross Product of Two Vectors __device__ float3 cross(float3 a, float3 b) { return make_float3 ( a.y * b.z - a.z * b.y, a.z * b.x - a.x * b.z, a.x * b.y - a.y * b.x ); } __device__ pVect cross(pVect a, pNorm b){ return cross(a,b.v); } __device__ pVect cross(pNorm a, pVect b){ return cross(a.v,b); } __device__ pVect crossf3(float4 a, float4 b) { return cross(m3(a),m3(b)); } // Cross Product of Vectors Between Coordinates __device__ float3 cross3(float3 a, float3 b, float3 c) { float3 ab = a - b; float3 cb = c - b; return cross(ab,cb); } __device__ pVect cross3(pVect a, pVect b, pNorm c) { return cross3(a,b,c.v); } __device__ float4 quat_mult(float4 a, float4 b) { float w = a.w * b.w - dot3(a,b); float3 v = a.w * m3(b) + b.w * m3(a) + crossf3(a,b); return make_float4(v.x,v.y,v.z,w); }; __device__ void pMatrix_set_rotation(pcMatrix3x3& m, pVect u, float theta) { const float 
cos_theta = __cosf(theta); const float sin_theta = sqrtf(1.0f - cos_theta * cos_theta ); m.r0.x = u.x * u.x + cos_theta * ( 1 - u.x * u.x ); m.r0.y = u.x * u.y * ( 1 - cos_theta ) - u.z * sin_theta; m.r0.z = u.z * u.x * ( 1 - cos_theta ) + u.y * sin_theta; m.r1.x = u.x * u.y * ( 1 - cos_theta ) + u.z * sin_theta; m.r1.y = u.y * u.y + cos_theta * ( 1 - u.y * u.y ); m.r1.z = u.y * u.z * ( 1 - cos_theta ) - u.x * sin_theta; m.r2.x = u.z * u.x * ( 1 - cos_theta ) - u.y * sin_theta; m.r2.y = u.y * u.z * ( 1 - cos_theta ) + u.x * sin_theta; m.r2.z = u.z * u.z + cos_theta * ( 1 - u.z * u.z ); } __device__ float3 operator *(pcMatrix3x3 m, float3 coor) { return make_float3(dot(m.r0,coor), dot(m.r1,coor), dot(m.r2,coor)); } // /// Ball Physics Functions // // See demo-x-collide.cc for details. __device__ pVect point_rot_vel(float3 omega, float r, pNorm direction) { /// Return velocity of point on surface of sphere of radius r. // return r * cross( omega, direction ); } __device__ float get_fdt_to_do(float r, float mass_inv) { return 2.5f * mass_inv / r; } __device__ float3 tan_force_dt (pNorm tact_dir, float3 force_dt, float fdt_to_do) { /// Change rotation rate due to force_dt at tact_dir in direction force_dir. // return cross(tact_dir, fdt_to_do * force_dt ); } /// /// Major Ball Physics Routines /// // A time step is computed using two kernels, pass_pairs and // pass_platform. The pass_pairs kernel, which might be launched // several times, handles collisions between balls. The pass_platform // kernel handles collision between balls and the platform, and also // updates position and orientation, and spins the wheel. __device__ bool tile_ball_collide (CUDA_Tile_W& tile, CUDA_Ball_W& ball, pCoor& tact_pos, pVect& tact_dir) { // If tile in contact with ball return true and write contact // point on tile to tact_pos and ball-center-to-tact-pos direction // to tact_dir. pVect tile_to_ball = mv(tile.pt_ll,ball.position); // Distance from tile's plane to the ball. 
const float dist = dot(tile_to_ball,tile.normal); const float radius = ball.radius; if ( fabs(dist) > radius ) return false; // The closest point on tile plane to the ball. pCoor pt_closest = ball.position - dist * tile.normal; // How far up the tile in the y direction the center of the ball sits const float dist_ht = dot(tile.norm_up,tile_to_ball); if ( dist_ht < -radius ) return false; if ( dist_ht > tile.height + radius ) return false; // How far up the tile in the x direction the center of the ball sits const float dist_wd = dot(tile.norm_rt,tile_to_ball); if ( dist_wd < -radius ) return false; if ( dist_wd > tile.width + radius ) return false; // If ball touching tile surface (not including an edge or corner) // then set up the pseudo ball for collision handling if ( dist_ht >= 0 && dist_ht <= tile.height && dist_wd >= 0 && dist_wd <= tile.width ) { tact_pos = pt_closest; tact_dir = dist > 0 ? -tile.normal : tile.normal; return true; } float3 pt_lr = tile.pt_ll + tile.width * tile.norm_rt; float3 pt_ul = tile.pt_ll + tile.height * tile.norm_up; float3 pt_ur = pt_lr + tile.height * tile.norm_up; // Test whether the ball is touching a corner if ( ( dist_ht < 0 || dist_ht > tile.height ) && ( dist_wd < 0 || dist_wd > tile.width) ) { pCoor ref_pt; // We need to place the pseudo ball based upon the vector from // ball position to the corner. First step is to figure out which // corner. if ( dist_ht < 0 && dist_wd < 0 ) { ref_pt = tile.pt_ll; } else if ( dist_ht < 0 && dist_wd > tile.width ) { ref_pt = pt_lr; } else if ( dist_ht > tile.height && dist_wd < 0 ) { ref_pt = pt_ul; } else { ref_pt = pt_ur; } tact_pos = ref_pt; tact_dir = normalize(mv(ball.position,ref_pt)); return true; } // Else the ball is touching an edge const bool tact_horiz = dist_ht < 0 || dist_ht > tile.height; const pVect corner_to_tact = tact_horiz ? dist_wd * tile.norm_rt : dist_ht * tile.norm_up; const pCoor ref_pt = tact_horiz ? ( dist_ht < 0 ? tile.pt_ll : pt_ul ) : ( dist_wd < 0 ? 
tile.pt_ll : pt_lr ); // Find the closest edge point of the tile to the ball tact_pos = ref_pt + corner_to_tact; tact_dir = normalize(mv(ball.position,tact_pos)); return true; } __device__ void wheel_collect_tile_force(CUDA_Tile_W& tile, pCoor tact, pVect delta_mo) { pVect to_center = mv(wheel.center,tact); // Formula below needs to be checked. const float torque_dt = dot(wheel.axis_dir,cross(to_center,delta_mo)); tile.torque += torque_dt; } /// /// Collision (Penetration) Detection and Resolution Routines /// // Used in both passes. __device__ bool penetration_balls_resolve (CUDA_Ball_W& ball1, CUDA_Ball_W& ball2, bool b2_real, Force_Types ft) { /// Update velocity and angular momentum for a pair of balls in contact. // Later, separate friction and other forces. if ( ft == FT_Friction ) return false; pVect zero_vec = mv(0,0,0); pNorm dist = mn(ball1.position,ball2.position); float3 v1 = ball1.velocity; float3 v2 = ball2.velocity; float3 omega1 = ball1.omega; float3 omega2 = ball2.omega; const float mass_inv1 = ball1.mass_inv; const float mass_inv2 = ball2.mass_inv; const float r1 = ball1.radius; const float r2 = ball2.radius; const float radii_sum = r1 + r2; if ( dist.magnitude >= radii_sum ) return false; /// WARNING: This doesn't work: somefunc(-dist); pNorm ndist = -dist; // Compute relative (approach) velocity. // pVect prev_appr_vel = ball1.prev_velocity - ball2.prev_velocity; const float prev_approach_speed = dot( prev_appr_vel, dist ); const float loss_factor = 1 - opt_bounce_loss; // Compute change in speed based on how close balls touching, ignoring // energy loss. // const float appr_force_dt_no_loss = ( radii_sum - dist.magnitude ) * ( radii_sum - dist.magnitude ) * elasticity_inv_dt; // Change in speed accounting for energy loss. Only applied when // balls separating. // const float appr_force_dt = prev_approach_speed > 0 ? 
appr_force_dt_no_loss : loss_factor * appr_force_dt_no_loss; const float appr_deltas_1 = appr_force_dt * mass_inv1; /// Update Linear Velocity // v1 -= appr_deltas_1 * dist; if ( b2_real ) v2 += appr_force_dt * mass_inv2 * dist; const float fdt_to_do_1 = get_fdt_to_do(r1,mass_inv1); const float fdt_to_do_2 = get_fdt_to_do(r2,mass_inv2); // Find speed on surface of balls at point of contact. // pVect tact1_rot_vel = point_rot_vel(omega1,r1,dist); pVect tact2_rot_vel = point_rot_vel(omega2,r2,ndist); // Find relative velocity of surfaces at point of contact // in the plane formed by their surfaces. // pVect tan_vel = prev_appr_vel - prev_approach_speed * dist; pNorm tact_vel_dir = mn(tact1_rot_vel - tact2_rot_vel + tan_vel); // Find change in velocity due to friction. // const float fric_force_dt_potential = appr_force_dt_no_loss * opt_friction_coeff; const float mass_inv_sum = b2_real ? mass_inv1 + mass_inv2 : mass_inv1; const float force_dt_limit = tact_vel_dir.magnitude / ( 3.5f * mass_inv_sum ); // If true, surfaces are not sliding or will stop sliding after // frictional forces applied. (If a ball surface isn't sliding // against another surface than it must be rolling.) // const bool will_roll = force_dt_limit <= fric_force_dt_potential; const float sliding_fric_force_dt = will_roll ? force_dt_limit : fric_force_dt_potential; const float dv_tolerance = 0.000001f; const float sliding_fric_dv_1 = sliding_fric_force_dt * mass_inv1; const float3 sliding_fric_fdt_vec = sliding_fric_force_dt * tact_vel_dir; if ( sliding_fric_dv_1 > dv_tolerance ) { // Apply tangential force (resulting in angular momentum change) and // linear force (resulting in velocity change). // omega1 += tan_force_dt(dist, sliding_fric_fdt_vec, -fdt_to_do_1); v1 -= sliding_fric_dv_1 * tact_vel_dir; } const float sliding_fric_dv_2 = sliding_fric_force_dt * mass_inv2; if ( b2_real && sliding_fric_dv_2 > dv_tolerance ) { // Apply frictional forces for ball 2. 
// omega2 += tan_force_dt(ndist, sliding_fric_fdt_vec, fdt_to_do_2); v2 += sliding_fric_dv_2 * tact_vel_dir;; } { /// Torque // // // Account for forces of surfaces twisting against each // other. (For example, if one ball is spinning on top of // another.) // const float appr_omega = dot(omega2,dist) - dot(omega1,dist); const float fdt_to_do_sum = b2_real ? fdt_to_do_1 + fdt_to_do_2 : fdt_to_do_1; const float fdt_limit = fabs(appr_omega) / fdt_to_do_sum; const bool rev = appr_omega < 0; const float fdt_raw = min(fdt_limit,fric_force_dt_potential); const pVect fdt_v = ( rev ? -fdt_raw : fdt_raw ) * dist; omega1 += fdt_to_do_1 * fdt_v; if ( b2_real ) omega2 -= fdt_to_do_2 * fdt_v; } ball1.velocity = v1; ball1.omega = omega1; if ( !b2_real ) return true; ball2.velocity = v2; ball2.omega = omega2; const bool skip_rolling_friction = true; if ( skip_rolling_friction ) return true; #if 0 { /// Rolling Friction // // The rolling friction model used here is ad-hoc. pVect tan_b12_vel = b2_real ? 
0.5f * tan_vel : zero_vec; const float torque_limit_sort_of = appr_force_dt_no_loss * sqrt( radii_sum - dist.mag_sq / radii_sum ); // * sqrt( ball1.radius - 0.25 * dist.mag_sq * r_inv ); pVect tact1_rot_vel = point_rot_vel(omega1,r1,dist); pVect tact1_roll_vel = tact1_rot_vel + tan_b12_vel; pNorm tact1_roll_vel_dir = mn(tact1_roll_vel); pVect lost_vel = zero_vec; const float rfric_loss_dv_1 = torque_limit_sort_of * 2.5f * mass_inv1 * ( tact1_roll_vel_dir.magnitude * opt_friction_roll / ( 1 + tact1_roll_vel_dir.magnitude * opt_friction_roll ) ); pVect lost_vel1 = min(tact1_roll_vel_dir.magnitude, rfric_loss_dv_1) * tact1_roll_vel_dir; lost_vel = -lost_vel1; if ( b2_real ) { pVect tact2_rot_vel = point_rot_vel(omega2,r2,ndist); pVect tact2_roll_vel = tact2_rot_vel - tan_b12_vel; pNorm tact2_roll_vel_dir = mn(tact2_roll_vel); const float rfric_loss_dv_2 = torque_limit_sort_of * 2.5f * mass_inv2 * ( tact2_roll_vel_dir.magnitude * opt_friction_roll / ( 1 + tact2_roll_vel_dir.magnitude * opt_friction_roll ) ); pVect lost_vel2 = min(tact2_roll_vel_dir.magnitude, rfric_loss_dv_2 ) * tact2_roll_vel_dir; lost_vel += lost_vel2; } omega1 += tan_force_dt(dist, 0.4f / mass_inv1 * lost_vel, fdt_to_do_1); if ( b2_real ) omega2 += tan_force_dt(dist, 0.4f / mass_inv2 * lost_vel, fdt_to_do_2); } return true; #endif } // // Generic operations used by box code. // __device__ float3 sign_mask(int idx, float3 v) { return make_float3 (idx & 4 ? v.x : -v.x, idx & 2 ? v.y : -v.y, idx & 1 ? v.z : -v.z ); } // Multiply transpose of matrix m by column vector v. __device__ float3 mm_transpose(pcMatrix3x3 m, float3 v) { return v.x * m.r0 + v.y * m.r1 + v.z * m.r2; } __device__ float set_min(float &a, float b) { if ( b < a ) a = b; return a; } __device__ float set_max(float &a, float b) { if ( b > a ) a = b; return a; } // Set matrix m to a rotation matrix based on quaternion q. 
__device__ void pMatrix_set_rotation(pcMatrix3x3& m, float4 q) { m.r0.x = 1.f - 2.f * q.y * q.y - 2.f * q.z * q.z; m.r0.y = 2.f * q.x * q.y - 2.f * q.w * q.z; m.r0.z = 2.f * q.x * q.z + 2.f * q.w * q.y; m.r1.x = 2.f * q.x * q.y + 2.f * q.w * q.z; m.r1.y = 1.f - 2.f * q.x * q.x - 2.f * q.z * q.z; m.r1.z = 2.f * q.y * q.z - 2.f * q.w * q.x; m.r2.x = 2.f * q.x * q.z - 2.f * q.w * q.y; m.r2.y = 2.f * q.y * q.z + 2.f * q.w * q.x; m.r2.z = 1.f - 2.f * q.x * q.x - 2.f * q.y * q.y; } // Set transpose of matrix m to a rotation matrix based on quaternion q. __device__ void pMatrix_set_rotation_transpose(pcMatrix3x3& m, float4 q) { m.r0.x = 1.f - 2.f * q.y * q.y - 2.f * q.z * q.z; m.r1.x = 2.f * q.x * q.y - 2.f * q.w * q.z; m.r2.x = 2.f * q.x * q.z + 2.f * q.w * q.y; m.r0.y = 2.f * q.x * q.y + 2.f * q.w * q.z; m.r1.y = 1.f - 2.f * q.x * q.x - 2.f * q.z * q.z; m.r2.y = 2.f * q.y * q.z - 2.f * q.w * q.x; m.r0.z = 2.f * q.x * q.z - 2.f * q.w * q.y; m.r1.z = 2.f * q.y * q.z + 2.f * q.w * q.x; m.r2.z = 1.f - 2.f * q.x * q.x - 2.f * q.y * q.y; } // // Box operations. // struct pLine { __device__ pLine() {}; __device__ pLine(pCoor s, pVect d, float l):start(s),dir(d),len(l){}; pCoor start; pVect dir; float len; }; __device__ int8_t get_edge_vtx_idx(int edge) { // Index: xyz (z is LSB). #if 1 const int axis = edge >> 2; const int mask = 0xc >> axis; const int face_vtx = edge & 3; const int box_vtx_check = ( face_vtx & mask ) + face_vtx; return box_vtx_check; #else static const int8_t bi[12] = { 0, 1, 2, 3, 0, 1, 4, 5, 0, 2, 4, 6 }; return bi[edge]; #endif } __device__ float3 box_get_vertices(CUDA_Box_W& box, int vertex) { return box.position + mm_transpose(box.rot_inv,sign_mask(vertex,box.to_111)); } __device__ float3 box_get_axis_norm(CUDA_Box_W& box, int axis) { return axis == 0 ? box.rot_inv.r0 : axis == 1 ? box.rot_inv.r1 : box.rot_inv.r2; } __device__ float3 box_get_face_norm(CUDA_Box_W& box, int face) { pVect norm_raw = box_get_axis_norm(box,face>>1); return face & 1 ? 
norm_raw : -norm_raw; } __device__ float box_get_axis_len(CUDA_Box_W& box, int axis) { return 2.0f * ( axis == 0 ? box.to_111.x : axis == 1 ? box.to_111.y : box.to_111.z ); } __device__ float box_get_axis_area(CUDA_Box_W& box, int d) { return 4 * ( d == 0 ? box.to_111.x * box.to_111.y : d == 1 ? box.to_111.z * box.to_111.x : box.to_111.y * box.to_111.z ); } __device__ pLine box_get_edge(CUDA_Box_W& box, int edge) { const int axis = edge >> 2; const int8_t box_vtx = get_edge_vtx_idx(edge); return pLine(box_get_vertices(box,box_vtx), box_get_axis_norm(box,axis), box_get_axis_len(box,axis)); } __device__ void box_set_mi_vec(CUDA_Box_W& box,float3 to_111) { pVect dsq = to_111 * to_111; float dsqs = dsq.x + dsq.y + dsq.z; float mass_factor = 1.0f / ( box.mass_inv * 3.0f ); box.mi_vec = mass_factor * ( mv(dsqs) - dsq ); } __device__ void box_set_mi_vec(CUDA_Box_W& box) { box_set_mi_vec(box,box.to_111); } __device__ float box_get_moment_of_inertia_inv(CUDA_Box_W& box, pNorm axis); __device__ float3 box_get_vel(CUDA_Box_W&box, float3 pos) { pVect cent_to_pt = mv(box.position,pos); pVect rot_vel = cross(box.omega,cent_to_pt); return rot_vel + box.velocity; } __device__ void box_geometry_update(CUDA_Box_W& box) { pMatrix_set_rotation_transpose(box.rot_inv, box.orientation); box_set_mi_vec(box); } __device__ void box_apply_force_dt(CUDA_Box_W& box, float3 tact, float3 force) { if ( box.mass_inv == 0 ) return; box.velocity += box.mass_inv * force; pVect cent_to_tact = mv(box.position,tact); pVect torque = cross(cent_to_tact,force); pNorm torqueN = mn(torque); float mi_inv = box_get_moment_of_inertia_inv(box,torqueN); box.omega += mi_inv * torque; } __device__ float box_get_moment_of_inertia_inv(CUDA_Box_W& box, pNorm axis) { if ( axis.mag_sq < 1e-11f || box.mass_inv == 0 ) return 0; pVect tl = box.rot_inv * axis.v; pVect tls = tl * tl; float mi = dot(tls,box.mi_vec); return 1.0f / mi; } __device__ float box_get_moment_of_inertia_inv(CUDA_Box_W& box, float3 tact, pNorm dir) { 
pVect cent_to_tact = mv(box.position,tact); pNorm torque_axis = mn(cross(cent_to_tact,dir)); return box_get_moment_of_inertia_inv(box,torque_axis); } __device__ void box_apply_force_fric_dt (CUDA_Box_W& box, float3 tact, pNorm force_dir, float force_mag_dt) { box_apply_force_dt(box,tact,force_mag_dt*force_dir); } __device__ CUDA_SectTT sect_init() { CUDA_SectTT sect; sect.exists = false; return sect; } #include "k-boxes.h" /// /// Pass Box/Box Intersect /// __global__ void pass_xx_intersect(int xx_pairs_count); __host__ void pass_xx_intersect_launch(dim3 dg, dim3 db, int xx_pairs_count) { const int shared_amt = 0; pass_xx_intersect<<<dg,db,shared_amt>>>(xx_pairs_count); } __device__ void penetration_boxes_resolve_force (CUDA_Box_W& box1, CUDA_Box_W& box2, float3 pos, pNorm sep_normal) { const float pen_dist = 0.1f * sep_normal.magnitude; pVect vel1 = box_get_vel(box1,pos); pVect vel2 = box_get_vel(box2,pos); pVect velto1 = vel2 - vel1; const float sep_vel = dot(velto1,sep_normal.v); const float loss_factor = 1 - opt_bounce_loss_box; const float force_dt_no_loss = elasticity_inv_dt * pen_dist; const bool separating = sep_vel >= 0; const float appr_force_dt = separating ? force_dt_no_loss * loss_factor : force_dt_no_loss; pVect sep_force = appr_force_dt * sep_normal.v; box_apply_force_dt(box1, pos, -sep_force ); box_apply_force_dt(box2, pos, sep_force ); } __device__ void penetration_boxes_resolve_fric (CUDA_Box_W& box1, CUDA_Box_W& box2, float3 pos, pNorm sep_normal) { const float pen_dist = 0.1f * sep_normal.magnitude; const float force_dt_no_loss = elasticity_inv_dt * pen_dist; const float fric_force_dt_potential = force_dt_no_loss * opt_friction_coeff; /// Torque // // // Account for forces of surfaces twisting against each // other. (For example, if one box is spinning on top of // another.) 
// const float appr_omega = dot(box2.omega,sep_normal) - dot(box1.omega,sep_normal); { const float mi1_inv = box_get_moment_of_inertia_inv(box1,sep_normal); const float mi2_inv = box_get_moment_of_inertia_inv(box2,sep_normal); const float fdt_limit = fabs(appr_omega) / ( mi1_inv + mi2_inv ); const bool rev = appr_omega < 0; const float fdt_raw = min(fdt_limit,fric_force_dt_potential); const pVect fdt_v = ( rev ? -fdt_raw : fdt_raw ) * sep_normal; box1.omega += mi1_inv * fdt_v; box2.omega -= mi2_inv * fdt_v; } pVect vel1b = box_get_vel(box1,pos); pVect vel2b = box_get_vel(box2,pos); pVect velto1b = vel2b - vel1b; const float sep_velb = dot(velto1b,sep_normal); pNorm tan_vel = mn(velto1b - sep_velb * sep_normal); const float fdt_limit = 0.5f * tan_vel.magnitude / ( box1.mass_inv + box2.mass_inv + box_get_moment_of_inertia_inv(box1,pos,tan_vel) + box_get_moment_of_inertia_inv(box2,pos,tan_vel) ); const float fric_force_dt = min(fdt_limit,fric_force_dt_potential); box_apply_force_fric_dt(box1,pos, tan_vel, fric_force_dt); box_apply_force_fric_dt(box2,pos, -tan_vel, fric_force_dt); } __device__ bool penetration_boxes_resolve (CUDA_Phys_W& phys1, CUDA_Phys_W& phys2, int tsidx, Force_Types ft) { /// Update velocity and angular momentum for a pair of boxes in contact. CUDA_Box_W& box1 = phys1.box; CUDA_Box_W& box2 = phys2.box; float4 dir_and_mag = xx_sects_dir[tsidx]; if ( dir_and_mag.w == 0 ) return false; float4 center_and_um = xx_sects_center[tsidx]; float3 center = m3(center_and_um); pNorm sep_normal = mn(dir_and_mag); if ( ft & FT_NonFriction ) penetration_boxes_resolve_force(box1,box2,center,sep_normal); if ( ft & FT_Friction ) penetration_boxes_resolve_fric(box1,box2,center,sep_normal); return true; } /// /// Pairs Pass /// // // Resolve ball collisions with each other. 
__global__ void pass_pairs (int prefetch_offset, int schedule_offset, int round_cnt, int max_balls_per_thread, int balls_per_block, Force_Types ft); __host__ void pass_pairs_launch (dim3 dg, dim3 db, int prefetch_offset, int schedule_offset, int round_cnt, int max_balls_per_thread, int balls_per_block, Force_Types ft) { #ifdef USE_STRUCT const int shared_amt = balls_per_block * sizeof(CUDA_Phys_W); #else const int shared_amt = balls_per_block * sizeof(sm_balls[0]) * 8; #endif pass_pairs<<<dg,db,shared_amt>>> (prefetch_offset, schedule_offset, round_cnt, max_balls_per_thread, balls_per_block, ft); } #ifndef USE_STRUCT struct SM_Offsets { int idx_pos; int idx_vel; int idx_omega; int idx_prev_vel; int idx_rad_etc; int idx_to_111; int idx_ori_xyz; int factor; }; __device__ CUDA_Phys_W get_sm_ball(SM_Offsets& smo, int idx) { CUDA_Phys_W phys; const int sidx = idx * smo.factor; phys.box.velocity = sm_balls[smo.idx_vel+sidx]; phys.box.prev_velocity = sm_balls[smo.idx_prev_vel+sidx]; phys.box.position = sm_balls[smo.idx_pos+sidx]; phys.box.omega = sm_balls[smo.idx_omega+sidx]; phys.box.radius = sm_balls[smo.idx_rad_etc+sidx].x; phys.box.mass_inv = sm_balls[smo.idx_rad_etc+sidx].y; phys.read_only = phys.box.mass_inv == 0; return phys; } __device__ void upgrade_sm_box(CUDA_Phys_W& phys, SM_Offsets& smo, int idx) { const int sidx = idx * smo.factor; float4 ori; set_f4(ori,sm_balls[smo.idx_ori_xyz+sidx], sm_balls[smo.idx_rad_etc+sidx].z); pMatrix_set_rotation_transpose(phys.box.rot_inv,ori); float3 to_111 = sm_balls[smo.idx_to_111+sidx]; phys.box.to_111 = to_111; box_set_mi_vec(phys.box); } __device__ void put_sm_phys(SM_Offsets& smo, int sidx, CUDA_Phys_W& phys) { sm_balls[smo.idx_vel+sidx] = phys.ball.velocity; sm_balls[smo.idx_omega+sidx] = phys.ball.omega; } #endif __global__ void pass_pairs(int prefetch_offset, int schedule_offset, int round_cnt, int max_balls_per_thread, int balls_per_block, Force_Types ft) { const int tid = threadIdx.x; // Initialized variables used to 
access balls_needed and tacts_schedule // arrays. // const int si_block_size = blockIdx.x * max_balls_per_thread * blockDim.x; const int si_block_base = prefetch_offset + si_block_size + tid; const int sp_block_size = blockIdx.x * round_cnt * blockDim.x; const int sp_block_base = schedule_offset + sp_block_size + tid; /// Prefetch objects to shared memory. // #ifdef USE_STRUCT for ( int i=0; i<max_balls_per_thread; i++ ) { int idx = tid + i * blockDim.x; if ( idx >= balls_per_block ) continue; const int m_idx = block_balls_needed[ si_block_base + i * blockDim.x ]; CUDA_Phys_W& phys = sm_balls[idx]; CUDA_Ball_W& ball = phys.ball; CUDA_Box_W& box = phys.box; phys.m_idx = m_idx; if ( m_idx < 0 ) continue; int4 tact_counts = balls_x.tact_counts[m_idx]; phys.pt_type = tact_counts.x; phys.contact_count = tact_counts.y; phys.debug_pair_calls = tact_counts.z; phys.part_of_wheel = bool(tact_counts.w & 2); phys.read_only = tact_counts.w & 1; ball.velocity = xyz(balls_x.velocity[m_idx]); ball.prev_velocity = xyz(balls_x.prev_velocity[m_idx]); ball.position = xyz(balls_x.position[m_idx]); ball.omega = xyz(balls_x.omega[m_idx]); float4 ball_props = balls_x.ball_props[m_idx]; ball.radius = ball_props.x; ball.mass_inv = ball_props.y; ball.pad1 = ball_props.z; ball.pad2 = ball_props.w; if ( phys.pt_type == PT_Box ) { set_f3(box.to_111, balls_x.to_111[m_idx]); box.orientation = balls_x.orientation[m_idx]; box_geometry_update(box); } } #else SM_Offsets smo; smo.idx_pos = 0; smo.idx_vel = 1; smo.idx_omega = 2; smo.idx_prev_vel = 3; smo.idx_rad_etc = 4; smo.idx_to_111 = 5; smo.idx_ori_xyz = 6; smo.factor = 7; for ( int i=0; i<max_balls_per_thread; i++ ) { int idx = tid + i * blockDim.x; if ( idx >= balls_per_block ) continue; const int m_idx = block_balls_needed[ si_block_base + i * blockDim.x ]; if ( m_idx < 0 ) continue; int4 tact_counts = balls_x.tact_counts[m_idx]; const int pt_type = tact_counts.x; sm_balls_misc[idx].x = tact_counts.x; // pt_type sm_balls_misc[idx].y = 
tact_counts.y; // contact count sm_balls_misc[idx].z = tact_counts.z; // debug_pair_calls sm_balls_misc[idx].w = tact_counts.w; // Part of wheel is bit 0x2 const int sidx = idx * smo.factor; sm_balls[smo.idx_vel+sidx] = m3(balls_x.velocity[m_idx]); sm_balls[smo.idx_prev_vel+sidx] = m3(balls_x.prev_velocity[m_idx]); sm_balls[smo.idx_pos+sidx] = m3(balls_x.position[m_idx]); sm_balls[smo.idx_omega+sidx] = m3(balls_x.omega[m_idx]); float4 props =balls_x.ball_props[m_idx]; sm_balls[smo.idx_rad_etc+sidx] = m3(props); if ( pt_type == PT_Box ) { sm_balls[smo.idx_to_111+sidx] = m3(balls_x.to_111[m_idx]); const float4 orientation = balls_x.orientation[m_idx]; sm_balls[smo.idx_ori_xyz+sidx] = m3(orientation); sm_balls[smo.idx_rad_etc+sidx].z = orientation.w; } } #endif const pVect zero_vec = mv(0,0,0); /// Resolve Collisions // for ( int round=0; round<round_cnt; round++ ) { const int tsidx = sp_block_base + round * blockDim.x; SM_Idx2 indices = tacts_schedule[ tsidx ]; const int ix = indices.x; const int iy = indices.y; // Wait for all threads to reach this point (to avoid having // two threads operate on the same ball simultaneously). 
// __syncthreads(); if ( indices.x == indices.y ) continue; #ifdef USE_STRUCT CUDA_Phys_W& physx = sm_balls[ix]; CUDA_Phys_W& physy = sm_balls[iy]; const unsigned char ptx = physx.pt_type; const unsigned char pty = physy.pt_type; #else const int six = ix * smo.factor; const int siy = iy * smo.factor; CUDA_Phys_W physx = get_sm_ball(smo,ix); CUDA_Phys_W physy = get_sm_ball(smo,iy); const int ptx = sm_balls_misc[ix].x; const int pty = sm_balls_misc[iy].x; #endif if ( ft & FT_NonFriction ) { #ifdef USE_STRUCT physx.debug_pair_calls++; physy.debug_pair_calls++; #else sm_balls_misc[ix].z++; sm_balls_misc[iy].z++; #endif } char rv; if ( ptx == PT_Box && pty == PT_Box ) { #ifndef USE_STRUCT upgrade_sm_box(physx,smo,ix); upgrade_sm_box(physy,smo,iy); #endif rv = penetration_boxes_resolve(physx,physy,tsidx,ft); } else if ( ptx == PT_Ball && pty == PT_Box ) { #ifndef USE_STRUCT upgrade_sm_box(physy,smo,iy); #endif rv = penetration_box_ball_resolve(physy,physx,ft); } else if ( pty == PT_Ball ) { CUDA_Ball_W& ballx = physx.ball; CUDA_Ball_W& bally = physy.ball; rv = penetration_balls_resolve(ballx,bally,true,ft); } else if ( pty == PT_Box ) { // Note: Tile / Box collisions not yet handled. rv = 0; } else { CUDA_Ball_W& ballx = physx.ball; CUDA_Tile_W& tiley = physy.tile; pCoor tact_pos; pVect tact_dir; rv = tile_ball_collide(tiley, ballx, tact_pos, tact_dir); if ( !rv ) continue; CUDA_Ball_W pball; pball.radius = 1; pball.omega = pball.prev_velocity = pball.velocity = zero_vec; pball.position = tact_pos + tact_dir; pVect vbefore = physx.ball.velocity; penetration_balls_resolve(ballx, pball, false, ft); pVect delta_mo = ( 1.0f / ballx.mass_inv ) * ( ballx.velocity - vbefore ); #ifdef USE_STRUCT const bool part_of_wheel = physy.part_of_wheel; #else const bool part_of_wheel = sm_balls_misc[iy].w & 2; #endif if ( part_of_wheel ) { wheel_collect_tile_force(tiley, tact_pos, delta_mo); // Note: Need to fix this. 
} #ifndef USE_STRUCT put_sm_phys(smo,six,physx); sm_balls_misc[ix].y += 1; continue; #endif } #ifdef USE_STRUCT physx.contact_count += rv; physy.contact_count += rv; #else put_sm_phys(smo,six,physx); put_sm_phys(smo,siy,physy); sm_balls_misc[ix].y += rv; sm_balls_misc[iy].y += rv; #endif } __syncthreads(); /// Copy Ball Data to Memory // for ( int i=0; i<max_balls_per_thread; i++ ) { int idx = tid + i * blockDim.x; if ( idx >= balls_per_block ) continue; #ifdef USE_STRUCT CUDA_Phys_W& phys = sm_balls[idx]; const int m_idx = phys.m_idx; if ( m_idx < 0 ) continue; if ( phys.read_only ) continue; #else const int sidx = idx * smo.factor; const int m_idx = block_balls_needed[ si_block_base + i * blockDim.x ]; if ( m_idx < 0 ) continue; const float mass_inv = sm_balls[smo.idx_rad_etc+sidx].y; const bool read_only = mass_inv == 0; if ( read_only ) continue; #endif #ifdef USE_STRUCT CUDA_Ball_W& ball = phys.ball; int4 tact_counts; tact_counts.x = phys.pt_type; tact_counts.y = phys.contact_count; tact_counts.z = phys.debug_pair_calls; tact_counts.w = phys.part_of_wheel; balls_x.tact_counts[m_idx] = tact_counts; const char pt_type = phys.pt_type; set_f4(balls_x.velocity[m_idx], ball.velocity); if ( pt_type == PT_Tile ) continue; set_f4(balls_x.omega[m_idx], ball.omega); #else balls_x.tact_counts[m_idx].y = sm_balls_misc[idx].y; balls_x.tact_counts[m_idx].z = sm_balls_misc[idx].z; const unsigned char pt_type = sm_balls_misc[idx].x; set_f4(balls_x.velocity[m_idx], sm_balls[smo.idx_vel+sidx]); if ( pt_type == PT_Tile ) continue; set_f4(balls_x.omega[m_idx], sm_balls[smo.idx_omega+sidx]); #endif } } /// /// Platform Pass /// // // Resolve ball collisions with platform, also update ball position // and orientation. 
__device__ void platform_collision(CUDA_Phys_W& phys);
__device__ void platform_collision_box(CUDA_Phys_W& phys);
__global__ void pass_platform(int ball_count);
__device__ void pass_platform_ball(CUDA_Phys_W& phys, int idx);
__device__ void pass_platform_tile(CUDA_Phys_W& phys, int idx);
__device__ void pass_platform_box(CUDA_Phys_W& phys, int idx);

// Fetch per-kernel launch attributes (max threads per block, register
// use, etc.) for the three main kernels so the CPU code can size its
// launches.  Returns the first CUDA error encountered, else cudaSuccess.
__host__ cudaError_t cuda_get_attr_plat_pairs
 (struct cudaFuncAttributes *attr_platform,
  struct cudaFuncAttributes *attr_pairs,
  struct cudaFuncAttributes *attr_xx_intersect)
{
  collect_symbols();

  // Return attributes of CUDA functions. The code needs the
  // maximum number of threads.

  cudaError_t e1 = cudaFuncGetAttributes(attr_platform,pass_platform);
  if ( e1 ) return e1;
  cudaError_t e2 = cudaFuncGetAttributes(attr_pairs,pass_pairs);
  if ( e2 ) return e2;
  cudaError_t e3 = cudaFuncGetAttributes(attr_xx_intersect,pass_xx_intersect);
  return e3;
}

// Host-side launcher for pass_platform.  Dynamic shared memory is one
// float per thread, with the thread count rounded up to a power of 2
// (block_lg = ceil(log2(db.x))).
// NOTE(review): __builtin_clz(0) is undefined behavior, so this assumes
// db.x >= 2 -- confirm callers never request one-thread blocks.
__host__ void pass_platform_launch
 (dim3 dg, dim3 db, int ball_count)
{
  const int block_lg = 32 - __builtin_clz(db.x-1);
  const int shared_amt = sizeof(float) << block_lg;
  pass_platform<<<dg,db,shared_amt>>>(ball_count);
}

__global__ void pass_platform(int ball_count)
{
  /// Main CUDA routine for resolving collisions with platform and
  /// updating ball position and orientation.

  // One ball per thread.
  const int idx_base = blockIdx.x * blockDim.x;
  const int idx = idx_base + threadIdx.x;
  if ( idx >= ball_count ) return;  // Guard the grid tail.

  CUDA_Phys_W phys;

  /// Copy ball data from memory to local variables.
  //
  // Local variables hopefully will be in GPU registers, not
  // slow local memory.
  //
  int4 tact_counts = balls_x.tact_counts[idx];
  phys.pt_type = tact_counts.x;
  phys.contact_count = tact_counts.y;
  phys.part_of_wheel = tact_counts.w & 1;  // Wheel-membership flag in bit 0.

  // Dispatch on the Phys type; anything that is neither ball nor box is
  // treated as a tile.
  if ( phys.pt_type == PT_Ball ) pass_platform_ball(phys, idx);
  else if ( phys.pt_type == PT_Box ) pass_platform_box(phys, idx);
  else pass_platform_tile(phys, idx);

  /// Copy other updated data to memory.
  //
  // NOTE(review): the shifts below pack counts into the high bytes of
  // each field; presumably the CPU side unpacks them -- confirm against
  // the reader.
  tact_counts.y = phys.contact_count << 8;
  tact_counts.z = tact_counts.z << 16;
  balls_x.tact_counts[idx] = tact_counts;
}

// Per-ball platform pass: resolve platform contact, apply gravity and
// air resistance, then integrate position and orientation over delta_t.
__device__ void pass_platform_ball(CUDA_Phys_W& phys, int idx)
{
  // One ball per thread.
  CUDA_Ball_W& ball = phys.ball;

  /// Copy ball data from memory to local variables.
  //
  // Local variables hopefully will be in GPU registers, not
  // slow local memory.
  //
  ball.prev_velocity = xyz(balls_x.prev_velocity[idx]);
  ball.velocity = xyz(balls_x.velocity[idx]) + gravity_accel_dt;
  set_f3(ball.position,balls_x.position[idx]);
  set_f3(ball.omega, balls_x.omega[idx]);
  float4 ball_props = balls_x.ball_props[idx];
  ball.radius = ball_props.x;
  ball.mass_inv = ball_props.y;

  /// Handle Ball/Platform Collision
  //
  if ( opt_platform_curved ) platform_collision(phys);

  /// Handle Air Resistance
  //
  // Drag force proportional to cross-sectional area and speed; velocity
  // is scaled by an exponential decay factor over the time step.
  // NOTE(review): uses double-precision exp() here but expf() in the box
  // version below -- possibly unintentional.
  const float area = M_PI * ball.radius * ball.radius;
  pNorm force = mn( -area * opt_air_resistance * ball.velocity );
  const float v_change = exp( -force.magnitude * ball.mass_inv * delta_t );
  ball.velocity = v_change * ball.velocity;

  /// Update Position and Orientation
  //
  // Trapezoidal position update; orientation advanced by a quaternion
  // rotation of |omega| * delta_t about the omega axis.
  ball.position += 0.5f * delta_t * ( ball.prev_velocity + ball.velocity );
  pNorm axis = mn(ball.omega);
  balls_x.orientation[idx] =
    quat_normalize
    ( quat_mult ( mq( axis, delta_t * axis.magnitude ),
                  balls_x.orientation[idx] ));

  /// Copy other updated data to memory.
  //
  set_f4(balls_x.velocity[idx], ball.velocity);
  set_f4(balls_x.prev_velocity[idx], ball.velocity);
  set_f4(balls_x.omega[idx], ball.omega);
  set_f4(balls_x.position[idx], ball.position, ball.radius);
}

// Per-tile platform pass.  Tiles belonging to the wheel collect the
// torque accumulated during the pairs pass, spin the wheel, and rotate
// their own geometry (corner point and edge/normal vectors) accordingly.
__device__ void pass_platform_tile(CUDA_Phys_W& phys, int idx)
{
  if ( !phys.part_of_wheel ) return;
  const int tid = threadIdx.x;

  // Torque accumulated on this tile was stashed in the z component of
  // its velocity slot; publish it to shared memory and clear it.
  float4 tile_props = balls_x.velocity[idx];
  float torque = tile_props.z;
  block_torque_dt[tid] = torque;
  tile_props.z = 0;
  balls_x.velocity[idx] = tile_props;
  float omega = wheel.omega[0];

  // Tile geometry: lower-left corner, two edge vectors, and the normal,
  // each packed in a float4 slot of the balls_x arrays.
  const float3 pt_ll = xyz(balls_x.position[idx]);
  const float3 norm_rt = xyz(balls_x.omega[idx]);
  const float3 norm_up = xyz(balls_x.prev_velocity[idx]);
  const float3 normal = xyz(balls_x.ball_props[idx]);

  float torque_sum = 0;
  // Assuming that all are on same warp. :-)
  // NOTE(review): block_torque_dt is read here without a barrier after
  // the writes above; this relies on implicit warp synchrony, which is
  // not guaranteed on Volta+ independent thread scheduling -- a
  // __syncwarp() may be needed.  Confirm target architecture.
  for ( int i=wheel.idx_start; i<wheel.idx_stop; i++ )
    torque_sum += block_torque_dt[i];

  omega -= torque_sum * wheel.moment_of_inertia_inv;

  // Apply bearing friction: reduce |omega| by a fixed amount per step,
  // stopping the wheel if friction would overshoot zero.
  const float friction_delta_omega =
    wheel.friction_torque * wheel.moment_of_inertia_inv * delta_t;
  if ( fabs(omega) <= friction_delta_omega ) omega = 0;
  else if ( omega > 0 ) omega -= friction_delta_omega;
  else omega += friction_delta_omega;

  // Rotate the tile geometry about the wheel axis by this step's angle.
  const float delta_theta = omega * delta_t;
  pcMatrix3x3 rot;
  pMatrix_set_rotation(rot,wheel.axis_dir,delta_theta);
  const float3 rpt_ll = wheel.center + rot * ( pt_ll - wheel.center );
  const float3 rnorm_rt = rot * norm_rt;
  const float3 rnorm_up = rot * norm_up;
  const float3 rnormal = rot * normal;
  set_f4(balls_x.position[idx],rpt_ll);
  set_f4(balls_x.omega[idx], rnorm_rt);
  set_f4(balls_x.prev_velocity[idx], rnorm_up);
  set_f4(balls_x.ball_props[idx], rnormal);

  // Exactly one thread (the first wheel tile) writes the new wheel speed.
  if ( idx == wheel.idx_start ) wheel.omega[0] = omega;
}

// Per-box platform pass: analogue of pass_platform_ball for boxes, with
// per-face air resistance and rotational drag.
__device__ void pass_platform_box(CUDA_Phys_W& phys, int idx)
{
  // One box per thread.
  CUDA_Box_W& box = phys.box;

  /// Copy data from memory to local variables.
  //
  // Local variables hopefully will be in GPU registers, not
  // slow local memory.
  //
  float4 box_props = balls_x.ball_props[idx];
  box.mass_inv = box_props.y;
  if ( box.mass_inv == 0 ) return; // Read only.
  box.prev_velocity = xyz(balls_x.prev_velocity[idx]);
  box.velocity = xyz(balls_x.velocity[idx]) + gravity_accel_dt;
  set_f3(box.position,balls_x.position[idx]);
  set_f3(box.omega, balls_x.omega[idx]);
  set_f3(box.to_111, balls_x.to_111[idx]);
  box.orientation = balls_x.orientation[idx];
  box_geometry_update(box);

  /// Handle Ball/Platform Collision
  //
  if ( opt_platform_curved ) platform_collision_box(phys);

  /// Handle Air Resistance
  //
  // Translational drag: sum per-axis face contributions (projected
  // velocity times face area), then decay the velocity component along
  // the net force direction.
  pVect force = mv(0,0,0);
  for ( int d=0; d<3; d++ )
    {
      const pVect face_normal = box_get_axis_norm(box,2-d);
      const float amt = dot( face_normal, box.velocity );
      const float area = box_get_axis_area(box,d);
      force += amt * area * face_normal;
    }
  pNorm force_dir = mn(force);
  const float v_dir = dot(force_dir,box.velocity);
  const float resistance = force_dir.magnitude * opt_air_resistance;
  const float v_change = expf(- resistance * box.mass_inv * delta_t );
  box.velocity -= v_dir * (1.0f - v_change ) * force_dir;

  {
    // Rotational drag: build an angular-drag moment from the box's
    // half-dimensions (to_111) and decay |omega| exponentially.
    float3 lsq = box.to_111 * box.to_111;
    pVect amoment1 = mv( lsq.y + lsq.z, lsq.x + lsq.z, lsq.x + lsq.y );
    float3 omega = box.omega;
    pVect amoment = amoment1 * box.to_111;
    pVect omega_l = box.rot_inv * omega;  // Omega in the box's local frame.
    const float torque = opt_air_resistance * dot(amoment,fabs(omega_l));
    const float mi = box_get_moment_of_inertia_inv(box,mn(omega));
    const float o_change = exp(- torque * mi * delta_t );
    box.omega = o_change * omega;
  }

  /// Update Position and Orientation
  //
  box.position += 0.5f * delta_t * ( box.prev_velocity + box.velocity );
  pNorm axis = mn(box.omega);
  balls_x.orientation[idx] =
    quat_normalize
    ( quat_mult ( mq( axis, delta_t * axis.magnitude ),
                  box.orientation ));

  /// Copy other updated data to memory.
  //
  set_f4(balls_x.velocity[idx], box.velocity);
  set_f4(balls_x.prev_velocity[idx], box.velocity);
  set_f4(balls_x.omega[idx], box.omega);
  set_f4(balls_x.position[idx], box.position, box_props.x);
}

// Resolve a box / curved-platform collision.  Finds box vertices (and
// edge intersections) that penetrate the platform surface, then applies
// normal and friction impulses at each contact point.
__device__ void platform_collision_box(CUDA_Phys_W& phys)
{
  CUDA_Box_W& box = phys.box;

  // Cheap bounding-sphere rejection tests before the detailed check.
  float radius = length(box.to_111);
  if ( box.position.y - radius >= 0 ) return;
  if ( box.position.z + radius <= platform_zmin ) return;
  if ( box.position.z - radius >= platform_zmax ) return;
  float3 axis = mv(platform_xmid,0,box.position.z);
  pVect btoa = mv(box.position,axis);
  if ( dot(btoa,btoa) < (platform_xrad-radius)*(platform_xrad-radius) )
    return;

  box_geometry_update(box);

  int inside = 0;         // Bitmask of vertices over the platform span.
  int outside_under = 0;  // Bitmask of penetrating vertices beyond z ends.
  float pen_dists[8];     // Per-vertex penetration depth.
  CUDA_SectTT psects[5];  // Collected contact points.
  int ps_next = 0;
  float min_pd = 0; // For vertices between ends.
  float max_pd = 0;

  // Find vertices that are under the platform.
  //
  for ( int v=0; v<8; v++ )
    {
      int v_bit = 1 << v;
      float3 pos = box_get_vertices(box,v);
      if ( pos.y > 0 ) { pen_dists[v] = 0; continue; }
      float3 axis = mc(platform_xmid,0,pos.z);
      pNorm tact_dir = mn(axis,pos);
      // Penetration depth measured radially from the platform axis.
      float pen_dist = tact_dir.magnitude - platform_xrad;
      pen_dists[v] = pen_dist;
      if ( pos.z < platform_zmin || pos.z > platform_zmax )
        {
          if ( pen_dist > 0 ) outside_under |= v_bit;
          continue;
        }
      set_min(min_pd,pen_dist);
      set_max(max_pd,pen_dist);
      if ( pen_dist > 1 ) continue;  // Too deep: treat as pass-through.
      inside |= v_bit;
      if ( pen_dist <= 0 ) continue;
      CUDA_SectTT* sect = &psects[ps_next++];
      sect->start = pos;
      sect->dir = tact_dir.v;
      sect->pen_dist = pen_dist;
    }

  // The box counts as inside only if it penetrates more than it protrudes.
  bool object_inside = max_pd < -min_pd;
  if ( !object_inside ) return;

  // Examine vertices that are off the edge of the platform (in the
  // z direction), to see if an adjoining edge intersects the platform
  // edge.
  //
  for ( int v=0; v<8; v++ )
    {
      int v_bit = 1 << v;
      if ( ! ( v_bit & outside_under ) ) continue;

      // Outside Vertex (beyond z_max or z_min).
      //
      pCoor pos = box_get_vertices(box,v);
      float pen_dist_out = pen_dists[v];
      float v_z = pos.z;
      float ref_z = v_z >= platform_zmax ? platform_zmax : platform_zmin;
      float outside_z_len = fabs(v_z - ref_z);

      // Look for adjoining vertices that are over the platform.
      // (Vertex indices differ by one bit per shared edge.)
      //
      for ( int axis = 0; axis < 3; axis++ )
        {
          int vn = v ^ ( 1 << axis );
          int vn_bit = 1 << vn;
          if ( ! ( inside & vn_bit ) ) continue;
          float pen_len = pen_dists[vn] - pen_dist_out;

          // Inside Vertex
          pCoor pos_in = box_get_vertices(box,vn);

          // Compute the contact point at penetration distance.
          //
          float z_len = fabs(v_z - pos_in.z);
          if ( z_len < 0.0001f ) continue;  // Edge nearly parallel to rim.
          float scale = outside_z_len / z_len;
          pVect to_inside = mv(pos,pos_in);
          pCoor tact = pos + scale * to_inside;
          float pen_tact = pen_dist_out + scale * pen_len;
          if ( pen_tact <= 0 ) continue;
          CUDA_SectTT* sect = &psects[ps_next++];
          sect->start = tact;
          sect->pen_dist = pen_tact;
          pNorm dir = mn(cross(to_inside,mv(-tact.y,tact.x,0)));
          sect->dir = pen_len >= 0 ? normalize(mv(tact.x,tact.y,0)) : dir.v;
        }
    }

  // if ( ps_next > 0 ) phys.contact_count++;

  // First pass over contacts: apply the normal (spring) impulse at each
  // contact point, capped so an approaching contact is not over-corrected.
  for ( int i=0; i<ps_next; i++ )
    {
      CUDA_SectTT *sect = &psects[i];
      pCoor pos = sect->start;
      pVect tact_dir = sect->dir;
      pNorm ctopos = mn(box.position,pos);
      pVect vel = box_get_vel(box,pos);
      float pen_dist = sect->pen_dist;
      float rad_vel = dot(vel,tact_dir);
      // NOTE(review): loss_factor is double while everything else is
      // float -- likely an oversight, harmless but forces a conversion.
      double loss_factor = 1 - opt_bounce_loss;
      float force_dt_no_loss = elasticity_inv_dt * pen_dist;
      float max_fdt_in = rad_vel / box.mass_inv;
      // Approaching: clamp impulse; separating: apply lossy spring force.
      float appr_force_dt = rad_vel > 0 ?
        min(max_fdt_in,force_dt_no_loss) : force_dt_no_loss * loss_factor;
      box_apply_force_dt(box,pos, - appr_force_dt * tact_dir );
    }

  // Second pass: apply friction impulses opposing tangential velocity,
  // limited both by the Coulomb cone and by the impulse that would stop
  // the tangential motion outright.
  for ( int i=0; i<ps_next; i++ )
    {
      CUDA_SectTT *sect = &psects[i];
      pCoor pos = sect->start;
      pVect tact_dir = sect->dir;
      float pen_dist = sect->pen_dist;
      float force_dt_no_loss = elasticity_inv_dt * pen_dist;
      pVect vel2 = box_get_vel(box,pos);
      float rad_vel2 = dot(vel2,tact_dir);
      pNorm tan_vel = mn( vel2 - rad_vel2 * tact_dir );
      float mi_inv = box_get_moment_of_inertia_inv(box,pos,tan_vel);
      float fdt_limit = tan_vel.magnitude / ( box.mass_inv + mi_inv );
      float fric_force_dt_no_loss = force_dt_no_loss * opt_friction_coeff;
      float fric_force_dt = min(fdt_limit, fric_force_dt_no_loss);
      box_apply_force_fric_dt(box,pos, tan_vel, -fric_force_dt);
    }
}

__device__ void platform_collision(CUDA_Phys_W& phys)
{
  /// Check if ball in contact with platform, if so apply forces.

  CUDA_Ball_W& ball = phys.ball;
  pCoor pos = ball.position;
  const float r = ball.radius;

  // Fast bounding-box rejection against the platform extents.
  bool collision_possible =
    pos.y < r
    && pos.x >= platform_xmin - r && pos.x <= platform_xmax + r
    && pos.z >= platform_zmin - r && pos.z <= platform_zmax + r;
  if ( !collision_possible ) return;

  CUDA_Ball_W pball;
  pCoor axis = mc(platform_xmid,0,pos.z);
  const float short_xrad = platform_xrad - r;
  const float short_xrad_sq = short_xrad * short_xrad;
  const float long_xrad = platform_xrad + r;
  const float long_xrad_sq = long_xrad * long_xrad;

  // Test for different ways ball can touch platform.  If contact
  // is found find position of an artificial platform ball (pball)
  // that touches the real ball at the same place and angle as
  // the platform.  This pball will be used for the ball-ball penetration
  // routine, penetration_balls_resolve.

  if ( pos.y > 0 )
    {
      // Possible contact with upper edge of platform.
      //
      pCoor tact =
        mc(pos.x > platform_xmid ? platform_xmax : platform_xmin, 0, pos.z);
      pNorm tact_dir = mn(pos,tact);
      if ( tact_dir.mag_sq >= r * r ) return;
      pball.position = tact + r * tact_dir;
    }
  else if ( pos.z > platform_zmax || pos.z < platform_zmin )
    {
      // Possible contact with side (curved) edges of platform.
      //
      pNorm ball_dir = mn(axis,pos);
      if ( ball_dir.mag_sq <= short_xrad_sq ) return;
      const float zedge = pos.z > platform_zmax ? platform_zmax : platform_zmin;
      pCoor axis_edge = mc(platform_xmid,0,zedge);
      pCoor tact = axis_edge + platform_xrad * ball_dir;
      pNorm tact_dir = mn(pos,tact);
      if ( tact_dir.mag_sq >= r * r ) return;
      pball.position = tact + r * tact_dir;
    }
  else
    {
      // Possible contact with surface of platform.
      //
      pNorm tact_dir = mn(axis,pos);
      if ( tact_dir.mag_sq <= short_xrad_sq
           || tact_dir.mag_sq >= long_xrad_sq ) return;
      // Place the phantom ball on whichever side of the surface the real
      // ball is approaching from.
      pball.position = axis +
        ( platform_xrad + ( tact_dir.magnitude < platform_xrad ? r : -r ) )
        * tact_dir;
    }

  // Finish initializing platform ball, and call routine to
  // resolve penetration.
  //
  pVect zero_vec = mv(0,0,0);
  pball.omega = zero_vec;
  pball.prev_velocity = pball.velocity = zero_vec;
  pball.radius = ball.radius;
  pball.mass_inv = ball.mass_inv;  // Phantom ball matches the real ball.
  if ( penetration_balls_resolve(phys.ball,pball,false,FT_All) )
    phys.contact_count++;
}

 /// Compute Phys Proximity Pairs

// Mapping from z-sort index to ball array index.
__constant__ int *z_sort_indices;
// Pre-computed z_max values.
__constant__ float *z_sort_z_max;
// Computed proximity values, sent to CPU.
__constant__ int64_t *cuda_prox;

// An array that can be used to pass values back to the CPU for
// use in debugging.
__constant__ float3 *pass_sched_debug;

// Texture references for the ball position (xyz + radius in w) and
// velocity arrays.  NOTE(review): the texture-reference API is
// deprecated and removed in CUDA 12 -- consider texture objects.
texture<float4> balls_pos_tex;
texture<float4> balls_vel_tex;

__global__ void pass_sched(int ball_count, float lifetime_delta_t);
__device__ float ball_min_z_get
(float3 position, float3 velocity, float radius, float lifetime_delta_t);

// Bind the position and velocity arrays to textures and launch the
// proximity-scheduling kernel.  Returns false if either binding fails
// or does not start at offset zero (the kernel indexes from element 0).
__host__ bool pass_sched_launch
(dim3 dg, dim3 db, int ball_count, float lifetime_delta_t,
 void *pos_array_dev, void *vel_array_dev)
{
  size_t offset;
  const size_t size = ball_count * sizeof(float4);
  const cudaChannelFormatDesc fd =
    cudaCreateChannelDesc(32,32,32,32,cudaChannelFormatKindFloat);

  // Bug fix: the return codes of cudaBindTexture were previously
  // ignored; only `offset` was tested.  On a failed bind `offset` is
  // not meaningful and the kernel would run on an unbound texture.
  const cudaError_t e1 =
    cudaBindTexture(&offset, balls_pos_tex, pos_array_dev, fd, size);
  if ( e1 != cudaSuccess || offset ) return false;
  const cudaError_t e2 =
    cudaBindTexture(&offset, balls_vel_tex, vel_array_dev, fd, size);
  if ( e2 != cudaSuccess || offset ) return false;

  pass_sched<<<dg,db>>>(ball_count,lifetime_delta_t);
  return true;
}

__global__ void
pass_sched(int ball_count, float lifetime_delta_t)
{
  // Determine which balls that are in proximity to a ball.  This
  // routine only works for balls; if a tile is found an I-give-up
  // value is returned, and the CPU will have to determine proximity.
  //
  // One z-sorted Phys per thread.  Each thread scans backwards through
  // the z-sorted list and packs up to cuda_prox_per_ball 8-bit index
  // offsets into a single word written to cuda_prox.

  const int idx_base = blockIdx.x * blockDim.x;

  // idx9 is an index into z-sorted arrays.
  const int idx9 = idx_base + threadIdx.x;
  if ( idx9 >= ball_count ) return;

  // bidx9 is an index into the balls arrays.
  const int bidx9 = z_sort_indices[idx9];

  // If bidx9 is negative then Phys at index bidx9 is not a ball,
  // so just return a give-up code 't' (tile).
  if ( bidx9 < 0 ) { cuda_prox[idx9] = ( 't' << 8 ) | 0xff; return; }

  // Fetch position, radius (packed in position vector), and velocity.
  //
  const float4 pos_rad9 = tex1Dfetch(balls_pos_tex,bidx9);
  const float3 pos9 = xyz(pos_rad9);
  const float radius9 = pos_rad9.w;
  const float4 vel9_pad = tex1Dfetch(balls_vel_tex,bidx9);
  const float3 vel9 = xyz(vel9_pad);
  const float z_min = ball_min_z_get(pos9,vel9,radius9,lifetime_delta_t);

  // Number of nearby balls.
  int proximity_cnt = 0;

  // Reason for giving up, 0 means we didn't give up (yet).
  char incomplete = 0;

  // The list of balls in proximity, packed into a single integer.
  Prox_Offsets offsets = 0;

  for ( int idx1 = idx9-1; !incomplete && idx1 >= 0; idx1-- )
    {
      const float z_max = z_sort_z_max[idx1];

      // Break if this and subsequent z-ordered balls could not
      // possibly be in proximity.
      if ( z_max < z_min ) break;

      const int bidx1 = z_sort_indices[idx1];

      // If there's a tile here give up.  (t is for tile)
      if ( bidx1 < 0 ) { incomplete = 't'; continue; }

      const float4 pos_rad = tex1Dfetch(balls_pos_tex,bidx1);
      const float3 pos1 = xyz(pos_rad);
      const float4 vel_pad1 = tex1Dfetch(balls_vel_tex,bidx1);
      const float3 vel1 = xyz(vel_pad1);
      const float radius1 = pos_rad.w;

      // Use the pNorm constructor to compute the distance between two balls.
      pNorm dist = mn(pos1,pos9);

      // Balls are considered in proximity if they can be
      // this close over schedule lifetime.
      const float region_length_small = 1.11f * ( radius9 + radius1 );

      // Check if balls will be close enough over lifetime.
      pVect delta_v = vel9 - vel1;
      const float delta_d = lifetime_delta_t * length(delta_v);
      const float dist2 = dist.magnitude - delta_d;
      if ( dist2 > region_length_small ) continue;

      // At this point the balls are considered in proximity, now
      // squeeze the value of bidx1 into eight bits by taking
      // the difference of z-sort indices, which should be close
      // together.
      const int offset = idx9 - idx1;

      // Ooops, exceeded the limit on the number of proximities.
      // (f is for full)
      if ( proximity_cnt >= cuda_prox_per_ball ) incomplete = 'f';

      // Ooops, the offset won't fit into 8 bits.  (o is for overflow)
      else if ( offset >= 255 ) incomplete = 'o';

      // Everything is fine, slide the offset on to the list.
      else offsets = ( offsets << 8 ) | offset;

      proximity_cnt++;
    }

  // If code could not compute all proximities replace offsets with
  // the error code.
  if ( incomplete ) offsets = ( incomplete << 8 ) | 0xff;

  cuda_prox[idx9] = offsets;
}

// Conservative lower bound on a ball's z-sort key over the schedule
// lifetime: current key minus the largest possible travel distance
// (L1 speed bound) minus two radii.
// NOTE(review): the key is position.z + position.x, so the z-sort
// apparently orders on x+z rather than z alone -- confirm against the
// CPU-side sort.
__device__ float ball_min_z_get
(float3 position, float3 velocity, float radius, float lifetime_delta_t)
{
  const float m =
    fabs(velocity.x) + fabs(velocity.y) + fabs(velocity.z);
  const float z_min =
    position.z + position.x - m * lifetime_delta_t - 2 * radius;
  return z_min;
}

// Look up the device addresses of all __constant__ symbols used by the
// kernels in this file so the host can write them (CU_SYM is assumed to
// record name/address pairs).  Called once from cuda_get_attr_plat_pairs.
static __host__ void
collect_symbols()
{
  CU_SYM(balls_x);
  CU_SYM(block_balls_needed);
  CU_SYM(tacts_schedule);
  CU_SYM(xx_pairs);
  CU_SYM(xx_sects_center);
  CU_SYM(xx_sects_dir);
  CU_SYM(xx_sects_debug);
  CU_SYM(gravity_accel_dt);
  CU_SYM(opt_bounce_loss);
  CU_SYM(opt_bounce_loss_box);
  CU_SYM(opt_friction_coeff);
  CU_SYM(opt_friction_roll);
  CU_SYM(opt_air_resistance);
  CU_SYM(opt_platform_curved);
  CU_SYM(platform_xmin);
  CU_SYM(platform_xmax);
  CU_SYM(platform_zmin);
  CU_SYM(platform_zmax);
  CU_SYM(platform_xmid);
  CU_SYM(platform_xrad);
  CU_SYM(delta_t);
  CU_SYM(elasticity_inv_dt);
  CU_SYM(opt_debug);
  CU_SYM(opt_debug2);
  CU_SYM(wheel);
  CU_SYM(z_sort_indices);
  CU_SYM(z_sort_z_max);
  CU_SYM(cuda_prox);
  CU_SYM(pass_sched_debug);
}
// ==== concatenated second source file: ae9f7e801db3f472b813e88261746e7891e51a5a.hip ====
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 2019-2020 NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "myers_gpu.cuh" #include "batched_device_matrices.cuh" #include <claraparabricks/genomeworks/cudaaligner/aligner.hpp> #include <claraparabricks/genomeworks/utils/signed_integer_utils.hpp> #include <claraparabricks/genomeworks/utils/limits.cuh> #include <claraparabricks/genomeworks/utils/mathutils.hpp> #include <claraparabricks/genomeworks/utils/cudautils.hpp> #include <claraparabricks/genomeworks/utils/allocator.hpp> #include <claraparabricks/genomeworks/utils/device_buffer.hpp> #include <cassert> #include <climits> #include <vector> #include <numeric> namespace claraparabricks { namespace genomeworks { namespace cudaaligner { constexpr int32_t warp_size = 32; namespace myers { constexpr int32_t initial_distance_guess_factor = 20; inline __device__ WordType warp_leftshift_sync(uint32_t warp_mask, WordType v) { assert(((warp_mask >> (threadIdx.x % warp_size)) & 1u) == 1u); // 4 threads, word_size = 4 example: thread 0 | thread 1 | thread 2 | thread 3 // v = 0101 | 0111 | 0011 | 1101 -> 1010 | 1110 | 0111 | 1010 const WordType x = __shfl_up_sync(warp_mask, v >> (word_size - 1), 1); assert((x & ~WordType(1)) == 0); v <<= 1; if (threadIdx.x != 0) v |= x; return v; } inline __device__ WordType warp_rightshift_sync(uint32_t warp_mask, WordType v) { assert(((warp_mask >> (threadIdx.x % warp_size)) 
& 1u) == 1u); // 4 threads, word_size = 4 example: thread 0 | thread 1 | thread 2 | thread 3 // v = 0101 | 0111 | 0011 | 1101 -> 0010 | 1011 | 1001 | 1110 const WordType x = __shfl_down_sync(warp_mask, v << (word_size - 1), 1); assert((x & ~(WordType(1) << (word_size - 1))) == 0); v >>= 1; if ((warp_mask >> threadIdx.x) > 1u) v |= x; return v; } inline __device__ WordType warp_add_sync(uint32_t warp_mask, WordType a, WordType b) { static_assert(sizeof(WordType) == 4, "This function assumes WordType to have 4 bytes."); static_assert(CHAR_BIT == 8, "This function assumes a char width of 8 bit."); assert(((warp_mask >> (threadIdx.x % warp_size)) & 1u) == 1u); const uint64_t ax = a; const uint64_t bx = b; uint64_t r = ax + bx; uint32_t carry = static_cast<uint32_t>(r >> 32); if (warp_mask == 1u) { return static_cast<WordType>(r); } r &= 0xffff'ffffull; // TODO: I think due to the structure of the Myer blocks, // a carry cannot propagate over more than a single block. // I.e. a single carry propagation without the loop should be sufficient. while (__any_sync(warp_mask, carry)) { uint32_t x = __shfl_up_sync(warp_mask, carry, 1); if (threadIdx.x != 0) r += x; carry = static_cast<uint32_t>(r >> 32); r &= 0xffff'ffffull; } return static_cast<WordType>(r); } __device__ int32_t myers_advance_block(uint32_t warp_mask, WordType highest_bit, WordType eq, WordType& pv, WordType& mv, int32_t carry_in) { assert((pv & mv) == WordType(0)); // Stage 1 WordType xv = eq | mv; if (carry_in < 0) eq |= WordType(1); WordType xh = warp_add_sync(warp_mask, eq & pv, pv); xh = (xh ^ pv) | eq; WordType ph = mv | (~(xh | pv)); WordType mh = pv & xh; int32_t carry_out = ((ph & highest_bit) == WordType(0) ? 0 : 1) - ((mh & highest_bit) == WordType(0) ? 
0 : 1); ph = warp_leftshift_sync(warp_mask, ph); mh = warp_leftshift_sync(warp_mask, mh); if (carry_in < 0) mh |= WordType(1); if (carry_in > 0) ph |= WordType(1); // Stage 2 pv = mh | (~(xv | ph)); mv = ph & xv; return carry_out; } __device__ int2 myers_advance_block2(uint32_t warp_mask, WordType highest_bit, WordType eq, WordType& pv, WordType& mv, int32_t carry_in) { assert((pv & mv) == WordType(0)); // Stage 1 WordType xv = eq | mv; if (carry_in < 0) eq |= WordType(1); WordType xh = warp_add_sync(warp_mask, eq & pv, pv); xh = (xh ^ pv) | eq; WordType ph = mv | (~(xh | pv)); WordType mh = pv & xh; int2 carry_out; carry_out.x = ((ph & highest_bit) == WordType(0) ? 0 : 1) - ((mh & highest_bit) == WordType(0) ? 0 : 1); carry_out.y = ((ph & (highest_bit << 1)) == WordType(0) ? 0 : 1) - ((mh & (highest_bit << 1)) == WordType(0) ? 0 : 1); ph = warp_leftshift_sync(warp_mask, ph); mh = warp_leftshift_sync(warp_mask, mh); if (carry_in < 0) mh |= WordType(1); if (carry_in > 0) ph |= WordType(1); // Stage 2 pv = mh | (~(xv | ph)); mv = ph & xv; return carry_out; } __device__ WordType myers_generate_query_pattern(char x, char const* query, int32_t query_size, int32_t offset) { // Sets a 1 bit at the position of every matching character assert(offset < query_size); const int32_t max_i = min(query_size - offset, word_size); WordType r = 0; for (int32_t i = 0; i < max_i; ++i) { if (x == query[i + offset]) r = r | (WordType(1) << i); } return r; } inline __device__ WordType get_query_pattern(device_matrix_view<WordType>& query_patterns, int32_t idx, int32_t query_begin_offset, char x, bool reverse) { static_assert(std::is_unsigned<WordType>::value, "WordType has to be an unsigned type for well-defined >> operations."); assert(x >= 0); assert(x == 'A' || x == 'C' || x == 'G' || x == 'T'); const int32_t char_idx = (x >> 1) & 0x3u; // [A,C,T,G] -> [0,1,2,3] // 4-bit word example: // query_patterns contains character match bit patterns "XXXX" for the full query string. 
// we want the bit pattern "yyyy" for a view of on the query string starting at eg. character 11: // 4 3 2 1 0 (pattern index) // XXXX XXXX XXXX [XXXX] [XXXX] // YYY Yyyy y // 1 0 (idx) // // query_begin_offset = 11 // => idx_offset = 11/4 = 2, shift = 11%4 = 3 const int32_t idx_offset = query_begin_offset / word_size; const int32_t shift = query_begin_offset % word_size; WordType r = query_patterns(idx + idx_offset, char_idx); if (shift != 0) { r >>= shift; if (idx + idx_offset + 1 < query_patterns.num_rows()) { r |= query_patterns(idx + idx_offset + 1, char_idx) << (word_size - shift); } } return r; } inline __device__ int32_t get_myers_score(int32_t i, int32_t j, device_matrix_view<WordType> const& pv, device_matrix_view<WordType> const& mv, device_matrix_view<int32_t> const& score, WordType last_entry_mask) { assert(i > 0); // row 0 is implicit, NW matrix is shifted by i -> i-1 const int32_t word_idx = (i - 1) / word_size; const int32_t bit_idx = (i - 1) % word_size; int32_t s = score(word_idx, j); WordType mask = (~WordType(1)) << bit_idx; if (word_idx == score.num_rows() - 1) mask &= last_entry_mask; s -= __popc(mask & pv(word_idx, j)); s += __popc(mask & mv(word_idx, j)); return s; } __device__ void myers_backtrace(int8_t* paths_base, int32_t* lengths, int32_t max_path_length, device_matrix_view<WordType> const& pv, device_matrix_view<WordType> const& mv, device_matrix_view<int32_t> const& score, int32_t query_size, int32_t id) { using nw_score_t = int32_t; assert(pv.num_rows() == score.num_rows()); assert(mv.num_rows() == score.num_rows()); assert(pv.num_cols() == score.num_cols()); assert(mv.num_cols() == score.num_cols()); assert(score.num_rows() == ceiling_divide(query_size, word_size)); int32_t i = query_size; int32_t j = score.num_cols() - 1; int8_t* path = paths_base + id * static_cast<ptrdiff_t>(max_path_length); const WordType last_entry_mask = query_size % word_size != 0 ? 
(WordType(1) << (query_size % word_size)) - 1 : ~WordType(0); nw_score_t myscore = score((i - 1) / word_size, j); // row 0 is implicit, NW matrix is shifted by i -> i-1 (see get_myers_score) int32_t pos = 0; while (i > 0 && j > 0) { int8_t r = 0; nw_score_t const above = i == 1 ? j : get_myers_score(i - 1, j, pv, mv, score, last_entry_mask); nw_score_t const diag = i == 1 ? j - 1 : get_myers_score(i - 1, j - 1, pv, mv, score, last_entry_mask); nw_score_t const left = get_myers_score(i, j - 1, pv, mv, score, last_entry_mask); if (left + 1 == myscore) { r = static_cast<int8_t>(AlignmentState::insertion); myscore = left; --j; } else if (above + 1 == myscore) { r = static_cast<int8_t>(AlignmentState::deletion); myscore = above; --i; } else { r = (diag == myscore ? static_cast<int8_t>(AlignmentState::match) : static_cast<int8_t>(AlignmentState::mismatch)); myscore = diag; --i; --j; } path[pos] = r; ++pos; } while (i > 0) { path[pos] = static_cast<int8_t>(AlignmentState::deletion); ++pos; --i; } while (j > 0) { path[pos] = static_cast<int8_t>(AlignmentState::insertion); ++pos; --j; } lengths[id] = pos; } __global__ void myers_backtrace_kernel(int8_t* paths_base, int32_t* lengths, int32_t max_path_length, batched_device_matrices<WordType>::device_interface* pvi, batched_device_matrices<WordType>::device_interface* mvi, batched_device_matrices<int32_t>::device_interface* scorei, int32_t const* sequence_lengths_d, int32_t n_alignments) { const int32_t idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= n_alignments) return; GW_CONSTEXPR int32_t word_size = sizeof(WordType) * CHAR_BIT; const int32_t query_size = sequence_lengths_d[2 * idx]; const int32_t target_size = sequence_lengths_d[2 * idx + 1]; const int32_t n_words = (query_size + word_size - 1) / word_size; const device_matrix_view<WordType> pv = pvi->get_matrix_view(idx, n_words, target_size + 1); const device_matrix_view<WordType> mv = mvi->get_matrix_view(idx, n_words, target_size + 1); const 
device_matrix_view<int32_t> score = scorei->get_matrix_view(idx, n_words, target_size + 1); myers_backtrace(paths_base, lengths, max_path_length, pv, mv, score, query_size, idx); } __global__ void myers_convert_to_full_score_matrix_kernel(batched_device_matrices<int32_t>::device_interface* fullscorei, batched_device_matrices<WordType>::device_interface* pvi, batched_device_matrices<WordType>::device_interface* mvi, batched_device_matrices<int32_t>::device_interface* scorei, int32_t const* sequence_lengths_d, int32_t alignment) { GW_CONSTEXPR int32_t word_size = sizeof(WordType) * CHAR_BIT; const int32_t query_size = sequence_lengths_d[2 * alignment]; const int32_t target_size = sequence_lengths_d[2 * alignment + 1]; const int32_t n_words = (query_size + word_size - 1) / word_size; assert(query_size > 0); int32_t i = blockIdx.x * blockDim.x + threadIdx.x; int32_t j = blockIdx.y * blockDim.y + threadIdx.y; if (j < target_size + 1 && i < query_size + 1) { const WordType last_entry_mask = query_size % word_size != 0 ? 
(WordType(1) << (query_size % word_size)) - 1 : ~WordType(0); device_matrix_view<WordType> pv = pvi->get_matrix_view(0, n_words, target_size + 1); device_matrix_view<WordType> mv = mvi->get_matrix_view(0, n_words, target_size + 1); device_matrix_view<int32_t> score = scorei->get_matrix_view(0, n_words, target_size + 1); device_matrix_view<int32_t> fullscore = fullscorei->get_matrix_view(0, query_size + 1, target_size + 1); int32_t myscore = 0; if (i == 0) myscore = j; else myscore = get_myers_score(i, j, pv, mv, score, last_entry_mask); fullscore(i, j) = myscore; } } __global__ void myers_compute_score_matrix_kernel( batched_device_matrices<WordType>::device_interface* pvi, batched_device_matrices<WordType>::device_interface* mvi, batched_device_matrices<int32_t>::device_interface* scorei, batched_device_matrices<WordType>::device_interface* query_patternsi, char const* sequences_d, int32_t const* sequence_lengths_d, int32_t max_sequence_length, int32_t n_alignments) { GW_CONSTEXPR int32_t word_size = sizeof(WordType) * CHAR_BIT; GW_CONSTEXPR int32_t warp_size = 32; assert(warpSize == warp_size); assert(threadIdx.x < warp_size); assert(blockIdx.x == 0); const int32_t alignment_idx = blockIdx.y * blockDim.y + threadIdx.y; if (alignment_idx >= n_alignments) return; const int32_t query_size = sequence_lengths_d[2 * alignment_idx]; const int32_t target_size = sequence_lengths_d[2 * alignment_idx + 1]; const char* const query = sequences_d + 2 * alignment_idx * max_sequence_length; const char* const target = sequences_d + (2 * alignment_idx + 1) * max_sequence_length; const int32_t n_words = (query_size + word_size - 1) / word_size; const int32_t n_warp_iterations = ceiling_divide(n_words, warp_size) * warp_size; assert(query_size > 0); device_matrix_view<WordType> pv = pvi->get_matrix_view(alignment_idx, n_words, target_size + 1); device_matrix_view<WordType> mv = mvi->get_matrix_view(alignment_idx, n_words, target_size + 1); device_matrix_view<int32_t> score = 
scorei->get_matrix_view(alignment_idx, n_words, target_size + 1); device_matrix_view<WordType> query_patterns = query_patternsi->get_matrix_view(alignment_idx, n_words, 4); for (int32_t idx = threadIdx.x; idx < n_words; idx += warp_size) { pv(idx, 0) = ~WordType(0); mv(idx, 0) = 0; score(idx, 0) = min((idx + 1) * word_size, query_size); // TODO query load is inefficient query_patterns(idx, 0) = myers_generate_query_pattern('A', query, query_size, idx * word_size); query_patterns(idx, 1) = myers_generate_query_pattern('C', query, query_size, idx * word_size); query_patterns(idx, 2) = myers_generate_query_pattern('T', query, query_size, idx * word_size); query_patterns(idx, 3) = myers_generate_query_pattern('G', query, query_size, idx * word_size); } __syncwarp(); for (int32_t t = 1; t <= target_size; ++t) { int32_t warp_carry = 0; if (threadIdx.x == 0) warp_carry = 1; // for global alignment the (implicit) first row has to be 0,1,2,3,... -> carry 1 for (int32_t idx = threadIdx.x; idx < n_warp_iterations; idx += warp_size) { if (idx < n_words) { const uint32_t warp_mask = idx / warp_size < n_words / warp_size ? 0xffff'ffffu : (1u << (n_words % warp_size)) - 1; WordType pv_local = pv(idx, t - 1); WordType mv_local = mv(idx, t - 1); const WordType highest_bit = WordType(1) << (idx == (n_words - 1) ? 
query_size - (n_words - 1) * word_size - 1 : word_size - 1); const WordType eq = get_query_pattern(query_patterns, idx, 0, target[t - 1], false); warp_carry = myers_advance_block(warp_mask, highest_bit, eq, pv_local, mv_local, warp_carry); score(idx, t) = score(idx, t - 1) + warp_carry; if (threadIdx.x == 0) warp_carry = 0; if (warp_mask == 0xffff'ffffu && (threadIdx.x == 31 || threadIdx.x == 0)) warp_carry = __shfl_down_sync(0x8000'0001u, warp_carry, warp_size - 1); if (threadIdx.x != 0) warp_carry = 0; pv(idx, t) = pv_local; mv(idx, t) = mv_local; } __syncwarp(); } } } __device__ int32_t myers_backtrace_banded(int8_t* path, device_matrix_view<WordType> const& pv, device_matrix_view<WordType> const& mv, device_matrix_view<int32_t> const& score, int32_t diagonal_begin, int32_t diagonal_end, int32_t band_width, int32_t target_size, int32_t query_size) { assert(threadIdx.x == 0); using nw_score_t = int32_t; GW_CONSTEXPR nw_score_t out_of_band = numeric_limits<nw_score_t>::max() - 1; // -1 to avoid integer overflow further down. assert(pv.num_rows() == score.num_rows()); assert(mv.num_rows() == score.num_rows()); assert(pv.num_cols() == score.num_cols()); assert(mv.num_cols() == score.num_cols()); assert(score.num_rows() == ceiling_divide(band_width, word_size)); int32_t i = band_width; int32_t j = target_size; const WordType last_entry_mask = band_width % word_size != 0 ? (WordType(1) << (band_width % word_size)) - 1 : ~WordType(0); nw_score_t myscore = score((i - 1) / word_size, j); // row 0 is implicit, NW matrix is shifted by i -> i-1 (see get_myers_score) int32_t pos = 0; while (j >= diagonal_end) { int8_t r = 0; nw_score_t const above = i <= 1 ? j : get_myers_score(i - 1, j, pv, mv, score, last_entry_mask); nw_score_t const diag = i <= 1 ? 
j - 1 : get_myers_score(i - 1, j - 1, pv, mv, score, last_entry_mask); nw_score_t const left = get_myers_score(i, j - 1, pv, mv, score, last_entry_mask); if (left + 1 == myscore) { r = static_cast<int8_t>(AlignmentState::insertion); myscore = left; --j; } else if (above + 1 == myscore) { r = static_cast<int8_t>(AlignmentState::deletion); myscore = above; --i; } else { r = (diag == myscore ? static_cast<int8_t>(AlignmentState::match) : static_cast<int8_t>(AlignmentState::mismatch)); myscore = diag; --i; --j; } path[pos] = r; ++pos; } while (j >= diagonal_begin) { int8_t r = 0; nw_score_t const above = i <= 1 ? j : get_myers_score(i - 1, j, pv, mv, score, last_entry_mask); nw_score_t const diag = i <= 0 ? j - 1 : get_myers_score(i, j - 1, pv, mv, score, last_entry_mask); nw_score_t const left = i >= band_width ? out_of_band : get_myers_score(i + 1, j - 1, pv, mv, score, last_entry_mask); if (left + 1 == myscore) { r = static_cast<int8_t>(AlignmentState::insertion); myscore = left; ++i; --j; } else if (above + 1 == myscore) { r = static_cast<int8_t>(AlignmentState::deletion); myscore = above; --i; } else { r = (diag == myscore ? static_cast<int8_t>(AlignmentState::match) : static_cast<int8_t>(AlignmentState::mismatch)); myscore = diag; --j; } path[pos] = r; ++pos; } while (i > 0 && j > 0) { int8_t r = 0; nw_score_t const above = i == 1 ? j : get_myers_score(i - 1, j, pv, mv, score, last_entry_mask); nw_score_t const diag = i == 1 ? j - 1 : get_myers_score(i - 1, j - 1, pv, mv, score, last_entry_mask); nw_score_t const left = i > band_width ? out_of_band : get_myers_score(i, j - 1, pv, mv, score, last_entry_mask); if (left + 1 == myscore) { r = static_cast<int8_t>(AlignmentState::insertion); myscore = left; --j; } else if (above + 1 == myscore) { r = static_cast<int8_t>(AlignmentState::deletion); myscore = above; --i; } else { r = (diag == myscore ? 
static_cast<int8_t>(AlignmentState::match) : static_cast<int8_t>(AlignmentState::mismatch)); myscore = diag; --i; --j; } path[pos] = r; ++pos; } while (i > 0) { path[pos] = static_cast<int8_t>(AlignmentState::deletion); ++pos; --i; } while (j > 0) { path[pos] = static_cast<int8_t>(AlignmentState::insertion); ++pos; --j; } return pos; } __device__ void myers_compute_scores_horizontal_band_impl( device_matrix_view<WordType>& pv, device_matrix_view<WordType>& mv, device_matrix_view<int32_t>& score, device_matrix_view<WordType>& query_patterns, char const* target_begin, char const* query_begin, const int32_t target_size, const int32_t t_begin, const int32_t t_end, const int32_t width, const int32_t n_words, const int32_t pattern_idx_offset) { assert(n_words == ceiling_divide(width, word_size)); assert(target_size >= 0); assert(t_begin <= t_end); const int32_t n_warp_iterations = ceiling_divide(n_words, warp_size) * warp_size; for (int32_t t = t_begin; t < t_end; ++t) { int32_t warp_carry = 0; if (threadIdx.x == 0) warp_carry = 1; // worst case for the top boarder of the band for (int32_t idx = threadIdx.x; idx < n_warp_iterations; idx += warp_size) { if (idx < n_words) { const uint32_t warp_mask = idx / warp_size < n_words / warp_size ? 0xffff'ffffu : (1u << (n_words % warp_size)) - 1; WordType pv_local = pv(idx, t - 1); WordType mv_local = mv(idx, t - 1); const WordType highest_bit = WordType(1) << (idx == (n_words - 1) ? 
width - (n_words - 1) * word_size - 1 : word_size - 1); const WordType eq = get_query_pattern(query_patterns, idx, pattern_idx_offset, target_begin[t - 1], false); warp_carry = myers_advance_block(warp_mask, highest_bit, eq, pv_local, mv_local, warp_carry); score(idx, t) = score(idx, t - 1) + warp_carry; if (threadIdx.x == 0) warp_carry = 0; if (warp_mask == 0xffff'ffffu && (threadIdx.x == 0 || threadIdx.x == 31)) warp_carry = __shfl_down_sync(0x8000'0001u, warp_carry, warp_size - 1); if (threadIdx.x != 0) warp_carry = 0; pv(idx, t) = pv_local; mv(idx, t) = mv_local; } __syncwarp(); } } } __device__ void myers_compute_scores_diagonal_band_impl( device_matrix_view<WordType>& pv, device_matrix_view<WordType>& mv, device_matrix_view<int32_t>& score, device_matrix_view<WordType>& query_patterns, char const* target_begin, char const* query_begin, const int32_t target_size, const int32_t t_begin, const int32_t t_end, const int32_t band_width, const int32_t n_words_band, const int32_t pattern_idx_offset) { assert(n_words_band == ceiling_divide(band_width, warp_size)); assert(band_width - (n_words_band - 1) * word_size >= 2); // we need at least two bits in the last word const int32_t n_warp_iterations = ceiling_divide(n_words_band, warp_size) * warp_size; for (int32_t t = t_begin; t < t_end; ++t) { int32_t carry = 0; if (threadIdx.x == 0) carry = 1; // worst case for the top boarder of the band for (int32_t idx = threadIdx.x; idx < n_warp_iterations; idx += warp_size) { // idx within band column const uint32_t warp_mask = idx / warp_size < n_words_band / warp_size ? 
0xffff'ffffu : (1u << (n_words_band % warp_size)) - 1; if (idx < n_words_band) { // data from the previous column WordType pv_local = warp_rightshift_sync(warp_mask, pv(idx, t - 1)); WordType mv_local = warp_rightshift_sync(warp_mask, mv(idx, t - 1)); if (threadIdx.x == 31 && warp_mask == 0xffff'ffffu) { if (idx < n_words_band - 1) { pv_local |= pv(idx + 1, t - 1) << (word_size - 1); mv_local |= mv(idx + 1, t - 1) << (word_size - 1); } } const WordType eq = get_query_pattern(query_patterns, idx, pattern_idx_offset + t - t_begin + 1, target_begin[t - 1], false); const WordType delta_right_bit = WordType(1) << (idx == (n_words_band - 1) ? band_width - (n_words_band - 1) * word_size - 2 : word_size - 2); const WordType delta_down_bit = delta_right_bit << 1; assert(delta_down_bit != 0); if (idx == n_words_band - 1) { // bits who have no left neighbor -> assume worst case: +1 pv_local |= delta_down_bit; mv_local &= ~delta_down_bit; } const int2 delta_right = myers_advance_block2(warp_mask, delta_right_bit, eq, pv_local, mv_local, carry); const int32_t delta_down = ((pv_local & delta_down_bit) == WordType(0) ? 0 : 1) - ((mv_local & delta_down_bit) == WordType(0) ? 0 : 1); // Since idx is relative to diagonal band, (idx, t-1) -> (idx,t) // corresponds to (n-1,t-1) -> (n,t) in the NW matrix. // To get from score'(n-1, t-1) -> score'(n, t-1) // add horizontal delta in row n-1 (delta_right.x) // and the vertical delta in column t (delta_down). 
score(idx, t) = score(idx, t - 1) + delta_right.x + delta_down; // Carry horizontal delta in row n (= delta_right.y) to next warp iteration if (threadIdx.x == 0) carry = 0; if (warp_mask == 0xffff'ffffu && (threadIdx.x == 0 || threadIdx.x == 31)) carry = __shfl_down_sync(0x8000'0001u, delta_right.y, warp_size - 1); if (threadIdx.x != 0) carry = 0; pv(idx, t) = pv_local; mv(idx, t) = mv_local; } __syncwarp(); } } } __device__ void myers_compute_scores_edit_dist_banded( int32_t& diagonal_begin, int32_t& diagonal_end, device_matrix_view<WordType>& pv, device_matrix_view<WordType>& mv, device_matrix_view<int32_t>& score, device_matrix_view<WordType>& query_patterns, char const* target_begin, char const* query_begin, int32_t const target_size, int32_t const query_size, int32_t const band_width, int32_t const n_words_band, int32_t const p, int32_t const alignment_idx) { // Note: 0-th row of the NW matrix is implicit for pv, mv and score! (given by the inital warp_carry) assert(warpSize == warp_size); assert(threadIdx.x < warp_size); assert(blockIdx.x == 0); assert(target_size > 0); assert(query_size > 0); assert(band_width > 0); assert(n_words_band > 0); assert(p >= 0); assert(alignment_idx >= 0); assert(pv.num_rows() == n_words_band); assert(mv.num_rows() == n_words_band); assert(score.num_rows() == n_words_band); assert(pv.num_cols() == target_size + 1); assert(mv.num_cols() == target_size + 1); assert(score.num_cols() == target_size + 1); for (int32_t idx = threadIdx.x; idx < n_words_band; idx += warp_size) { pv(idx, 0) = ~WordType(0); mv(idx, 0) = 0; score(idx, 0) = min((idx + 1) * word_size, band_width); } __syncwarp(); // This function computes a diagonal band of the NW matrix (Ukkonen algorithm). 
// In essence it computes the diagonals [-p, ..., 0, ..., p + target_size - query_size] (for query_size < target_size), // where diagonal -p starts at m(p,0), and p + target_size - query_size starts at m(0,p+target_size-query_size) // using Myers bit-vector algorithm with a word size of warp_size * sizeof(WordType). // // band_width is the width of this band = 1 + 2*p + abs(target_size - query_size). // // Note that for query_size >= target_size the diagonals [-p - (query_size - target_size), ..., 0, ..., p] are used. // This implementation computes the matrix band column by column. // To ease implementation band_width elements per column are computed for every column, // even though they are not needed for the first few and last few columns. // // In more detail: instead of just computing the diagonals: // // \\\\\00000| // \\\\\\0000| target_size=9, query_size=7, p=1 // 0\\\\\\000| // 00\\\\\\00| ("|" has no meaning - just to avoid multi-line comments with trailing"\") // 000\\\\\\0| // 0000\\\\\\| // 00000\\\\\| // // we compute horizontal stripes with n=band_width rows at the beginning and at the end. // Only the range [diagonal_begin,diagonal_end) // // ----\00000| // ----\\0000| // ----\\----| // ----\\----| // ----\\----| // 0000\\----| // 00000\----| if (band_width >= query_size) { // If the band_width is larger than the query_size just do a full Myers // i.e. do only one large horizontal stripe of width query_size. diagonal_begin = target_size + 1; diagonal_end = target_size + 1; myers_compute_scores_horizontal_band_impl(pv, mv, score, query_patterns, target_begin, query_begin, target_size, 1, target_size + 1, query_size, n_words_band, 0); } else { const int32_t symmetric_band = (band_width - min(1 + 2 * p + abs(target_size - query_size), query_size) == 0) ? 1 : 0; diagonal_begin = query_size < target_size ? target_size - query_size + p + 2 : p + 2 + (1 - symmetric_band); diagonal_end = query_size < target_size ? 
query_size - p + symmetric_band : query_size - (query_size - target_size) - p + 1; myers_compute_scores_horizontal_band_impl(pv, mv, score, query_patterns, target_begin, query_begin, target_size, 1, diagonal_begin, band_width, n_words_band, 0); myers_compute_scores_diagonal_band_impl(pv, mv, score, query_patterns, target_begin, query_begin, target_size, diagonal_begin, diagonal_end, band_width, n_words_band, 0); myers_compute_scores_horizontal_band_impl(pv, mv, score, query_patterns, target_begin, query_begin, target_size, diagonal_end, target_size + 1, band_width, n_words_band, query_size - band_width); } } __global__ void myers_banded_kernel( int8_t* paths_base, int32_t* path_lengths, int64_t const* path_starts, batched_device_matrices<WordType>::device_interface* pvi, batched_device_matrices<WordType>::device_interface* mvi, batched_device_matrices<int32_t>::device_interface* scorei, batched_device_matrices<WordType>::device_interface* query_patternsi, char const* sequences_d, int64_t const* sequence_starts_d, const int32_t max_bandwidth, const int32_t n_alignments) { assert(warpSize == warp_size); assert(threadIdx.x < warp_size); assert(blockIdx.x == 0); assert(max_bandwidth % word_size != 1); // we need at least two bits in the last word const int32_t alignment_idx = blockIdx.y * blockDim.y + threadIdx.y; if (alignment_idx >= n_alignments) return; const char* const query = sequences_d + sequence_starts_d[2 * alignment_idx]; const char* const target = sequences_d + sequence_starts_d[2 * alignment_idx + 1]; const int32_t query_size = target - query; const int32_t target_size = sequences_d + sequence_starts_d[2 * alignment_idx + 2] - target; const int32_t n_words = ceiling_divide(query_size, word_size); int8_t* path = paths_base + path_starts[alignment_idx]; if (max_bandwidth - 1 < abs(target_size - query_size)) { if (threadIdx.x == 0) { path_lengths[alignment_idx] = 0; } return; } device_matrix_view<WordType> query_pattern = 
query_patternsi->get_matrix_view(alignment_idx, n_words, 4); for (int32_t idx = threadIdx.x; idx < n_words; idx += warp_size) { // TODO query load is inefficient query_pattern(idx, 0) = myers_generate_query_pattern('A', query, query_size, idx * word_size); query_pattern(idx, 1) = myers_generate_query_pattern('C', query, query_size, idx * word_size); query_pattern(idx, 2) = myers_generate_query_pattern('T', query, query_size, idx * word_size); query_pattern(idx, 3) = myers_generate_query_pattern('G', query, query_size, idx * word_size); } __syncwarp(); assert(query_size > 0); // Use the Ukkonen algorithm for banding. // Take an initial guess for the edit distance: max_distance_estimate // and compute the maximal band of the NW matrix which is required for this distance. // If the computed distance is smaller accept and compute the backtrace/path, // otherwise retry with a larger guess (i.e. and larger band). int32_t max_distance_estimate = max(1, abs(target_size - query_size) + min(target_size, query_size) / initial_distance_guess_factor); device_matrix_view<WordType> pv; device_matrix_view<WordType> mv; device_matrix_view<int32_t> score; int32_t diagonal_begin = -1; int32_t diagonal_end = -1; int32_t band_width = 0; while (1) { int32_t p = min3(target_size, query_size, (max_distance_estimate - abs(target_size - query_size)) / 2); int32_t band_width_new = min(1 + 2 * p + abs(target_size - query_size), query_size); if (band_width_new % word_size == 1 && band_width_new != query_size) // we need at least two bits in the last word { p += 1; band_width_new = min(1 + 2 * p + abs(target_size - query_size), query_size); } if (band_width_new > max_bandwidth) { band_width_new = max_bandwidth; p = (band_width_new - 1 - abs(target_size - query_size)) / 2; } const int32_t n_words_band = ceiling_divide(band_width_new, word_size); if (static_cast<int64_t>(n_words_band) * static_cast<int64_t>(target_size + 1) > pvi->get_max_elements_per_matrix(alignment_idx)) { band_width = 
-band_width; break; } band_width = band_width_new; pv = pvi->get_matrix_view(alignment_idx, n_words_band, target_size + 1); mv = mvi->get_matrix_view(alignment_idx, n_words_band, target_size + 1); score = scorei->get_matrix_view(alignment_idx, n_words_band, target_size + 1); diagonal_begin = -1; diagonal_end = -1; myers_compute_scores_edit_dist_banded(diagonal_begin, diagonal_end, pv, mv, score, query_pattern, target, query, target_size, query_size, band_width, n_words_band, p, alignment_idx); __syncwarp(); const int32_t cur_edit_distance = score(n_words_band - 1, target_size); if (cur_edit_distance <= max_distance_estimate || band_width == query_size) { break; } if (band_width == max_bandwidth) { band_width = -band_width; break; } max_distance_estimate *= 2; } if (threadIdx.x == 0) { int32_t path_length = 0; if (band_width != 0) { path_length = band_width > 0 ? 1 : -1; band_width = abs(band_width); path_length *= myers_backtrace_banded(path, pv, mv, score, diagonal_begin, diagonal_end, band_width, target_size, query_size); } path_lengths[alignment_idx] = path_length; } } } // namespace myers int32_t myers_compute_edit_distance(std::string const& target, std::string const& query) { constexpr int32_t warp_size = 32; constexpr int32_t word_size = sizeof(myers::WordType) * CHAR_BIT; if (get_size(query) == 0) return get_size(target); const int32_t n_words = (get_size(query) + word_size - 1) / word_size; matrix<int32_t> score_host; hipStream_t stream; GW_CU_CHECK_ERR(hipStreamCreate(&stream)); { DefaultDeviceAllocator allocator = create_default_device_allocator(); int32_t max_sequence_length = ::max(get_size(target), get_size(query)); device_buffer<char> sequences_d(2 * max_sequence_length, allocator, stream); device_buffer<int32_t> sequence_lengths_d(2, allocator, stream); batched_device_matrices<myers::WordType> pv(1, n_words * (get_size(target) + 1), allocator, stream); batched_device_matrices<myers::WordType> mv(1, n_words * (get_size(target) + 1), allocator, 
stream); batched_device_matrices<int32_t> score(1, n_words * (get_size(target) + 1), allocator, stream); batched_device_matrices<myers::WordType> query_patterns(1, n_words * 4, allocator, stream); std::array<int32_t, 2> lengths = {static_cast<int32_t>(get_size(query)), static_cast<int32_t>(get_size(target))}; GW_CU_CHECK_ERR(hipMemcpyAsync(sequences_d.data(), query.data(), sizeof(char) * get_size(query), hipMemcpyHostToDevice, stream)); GW_CU_CHECK_ERR(hipMemcpyAsync(sequences_d.data() + max_sequence_length, target.data(), sizeof(char) * get_size(target), hipMemcpyHostToDevice, stream)); GW_CU_CHECK_ERR(hipMemcpyAsync(sequence_lengths_d.data(), lengths.data(), sizeof(int32_t) * 2, hipMemcpyHostToDevice, stream)); hipLaunchKernelGGL(( myers::myers_compute_score_matrix_kernel), dim3(1), dim3(warp_size), 0, stream, pv.get_device_interface(), mv.get_device_interface(), score.get_device_interface(), query_patterns.get_device_interface(), sequences_d.data(), sequence_lengths_d.data(), max_sequence_length, 1); score_host = score.get_matrix(0, n_words, get_size(target) + 1, stream); GW_CU_CHECK_ERR(hipStreamSynchronize(stream)); } GW_CU_CHECK_ERR(hipStreamDestroy(stream)); return score_host(n_words - 1, get_size(target)); } matrix<int32_t> myers_get_full_score_matrix(std::string const& target, std::string const& query) { constexpr int32_t warp_size = 32; constexpr int32_t word_size = sizeof(myers::WordType) * CHAR_BIT; if (get_size(target) == 0) { matrix<int32_t> r(get_size(query) + 1, 1); std::iota(r.data(), r.data() + get_size(query) + 1, 0); return r; } if (get_size(query) == 0) { matrix<int32_t> r(1, get_size(target) + 1); std::iota(r.data(), r.data() + get_size(target) + 1, 0); return r; } matrix<int32_t> fullscore_host; hipStream_t stream; GW_CU_CHECK_ERR(hipStreamCreate(&stream)); { DefaultDeviceAllocator allocator = create_default_device_allocator(); int32_t max_sequence_length = ::max(get_size(target), get_size(query)); device_buffer<char> sequences_d(2 * 
max_sequence_length, allocator, stream); device_buffer<int32_t> sequence_lengths_d(2, allocator, stream); const int32_t n_words = (get_size(query) + word_size - 1) / word_size; batched_device_matrices<myers::WordType> pv(1, n_words * (get_size(target) + 1), allocator, stream); batched_device_matrices<myers::WordType> mv(1, n_words * (get_size(target) + 1), allocator, stream); batched_device_matrices<int32_t> score(1, n_words * (get_size(target) + 1), allocator, stream); batched_device_matrices<myers::WordType> query_patterns(1, n_words * 4, allocator, stream); batched_device_matrices<int32_t> fullscore(1, (get_size(query) + 1) * (get_size(target) + 1), allocator, stream); std::array<int32_t, 2> lengths = {static_cast<int32_t>(get_size(query)), static_cast<int32_t>(get_size(target))}; GW_CU_CHECK_ERR(hipMemcpyAsync(sequences_d.data(), query.data(), sizeof(char) * get_size(query), hipMemcpyHostToDevice, stream)); GW_CU_CHECK_ERR(hipMemcpyAsync(sequences_d.data() + max_sequence_length, target.data(), sizeof(char) * get_size(target), hipMemcpyHostToDevice, stream)); GW_CU_CHECK_ERR(hipMemcpyAsync(sequence_lengths_d.data(), lengths.data(), sizeof(int32_t) * 2, hipMemcpyHostToDevice, stream)); hipLaunchKernelGGL(( myers::myers_compute_score_matrix_kernel), dim3(1), dim3(warp_size), 0, stream, pv.get_device_interface(), mv.get_device_interface(), score.get_device_interface(), query_patterns.get_device_interface(), sequences_d.data(), sequence_lengths_d.data(), max_sequence_length, 1); { dim3 n_threads = {32, 4, 1}; dim3 n_blocks = {1, 1, 1}; n_blocks.x = ceiling_divide<int32_t>(get_size<int32_t>(query) + 1, n_threads.x); n_blocks.y = ceiling_divide<int32_t>(get_size<int32_t>(target) + 1, n_threads.y); hipLaunchKernelGGL(( myers::myers_convert_to_full_score_matrix_kernel), dim3(n_blocks), dim3(n_threads), 0, stream, fullscore.get_device_interface(), pv.get_device_interface(), mv.get_device_interface(), score.get_device_interface(), sequence_lengths_d.data(), 0); } 
fullscore_host = fullscore.get_matrix(0, get_size(query) + 1, get_size(target) + 1, stream); } GW_CU_CHECK_ERR(hipStreamSynchronize(stream)); GW_CU_CHECK_ERR(hipStreamDestroy(stream)); return fullscore_host; } void myers_gpu(int8_t* paths_d, int32_t* path_lengths_d, int32_t max_path_length, char const* sequences_d, int32_t const* sequence_lengths_d, int32_t max_sequence_length, int32_t n_alignments, batched_device_matrices<myers::WordType>& pv, batched_device_matrices<myers::WordType>& mv, batched_device_matrices<int32_t>& score, batched_device_matrices<myers::WordType>& query_patterns, hipStream_t stream) { { const dim3 threads(warp_size, 1, 1); const dim3 blocks(1, ceiling_divide<int32_t>(n_alignments, threads.y), 1); hipLaunchKernelGGL(( myers::myers_compute_score_matrix_kernel), dim3(blocks), dim3(threads), 0, stream, pv.get_device_interface(), mv.get_device_interface(), score.get_device_interface(), query_patterns.get_device_interface(), sequences_d, sequence_lengths_d, max_sequence_length, n_alignments); } { const dim3 threads(128, 1, 1); const dim3 blocks(ceiling_divide<int32_t>(n_alignments, threads.x), 1, 1); hipLaunchKernelGGL(( myers::myers_backtrace_kernel), dim3(blocks), dim3(threads), 0, stream, paths_d, path_lengths_d, max_path_length, pv.get_device_interface(), mv.get_device_interface(), score.get_device_interface(), sequence_lengths_d, n_alignments); } } void myers_banded_gpu(int8_t* paths_d, int32_t* path_lengths_d, int64_t const* path_starts_d, char const* sequences_d, int64_t const* sequence_starts_d, int32_t n_alignments, int32_t max_bandwidth, batched_device_matrices<myers::WordType>& pv, batched_device_matrices<myers::WordType>& mv, batched_device_matrices<int32_t>& score, batched_device_matrices<myers::WordType>& query_patterns, hipStream_t stream) { const dim3 threads(warp_size, 1, 1); const dim3 blocks(1, ceiling_divide<int32_t>(n_alignments, threads.y), 1); hipLaunchKernelGGL(( myers::myers_banded_kernel), dim3(blocks), dim3(threads), 0, 
stream, paths_d, path_lengths_d, path_starts_d, pv.get_device_interface(), mv.get_device_interface(), score.get_device_interface(), query_patterns.get_device_interface(), sequences_d, sequence_starts_d, max_bandwidth, n_alignments); } } // namespace cudaaligner } // namespace genomeworks } // namespace claraparabricks
ae9f7e801db3f472b813e88261746e7891e51a5a.cu
/* * Copyright 2019-2020 NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "myers_gpu.cuh" #include "batched_device_matrices.cuh" #include <claraparabricks/genomeworks/cudaaligner/aligner.hpp> #include <claraparabricks/genomeworks/utils/signed_integer_utils.hpp> #include <claraparabricks/genomeworks/utils/limits.cuh> #include <claraparabricks/genomeworks/utils/mathutils.hpp> #include <claraparabricks/genomeworks/utils/cudautils.hpp> #include <claraparabricks/genomeworks/utils/allocator.hpp> #include <claraparabricks/genomeworks/utils/device_buffer.hpp> #include <cassert> #include <climits> #include <vector> #include <numeric> namespace claraparabricks { namespace genomeworks { namespace cudaaligner { constexpr int32_t warp_size = 32; namespace myers { constexpr int32_t initial_distance_guess_factor = 20; inline __device__ WordType warp_leftshift_sync(uint32_t warp_mask, WordType v) { assert(((warp_mask >> (threadIdx.x % warp_size)) & 1u) == 1u); // 4 threads, word_size = 4 example: thread 0 | thread 1 | thread 2 | thread 3 // v = 0101 | 0111 | 0011 | 1101 -> 1010 | 1110 | 0111 | 1010 const WordType x = __shfl_up_sync(warp_mask, v >> (word_size - 1), 1); assert((x & ~WordType(1)) == 0); v <<= 1; if (threadIdx.x != 0) v |= x; return v; } inline __device__ WordType warp_rightshift_sync(uint32_t warp_mask, WordType v) { assert(((warp_mask >> (threadIdx.x % warp_size)) & 1u) == 1u); // 4 threads, word_size = 4 example: thread 0 | thread 1 | thread 2 | 
thread 3 // v = 0101 | 0111 | 0011 | 1101 -> 0010 | 1011 | 1001 | 1110 const WordType x = __shfl_down_sync(warp_mask, v << (word_size - 1), 1); assert((x & ~(WordType(1) << (word_size - 1))) == 0); v >>= 1; if ((warp_mask >> threadIdx.x) > 1u) v |= x; return v; } inline __device__ WordType warp_add_sync(uint32_t warp_mask, WordType a, WordType b) { static_assert(sizeof(WordType) == 4, "This function assumes WordType to have 4 bytes."); static_assert(CHAR_BIT == 8, "This function assumes a char width of 8 bit."); assert(((warp_mask >> (threadIdx.x % warp_size)) & 1u) == 1u); const uint64_t ax = a; const uint64_t bx = b; uint64_t r = ax + bx; uint32_t carry = static_cast<uint32_t>(r >> 32); if (warp_mask == 1u) { return static_cast<WordType>(r); } r &= 0xffff'ffffull; // TODO: I think due to the structure of the Myer blocks, // a carry cannot propagate over more than a single block. // I.e. a single carry propagation without the loop should be sufficient. while (__any_sync(warp_mask, carry)) { uint32_t x = __shfl_up_sync(warp_mask, carry, 1); if (threadIdx.x != 0) r += x; carry = static_cast<uint32_t>(r >> 32); r &= 0xffff'ffffull; } return static_cast<WordType>(r); } __device__ int32_t myers_advance_block(uint32_t warp_mask, WordType highest_bit, WordType eq, WordType& pv, WordType& mv, int32_t carry_in) { assert((pv & mv) == WordType(0)); // Stage 1 WordType xv = eq | mv; if (carry_in < 0) eq |= WordType(1); WordType xh = warp_add_sync(warp_mask, eq & pv, pv); xh = (xh ^ pv) | eq; WordType ph = mv | (~(xh | pv)); WordType mh = pv & xh; int32_t carry_out = ((ph & highest_bit) == WordType(0) ? 0 : 1) - ((mh & highest_bit) == WordType(0) ? 
0 : 1); ph = warp_leftshift_sync(warp_mask, ph); mh = warp_leftshift_sync(warp_mask, mh); if (carry_in < 0) mh |= WordType(1); if (carry_in > 0) ph |= WordType(1); // Stage 2 pv = mh | (~(xv | ph)); mv = ph & xv; return carry_out; } __device__ int2 myers_advance_block2(uint32_t warp_mask, WordType highest_bit, WordType eq, WordType& pv, WordType& mv, int32_t carry_in) { assert((pv & mv) == WordType(0)); // Stage 1 WordType xv = eq | mv; if (carry_in < 0) eq |= WordType(1); WordType xh = warp_add_sync(warp_mask, eq & pv, pv); xh = (xh ^ pv) | eq; WordType ph = mv | (~(xh | pv)); WordType mh = pv & xh; int2 carry_out; carry_out.x = ((ph & highest_bit) == WordType(0) ? 0 : 1) - ((mh & highest_bit) == WordType(0) ? 0 : 1); carry_out.y = ((ph & (highest_bit << 1)) == WordType(0) ? 0 : 1) - ((mh & (highest_bit << 1)) == WordType(0) ? 0 : 1); ph = warp_leftshift_sync(warp_mask, ph); mh = warp_leftshift_sync(warp_mask, mh); if (carry_in < 0) mh |= WordType(1); if (carry_in > 0) ph |= WordType(1); // Stage 2 pv = mh | (~(xv | ph)); mv = ph & xv; return carry_out; } __device__ WordType myers_generate_query_pattern(char x, char const* query, int32_t query_size, int32_t offset) { // Sets a 1 bit at the position of every matching character assert(offset < query_size); const int32_t max_i = min(query_size - offset, word_size); WordType r = 0; for (int32_t i = 0; i < max_i; ++i) { if (x == query[i + offset]) r = r | (WordType(1) << i); } return r; } inline __device__ WordType get_query_pattern(device_matrix_view<WordType>& query_patterns, int32_t idx, int32_t query_begin_offset, char x, bool reverse) { static_assert(std::is_unsigned<WordType>::value, "WordType has to be an unsigned type for well-defined >> operations."); assert(x >= 0); assert(x == 'A' || x == 'C' || x == 'G' || x == 'T'); const int32_t char_idx = (x >> 1) & 0x3u; // [A,C,T,G] -> [0,1,2,3] // 4-bit word example: // query_patterns contains character match bit patterns "XXXX" for the full query string. 
// we want the bit pattern "yyyy" for a view of on the query string starting at eg. character 11: // 4 3 2 1 0 (pattern index) // XXXX XXXX XXXX [XXXX] [XXXX] // YYY Yyyy y // 1 0 (idx) // // query_begin_offset = 11 // => idx_offset = 11/4 = 2, shift = 11%4 = 3 const int32_t idx_offset = query_begin_offset / word_size; const int32_t shift = query_begin_offset % word_size; WordType r = query_patterns(idx + idx_offset, char_idx); if (shift != 0) { r >>= shift; if (idx + idx_offset + 1 < query_patterns.num_rows()) { r |= query_patterns(idx + idx_offset + 1, char_idx) << (word_size - shift); } } return r; } inline __device__ int32_t get_myers_score(int32_t i, int32_t j, device_matrix_view<WordType> const& pv, device_matrix_view<WordType> const& mv, device_matrix_view<int32_t> const& score, WordType last_entry_mask) { assert(i > 0); // row 0 is implicit, NW matrix is shifted by i -> i-1 const int32_t word_idx = (i - 1) / word_size; const int32_t bit_idx = (i - 1) % word_size; int32_t s = score(word_idx, j); WordType mask = (~WordType(1)) << bit_idx; if (word_idx == score.num_rows() - 1) mask &= last_entry_mask; s -= __popc(mask & pv(word_idx, j)); s += __popc(mask & mv(word_idx, j)); return s; } __device__ void myers_backtrace(int8_t* paths_base, int32_t* lengths, int32_t max_path_length, device_matrix_view<WordType> const& pv, device_matrix_view<WordType> const& mv, device_matrix_view<int32_t> const& score, int32_t query_size, int32_t id) { using nw_score_t = int32_t; assert(pv.num_rows() == score.num_rows()); assert(mv.num_rows() == score.num_rows()); assert(pv.num_cols() == score.num_cols()); assert(mv.num_cols() == score.num_cols()); assert(score.num_rows() == ceiling_divide(query_size, word_size)); int32_t i = query_size; int32_t j = score.num_cols() - 1; int8_t* path = paths_base + id * static_cast<ptrdiff_t>(max_path_length); const WordType last_entry_mask = query_size % word_size != 0 ? 
(WordType(1) << (query_size % word_size)) - 1 : ~WordType(0); nw_score_t myscore = score((i - 1) / word_size, j); // row 0 is implicit, NW matrix is shifted by i -> i-1 (see get_myers_score) int32_t pos = 0; while (i > 0 && j > 0) { int8_t r = 0; nw_score_t const above = i == 1 ? j : get_myers_score(i - 1, j, pv, mv, score, last_entry_mask); nw_score_t const diag = i == 1 ? j - 1 : get_myers_score(i - 1, j - 1, pv, mv, score, last_entry_mask); nw_score_t const left = get_myers_score(i, j - 1, pv, mv, score, last_entry_mask); if (left + 1 == myscore) { r = static_cast<int8_t>(AlignmentState::insertion); myscore = left; --j; } else if (above + 1 == myscore) { r = static_cast<int8_t>(AlignmentState::deletion); myscore = above; --i; } else { r = (diag == myscore ? static_cast<int8_t>(AlignmentState::match) : static_cast<int8_t>(AlignmentState::mismatch)); myscore = diag; --i; --j; } path[pos] = r; ++pos; } while (i > 0) { path[pos] = static_cast<int8_t>(AlignmentState::deletion); ++pos; --i; } while (j > 0) { path[pos] = static_cast<int8_t>(AlignmentState::insertion); ++pos; --j; } lengths[id] = pos; } __global__ void myers_backtrace_kernel(int8_t* paths_base, int32_t* lengths, int32_t max_path_length, batched_device_matrices<WordType>::device_interface* pvi, batched_device_matrices<WordType>::device_interface* mvi, batched_device_matrices<int32_t>::device_interface* scorei, int32_t const* sequence_lengths_d, int32_t n_alignments) { const int32_t idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= n_alignments) return; GW_CONSTEXPR int32_t word_size = sizeof(WordType) * CHAR_BIT; const int32_t query_size = sequence_lengths_d[2 * idx]; const int32_t target_size = sequence_lengths_d[2 * idx + 1]; const int32_t n_words = (query_size + word_size - 1) / word_size; const device_matrix_view<WordType> pv = pvi->get_matrix_view(idx, n_words, target_size + 1); const device_matrix_view<WordType> mv = mvi->get_matrix_view(idx, n_words, target_size + 1); const 
device_matrix_view<int32_t> score = scorei->get_matrix_view(idx, n_words, target_size + 1); myers_backtrace(paths_base, lengths, max_path_length, pv, mv, score, query_size, idx); } __global__ void myers_convert_to_full_score_matrix_kernel(batched_device_matrices<int32_t>::device_interface* fullscorei, batched_device_matrices<WordType>::device_interface* pvi, batched_device_matrices<WordType>::device_interface* mvi, batched_device_matrices<int32_t>::device_interface* scorei, int32_t const* sequence_lengths_d, int32_t alignment) { GW_CONSTEXPR int32_t word_size = sizeof(WordType) * CHAR_BIT; const int32_t query_size = sequence_lengths_d[2 * alignment]; const int32_t target_size = sequence_lengths_d[2 * alignment + 1]; const int32_t n_words = (query_size + word_size - 1) / word_size; assert(query_size > 0); int32_t i = blockIdx.x * blockDim.x + threadIdx.x; int32_t j = blockIdx.y * blockDim.y + threadIdx.y; if (j < target_size + 1 && i < query_size + 1) { const WordType last_entry_mask = query_size % word_size != 0 ? 
(WordType(1) << (query_size % word_size)) - 1 : ~WordType(0); device_matrix_view<WordType> pv = pvi->get_matrix_view(0, n_words, target_size + 1); device_matrix_view<WordType> mv = mvi->get_matrix_view(0, n_words, target_size + 1); device_matrix_view<int32_t> score = scorei->get_matrix_view(0, n_words, target_size + 1); device_matrix_view<int32_t> fullscore = fullscorei->get_matrix_view(0, query_size + 1, target_size + 1); int32_t myscore = 0; if (i == 0) myscore = j; else myscore = get_myers_score(i, j, pv, mv, score, last_entry_mask); fullscore(i, j) = myscore; } } __global__ void myers_compute_score_matrix_kernel( batched_device_matrices<WordType>::device_interface* pvi, batched_device_matrices<WordType>::device_interface* mvi, batched_device_matrices<int32_t>::device_interface* scorei, batched_device_matrices<WordType>::device_interface* query_patternsi, char const* sequences_d, int32_t const* sequence_lengths_d, int32_t max_sequence_length, int32_t n_alignments) { GW_CONSTEXPR int32_t word_size = sizeof(WordType) * CHAR_BIT; GW_CONSTEXPR int32_t warp_size = 32; assert(warpSize == warp_size); assert(threadIdx.x < warp_size); assert(blockIdx.x == 0); const int32_t alignment_idx = blockIdx.y * blockDim.y + threadIdx.y; if (alignment_idx >= n_alignments) return; const int32_t query_size = sequence_lengths_d[2 * alignment_idx]; const int32_t target_size = sequence_lengths_d[2 * alignment_idx + 1]; const char* const query = sequences_d + 2 * alignment_idx * max_sequence_length; const char* const target = sequences_d + (2 * alignment_idx + 1) * max_sequence_length; const int32_t n_words = (query_size + word_size - 1) / word_size; const int32_t n_warp_iterations = ceiling_divide(n_words, warp_size) * warp_size; assert(query_size > 0); device_matrix_view<WordType> pv = pvi->get_matrix_view(alignment_idx, n_words, target_size + 1); device_matrix_view<WordType> mv = mvi->get_matrix_view(alignment_idx, n_words, target_size + 1); device_matrix_view<int32_t> score = 
scorei->get_matrix_view(alignment_idx, n_words, target_size + 1); device_matrix_view<WordType> query_patterns = query_patternsi->get_matrix_view(alignment_idx, n_words, 4); for (int32_t idx = threadIdx.x; idx < n_words; idx += warp_size) { pv(idx, 0) = ~WordType(0); mv(idx, 0) = 0; score(idx, 0) = min((idx + 1) * word_size, query_size); // TODO query load is inefficient query_patterns(idx, 0) = myers_generate_query_pattern('A', query, query_size, idx * word_size); query_patterns(idx, 1) = myers_generate_query_pattern('C', query, query_size, idx * word_size); query_patterns(idx, 2) = myers_generate_query_pattern('T', query, query_size, idx * word_size); query_patterns(idx, 3) = myers_generate_query_pattern('G', query, query_size, idx * word_size); } __syncwarp(); for (int32_t t = 1; t <= target_size; ++t) { int32_t warp_carry = 0; if (threadIdx.x == 0) warp_carry = 1; // for global alignment the (implicit) first row has to be 0,1,2,3,... -> carry 1 for (int32_t idx = threadIdx.x; idx < n_warp_iterations; idx += warp_size) { if (idx < n_words) { const uint32_t warp_mask = idx / warp_size < n_words / warp_size ? 0xffff'ffffu : (1u << (n_words % warp_size)) - 1; WordType pv_local = pv(idx, t - 1); WordType mv_local = mv(idx, t - 1); const WordType highest_bit = WordType(1) << (idx == (n_words - 1) ? 
query_size - (n_words - 1) * word_size - 1 : word_size - 1); const WordType eq = get_query_pattern(query_patterns, idx, 0, target[t - 1], false); warp_carry = myers_advance_block(warp_mask, highest_bit, eq, pv_local, mv_local, warp_carry); score(idx, t) = score(idx, t - 1) + warp_carry; if (threadIdx.x == 0) warp_carry = 0; if (warp_mask == 0xffff'ffffu && (threadIdx.x == 31 || threadIdx.x == 0)) warp_carry = __shfl_down_sync(0x8000'0001u, warp_carry, warp_size - 1); if (threadIdx.x != 0) warp_carry = 0; pv(idx, t) = pv_local; mv(idx, t) = mv_local; } __syncwarp(); } } } __device__ int32_t myers_backtrace_banded(int8_t* path, device_matrix_view<WordType> const& pv, device_matrix_view<WordType> const& mv, device_matrix_view<int32_t> const& score, int32_t diagonal_begin, int32_t diagonal_end, int32_t band_width, int32_t target_size, int32_t query_size) { assert(threadIdx.x == 0); using nw_score_t = int32_t; GW_CONSTEXPR nw_score_t out_of_band = numeric_limits<nw_score_t>::max() - 1; // -1 to avoid integer overflow further down. assert(pv.num_rows() == score.num_rows()); assert(mv.num_rows() == score.num_rows()); assert(pv.num_cols() == score.num_cols()); assert(mv.num_cols() == score.num_cols()); assert(score.num_rows() == ceiling_divide(band_width, word_size)); int32_t i = band_width; int32_t j = target_size; const WordType last_entry_mask = band_width % word_size != 0 ? (WordType(1) << (band_width % word_size)) - 1 : ~WordType(0); nw_score_t myscore = score((i - 1) / word_size, j); // row 0 is implicit, NW matrix is shifted by i -> i-1 (see get_myers_score) int32_t pos = 0; while (j >= diagonal_end) { int8_t r = 0; nw_score_t const above = i <= 1 ? j : get_myers_score(i - 1, j, pv, mv, score, last_entry_mask); nw_score_t const diag = i <= 1 ? 
j - 1 : get_myers_score(i - 1, j - 1, pv, mv, score, last_entry_mask); nw_score_t const left = get_myers_score(i, j - 1, pv, mv, score, last_entry_mask); if (left + 1 == myscore) { r = static_cast<int8_t>(AlignmentState::insertion); myscore = left; --j; } else if (above + 1 == myscore) { r = static_cast<int8_t>(AlignmentState::deletion); myscore = above; --i; } else { r = (diag == myscore ? static_cast<int8_t>(AlignmentState::match) : static_cast<int8_t>(AlignmentState::mismatch)); myscore = diag; --i; --j; } path[pos] = r; ++pos; } while (j >= diagonal_begin) { int8_t r = 0; nw_score_t const above = i <= 1 ? j : get_myers_score(i - 1, j, pv, mv, score, last_entry_mask); nw_score_t const diag = i <= 0 ? j - 1 : get_myers_score(i, j - 1, pv, mv, score, last_entry_mask); nw_score_t const left = i >= band_width ? out_of_band : get_myers_score(i + 1, j - 1, pv, mv, score, last_entry_mask); if (left + 1 == myscore) { r = static_cast<int8_t>(AlignmentState::insertion); myscore = left; ++i; --j; } else if (above + 1 == myscore) { r = static_cast<int8_t>(AlignmentState::deletion); myscore = above; --i; } else { r = (diag == myscore ? static_cast<int8_t>(AlignmentState::match) : static_cast<int8_t>(AlignmentState::mismatch)); myscore = diag; --j; } path[pos] = r; ++pos; } while (i > 0 && j > 0) { int8_t r = 0; nw_score_t const above = i == 1 ? j : get_myers_score(i - 1, j, pv, mv, score, last_entry_mask); nw_score_t const diag = i == 1 ? j - 1 : get_myers_score(i - 1, j - 1, pv, mv, score, last_entry_mask); nw_score_t const left = i > band_width ? out_of_band : get_myers_score(i, j - 1, pv, mv, score, last_entry_mask); if (left + 1 == myscore) { r = static_cast<int8_t>(AlignmentState::insertion); myscore = left; --j; } else if (above + 1 == myscore) { r = static_cast<int8_t>(AlignmentState::deletion); myscore = above; --i; } else { r = (diag == myscore ? 
static_cast<int8_t>(AlignmentState::match) : static_cast<int8_t>(AlignmentState::mismatch)); myscore = diag; --i; --j; } path[pos] = r; ++pos; } while (i > 0) { path[pos] = static_cast<int8_t>(AlignmentState::deletion); ++pos; --i; } while (j > 0) { path[pos] = static_cast<int8_t>(AlignmentState::insertion); ++pos; --j; } return pos; } __device__ void myers_compute_scores_horizontal_band_impl( device_matrix_view<WordType>& pv, device_matrix_view<WordType>& mv, device_matrix_view<int32_t>& score, device_matrix_view<WordType>& query_patterns, char const* target_begin, char const* query_begin, const int32_t target_size, const int32_t t_begin, const int32_t t_end, const int32_t width, const int32_t n_words, const int32_t pattern_idx_offset) { assert(n_words == ceiling_divide(width, word_size)); assert(target_size >= 0); assert(t_begin <= t_end); const int32_t n_warp_iterations = ceiling_divide(n_words, warp_size) * warp_size; for (int32_t t = t_begin; t < t_end; ++t) { int32_t warp_carry = 0; if (threadIdx.x == 0) warp_carry = 1; // worst case for the top boarder of the band for (int32_t idx = threadIdx.x; idx < n_warp_iterations; idx += warp_size) { if (idx < n_words) { const uint32_t warp_mask = idx / warp_size < n_words / warp_size ? 0xffff'ffffu : (1u << (n_words % warp_size)) - 1; WordType pv_local = pv(idx, t - 1); WordType mv_local = mv(idx, t - 1); const WordType highest_bit = WordType(1) << (idx == (n_words - 1) ? 
width - (n_words - 1) * word_size - 1 : word_size - 1); const WordType eq = get_query_pattern(query_patterns, idx, pattern_idx_offset, target_begin[t - 1], false); warp_carry = myers_advance_block(warp_mask, highest_bit, eq, pv_local, mv_local, warp_carry); score(idx, t) = score(idx, t - 1) + warp_carry; if (threadIdx.x == 0) warp_carry = 0; if (warp_mask == 0xffff'ffffu && (threadIdx.x == 0 || threadIdx.x == 31)) warp_carry = __shfl_down_sync(0x8000'0001u, warp_carry, warp_size - 1); if (threadIdx.x != 0) warp_carry = 0; pv(idx, t) = pv_local; mv(idx, t) = mv_local; } __syncwarp(); } } } __device__ void myers_compute_scores_diagonal_band_impl( device_matrix_view<WordType>& pv, device_matrix_view<WordType>& mv, device_matrix_view<int32_t>& score, device_matrix_view<WordType>& query_patterns, char const* target_begin, char const* query_begin, const int32_t target_size, const int32_t t_begin, const int32_t t_end, const int32_t band_width, const int32_t n_words_band, const int32_t pattern_idx_offset) { assert(n_words_band == ceiling_divide(band_width, warp_size)); assert(band_width - (n_words_band - 1) * word_size >= 2); // we need at least two bits in the last word const int32_t n_warp_iterations = ceiling_divide(n_words_band, warp_size) * warp_size; for (int32_t t = t_begin; t < t_end; ++t) { int32_t carry = 0; if (threadIdx.x == 0) carry = 1; // worst case for the top boarder of the band for (int32_t idx = threadIdx.x; idx < n_warp_iterations; idx += warp_size) { // idx within band column const uint32_t warp_mask = idx / warp_size < n_words_band / warp_size ? 
0xffff'ffffu : (1u << (n_words_band % warp_size)) - 1; if (idx < n_words_band) { // data from the previous column WordType pv_local = warp_rightshift_sync(warp_mask, pv(idx, t - 1)); WordType mv_local = warp_rightshift_sync(warp_mask, mv(idx, t - 1)); if (threadIdx.x == 31 && warp_mask == 0xffff'ffffu) { if (idx < n_words_band - 1) { pv_local |= pv(idx + 1, t - 1) << (word_size - 1); mv_local |= mv(idx + 1, t - 1) << (word_size - 1); } } const WordType eq = get_query_pattern(query_patterns, idx, pattern_idx_offset + t - t_begin + 1, target_begin[t - 1], false); const WordType delta_right_bit = WordType(1) << (idx == (n_words_band - 1) ? band_width - (n_words_band - 1) * word_size - 2 : word_size - 2); const WordType delta_down_bit = delta_right_bit << 1; assert(delta_down_bit != 0); if (idx == n_words_band - 1) { // bits who have no left neighbor -> assume worst case: +1 pv_local |= delta_down_bit; mv_local &= ~delta_down_bit; } const int2 delta_right = myers_advance_block2(warp_mask, delta_right_bit, eq, pv_local, mv_local, carry); const int32_t delta_down = ((pv_local & delta_down_bit) == WordType(0) ? 0 : 1) - ((mv_local & delta_down_bit) == WordType(0) ? 0 : 1); // Since idx is relative to diagonal band, (idx, t-1) -> (idx,t) // corresponds to (n-1,t-1) -> (n,t) in the NW matrix. // To get from score'(n-1, t-1) -> score'(n, t-1) // add horizontal delta in row n-1 (delta_right.x) // and the vertical delta in column t (delta_down). 
score(idx, t) = score(idx, t - 1) + delta_right.x + delta_down; // Carry horizontal delta in row n (= delta_right.y) to next warp iteration if (threadIdx.x == 0) carry = 0; if (warp_mask == 0xffff'ffffu && (threadIdx.x == 0 || threadIdx.x == 31)) carry = __shfl_down_sync(0x8000'0001u, delta_right.y, warp_size - 1); if (threadIdx.x != 0) carry = 0; pv(idx, t) = pv_local; mv(idx, t) = mv_local; } __syncwarp(); } } } __device__ void myers_compute_scores_edit_dist_banded( int32_t& diagonal_begin, int32_t& diagonal_end, device_matrix_view<WordType>& pv, device_matrix_view<WordType>& mv, device_matrix_view<int32_t>& score, device_matrix_view<WordType>& query_patterns, char const* target_begin, char const* query_begin, int32_t const target_size, int32_t const query_size, int32_t const band_width, int32_t const n_words_band, int32_t const p, int32_t const alignment_idx) { // Note: 0-th row of the NW matrix is implicit for pv, mv and score! (given by the inital warp_carry) assert(warpSize == warp_size); assert(threadIdx.x < warp_size); assert(blockIdx.x == 0); assert(target_size > 0); assert(query_size > 0); assert(band_width > 0); assert(n_words_band > 0); assert(p >= 0); assert(alignment_idx >= 0); assert(pv.num_rows() == n_words_band); assert(mv.num_rows() == n_words_band); assert(score.num_rows() == n_words_band); assert(pv.num_cols() == target_size + 1); assert(mv.num_cols() == target_size + 1); assert(score.num_cols() == target_size + 1); for (int32_t idx = threadIdx.x; idx < n_words_band; idx += warp_size) { pv(idx, 0) = ~WordType(0); mv(idx, 0) = 0; score(idx, 0) = min((idx + 1) * word_size, band_width); } __syncwarp(); // This function computes a diagonal band of the NW matrix (Ukkonen algorithm). 
// In essence it computes the diagonals [-p, ..., 0, ..., p + target_size - query_size] (for query_size < target_size), // where diagonal -p starts at m(p,0), and p + target_size - query_size starts at m(0,p+target_size-query_size) // using Myers bit-vector algorithm with a word size of warp_size * sizeof(WordType). // // band_width is the width of this band = 1 + 2*p + abs(target_size - query_size). // // Note that for query_size >= target_size the diagonals [-p - (query_size - target_size), ..., 0, ..., p] are used. // This implementation computes the matrix band column by column. // To ease implementation band_width elements per column are computed for every column, // even though they are not needed for the first few and last few columns. // // In more detail: instead of just computing the diagonals: // // \\\\\00000| // \\\\\\0000| target_size=9, query_size=7, p=1 // 0\\\\\\000| // 00\\\\\\00| ("|" has no meaning - just to avoid multi-line comments with trailing"\") // 000\\\\\\0| // 0000\\\\\\| // 00000\\\\\| // // we compute horizontal stripes with n=band_width rows at the beginning and at the end. // Only the range [diagonal_begin,diagonal_end) // // ----\00000| // ----\\0000| // ----\\----| // ----\\----| // ----\\----| // 0000\\----| // 00000\----| if (band_width >= query_size) { // If the band_width is larger than the query_size just do a full Myers // i.e. do only one large horizontal stripe of width query_size. diagonal_begin = target_size + 1; diagonal_end = target_size + 1; myers_compute_scores_horizontal_band_impl(pv, mv, score, query_patterns, target_begin, query_begin, target_size, 1, target_size + 1, query_size, n_words_band, 0); } else { const int32_t symmetric_band = (band_width - min(1 + 2 * p + abs(target_size - query_size), query_size) == 0) ? 1 : 0; diagonal_begin = query_size < target_size ? target_size - query_size + p + 2 : p + 2 + (1 - symmetric_band); diagonal_end = query_size < target_size ? 
query_size - p + symmetric_band : query_size - (query_size - target_size) - p + 1; myers_compute_scores_horizontal_band_impl(pv, mv, score, query_patterns, target_begin, query_begin, target_size, 1, diagonal_begin, band_width, n_words_band, 0); myers_compute_scores_diagonal_band_impl(pv, mv, score, query_patterns, target_begin, query_begin, target_size, diagonal_begin, diagonal_end, band_width, n_words_band, 0); myers_compute_scores_horizontal_band_impl(pv, mv, score, query_patterns, target_begin, query_begin, target_size, diagonal_end, target_size + 1, band_width, n_words_band, query_size - band_width); } } __global__ void myers_banded_kernel( int8_t* paths_base, int32_t* path_lengths, int64_t const* path_starts, batched_device_matrices<WordType>::device_interface* pvi, batched_device_matrices<WordType>::device_interface* mvi, batched_device_matrices<int32_t>::device_interface* scorei, batched_device_matrices<WordType>::device_interface* query_patternsi, char const* sequences_d, int64_t const* sequence_starts_d, const int32_t max_bandwidth, const int32_t n_alignments) { assert(warpSize == warp_size); assert(threadIdx.x < warp_size); assert(blockIdx.x == 0); assert(max_bandwidth % word_size != 1); // we need at least two bits in the last word const int32_t alignment_idx = blockIdx.y * blockDim.y + threadIdx.y; if (alignment_idx >= n_alignments) return; const char* const query = sequences_d + sequence_starts_d[2 * alignment_idx]; const char* const target = sequences_d + sequence_starts_d[2 * alignment_idx + 1]; const int32_t query_size = target - query; const int32_t target_size = sequences_d + sequence_starts_d[2 * alignment_idx + 2] - target; const int32_t n_words = ceiling_divide(query_size, word_size); int8_t* path = paths_base + path_starts[alignment_idx]; if (max_bandwidth - 1 < abs(target_size - query_size)) { if (threadIdx.x == 0) { path_lengths[alignment_idx] = 0; } return; } device_matrix_view<WordType> query_pattern = 
query_patternsi->get_matrix_view(alignment_idx, n_words, 4); for (int32_t idx = threadIdx.x; idx < n_words; idx += warp_size) { // TODO query load is inefficient query_pattern(idx, 0) = myers_generate_query_pattern('A', query, query_size, idx * word_size); query_pattern(idx, 1) = myers_generate_query_pattern('C', query, query_size, idx * word_size); query_pattern(idx, 2) = myers_generate_query_pattern('T', query, query_size, idx * word_size); query_pattern(idx, 3) = myers_generate_query_pattern('G', query, query_size, idx * word_size); } __syncwarp(); assert(query_size > 0); // Use the Ukkonen algorithm for banding. // Take an initial guess for the edit distance: max_distance_estimate // and compute the maximal band of the NW matrix which is required for this distance. // If the computed distance is smaller accept and compute the backtrace/path, // otherwise retry with a larger guess (i.e. and larger band). int32_t max_distance_estimate = max(1, abs(target_size - query_size) + min(target_size, query_size) / initial_distance_guess_factor); device_matrix_view<WordType> pv; device_matrix_view<WordType> mv; device_matrix_view<int32_t> score; int32_t diagonal_begin = -1; int32_t diagonal_end = -1; int32_t band_width = 0; while (1) { int32_t p = min3(target_size, query_size, (max_distance_estimate - abs(target_size - query_size)) / 2); int32_t band_width_new = min(1 + 2 * p + abs(target_size - query_size), query_size); if (band_width_new % word_size == 1 && band_width_new != query_size) // we need at least two bits in the last word { p += 1; band_width_new = min(1 + 2 * p + abs(target_size - query_size), query_size); } if (band_width_new > max_bandwidth) { band_width_new = max_bandwidth; p = (band_width_new - 1 - abs(target_size - query_size)) / 2; } const int32_t n_words_band = ceiling_divide(band_width_new, word_size); if (static_cast<int64_t>(n_words_band) * static_cast<int64_t>(target_size + 1) > pvi->get_max_elements_per_matrix(alignment_idx)) { band_width = 
-band_width; break; } band_width = band_width_new; pv = pvi->get_matrix_view(alignment_idx, n_words_band, target_size + 1); mv = mvi->get_matrix_view(alignment_idx, n_words_band, target_size + 1); score = scorei->get_matrix_view(alignment_idx, n_words_band, target_size + 1); diagonal_begin = -1; diagonal_end = -1; myers_compute_scores_edit_dist_banded(diagonal_begin, diagonal_end, pv, mv, score, query_pattern, target, query, target_size, query_size, band_width, n_words_band, p, alignment_idx); __syncwarp(); const int32_t cur_edit_distance = score(n_words_band - 1, target_size); if (cur_edit_distance <= max_distance_estimate || band_width == query_size) { break; } if (band_width == max_bandwidth) { band_width = -band_width; break; } max_distance_estimate *= 2; } if (threadIdx.x == 0) { int32_t path_length = 0; if (band_width != 0) { path_length = band_width > 0 ? 1 : -1; band_width = abs(band_width); path_length *= myers_backtrace_banded(path, pv, mv, score, diagonal_begin, diagonal_end, band_width, target_size, query_size); } path_lengths[alignment_idx] = path_length; } } } // namespace myers int32_t myers_compute_edit_distance(std::string const& target, std::string const& query) { constexpr int32_t warp_size = 32; constexpr int32_t word_size = sizeof(myers::WordType) * CHAR_BIT; if (get_size(query) == 0) return get_size(target); const int32_t n_words = (get_size(query) + word_size - 1) / word_size; matrix<int32_t> score_host; cudaStream_t stream; GW_CU_CHECK_ERR(cudaStreamCreate(&stream)); { DefaultDeviceAllocator allocator = create_default_device_allocator(); int32_t max_sequence_length = std::max(get_size(target), get_size(query)); device_buffer<char> sequences_d(2 * max_sequence_length, allocator, stream); device_buffer<int32_t> sequence_lengths_d(2, allocator, stream); batched_device_matrices<myers::WordType> pv(1, n_words * (get_size(target) + 1), allocator, stream); batched_device_matrices<myers::WordType> mv(1, n_words * (get_size(target) + 1), allocator, 
stream); batched_device_matrices<int32_t> score(1, n_words * (get_size(target) + 1), allocator, stream); batched_device_matrices<myers::WordType> query_patterns(1, n_words * 4, allocator, stream); std::array<int32_t, 2> lengths = {static_cast<int32_t>(get_size(query)), static_cast<int32_t>(get_size(target))}; GW_CU_CHECK_ERR(cudaMemcpyAsync(sequences_d.data(), query.data(), sizeof(char) * get_size(query), cudaMemcpyHostToDevice, stream)); GW_CU_CHECK_ERR(cudaMemcpyAsync(sequences_d.data() + max_sequence_length, target.data(), sizeof(char) * get_size(target), cudaMemcpyHostToDevice, stream)); GW_CU_CHECK_ERR(cudaMemcpyAsync(sequence_lengths_d.data(), lengths.data(), sizeof(int32_t) * 2, cudaMemcpyHostToDevice, stream)); myers::myers_compute_score_matrix_kernel<<<1, warp_size, 0, stream>>>(pv.get_device_interface(), mv.get_device_interface(), score.get_device_interface(), query_patterns.get_device_interface(), sequences_d.data(), sequence_lengths_d.data(), max_sequence_length, 1); score_host = score.get_matrix(0, n_words, get_size(target) + 1, stream); GW_CU_CHECK_ERR(cudaStreamSynchronize(stream)); } GW_CU_CHECK_ERR(cudaStreamDestroy(stream)); return score_host(n_words - 1, get_size(target)); } matrix<int32_t> myers_get_full_score_matrix(std::string const& target, std::string const& query) { constexpr int32_t warp_size = 32; constexpr int32_t word_size = sizeof(myers::WordType) * CHAR_BIT; if (get_size(target) == 0) { matrix<int32_t> r(get_size(query) + 1, 1); std::iota(r.data(), r.data() + get_size(query) + 1, 0); return r; } if (get_size(query) == 0) { matrix<int32_t> r(1, get_size(target) + 1); std::iota(r.data(), r.data() + get_size(target) + 1, 0); return r; } matrix<int32_t> fullscore_host; cudaStream_t stream; GW_CU_CHECK_ERR(cudaStreamCreate(&stream)); { DefaultDeviceAllocator allocator = create_default_device_allocator(); int32_t max_sequence_length = std::max(get_size(target), get_size(query)); device_buffer<char> sequences_d(2 * max_sequence_length, 
allocator, stream); device_buffer<int32_t> sequence_lengths_d(2, allocator, stream); const int32_t n_words = (get_size(query) + word_size - 1) / word_size; batched_device_matrices<myers::WordType> pv(1, n_words * (get_size(target) + 1), allocator, stream); batched_device_matrices<myers::WordType> mv(1, n_words * (get_size(target) + 1), allocator, stream); batched_device_matrices<int32_t> score(1, n_words * (get_size(target) + 1), allocator, stream); batched_device_matrices<myers::WordType> query_patterns(1, n_words * 4, allocator, stream); batched_device_matrices<int32_t> fullscore(1, (get_size(query) + 1) * (get_size(target) + 1), allocator, stream); std::array<int32_t, 2> lengths = {static_cast<int32_t>(get_size(query)), static_cast<int32_t>(get_size(target))}; GW_CU_CHECK_ERR(cudaMemcpyAsync(sequences_d.data(), query.data(), sizeof(char) * get_size(query), cudaMemcpyHostToDevice, stream)); GW_CU_CHECK_ERR(cudaMemcpyAsync(sequences_d.data() + max_sequence_length, target.data(), sizeof(char) * get_size(target), cudaMemcpyHostToDevice, stream)); GW_CU_CHECK_ERR(cudaMemcpyAsync(sequence_lengths_d.data(), lengths.data(), sizeof(int32_t) * 2, cudaMemcpyHostToDevice, stream)); myers::myers_compute_score_matrix_kernel<<<1, warp_size, 0, stream>>>(pv.get_device_interface(), mv.get_device_interface(), score.get_device_interface(), query_patterns.get_device_interface(), sequences_d.data(), sequence_lengths_d.data(), max_sequence_length, 1); { dim3 n_threads = {32, 4, 1}; dim3 n_blocks = {1, 1, 1}; n_blocks.x = ceiling_divide<int32_t>(get_size<int32_t>(query) + 1, n_threads.x); n_blocks.y = ceiling_divide<int32_t>(get_size<int32_t>(target) + 1, n_threads.y); myers::myers_convert_to_full_score_matrix_kernel<<<n_blocks, n_threads, 0, stream>>>(fullscore.get_device_interface(), pv.get_device_interface(), mv.get_device_interface(), score.get_device_interface(), sequence_lengths_d.data(), 0); } fullscore_host = fullscore.get_matrix(0, get_size(query) + 1, get_size(target) + 1, 
stream); } GW_CU_CHECK_ERR(cudaStreamSynchronize(stream)); GW_CU_CHECK_ERR(cudaStreamDestroy(stream)); return fullscore_host; } void myers_gpu(int8_t* paths_d, int32_t* path_lengths_d, int32_t max_path_length, char const* sequences_d, int32_t const* sequence_lengths_d, int32_t max_sequence_length, int32_t n_alignments, batched_device_matrices<myers::WordType>& pv, batched_device_matrices<myers::WordType>& mv, batched_device_matrices<int32_t>& score, batched_device_matrices<myers::WordType>& query_patterns, cudaStream_t stream) { { const dim3 threads(warp_size, 1, 1); const dim3 blocks(1, ceiling_divide<int32_t>(n_alignments, threads.y), 1); myers::myers_compute_score_matrix_kernel<<<blocks, threads, 0, stream>>>(pv.get_device_interface(), mv.get_device_interface(), score.get_device_interface(), query_patterns.get_device_interface(), sequences_d, sequence_lengths_d, max_sequence_length, n_alignments); } { const dim3 threads(128, 1, 1); const dim3 blocks(ceiling_divide<int32_t>(n_alignments, threads.x), 1, 1); myers::myers_backtrace_kernel<<<blocks, threads, 0, stream>>>(paths_d, path_lengths_d, max_path_length, pv.get_device_interface(), mv.get_device_interface(), score.get_device_interface(), sequence_lengths_d, n_alignments); } } void myers_banded_gpu(int8_t* paths_d, int32_t* path_lengths_d, int64_t const* path_starts_d, char const* sequences_d, int64_t const* sequence_starts_d, int32_t n_alignments, int32_t max_bandwidth, batched_device_matrices<myers::WordType>& pv, batched_device_matrices<myers::WordType>& mv, batched_device_matrices<int32_t>& score, batched_device_matrices<myers::WordType>& query_patterns, cudaStream_t stream) { const dim3 threads(warp_size, 1, 1); const dim3 blocks(1, ceiling_divide<int32_t>(n_alignments, threads.y), 1); myers::myers_banded_kernel<<<blocks, threads, 0, stream>>>(paths_d, path_lengths_d, path_starts_d, pv.get_device_interface(), mv.get_device_interface(), score.get_device_interface(), query_patterns.get_device_interface(), 
sequences_d, sequence_starts_d, max_bandwidth, n_alignments); } } // namespace cudaaligner } // namespace genomeworks } // namespace claraparabricks
b762cb047d9aaff8afe24e6af3134d2fb5b54b3c.hip
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <device_launch_parameters.h>
#include <iostream>
#include "MedianFIlter.h"
#include "Bitmap.h"

using namespace std;

// Side length of the (square) thread block and of the shared-memory tile.
const int TILE_WIDTH = 31;

// 3x3 median filter reading directly from global memory.
// Expects a 2-D launch of TILE_WIDTH x TILE_WIDTH blocks covering Width x Height.
// Image-border pixels (no full 3x3 neighbourhood) are written as 0.
// Fix over the original: the global index mixed blockDim.x/blockDim.y between
// axes; that only worked because blocks are square — each axis now uses its
// own dimension.
__global__ void MedianFilterKernel(unsigned char* Ad, unsigned char* Bd, int Width, int Height)
{
    int j = blockIdx.y * blockDim.y + threadIdx.y; // row
    int i = blockIdx.x * blockDim.x + threadIdx.x; // column

    if (i >= Width || j >= Height)
        return;

    if (i == 0 || j == 0 || i == Width - 1 || j == Height - 1) {
        // Border pixel: output 0, as in the original implementation.
        Bd[j * Width + i] = 0;
        return;
    }

    // Gather the 3x3 neighbourhood around (j, i).
    unsigned char window[9];
    int k = 0;
    for (int dj = -1; dj <= 1; ++dj)
        for (int di = -1; di <= 1; ++di)
            window[k++] = Ad[(j + dj) * Width + (i + di)];

    // Median of 9 via insertion sort (cheap and branch-light for fixed size).
    for (int o = 1; o < 9; ++o) {
        unsigned char v = window[o];
        int p = o - 1;
        while (p >= 0 && window[p] > v) {
            window[p + 1] = window[p];
            --p;
        }
        window[p + 1] = v;
    }
    Bd[j * Width + i] = window[4];
}

// 3x3 median filter staged through a shared-memory tile with a 1-pixel halo.
// Fix over the original: __syncthreads() was executed inside a divergent
// branch (threads on image-border pixels skipped it), which is undefined
// behaviour and can hang the block. All threads now participate in the tile
// load and reach the single barrier; out-of-image halo reads are zero-padded
// with explicit bounds checks. Output is identical to MedianFilterKernel.
__global__ void MedianFilterKernelShared(unsigned char* Ad, unsigned char* Bd, int Width, int Height)
{
    int j = blockIdx.y * blockDim.y + threadIdx.y; // row
    int i = blockIdx.x * blockDim.x + threadIdx.x; // column
    int tx = threadIdx.x;
    int ty = threadIdx.y;

    // Tile indexed [x][y] (column-first) to match the original layout.
    __shared__ unsigned char tile[TILE_WIDTH + 2][TILE_WIDTH + 2];

    bool inside = (i < Width) && (j < Height);

    // Centre element (0 for threads past the image edge).
    tile[tx + 1][ty + 1] = inside ? Ad[j * Width + i] : 0;
    // Left/right halo columns.
    if (tx == 0)
        tile[0][ty + 1] = (i > 0 && j < Height) ? Ad[j * Width + (i - 1)] : 0;
    if (tx == blockDim.x - 1)
        tile[tx + 2][ty + 1] = (i + 1 < Width && j < Height) ? Ad[j * Width + (i + 1)] : 0;
    // Top/bottom halo rows.
    if (ty == 0)
        tile[tx + 1][0] = (j > 0 && i < Width) ? Ad[(j - 1) * Width + i] : 0;
    if (ty == blockDim.y - 1)
        tile[tx + 1][ty + 2] = (j + 1 < Height && i < Width) ? Ad[(j + 1) * Width + i] : 0;
    // Four halo corners.
    if (tx == 0 && ty == 0)
        tile[0][0] = (i > 0 && j > 0) ? Ad[(j - 1) * Width + (i - 1)] : 0;
    if (tx == blockDim.x - 1 && ty == 0)
        tile[tx + 2][0] = (i + 1 < Width && j > 0) ? Ad[(j - 1) * Width + (i + 1)] : 0;
    if (tx == 0 && ty == blockDim.y - 1)
        tile[0][ty + 2] = (i > 0 && j + 1 < Height) ? Ad[(j + 1) * Width + (i - 1)] : 0;
    if (tx == blockDim.x - 1 && ty == blockDim.y - 1)
        tile[tx + 2][ty + 2] = (i + 1 < Width && j + 1 < Height) ? Ad[(j + 1) * Width + (i + 1)] : 0;

    // Barrier reached by EVERY thread of the block — no divergence above can
    // skip it.
    __syncthreads();

    if (!inside)
        return;

    if (i == 0 || j == 0 || i == Width - 1 || j == Height - 1) {
        Bd[j * Width + i] = 0; // border pixels are zeroed, as before
        return;
    }

    unsigned char window[9] = {
        tile[tx][ty],     tile[tx + 1][ty],     tile[tx + 2][ty],
        tile[tx][ty + 1], tile[tx + 1][ty + 1], tile[tx + 2][ty + 1],
        tile[tx][ty + 2], tile[tx + 1][ty + 2], tile[tx + 2][ty + 2]
    };
    for (int o = 1; o < 9; ++o) {
        unsigned char v = window[o];
        int p = o - 1;
        while (p >= 0 && window[p] > v) {
            window[p + 1] = window[p];
            --p;
        }
        window[p + 1] = v;
    }
    Bd[j * Width + i] = window[4];
}

// Host helper: runs the median filter on 'image' into 'outputImage'.
// sharedoryUse == false -> global-memory kernel; true -> shared-memory kernel.
// Returns false (after freeing device memory) if the kernel launch fails.
bool MedianFilterGPU(Bitmap* image, Bitmap* outputImage, bool sharedoryUse)
{
    hipError_t status;
    int Width = image->Width(), Height = image->Height();
    size_t bytes = (size_t)Width * Height * sizeof(unsigned char);

    // Device buffers for input (Ad) and output (Bd).
    unsigned char *Ad = nullptr, *Bd = nullptr;
    hipMalloc((void**)&Ad, bytes);
    hipMalloc((void**)&Bd, bytes);

    hipMemcpy(Ad, image->image, bytes, hipMemcpyHostToDevice);
    hipMemcpy(Bd, outputImage->image, bytes, hipMemcpyHostToDevice);

    // Integer ceiling division replaces the float ceil() round-trip.
    dim3 dimGrid((Width + TILE_WIDTH - 1) / TILE_WIDTH,
                 (Height + TILE_WIDTH - 1) / TILE_WIDTH);
    dim3 dimBlock(TILE_WIDTH, TILE_WIDTH);

    if (!sharedoryUse) {
        MedianFilterKernel<<<dimGrid, dimBlock>>>(Ad, Bd, Width, Height);
    } else {
        MedianFilterKernelShared<<<dimGrid, dimBlock>>>(Ad, Bd, Width, Height);
    }

    // Wait for completion, then surface launch/execution errors.
    hipDeviceSynchronize();
    status = hipGetLastError();
    if (status != hipSuccess) {
        std::cout << "Kernel failed: " << hipGetErrorString(status) << std::endl;
        hipFree(Ad);
        hipFree(Bd);
        return false;
    }

    hipMemcpy(outputImage->image, Bd, bytes, hipMemcpyDeviceToHost);

    hipFree(Ad);
    hipFree(Bd);
    return true;
}
b762cb047d9aaff8afe24e6af3134d2fb5b54b3c.cu
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <device_launch_parameters.h>
#include <iostream>
#include "MedianFIlter.h"
#include "Bitmap.h"

using namespace std;

// Side length of the (square) thread block and of the shared-memory tile.
const int TILE_WIDTH = 31;

// 3x3 median filter reading directly from global memory.
// Expects a 2-D launch of TILE_WIDTH x TILE_WIDTH blocks covering Width x Height.
// Image-border pixels (no full 3x3 neighbourhood) are written as 0.
// Fix over the original: the global index mixed blockDim.x/blockDim.y between
// axes; that only worked because blocks are square — each axis now uses its
// own dimension.
__global__ void MedianFilterKernel(unsigned char* Ad, unsigned char* Bd, int Width, int Height)
{
    int j = blockIdx.y * blockDim.y + threadIdx.y; // row
    int i = blockIdx.x * blockDim.x + threadIdx.x; // column

    if (i >= Width || j >= Height)
        return;

    if (i == 0 || j == 0 || i == Width - 1 || j == Height - 1) {
        // Border pixel: output 0, as in the original implementation.
        Bd[j * Width + i] = 0;
        return;
    }

    // Gather the 3x3 neighbourhood around (j, i).
    unsigned char window[9];
    int k = 0;
    for (int dj = -1; dj <= 1; ++dj)
        for (int di = -1; di <= 1; ++di)
            window[k++] = Ad[(j + dj) * Width + (i + di)];

    // Median of 9 via insertion sort (cheap and branch-light for fixed size).
    for (int o = 1; o < 9; ++o) {
        unsigned char v = window[o];
        int p = o - 1;
        while (p >= 0 && window[p] > v) {
            window[p + 1] = window[p];
            --p;
        }
        window[p + 1] = v;
    }
    Bd[j * Width + i] = window[4];
}

// 3x3 median filter staged through a shared-memory tile with a 1-pixel halo.
// Fix over the original: __syncthreads() was executed inside a divergent
// branch (threads on image-border pixels skipped it), which is undefined
// behaviour and can hang the block. All threads now participate in the tile
// load and reach the single barrier; out-of-image halo reads are zero-padded
// with explicit bounds checks. Output is identical to MedianFilterKernel.
__global__ void MedianFilterKernelShared(unsigned char* Ad, unsigned char* Bd, int Width, int Height)
{
    int j = blockIdx.y * blockDim.y + threadIdx.y; // row
    int i = blockIdx.x * blockDim.x + threadIdx.x; // column
    int tx = threadIdx.x;
    int ty = threadIdx.y;

    // Tile indexed [x][y] (column-first) to match the original layout.
    __shared__ unsigned char tile[TILE_WIDTH + 2][TILE_WIDTH + 2];

    bool inside = (i < Width) && (j < Height);

    // Centre element (0 for threads past the image edge).
    tile[tx + 1][ty + 1] = inside ? Ad[j * Width + i] : 0;
    // Left/right halo columns.
    if (tx == 0)
        tile[0][ty + 1] = (i > 0 && j < Height) ? Ad[j * Width + (i - 1)] : 0;
    if (tx == blockDim.x - 1)
        tile[tx + 2][ty + 1] = (i + 1 < Width && j < Height) ? Ad[j * Width + (i + 1)] : 0;
    // Top/bottom halo rows.
    if (ty == 0)
        tile[tx + 1][0] = (j > 0 && i < Width) ? Ad[(j - 1) * Width + i] : 0;
    if (ty == blockDim.y - 1)
        tile[tx + 1][ty + 2] = (j + 1 < Height && i < Width) ? Ad[(j + 1) * Width + i] : 0;
    // Four halo corners.
    if (tx == 0 && ty == 0)
        tile[0][0] = (i > 0 && j > 0) ? Ad[(j - 1) * Width + (i - 1)] : 0;
    if (tx == blockDim.x - 1 && ty == 0)
        tile[tx + 2][0] = (i + 1 < Width && j > 0) ? Ad[(j - 1) * Width + (i + 1)] : 0;
    if (tx == 0 && ty == blockDim.y - 1)
        tile[0][ty + 2] = (i > 0 && j + 1 < Height) ? Ad[(j + 1) * Width + (i - 1)] : 0;
    if (tx == blockDim.x - 1 && ty == blockDim.y - 1)
        tile[tx + 2][ty + 2] = (i + 1 < Width && j + 1 < Height) ? Ad[(j + 1) * Width + (i + 1)] : 0;

    // Barrier reached by EVERY thread of the block — no divergence above can
    // skip it.
    __syncthreads();

    if (!inside)
        return;

    if (i == 0 || j == 0 || i == Width - 1 || j == Height - 1) {
        Bd[j * Width + i] = 0; // border pixels are zeroed, as before
        return;
    }

    unsigned char window[9] = {
        tile[tx][ty],     tile[tx + 1][ty],     tile[tx + 2][ty],
        tile[tx][ty + 1], tile[tx + 1][ty + 1], tile[tx + 2][ty + 1],
        tile[tx][ty + 2], tile[tx + 1][ty + 2], tile[tx + 2][ty + 2]
    };
    for (int o = 1; o < 9; ++o) {
        unsigned char v = window[o];
        int p = o - 1;
        while (p >= 0 && window[p] > v) {
            window[p + 1] = window[p];
            --p;
        }
        window[p + 1] = v;
    }
    Bd[j * Width + i] = window[4];
}

// Host helper: runs the median filter on 'image' into 'outputImage'.
// sharedoryUse == false -> global-memory kernel; true -> shared-memory kernel.
// Returns false (after freeing device memory) if the kernel launch fails.
bool MedianFilterGPU(Bitmap* image, Bitmap* outputImage, bool sharedoryUse)
{
    cudaError_t status;
    int Width = image->Width(), Height = image->Height();
    size_t bytes = (size_t)Width * Height * sizeof(unsigned char);

    // Device buffers for input (Ad) and output (Bd).
    unsigned char *Ad = nullptr, *Bd = nullptr;
    cudaMalloc((void**)&Ad, bytes);
    cudaMalloc((void**)&Bd, bytes);

    cudaMemcpy(Ad, image->image, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(Bd, outputImage->image, bytes, cudaMemcpyHostToDevice);

    // Integer ceiling division replaces the float ceil() round-trip.
    dim3 dimGrid((Width + TILE_WIDTH - 1) / TILE_WIDTH,
                 (Height + TILE_WIDTH - 1) / TILE_WIDTH);
    dim3 dimBlock(TILE_WIDTH, TILE_WIDTH);

    if (!sharedoryUse) {
        MedianFilterKernel<<<dimGrid, dimBlock>>>(Ad, Bd, Width, Height);
    } else {
        MedianFilterKernelShared<<<dimGrid, dimBlock>>>(Ad, Bd, Width, Height);
    }

    // Wait for completion; cudaThreadSynchronize() is deprecated, use the
    // device-level call instead.
    cudaDeviceSynchronize();
    status = cudaGetLastError();
    if (status != cudaSuccess) {
        std::cout << "Kernel failed: " << cudaGetErrorString(status) << std::endl;
        cudaFree(Ad);
        cudaFree(Bd);
        return false;
    }

    cudaMemcpy(outputImage->image, Bd, bytes, cudaMemcpyDeviceToHost);

    cudaFree(Ad);
    cudaFree(Bd);
    return true;
}
a7c65a94b376d0922efb9665ac8f8c6d76822b0f.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <time.h>
#include "parameter.cuh"

// Counter-based Philox RNG state: cheap to initialize per grid cell.
typedef hiprandStatePhilox4_32_10_t myCurandState_t;

//#define DEBUG

// Abort-on-error check for the most recent HIP call / kernel launch.
#define cudaCheckError() { \
 hipError_t e=hipGetLastError(); \
 if(e!=hipSuccess) { \
 printf("Cuda failure %s:%d: '%s'\n",__FILE__,__LINE__,hipGetErrorString(e)); \
 exit(0); \
 } \
}

#define TOTAL (SIZE * SIZE)
#define SRAND_VALUE 200

// Split the agent population (agentNumber from parameter.cuh) into two types.
const int agentTypeOneNumber = agentNumber / 2;
const int agentTypeTwoNumber = agentNumber - agentTypeOneNumber;
// An agent is happy iff at least this many of its 8 neighbours share its type.
const int happinessThreshold = 5;
const int numThreadsPerBlock = 256;

void printOutput(int [SIZE + 2][SIZE + 2]);
void initPos(int grid[SIZE + 2][SIZE + 2]);
int random_location();

// Debug counters; not read anywhere in this file — kept for compatibility.
__device__ unsigned int numberConflict = 0;
__device__ unsigned int numberMoveable = 0;

// Returns a uniformly distributed integer in [1, SIZE] (the interior cells).
__device__ int getnextrand(myCurandState_t* state)
{
    return 1 + (int)(hiprand_uniform(state) * (SIZE));
}

// Legacy helper (not called anywhere in this file): moves the agent at
// (idx,idy) to a random empty cell claimed via atomicCAS. Kept for reference.
__device__ void randomMove(myCurandState_t* state, int grid[][SIZE + 2], int new_grid[][SIZE + 2])
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int idy = blockIdx.y * blockDim.y + threadIdx.y;
    int row, column;
    do {
        row = getnextrand(&state[idx + idy]);
        column = getnextrand(&state[idx + idy]);
    } while (atomicCAS(&new_grid[row][column], 0, grid[idx][idy]));
    new_grid[idx][idy] = 0;
}

// Initializes one RNG state per grid cell, using the flattened cell index as
// the Philox subsequence so streams are independent.
// NOTE(review): the 'seed' parameter is ignored — hiprand_init is called with
// a hard-coded seed of 0 (presumably for reproducibility); confirm intent
// before relying on the parameter.
__global__ void initCurand(myCurandState_t state[][SIZE + 2], unsigned long seed)
{
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    int idy = blockIdx.y * blockDim.y + threadIdx.y;
    hiprand_init(0, idx * (SIZE + 2) + idy, 0, &state[idx][idy]);
}

// Marks every unhappy agent in temp_grid with a unique nonzero priority (its
// flattened cell index). The border cells (row/column 0 and SIZE+1) are always
// empty, so the unguarded neighbour reads stay inside the (SIZE+2)^2 array.
__global__ void compute(int grid[][SIZE + 2], int new_grid[][SIZE + 2],
                        int temp_grid[][SIZE + 2], int move_grid[][SIZE + 2])
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int idy = blockIdx.y * blockDim.y + threadIdx.y;
    int sameTypeCount = 0;
    int current_priority = idx * (SIZE + 2) + idy;
    if (grid[idx][idy] != 0) {
        int currentType = grid[idx][idy];
        // Count same-type agents in the Moore (8-cell) neighbourhood.
        for (int dr = -1; dr <= 1; ++dr) {
            for (int dc = -1; dc <= 1; ++dc) {
                if ((dr != 0 || dc != 0) && grid[idx + dr][idy + dc] == currentType) {
                    sameTypeCount += 1;
                }
            }
        }
        if (sameTypeCount < happinessThreshold) {
            temp_grid[idx][idy] = current_priority;
        }
    }
}

// Vacates the cells of all unhappy agents in the staging grid so they can be
// re-occupied during the move phase.
__global__ void prepareNewGrid(int temp_grid[][SIZE + 2], int new_grid[][SIZE + 2])
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int idy = blockIdx.y * blockDim.y + threadIdx.y;
    if (temp_grid[idx][idy] != 0) {
        new_grid[idx][idy] = 0;
    }
}

// Host-polled flag: set by assign_ when at least one agent still needs a cell.
__device__ bool agentsLeft;

// Each unhappy agent repeatedly picks random cells until it finds an empty one
// and claims it in move_grid via atomicMax on its priority. Losing a contested
// cell — or evicting a previous, lower-priority claimant — raises agentsLeft
// so the host runs another round.
// NOTE(review): the while(true) search assumes empty cells always exist
// (OCCUPANCY < 1); otherwise it would spin forever.
__global__ void assign_(myCurandState_t state[][SIZE + 2], int grid[][SIZE + 2],
                        int new_grid[][SIZE + 2], int temp_grid[][SIZE + 2],
                        int move_grid[][SIZE + 2], int rowAndColumn[][SIZE + 2])
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int idy = blockIdx.y * blockDim.y + threadIdx.y;
    int current_priority = idx * (SIZE + 2) + idy;
    int row = 0;
    int column = 0;
    int old_value;
    if (temp_grid[idx][idy] != 0) {
        while (true) {
            row = getnextrand(&state[idx][idy]);
            column = getnextrand(&state[idx][idy]);
            if (new_grid[row][column] == 0) {
                old_value = atomicMax(&move_grid[row][column], current_priority);
                if (old_value < current_priority) {
                    if (old_value != 0) {
                        agentsLeft = true; // evicted a lower-priority claimant
                    }
                } else {
                    agentsLeft = true; // lost to a higher-priority claimant
                }
                break;
            }
        }
    }
}

// Commits winning claims: each claimed cell copies the winning agent from its
// source cell (encoded in the priority) and clears the source's unhappy mark.
__global__ void updateTonew(int grid[][SIZE + 2], int new_grid[][SIZE + 2],
                            int temp_grid[][SIZE + 2], int move_grid[][SIZE + 2],
                            int rowAndColumn[][SIZE + 2])
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int idy = blockIdx.y * blockDim.y + threadIdx.y;
    int movegrid_priority = move_grid[idx][idy];
    if (movegrid_priority != 0) {
        int source_row = movegrid_priority / (SIZE + 2);
        int source_col = movegrid_priority % (SIZE + 2);
        new_grid[idx][idy] = grid[source_row][source_col];
        temp_grid[source_row][source_col] = 0;
    }
}

// Copies the staging grid back into the primary grid.
__global__ void newTogrid(int grid[][SIZE + 2], int new_grid[][SIZE + 2])
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int idy = blockIdx.y * blockDim.y + threadIdx.y;
    grid[idx][idy] = new_grid[idx][idy];
}

// Resets the per-round claim grid.
__global__ void clearMoveGrid(int move_grid[][SIZE + 2])
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int idy = blockIdx.y * blockDim.y + threadIdx.y;
    move_grid[idx][idy] = 0;
}

// Resets both the unhappy-agent grid and the claim grid.
__global__ void update(int temp_grid[][SIZE + 2], int move_grid[][SIZE + 2])
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int idy = blockIdx.y * blockDim.y + threadIdx.y;
    temp_grid[idx][idy] = 0;
    move_grid[idx][idy] = 0;
}

// Debug helper: counts agents of each type on the host-side grid.
void checkNumber(int grid[SIZE + 2][SIZE + 2])
{
    int agentTypeOne = 0;
    int agentTypeTwo = 0;
    for (int i = 0; i < SIZE + 2; i++) {
        for (int j = 0; j < SIZE + 2; j++) {
            if (grid[i][j] == 1) {
                agentTypeOne += 1;
            } else if (grid[i][j] == 2) {
                agentTypeTwo += 1;
            }
        }
    }
    printf("Type One %d, Type Two %d\n", agentTypeOne, agentTypeTwo);
}

// Host copy of the grid, including the always-empty one-cell border.
int host_grid[SIZE + 2][SIZE + 2];

int main(int argc, char* argv[])
{
    hipDeviceSetLimit(hipLimitPrintfFifoSize, 10 * 1024 * 1024);

    // Fixed: argv[1] was previously dereferenced without checking argc.
    if (argc < 2) {
        fprintf(stderr, "usage: %s <number_of_rounds>\n", argv[0]);
        return EXIT_FAILURE;
    }

    struct timespec start, stop;
    double accum;
    int (*device_grid)[SIZE + 2];
    int (*device_newGrid)[SIZE + 2];
    int (*device_moveGrid)[SIZE + 2];
    int (*device_tempGrid)[SIZE + 2];
    int (*device_rowAndColumn)[SIZE + 2];
    srand(SRAND_VALUE);
    size_t bytes = sizeof(int) * (SIZE + 2) * (SIZE + 2);
    myCurandState_t (*devState)[SIZE + 2];
    bool agentsRemain = false;

    hipMalloc((void**)&devState, (SIZE + 2) * (SIZE + 2) * sizeof(myCurandState_t));
    hipMalloc((void**)&device_grid, bytes);
    hipMalloc((void**)&device_newGrid, bytes);
    hipMalloc((void**)&device_tempGrid, bytes);
    hipMalloc((void**)&device_moveGrid, bytes);
    hipMalloc((void**)&device_rowAndColumn, bytes);

    // Square blocks covering the whole (SIZE+2)^2 grid.
    int blockSizePerDim = sqrt(numThreadsPerBlock);
    int gridSizePerDim = (SIZE + 2) / blockSizePerDim;
    dim3 blockSize(blockSizePerDim, blockSizePerDim, 1);
    dim3 gridSize(gridSizePerDim, gridSizePerDim, 1);

    initCurand<<<gridSize, blockSize>>>(devState, 1);

    for (int i = 0; i < (SIZE + 2); i++) {
        for (int j = 0; j < SIZE + 2; j++) {
            host_grid[i][j] = 0;
        }
    }
    initPos(host_grid);
    // printOutput(host_grid);

    hipMemcpy(device_grid, host_grid, bytes, hipMemcpyHostToDevice);
    hipMemcpy(device_newGrid, host_grid, bytes, hipMemcpyHostToDevice);
    newTogrid<<<gridSize, blockSize>>>(device_grid, device_newGrid);
    update<<<gridSize, blockSize>>>(device_tempGrid, device_moveGrid);

    if (clock_gettime(CLOCK_REALTIME, &start) == -1) {
        perror("clock gettime");
        exit(EXIT_FAILURE);
    }

    int numRoundsTotal = atoi(argv[1]);
    for (int i = 0; i < numRoundsTotal; i++) {
        compute<<<gridSize, blockSize>>>(device_grid, device_newGrid, device_tempGrid, device_moveGrid);
#ifdef DEBUG
        hipDeviceSynchronize();
        cudaCheckError();
#endif
        prepareNewGrid<<<gridSize, blockSize>>>(device_tempGrid, device_newGrid);
        do {
            agentsRemain = false;
            // Fixed: HIP's hipMemcpyTo/FromSymbol take a const void* symbol —
            // a bare __device__ bool does not convert; wrap in HIP_SYMBOL().
            hipMemcpyToSymbol(HIP_SYMBOL(agentsLeft), &agentsRemain, sizeof(bool), 0, hipMemcpyHostToDevice);
            assign_<<<gridSize, blockSize>>>(devState, device_grid, device_newGrid, device_tempGrid, device_moveGrid, device_rowAndColumn);
            updateTonew<<<gridSize, blockSize>>>(device_grid, device_newGrid, device_tempGrid, device_moveGrid, device_rowAndColumn);
            clearMoveGrid<<<gridSize, blockSize>>>(device_moveGrid);
            hipMemcpyFromSymbol(&agentsRemain, HIP_SYMBOL(agentsLeft), sizeof(bool), 0, hipMemcpyDeviceToHost);
        } while (agentsRemain == true); // retry until every unhappy agent has a cell
        newTogrid<<<gridSize, blockSize>>>(device_grid, device_newGrid);
        update<<<gridSize, blockSize>>>(device_tempGrid, device_moveGrid);
        hipDeviceSynchronize();
    }

    if (clock_gettime(CLOCK_REALTIME, &stop) == -1) {
        perror("clock gettime");
        exit(EXIT_FAILURE);
    }
    accum = (stop.tv_sec - start.tv_sec) * 1e6 + (stop.tv_nsec - start.tv_nsec) / 1e3;
    printf("%.1f Time is %.5f s \n", float(OCCUPANCY), accum / 1e6);

    hipMemcpy(host_grid, device_grid, bytes, hipMemcpyDeviceToHost);
    // printOutput(host_grid);
    // checkNumber(host_grid);

    hipFree(device_grid);
    hipFree(device_newGrid);
    hipFree(device_tempGrid);
    hipFree(device_moveGrid); // fixed: this buffer was previously leaked
    hipFree(devState);
    hipFree(device_rowAndColumn);
    return 0;
}

// Prints the whole grid (including the border) to stdout.
void printOutput(int grid[SIZE + 2][SIZE + 2])
{
    for (int i = 0; i < SIZE + 2; i++) {
        for (int j = 0; j < SIZE + 2; j++) {
            printf("%d ", grid[i][j]);
        }
        printf("\n");
    }
    printf("\n");
}

// Randomly places the two agent populations on distinct interior cells.
void initPos(int grid[SIZE + 2][SIZE + 2])
{
    int row;
    int column;
    for (int i = 0; i < agentTypeOneNumber; i++) {
        do {
            row = random_location();
            column = random_location();
        } while (grid[row][column] != 0);
        grid[row][column] = 1;
    }
    for (int i = 0; i < agentTypeTwoNumber; i++) {
        do {
            row = random_location();
            column = random_location();
        } while (grid[row][column] != 0);
        grid[row][column] = 2;
    }
}

// Returns a random interior coordinate in [1, SIZE].
int random_location()
{
    int r = rand();
    return (r % (SIZE) + 1);
}
a7c65a94b376d0922efb9665ac8f8c6d76822b0f.cu
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <curand.h>
#include <curand_kernel.h>
#include <time.h>
#include "parameter.cuh"

// Counter-based Philox RNG state: cheap to initialize per grid cell.
typedef curandStatePhilox4_32_10_t myCurandState_t;

//#define DEBUG

// Abort-on-error check for the most recent CUDA call / kernel launch.
#define cudaCheckError() { \
 cudaError_t e=cudaGetLastError(); \
 if(e!=cudaSuccess) { \
 printf("Cuda failure %s:%d: '%s'\n",__FILE__,__LINE__,cudaGetErrorString(e)); \
 exit(0); \
 } \
}

#define TOTAL (SIZE * SIZE)
#define SRAND_VALUE 200

// Split the agent population (agentNumber from parameter.cuh) into two types.
const int agentTypeOneNumber = agentNumber / 2;
const int agentTypeTwoNumber = agentNumber - agentTypeOneNumber;
// An agent is happy iff at least this many of its 8 neighbours share its type.
const int happinessThreshold = 5;
const int numThreadsPerBlock = 256;

void printOutput(int [SIZE + 2][SIZE + 2]);
void initPos(int grid[SIZE + 2][SIZE + 2]);
int random_location();

// Debug counters; not read anywhere in this file — kept for compatibility.
__device__ unsigned int numberConflict = 0;
__device__ unsigned int numberMoveable = 0;

// Returns a uniformly distributed integer in [1, SIZE] (the interior cells).
__device__ int getnextrand(myCurandState_t* state)
{
    return 1 + (int)(curand_uniform(state) * (SIZE));
}

// Legacy helper (not called anywhere in this file): moves the agent at
// (idx,idy) to a random empty cell claimed via atomicCAS. Kept for reference.
__device__ void randomMove(myCurandState_t* state, int grid[][SIZE + 2], int new_grid[][SIZE + 2])
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int idy = blockIdx.y * blockDim.y + threadIdx.y;
    int row, column;
    do {
        row = getnextrand(&state[idx + idy]);
        column = getnextrand(&state[idx + idy]);
    } while (atomicCAS(&new_grid[row][column], 0, grid[idx][idy]));
    new_grid[idx][idy] = 0;
}

// Initializes one RNG state per grid cell, using the flattened cell index as
// the Philox subsequence so streams are independent.
// NOTE(review): the 'seed' parameter is ignored — curand_init is called with a
// hard-coded seed of 0 (presumably for reproducibility); confirm intent before
// relying on the parameter.
__global__ void initCurand(myCurandState_t state[][SIZE + 2], unsigned long seed)
{
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    int idy = blockIdx.y * blockDim.y + threadIdx.y;
    curand_init(0, idx * (SIZE + 2) + idy, 0, &state[idx][idy]);
}

// Marks every unhappy agent in temp_grid with a unique nonzero priority (its
// flattened cell index). The border cells (row/column 0 and SIZE+1) are always
// empty, so the unguarded neighbour reads stay inside the (SIZE+2)^2 array.
__global__ void compute(int grid[][SIZE + 2], int new_grid[][SIZE + 2],
                        int temp_grid[][SIZE + 2], int move_grid[][SIZE + 2])
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int idy = blockIdx.y * blockDim.y + threadIdx.y;
    int sameTypeCount = 0;
    int current_priority = idx * (SIZE + 2) + idy;
    if (grid[idx][idy] != 0) {
        int currentType = grid[idx][idy];
        // Count same-type agents in the Moore (8-cell) neighbourhood.
        for (int dr = -1; dr <= 1; ++dr) {
            for (int dc = -1; dc <= 1; ++dc) {
                if ((dr != 0 || dc != 0) && grid[idx + dr][idy + dc] == currentType) {
                    sameTypeCount += 1;
                }
            }
        }
        if (sameTypeCount < happinessThreshold) {
            temp_grid[idx][idy] = current_priority;
        }
    }
}

// Vacates the cells of all unhappy agents in the staging grid so they can be
// re-occupied during the move phase.
__global__ void prepareNewGrid(int temp_grid[][SIZE + 2], int new_grid[][SIZE + 2])
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int idy = blockIdx.y * blockDim.y + threadIdx.y;
    if (temp_grid[idx][idy] != 0) {
        new_grid[idx][idy] = 0;
    }
}

// Host-polled flag: set by assign_ when at least one agent still needs a cell.
__device__ bool agentsLeft;

// Each unhappy agent repeatedly picks random cells until it finds an empty one
// and claims it in move_grid via atomicMax on its priority. Losing a contested
// cell — or evicting a previous, lower-priority claimant — raises agentsLeft
// so the host runs another round.
// NOTE(review): the while(true) search assumes empty cells always exist
// (OCCUPANCY < 1); otherwise it would spin forever.
__global__ void assign_(myCurandState_t state[][SIZE + 2], int grid[][SIZE + 2],
                        int new_grid[][SIZE + 2], int temp_grid[][SIZE + 2],
                        int move_grid[][SIZE + 2], int rowAndColumn[][SIZE + 2])
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int idy = blockIdx.y * blockDim.y + threadIdx.y;
    int current_priority = idx * (SIZE + 2) + idy;
    int row = 0;
    int column = 0;
    int old_value;
    if (temp_grid[idx][idy] != 0) {
        while (true) {
            row = getnextrand(&state[idx][idy]);
            column = getnextrand(&state[idx][idy]);
            if (new_grid[row][column] == 0) {
                old_value = atomicMax(&move_grid[row][column], current_priority);
                if (old_value < current_priority) {
                    if (old_value != 0) {
                        agentsLeft = true; // evicted a lower-priority claimant
                    }
                } else {
                    agentsLeft = true; // lost to a higher-priority claimant
                }
                break;
            }
        }
    }
}

// Commits winning claims: each claimed cell copies the winning agent from its
// source cell (encoded in the priority) and clears the source's unhappy mark.
__global__ void updateTonew(int grid[][SIZE + 2], int new_grid[][SIZE + 2],
                            int temp_grid[][SIZE + 2], int move_grid[][SIZE + 2],
                            int rowAndColumn[][SIZE + 2])
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int idy = blockIdx.y * blockDim.y + threadIdx.y;
    int movegrid_priority = move_grid[idx][idy];
    if (movegrid_priority != 0) {
        int source_row = movegrid_priority / (SIZE + 2);
        int source_col = movegrid_priority % (SIZE + 2);
        new_grid[idx][idy] = grid[source_row][source_col];
        temp_grid[source_row][source_col] = 0;
    }
}

// Copies the staging grid back into the primary grid.
__global__ void newTogrid(int grid[][SIZE + 2], int new_grid[][SIZE + 2])
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int idy = blockIdx.y * blockDim.y + threadIdx.y;
    grid[idx][idy] = new_grid[idx][idy];
}

// Resets the per-round claim grid.
__global__ void clearMoveGrid(int move_grid[][SIZE + 2])
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int idy = blockIdx.y * blockDim.y + threadIdx.y;
    move_grid[idx][idy] = 0;
}

// Resets both the unhappy-agent grid and the claim grid.
__global__ void update(int temp_grid[][SIZE + 2], int move_grid[][SIZE + 2])
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int idy = blockIdx.y * blockDim.y + threadIdx.y;
    temp_grid[idx][idy] = 0;
    move_grid[idx][idy] = 0;
}

// Debug helper: counts agents of each type on the host-side grid.
void checkNumber(int grid[SIZE + 2][SIZE + 2])
{
    int agentTypeOne = 0;
    int agentTypeTwo = 0;
    for (int i = 0; i < SIZE + 2; i++) {
        for (int j = 0; j < SIZE + 2; j++) {
            if (grid[i][j] == 1) {
                agentTypeOne += 1;
            } else if (grid[i][j] == 2) {
                agentTypeTwo += 1;
            }
        }
    }
    printf("Type One %d, Type Two %d\n", agentTypeOne, agentTypeTwo);
}

// Host copy of the grid, including the always-empty one-cell border.
int host_grid[SIZE + 2][SIZE + 2];

int main(int argc, char* argv[])
{
    cudaDeviceSetLimit(cudaLimitPrintfFifoSize, 10 * 1024 * 1024);

    // Fixed: argv[1] was previously dereferenced without checking argc.
    if (argc < 2) {
        fprintf(stderr, "usage: %s <number_of_rounds>\n", argv[0]);
        return EXIT_FAILURE;
    }

    struct timespec start, stop;
    double accum;
    int (*device_grid)[SIZE + 2];
    int (*device_newGrid)[SIZE + 2];
    int (*device_moveGrid)[SIZE + 2];
    int (*device_tempGrid)[SIZE + 2];
    int (*device_rowAndColumn)[SIZE + 2];
    srand(SRAND_VALUE);
    size_t bytes = sizeof(int) * (SIZE + 2) * (SIZE + 2);
    myCurandState_t (*devState)[SIZE + 2];
    bool agentsRemain = false;

    cudaMalloc((void**)&devState, (SIZE + 2) * (SIZE + 2) * sizeof(myCurandState_t));
    cudaMalloc((void**)&device_grid, bytes);
    cudaMalloc((void**)&device_newGrid, bytes);
    cudaMalloc((void**)&device_tempGrid, bytes);
    cudaMalloc((void**)&device_moveGrid, bytes);
    cudaMalloc((void**)&device_rowAndColumn, bytes);

    // Square blocks covering the whole (SIZE+2)^2 grid.
    int blockSizePerDim = sqrt(numThreadsPerBlock);
    int gridSizePerDim = (SIZE + 2) / blockSizePerDim;
    dim3 blockSize(blockSizePerDim, blockSizePerDim, 1);
    dim3 gridSize(gridSizePerDim, gridSizePerDim, 1);

    initCurand<<<gridSize, blockSize>>>(devState, 1);

    for (int i = 0; i < (SIZE + 2); i++) {
        for (int j = 0; j < SIZE + 2; j++) {
            host_grid[i][j] = 0;
        }
    }
    initPos(host_grid);
    // printOutput(host_grid);

    cudaMemcpy(device_grid, host_grid, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(device_newGrid, host_grid, bytes, cudaMemcpyHostToDevice);
    newTogrid<<<gridSize, blockSize>>>(device_grid, device_newGrid);
    update<<<gridSize, blockSize>>>(device_tempGrid, device_moveGrid);

    if (clock_gettime(CLOCK_REALTIME, &start) == -1) {
        perror("clock gettime");
        exit(EXIT_FAILURE);
    }

    int numRoundsTotal = atoi(argv[1]);
    for (int i = 0; i < numRoundsTotal; i++) {
        compute<<<gridSize, blockSize>>>(device_grid, device_newGrid, device_tempGrid, device_moveGrid);
#ifdef DEBUG
        cudaDeviceSynchronize();
        cudaCheckError();
#endif
        prepareNewGrid<<<gridSize, blockSize>>>(device_tempGrid, device_newGrid);
        do {
            agentsRemain = false;
            cudaMemcpyToSymbol(agentsLeft, &agentsRemain, sizeof(bool), 0, cudaMemcpyHostToDevice);
            assign_<<<gridSize, blockSize>>>(devState, device_grid, device_newGrid, device_tempGrid, device_moveGrid, device_rowAndColumn);
            updateTonew<<<gridSize, blockSize>>>(device_grid, device_newGrid, device_tempGrid, device_moveGrid, device_rowAndColumn);
            clearMoveGrid<<<gridSize, blockSize>>>(device_moveGrid);
            cudaMemcpyFromSymbol(&agentsRemain, agentsLeft, sizeof(bool), 0, cudaMemcpyDeviceToHost);
        } while (agentsRemain == true); // retry until every unhappy agent has a cell
        newTogrid<<<gridSize, blockSize>>>(device_grid, device_newGrid);
        update<<<gridSize, blockSize>>>(device_tempGrid, device_moveGrid);
        cudaDeviceSynchronize();
    }

    if (clock_gettime(CLOCK_REALTIME, &stop) == -1) {
        perror("clock gettime");
        exit(EXIT_FAILURE);
    }
    accum = (stop.tv_sec - start.tv_sec) * 1e6 + (stop.tv_nsec - start.tv_nsec) / 1e3;
    printf("%.1f Time is %.5f s \n", float(OCCUPANCY), accum / 1e6);

    cudaMemcpy(host_grid, device_grid, bytes, cudaMemcpyDeviceToHost);
    // printOutput(host_grid);
    // checkNumber(host_grid);

    cudaFree(device_grid);
    cudaFree(device_newGrid);
    cudaFree(device_tempGrid);
    cudaFree(device_moveGrid); // fixed: this buffer was previously leaked
    cudaFree(devState);
    cudaFree(device_rowAndColumn);
    return 0;
}

// Prints the whole grid (including the border) to stdout.
void printOutput(int grid[SIZE + 2][SIZE + 2])
{
    for (int i = 0; i < SIZE + 2; i++) {
        for (int j = 0; j < SIZE + 2; j++) {
            printf("%d ", grid[i][j]);
        }
        printf("\n");
    }
    printf("\n");
}

// Randomly places the two agent populations on distinct interior cells.
void initPos(int grid[SIZE + 2][SIZE + 2])
{
    int row;
    int column;
    for (int i = 0; i < agentTypeOneNumber; i++) {
        do {
            row = random_location();
            column = random_location();
        } while (grid[row][column] != 0);
        grid[row][column] = 1;
    }
    for (int i = 0; i < agentTypeTwoNumber; i++) {
        do {
            row = random_location();
            column = random_location();
        } while (grid[row][column] != 0);
        grid[row][column] = 2;
    }
}

// Returns a random interior coordinate in [1, SIZE].
int random_location()
{
    int r = rand();
    return (r % (SIZE) + 1);
}
d2b2a3a80103984e1c32ef8f642dd2b8a7da736e.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

// Matrix dimensions this kernel is intended to operate on.
#define W 4000
#define H 20530

// Kernel stub: presumably meant to reduce 'matrix' into per-row or per-column
// means in 'mean' — TODO confirm. The body is empty, so launching it performs
// no reads or writes.
__global__ void calcmean(float *matrix, float *mean){ }
d2b2a3a80103984e1c32ef8f642dd2b8a7da736e.cu
#include "includes.h"

// Matrix dimensions this kernel is intended to operate on.
#define W 4000
#define H 20530

// Kernel stub: presumably meant to reduce 'matrix' into per-row or per-column
// means in 'mean' — TODO confirm. The body is empty, so launching it performs
// no reads or writes.
__global__ void calcmean(float *matrix, float *mean){ }
f5ae78c7a536fc8f5b9257f87808183657a6be48.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "core/providers/cuda/cu_inc/common.cuh" #include "core/providers/cuda/cuda_common.h" #include "split_impl.h" namespace onnxruntime { namespace cuda { template <typename T> __global__ void _SplitKernel(const fast_divmod block_size_including_axis_dim_div, const fast_divmod block_size_inside_axis_dim_div, const int64_t* split_sizes, const int64_t* split_sizes_range, const int64_t* axis_dimension_input_output_mapping, const int num_outputs, const T* input_data, void** output_ptr, const CUDA_LONG N) { CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N); CUDA_LONG output_pos = 0; int outter_block_index = 0; int block_index = 0; int offset = 0; block_size_including_axis_dim_div.divmod(id, outter_block_index, offset); block_size_inside_axis_dim_div.divmod(offset, block_index, offset); int output_index = axis_dimension_input_output_mapping[block_index]; int64_t range_left = (output_index == 0) ? 
0 : split_sizes_range[output_index - 1]; int block_offset = block_index - range_left; output_pos = (outter_block_index * split_sizes[output_index] + block_offset) * block_size_inside_axis_dim_div.d_ + offset; reinterpret_cast<T*>(output_ptr[output_index])[output_pos] = input_data[id]; } Status SplitImpl(const size_t element_size, const int block_size_including_axis_dim, const int block_size_inside_axis_dim, const int64_t* split_sizes, const int64_t* split_sizes_range, const int64_t* axis_dimension_input_output_mapping, const int num_outputs, const void* input_data, void** output_ptr, const size_t N) { int blocksPerGrid = (int)(ceil(static_cast<float>(N) / GridDim::maxThreadsPerBlock)); fast_divmod block_size_including_axis_dim_div = fast_divmod(block_size_including_axis_dim); fast_divmod block_size_inside_axis_dim_div = fast_divmod(block_size_inside_axis_dim); switch (element_size) { case sizeof(int8_t): hipLaunchKernelGGL(( _SplitKernel), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, 0, block_size_including_axis_dim_div, block_size_inside_axis_dim_div, split_sizes, split_sizes_range, axis_dimension_input_output_mapping, num_outputs, reinterpret_cast<const ToCudaType<int8_t>::MappedType*>(input_data), output_ptr, (CUDA_LONG)N); break; case sizeof(int16_t): hipLaunchKernelGGL(( _SplitKernel), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, 0, block_size_including_axis_dim_div, block_size_inside_axis_dim_div, split_sizes, split_sizes_range, axis_dimension_input_output_mapping, num_outputs, reinterpret_cast<const ToCudaType<int16_t>::MappedType*>(input_data), output_ptr, (CUDA_LONG)N); break; case sizeof(int32_t): hipLaunchKernelGGL(( _SplitKernel), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, 0, block_size_including_axis_dim_div, block_size_inside_axis_dim_div, split_sizes, split_sizes_range, axis_dimension_input_output_mapping, num_outputs, reinterpret_cast<const ToCudaType<int32_t>::MappedType*>(input_data), output_ptr, 
(CUDA_LONG)N); break; case sizeof(int64_t): hipLaunchKernelGGL(( _SplitKernel), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, 0, block_size_including_axis_dim_div, block_size_inside_axis_dim_div, split_sizes, split_sizes_range, axis_dimension_input_output_mapping, num_outputs, reinterpret_cast<const ToCudaType<int64_t>::MappedType*>(input_data), output_ptr, (CUDA_LONG)N); break; default: return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Type not supported for Slice operator"); } return Status::OK(); } } // namespace cuda } // namespace onnxruntime
f5ae78c7a536fc8f5b9257f87808183657a6be48.cu
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "core/providers/cuda/cu_inc/common.cuh" #include "core/providers/cuda/cuda_common.h" #include "split_impl.h" namespace onnxruntime { namespace cuda { template <typename T> __global__ void _SplitKernel(const fast_divmod block_size_including_axis_dim_div, const fast_divmod block_size_inside_axis_dim_div, const int64_t* split_sizes, const int64_t* split_sizes_range, const int64_t* axis_dimension_input_output_mapping, const int num_outputs, const T* input_data, void** output_ptr, const CUDA_LONG N) { CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N); CUDA_LONG output_pos = 0; int outter_block_index = 0; int block_index = 0; int offset = 0; block_size_including_axis_dim_div.divmod(id, outter_block_index, offset); block_size_inside_axis_dim_div.divmod(offset, block_index, offset); int output_index = axis_dimension_input_output_mapping[block_index]; int64_t range_left = (output_index == 0) ? 0 : split_sizes_range[output_index - 1]; int block_offset = block_index - range_left; output_pos = (outter_block_index * split_sizes[output_index] + block_offset) * block_size_inside_axis_dim_div.d_ + offset; reinterpret_cast<T*>(output_ptr[output_index])[output_pos] = input_data[id]; } Status SplitImpl(const size_t element_size, const int block_size_including_axis_dim, const int block_size_inside_axis_dim, const int64_t* split_sizes, const int64_t* split_sizes_range, const int64_t* axis_dimension_input_output_mapping, const int num_outputs, const void* input_data, void** output_ptr, const size_t N) { int blocksPerGrid = (int)(ceil(static_cast<float>(N) / GridDim::maxThreadsPerBlock)); fast_divmod block_size_including_axis_dim_div = fast_divmod(block_size_including_axis_dim); fast_divmod block_size_inside_axis_dim_div = fast_divmod(block_size_inside_axis_dim); switch (element_size) { case sizeof(int8_t): _SplitKernel<<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0>>>( 
block_size_including_axis_dim_div, block_size_inside_axis_dim_div, split_sizes, split_sizes_range, axis_dimension_input_output_mapping, num_outputs, reinterpret_cast<const ToCudaType<int8_t>::MappedType*>(input_data), output_ptr, (CUDA_LONG)N); break; case sizeof(int16_t): _SplitKernel<<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0>>>( block_size_including_axis_dim_div, block_size_inside_axis_dim_div, split_sizes, split_sizes_range, axis_dimension_input_output_mapping, num_outputs, reinterpret_cast<const ToCudaType<int16_t>::MappedType*>(input_data), output_ptr, (CUDA_LONG)N); break; case sizeof(int32_t): _SplitKernel<<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0>>>( block_size_including_axis_dim_div, block_size_inside_axis_dim_div, split_sizes, split_sizes_range, axis_dimension_input_output_mapping, num_outputs, reinterpret_cast<const ToCudaType<int32_t>::MappedType*>(input_data), output_ptr, (CUDA_LONG)N); break; case sizeof(int64_t): _SplitKernel<<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0>>>( block_size_including_axis_dim_div, block_size_inside_axis_dim_div, split_sizes, split_sizes_range, axis_dimension_input_output_mapping, num_outputs, reinterpret_cast<const ToCudaType<int64_t>::MappedType*>(input_data), output_ptr, (CUDA_LONG)N); break; default: return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Type not supported for Slice operator"); } return Status::OK(); } } // namespace cuda } // namespace onnxruntime
cf470cbecedc3a45d3ddcdf18d36160eaa062eae.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*************************************************************************************************** * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief Statically sized array of elements that accommodates all CUTLASS-supported numeric types and is safe to use in a union. */ #include "../common/cutlass_unit_test.h" #include "cutlass/array.h" #include "cutlass/util/device_memory.h" #pragma warning( disable : 4800) ///////////////////////////////////////////////////////////////////////////////////////////////// namespace test { namespace core { /// Each thread clears its array and writes to global memory. No PRMT instructions should /// be generated if Array<T, N> is a multiple of 32 bits. template <typename T, int N> __global__ void test_array_clear(cutlass::Array<T, N> *ptr) { cutlass::Array<T, N> storage; storage.clear(); ptr[threadIdx.x] = storage; } /// Each thread writes its thread index into the elements of its array and then writes the result /// to global memory. template <typename T, int N> __global__ void test_array_threadid(cutlass::Array<T, N> *ptr) { cutlass::Array<T, N> storage; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N; ++i) { storage.at(i) = T(int(threadIdx.x)); } ptr[threadIdx.x] = storage; } /// Each thread writes its thread index into the elements of its array and then writes the result /// to global memory. 
template <typename T, int N> __global__ void test_array_sequence(cutlass::Array<T, N> *ptr) { cutlass::Array<T, N> storage; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N; ++i) { storage.at(i) = T(i); } ptr[threadIdx.x] = storage; } } // namespace core } // namespace test ///////////////////////////////////////////////////////////////////////////////////////////////// template <typename T, int N> class TestArray { public: // // Data members // /// Number of threads int const kThreads = 32; typedef cutlass::Array<T, N> ArrayTy; // // Methods // /// Ctor TestArray() { } /// Runs the test void run() { /// Device memory containing output cutlass::device_memory::allocation< ArrayTy > output(kThreads); std::vector< ArrayTy > output_host(kThreads); dim3 grid(1,1); dim3 block(kThreads, 1, 1); hipLaunchKernelGGL(( test::core::test_array_clear), dim3(grid), dim3(block) , 0, 0, output.get()); hipError_t result = hipDeviceSynchronize(); ASSERT_EQ(result, hipSuccess) << "CUDA error: " << hipGetErrorString(result); // // Verify contains all zeros // cutlass::device_memory::copy_to_host(output_host.data(), output.get(), kThreads); result = hipGetLastError(); ASSERT_EQ(result, hipSuccess) << "CUDA error: " << hipGetErrorString(result); char const *ptr_host = reinterpret_cast<char const *>(output_host.data()); for (int i = 0; i < sizeof(ArrayTy) * kThreads; ++i) { EXPECT_FALSE(ptr_host[i]); } // // Verify each element contains the low bits of the thread Id // hipLaunchKernelGGL(( test::core::test_array_threadid), dim3(grid), dim3(block) , 0, 0, output.get()); result = hipDeviceSynchronize(); ASSERT_EQ(result, hipSuccess) << "CUDA error: " << hipGetErrorString(result); cutlass::device_memory::copy_to_host(output_host.data(), output.get(), kThreads); result = hipGetLastError(); ASSERT_EQ(result, hipSuccess) << "CUDA error: " << hipGetErrorString(result); for (int i = 0; i < kThreads; ++i) { T tid = T(i); ArrayTy thread = output_host.at(i); // Element-wise access for (int j = 0; j < N; 
++j) { EXPECT_TRUE(tid == thread[j]); } // Iterator access for (auto it = thread.begin(); it != thread.end(); ++it) { EXPECT_TRUE(tid == *it); } // Range-based for for (auto const & x : thread) { EXPECT_TRUE(tid == x); } } // // Verify each element // hipLaunchKernelGGL(( test::core::test_array_sequence), dim3(grid), dim3(block) , 0, 0, output.get()); result = hipDeviceSynchronize(); ASSERT_EQ(result, hipSuccess) << "CUDA error: " << hipGetErrorString(result); cutlass::device_memory::copy_to_host(output_host.data(), output.get(), kThreads); result = hipGetLastError(); ASSERT_EQ(result, hipSuccess) << "CUDA error: " << hipGetErrorString(result); for (int i = 0; i < kThreads; ++i) { ArrayTy thread = output_host.at(i); // Element-wise access for (int j = 0; j < N; ++j) { T got = T(j); EXPECT_TRUE(got == thread[j]); } // Iterator access int j = 0; for (auto it = thread.begin(); it != thread.end(); ++it, ++j) { T got = T(j); EXPECT_TRUE(got == *it); } // Range-based for j = 0; for (auto const & x : thread) { T got = T(j); EXPECT_TRUE(got == x); ++j; } } } }; ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(Array, Int8x16) { TestArray<int8_t, 16>().run(); } TEST(Array, Int32x4) { TestArray<int, 4>().run(); } #if __CUDA_ARCH__ >= 520 TEST(Array, Float16x8) { TestArray<cutlass::half_t, 8>().run(); } #endif TEST(Array, FloatBF16x8) { TestArray<cutlass::bfloat16_t, 8>().run(); } TEST(Array, FloatTF32x4) { TestArray<cutlass::tfloat32_t, 4>().run(); } TEST(Array, Float32x4) { TestArray<float, 4>().run(); } TEST(Array, Int4x32) { TestArray<cutlass::int4b_t, 32>().run(); } TEST(Array, Uint4x32) { TestArray<cutlass::uint4b_t, 32>().run(); } TEST(Array, Bin1x128) { TestArray<cutlass::bin1_t, 128>().run(); } /////////////////////////////////////////////////////////////////////////////////////////////////
cf470cbecedc3a45d3ddcdf18d36160eaa062eae.cu
/*************************************************************************************************** * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Statically sized array of elements that accommodates all CUTLASS-supported numeric types and is safe to use in a union. 
*/ #include "../common/cutlass_unit_test.h" #include "cutlass/array.h" #include "cutlass/util/device_memory.h" #pragma warning( disable : 4800) ///////////////////////////////////////////////////////////////////////////////////////////////// namespace test { namespace core { /// Each thread clears its array and writes to global memory. No PRMT instructions should /// be generated if Array<T, N> is a multiple of 32 bits. template <typename T, int N> __global__ void test_array_clear(cutlass::Array<T, N> *ptr) { cutlass::Array<T, N> storage; storage.clear(); ptr[threadIdx.x] = storage; } /// Each thread writes its thread index into the elements of its array and then writes the result /// to global memory. template <typename T, int N> __global__ void test_array_threadid(cutlass::Array<T, N> *ptr) { cutlass::Array<T, N> storage; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N; ++i) { storage.at(i) = T(int(threadIdx.x)); } ptr[threadIdx.x] = storage; } /// Each thread writes its thread index into the elements of its array and then writes the result /// to global memory. 
template <typename T, int N> __global__ void test_array_sequence(cutlass::Array<T, N> *ptr) { cutlass::Array<T, N> storage; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N; ++i) { storage.at(i) = T(i); } ptr[threadIdx.x] = storage; } } // namespace core } // namespace test ///////////////////////////////////////////////////////////////////////////////////////////////// template <typename T, int N> class TestArray { public: // // Data members // /// Number of threads int const kThreads = 32; typedef cutlass::Array<T, N> ArrayTy; // // Methods // /// Ctor TestArray() { } /// Runs the test void run() { /// Device memory containing output cutlass::device_memory::allocation< ArrayTy > output(kThreads); std::vector< ArrayTy > output_host(kThreads); dim3 grid(1,1); dim3 block(kThreads, 1, 1); test::core::test_array_clear<<< grid, block >>>(output.get()); cudaError_t result = cudaDeviceSynchronize(); ASSERT_EQ(result, cudaSuccess) << "CUDA error: " << cudaGetErrorString(result); // // Verify contains all zeros // cutlass::device_memory::copy_to_host(output_host.data(), output.get(), kThreads); result = cudaGetLastError(); ASSERT_EQ(result, cudaSuccess) << "CUDA error: " << cudaGetErrorString(result); char const *ptr_host = reinterpret_cast<char const *>(output_host.data()); for (int i = 0; i < sizeof(ArrayTy) * kThreads; ++i) { EXPECT_FALSE(ptr_host[i]); } // // Verify each element contains the low bits of the thread Id // test::core::test_array_threadid<<< grid, block >>>(output.get()); result = cudaDeviceSynchronize(); ASSERT_EQ(result, cudaSuccess) << "CUDA error: " << cudaGetErrorString(result); cutlass::device_memory::copy_to_host(output_host.data(), output.get(), kThreads); result = cudaGetLastError(); ASSERT_EQ(result, cudaSuccess) << "CUDA error: " << cudaGetErrorString(result); for (int i = 0; i < kThreads; ++i) { T tid = T(i); ArrayTy thread = output_host.at(i); // Element-wise access for (int j = 0; j < N; ++j) { EXPECT_TRUE(tid == thread[j]); } // Iterator access 
for (auto it = thread.begin(); it != thread.end(); ++it) { EXPECT_TRUE(tid == *it); } // Range-based for for (auto const & x : thread) { EXPECT_TRUE(tid == x); } } // // Verify each element // test::core::test_array_sequence<<< grid, block >>>(output.get()); result = cudaDeviceSynchronize(); ASSERT_EQ(result, cudaSuccess) << "CUDA error: " << cudaGetErrorString(result); cutlass::device_memory::copy_to_host(output_host.data(), output.get(), kThreads); result = cudaGetLastError(); ASSERT_EQ(result, cudaSuccess) << "CUDA error: " << cudaGetErrorString(result); for (int i = 0; i < kThreads; ++i) { ArrayTy thread = output_host.at(i); // Element-wise access for (int j = 0; j < N; ++j) { T got = T(j); EXPECT_TRUE(got == thread[j]); } // Iterator access int j = 0; for (auto it = thread.begin(); it != thread.end(); ++it, ++j) { T got = T(j); EXPECT_TRUE(got == *it); } // Range-based for j = 0; for (auto const & x : thread) { T got = T(j); EXPECT_TRUE(got == x); ++j; } } } }; ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(Array, Int8x16) { TestArray<int8_t, 16>().run(); } TEST(Array, Int32x4) { TestArray<int, 4>().run(); } #if __CUDA_ARCH__ >= 520 TEST(Array, Float16x8) { TestArray<cutlass::half_t, 8>().run(); } #endif TEST(Array, FloatBF16x8) { TestArray<cutlass::bfloat16_t, 8>().run(); } TEST(Array, FloatTF32x4) { TestArray<cutlass::tfloat32_t, 4>().run(); } TEST(Array, Float32x4) { TestArray<float, 4>().run(); } TEST(Array, Int4x32) { TestArray<cutlass::int4b_t, 32>().run(); } TEST(Array, Uint4x32) { TestArray<cutlass::uint4b_t, 32>().run(); } TEST(Array, Bin1x128) { TestArray<cutlass::bin1_t, 128>().run(); } /////////////////////////////////////////////////////////////////////////////////////////////////
5d4684b62a03194e6666d7e9576c7a9fcc4f8965.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime_api.h> #define BASE_TYPE float __global__ void dot_produce(const BASE_TYPE *a, const BASE_TYPE *b, BASE_TYPE *result, const int N) { extern __shared__ BASE_TYPE s[]; int index = blockDim.x * blockIdx.x + threadIdx.x; s[threadIdx.x] = a[index] * b[index]; __syncthreads(); if (threadIdx.x == 0) { for (int i = 1; i < blockDim.x; i++) s[0] += s[i]; result[blockIdx.x] = s[0]; } } BASE_TYPE* gen_array(const int N) { BASE_TYPE *a = new BASE_TYPE[N]; for (int i = 0; i < N; i++) { a[i] = i; } return a; } void print_vector(BASE_TYPE *a, const int N) { for (int i = 0; i < N; i++) printf("%3.0f ", a[i]); printf("\n"); } void cuda_init_array(BASE_TYPE **dev, const BASE_TYPE *host, const size_t size) { hipError_t err; err = hipMalloc((void **)dev, size); if (err != hipSuccess) throw err; if (host != NULL) { err = hipMemcpy(*dev, host, size, hipMemcpyHostToDevice); if (err != hipSuccess) throw err; } } void cuda_init_grid_and_block(dim3 *grid, dim3 *block, const int threadsPerBlock, const int N) { *grid = dim3(1); *block = dim3(N); printf("Block %d %d %d\n", block->x, block->y, block->z); printf("Grid %d %d %d\n", grid->x, grid->y, grid->z); } int main() { const int N = 10; const int threadsPerBlock = N; const size_t size = N * sizeof(BASE_TYPE); const size_t result_size = size / threadsPerBlock; hipError_t err; dim3 blockDim, gridDim; cuda_init_grid_and_block(&blockDim, &gridDim, threadsPerBlock, N); BASE_TYPE *host_a = gen_array(N), *host_b = gen_array(N); BASE_TYPE *dev_a, *dev_b, *dev_c; BASE_TYPE result; print_vector(host_a, N); print_vector(host_b, N); try { cuda_init_array(&dev_a, host_a, size); cuda_init_array(&dev_b, host_b, size); cuda_init_array(&dev_c, NULL, sizeof(BASE_TYPE)); } catch (hipError_t err) { fprintf(stderr, "Failed to allocate device (error code: %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } hipLaunchKernelGGL(( 
dot_produce), dim3(blockDim), dim3(gridDim), threadsPerBlock * sizeof(BASE_TYPE), 0, dev_a, dev_b, dev_c, N); err = hipMemcpy(&result, dev_c, result_size, hipMemcpyDeviceToHost); if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device (error code: %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } printf("%4.2f\n", result); hipFree(dev_a); hipFree(dev_b); hipFree(dev_c); delete[] host_a; delete[] host_b; return 0; }
5d4684b62a03194e6666d7e9576c7a9fcc4f8965.cu
#include <stdio.h> #include <stdlib.h> #include <cuda_runtime_api.h> #define BASE_TYPE float __global__ void dot_produce(const BASE_TYPE *a, const BASE_TYPE *b, BASE_TYPE *result, const int N) { extern __shared__ BASE_TYPE s[]; int index = blockDim.x * blockIdx.x + threadIdx.x; s[threadIdx.x] = a[index] * b[index]; __syncthreads(); if (threadIdx.x == 0) { for (int i = 1; i < blockDim.x; i++) s[0] += s[i]; result[blockIdx.x] = s[0]; } } BASE_TYPE* gen_array(const int N) { BASE_TYPE *a = new BASE_TYPE[N]; for (int i = 0; i < N; i++) { a[i] = i; } return a; } void print_vector(BASE_TYPE *a, const int N) { for (int i = 0; i < N; i++) printf("%3.0f ", a[i]); printf("\n"); } void cuda_init_array(BASE_TYPE **dev, const BASE_TYPE *host, const size_t size) { cudaError_t err; err = cudaMalloc((void **)dev, size); if (err != cudaSuccess) throw err; if (host != NULL) { err = cudaMemcpy(*dev, host, size, cudaMemcpyHostToDevice); if (err != cudaSuccess) throw err; } } void cuda_init_grid_and_block(dim3 *grid, dim3 *block, const int threadsPerBlock, const int N) { *grid = dim3(1); *block = dim3(N); printf("Block %d %d %d\n", block->x, block->y, block->z); printf("Grid %d %d %d\n", grid->x, grid->y, grid->z); } int main() { const int N = 10; const int threadsPerBlock = N; const size_t size = N * sizeof(BASE_TYPE); const size_t result_size = size / threadsPerBlock; cudaError_t err; dim3 blockDim, gridDim; cuda_init_grid_and_block(&blockDim, &gridDim, threadsPerBlock, N); BASE_TYPE *host_a = gen_array(N), *host_b = gen_array(N); BASE_TYPE *dev_a, *dev_b, *dev_c; BASE_TYPE result; print_vector(host_a, N); print_vector(host_b, N); try { cuda_init_array(&dev_a, host_a, size); cuda_init_array(&dev_b, host_b, size); cuda_init_array(&dev_c, NULL, sizeof(BASE_TYPE)); } catch (cudaError_t err) { fprintf(stderr, "Failed to allocate device (error code: %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } dot_produce<<<blockDim, gridDim, threadsPerBlock * sizeof(BASE_TYPE)>>>(dev_a, dev_b, 
dev_c, N); err = cudaMemcpy(&result, dev_c, result_size, cudaMemcpyDeviceToHost); if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate device (error code: %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } printf("%4.2f\n", result); cudaFree(dev_a); cudaFree(dev_b); cudaFree(dev_c); delete[] host_a; delete[] host_b; return 0; }
7204b1862f7d374b996667ff033cd18bbf7b7b2b.hip
// !!! This is a file automatically generated by hipify!!! // local evictions #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include "test.h" #include "gmm.h" #include "debug.h" __global__ void kernel_inc(int *data, int count) { int tot_threads = gridDim.x * blockDim.x; int i = blockIdx.x * blockDim.x + threadIdx.x; for (; i < count; i += tot_threads) data[i]++; } int test_evict_local() { int *dptr = NULL, *dptr2 = NULL, *ptr = NULL; size_t size, sfree, total; int i, count, ret = 0; if (hipMemGetInfo(&sfree, &total) != hipSuccess) { GMM_TPRINT("failed to get mem info\n"); return -1; } size = total * 3 / 4; count = size / sizeof(int); ptr = (int *)malloc(size); if (!ptr) { GMM_TPRINT("malloc failed for ptr\n"); return -1; } memset(ptr, 0, size); if (hipMalloc(&dptr, size) != hipSuccess) { GMM_TPRINT("hipMalloc failed\n"); free(ptr); return -1; } gmm_print_dptr(dptr); if (hipMalloc(&dptr2, size) != hipSuccess) { GMM_TPRINT("hipMalloc failed\n"); hipFree(dptr); free(ptr); return -1; } if (hipMemcpy(dptr, ptr, size, hipMemcpyHostToDevice) != hipSuccess) { GMM_TPRINT("hipMemcpyHostToDevice to dptr failed\n"); ret = -1; goto finish; } GMM_TPRINT("hipMemcpyHostToDevice succeeded\n"); if (hipMemcpy(dptr2, ptr, size, hipMemcpyHostToDevice) != hipSuccess) { GMM_TPRINT("hipMemcpyHostToDevice to deptr2 failed\n"); ret = -1; goto finish; } GMM_TPRINT("hipMemcpyHostToDevice succeeded\n"); if (cudaReference(0, HINT_DEFAULT) != hipSuccess) { GMM_TPRINT("cudaReference failed\n"); ret = -1; goto finish; } hipLaunchKernelGGL(( kernel_inc), dim3(256), dim3(128), 0, 0, dptr, count); if (hipDeviceSynchronize() != hipSuccess) { GMM_TPRINT("hipDeviceSynchronize returned error\n"); ret = -1; goto finish; } else GMM_TPRINT("1st kernel finished\n"); gmm_print_dptr(dptr); if (cudaReference(0, HINT_DEFAULT) != hipSuccess) { GMM_TPRINT("cudaReference failed\n"); ret = -1; goto finish; } hipLaunchKernelGGL(( kernel_inc), dim3(256), dim3(128), 0, 0, dptr2, count); if 
(hipDeviceSynchronize() != hipSuccess) { GMM_TPRINT("hipDeviceSynchronize returned error\n"); ret = -1; goto finish; } else GMM_TPRINT("2nd kernel finished\n"); gmm_print_dptr(dptr); if (hipMemcpy(ptr, dptr, size, hipMemcpyDeviceToHost) != hipSuccess) { GMM_TPRINT("hipMemcpy DtoH failed\n"); ret = -1; goto finish; } GMM_TPRINT("hipMemcpyDeviceToHost succeeded\n"); for(i = 0; i < count; i++) if (ptr[i] != 1) { GMM_TPRINT("verification failed at ptr[%d]==%d\n", i, ptr[i]); ret = -1; goto finish; } if (hipMemcpy(ptr, dptr2, size, hipMemcpyDeviceToHost) != hipSuccess) { GMM_TPRINT("hipMemcpy DtoH failed\n"); ret = -1; goto finish; } GMM_TPRINT("hipMemcpyDeviceToHost succeeded\n"); for(i = 0; i < count; i++) if (ptr[i] != 1) { GMM_TPRINT("verification failed at ptr[%d]==%d\n", i, ptr[i]); ret = -1; goto finish; } GMM_TPRINT("verification passed\n"); finish: if (hipFree(dptr2) != hipSuccess) { GMM_TPRINT("hipFree failed\n"); } if (hipFree(dptr) != hipSuccess) { GMM_TPRINT("hipFree failed\n"); } free(ptr); return ret; }
7204b1862f7d374b996667ff033cd18bbf7b7b2b.cu
// local evictions #include <stdio.h> #include <stdlib.h> #include <cuda.h> #include "test.h" #include "gmm.h" #include "debug.h" __global__ void kernel_inc(int *data, int count) { int tot_threads = gridDim.x * blockDim.x; int i = blockIdx.x * blockDim.x + threadIdx.x; for (; i < count; i += tot_threads) data[i]++; } int test_evict_local() { int *dptr = NULL, *dptr2 = NULL, *ptr = NULL; size_t size, sfree, total; int i, count, ret = 0; if (cudaMemGetInfo(&sfree, &total) != cudaSuccess) { GMM_TPRINT("failed to get mem info\n"); return -1; } size = total * 3 / 4; count = size / sizeof(int); ptr = (int *)malloc(size); if (!ptr) { GMM_TPRINT("malloc failed for ptr\n"); return -1; } memset(ptr, 0, size); if (cudaMalloc(&dptr, size) != cudaSuccess) { GMM_TPRINT("cudaMalloc failed\n"); free(ptr); return -1; } gmm_print_dptr(dptr); if (cudaMalloc(&dptr2, size) != cudaSuccess) { GMM_TPRINT("cudaMalloc failed\n"); cudaFree(dptr); free(ptr); return -1; } if (cudaMemcpy(dptr, ptr, size, cudaMemcpyHostToDevice) != cudaSuccess) { GMM_TPRINT("cudaMemcpyHostToDevice to dptr failed\n"); ret = -1; goto finish; } GMM_TPRINT("cudaMemcpyHostToDevice succeeded\n"); if (cudaMemcpy(dptr2, ptr, size, cudaMemcpyHostToDevice) != cudaSuccess) { GMM_TPRINT("cudaMemcpyHostToDevice to deptr2 failed\n"); ret = -1; goto finish; } GMM_TPRINT("cudaMemcpyHostToDevice succeeded\n"); if (cudaReference(0, HINT_DEFAULT) != cudaSuccess) { GMM_TPRINT("cudaReference failed\n"); ret = -1; goto finish; } kernel_inc<<<256, 128>>>(dptr, count); if (cudaDeviceSynchronize() != cudaSuccess) { GMM_TPRINT("cudaThreadSynchronize returned error\n"); ret = -1; goto finish; } else GMM_TPRINT("1st kernel finished\n"); gmm_print_dptr(dptr); if (cudaReference(0, HINT_DEFAULT) != cudaSuccess) { GMM_TPRINT("cudaReference failed\n"); ret = -1; goto finish; } kernel_inc<<<256, 128>>>(dptr2, count); if (cudaDeviceSynchronize() != cudaSuccess) { GMM_TPRINT("cudaThreadSynchronize returned error\n"); ret = -1; goto finish; } else 
GMM_TPRINT("2nd kernel finished\n"); gmm_print_dptr(dptr); if (cudaMemcpy(ptr, dptr, size, cudaMemcpyDeviceToHost) != cudaSuccess) { GMM_TPRINT("cudaMemcpy DtoH failed\n"); ret = -1; goto finish; } GMM_TPRINT("cudaMemcpyDeviceToHost succeeded\n"); for(i = 0; i < count; i++) if (ptr[i] != 1) { GMM_TPRINT("verification failed at ptr[%d]==%d\n", i, ptr[i]); ret = -1; goto finish; } if (cudaMemcpy(ptr, dptr2, size, cudaMemcpyDeviceToHost) != cudaSuccess) { GMM_TPRINT("cudaMemcpy DtoH failed\n"); ret = -1; goto finish; } GMM_TPRINT("cudaMemcpyDeviceToHost succeeded\n"); for(i = 0; i < count; i++) if (ptr[i] != 1) { GMM_TPRINT("verification failed at ptr[%d]==%d\n", i, ptr[i]); ret = -1; goto finish; } GMM_TPRINT("verification passed\n"); finish: if (cudaFree(dptr2) != cudaSuccess) { GMM_TPRINT("cudaFree failed\n"); } if (cudaFree(dptr) != cudaSuccess) { GMM_TPRINT("cudaFree failed\n"); } free(ptr); return ret; }
size2_selector.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
 * Copyright (c) 2019, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <nvgraph_cusparse.hxx>
#include <size2_selector.hxx>
#include <common_selector.hxx>
#include <async_event.hxx>

#include <thrust/device_vector.h>
#include <thrust/count.h>         //count
#include <thrust/sort.h>          //sort
#include <thrust/binary_search.h> //lower_bound
#include <thrust/unique.h>        //unique

// This should be enabled
#define EXPERIMENTAL_ITERATIVE_MATCHING

namespace nvgraph {

// Compact the aggregate labels in |aggregates| (device vector, length n) into
// the contiguous range [0, num_aggregates) and report the aggregate count.
// Uses a scatter (mark used labels) + exclusive scan + gather on the device.
template <typename IndexType>
void renumberAndCountAggregates(Vector<IndexType> &aggregates, const IndexType n, IndexType& num_aggregates)
{
    // renumber aggregates
    Vector<IndexType> scratch(n+1);
    scratch.fill(0);
    thrust::device_ptr<IndexType> aggregates_thrust_dev_ptr(aggregates.raw());
    thrust::device_ptr<IndexType> scratch_thrust_dev_ptr(scratch.raw());

    // set scratch[aggregates[i]] = 1
    thrust::fill(thrust::make_permutation_iterator(scratch_thrust_dev_ptr, aggregates_thrust_dev_ptr),
                 thrust::make_permutation_iterator(scratch_thrust_dev_ptr, aggregates_thrust_dev_ptr + n), 1);
    //scratch.dump(0,scratch.get_size());

    // do prefix sum on scratch
    thrust::exclusive_scan(scratch_thrust_dev_ptr, scratch_thrust_dev_ptr+n+1, scratch_thrust_dev_ptr);
    // scratch.dump(0,scratch.get_size());

    // aggregates[i] = scratch[aggregates[i]]
    thrust::copy(thrust::make_permutation_iterator(scratch_thrust_dev_ptr, aggregates_thrust_dev_ptr),
                 thrust::make_permutation_iterator(scratch_thrust_dev_ptr, aggregates_thrust_dev_ptr + n),
                 aggregates_thrust_dev_ptr);
    cudaCheckError();
    // Last scan entry is the total number of distinct labels.
    // NOTE(review): copies sizeof(int) regardless of IndexType -- only safe for
    // the int instantiation at the bottom of this file; confirm before adding
    // wider index types.
    hipMemcpy(&num_aggregates, &scratch.raw()[scratch.get_size()-1], sizeof(int), hipMemcpyDefault);
    //num_aggregates = scratch.raw()[scratch.get_size()-1];
    cudaCheckError();
}

// ------------------
//  Constructors
// ------------------
template <typename IndexType, typename ValueType>
Size2Selector<IndexType, ValueType>::Size2Selector()
{
    // Using default values from AmgX
    m_deterministic = 1;
    m_stream = 0;
    m_max_iterations = 15;
    m_numUnassigned_tol = 0.05;
    m_two_phase = 0;
    m_aggregation_edge_weight_component = 0;
    m_merge_singletons = 1;
    m_weight_formula = 0;
    m_similarity_metric = SCALED_BY_ROW_SUM;
}

// ------------------
//  Methods
// ------------------

// setAggregates for block_dia_csr_matrix_d format.
// Pairwise (size-2) aggregation: compute per-edge weights, then iterative
// handshake matching; leftovers are merged into neighbours or made singletons.
// Outputs: aggregates (label per vertex, renumbered) and num_aggregates.
template <typename IndexType, typename ValueType>
NVGRAPH_ERROR Size2Selector<IndexType, ValueType>::setAggregates_common_sqblocks(const ValuedCsrGraph<IndexType, ValueType> &A, Vector<IndexType> &aggregates, int &num_aggregates)
{
    const IndexType n = (int) A.get_num_vertices();
    const IndexType nnz = (int) A.get_num_edges();
    const IndexType *A_row_offsets_ptr = A.get_raw_row_offsets();
    const IndexType *A_column_indices_ptr = A.get_raw_column_indices();
    const ValueType *A_nonzero_values_ptr = A.get_raw_values();

    // compute row indices
    Vector<IndexType> row_indices(nnz);
    Cusparse::csr2coo( n, nnz, A_row_offsets_ptr, row_indices.raw()); // note : amgx uses cusp for that
    const IndexType *A_row_indices_ptr = row_indices.raw();

    //All vectors should be initialized to -1.
    aggregates.fill(-1);
    Vector<IndexType> strongest_neighbour(n);
    strongest_neighbour.fill(-1);
    Vector<IndexType> strongest_neighbour_1phase(n);
    strongest_neighbour_1phase.fill(-1);
    Vector<float> edge_weights(nnz);
    edge_weights.fill(-1);
    float *edge_weights_ptr = edge_weights.raw();
    float *rand_edge_weights_ptr = NULL;
    cudaCheckError();

    IndexType *strongest_neighbour_ptr = strongest_neighbour.raw();
    IndexType *strongest_neighbour_1phase_ptr = strongest_neighbour_1phase.raw();
    IndexType *aggregates_ptr = aggregates.raw();

    const int threads_per_block = 256;
    const int max_grid_size = 256;
    // Grids capped at max_grid_size; kernels are expected to stride internally.
    const int num_blocks = min( max_grid_size, (n-1)/threads_per_block+ 1 );
    const int num_blocks_V2 = min( max_grid_size, (nnz-1)/threads_per_block + 1);
    int bsize = 1; // AmgX legacy: we don't use block CSR matrices, this is just to specify that we run on regular matrices
    int numUnassigned = n;
    int numUnassigned_previous = numUnassigned;
    thrust::device_ptr<IndexType> aggregates_thrust_dev_ptr(aggregates_ptr);

    // Edge-weight computation: one strategy per similarity metric.
    switch(m_similarity_metric)
    {
        case USER_PROVIDED :
        {
            //copy non zero values of A in edge_weights (float)
            hipLaunchKernelGGL(( convert_type), dim3(num_blocks_V2),dim3(threads_per_block),0,this->m_stream, nnz, A_nonzero_values_ptr, edge_weights_ptr);
            cudaCheckError();
            //edge_weights.dump(0,nnz);
            break;
        }
        case SCALED_BY_ROW_SUM :
        {
            // Compute the edge weights using .5*(A_ij+A_ji)/max(d(i),d(j)) where d(i) is the sum of outgoing edges of i
            Vector<ValueType> row_sum(n);
            const ValueType *A_row_sum_ptr = row_sum.raw();
            Vector<ValueType> ones(n);
            ones.fill(1.0);
            ValueType alpha = 1.0, beta = 0.0;
            // row_sum = A * ones
            Cusparse::csrmv(false, false, n, n, nnz, &alpha, A_nonzero_values_ptr, A_row_offsets_ptr, A_column_indices_ptr, ones.raw(), &beta, row_sum.raw());
            hipFuncSetCacheConfig(computeEdgeWeightsBlockDiaCsr_V2<IndexType,ValueType,float>, hipFuncCachePreferL1);
            hipLaunchKernelGGL(( computeEdgeWeights_simple), dim3(num_blocks_V2),dim3(threads_per_block),0,this->m_stream, A_row_offsets_ptr, A_row_indices_ptr, A_column_indices_ptr, A_row_sum_ptr, A_nonzero_values_ptr, nnz, edge_weights_ptr, rand_edge_weights_ptr, n, this->m_weight_formula);
            cudaCheckError();
            break;
        }
        case SCALED_BY_DIAGONAL :
        {
            // Compute the edge weights using AmgX formula (works only if there is a diagonal entry for each row)
            Vector<IndexType> diag_idx(n);
            const IndexType *A_dia_idx_ptr = diag_idx.raw();
            hipLaunchKernelGGL(( computeDiagonalKernelCSR), dim3(num_blocks),dim3(threads_per_block),0,this->m_stream, n, A.get_raw_row_offsets(), A.get_raw_column_indices(), diag_idx.raw());
            cudaCheckError();
            hipFuncSetCacheConfig(computeEdgeWeightsBlockDiaCsr_V2<IndexType,ValueType,float>, hipFuncCachePreferL1);
            hipLaunchKernelGGL(( computeEdgeWeightsBlockDiaCsr_V2), dim3(num_blocks_V2),dim3(threads_per_block),0,this->m_stream, A_row_offsets_ptr, A_row_indices_ptr, A_column_indices_ptr, A_dia_idx_ptr, A_nonzero_values_ptr, nnz, edge_weights_ptr, rand_edge_weights_ptr, n, bsize, this->m_aggregation_edge_weight_component, this->m_weight_formula);
            cudaCheckError();
            break;
        }
        default: return NVGRAPH_ERR_BAD_PARAMETERS;
    }

#ifdef EXPERIMENTAL_ITERATIVE_MATCHING
    // TODO (from amgx): allocate host pinned memory
    AsyncEvent *throttle_event = new AsyncEvent;
    throttle_event->create();
    std::vector<IndexType> h_unagg_vec(1);
    Vector<IndexType> d_unagg_vec(1);
    int *unaggregated = &h_unagg_vec[0];
    int *d_unaggregated = d_unagg_vec.raw();
#endif

    int icount, s = 1;
    {
        icount = 0;
        float *weights_ptr = edge_weights_ptr;
        do
        {
            if( !this->m_two_phase ) {
                // 1-phase handshaking
                hipLaunchKernelGGL(( findStrongestNeighbourBlockDiaCsr_V2), dim3(num_blocks),dim3(threads_per_block),0,this->m_stream, A_row_offsets_ptr, A_column_indices_ptr, weights_ptr, n, aggregates_ptr, strongest_neighbour_ptr, strongest_neighbour_ptr, bsize, 1, this->m_merge_singletons);
                cudaCheckError();
            }
            else {
                // 2-phase handshaking
                hipLaunchKernelGGL(( findStrongestNeighbourBlockDiaCsr_V2), dim3(num_blocks),dim3(threads_per_block),0,this->m_stream, A_row_offsets_ptr, A_column_indices_ptr, weights_ptr, n, aggregates_ptr, strongest_neighbour_1phase_ptr, strongest_neighbour_ptr, bsize, 1, this->m_merge_singletons);
                cudaCheckError();
                // 2nd phase: for each block_row, find the strongest neighbour among those who gave hand on 1st phase
                hipLaunchKernelGGL(( findStrongestNeighbourBlockDiaCsr_V2), dim3(num_blocks),dim3(threads_per_block),0,this->m_stream, A_row_offsets_ptr, A_column_indices_ptr, weights_ptr, n, aggregates_ptr, strongest_neighbour_1phase_ptr, strongest_neighbour_ptr, bsize, 2, this->m_merge_singletons);
                cudaCheckError();
            }

            // Look for perfect matches. Also, for nodes without unaggregated neighbours, merge with aggregate containing strongest neighbour
            hipLaunchKernelGGL(( matchEdges), dim3(num_blocks),dim3(threads_per_block),0,this->m_stream, n, aggregates_ptr, strongest_neighbour_ptr);
            cudaCheckError();

#ifdef EXPERIMENTAL_ITERATIVE_MATCHING
            // Throttled convergence check: the unaggregated count is refreshed
            // only every other iteration (async copy + event), so the loop uses
            // a value that is one refresh behind instead of syncing each pass.
            s = (icount & 1);
            if( s == 0 )
            {
                // count unaggregated vertices
                hipMemsetAsync(d_unaggregated, 0, sizeof(int), this->m_stream);
                hipLaunchKernelGGL(( countAggregates<IndexType,threads_per_block>), dim3(num_blocks),dim3(threads_per_block),0,this->m_stream, n, aggregates_ptr, d_unaggregated);
                cudaCheckError();
                hipMemcpyAsync(unaggregated, d_unaggregated, sizeof(int), hipMemcpyDeviceToHost, this->m_stream);
                throttle_event->record(this->m_stream);
                cudaCheckError();
            }
            else
            {
                throttle_event->sync();
                numUnassigned_previous = numUnassigned;
                numUnassigned = *unaggregated;
            }
#else
            hipStreamSynchronize(this->m_stream);
            numUnassigned_previous = numUnassigned;
            numUnassigned = (int)thrust::count(aggregates_thrust_dev_ptr, aggregates_thrust_dev_ptr+n, -1);
            cudaCheckError();
#endif

            icount++;
        // Continue until everything is matched, iteration cap reached, the
        // unassigned fraction drops below tolerance, or progress stalls.
        } while ( (s == 0) || !(numUnassigned==0 || icount > this->m_max_iterations || 1.0*numUnassigned/n < this->m_numUnassigned_tol || numUnassigned == numUnassigned_previous));
    }
    //print
    //printf("icount=%i, numUnassiged=%d, numUnassigned_tol=%f\n", icount, numUnassigned, this->m_numUnassigned_tol);

#ifdef EXPERIMENTAL_ITERATIVE_MATCHING
    delete throttle_event;
#endif

    if( this->m_merge_singletons )
    {
        // Merge remaining vertices with current aggregates
        if (!this->m_deterministic)
        {
            while (numUnassigned != 0)
            {
                hipLaunchKernelGGL(( mergeWithExistingAggregatesBlockDiaCsr_V2), dim3(num_blocks),dim3(threads_per_block),0,this->m_stream, A_row_offsets_ptr, A_column_indices_ptr, edge_weights_ptr, n, aggregates_ptr, bsize, this->m_deterministic, (IndexType*) NULL);
                cudaCheckError();
                numUnassigned = (int)thrust::count(aggregates_thrust_dev_ptr, aggregates_thrust_dev_ptr+n, -1);
                cudaCheckError();
            }
        }
        else
        {
            // Deterministic path: stage candidates, then join in a second pass.
            Vector<int> aggregates_candidate(n);
            aggregates_candidate.fill(-1);
            while (numUnassigned != 0)
            {
                hipLaunchKernelGGL(( mergeWithExistingAggregatesBlockDiaCsr_V2), dim3(num_blocks),dim3(threads_per_block),0,this->m_stream, A_row_offsets_ptr, A_column_indices_ptr, edge_weights_ptr, n, aggregates_ptr, bsize, this->m_deterministic, aggregates_candidate.raw());
                cudaCheckError();
                hipLaunchKernelGGL(( joinExistingAggregates), dim3(num_blocks),dim3(threads_per_block),0,this->m_stream, n, aggregates_ptr, aggregates_candidate.raw());
                cudaCheckError();
                numUnassigned = (int)thrust::count(aggregates_thrust_dev_ptr, aggregates_thrust_dev_ptr+n, -1);
                cudaCheckError();
            }
        }
    }
    else
    {
        //make singletons
        hipLaunchKernelGGL(( aggregateSingletons), dim3(num_blocks),dim3(threads_per_block),0,this->m_stream, aggregates_ptr, n );
        cudaCheckError();
    }

    renumberAndCountAggregates(aggregates, n, num_aggregates);
    return NVGRAPH_OK;
}

// Public entry point; delegates to the square-block implementation above.
template <typename IndexType, typename ValueType>
NVGRAPH_ERROR Size2Selector<IndexType, ValueType>::setAggregates(const ValuedCsrGraph<IndexType, ValueType> &A, Vector<IndexType> &aggregates, int &num_aggregates)
{
    return setAggregates_common_sqblocks( A, aggregates, num_aggregates);
}

template class Size2Selector<int, float>;
template class Size2Selector<int, double>;
template void renumberAndCountAggregates <int> (Vector<int> &aggregates, const int n, int& num_aggregates);

} //nvgraph
size2_selector.cu
/*
 * Copyright (c) 2019, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <nvgraph_cusparse.hxx>
#include <size2_selector.hxx>
#include <common_selector.hxx>
#include <async_event.hxx>

#include <thrust/device_vector.h>
#include <thrust/count.h>         //count
#include <thrust/sort.h>          //sort
#include <thrust/binary_search.h> //lower_bound
#include <thrust/unique.h>        //unique

// This should be enabled
#define EXPERIMENTAL_ITERATIVE_MATCHING

namespace nvgraph {

// Compact the aggregate labels in |aggregates| (device vector, length n) into
// the contiguous range [0, num_aggregates) and report the aggregate count.
// Uses a scatter (mark used labels) + exclusive scan + gather on the device.
template <typename IndexType>
void renumberAndCountAggregates(Vector<IndexType> &aggregates, const IndexType n, IndexType& num_aggregates)
{
    // renumber aggregates
    Vector<IndexType> scratch(n+1);
    scratch.fill(0);
    thrust::device_ptr<IndexType> aggregates_thrust_dev_ptr(aggregates.raw());
    thrust::device_ptr<IndexType> scratch_thrust_dev_ptr(scratch.raw());

    // set scratch[aggregates[i]] = 1
    thrust::fill(thrust::make_permutation_iterator(scratch_thrust_dev_ptr, aggregates_thrust_dev_ptr),
                 thrust::make_permutation_iterator(scratch_thrust_dev_ptr, aggregates_thrust_dev_ptr + n), 1);
    //scratch.dump(0,scratch.get_size());

    // do prefix sum on scratch
    thrust::exclusive_scan(scratch_thrust_dev_ptr, scratch_thrust_dev_ptr+n+1, scratch_thrust_dev_ptr);
    // scratch.dump(0,scratch.get_size());

    // aggregates[i] = scratch[aggregates[i]]
    thrust::copy(thrust::make_permutation_iterator(scratch_thrust_dev_ptr, aggregates_thrust_dev_ptr),
                 thrust::make_permutation_iterator(scratch_thrust_dev_ptr, aggregates_thrust_dev_ptr + n),
                 aggregates_thrust_dev_ptr);
    cudaCheckError();
    // Last scan entry is the total number of distinct labels.
    // NOTE(review): copies sizeof(int) regardless of IndexType -- only safe for
    // the int instantiation at the bottom of this file; confirm before adding
    // wider index types.
    cudaMemcpy(&num_aggregates, &scratch.raw()[scratch.get_size()-1], sizeof(int), cudaMemcpyDefault);
    //num_aggregates = scratch.raw()[scratch.get_size()-1];
    cudaCheckError();
}

// ------------------
//  Constructors
// ------------------
template <typename IndexType, typename ValueType>
Size2Selector<IndexType, ValueType>::Size2Selector()
{
    // Using default values from AmgX
    m_deterministic = 1;
    m_stream = 0;
    m_max_iterations = 15;
    m_numUnassigned_tol = 0.05;
    m_two_phase = 0;
    m_aggregation_edge_weight_component = 0;
    m_merge_singletons = 1;
    m_weight_formula = 0;
    m_similarity_metric = SCALED_BY_ROW_SUM;
}

// ------------------
//  Methods
// ------------------

// setAggregates for block_dia_csr_matrix_d format.
// Pairwise (size-2) aggregation: compute per-edge weights, then iterative
// handshake matching; leftovers are merged into neighbours or made singletons.
// Outputs: aggregates (label per vertex, renumbered) and num_aggregates.
template <typename IndexType, typename ValueType>
NVGRAPH_ERROR Size2Selector<IndexType, ValueType>::setAggregates_common_sqblocks(const ValuedCsrGraph<IndexType, ValueType> &A, Vector<IndexType> &aggregates, int &num_aggregates)
{
    const IndexType n = (int) A.get_num_vertices();
    const IndexType nnz = (int) A.get_num_edges();
    const IndexType *A_row_offsets_ptr = A.get_raw_row_offsets();
    const IndexType *A_column_indices_ptr = A.get_raw_column_indices();
    const ValueType *A_nonzero_values_ptr = A.get_raw_values();

    // compute row indices
    Vector<IndexType> row_indices(nnz);
    Cusparse::csr2coo( n, nnz, A_row_offsets_ptr, row_indices.raw()); // note : amgx uses cusp for that
    const IndexType *A_row_indices_ptr = row_indices.raw();

    //All vectors should be initialized to -1.
    aggregates.fill(-1);
    Vector<IndexType> strongest_neighbour(n);
    strongest_neighbour.fill(-1);
    Vector<IndexType> strongest_neighbour_1phase(n);
    strongest_neighbour_1phase.fill(-1);
    Vector<float> edge_weights(nnz);
    edge_weights.fill(-1);
    float *edge_weights_ptr = edge_weights.raw();
    float *rand_edge_weights_ptr = NULL;
    cudaCheckError();

    IndexType *strongest_neighbour_ptr = strongest_neighbour.raw();
    IndexType *strongest_neighbour_1phase_ptr = strongest_neighbour_1phase.raw();
    IndexType *aggregates_ptr = aggregates.raw();

    const int threads_per_block = 256;
    const int max_grid_size = 256;
    // Grids capped at max_grid_size; kernels are expected to stride internally.
    const int num_blocks = min( max_grid_size, (n-1)/threads_per_block+ 1 );
    const int num_blocks_V2 = min( max_grid_size, (nnz-1)/threads_per_block + 1);
    int bsize = 1; // AmgX legacy: we don't use block CSR matrices, this is just to specify that we run on regular matrices
    int numUnassigned = n;
    int numUnassigned_previous = numUnassigned;
    thrust::device_ptr<IndexType> aggregates_thrust_dev_ptr(aggregates_ptr);

    // Edge-weight computation: one strategy per similarity metric.
    switch(m_similarity_metric)
    {
        case USER_PROVIDED :
        {
            //copy non zero values of A in edge_weights (float)
            convert_type<<<num_blocks_V2,threads_per_block,0,this->m_stream>>>(nnz, A_nonzero_values_ptr, edge_weights_ptr);
            cudaCheckError();
            //edge_weights.dump(0,nnz);
            break;
        }
        case SCALED_BY_ROW_SUM :
        {
            // Compute the edge weights using .5*(A_ij+A_ji)/max(d(i),d(j)) where d(i) is the sum of outgoing edges of i
            Vector<ValueType> row_sum(n);
            const ValueType *A_row_sum_ptr = row_sum.raw();
            Vector<ValueType> ones(n);
            ones.fill(1.0);
            ValueType alpha = 1.0, beta = 0.0;
            // row_sum = A * ones
            Cusparse::csrmv(false, false, n, n, nnz, &alpha, A_nonzero_values_ptr, A_row_offsets_ptr, A_column_indices_ptr, ones.raw(), &beta, row_sum.raw());
            cudaFuncSetCacheConfig(computeEdgeWeightsBlockDiaCsr_V2<IndexType,ValueType,float>, cudaFuncCachePreferL1);
            computeEdgeWeights_simple<<<num_blocks_V2,threads_per_block,0,this->m_stream>>>(A_row_offsets_ptr, A_row_indices_ptr, A_column_indices_ptr, A_row_sum_ptr, A_nonzero_values_ptr, nnz, edge_weights_ptr, rand_edge_weights_ptr, n, this->m_weight_formula);
            cudaCheckError();
            break;
        }
        case SCALED_BY_DIAGONAL :
        {
            // Compute the edge weights using AmgX formula (works only if there is a diagonal entry for each row)
            Vector<IndexType> diag_idx(n);
            const IndexType *A_dia_idx_ptr = diag_idx.raw();
            computeDiagonalKernelCSR<<<num_blocks,threads_per_block,0,this->m_stream>>>(n, A.get_raw_row_offsets(), A.get_raw_column_indices(), diag_idx.raw());
            cudaCheckError();
            cudaFuncSetCacheConfig(computeEdgeWeightsBlockDiaCsr_V2<IndexType,ValueType,float>, cudaFuncCachePreferL1);
            computeEdgeWeightsBlockDiaCsr_V2<<<num_blocks_V2,threads_per_block,0,this->m_stream>>>(A_row_offsets_ptr, A_row_indices_ptr, A_column_indices_ptr, A_dia_idx_ptr, A_nonzero_values_ptr, nnz, edge_weights_ptr, rand_edge_weights_ptr, n, bsize, this->m_aggregation_edge_weight_component, this->m_weight_formula);
            cudaCheckError();
            break;
        }
        default: return NVGRAPH_ERR_BAD_PARAMETERS;
    }

#ifdef EXPERIMENTAL_ITERATIVE_MATCHING
    // TODO (from amgx): allocate host pinned memory
    AsyncEvent *throttle_event = new AsyncEvent;
    throttle_event->create();
    std::vector<IndexType> h_unagg_vec(1);
    Vector<IndexType> d_unagg_vec(1);
    int *unaggregated = &h_unagg_vec[0];
    int *d_unaggregated = d_unagg_vec.raw();
#endif

    int icount, s = 1;
    {
        icount = 0;
        float *weights_ptr = edge_weights_ptr;
        do
        {
            if( !this->m_two_phase ) {
                // 1-phase handshaking
                findStrongestNeighbourBlockDiaCsr_V2<<<num_blocks,threads_per_block,0,this->m_stream>>>(A_row_offsets_ptr, A_column_indices_ptr, weights_ptr, n, aggregates_ptr, strongest_neighbour_ptr, strongest_neighbour_ptr, bsize, 1, this->m_merge_singletons);
                cudaCheckError();
            }
            else {
                // 2-phase handshaking
                findStrongestNeighbourBlockDiaCsr_V2<<<num_blocks,threads_per_block,0,this->m_stream>>>(A_row_offsets_ptr, A_column_indices_ptr, weights_ptr, n, aggregates_ptr, strongest_neighbour_1phase_ptr, strongest_neighbour_ptr, bsize, 1, this->m_merge_singletons);
                cudaCheckError();
                // 2nd phase: for each block_row, find the strongest neighbour among those who gave hand on 1st phase
                findStrongestNeighbourBlockDiaCsr_V2<<<num_blocks,threads_per_block,0,this->m_stream>>>(A_row_offsets_ptr, A_column_indices_ptr, weights_ptr, n, aggregates_ptr, strongest_neighbour_1phase_ptr, strongest_neighbour_ptr, bsize, 2, this->m_merge_singletons);
                cudaCheckError();
            }

            // Look for perfect matches. Also, for nodes without unaggregated neighbours, merge with aggregate containing strongest neighbour
            matchEdges<<<num_blocks,threads_per_block,0,this->m_stream>>>(n, aggregates_ptr, strongest_neighbour_ptr);
            cudaCheckError();

#ifdef EXPERIMENTAL_ITERATIVE_MATCHING
            // Throttled convergence check: the unaggregated count is refreshed
            // only every other iteration (async copy + event), so the loop uses
            // a value that is one refresh behind instead of syncing each pass.
            s = (icount & 1);
            if( s == 0 )
            {
                // count unaggregated vertices
                cudaMemsetAsync(d_unaggregated, 0, sizeof(int), this->m_stream);
                countAggregates<IndexType,threads_per_block><<<num_blocks,threads_per_block,0,this->m_stream>>>(n, aggregates_ptr, d_unaggregated);
                cudaCheckError();
                cudaMemcpyAsync(unaggregated, d_unaggregated, sizeof(int), cudaMemcpyDeviceToHost, this->m_stream);
                throttle_event->record(this->m_stream);
                cudaCheckError();
            }
            else
            {
                throttle_event->sync();
                numUnassigned_previous = numUnassigned;
                numUnassigned = *unaggregated;
            }
#else
            cudaStreamSynchronize(this->m_stream);
            numUnassigned_previous = numUnassigned;
            numUnassigned = (int)thrust::count(aggregates_thrust_dev_ptr, aggregates_thrust_dev_ptr+n, -1);
            cudaCheckError();
#endif

            icount++;
        // Continue until everything is matched, iteration cap reached, the
        // unassigned fraction drops below tolerance, or progress stalls.
        } while ( (s == 0) || !(numUnassigned==0 || icount > this->m_max_iterations || 1.0*numUnassigned/n < this->m_numUnassigned_tol || numUnassigned == numUnassigned_previous));
    }
    //print
    //printf("icount=%i, numUnassiged=%d, numUnassigned_tol=%f\n", icount, numUnassigned, this->m_numUnassigned_tol);

#ifdef EXPERIMENTAL_ITERATIVE_MATCHING
    delete throttle_event;
#endif

    if( this->m_merge_singletons )
    {
        // Merge remaining vertices with current aggregates
        if (!this->m_deterministic)
        {
            while (numUnassigned != 0)
            {
                mergeWithExistingAggregatesBlockDiaCsr_V2<<<num_blocks,threads_per_block,0,this->m_stream>>>(A_row_offsets_ptr, A_column_indices_ptr, edge_weights_ptr, n, aggregates_ptr, bsize, this->m_deterministic, (IndexType*) NULL);
                cudaCheckError();
                numUnassigned = (int)thrust::count(aggregates_thrust_dev_ptr, aggregates_thrust_dev_ptr+n, -1);
                cudaCheckError();
            }
        }
        else
        {
            // Deterministic path: stage candidates, then join in a second pass.
            Vector<int> aggregates_candidate(n);
            aggregates_candidate.fill(-1);
            while (numUnassigned != 0)
            {
                mergeWithExistingAggregatesBlockDiaCsr_V2<<<num_blocks,threads_per_block,0,this->m_stream>>>(A_row_offsets_ptr, A_column_indices_ptr, edge_weights_ptr, n, aggregates_ptr, bsize, this->m_deterministic, aggregates_candidate.raw());
                cudaCheckError();
                joinExistingAggregates<<<num_blocks,threads_per_block,0,this->m_stream>>>(n, aggregates_ptr, aggregates_candidate.raw());
                cudaCheckError();
                numUnassigned = (int)thrust::count(aggregates_thrust_dev_ptr, aggregates_thrust_dev_ptr+n, -1);
                cudaCheckError();
            }
        }
    }
    else
    {
        //make singletons
        aggregateSingletons<<<num_blocks,threads_per_block,0,this->m_stream>>>( aggregates_ptr, n );
        cudaCheckError();
    }

    renumberAndCountAggregates(aggregates, n, num_aggregates);
    return NVGRAPH_OK;
}

// Public entry point; delegates to the square-block implementation above.
template <typename IndexType, typename ValueType>
NVGRAPH_ERROR Size2Selector<IndexType, ValueType>::setAggregates(const ValuedCsrGraph<IndexType, ValueType> &A, Vector<IndexType> &aggregates, int &num_aggregates)
{
    return setAggregates_common_sqblocks( A, aggregates, num_aggregates);
}

template class Size2Selector<int, float>;
template class Size2Selector<int, double>;
template void renumberAndCountAggregates <int> (Vector<int> &aggregates, const int n, int& num_aggregates);

} //nvgraph
290cd171abe087cef9b1e0f6d8fd7465def089a0.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*!
 * Copyright (c) 2019 by Contributors
 * \file geometry/cuda/geometry_op_impl.cc
 * \brief Geometry operator CUDA implementation
 */
#include <dgl/array.h>
#include "../../runtime/cuda/cuda_common.h"
#include "../../c_api_common.h"
#include "../geometry_op.h"

#define THREADS 1024

namespace dgl {
namespace geometry {
namespace impl {

/*!
 * \brief Farthest Point Sampler without the need to compute all pairs of distance.
 *
 * The input array has shape (N, d), where N is the number of points, and d is the dimension.
 * It consists of a (flatten) batch of point clouds.
 *
 * In each batch, the algorithm starts with the sample index specified by ``start_idx``.
 * Then for each point, we maintain the minimum to-sample distance.
 * Finally, we pick the point with the maximum such distance.
 * This process will be repeated for ``sample_points`` - 1 times.
 *
 * Launch layout (see FarthestPointSampler below): one block per point cloud
 * in the batch, THREADS threads per block. dist_max_ht / dist_argmax_ht hold
 * each thread's running max and argmax for the block-level reduction.
 */
template <typename FloatType, typename IdType>
__global__ void fps_kernel(const FloatType *array_data, const int64_t batch_size,
                           const int64_t sample_points, const int64_t point_in_batch,
                           const int64_t dim, const IdType *start_idx,
                           FloatType *dist_data, IdType *ret_data) {
  const int64_t thread_idx = threadIdx.x;
  const int64_t batch_idx = blockIdx.x;

  // Offsets of this cloud's points and of its output slots.
  const int64_t array_start = point_in_batch * batch_idx;
  const int64_t ret_start = sample_points * batch_idx;

  __shared__ FloatType dist_max_ht[THREADS];
  __shared__ int64_t dist_argmax_ht[THREADS];

  // start with random initialization
  if (thread_idx == 0) {
    ret_data[ret_start] = (IdType)(start_idx[batch_idx]);
  }

  // sample the rest `sample_points - 1` points
  for (auto i = 0; i < sample_points - 1; i++) {
    // Barrier makes the previously written sample index visible to all threads.
    __syncthreads();

    // the last sampled point
    int64_t sample_idx = (int64_t)(ret_data[ret_start + i]);
    dist_argmax_ht[thread_idx] = 0;
    dist_max_ht[thread_idx] = (FloatType)(-1.);

    // multi-thread distance calculation: each thread strides over the cloud,
    // updating the min-to-sample distance and its private max/argmax.
    for (auto j = thread_idx; j < point_in_batch; j += THREADS) {
      FloatType one_dist = (FloatType)(0.);
      for (auto d = 0; d < dim; d++) {
        FloatType tmp = array_data[(array_start + j) * dim + d] - array_data[(array_start + sample_idx) * dim + d];
        one_dist += tmp * tmp;
      }
      // On the first round initialize; afterwards keep the minimum distance
      // to any already-chosen sample.
      if (i == 0 || dist_data[array_start + j] > one_dist) {
        dist_data[array_start + j] = one_dist;
      }
      if (dist_data[array_start + j] > dist_max_ht[thread_idx]) {
        dist_argmax_ht[thread_idx] = j;
        dist_max_ht[thread_idx] = dist_data[array_start + j];
      }
    }
    __syncthreads();

    // Thread 0 reduces the per-thread maxima and records the farthest point.
    if (thread_idx == 0) {
      FloatType best = dist_max_ht[0];
      int64_t best_idx = dist_argmax_ht[0];
      for (auto j = 1; j < THREADS; j++) {
        if (dist_max_ht[j] > best) {
          best = dist_max_ht[j];
          best_idx = dist_argmax_ht[j];
        }
      }
      ret_data[ret_start + i + 1] = (IdType)(best_idx);
    }
  }
}

// Host wrapper: unpacks the NDArrays and launches fps_kernel with
// batch_size blocks of THREADS threads on the thread-local stream.
// |dist| is scratch space for per-point minimum distances; |result|
// receives sample_points indices per cloud.
template <DLDeviceType XPU, typename FloatType, typename IdType>
void FarthestPointSampler(NDArray array, int64_t batch_size, int64_t sample_points,
    NDArray dist, IdArray start_idx, IdArray result) {
  auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal();

  const FloatType* array_data = static_cast<FloatType*>(array->data);
  const int64_t point_in_batch = array->shape[0] / batch_size;
  const int64_t dim = array->shape[1];

  // return value
  IdType* ret_data = static_cast<IdType*>(result->data);

  // distance
  FloatType* dist_data = static_cast<FloatType*>(dist->data);

  // sample for each cloud in the batch
  IdType* start_idx_data = static_cast<IdType*>(start_idx->data);

  CUDA_CALL(hipSetDevice(array->ctx.device_id));
  CUDA_KERNEL_CALL(fps_kernel, batch_size, THREADS, 0, thr_entry->stream,
      array_data, batch_size, sample_points, point_in_batch, dim,
      start_idx_data, dist_data, ret_data);
}

template void FarthestPointSampler<kDLGPU, float, int32_t>(
    NDArray array, int64_t batch_size, int64_t sample_points,
    NDArray dist, IdArray start_idx, IdArray result);
template void FarthestPointSampler<kDLGPU, float, int64_t>(
    NDArray array, int64_t batch_size, int64_t sample_points,
    NDArray dist, IdArray start_idx, IdArray result);
template void FarthestPointSampler<kDLGPU, double, int32_t>(
    NDArray array, int64_t batch_size, int64_t sample_points,
    NDArray dist, IdArray start_idx, IdArray result);
template void FarthestPointSampler<kDLGPU, double, int64_t>(
    NDArray array, int64_t batch_size, int64_t sample_points,
    NDArray dist, IdArray start_idx, IdArray result);

}  // namespace impl
}  // namespace geometry
}  // namespace dgl
290cd171abe087cef9b1e0f6d8fd7465def089a0.cu
/*!
 * Copyright (c) 2019 by Contributors
 * \file geometry/cuda/geometry_op_impl.cc
 * \brief Geometry operator CUDA implementation
 */
#include <dgl/array.h>
#include "../../runtime/cuda/cuda_common.h"
#include "../../c_api_common.h"
#include "../geometry_op.h"

#define THREADS 1024

namespace dgl {
namespace geometry {
namespace impl {

/*!
 * \brief Farthest Point Sampler without the need to compute all pairs of distance.
 *
 * The input array has shape (N, d), where N is the number of points, and d is the dimension.
 * It consists of a (flatten) batch of point clouds.
 *
 * In each batch, the algorithm starts with the sample index specified by ``start_idx``.
 * Then for each point, we maintain the minimum to-sample distance.
 * Finally, we pick the point with the maximum such distance.
 * This process will be repeated for ``sample_points`` - 1 times.
 *
 * Launch layout (see FarthestPointSampler below): one block per point cloud
 * in the batch, THREADS threads per block. dist_max_ht / dist_argmax_ht hold
 * each thread's running max and argmax for the block-level reduction.
 */
template <typename FloatType, typename IdType>
__global__ void fps_kernel(const FloatType *array_data, const int64_t batch_size,
                           const int64_t sample_points, const int64_t point_in_batch,
                           const int64_t dim, const IdType *start_idx,
                           FloatType *dist_data, IdType *ret_data) {
  const int64_t thread_idx = threadIdx.x;
  const int64_t batch_idx = blockIdx.x;

  // Offsets of this cloud's points and of its output slots.
  const int64_t array_start = point_in_batch * batch_idx;
  const int64_t ret_start = sample_points * batch_idx;

  __shared__ FloatType dist_max_ht[THREADS];
  __shared__ int64_t dist_argmax_ht[THREADS];

  // start with random initialization
  if (thread_idx == 0) {
    ret_data[ret_start] = (IdType)(start_idx[batch_idx]);
  }

  // sample the rest `sample_points - 1` points
  for (auto i = 0; i < sample_points - 1; i++) {
    // Barrier makes the previously written sample index visible to all threads.
    __syncthreads();

    // the last sampled point
    int64_t sample_idx = (int64_t)(ret_data[ret_start + i]);
    dist_argmax_ht[thread_idx] = 0;
    dist_max_ht[thread_idx] = (FloatType)(-1.);

    // multi-thread distance calculation: each thread strides over the cloud,
    // updating the min-to-sample distance and its private max/argmax.
    for (auto j = thread_idx; j < point_in_batch; j += THREADS) {
      FloatType one_dist = (FloatType)(0.);
      for (auto d = 0; d < dim; d++) {
        FloatType tmp = array_data[(array_start + j) * dim + d] - array_data[(array_start + sample_idx) * dim + d];
        one_dist += tmp * tmp;
      }
      // On the first round initialize; afterwards keep the minimum distance
      // to any already-chosen sample.
      if (i == 0 || dist_data[array_start + j] > one_dist) {
        dist_data[array_start + j] = one_dist;
      }
      if (dist_data[array_start + j] > dist_max_ht[thread_idx]) {
        dist_argmax_ht[thread_idx] = j;
        dist_max_ht[thread_idx] = dist_data[array_start + j];
      }
    }
    __syncthreads();

    // Thread 0 reduces the per-thread maxima and records the farthest point.
    if (thread_idx == 0) {
      FloatType best = dist_max_ht[0];
      int64_t best_idx = dist_argmax_ht[0];
      for (auto j = 1; j < THREADS; j++) {
        if (dist_max_ht[j] > best) {
          best = dist_max_ht[j];
          best_idx = dist_argmax_ht[j];
        }
      }
      ret_data[ret_start + i + 1] = (IdType)(best_idx);
    }
  }
}

// Host wrapper: unpacks the NDArrays and launches fps_kernel with
// batch_size blocks of THREADS threads on the thread-local stream.
// |dist| is scratch space for per-point minimum distances; |result|
// receives sample_points indices per cloud.
template <DLDeviceType XPU, typename FloatType, typename IdType>
void FarthestPointSampler(NDArray array, int64_t batch_size, int64_t sample_points,
    NDArray dist, IdArray start_idx, IdArray result) {
  auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal();

  const FloatType* array_data = static_cast<FloatType*>(array->data);
  const int64_t point_in_batch = array->shape[0] / batch_size;
  const int64_t dim = array->shape[1];

  // return value
  IdType* ret_data = static_cast<IdType*>(result->data);

  // distance
  FloatType* dist_data = static_cast<FloatType*>(dist->data);

  // sample for each cloud in the batch
  IdType* start_idx_data = static_cast<IdType*>(start_idx->data);

  CUDA_CALL(cudaSetDevice(array->ctx.device_id));
  CUDA_KERNEL_CALL(fps_kernel, batch_size, THREADS, 0, thr_entry->stream,
      array_data, batch_size, sample_points, point_in_batch, dim,
      start_idx_data, dist_data, ret_data);
}

template void FarthestPointSampler<kDLGPU, float, int32_t>(
    NDArray array, int64_t batch_size, int64_t sample_points,
    NDArray dist, IdArray start_idx, IdArray result);
template void FarthestPointSampler<kDLGPU, float, int64_t>(
    NDArray array, int64_t batch_size, int64_t sample_points,
    NDArray dist, IdArray start_idx, IdArray result);
template void FarthestPointSampler<kDLGPU, double, int32_t>(
    NDArray array, int64_t batch_size, int64_t sample_points,
    NDArray dist, IdArray start_idx, IdArray result);
template void FarthestPointSampler<kDLGPU, double, int64_t>(
    NDArray array, int64_t batch_size, int64_t sample_points,
    NDArray dist, IdArray start_idx, IdArray result);

}  // namespace impl
}  // namespace geometry
}  // namespace dgl
5e844967574a314716a294b8327f8676c78a6590.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // This code is from this tutorial: // https://devblogs.nvidia.com/parallelforall/easy-introduction-cuda-c-and-c/ // This is a so-called SAXPY example. SAXPY stands for "Single-precision A*X Plus Y", and is a good hello world example // for parallel computation. #include <stdio.h> // Functions starting wit __global__ are "device" functions, or "kernels" that run on the GPU. // (sometimes they're also referred to as (compute) shaders as a reference to graphics shaders, the name of these // kernels when used for in a graphics context). // GPU have a large number of cores (sometimes several thousands) that run these kernels in parallel. // The cores are are grouped in blocks of dimensions NxN. // This particular kernel will do a single element multiplication in a vector multiplication. The GPU will invoke // this kernel in every thread, and will automatically replace the magic variables blockIdx, blockDim and threadIdx // with the corresponding block number, dimensions and thread ID (within the block). This way, you can figure out // what part of the calculation to do within a certain kernel instantiation. 
__global__ void saxpy(int n, float a, float *x, float *y) { // Determine which calculation to perform in this particular thread int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) y[i] = a * x[i] + y[i]; } // Normal functions are referred to as "host" functions/code and run on the CPU as usual int main(void) { // Allocate 2 vectors, x and y with 1 million floats (=single precision) each int N = 1 << 20; // 2^20 ~= 1 million float *x, *y, *d_x, *d_y; // d_x -> "device x", d_y -> "device y" x = (float *)malloc(N * sizeof(float)); y = (float *)malloc(N * sizeof(float)); // Allocate the memory on the GPUs hipMalloc(&d_x, N * sizeof(float)); hipMalloc(&d_y, N * sizeof(float)); // set all values in x to 1, all values in y to 2 for (int i = 0; i < N; i++) { x[i] = 1.0f; y[i] = 2.0f; } // Copy the values from the host to the GPU hipMemcpy(d_x, x, N * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_y, y, N * sizeof(float), hipMemcpyHostToDevice); // Perform SAXPY on 1M elements // Note that triple angular brackets, that's specific to CUDA // The triple angular brackets take 2 arguments: 1) number of blocks 2) number of threads per block // Since the size of our input vector can vary (it's size is N), we need to calculate how many blocks we should use // if we're using 256 threads. That number is N/256, but we do N+255/256 to make sure the result >=1 // (integer division rounds down). hipLaunchKernelGGL(( saxpy), dim3((N + 255) / 256), dim3(256), 0, 0, N, 2.0f, d_x, d_y); // We stored the result in Y on the device, copy it back to the host hipMemcpy(y, d_y, N * sizeof(float), hipMemcpyDeviceToHost); float maxError = 0.0f; for (int i = 0; i < N; i++) maxError = max(maxError, abs(y[i] - 4.0f)); printf("Max error: %f\n", maxError); // clean up :) hipFree(d_x); hipFree(d_y); free(x); free(y); }
5e844967574a314716a294b8327f8676c78a6590.cu
// This code is from this tutorial: // https://devblogs.nvidia.com/parallelforall/easy-introduction-cuda-c-and-c/ // This is a so-called SAXPY example. SAXPY stands for "Single-precision A*X Plus Y", and is a good hello world example // for parallel computation. #include <stdio.h> // Functions starting wit __global__ are "device" functions, or "kernels" that run on the GPU. // (sometimes they're also referred to as (compute) shaders as a reference to graphics shaders, the name of these // kernels when used for in a graphics context). // GPU have a large number of cores (sometimes several thousands) that run these kernels in parallel. // The cores are are grouped in blocks of dimensions NxN. // This particular kernel will do a single element multiplication in a vector multiplication. The GPU will invoke // this kernel in every thread, and will automatically replace the magic variables blockIdx, blockDim and threadIdx // with the corresponding block number, dimensions and thread ID (within the block). This way, you can figure out // what part of the calculation to do within a certain kernel instantiation. 
__global__ void saxpy(int n, float a, float *x, float *y) { // Determine which calculation to perform in this particular thread int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) y[i] = a * x[i] + y[i]; } // Normal functions are referred to as "host" functions/code and run on the CPU as usual int main(void) { // Allocate 2 vectors, x and y with 1 million floats (=single precision) each int N = 1 << 20; // 2^20 ~= 1 million float *x, *y, *d_x, *d_y; // d_x -> "device x", d_y -> "device y" x = (float *)malloc(N * sizeof(float)); y = (float *)malloc(N * sizeof(float)); // Allocate the memory on the GPUs cudaMalloc(&d_x, N * sizeof(float)); cudaMalloc(&d_y, N * sizeof(float)); // set all values in x to 1, all values in y to 2 for (int i = 0; i < N; i++) { x[i] = 1.0f; y[i] = 2.0f; } // Copy the values from the host to the GPU cudaMemcpy(d_x, x, N * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_y, y, N * sizeof(float), cudaMemcpyHostToDevice); // Perform SAXPY on 1M elements // Note that triple angular brackets, that's specific to CUDA // The triple angular brackets take 2 arguments: 1) number of blocks 2) number of threads per block // Since the size of our input vector can vary (it's size is N), we need to calculate how many blocks we should use // if we're using 256 threads. That number is N/256, but we do N+255/256 to make sure the result >=1 // (integer division rounds down). saxpy<<<(N + 255) / 256, 256>>>(N, 2.0f, d_x, d_y); // We stored the result in Y on the device, copy it back to the host cudaMemcpy(y, d_y, N * sizeof(float), cudaMemcpyDeviceToHost); float maxError = 0.0f; for (int i = 0; i < N; i++) maxError = max(maxError, abs(y[i] - 4.0f)); printf("Max error: %f\n", maxError); // clean up :) cudaFree(d_x); cudaFree(d_y); free(x); free(y); }
843aa937c8df6713cbdc43d91dce9a93ae5bf5a8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" __global__ void blockEigSort( float *eigenvalues, float *eigenvectors, int *blocknums, int *blocksizes, int N ) { int blockNumber = blockIdx.x * blockDim.x + threadIdx.x; int startspot = blocknums[blockNumber]; int endspot = startspot + blocksizes[blockNumber] - 1; // Bubble sort for now, thinking blocks are relatively small // We may fix it later for( int i = startspot; i < endspot; i++ ) { for( int j = startspot; j < i; j++ ) { if( eigenvalues[j] > eigenvalues[j + 1] ) { float tmp = eigenvalues[j]; eigenvalues[j] = eigenvalues[j + 1]; eigenvalues[j + 1] = tmp; // Swapping addresses for( int i = 0; i < N; i++ ) { tmp = eigenvectors[i * N + j]; eigenvectors[i * N + j] = eigenvectors[i * N + j + 1]; eigenvectors[i * N + j + 1] = tmp; } /*float* tmpaddr = eigenvectors[j]; eigenvectors[j] = eigenvectors[j+1];; eigenvectors[j+1] = tmpaddr;*/ } } } }
843aa937c8df6713cbdc43d91dce9a93ae5bf5a8.cu
__global__ void blockEigSort( float *eigenvalues, float *eigenvectors, int *blocknums, int *blocksizes, int N ) { int blockNumber = blockIdx.x * blockDim.x + threadIdx.x; int startspot = blocknums[blockNumber]; int endspot = startspot + blocksizes[blockNumber] - 1; // Bubble sort for now, thinking blocks are relatively small // We may fix it later for( int i = startspot; i < endspot; i++ ) { for( int j = startspot; j < i; j++ ) { if( eigenvalues[j] > eigenvalues[j + 1] ) { float tmp = eigenvalues[j]; eigenvalues[j] = eigenvalues[j + 1]; eigenvalues[j + 1] = tmp; // Swapping addresses for( int i = 0; i < N; i++ ) { tmp = eigenvectors[i * N + j]; eigenvectors[i * N + j] = eigenvectors[i * N + j + 1]; eigenvectors[i * N + j + 1] = tmp; } /*float* tmpaddr = eigenvectors[j]; eigenvectors[j] = eigenvectors[j+1];; eigenvectors[j+1] = tmpaddr;*/ } } } }
56d285775b59647f2ff857f9ebd49d56cc271ae2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "gpu_helper.cuh" #include <helper_cuda.h> #include <sort_strings.cuh> #include "thrust/sort.h" #include <thrust/remove.h> #include <thrust/unique.h> #include <thrust/sequence.h> #include <thrust/transform_scan.h> #include <thrust/binary_search.h> using namespace thrust; __global__ void reposition_strings_d(unsigned char* d_word_array_in, unsigned char* d_word_array_out, int* d_position_in, int* d_position_out, const int word_count) { const int thread_num = threadIdx.x + blockDim.x*blockIdx.x; if (thread_num >= word_count) return; const int position_in = d_position_in[thread_num]; const int position_out = d_position_out[thread_num]; int i = 0; char c; do { c = d_word_array_in[position_in + i]; d_word_array_out[position_out + i] = c; i++; } while (c != BREAKCHAR); } __global__ void flag_different_than_last_d(const ullong* keys, int* flags, const size_t word_count) { const int thread_num = GetGlobalId(); if (thread_num >= word_count) return; flags[thread_num] = thread_num == 0 || keys[thread_num] != keys[thread_num - 1]; } __global__ void create_hashes_with_seg_d(const uchar* words, const int* word_positions, const int* segments, ullong* keys, const int offset, const int chars_to_hash, const int seg_shift, const size_t word_count) { const auto thread_num = GetGlobalId(); if (thread_num >= word_count) return; const auto position = word_positions[thread_num] + offset; keys[thread_num] = static_cast<ullong>(segments[thread_num]) << seg_shift | get_hash(words, chars_to_hash, position); } __global__ void mark_singletons_d(const ullong* keys, int* flags, const int* destinations, int* output, const int* positions, const size_t word_count) { const int thread_num = GetGlobalId(); if (thread_num >= word_count) return; const auto key = keys[thread_num]; const auto position = positions[thread_num]; const auto finished = (key & 1ULL) == 0ULL; const auto index_output = 
destinations[thread_num]; if (thread_num == 0) { if (finished || key != keys[thread_num + 1]) { output[index_output] = position; flags[thread_num] = 0; } else flags[thread_num] = 1; return; } const auto key_last = keys[thread_num - 1]; if (thread_num == word_count - 1) { if (key != key_last) { output[index_output] = position; flags[thread_num] = 0; } else if (finished) { output[index_output] = -1; flags[thread_num] = 0; } else flags[thread_num] = 1; return; } const auto key_next = keys[thread_num + 1]; if (key != key_last && (finished || key != key_next)) { output[index_output] = position; flags[thread_num] = 0; } else if (key == key_last && finished) { output[index_output] = -1; flags[thread_num] = 0; } else flags[thread_num] = 1; } __global__ void compute_postfix_lengths_d(uchar* words, int* positions, const int word_count, int* lengths) { const int thread_num = GetGlobalId(); if (thread_num >= word_count) return; int my_position = positions[thread_num]; if (my_position == -1) return; int length = 0; uchar c; for (int i = 1; i < CHARSTOHASH; i++) { c = words[my_position + i]; if (c == BREAKCHAR) return; } my_position = my_position + CHARSTOHASH; while (true) { c = words[my_position]; if (c == BREAKCHAR) break; my_position++; length++; } lengths[thread_num] = length + 1; } __global__ void copy_suffixes_d(const uchar* words, const int* positions, const size_t word_count, uchar* suffixes, const int* suffix_positions) { const auto thread_num = GetGlobalId(); if (thread_num >= word_count) return; const int suffix_pos = suffix_positions[thread_num]; const int len = suffix_positions[thread_num + 1] - suffix_pos; if (len == 0) return; if (len == 1) { suffixes[suffix_pos] = BREAKCHAR; return; } const int position = positions[thread_num] + CHARSTOHASH; for (int i = 0; i < len; i++) suffixes[suffix_pos + i] = words[position + i]; } void copy_suffixes(const device_vector<uchar>& words, const device_vector<int>& sorted_positions, const size_t word_count, const 
device_vector<int> suffix_positions, device_vector<uchar>& suffixes) { STARTKERNEL(copy_suffixes_d, "Copying suffixes", word_count, words.data().get(), sorted_positions.data().get(), word_count,suffixes.data().get(), suffix_positions.data().get()); } void flags_different_than_last(const device_vector<ullong>& keys, device_vector<int>& flags) { STARTKERNEL(flag_different_than_last_d, "Flags different than last", keys.size(), keys.data().get(), flags. data().get(), keys.size()); } void create_hashes_with_seg(const device_vector<int>& positions, const device_vector<uchar>& chars, device_vector<ullong>& keys, const device_vector<int>& segments, const int offset, const int segment_size, const int seg_chars) { STARTKERNEL(create_hashes_with_seg_d, "Create hashes", positions.size(), chars.data().get(), positions.data(). get(), segments.data().get(), keys.data().get(), offset, CHARSTOHASH - seg_chars, KEYBITS - segment_size, positions.size()); } void mark_singletons(const device_vector<int>& positions, const device_vector<ullong>& keys, const device_vector<int>& destinations, device_vector<int>& flags, device_vector<int>& output) { STARTKERNEL(mark_singletons_d, "Marking singletons", positions.size(), keys.data().get(), flags.data().get(), destinations.data().get(), output.data().get(), positions.data().get(), positions.size()); } void remove_handled(device_vector<int>& positions, device_vector<ullong>& keys, device_vector<int>& destinations, device_vector<int>& helper) { const auto iter_start = make_zip_iterator(make_tuple(keys.begin(), positions.begin(), destinations.begin())); const auto iter_end = make_zip_iterator(make_tuple(keys.end(), positions.end(), destinations.end())); const auto new_end = remove_if(iter_start, iter_end, helper.begin(), equal_to_val<uchar, 0>()); const auto current_count = new_end - iter_start; positions.resize(current_count); keys.resize(current_count); destinations.resize(current_count); helper.resize(current_count); } int 
compute_segment_size(const device_vector<int>& segments) { int max_segment = segments.back(); int segment_size; if (max_segment == 0) segment_size = 0; else { segment_size = 32; const int flag = 1 << (sizeof(int) * 8 - 1); while ((max_segment&flag) == 0) { max_segment <<= 1; segment_size--; } } return segment_size; } void sort_positions(device_vector<int>& positions, device_vector<ullong>& keys) { sort_by_key(keys.begin(), keys.end(), positions.begin()); } void get_sorted_positions(device_vector<int>& positions, const device_vector<uchar>& chars, device_vector<int>& output) { device_vector<ullong> keys(positions.size()); device_vector<int> destinations(positions.size()); device_vector<int> helper(positions.size()); output.reserve(positions.size() + 1); output.resize(positions.size()); sequence(destinations.begin(), destinations.end()); int offset = 0; int segment_size = 0; while (true) { const auto seg_chars = static_cast<int>(ceil(static_cast<double>(segment_size) / CHARBITS)); const auto hashing_time = measure::execution_gpu(create_hashes_with_seg, positions, chars, keys, helper, offset, segment_size, seg_chars); if (WRITETIME) std::cout << hashing_time << " microseconds taken creating hashes" << std::endl; offset += CHARSTOHASH - seg_chars; const auto sorting_time = measure::execution_gpu(sort_positions, positions, keys); if (WRITETIME) std::cout << sorting_time << " microseconds taken sorting" << std::endl; mark_singletons(positions, keys, destinations, helper, output); remove_handled(positions, keys, destinations, helper); if (positions.empty()) break; flags_different_than_last(keys, helper); inclusive_scan(helper.begin(), helper.end(), helper.begin()); segment_size = compute_segment_size(helper); } } void sort_positions_thrust(device_vector<int>& positions, const device_vector<uchar>& chars) { sort(positions.begin(), positions.end(), less_than_string(chars.data().get())); } void create_output(const device_vector<uchar>& words, device_vector<int>& 
sorted_positions, sorting_output_gpu& result) { const auto positions_end = remove_if(sorted_positions.begin(), sorted_positions.end(), equal_to_val<int, -1>()); const auto word_count = static_cast<int>(positions_end - sorted_positions.begin()); sorted_positions.resize(word_count + 1); result.positions.resize(word_count + 1); const compute_postfix_length_functor postfix_functor(words.data().get()); transform_exclusive_scan(sorted_positions.begin(), sorted_positions.end(), result.positions.begin(), postfix_functor, 0, thrust::plus<int>()); const int output_size = result.positions.back(); result.suffixes.resize(output_size); copy_suffixes(words, sorted_positions, word_count, result.positions, result.suffixes); result.hashes.resize(word_count); transform(sorted_positions.begin(), sorted_positions.begin()+word_count, result.hashes.begin(), hash_functor(words.data().get())); const auto hashes_end = unique_by_key(result.hashes.begin(), result.hashes.end(), result.positions.begin()); const auto hashes_count = hashes_end.first - result.hashes.begin(); result.hashes.resize(hashes_count); result.positions.resize(hashes_count); } void find_if_strings_exist(const device_vector<int>& values_positions, const device_vector<int>& input_positions, const device_vector<uchar>& words, device_vector<bool>& result) { result.resize(values_positions.size()); const less_than_string func(words.data().get()); binary_search(input_positions.begin(), input_positions.end(), values_positions.begin(), values_positions.end(), result.begin(), func); } void prepare_for_search(const host_vector<int>& positions_dictionary_host, const host_vector<uchar>& words_dictionary_host, const host_vector<int>& positions_book_host, const host_vector<uchar>& words_book_host, device_vector<int>& positions_book, device_vector<unsigned char>& words, device_vector<int>& positions_dictionary) { positions_dictionary.resize(positions_dictionary_host.size()); thrust::copy(positions_dictionary_host.begin(), 
positions_dictionary_host.end(), positions_dictionary.begin()); positions_book.resize(positions_book_host.size()); thrust::copy(positions_book_host.begin(), positions_book_host.end(), positions_book.begin()); using namespace thrust::placeholders; transform(positions_book.begin(), positions_book.end(), positions_book.begin(), _1 + words_dictionary_host.size()); words = device_vector<uchar>(words_dictionary_host.size() + words_book_host.size() + CHARSTOHASH); copy(words_dictionary_host.begin(), words_dictionary_host.end(), words.begin()); copy(words_book_host.begin(), words_book_host.end(), words.begin() + words_dictionary_host.size()); }
56d285775b59647f2ff857f9ebd49d56cc271ae2.cu
#include "gpu_helper.cuh" #include <helper_cuda.h> #include <sort_strings.cuh> #include "thrust/sort.h" #include <thrust/remove.h> #include <thrust/unique.h> #include <thrust/sequence.h> #include <thrust/transform_scan.h> #include <thrust/binary_search.h> using namespace thrust; __global__ void reposition_strings_d(unsigned char* d_word_array_in, unsigned char* d_word_array_out, int* d_position_in, int* d_position_out, const int word_count) { const int thread_num = threadIdx.x + blockDim.x*blockIdx.x; if (thread_num >= word_count) return; const int position_in = d_position_in[thread_num]; const int position_out = d_position_out[thread_num]; int i = 0; char c; do { c = d_word_array_in[position_in + i]; d_word_array_out[position_out + i] = c; i++; } while (c != BREAKCHAR); } __global__ void flag_different_than_last_d(const ullong* keys, int* flags, const size_t word_count) { const int thread_num = GetGlobalId(); if (thread_num >= word_count) return; flags[thread_num] = thread_num == 0 || keys[thread_num] != keys[thread_num - 1]; } __global__ void create_hashes_with_seg_d(const uchar* words, const int* word_positions, const int* segments, ullong* keys, const int offset, const int chars_to_hash, const int seg_shift, const size_t word_count) { const auto thread_num = GetGlobalId(); if (thread_num >= word_count) return; const auto position = word_positions[thread_num] + offset; keys[thread_num] = static_cast<ullong>(segments[thread_num]) << seg_shift | get_hash(words, chars_to_hash, position); } __global__ void mark_singletons_d(const ullong* keys, int* flags, const int* destinations, int* output, const int* positions, const size_t word_count) { const int thread_num = GetGlobalId(); if (thread_num >= word_count) return; const auto key = keys[thread_num]; const auto position = positions[thread_num]; const auto finished = (key & 1ULL) == 0ULL; const auto index_output = destinations[thread_num]; if (thread_num == 0) { if (finished || key != keys[thread_num + 1]) { 
output[index_output] = position; flags[thread_num] = 0; } else flags[thread_num] = 1; return; } const auto key_last = keys[thread_num - 1]; if (thread_num == word_count - 1) { if (key != key_last) { output[index_output] = position; flags[thread_num] = 0; } else if (finished) { output[index_output] = -1; flags[thread_num] = 0; } else flags[thread_num] = 1; return; } const auto key_next = keys[thread_num + 1]; if (key != key_last && (finished || key != key_next)) { output[index_output] = position; flags[thread_num] = 0; } else if (key == key_last && finished) { output[index_output] = -1; flags[thread_num] = 0; } else flags[thread_num] = 1; } __global__ void compute_postfix_lengths_d(uchar* words, int* positions, const int word_count, int* lengths) { const int thread_num = GetGlobalId(); if (thread_num >= word_count) return; int my_position = positions[thread_num]; if (my_position == -1) return; int length = 0; uchar c; for (int i = 1; i < CHARSTOHASH; i++) { c = words[my_position + i]; if (c == BREAKCHAR) return; } my_position = my_position + CHARSTOHASH; while (true) { c = words[my_position]; if (c == BREAKCHAR) break; my_position++; length++; } lengths[thread_num] = length + 1; } __global__ void copy_suffixes_d(const uchar* words, const int* positions, const size_t word_count, uchar* suffixes, const int* suffix_positions) { const auto thread_num = GetGlobalId(); if (thread_num >= word_count) return; const int suffix_pos = suffix_positions[thread_num]; const int len = suffix_positions[thread_num + 1] - suffix_pos; if (len == 0) return; if (len == 1) { suffixes[suffix_pos] = BREAKCHAR; return; } const int position = positions[thread_num] + CHARSTOHASH; for (int i = 0; i < len; i++) suffixes[suffix_pos + i] = words[position + i]; } void copy_suffixes(const device_vector<uchar>& words, const device_vector<int>& sorted_positions, const size_t word_count, const device_vector<int> suffix_positions, device_vector<uchar>& suffixes) { STARTKERNEL(copy_suffixes_d, "Copying 
suffixes", word_count, words.data().get(), sorted_positions.data().get(), word_count,suffixes.data().get(), suffix_positions.data().get()); } void flags_different_than_last(const device_vector<ullong>& keys, device_vector<int>& flags) { STARTKERNEL(flag_different_than_last_d, "Flags different than last", keys.size(), keys.data().get(), flags. data().get(), keys.size()); } void create_hashes_with_seg(const device_vector<int>& positions, const device_vector<uchar>& chars, device_vector<ullong>& keys, const device_vector<int>& segments, const int offset, const int segment_size, const int seg_chars) { STARTKERNEL(create_hashes_with_seg_d, "Create hashes", positions.size(), chars.data().get(), positions.data(). get(), segments.data().get(), keys.data().get(), offset, CHARSTOHASH - seg_chars, KEYBITS - segment_size, positions.size()); } void mark_singletons(const device_vector<int>& positions, const device_vector<ullong>& keys, const device_vector<int>& destinations, device_vector<int>& flags, device_vector<int>& output) { STARTKERNEL(mark_singletons_d, "Marking singletons", positions.size(), keys.data().get(), flags.data().get(), destinations.data().get(), output.data().get(), positions.data().get(), positions.size()); } void remove_handled(device_vector<int>& positions, device_vector<ullong>& keys, device_vector<int>& destinations, device_vector<int>& helper) { const auto iter_start = make_zip_iterator(make_tuple(keys.begin(), positions.begin(), destinations.begin())); const auto iter_end = make_zip_iterator(make_tuple(keys.end(), positions.end(), destinations.end())); const auto new_end = remove_if(iter_start, iter_end, helper.begin(), equal_to_val<uchar, 0>()); const auto current_count = new_end - iter_start; positions.resize(current_count); keys.resize(current_count); destinations.resize(current_count); helper.resize(current_count); } int compute_segment_size(const device_vector<int>& segments) { int max_segment = segments.back(); int segment_size; if (max_segment 
== 0) segment_size = 0; else { segment_size = 32; const int flag = 1 << (sizeof(int) * 8 - 1); while ((max_segment&flag) == 0) { max_segment <<= 1; segment_size--; } } return segment_size; } void sort_positions(device_vector<int>& positions, device_vector<ullong>& keys) { sort_by_key(keys.begin(), keys.end(), positions.begin()); } void get_sorted_positions(device_vector<int>& positions, const device_vector<uchar>& chars, device_vector<int>& output) { device_vector<ullong> keys(positions.size()); device_vector<int> destinations(positions.size()); device_vector<int> helper(positions.size()); output.reserve(positions.size() + 1); output.resize(positions.size()); sequence(destinations.begin(), destinations.end()); int offset = 0; int segment_size = 0; while (true) { const auto seg_chars = static_cast<int>(ceil(static_cast<double>(segment_size) / CHARBITS)); const auto hashing_time = measure::execution_gpu(create_hashes_with_seg, positions, chars, keys, helper, offset, segment_size, seg_chars); if (WRITETIME) std::cout << hashing_time << " microseconds taken creating hashes" << std::endl; offset += CHARSTOHASH - seg_chars; const auto sorting_time = measure::execution_gpu(sort_positions, positions, keys); if (WRITETIME) std::cout << sorting_time << " microseconds taken sorting" << std::endl; mark_singletons(positions, keys, destinations, helper, output); remove_handled(positions, keys, destinations, helper); if (positions.empty()) break; flags_different_than_last(keys, helper); inclusive_scan(helper.begin(), helper.end(), helper.begin()); segment_size = compute_segment_size(helper); } } void sort_positions_thrust(device_vector<int>& positions, const device_vector<uchar>& chars) { sort(positions.begin(), positions.end(), less_than_string(chars.data().get())); } void create_output(const device_vector<uchar>& words, device_vector<int>& sorted_positions, sorting_output_gpu& result) { const auto positions_end = remove_if(sorted_positions.begin(), sorted_positions.end(), 
equal_to_val<int, -1>()); const auto word_count = static_cast<int>(positions_end - sorted_positions.begin()); sorted_positions.resize(word_count + 1); result.positions.resize(word_count + 1); const compute_postfix_length_functor postfix_functor(words.data().get()); transform_exclusive_scan(sorted_positions.begin(), sorted_positions.end(), result.positions.begin(), postfix_functor, 0, thrust::plus<int>()); const int output_size = result.positions.back(); result.suffixes.resize(output_size); copy_suffixes(words, sorted_positions, word_count, result.positions, result.suffixes); result.hashes.resize(word_count); transform(sorted_positions.begin(), sorted_positions.begin()+word_count, result.hashes.begin(), hash_functor(words.data().get())); const auto hashes_end = unique_by_key(result.hashes.begin(), result.hashes.end(), result.positions.begin()); const auto hashes_count = hashes_end.first - result.hashes.begin(); result.hashes.resize(hashes_count); result.positions.resize(hashes_count); } void find_if_strings_exist(const device_vector<int>& values_positions, const device_vector<int>& input_positions, const device_vector<uchar>& words, device_vector<bool>& result) { result.resize(values_positions.size()); const less_than_string func(words.data().get()); binary_search(input_positions.begin(), input_positions.end(), values_positions.begin(), values_positions.end(), result.begin(), func); } void prepare_for_search(const host_vector<int>& positions_dictionary_host, const host_vector<uchar>& words_dictionary_host, const host_vector<int>& positions_book_host, const host_vector<uchar>& words_book_host, device_vector<int>& positions_book, device_vector<unsigned char>& words, device_vector<int>& positions_dictionary) { positions_dictionary.resize(positions_dictionary_host.size()); thrust::copy(positions_dictionary_host.begin(), positions_dictionary_host.end(), positions_dictionary.begin()); positions_book.resize(positions_book_host.size()); 
thrust::copy(positions_book_host.begin(), positions_book_host.end(), positions_book.begin()); using namespace thrust::placeholders; transform(positions_book.begin(), positions_book.end(), positions_book.begin(), _1 + words_dictionary_host.size()); words = device_vector<uchar>(words_dictionary_host.size() + words_book_host.size() + CHARSTOHASH); copy(words_dictionary_host.begin(), words_dictionary_host.end(), words.begin()); copy(words_book_host.begin(), words_book_host.end(), words.begin() + words_dictionary_host.size()); }
7d30e0edfcb06f33507505dfaa27388188a265ae.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.0.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date February 2016 @generated from magmablas/zlaswp_sym.cu normal z -> c, Tue Feb 9 16:05:32 2016 @author Stan Tomov @author Mathieu Faverge @author Ichitaro Yamazaki @author Mark Gates */ #include "magma_internal.h" // MAX_PIVOTS is maximum number of pivots to apply in each kernel launch // NTHREADS is number of threads in a block // 64 and 256 are better on Kepler; //#define MAX_PIVOTS 64 //#define NTHREADS 256 #define MAX_PIVOTS 32 #define NTHREADS 64 typedef struct { magmaFloatComplex *dA; int n, lda, j0, npivots; int ipiv[MAX_PIVOTS]; } claswp_sym_params_t; // Matrix A is stored row or column-wise in dA. // Divide matrix A into block-columns of NTHREADS columns each. // Each GPU block processes one block-column of A. // Each thread goes down a column of A, // swapping rows according to pivots stored in params. 
__global__ void claswp_sym_kernel( claswp_sym_params_t params ) { unsigned int tid = threadIdx.x + blockDim.x*blockIdx.x; if ( tid < params.n ) { for( int ii = params.j0; ii < params.npivots; ++ii ) { int i1 = ii; int i2 = params.ipiv[ii]; // swap: i1 <-> i2 // this thread is responsible for the tid-th element magmaFloatComplex *A1 = NULL, *A2 = NULL; if (tid < i1) { // row swap: (i1,tid) <-> (i2,tid) A1 = params.dA + tid*params.lda + i1; A2 = params.dA + tid*params.lda + i2; } else if (tid == i1) { // diagonal swap: (i1,i1) <-> (i2,i2) A1 = params.dA + i1*params.lda + i1; A2 = params.dA + i2*params.lda + i2; } else if (tid < i2) { // row-col swap: (tid,i1) <-> (i2,tid) A1 = params.dA + i1*params.lda + tid; A2 = params.dA + tid*params.lda + i2; } else if (tid == i2) { // diagonal swap: done by i1-th thread } else if (tid > i2) { // column swap: (tid,i1) <-> (tid,i2) A1 = params.dA + i1*params.lda + tid; A2 = params.dA + i2*params.lda + tid; } if ( A1 != NULL && A2 != NULL) { magmaFloatComplex temp = *A1; *A1 = *A2; *A2 = temp; } } } } // Launch claswpx kernel with ceil( n / NTHREADS ) blocks of NTHREADS threads each. extern "C" void claswp_sym( claswp_sym_params_t &params, magma_queue_t queue ) { int blocks = magma_ceildiv(params.n, NTHREADS); hipLaunchKernelGGL(( claswp_sym_kernel), dim3(blocks), dim3(NTHREADS), 0, queue->cuda_stream() , params ); } /** Purpose: ============= CLASWPX performs a series of row interchanges on the matrix A. One row interchange is initiated for each of rows K1 through K2 of A. ** Unlike LAPACK, here A is stored either row-wise or column-wise, depending on ldx and ldy. ** Otherwise, this is identical to LAPACK's interface. Arguments: ========== \param[in] n INTEGER The number of columns of the matrix A. \param[in,out] dA COMPLEX array on GPU, dimension (*,*) On entry, the matrix of column dimension N to which the row interchanges will be applied. On exit, the permuted matrix. 
\param[in] lda INTEGER Stride between elements in same column. \param[in] k1 INTEGER The first element of IPIV for which a row interchange will be done. (One based index.) \param[in] k2 INTEGER The last element of IPIV for which a row interchange will be done. (One based index.) \param[in] ipiv INTEGER array, on CPU, dimension (K2*abs(INCI)) The vector of pivot indices. Only the elements in positions K1 through K2 of IPIV are accessed. IPIV(K) = L implies rows K and L are to be interchanged. \param[in] inci INTEGER The increment between successive values of IPIV. Currently, IPIV > 0. TODO: If IPIV is negative, the pivots are applied in reverse order. @param[in] queue magma_queue_t Queue to execute in. @ingroup magma_caux2 ********************************************************************/ extern "C" void magmablas_claswp_sym_q( magma_int_t n, magmaFloatComplex *dA, magma_int_t lda, magma_int_t k1, magma_int_t k2, const magma_int_t *ipiv, magma_int_t inci, magma_queue_t queue ) { magma_int_t info = 0; if ( n < 0 ) info = -1; else if ( k1 < 0 ) info = -4; else if ( k2 < 0 || k2 < k1 ) info = -5; else if ( inci <= 0 ) info = -7; if (info != 0) { magma_xerbla( __func__, -(info) ); return; //info; } for( int k = k1-1; k < k2; k += MAX_PIVOTS ) { int npivots = min( MAX_PIVOTS, k2-k ); // fields are: dA n lda j0 npivots claswp_sym_params_t params = { dA, int(n), int(lda), int(k), int(k+npivots) }; for( int j = 0; j < npivots; ++j ) { params.ipiv[j] = ipiv[(k+j)*inci] - 1; } claswp_sym( params, queue ); } }
7d30e0edfcb06f33507505dfaa27388188a265ae.cu
/* -- MAGMA (version 2.0.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date February 2016 @generated from magmablas/zlaswp_sym.cu normal z -> c, Tue Feb 9 16:05:32 2016 @author Stan Tomov @author Mathieu Faverge @author Ichitaro Yamazaki @author Mark Gates */ #include "magma_internal.h" // MAX_PIVOTS is maximum number of pivots to apply in each kernel launch // NTHREADS is number of threads in a block // 64 and 256 are better on Kepler; //#define MAX_PIVOTS 64 //#define NTHREADS 256 #define MAX_PIVOTS 32 #define NTHREADS 64 typedef struct { magmaFloatComplex *dA; int n, lda, j0, npivots; int ipiv[MAX_PIVOTS]; } claswp_sym_params_t; // Matrix A is stored row or column-wise in dA. // Divide matrix A into block-columns of NTHREADS columns each. // Each GPU block processes one block-column of A. // Each thread goes down a column of A, // swapping rows according to pivots stored in params. __global__ void claswp_sym_kernel( claswp_sym_params_t params ) { unsigned int tid = threadIdx.x + blockDim.x*blockIdx.x; if ( tid < params.n ) { for( int ii = params.j0; ii < params.npivots; ++ii ) { int i1 = ii; int i2 = params.ipiv[ii]; // swap: i1 <-> i2 // this thread is responsible for the tid-th element magmaFloatComplex *A1 = NULL, *A2 = NULL; if (tid < i1) { // row swap: (i1,tid) <-> (i2,tid) A1 = params.dA + tid*params.lda + i1; A2 = params.dA + tid*params.lda + i2; } else if (tid == i1) { // diagonal swap: (i1,i1) <-> (i2,i2) A1 = params.dA + i1*params.lda + i1; A2 = params.dA + i2*params.lda + i2; } else if (tid < i2) { // row-col swap: (tid,i1) <-> (i2,tid) A1 = params.dA + i1*params.lda + tid; A2 = params.dA + tid*params.lda + i2; } else if (tid == i2) { // diagonal swap: done by i1-th thread } else if (tid > i2) { // column swap: (tid,i1) <-> (tid,i2) A1 = params.dA + i1*params.lda + tid; A2 = params.dA + i2*params.lda + tid; } if ( A1 != NULL && A2 != NULL) { magmaFloatComplex temp = *A1; *A1 = *A2; *A2 = temp; } } } } // 
Launch claswpx kernel with ceil( n / NTHREADS ) blocks of NTHREADS threads each. extern "C" void claswp_sym( claswp_sym_params_t &params, magma_queue_t queue ) { int blocks = magma_ceildiv(params.n, NTHREADS); claswp_sym_kernel<<< blocks, NTHREADS, 0, queue->cuda_stream() >>>( params ); } /** Purpose: ============= CLASWPX performs a series of row interchanges on the matrix A. One row interchange is initiated for each of rows K1 through K2 of A. ** Unlike LAPACK, here A is stored either row-wise or column-wise, depending on ldx and ldy. ** Otherwise, this is identical to LAPACK's interface. Arguments: ========== \param[in] n INTEGER The number of columns of the matrix A. \param[in,out] dA COMPLEX array on GPU, dimension (*,*) On entry, the matrix of column dimension N to which the row interchanges will be applied. On exit, the permuted matrix. \param[in] lda INTEGER Stride between elements in same column. \param[in] k1 INTEGER The first element of IPIV for which a row interchange will be done. (One based index.) \param[in] k2 INTEGER The last element of IPIV for which a row interchange will be done. (One based index.) \param[in] ipiv INTEGER array, on CPU, dimension (K2*abs(INCI)) The vector of pivot indices. Only the elements in positions K1 through K2 of IPIV are accessed. IPIV(K) = L implies rows K and L are to be interchanged. \param[in] inci INTEGER The increment between successive values of IPIV. Currently, IPIV > 0. TODO: If IPIV is negative, the pivots are applied in reverse order. @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_caux2 ********************************************************************/ extern "C" void magmablas_claswp_sym_q( magma_int_t n, magmaFloatComplex *dA, magma_int_t lda, magma_int_t k1, magma_int_t k2, const magma_int_t *ipiv, magma_int_t inci, magma_queue_t queue ) { magma_int_t info = 0; if ( n < 0 ) info = -1; else if ( k1 < 0 ) info = -4; else if ( k2 < 0 || k2 < k1 ) info = -5; else if ( inci <= 0 ) info = -7; if (info != 0) { magma_xerbla( __func__, -(info) ); return; //info; } for( int k = k1-1; k < k2; k += MAX_PIVOTS ) { int npivots = min( MAX_PIVOTS, k2-k ); // fields are: dA n lda j0 npivots claswp_sym_params_t params = { dA, int(n), int(lda), int(k), int(k+npivots) }; for( int j = 0; j < npivots; ++j ) { params.ipiv[j] = ipiv[(k+j)*inci] - 1; } claswp_sym( params, queue ); } }
391d8d1d0603069e1bbe8c38253ac721766bbbb1.hip
// !!! This is a file automatically generated by hipify!!! //////////////////////////////////////////////////////////////////////////////// // BSD 3-Clause License // // Copyright (c) 2021, NVIDIA Corporation // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // 3. Neither the name of the copyright holder nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
///////////////////////////////////////////////////////////////////////////////// #include "assert.h" #include "matx.h" #include "matx_pybind.h" #include "test_types.h" #include "utilities.h" #include "gtest/gtest.h" using namespace matx; constexpr int m = 100; constexpr int n = 50; template <typename T> class QRSolverTest : public ::testing::Test { protected: using dtype = float; void SetUp() override { pb = std::make_unique<MatXPybind>(); pb->InitAndRunTVGenerator<T>("00_solver", "qr", "run", {m, n}); pb->NumpyToTensorView(Av, "A"); pb->NumpyToTensorView(Qv, "Q"); pb->NumpyToTensorView(Rv, "R"); } void TearDown() { pb.reset(); } std::unique_ptr<MatXPybind> pb; tensor_t<T, 2> Av{{m, n}}; tensor_t<T, 2> Atv{{n, m}}; tensor_t<T, 1> TauV{{::min(m, n)}}; tensor_t<T, 2> Qv{{m, ::min(m, n)}}; tensor_t<T, 2> Rv{{::min(m, n), n}}; }; template <typename TensorType> class QRSolverTestNonComplexFloatTypes : public QRSolverTest<TensorType> { }; TYPED_TEST_SUITE(QRSolverTestNonComplexFloatTypes, MatXFloatNonComplexNonHalfTypes); TYPED_TEST(QRSolverTestNonComplexFloatTypes, QRBasic) { MATX_ENTER_HANDLER(); // cuSolver only supports col-major solving today, so we need to transpose, // solve, then transpose again to compare to Python qr(this->Av, this->TauV, this->Av); hipStreamSynchronize(0); // For now we're only verifying R. Q is a bit more complex to compute since // cuSolver/BLAS don't return Q, and instead return Householder reflections // that are used to compute Q. Eventually compute Q here and verify for (index_t i = 0; i < this->Av.Size(0); i++) { for (index_t j = 0; j < this->Av.Size(1); j++) { // R is stored only in the top triangle of A if (i <= j) { ASSERT_NEAR(this->Av(i, j), this->Rv(i, j), 0.001); } } } MATX_EXIT_HANDLER(); }
391d8d1d0603069e1bbe8c38253ac721766bbbb1.cu
//////////////////////////////////////////////////////////////////////////////// // BSD 3-Clause License // // Copyright (c) 2021, NVIDIA Corporation // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // 3. Neither the name of the copyright holder nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
///////////////////////////////////////////////////////////////////////////////// #include "assert.h" #include "matx.h" #include "matx_pybind.h" #include "test_types.h" #include "utilities.h" #include "gtest/gtest.h" using namespace matx; constexpr int m = 100; constexpr int n = 50; template <typename T> class QRSolverTest : public ::testing::Test { protected: using dtype = float; void SetUp() override { pb = std::make_unique<MatXPybind>(); pb->InitAndRunTVGenerator<T>("00_solver", "qr", "run", {m, n}); pb->NumpyToTensorView(Av, "A"); pb->NumpyToTensorView(Qv, "Q"); pb->NumpyToTensorView(Rv, "R"); } void TearDown() { pb.reset(); } std::unique_ptr<MatXPybind> pb; tensor_t<T, 2> Av{{m, n}}; tensor_t<T, 2> Atv{{n, m}}; tensor_t<T, 1> TauV{{std::min(m, n)}}; tensor_t<T, 2> Qv{{m, std::min(m, n)}}; tensor_t<T, 2> Rv{{std::min(m, n), n}}; }; template <typename TensorType> class QRSolverTestNonComplexFloatTypes : public QRSolverTest<TensorType> { }; TYPED_TEST_SUITE(QRSolverTestNonComplexFloatTypes, MatXFloatNonComplexNonHalfTypes); TYPED_TEST(QRSolverTestNonComplexFloatTypes, QRBasic) { MATX_ENTER_HANDLER(); // cuSolver only supports col-major solving today, so we need to transpose, // solve, then transpose again to compare to Python qr(this->Av, this->TauV, this->Av); cudaStreamSynchronize(0); // For now we're only verifying R. Q is a bit more complex to compute since // cuSolver/BLAS don't return Q, and instead return Householder reflections // that are used to compute Q. Eventually compute Q here and verify for (index_t i = 0; i < this->Av.Size(0); i++) { for (index_t j = 0; j < this->Av.Size(1); j++) { // R is stored only in the top triangle of A if (i <= j) { ASSERT_NEAR(this->Av(i, j), this->Rv(i, j), 0.001); } } } MATX_EXIT_HANDLER(); }
376b298399dd003e66011c0f55ecc0f1e64dd13b.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <time.h> #include <math.h> #include "hip/hip_runtime.h" //device function __global__ void kernelAddVectors(int N, double *a, double *b, double *c) { int threadid = threadIdx.x; //thread number int blockid = blockIdx.x; //block number int Nblock = blockDim.x; //number of threads in a block int id = threadid + blockid*Nblock; if (id < N) { c[id] = a[id] + b[id]; } } int main(int argc, char **argv) { // get vector size from command line argument int N = atoi(argv[1]); //seed RNG double seed = clock(); srand48(seed); double *h_a, *h_b, *h_c; //host vectors // allocate storage h_a = (double *) malloc(N*sizeof(double)); h_b = (double *) malloc(N*sizeof(double)); h_c = (double *) malloc(N*sizeof(double)); //populate a and b for (int n=0;n<N;n++) { h_a[n] = drand48(); h_b[n] = drand48(); } double hostStart = clock(); // c = a + b for (int n=0;n<N;n++) { h_c[n] = h_a[n] + h_b[n]; } double hostEnd = clock(); double hostTime = (hostEnd - hostStart)/(double) CLOCKS_PER_SEC; printf("The host took %f seconds to add a and b \n", hostTime); //Device arrays double *d_a, *d_b, *d_c; //allocate memory on the Device with hipMalloc hipMalloc(&d_a,N*sizeof(double)); hipMalloc(&d_b,N*sizeof(double)); hipMalloc(&d_c,N*sizeof(double)); double copyStart = clock(); //copy data from the host to the device hipMemcpy(d_a,h_a,N*sizeof(double),hipMemcpyHostToDevice); hipMemcpy(d_b,h_b,N*sizeof(double),hipMemcpyHostToDevice); double copyEnd = clock(); double copyTime = (copyEnd-copyStart)/(double)CLOCKS_PER_SEC; printf("It took %f seconds to copy the data to device. 
\n",copyTime); //at this point the data is allocated and populated on the device int Nthreads = atoi(argv[2]); //get the number of threads per block from command line int Nblocks = (N+Nthreads-1)/Nthreads; double deviceStart = clock(); hipLaunchKernelGGL(( kernelAddVectors) , dim3(Nblocks) ,dim3(Nthreads) , 0, 0, N, d_a, d_b, d_c); hipDeviceSynchronize(); double deviceEnd = clock(); double deviceTime = (deviceEnd-deviceStart)/(double) CLOCKS_PER_SEC; printf("The device took %f seconds to add a and b \n", deviceTime); printf("The device was %f times faster\n", hostTime/deviceTime); copyStart = clock(); hipMemcpy(h_c,d_c,N*sizeof(double),hipMemcpyDeviceToHost); copyEnd = clock(); copyTime = (copyEnd-copyStart)/(double) CLOCKS_PER_SEC; printf("It took %f seconds to copy the data back to the host. \n",copyTime); hipFree(d_a); hipFree(d_b); hipFree(d_c); free(h_a); free(h_b); free(h_c); }
376b298399dd003e66011c0f55ecc0f1e64dd13b.cu
#include <stdio.h> #include <stdlib.h> #include <time.h> #include <math.h> #include "cuda.h" //device function __global__ void kernelAddVectors(int N, double *a, double *b, double *c) { int threadid = threadIdx.x; //thread number int blockid = blockIdx.x; //block number int Nblock = blockDim.x; //number of threads in a block int id = threadid + blockid*Nblock; if (id < N) { c[id] = a[id] + b[id]; } } int main(int argc, char **argv) { // get vector size from command line argument int N = atoi(argv[1]); //seed RNG double seed = clock(); srand48(seed); double *h_a, *h_b, *h_c; //host vectors // allocate storage h_a = (double *) malloc(N*sizeof(double)); h_b = (double *) malloc(N*sizeof(double)); h_c = (double *) malloc(N*sizeof(double)); //populate a and b for (int n=0;n<N;n++) { h_a[n] = drand48(); h_b[n] = drand48(); } double hostStart = clock(); // c = a + b for (int n=0;n<N;n++) { h_c[n] = h_a[n] + h_b[n]; } double hostEnd = clock(); double hostTime = (hostEnd - hostStart)/(double) CLOCKS_PER_SEC; printf("The host took %f seconds to add a and b \n", hostTime); //Device arrays double *d_a, *d_b, *d_c; //allocate memory on the Device with cudaMalloc cudaMalloc(&d_a,N*sizeof(double)); cudaMalloc(&d_b,N*sizeof(double)); cudaMalloc(&d_c,N*sizeof(double)); double copyStart = clock(); //copy data from the host to the device cudaMemcpy(d_a,h_a,N*sizeof(double),cudaMemcpyHostToDevice); cudaMemcpy(d_b,h_b,N*sizeof(double),cudaMemcpyHostToDevice); double copyEnd = clock(); double copyTime = (copyEnd-copyStart)/(double)CLOCKS_PER_SEC; printf("It took %f seconds to copy the data to device. 
\n",copyTime); //at this point the data is allocated and populated on the device int Nthreads = atoi(argv[2]); //get the number of threads per block from command line int Nblocks = (N+Nthreads-1)/Nthreads; double deviceStart = clock(); kernelAddVectors <<<Nblocks ,Nthreads >>>(N, d_a, d_b, d_c); cudaDeviceSynchronize(); double deviceEnd = clock(); double deviceTime = (deviceEnd-deviceStart)/(double) CLOCKS_PER_SEC; printf("The device took %f seconds to add a and b \n", deviceTime); printf("The device was %f times faster\n", hostTime/deviceTime); copyStart = clock(); cudaMemcpy(h_c,d_c,N*sizeof(double),cudaMemcpyDeviceToHost); copyEnd = clock(); copyTime = (copyEnd-copyStart)/(double) CLOCKS_PER_SEC; printf("It took %f seconds to copy the data back to the host. \n",copyTime); cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); free(h_a); free(h_b); free(h_c); }
dabb2ef04c628890083bcc5c9ea8b4804a350e37.hip
// !!! This is a file automatically generated by hipify!!! #include "SequenceVisitor.cuh" #include "UTFindPermutation.cuh" template<> void SequenceVisitor::set_arguments_size<ut_find_permutation_t>( ut_find_permutation_t::arguments_t arguments, const RuntimeOptions& runtime_options, const Constants& constants, const HostBuffers& host_buffers) { arguments.set_size<dev_ut_hit_permutations>(host_buffers.host_accumulated_number_of_ut_hits[0]); } template<> void SequenceVisitor::visit<ut_find_permutation_t>( ut_find_permutation_t& state, const ut_find_permutation_t::arguments_t& arguments, const RuntimeOptions& runtime_options, const Constants& constants, HostBuffers& host_buffers, hipStream_t& cuda_stream, hipEvent_t& cuda_generic_event) { state.set_opts( dim3(host_buffers.host_number_of_selected_events[0], constants.host_unique_x_sector_layer_offsets[4]), dim3(16), cuda_stream); state.set_arguments( arguments.offset<dev_ut_hits>(), arguments.offset<dev_ut_hit_offsets>(), arguments.offset<dev_ut_hit_permutations>(), constants.dev_unique_x_sector_layer_offsets, constants.dev_unique_x_sector_offsets, constants.dev_unique_sector_xs); state.invoke(); }
dabb2ef04c628890083bcc5c9ea8b4804a350e37.cu
#include "SequenceVisitor.cuh" #include "UTFindPermutation.cuh" template<> void SequenceVisitor::set_arguments_size<ut_find_permutation_t>( ut_find_permutation_t::arguments_t arguments, const RuntimeOptions& runtime_options, const Constants& constants, const HostBuffers& host_buffers) { arguments.set_size<dev_ut_hit_permutations>(host_buffers.host_accumulated_number_of_ut_hits[0]); } template<> void SequenceVisitor::visit<ut_find_permutation_t>( ut_find_permutation_t& state, const ut_find_permutation_t::arguments_t& arguments, const RuntimeOptions& runtime_options, const Constants& constants, HostBuffers& host_buffers, cudaStream_t& cuda_stream, cudaEvent_t& cuda_generic_event) { state.set_opts( dim3(host_buffers.host_number_of_selected_events[0], constants.host_unique_x_sector_layer_offsets[4]), dim3(16), cuda_stream); state.set_arguments( arguments.offset<dev_ut_hits>(), arguments.offset<dev_ut_hit_offsets>(), arguments.offset<dev_ut_hit_permutations>(), constants.dev_unique_x_sector_layer_offsets, constants.dev_unique_x_sector_offsets, constants.dev_unique_sector_xs); state.invoke(); }
e4675235f2f37ee5d62f54c17b9abe0cbf3c2e92.hip
// !!! This is a file automatically generated by hipify!!! #include <rocblas.h> #include <cassert> #include <cstdio> // A simple GPU Timer taken from CUB struct GpuTimer { hipEvent_t start; hipEvent_t stop; GpuTimer() { hipEventCreate(&start); hipEventCreate(&stop); } ~GpuTimer() { hipEventDestroy(start); hipEventDestroy(stop); } void Start() { hipEventRecord(start, 0); } void Stop() { hipEventRecord(stop, 0); } float ElapsedMillis() { float elapsed; hipEventSynchronize(stop); hipEventElapsedTime(&elapsed, start, stop); return elapsed; } }; void PrintDevices() { int num_devices; hipGetDeviceCount(&num_devices); for (int i = 0; i < num_devices; i++) { hipDeviceProp_t prop; hipGetDeviceProperties(&prop, i); printf("Device %d: %s\n", i, prop.name); } } void RunTest(int m, int k, int n, int transa, int transb) { PrintDevices(); // Prepare on device matrix data const int A_rows = transa ? m : k; const int A_cols = transa ? k : m; const int B_rows = transb ? k : n; const int B_cols = transb ? 
n : k; const int C_rows = n; const int C_cols = m; GpuTimer gpu_timer; float elapsed_millis; float throughput; hipblasHandle_t handle; hipblasCreate(&handle); float *host_A = reinterpret_cast<float *>(malloc(A_rows * A_cols * sizeof(float))); float *host_B = reinterpret_cast<float *>(malloc(B_rows * B_cols * sizeof(float))); for (int i = 0; i < A_rows * A_cols; i++) host_A[i] = i % 100; for (int i = 0; i < B_rows * B_cols; i++) host_B[i] = i % 100; float *device_A_float, *device_B_float, *device_C_float; hipMalloc(&device_A_float, A_rows * A_cols * sizeof(float)); hipMalloc(&device_B_float, B_rows * B_cols * sizeof(float)); hipMalloc(&device_C_float, C_rows * C_cols * sizeof(float)); hipMemcpy(device_A_float, host_A, A_rows * A_cols * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(device_B_float, host_B, B_rows * B_cols * sizeof(float), hipMemcpyHostToDevice); float alpha = 1.0f; float beta = 0.0f; int num_iter = 100; printf("m:%5d,k:%5d,n:%5d, transa:%d, transb:%d\n", m, k, n, transa, transb); printf("Using hipblasGemmEx():\n"); // Use hipblasGemmEx hipblasGemmAlgo_t algos[] = { CUBLAS_GEMM_DFALT, CUBLAS_GEMM_ALGO0, CUBLAS_GEMM_ALGO1, CUBLAS_GEMM_ALGO2, CUBLAS_GEMM_ALGO3, CUBLAS_GEMM_ALGO4, CUBLAS_GEMM_ALGO5, CUBLAS_GEMM_ALGO6, CUBLAS_GEMM_ALGO7, #if __CUDACC_VER_MAJOR__ >= 9 CUBLAS_GEMM_ALGO8, CUBLAS_GEMM_ALGO9, CUBLAS_GEMM_ALGO10, CUBLAS_GEMM_ALGO11, CUBLAS_GEMM_ALGO12, CUBLAS_GEMM_ALGO13, CUBLAS_GEMM_ALGO14, CUBLAS_GEMM_ALGO15, CUBLAS_GEMM_ALGO16, CUBLAS_GEMM_ALGO17, #endif }; for (int i = 0; i < sizeof(algos) / sizeof(algos[0]); ++i) { gpu_timer.Start(); bool result_valid = true; int error_code = 0; for (int ii = 0; ii < num_iter; ++ii) { auto result = hipblasGemmEx(handle, (transb ? HIPBLAS_OP_T : HIPBLAS_OP_N), (transa ? HIPBLAS_OP_T : HIPBLAS_OP_N), n, m, k, &alpha, device_B_float, HIP_R_32F, (transb ? k : n), device_A_float, HIP_R_32F, (transa ? 
m : k), &beta, device_C_float, HIP_R_32F, n, HIP_R_32F, algos[i]); if (result != 0) { result_valid = false; error_code = result; break; } } gpu_timer.Stop(); if (result_valid) { elapsed_millis = gpu_timer.ElapsedMillis() / num_iter; throughput = 1.0f / elapsed_millis / 1000000.0f * m * n * k * 2; printf( "algorithm:%d, runtime (msec):%6.4f, throughput " "(Gitems/sec):%5.2f\n", i, elapsed_millis, throughput); } else { printf("algorithm:%d returned error code:%d\n", i, error_code); } } printf("Using hipblasSgemm():\n"); gpu_timer.Start(); bool result_valid = true; bool error_code = 0; for (int i = 0; i < num_iter; ++i) { auto result = hipblasSgemm(handle, (transb ? HIPBLAS_OP_T : HIPBLAS_OP_N), (transa ? HIPBLAS_OP_T : HIPBLAS_OP_N), n, m, k, &alpha, device_B_float, (transb ? k : n), device_A_float, (transa ? m : k), &beta, device_C_float, n); if (result != 0) { result_valid = false; error_code = result; break; } } gpu_timer.Stop(); if (result_valid) { elapsed_millis = gpu_timer.ElapsedMillis() / num_iter; throughput = 1.0f / elapsed_millis / 1000000.0f * m * n * k * 2; printf("runtime (msec):%6.4f, throughput (Gitems/sec):%5.2f\n", elapsed_millis, throughput); } else { printf("hipblasSgemm() returned error code:%d\n", error_code); } gpu_timer.Start(); result_valid = false; error_code = 0; if (m == 1 && n > 1) { printf("Using hipblasSgemv():\n"); result_valid = true; for (int i = 0; i < num_iter; ++i) { auto result = hipblasSgemv(handle, (transb ? HIPBLAS_OP_T : HIPBLAS_OP_N), n, k, &alpha, device_B_float, (transb ? k : n), device_A_float, /*incx=*/1, &beta, device_C_float, /*incy=*/1); if (result != 0) { result_valid = false; error_code = result; break; } } } if (n == 1 && m > 1) { printf("Using hipblasSgemv():\n"); result_valid = true; for (int i = 0; i < num_iter; ++i) { auto result = hipblasSgemv(handle, (transa ? HIPBLAS_OP_N : HIPBLAS_OP_T), m, k, &alpha, device_A_float, (transa ? 
k : m), device_B_float, /*incx=*/1, &beta, device_C_float, /*incy=*/1); if (result != 0) { result_valid = false; error_code = result; break; } } } gpu_timer.Stop(); if (result_valid) { elapsed_millis = gpu_timer.ElapsedMillis() / num_iter; throughput = 1.0f / elapsed_millis / 1000000.0f * m * n * k * 2; printf("runtime (msec):%6.4f, throughput (Gitems/sec):%5.2f\n", elapsed_millis, throughput); } else if (error_code != 0) { printf("hipblasSgemv() returned error code:%d\n", error_code); } hipFree(device_A_float); hipFree(device_B_float); hipFree(device_C_float); hipblasDestroy(handle); free(host_A); free(host_B); } int main(int argc, char *argv[]) { int m, k, n, ta, tb; if (argc < 6) { // m, k, n, ta, tb m = 20; k = 20000; n = 200; ta = 0; tb = 1; } else { m = atoi(argv[1]); k = atoi(argv[2]); n = atoi(argv[3]); ta = atoi(argv[4]); tb = atoi(argv[5]); } RunTest(m, k, n, ta, tb); return 0; }
e4675235f2f37ee5d62f54c17b9abe0cbf3c2e92.cu
#include <cublas_v2.h> #include <cassert> #include <cstdio> // A simple GPU Timer taken from CUB struct GpuTimer { cudaEvent_t start; cudaEvent_t stop; GpuTimer() { cudaEventCreate(&start); cudaEventCreate(&stop); } ~GpuTimer() { cudaEventDestroy(start); cudaEventDestroy(stop); } void Start() { cudaEventRecord(start, 0); } void Stop() { cudaEventRecord(stop, 0); } float ElapsedMillis() { float elapsed; cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsed, start, stop); return elapsed; } }; void PrintDevices() { int num_devices; cudaGetDeviceCount(&num_devices); for (int i = 0; i < num_devices; i++) { cudaDeviceProp prop; cudaGetDeviceProperties(&prop, i); printf("Device %d: %s\n", i, prop.name); } } void RunTest(int m, int k, int n, int transa, int transb) { PrintDevices(); // Prepare on device matrix data const int A_rows = transa ? m : k; const int A_cols = transa ? k : m; const int B_rows = transb ? k : n; const int B_cols = transb ? n : k; const int C_rows = n; const int C_cols = m; GpuTimer gpu_timer; float elapsed_millis; float throughput; cublasHandle_t handle; cublasCreate(&handle); float *host_A = reinterpret_cast<float *>(malloc(A_rows * A_cols * sizeof(float))); float *host_B = reinterpret_cast<float *>(malloc(B_rows * B_cols * sizeof(float))); for (int i = 0; i < A_rows * A_cols; i++) host_A[i] = i % 100; for (int i = 0; i < B_rows * B_cols; i++) host_B[i] = i % 100; float *device_A_float, *device_B_float, *device_C_float; cudaMalloc(&device_A_float, A_rows * A_cols * sizeof(float)); cudaMalloc(&device_B_float, B_rows * B_cols * sizeof(float)); cudaMalloc(&device_C_float, C_rows * C_cols * sizeof(float)); cudaMemcpy(device_A_float, host_A, A_rows * A_cols * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(device_B_float, host_B, B_rows * B_cols * sizeof(float), cudaMemcpyHostToDevice); float alpha = 1.0f; float beta = 0.0f; int num_iter = 100; printf("m:%5d,k:%5d,n:%5d, transa:%d, transb:%d\n", m, k, n, transa, transb); printf("Using 
cublasGemmEx():\n"); // Use cublasGemmEx cublasGemmAlgo_t algos[] = { CUBLAS_GEMM_DFALT, CUBLAS_GEMM_ALGO0, CUBLAS_GEMM_ALGO1, CUBLAS_GEMM_ALGO2, CUBLAS_GEMM_ALGO3, CUBLAS_GEMM_ALGO4, CUBLAS_GEMM_ALGO5, CUBLAS_GEMM_ALGO6, CUBLAS_GEMM_ALGO7, #if __CUDACC_VER_MAJOR__ >= 9 CUBLAS_GEMM_ALGO8, CUBLAS_GEMM_ALGO9, CUBLAS_GEMM_ALGO10, CUBLAS_GEMM_ALGO11, CUBLAS_GEMM_ALGO12, CUBLAS_GEMM_ALGO13, CUBLAS_GEMM_ALGO14, CUBLAS_GEMM_ALGO15, CUBLAS_GEMM_ALGO16, CUBLAS_GEMM_ALGO17, #endif }; for (int i = 0; i < sizeof(algos) / sizeof(algos[0]); ++i) { gpu_timer.Start(); bool result_valid = true; int error_code = 0; for (int ii = 0; ii < num_iter; ++ii) { auto result = cublasGemmEx(handle, (transb ? CUBLAS_OP_T : CUBLAS_OP_N), (transa ? CUBLAS_OP_T : CUBLAS_OP_N), n, m, k, &alpha, device_B_float, CUDA_R_32F, (transb ? k : n), device_A_float, CUDA_R_32F, (transa ? m : k), &beta, device_C_float, CUDA_R_32F, n, CUDA_R_32F, algos[i]); if (result != 0) { result_valid = false; error_code = result; break; } } gpu_timer.Stop(); if (result_valid) { elapsed_millis = gpu_timer.ElapsedMillis() / num_iter; throughput = 1.0f / elapsed_millis / 1000000.0f * m * n * k * 2; printf( "algorithm:%d, runtime (msec):%6.4f, throughput " "(Gitems/sec):%5.2f\n", i, elapsed_millis, throughput); } else { printf("algorithm:%d returned error code:%d\n", i, error_code); } } printf("Using cublasSgemm():\n"); gpu_timer.Start(); bool result_valid = true; bool error_code = 0; for (int i = 0; i < num_iter; ++i) { auto result = cublasSgemm(handle, (transb ? CUBLAS_OP_T : CUBLAS_OP_N), (transa ? CUBLAS_OP_T : CUBLAS_OP_N), n, m, k, &alpha, device_B_float, (transb ? k : n), device_A_float, (transa ? 
m : k), &beta, device_C_float, n); if (result != 0) { result_valid = false; error_code = result; break; } } gpu_timer.Stop(); if (result_valid) { elapsed_millis = gpu_timer.ElapsedMillis() / num_iter; throughput = 1.0f / elapsed_millis / 1000000.0f * m * n * k * 2; printf("runtime (msec):%6.4f, throughput (Gitems/sec):%5.2f\n", elapsed_millis, throughput); } else { printf("cublasSgemm() returned error code:%d\n", error_code); } gpu_timer.Start(); result_valid = false; error_code = 0; if (m == 1 && n > 1) { printf("Using cublasSgemv():\n"); result_valid = true; for (int i = 0; i < num_iter; ++i) { auto result = cublasSgemv(handle, (transb ? CUBLAS_OP_T : CUBLAS_OP_N), n, k, &alpha, device_B_float, (transb ? k : n), device_A_float, /*incx=*/1, &beta, device_C_float, /*incy=*/1); if (result != 0) { result_valid = false; error_code = result; break; } } } if (n == 1 && m > 1) { printf("Using cublasSgemv():\n"); result_valid = true; for (int i = 0; i < num_iter; ++i) { auto result = cublasSgemv(handle, (transa ? CUBLAS_OP_N : CUBLAS_OP_T), m, k, &alpha, device_A_float, (transa ? k : m), device_B_float, /*incx=*/1, &beta, device_C_float, /*incy=*/1); if (result != 0) { result_valid = false; error_code = result; break; } } } gpu_timer.Stop(); if (result_valid) { elapsed_millis = gpu_timer.ElapsedMillis() / num_iter; throughput = 1.0f / elapsed_millis / 1000000.0f * m * n * k * 2; printf("runtime (msec):%6.4f, throughput (Gitems/sec):%5.2f\n", elapsed_millis, throughput); } else if (error_code != 0) { printf("cublasSgemv() returned error code:%d\n", error_code); } cudaFree(device_A_float); cudaFree(device_B_float); cudaFree(device_C_float); cublasDestroy(handle); free(host_A); free(host_B); } int main(int argc, char *argv[]) { int m, k, n, ta, tb; if (argc < 6) { // m, k, n, ta, tb m = 20; k = 20000; n = 200; ta = 0; tb = 1; } else { m = atoi(argv[1]); k = atoi(argv[2]); n = atoi(argv[3]); ta = atoi(argv[4]); tb = atoi(argv[5]); } RunTest(m, k, n, ta, tb); return 0; }
960c8822def92dec9eccd5d432c4829b40a8bfa3.hip
// !!! This is a file automatically generated by hipify!!! // Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <algorithm> #include <cfloat> #include <string> #include <vector> #include "paddle/fluid/framework/data_layout.h" #include "paddle/fluid/operators/activation_op.h" #include "paddle/fluid/operators/fused/fused_bn_add_activation_op.h" #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/operators/norm_utils.h" #include "paddle/fluid/platform/cudnn_helper.h" #include "paddle/fluid/platform/float16.h" DECLARE_bool(cudnn_batchnorm_spatial_persistent); namespace paddle { namespace operators { using Tensor = framework::Tensor; template <typename T> using CudnnDataType = platform::CudnnDataType<T>; template <typename T> using BatchNormParamType = typename CudnnDataType<T>::BatchNormParamType; template <typename T> class FusedBatchNormAddActKernel<platform::CUDADeviceContext, T> : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &ctx) const override { PADDLE_ENFORCE_EQ( platform::is_gpu_place(ctx.GetPlace()), true, platform::errors::PreconditionNotMet("It must use CUDAPlace.")); double epsilon = static_cast<double>(ctx.Attr<float>("epsilon")); float momentum = ctx.Attr<float>("momentum"); std::string act_type = ctx.Attr<std::string>("act_type"); if (epsilon <= CUDNN_BN_MIN_EPSILON - FLT_EPSILON) { LOG(ERROR) << "Provided epsilon is 
smaller than " << "CUDNN_BN_MIN_EPSILON. Setting it to " << "CUDNN_BN_MIN_EPSILON instead."; } epsilon = ::max(epsilon, CUDNN_BN_MIN_EPSILON); // Get the size for each dimension. // NHWC [batch_size, in_height, in_width, in_channels] const auto *x = ctx.Input<Tensor>("X"); const auto *z = ctx.Input<Tensor>("Z"); const auto &in_dims = x->dims(); const auto *scale = ctx.Input<Tensor>("Scale"); const auto *bias = ctx.Input<Tensor>("Bias"); auto *mean_out = ctx.Output<Tensor>("MeanOut"); auto *variance_out = ctx.Output<Tensor>("VarianceOut"); mean_out->mutable_data<BatchNormParamType<T>>(ctx.GetPlace()); variance_out->mutable_data<BatchNormParamType<T>>(ctx.GetPlace()); auto *saved_mean = ctx.Output<Tensor>("SavedMean"); auto *saved_variance = ctx.Output<Tensor>("SavedVariance"); saved_mean->mutable_data<BatchNormParamType<T>>(ctx.GetPlace()); saved_variance->mutable_data<BatchNormParamType<T>>(ctx.GetPlace()); auto *y = ctx.Output<Tensor>("Y"); y->mutable_data<T>(ctx.GetPlace()); int N, C, H, W, D; const DataLayout data_layout = DataLayout::kNHWC; ExtractNCWHD(in_dims, data_layout, &N, &C, &H, &W, &D); auto &dev_ctx = ctx.template device_context<platform::CUDADeviceContext>(); // ------------------- cudnn descriptors --------------------- auto handle = dev_ctx.cudnn_handle(); cudnnTensorDescriptor_t data_desc_; cudnnTensorDescriptor_t bn_param_desc_; cudnnBatchNormMode_t mode_ = CUDNN_BATCHNORM_SPATIAL_PERSISTENT; PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnCreateTensorDescriptor(&data_desc_)); PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnCreateTensorDescriptor(&bn_param_desc_)); std::vector<int> dims = {N, C, H, W, D}; std::vector<int> strides = {H * W * D * C, 1, W * D * C, D * C, C}; PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor( data_desc_, CudnnDataType<T>::type, in_dims.size() > 3 ? 
in_dims.size() : 4, dims.data(), strides.data())); PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnDeriveBNTensorDescriptor(bn_param_desc_, data_desc_, mode_)); double this_factor = 1. - momentum; cudnnBatchNormOps_t bnOps_ = CUDNN_BATCHNORM_OPS_BN_ADD_ACTIVATION; platform::ScopedActivationDescriptor scope_act_desc; cudnnActivationDescriptor_t activation_desc_ = scope_act_desc.descriptor<T>(act_type); size_t workspace_size = 0; size_t reserve_space_size = 0; void *reserve_space_ptr = nullptr; void *workspace_ptr = nullptr; Tensor workspace_tensor; // Create reserve space and workspace for batch norm. // Create tensor for each batchnorm op, it will be used in the // backward. Thus this tensor shouldn't be temp. auto *reserve_space = ctx.Output<Tensor>("ReserveSpace"); PADDLE_ENFORCE_NOT_NULL( reserve_space, platform::errors::NotFound( "The argument ReserveSpace of batch_norm op is not found.")); // --------------- cudnn batchnorm workspace --------------- PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload:: cudnnGetBatchNormalizationForwardTrainingExWorkspaceSize( /*handle=*/handle, /*mode=*/mode_, /*bnOps=*/bnOps_, /*xDesc=*/data_desc_, /*zDesc=*/data_desc_, /*yDesc=*/data_desc_, /*bnScaleBiasMeanVarDesc=*/bn_param_desc_, /*activationDesc=*/activation_desc_, /*sizeInBytes=*/&workspace_size)); // -------------- cudnn batchnorm reserve space -------------- PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnGetBatchNormalizationTrainingExReserveSpaceSize( /*handle=*/handle, /*mode=*/mode_, /*bnOps=*/bnOps_, /*activationDesc=*/activation_desc_, /*xDesc=*/data_desc_, /*sizeInBytes=*/&reserve_space_size)); reserve_space_ptr = reserve_space->mutable_data(ctx.GetPlace(), x->type(), reserve_space_size); workspace_ptr = workspace_tensor.mutable_data(ctx.GetPlace(), x->type(), workspace_size); PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnBatchNormalizationForwardTrainingEx( handle, mode_, bnOps_, CudnnDataType<T>::kOne(), CudnnDataType<T>::kZero(), data_desc_, 
x->template data<T>(), data_desc_, z->template data<T>(), data_desc_, y->template data<T>(), bn_param_desc_, scale->template data<BatchNormParamType<T>>(), bias->template data<BatchNormParamType<T>>(), this_factor, mean_out->template mutable_data<BatchNormParamType<T>>( ctx.GetPlace()), variance_out->template mutable_data<BatchNormParamType<T>>( ctx.GetPlace()), epsilon, saved_mean->template mutable_data<BatchNormParamType<T>>( ctx.GetPlace()), saved_variance->template mutable_data<BatchNormParamType<T>>( ctx.GetPlace()), activation_desc_, workspace_ptr, workspace_size, reserve_space_ptr, reserve_space_size)); // clean when exit. PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnDestroyTensorDescriptor(data_desc_)); PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnDestroyTensorDescriptor(bn_param_desc_)); } }; template <typename T> class FusedBatchNormAddActGradKernel<platform::CUDADeviceContext, T> : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &ctx) const override { PADDLE_ENFORCE_EQ( platform::is_gpu_place(ctx.GetPlace()), true, platform::errors::PreconditionNotMet("It must use CUDAPlace.")); double epsilon = static_cast<double>(ctx.Attr<float>("epsilon")); std::string act_type = ctx.Attr<std::string>("act_type"); const auto *x = ctx.Input<Tensor>("X"); const auto *y = ctx.Input<Tensor>("Y"); const auto *d_y = ctx.Input<Tensor>(framework::GradVarName("Y")); const auto *scale = ctx.Input<Tensor>("Scale"); const auto *bias = ctx.Input<Tensor>("Bias"); const auto *reserve_space = ctx.Input<Tensor>("ReserveSpace"); const auto &in_dims = x->dims(); int N, C, H, W, D; const DataLayout data_layout = DataLayout::kNHWC; ExtractNCWHD(in_dims, data_layout, &N, &C, &H, &W, &D); // init output auto *d_x = ctx.Output<Tensor>(framework::GradVarName("X")); auto *d_z = ctx.Output<Tensor>(framework::GradVarName("Z")); auto *d_scale = ctx.Output<Tensor>(framework::GradVarName("Scale")); auto *d_bias = 
ctx.Output<Tensor>(framework::GradVarName("Bias")); d_x->mutable_data<T>(ctx.GetPlace()); d_z->mutable_data<T>(ctx.GetPlace()); PADDLE_ENFORCE_EQ( d_scale && d_bias, true, platform::errors::PreconditionNotMet( "Both the scale grad and the bias grad must not be null.")); d_scale->mutable_data<BatchNormParamType<T>>(ctx.GetPlace()); d_bias->mutable_data<BatchNormParamType<T>>(ctx.GetPlace()); PADDLE_ENFORCE_EQ(scale->dims().size(), 1UL, platform::errors::PreconditionNotMet( "The scale only has one dimension.")); PADDLE_ENFORCE_EQ( scale->dims()[0], C, platform::errors::PreconditionNotMet( "The size of scale is equal to the channel of Input(X).")); auto &dev_ctx = ctx.template device_context<platform::CUDADeviceContext>(); std::vector<int> dims = {N, C, H, W, D}; std::vector<int> strides = {H * W * C * D, 1, W * D * C, D * C, C}; // ------------------- cudnn descriptors --------------------- cudnnTensorDescriptor_t data_desc_; cudnnTensorDescriptor_t bn_param_desc_; cudnnBatchNormMode_t mode_ = CUDNN_BATCHNORM_SPATIAL_PERSISTENT; PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnCreateTensorDescriptor(&data_desc_)); PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnCreateTensorDescriptor(&bn_param_desc_)); if (epsilon <= CUDNN_BN_MIN_EPSILON - FLT_EPSILON) { LOG(ERROR) << "Provided epsilon is smaller than " << "CUDNN_BN_MIN_EPSILON. Setting it to " << "CUDNN_BN_MIN_EPSILON instead."; } epsilon = ::max(epsilon, CUDNN_BN_MIN_EPSILON); PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor( data_desc_, CudnnDataType<T>::type, in_dims.size() > 3 ? 
in_dims.size() : 4, dims.data(), strides.data())); PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnDeriveBNTensorDescriptor(bn_param_desc_, data_desc_, mode_)); const auto *saved_mean = ctx.Input<Tensor>("SavedMean"); const auto *saved_var = ctx.Input<Tensor>("SavedVariance"); const auto *saved_mean_data = saved_mean->template data<BatchNormParamType<T>>(); const auto *saved_var_data = saved_var->template data<BatchNormParamType<T>>(); size_t workspace_size = 0; void *workspace_ptr = nullptr; Tensor workspace_tensor; auto reserve_space_size = reserve_space->memory_size(); cudnnBatchNormOps_t bnOps_ = CUDNN_BATCHNORM_OPS_BN_ADD_ACTIVATION; platform::ScopedActivationDescriptor scope_act_desc; cudnnActivationDescriptor_t activation_desc_ = scope_act_desc.descriptor<T>(act_type); // --------------- cudnn batchnorm workspace --------------- PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnGetBatchNormalizationBackwardExWorkspaceSize( /*handle=*/dev_ctx.cudnn_handle(), /*mode=*/mode_, /*bnOps=*/bnOps_, /*xDesc=*/data_desc_, /*yDesc=*/data_desc_, /*dyDesc=*/data_desc_, /*dzDesc=*/data_desc_, /*dxDesc=*/data_desc_, /*bnScaleBiasMeanVarDesc=*/bn_param_desc_, /*activationDesc=*/activation_desc_, /*sizeInBytes=*/&workspace_size)); workspace_ptr = workspace_tensor.mutable_data(ctx.GetPlace(), x->type(), workspace_size); PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnBatchNormalizationBackwardEx( /*handle=*/dev_ctx.cudnn_handle(), /*mode=*/mode_, /*bnOps=*/bnOps_, /*alphaDataDiff=*/CudnnDataType<T>::kOne(), /*betaDataDiff=*/CudnnDataType<T>::kZero(), /*alphaParamDiff=*/CudnnDataType<T>::kOne(), /*betaParamDiff=*/CudnnDataType<T>::kZero(), /*xDesc=*/data_desc_, /*xData=*/x->template data<T>(), /*yDesc=*/data_desc_, /*yData=*/y->template data<T>(), /*dyDesc=*/data_desc_, /*dyData=*/d_y->template data<T>(), /*dzDesc=*/data_desc_, /*dzData=*/d_z->template data<T>(), /*dxDesc=*/data_desc_, /*dxData=*/d_x->template data<T>(), /*dBnScaleBiasDesc=*/bn_param_desc_, 
/*bnScaleData=*/scale->template data<BatchNormParamType<T>>(), /*bnBiasData=*/bias->template data<BatchNormParamType<T>>(), /*dBnScaleData=*/d_scale->template data<BatchNormParamType<T>>(), /*dBnBiasData=*/d_bias->template data<BatchNormParamType<T>>(), /*epsilon=*/epsilon, /*savedMean=*/saved_mean_data, /*savedInvVariance=*/saved_var_data, /*activationDesmc=*/activation_desc_, /*workspace=*/workspace_ptr, /*workSpaceSizeInBytes=*/workspace_size, /*reserveSpace=*/const_cast<T *>(reserve_space->template data<T>()), /*reserveSpaceSizeInBytes=*/reserve_space_size)); // clean when exit. PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnDestroyTensorDescriptor(data_desc_)); PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnDestroyTensorDescriptor(bn_param_desc_)); } }; } // namespace operators } // namespace paddle #if CUDNN_VERSION >= 7401 namespace ops = paddle::operators; namespace plat = paddle::platform; REGISTER_OP_CUDA_KERNEL( fused_bn_add_activation, ops::FusedBatchNormAddActKernel<plat::CUDADeviceContext, plat::float16>); REGISTER_OP_CUDA_KERNEL(fused_bn_add_activation_grad, ops::FusedBatchNormAddActGradKernel< plat::CUDADeviceContext, plat::float16>); #endif
960c8822def92dec9eccd5d432c4829b40a8bfa3.cu
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <algorithm> #include <cfloat> #include <string> #include <vector> #include "paddle/fluid/framework/data_layout.h" #include "paddle/fluid/operators/activation_op.h" #include "paddle/fluid/operators/fused/fused_bn_add_activation_op.h" #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/operators/norm_utils.h" #include "paddle/fluid/platform/cudnn_helper.h" #include "paddle/fluid/platform/float16.h" DECLARE_bool(cudnn_batchnorm_spatial_persistent); namespace paddle { namespace operators { using Tensor = framework::Tensor; template <typename T> using CudnnDataType = platform::CudnnDataType<T>; template <typename T> using BatchNormParamType = typename CudnnDataType<T>::BatchNormParamType; template <typename T> class FusedBatchNormAddActKernel<platform::CUDADeviceContext, T> : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &ctx) const override { PADDLE_ENFORCE_EQ( platform::is_gpu_place(ctx.GetPlace()), true, platform::errors::PreconditionNotMet("It must use CUDAPlace.")); double epsilon = static_cast<double>(ctx.Attr<float>("epsilon")); float momentum = ctx.Attr<float>("momentum"); std::string act_type = ctx.Attr<std::string>("act_type"); if (epsilon <= CUDNN_BN_MIN_EPSILON - FLT_EPSILON) { LOG(ERROR) << "Provided epsilon is smaller than " << "CUDNN_BN_MIN_EPSILON. 
Setting it to " << "CUDNN_BN_MIN_EPSILON instead."; } epsilon = std::max(epsilon, CUDNN_BN_MIN_EPSILON); // Get the size for each dimension. // NHWC [batch_size, in_height, in_width, in_channels] const auto *x = ctx.Input<Tensor>("X"); const auto *z = ctx.Input<Tensor>("Z"); const auto &in_dims = x->dims(); const auto *scale = ctx.Input<Tensor>("Scale"); const auto *bias = ctx.Input<Tensor>("Bias"); auto *mean_out = ctx.Output<Tensor>("MeanOut"); auto *variance_out = ctx.Output<Tensor>("VarianceOut"); mean_out->mutable_data<BatchNormParamType<T>>(ctx.GetPlace()); variance_out->mutable_data<BatchNormParamType<T>>(ctx.GetPlace()); auto *saved_mean = ctx.Output<Tensor>("SavedMean"); auto *saved_variance = ctx.Output<Tensor>("SavedVariance"); saved_mean->mutable_data<BatchNormParamType<T>>(ctx.GetPlace()); saved_variance->mutable_data<BatchNormParamType<T>>(ctx.GetPlace()); auto *y = ctx.Output<Tensor>("Y"); y->mutable_data<T>(ctx.GetPlace()); int N, C, H, W, D; const DataLayout data_layout = DataLayout::kNHWC; ExtractNCWHD(in_dims, data_layout, &N, &C, &H, &W, &D); auto &dev_ctx = ctx.template device_context<platform::CUDADeviceContext>(); // ------------------- cudnn descriptors --------------------- auto handle = dev_ctx.cudnn_handle(); cudnnTensorDescriptor_t data_desc_; cudnnTensorDescriptor_t bn_param_desc_; cudnnBatchNormMode_t mode_ = CUDNN_BATCHNORM_SPATIAL_PERSISTENT; PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnCreateTensorDescriptor(&data_desc_)); PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnCreateTensorDescriptor(&bn_param_desc_)); std::vector<int> dims = {N, C, H, W, D}; std::vector<int> strides = {H * W * D * C, 1, W * D * C, D * C, C}; PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor( data_desc_, CudnnDataType<T>::type, in_dims.size() > 3 ? 
in_dims.size() : 4, dims.data(), strides.data())); PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnDeriveBNTensorDescriptor(bn_param_desc_, data_desc_, mode_)); double this_factor = 1. - momentum; cudnnBatchNormOps_t bnOps_ = CUDNN_BATCHNORM_OPS_BN_ADD_ACTIVATION; platform::ScopedActivationDescriptor scope_act_desc; cudnnActivationDescriptor_t activation_desc_ = scope_act_desc.descriptor<T>(act_type); size_t workspace_size = 0; size_t reserve_space_size = 0; void *reserve_space_ptr = nullptr; void *workspace_ptr = nullptr; Tensor workspace_tensor; // Create reserve space and workspace for batch norm. // Create tensor for each batchnorm op, it will be used in the // backward. Thus this tensor shouldn't be temp. auto *reserve_space = ctx.Output<Tensor>("ReserveSpace"); PADDLE_ENFORCE_NOT_NULL( reserve_space, platform::errors::NotFound( "The argument ReserveSpace of batch_norm op is not found.")); // --------------- cudnn batchnorm workspace --------------- PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload:: cudnnGetBatchNormalizationForwardTrainingExWorkspaceSize( /*handle=*/handle, /*mode=*/mode_, /*bnOps=*/bnOps_, /*xDesc=*/data_desc_, /*zDesc=*/data_desc_, /*yDesc=*/data_desc_, /*bnScaleBiasMeanVarDesc=*/bn_param_desc_, /*activationDesc=*/activation_desc_, /*sizeInBytes=*/&workspace_size)); // -------------- cudnn batchnorm reserve space -------------- PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnGetBatchNormalizationTrainingExReserveSpaceSize( /*handle=*/handle, /*mode=*/mode_, /*bnOps=*/bnOps_, /*activationDesc=*/activation_desc_, /*xDesc=*/data_desc_, /*sizeInBytes=*/&reserve_space_size)); reserve_space_ptr = reserve_space->mutable_data(ctx.GetPlace(), x->type(), reserve_space_size); workspace_ptr = workspace_tensor.mutable_data(ctx.GetPlace(), x->type(), workspace_size); PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnBatchNormalizationForwardTrainingEx( handle, mode_, bnOps_, CudnnDataType<T>::kOne(), CudnnDataType<T>::kZero(), data_desc_, 
x->template data<T>(), data_desc_, z->template data<T>(), data_desc_, y->template data<T>(), bn_param_desc_, scale->template data<BatchNormParamType<T>>(), bias->template data<BatchNormParamType<T>>(), this_factor, mean_out->template mutable_data<BatchNormParamType<T>>( ctx.GetPlace()), variance_out->template mutable_data<BatchNormParamType<T>>( ctx.GetPlace()), epsilon, saved_mean->template mutable_data<BatchNormParamType<T>>( ctx.GetPlace()), saved_variance->template mutable_data<BatchNormParamType<T>>( ctx.GetPlace()), activation_desc_, workspace_ptr, workspace_size, reserve_space_ptr, reserve_space_size)); // clean when exit. PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnDestroyTensorDescriptor(data_desc_)); PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnDestroyTensorDescriptor(bn_param_desc_)); } }; template <typename T> class FusedBatchNormAddActGradKernel<platform::CUDADeviceContext, T> : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &ctx) const override { PADDLE_ENFORCE_EQ( platform::is_gpu_place(ctx.GetPlace()), true, platform::errors::PreconditionNotMet("It must use CUDAPlace.")); double epsilon = static_cast<double>(ctx.Attr<float>("epsilon")); std::string act_type = ctx.Attr<std::string>("act_type"); const auto *x = ctx.Input<Tensor>("X"); const auto *y = ctx.Input<Tensor>("Y"); const auto *d_y = ctx.Input<Tensor>(framework::GradVarName("Y")); const auto *scale = ctx.Input<Tensor>("Scale"); const auto *bias = ctx.Input<Tensor>("Bias"); const auto *reserve_space = ctx.Input<Tensor>("ReserveSpace"); const auto &in_dims = x->dims(); int N, C, H, W, D; const DataLayout data_layout = DataLayout::kNHWC; ExtractNCWHD(in_dims, data_layout, &N, &C, &H, &W, &D); // init output auto *d_x = ctx.Output<Tensor>(framework::GradVarName("X")); auto *d_z = ctx.Output<Tensor>(framework::GradVarName("Z")); auto *d_scale = ctx.Output<Tensor>(framework::GradVarName("Scale")); auto *d_bias = 
ctx.Output<Tensor>(framework::GradVarName("Bias")); d_x->mutable_data<T>(ctx.GetPlace()); d_z->mutable_data<T>(ctx.GetPlace()); PADDLE_ENFORCE_EQ( d_scale && d_bias, true, platform::errors::PreconditionNotMet( "Both the scale grad and the bias grad must not be null.")); d_scale->mutable_data<BatchNormParamType<T>>(ctx.GetPlace()); d_bias->mutable_data<BatchNormParamType<T>>(ctx.GetPlace()); PADDLE_ENFORCE_EQ(scale->dims().size(), 1UL, platform::errors::PreconditionNotMet( "The scale only has one dimension.")); PADDLE_ENFORCE_EQ( scale->dims()[0], C, platform::errors::PreconditionNotMet( "The size of scale is equal to the channel of Input(X).")); auto &dev_ctx = ctx.template device_context<platform::CUDADeviceContext>(); std::vector<int> dims = {N, C, H, W, D}; std::vector<int> strides = {H * W * C * D, 1, W * D * C, D * C, C}; // ------------------- cudnn descriptors --------------------- cudnnTensorDescriptor_t data_desc_; cudnnTensorDescriptor_t bn_param_desc_; cudnnBatchNormMode_t mode_ = CUDNN_BATCHNORM_SPATIAL_PERSISTENT; PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnCreateTensorDescriptor(&data_desc_)); PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnCreateTensorDescriptor(&bn_param_desc_)); if (epsilon <= CUDNN_BN_MIN_EPSILON - FLT_EPSILON) { LOG(ERROR) << "Provided epsilon is smaller than " << "CUDNN_BN_MIN_EPSILON. Setting it to " << "CUDNN_BN_MIN_EPSILON instead."; } epsilon = std::max(epsilon, CUDNN_BN_MIN_EPSILON); PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor( data_desc_, CudnnDataType<T>::type, in_dims.size() > 3 ? 
in_dims.size() : 4, dims.data(), strides.data())); PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnDeriveBNTensorDescriptor(bn_param_desc_, data_desc_, mode_)); const auto *saved_mean = ctx.Input<Tensor>("SavedMean"); const auto *saved_var = ctx.Input<Tensor>("SavedVariance"); const auto *saved_mean_data = saved_mean->template data<BatchNormParamType<T>>(); const auto *saved_var_data = saved_var->template data<BatchNormParamType<T>>(); size_t workspace_size = 0; void *workspace_ptr = nullptr; Tensor workspace_tensor; auto reserve_space_size = reserve_space->memory_size(); cudnnBatchNormOps_t bnOps_ = CUDNN_BATCHNORM_OPS_BN_ADD_ACTIVATION; platform::ScopedActivationDescriptor scope_act_desc; cudnnActivationDescriptor_t activation_desc_ = scope_act_desc.descriptor<T>(act_type); // --------------- cudnn batchnorm workspace --------------- PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnGetBatchNormalizationBackwardExWorkspaceSize( /*handle=*/dev_ctx.cudnn_handle(), /*mode=*/mode_, /*bnOps=*/bnOps_, /*xDesc=*/data_desc_, /*yDesc=*/data_desc_, /*dyDesc=*/data_desc_, /*dzDesc=*/data_desc_, /*dxDesc=*/data_desc_, /*bnScaleBiasMeanVarDesc=*/bn_param_desc_, /*activationDesc=*/activation_desc_, /*sizeInBytes=*/&workspace_size)); workspace_ptr = workspace_tensor.mutable_data(ctx.GetPlace(), x->type(), workspace_size); PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnBatchNormalizationBackwardEx( /*handle=*/dev_ctx.cudnn_handle(), /*mode=*/mode_, /*bnOps=*/bnOps_, /*alphaDataDiff=*/CudnnDataType<T>::kOne(), /*betaDataDiff=*/CudnnDataType<T>::kZero(), /*alphaParamDiff=*/CudnnDataType<T>::kOne(), /*betaParamDiff=*/CudnnDataType<T>::kZero(), /*xDesc=*/data_desc_, /*xData=*/x->template data<T>(), /*yDesc=*/data_desc_, /*yData=*/y->template data<T>(), /*dyDesc=*/data_desc_, /*dyData=*/d_y->template data<T>(), /*dzDesc=*/data_desc_, /*dzData=*/d_z->template data<T>(), /*dxDesc=*/data_desc_, /*dxData=*/d_x->template data<T>(), /*dBnScaleBiasDesc=*/bn_param_desc_, 
/*bnScaleData=*/scale->template data<BatchNormParamType<T>>(), /*bnBiasData=*/bias->template data<BatchNormParamType<T>>(), /*dBnScaleData=*/d_scale->template data<BatchNormParamType<T>>(), /*dBnBiasData=*/d_bias->template data<BatchNormParamType<T>>(), /*epsilon=*/epsilon, /*savedMean=*/saved_mean_data, /*savedInvVariance=*/saved_var_data, /*activationDesmc=*/activation_desc_, /*workspace=*/workspace_ptr, /*workSpaceSizeInBytes=*/workspace_size, /*reserveSpace=*/const_cast<T *>(reserve_space->template data<T>()), /*reserveSpaceSizeInBytes=*/reserve_space_size)); // clean when exit. PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnDestroyTensorDescriptor(data_desc_)); PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnDestroyTensorDescriptor(bn_param_desc_)); } }; } // namespace operators } // namespace paddle #if CUDNN_VERSION >= 7401 namespace ops = paddle::operators; namespace plat = paddle::platform; REGISTER_OP_CUDA_KERNEL( fused_bn_add_activation, ops::FusedBatchNormAddActKernel<plat::CUDADeviceContext, plat::float16>); REGISTER_OP_CUDA_KERNEL(fused_bn_add_activation_grad, ops::FusedBatchNormAddActGradKernel< plat::CUDADeviceContext, plat::float16>); #endif
3f4516c96aea0f281732f6a088304e42b85eee6d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "computeQ.cc" #define MU_THREADS_PER_BLOCK 256 #define Q_THREADS_PER_BLOCK 256 /* Original: inline void ComputePhiMagCPU(int numK, float* phiR, float* phiI, float* __restrict__ phiMag) { int indexK = 0; for (indexK = 0; indexK < numK; indexK++) { float real = phiR[indexK]; float imag = phiI[indexK]; phiMag[indexK] = real*real + imag*imag; } } */ __global__ void dev_ComputePhiMagGPU(const int numK, const float* phiR, const float* phiI, float* phiMag) { indexK = blockIdx.x * MU_THREADS_PER_BLOCK + threadIdx.x; if (indexK < numK) { float real = phiR[indexK]; float image = phiI[indexK]; phiMag[indexK] = real*real + imag*imag; } } void ComputePhiMagGPU(int numK, float* phiR, float* phiI, float* __restrict__ phiMag) { dim3 dim_grid((numK-1)/MU_THREADS_PER_BLOCK + 1, 1, 1); dim3 dim_block(MU_THREADS_PER_BLOCK, 1, 1); hipLaunchKernelGGL(( dev_ComputePhiMagGPU), dim3(dim_grid), dim3(dim_block), 0, 0, numK, phiR, phiI, phiMag); } /* //inline void ComputeQCPU(int numK, int numX, struct kValues *kVals, float* x, float* y, float* z, float *__restrict__ Qr, float *__restrict__ Qi) { float expArg; float cosArg; float sinArg; int indexK, indexX; // Loop over the space and frequency domains. // Generally, numX > numK. // Since loops are not tiled, it's better that the loop with the smaller // cache footprint be innermost. 
for (indexX = 0; indexX < numX; indexX++) { // Sum the contributions to this point over all frequencies float Qracc = 0.0f; float Qiacc = 0.0f; for (indexK = 0; indexK < numK; indexK++) { expArg = PIx2 * (kVals[indexK].Kx * x[indexX] + kVals[indexK].Ky * y[indexX] + kVals[indexK].Kz * z[indexX]); cosArg = cosf(expArg); sinArg = sinf(expArg); float phi = kVals[indexK].PhiMag; Qracc += phi * cosArg; Qiacc += phi * sinArg; } Qr[indexX] = Qracc; Qi[indexX] = Qiacc; } } */ __global__ void dev_ComputeQGPU(const int numK, const int numX, struct kValues *kVals, const float* x, const float* y, const float* z, float* Qr, float* Qi) { // Local vars float loc_x, loc_y, loc_z; float Qracc = 0.0f; float Qiacc = 0.0f; // Find index of voxel assigned to this thread int indexX = blockIdx.x * Q_THREADS_PER_BLOCK + threadIdx.x; __shared__ struct kValues kVals_tile[Q_THREADS_PER_BLOCK]; for(int i = 0; i < (numK-1)/Q_THREADS_PER_BLOCK + 1; i++) { // Loop for each tile //Collaborative loading if(indexX < numX) { loc_x = x[indexX]; loc_y = y[indexX]; loc_z = z[indexX]; kVals_tile[threadIdx.x] = kVals[i*Q_THREADS_PER_BLOCK+threadIdx.x]; } else { loc_x = 0; loc_y = 0; loc_z = 0; kVals_tile[threadIdx.x] = 0; } __syncthreads(); // sync to ensure tile properly loaded // Accumulation if(indexX < numX) { // Checking data bounds for (indexK = 0; indexK < Q_THREADS_PER_BLOCK; indexK++) { expArg = PIx2 * (kVals_tile[indexK].Kx * loc_x + kVals_tile[indexK].Ky * loc_y + kVals_tile[indexK].Kz * loc_z); cosArg = cosf(expArg); sinArg = sinf(expArg); float phi = kVals_tile[indexK].PhiMag; Qracc += phi * cosArg; Qiacc += phi * sinArg; } } } if(indexX < numX) { Qr[indexX] = Qracc; Qi[indexX] = Qiacc; } } void ComputeQGPU(int numK, int numX, struct kValues *kVals, float* x, float* y, float* z, float *__restrict__ Qr, float *__restrict__ Qi) { dim3 dim_grid((numK-1)/Q_THREADS_PER_BLOCK + 1, 1, 1); dim3 dim_block(Q_THREADS_PER_BLOCK, 1, 1); dev_ComputeQGPU(numK, numX, kVals, x, y, z, Qr, Qi); } /* Original: 
void createDataStructsCPU(int numK, int numX, float** phiMag, float** Qr, float** Qi) { *phiMag = (float* ) memalign(16, numK * sizeof(float)); *Qr = (float*) memalign(16, numX * sizeof (float)); memset((void *)*Qr, 0, numX * sizeof(float)); *Qi = (float*) memalign(16, numX * sizeof (float)); memset((void *)*Qi, 0, numX * sizeof(float)); } */ void createDataStructsGPU(int numK, int numX, float** phiMag, float** Qr, float** Qi)) { hipError_t cuda_ret; cuda_ret = hipMalloc((void**) &(*phiMag), numK*sizeof(float)); if (cuda_ret != hipSuccess) { printf("%s in %s at line %d\n", hipGetErrorString(cuda_ret), __FILE__, __LINE__); exit(EXIT_FAILURE); } cuda_ret = hipMalloc((void**) &(*Qr), numX*sizeof(float)); if (cuda_ret != hipSuccess) { printf("%s in %s at line %d\n", hipGetErrorString(cuda_ret), __FILE__, __LINE__); exit(EXIT_FAILURE); } cuda_ret = hipMemset((void *)*Qr, 0, numX * sizeof(float)); if (cuda_ret != hipSuccess) { printf("%s in %s at line %d\n", hipGetErrorString(cuda_ret), __FILE__, __LINE__); exit(EXIT_FAILURE); } cuda_ret = hipMalloc((void**) &(*Qi), numX*sizeof(float)); if (cuda_ret != hipSuccess) { printf("%s in %s at line %d\n", hipGetErrorString(cuda_ret), __FILE__, __LINE__); exit(EXIT_FAILURE); } cuda_ret = hipMemset((void *)*Qi, 0, numX * sizeof(float)); if (cuda_ret != hipSuccess) { printf("%s in %s at line %d\n", hipGetErrorString(cuda_ret), __FILE__, __LINE__); exit(EXIT_FAILURE); } }
3f4516c96aea0f281732f6a088304e42b85eee6d.cu
#include "computeQ.cc" #define MU_THREADS_PER_BLOCK 256 #define Q_THREADS_PER_BLOCK 256 /* Original: inline void ComputePhiMagCPU(int numK, float* phiR, float* phiI, float* __restrict__ phiMag) { int indexK = 0; for (indexK = 0; indexK < numK; indexK++) { float real = phiR[indexK]; float imag = phiI[indexK]; phiMag[indexK] = real*real + imag*imag; } } */ __global__ void dev_ComputePhiMagGPU(const int numK, const float* phiR, const float* phiI, float* phiMag) { indexK = blockIdx.x * MU_THREADS_PER_BLOCK + threadIdx.x; if (indexK < numK) { float real = phiR[indexK]; float image = phiI[indexK]; phiMag[indexK] = real*real + imag*imag; } } void ComputePhiMagGPU(int numK, float* phiR, float* phiI, float* __restrict__ phiMag) { dim3 dim_grid((numK-1)/MU_THREADS_PER_BLOCK + 1, 1, 1); dim3 dim_block(MU_THREADS_PER_BLOCK, 1, 1); dev_ComputePhiMagGPU<<<dim_grid, dim_block>>>(numK, phiR, phiI, phiMag); } /* //inline void ComputeQCPU(int numK, int numX, struct kValues *kVals, float* x, float* y, float* z, float *__restrict__ Qr, float *__restrict__ Qi) { float expArg; float cosArg; float sinArg; int indexK, indexX; // Loop over the space and frequency domains. // Generally, numX > numK. // Since loops are not tiled, it's better that the loop with the smaller // cache footprint be innermost. 
for (indexX = 0; indexX < numX; indexX++) { // Sum the contributions to this point over all frequencies float Qracc = 0.0f; float Qiacc = 0.0f; for (indexK = 0; indexK < numK; indexK++) { expArg = PIx2 * (kVals[indexK].Kx * x[indexX] + kVals[indexK].Ky * y[indexX] + kVals[indexK].Kz * z[indexX]); cosArg = cosf(expArg); sinArg = sinf(expArg); float phi = kVals[indexK].PhiMag; Qracc += phi * cosArg; Qiacc += phi * sinArg; } Qr[indexX] = Qracc; Qi[indexX] = Qiacc; } } */ __global__ void dev_ComputeQGPU(const int numK, const int numX, struct kValues *kVals, const float* x, const float* y, const float* z, float* Qr, float* Qi) { // Local vars float loc_x, loc_y, loc_z; float Qracc = 0.0f; float Qiacc = 0.0f; // Find index of voxel assigned to this thread int indexX = blockIdx.x * Q_THREADS_PER_BLOCK + threadIdx.x; __shared__ struct kValues kVals_tile[Q_THREADS_PER_BLOCK]; for(int i = 0; i < (numK-1)/Q_THREADS_PER_BLOCK + 1; i++) { // Loop for each tile //Collaborative loading if(indexX < numX) { loc_x = x[indexX]; loc_y = y[indexX]; loc_z = z[indexX]; kVals_tile[threadIdx.x] = kVals[i*Q_THREADS_PER_BLOCK+threadIdx.x]; } else { loc_x = 0; loc_y = 0; loc_z = 0; kVals_tile[threadIdx.x] = 0; } __syncthreads(); // sync to ensure tile properly loaded // Accumulation if(indexX < numX) { // Checking data bounds for (indexK = 0; indexK < Q_THREADS_PER_BLOCK; indexK++) { expArg = PIx2 * (kVals_tile[indexK].Kx * loc_x + kVals_tile[indexK].Ky * loc_y + kVals_tile[indexK].Kz * loc_z); cosArg = cosf(expArg); sinArg = sinf(expArg); float phi = kVals_tile[indexK].PhiMag; Qracc += phi * cosArg; Qiacc += phi * sinArg; } } } if(indexX < numX) { Qr[indexX] = Qracc; Qi[indexX] = Qiacc; } } void ComputeQGPU(int numK, int numX, struct kValues *kVals, float* x, float* y, float* z, float *__restrict__ Qr, float *__restrict__ Qi) { dim3 dim_grid((numK-1)/Q_THREADS_PER_BLOCK + 1, 1, 1); dim3 dim_block(Q_THREADS_PER_BLOCK, 1, 1); dev_ComputeQGPU(numK, numX, kVals, x, y, z, Qr, Qi); } /* Original: 
void createDataStructsCPU(int numK, int numX, float** phiMag, float** Qr, float** Qi) { *phiMag = (float* ) memalign(16, numK * sizeof(float)); *Qr = (float*) memalign(16, numX * sizeof (float)); memset((void *)*Qr, 0, numX * sizeof(float)); *Qi = (float*) memalign(16, numX * sizeof (float)); memset((void *)*Qi, 0, numX * sizeof(float)); } */ void createDataStructsGPU(int numK, int numX, float** phiMag, float** Qr, float** Qi)) { cudaError_t cuda_ret; cuda_ret = cudaMalloc((void**) &(*phiMag), numK*sizeof(float)); if (cuda_ret != cudaSuccess) { printf("%s in %s at line %d\n", cudaGetErrorString(cuda_ret), __FILE__, __LINE__); exit(EXIT_FAILURE); } cuda_ret = cudaMalloc((void**) &(*Qr), numX*sizeof(float)); if (cuda_ret != cudaSuccess) { printf("%s in %s at line %d\n", cudaGetErrorString(cuda_ret), __FILE__, __LINE__); exit(EXIT_FAILURE); } cuda_ret = cudaMemset((void *)*Qr, 0, numX * sizeof(float)); if (cuda_ret != cudaSuccess) { printf("%s in %s at line %d\n", cudaGetErrorString(cuda_ret), __FILE__, __LINE__); exit(EXIT_FAILURE); } cuda_ret = cudaMalloc((void**) &(*Qi), numX*sizeof(float)); if (cuda_ret != cudaSuccess) { printf("%s in %s at line %d\n", cudaGetErrorString(cuda_ret), __FILE__, __LINE__); exit(EXIT_FAILURE); } cuda_ret = cudaMemset((void *)*Qi, 0, numX * sizeof(float)); if (cuda_ret != cudaSuccess) { printf("%s in %s at line %d\n", cudaGetErrorString(cuda_ret), __FILE__, __LINE__); exit(EXIT_FAILURE); } }
4160c72e85bcb2123c6bbdb874ec66b66baff25d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /******************************************************* * Copyright (c) 2014, ArrayFire * All rights reserved. * * This file is distributed under 3-clause BSD license. * The complete license agreement can be obtained at: * http://arrayfire.com/licenses/BSD-3-Clause ********************************************************/ #include <backend.hpp> #include <dispatch.hpp> #include <Param.hpp> #include <debug_cuda.hpp> #include <math.hpp> #include <convolve.hpp> namespace cuda { namespace kernel { static const int THREADS_X = 16; static const int THREADS_Y = 16; // below shared MAX_*_LEN's are calculated based on // a maximum shared memory configuration of 48KB per block // considering complex types as well static const int MAX_SCONV_FILTER_LEN = 31; // we shall declare the maximum size required of above all three cases // and re-use the same constant memory locations for every case __constant__ char sFilter[2*THREADS_Y*(2*(MAX_SCONV_FILTER_LEN-1)+THREADS_X)*sizeof(double)]; template<typename T, typename accType, int conv_dim, bool expand, int fLen> __global__ void convolve2_separable(Param<T> out, CParam<T> signal, int nBBS0, int nBBS1) { const int smem_len = (conv_dim==0 ? (THREADS_X+2*(fLen-1))* THREADS_Y: (THREADS_Y+2*(fLen-1))* THREADS_X); __shared__ T shrdMem[smem_len]; const int radius = fLen-1; const int padding = 2*radius; const int s0 = signal.strides[0]; const int s1 = signal.strides[1]; const int d0 = signal.dims[0]; const int d1 = signal.dims[1]; const int shrdLen = THREADS_X + (conv_dim==0 ? 
padding : 0); unsigned b2 = blockIdx.x/nBBS0; unsigned b3 = blockIdx.y/nBBS1; T *dst = (T *)out.ptr + (b2*out.strides[2] + b3*out.strides[3]); const T *src = (const T *)signal.ptr + (b2*signal.strides[2] + b3*signal.strides[3]); const accType *impulse = (const accType *)sFilter; int lx = threadIdx.x; int ly = threadIdx.y; int ox = THREADS_X * (blockIdx.x-b2*nBBS0) + lx; int oy = THREADS_Y * (blockIdx.y-b3*nBBS1) + ly; int gx = ox; int gy = oy; // below if-else statement is based on template parameter if (conv_dim==0) { gx += (expand ? 0 : fLen>>1); int endX = ((fLen-1)<<1) + THREADS_X; #pragma unroll for(int lx = threadIdx.x, glb_x = gx; lx<endX; lx += THREADS_X, glb_x += THREADS_X) { int i = glb_x - radius; int j = gy; bool is_i = i>=0 && i<d0; bool is_j = j>=0 && j<d1; shrdMem[ly*shrdLen+lx] = (is_i && is_j ? src[i*s0 + j*s1] : scalar<T>(0)); } } else if (conv_dim==1) { gy += (expand ? 0 : fLen>>1); int endY = ((fLen-1)<<1) + THREADS_Y; #pragma unroll for(int ly = threadIdx.y, glb_y = gy; ly<endY; ly += THREADS_Y, glb_y += THREADS_Y) { int i = gx; int j = glb_y - radius; bool is_i = i>=0 && i<d0; bool is_j = j>=0 && j<d1; shrdMem[ly*shrdLen+lx] = (is_i && is_j ? src[i*s0 + j*s1] : scalar<T>(0)); } } __syncthreads(); if (ox<out.dims[0] && oy<out.dims[1]) { // below conditional statement is based on template parameter int i = (conv_dim==0 ? lx : ly) + radius; accType accum = scalar<accType>(0); #pragma unroll for(int f=0; f<fLen; ++f) { accType f_val = impulse[f]; // below conditional statement is based on template parameter int s_idx = (conv_dim==0 ? 
(ly*shrdLen+(i-f)) : ((i-f)*shrdLen+lx)); T s_val = shrdMem[s_idx]; accum = accum + s_val*f_val; } dst[oy*out.strides[1]+ox] = (T)accum; } } template<typename T, typename aT, int cDim, bool expand, int f> void conv2Helper(dim3 blks, dim3 thrds, Param<T> out, CParam<T> sig, int nBBS0, int nBBS1) { CUDA_LAUNCH((convolve2_separable<T, aT, cDim, expand, f>), blks, thrds, out, sig, nBBS0, nBBS1); } template<typename T, typename accType, int conv_dim, bool expand> void convolve2(Param<T> out, CParam<T> signal, CParam<accType> filter) { int fLen = filter.dims[0] * filter.dims[1] * filter.dims[2] * filter.dims[3]; if(fLen > kernel::MAX_SCONV_FILTER_LEN) { // call upon fft CUDA_NOT_SUPPORTED(); } dim3 threads(THREADS_X, THREADS_Y); int blk_x = divup(out.dims[0], threads.x); int blk_y = divup(out.dims[1], threads.y); dim3 blocks(blk_x*signal.dims[2], blk_y*signal.dims[3]); // FIX ME: if the filter array is strided, direct copy of symbols // might cause issues CUDA_CHECK(hipMemcpyToSymbolAsync(kernel::sFilter, filter.ptr, fLen*sizeof(accType), 0, hipMemcpyDeviceToDevice, cuda::getActiveStream())); switch(fLen) { case 2: conv2Helper<T, accType, conv_dim, expand, 2>(blocks, threads, out, signal, blk_x, blk_y); break; case 3: conv2Helper<T, accType, conv_dim, expand, 3>(blocks, threads, out, signal, blk_x, blk_y); break; case 4: conv2Helper<T, accType, conv_dim, expand, 4>(blocks, threads, out, signal, blk_x, blk_y); break; case 5: conv2Helper<T, accType, conv_dim, expand, 5>(blocks, threads, out, signal, blk_x, blk_y); break; case 6: conv2Helper<T, accType, conv_dim, expand, 6>(blocks, threads, out, signal, blk_x, blk_y); break; case 7: conv2Helper<T, accType, conv_dim, expand, 7>(blocks, threads, out, signal, blk_x, blk_y); break; case 8: conv2Helper<T, accType, conv_dim, expand, 8>(blocks, threads, out, signal, blk_x, blk_y); break; case 9: conv2Helper<T, accType, conv_dim, expand, 9>(blocks, threads, out, signal, blk_x, blk_y); break; case 10: conv2Helper<T, accType, 
conv_dim, expand, 10>(blocks, threads, out, signal, blk_x, blk_y); break; case 11: conv2Helper<T, accType, conv_dim, expand, 11>(blocks, threads, out, signal, blk_x, blk_y); break; case 12: conv2Helper<T, accType, conv_dim, expand, 12>(blocks, threads, out, signal, blk_x, blk_y); break; case 13: conv2Helper<T, accType, conv_dim, expand, 13>(blocks, threads, out, signal, blk_x, blk_y); break; case 14: conv2Helper<T, accType, conv_dim, expand, 14>(blocks, threads, out, signal, blk_x, blk_y); break; case 15: conv2Helper<T, accType, conv_dim, expand, 15>(blocks, threads, out, signal, blk_x, blk_y); break; case 16: conv2Helper<T, accType, conv_dim, expand, 16>(blocks, threads, out, signal, blk_x, blk_y); break; case 17: conv2Helper<T, accType, conv_dim, expand, 17>(blocks, threads, out, signal, blk_x, blk_y); break; case 18: conv2Helper<T, accType, conv_dim, expand, 18>(blocks, threads, out, signal, blk_x, blk_y); break; case 19: conv2Helper<T, accType, conv_dim, expand, 19>(blocks, threads, out, signal, blk_x, blk_y); break; case 20: conv2Helper<T, accType, conv_dim, expand, 20>(blocks, threads, out, signal, blk_x, blk_y); break; case 21: conv2Helper<T, accType, conv_dim, expand, 21>(blocks, threads, out, signal, blk_x, blk_y); break; case 22: conv2Helper<T, accType, conv_dim, expand, 22>(blocks, threads, out, signal, blk_x, blk_y); break; case 23: conv2Helper<T, accType, conv_dim, expand, 23>(blocks, threads, out, signal, blk_x, blk_y); break; case 24: conv2Helper<T, accType, conv_dim, expand, 24>(blocks, threads, out, signal, blk_x, blk_y); break; case 25: conv2Helper<T, accType, conv_dim, expand, 25>(blocks, threads, out, signal, blk_x, blk_y); break; case 26: conv2Helper<T, accType, conv_dim, expand, 26>(blocks, threads, out, signal, blk_x, blk_y); break; case 27: conv2Helper<T, accType, conv_dim, expand, 27>(blocks, threads, out, signal, blk_x, blk_y); break; case 28: conv2Helper<T, accType, conv_dim, expand, 28>(blocks, threads, out, signal, blk_x, blk_y); break; 
case 29: conv2Helper<T, accType, conv_dim, expand, 29>(blocks, threads, out, signal, blk_x, blk_y); break; case 30: conv2Helper<T, accType, conv_dim, expand, 30>(blocks, threads, out, signal, blk_x, blk_y); break; case 31: conv2Helper<T, accType, conv_dim, expand, 31>(blocks, threads, out, signal, blk_x, blk_y); break; default: CUDA_NOT_SUPPORTED(); } POST_LAUNCH_CHECK(); } #define INSTANTIATE(T, accType) \ template void convolve2<T, accType, 0, true >(Param<T> out, CParam<T> signal, CParam<accType> filter); \ template void convolve2<T, accType, 0, false>(Param<T> out, CParam<T> signal, CParam<accType> filter); \ template void convolve2<T, accType, 1, true >(Param<T> out, CParam<T> signal, CParam<accType> filter); \ template void convolve2<T, accType, 1, false>(Param<T> out, CParam<T> signal, CParam<accType> filter); \ INSTANTIATE(cdouble, cdouble) INSTANTIATE(cfloat , cfloat) INSTANTIATE(double , double) INSTANTIATE(float , float) INSTANTIATE(uint , float) INSTANTIATE(int , float) INSTANTIATE(uchar , float) INSTANTIATE(char , float) INSTANTIATE(ushort , float) INSTANTIATE(short , float) INSTANTIATE(uintl , float) INSTANTIATE(intl , float) } }
4160c72e85bcb2123c6bbdb874ec66b66baff25d.cu
/******************************************************* * Copyright (c) 2014, ArrayFire * All rights reserved. * * This file is distributed under 3-clause BSD license. * The complete license agreement can be obtained at: * http://arrayfire.com/licenses/BSD-3-Clause ********************************************************/ #include <backend.hpp> #include <dispatch.hpp> #include <Param.hpp> #include <debug_cuda.hpp> #include <math.hpp> #include <convolve.hpp> namespace cuda { namespace kernel { static const int THREADS_X = 16; static const int THREADS_Y = 16; // below shared MAX_*_LEN's are calculated based on // a maximum shared memory configuration of 48KB per block // considering complex types as well static const int MAX_SCONV_FILTER_LEN = 31; // we shall declare the maximum size required of above all three cases // and re-use the same constant memory locations for every case __constant__ char sFilter[2*THREADS_Y*(2*(MAX_SCONV_FILTER_LEN-1)+THREADS_X)*sizeof(double)]; template<typename T, typename accType, int conv_dim, bool expand, int fLen> __global__ void convolve2_separable(Param<T> out, CParam<T> signal, int nBBS0, int nBBS1) { const int smem_len = (conv_dim==0 ? (THREADS_X+2*(fLen-1))* THREADS_Y: (THREADS_Y+2*(fLen-1))* THREADS_X); __shared__ T shrdMem[smem_len]; const int radius = fLen-1; const int padding = 2*radius; const int s0 = signal.strides[0]; const int s1 = signal.strides[1]; const int d0 = signal.dims[0]; const int d1 = signal.dims[1]; const int shrdLen = THREADS_X + (conv_dim==0 ? 
padding : 0); unsigned b2 = blockIdx.x/nBBS0; unsigned b3 = blockIdx.y/nBBS1; T *dst = (T *)out.ptr + (b2*out.strides[2] + b3*out.strides[3]); const T *src = (const T *)signal.ptr + (b2*signal.strides[2] + b3*signal.strides[3]); const accType *impulse = (const accType *)sFilter; int lx = threadIdx.x; int ly = threadIdx.y; int ox = THREADS_X * (blockIdx.x-b2*nBBS0) + lx; int oy = THREADS_Y * (blockIdx.y-b3*nBBS1) + ly; int gx = ox; int gy = oy; // below if-else statement is based on template parameter if (conv_dim==0) { gx += (expand ? 0 : fLen>>1); int endX = ((fLen-1)<<1) + THREADS_X; #pragma unroll for(int lx = threadIdx.x, glb_x = gx; lx<endX; lx += THREADS_X, glb_x += THREADS_X) { int i = glb_x - radius; int j = gy; bool is_i = i>=0 && i<d0; bool is_j = j>=0 && j<d1; shrdMem[ly*shrdLen+lx] = (is_i && is_j ? src[i*s0 + j*s1] : scalar<T>(0)); } } else if (conv_dim==1) { gy += (expand ? 0 : fLen>>1); int endY = ((fLen-1)<<1) + THREADS_Y; #pragma unroll for(int ly = threadIdx.y, glb_y = gy; ly<endY; ly += THREADS_Y, glb_y += THREADS_Y) { int i = gx; int j = glb_y - radius; bool is_i = i>=0 && i<d0; bool is_j = j>=0 && j<d1; shrdMem[ly*shrdLen+lx] = (is_i && is_j ? src[i*s0 + j*s1] : scalar<T>(0)); } } __syncthreads(); if (ox<out.dims[0] && oy<out.dims[1]) { // below conditional statement is based on template parameter int i = (conv_dim==0 ? lx : ly) + radius; accType accum = scalar<accType>(0); #pragma unroll for(int f=0; f<fLen; ++f) { accType f_val = impulse[f]; // below conditional statement is based on template parameter int s_idx = (conv_dim==0 ? 
(ly*shrdLen+(i-f)) : ((i-f)*shrdLen+lx)); T s_val = shrdMem[s_idx]; accum = accum + s_val*f_val; } dst[oy*out.strides[1]+ox] = (T)accum; } } template<typename T, typename aT, int cDim, bool expand, int f> void conv2Helper(dim3 blks, dim3 thrds, Param<T> out, CParam<T> sig, int nBBS0, int nBBS1) { CUDA_LAUNCH((convolve2_separable<T, aT, cDim, expand, f>), blks, thrds, out, sig, nBBS0, nBBS1); } template<typename T, typename accType, int conv_dim, bool expand> void convolve2(Param<T> out, CParam<T> signal, CParam<accType> filter) { int fLen = filter.dims[0] * filter.dims[1] * filter.dims[2] * filter.dims[3]; if(fLen > kernel::MAX_SCONV_FILTER_LEN) { // call upon fft CUDA_NOT_SUPPORTED(); } dim3 threads(THREADS_X, THREADS_Y); int blk_x = divup(out.dims[0], threads.x); int blk_y = divup(out.dims[1], threads.y); dim3 blocks(blk_x*signal.dims[2], blk_y*signal.dims[3]); // FIX ME: if the filter array is strided, direct copy of symbols // might cause issues CUDA_CHECK(cudaMemcpyToSymbolAsync(kernel::sFilter, filter.ptr, fLen*sizeof(accType), 0, cudaMemcpyDeviceToDevice, cuda::getActiveStream())); switch(fLen) { case 2: conv2Helper<T, accType, conv_dim, expand, 2>(blocks, threads, out, signal, blk_x, blk_y); break; case 3: conv2Helper<T, accType, conv_dim, expand, 3>(blocks, threads, out, signal, blk_x, blk_y); break; case 4: conv2Helper<T, accType, conv_dim, expand, 4>(blocks, threads, out, signal, blk_x, blk_y); break; case 5: conv2Helper<T, accType, conv_dim, expand, 5>(blocks, threads, out, signal, blk_x, blk_y); break; case 6: conv2Helper<T, accType, conv_dim, expand, 6>(blocks, threads, out, signal, blk_x, blk_y); break; case 7: conv2Helper<T, accType, conv_dim, expand, 7>(blocks, threads, out, signal, blk_x, blk_y); break; case 8: conv2Helper<T, accType, conv_dim, expand, 8>(blocks, threads, out, signal, blk_x, blk_y); break; case 9: conv2Helper<T, accType, conv_dim, expand, 9>(blocks, threads, out, signal, blk_x, blk_y); break; case 10: conv2Helper<T, accType, 
conv_dim, expand, 10>(blocks, threads, out, signal, blk_x, blk_y); break; case 11: conv2Helper<T, accType, conv_dim, expand, 11>(blocks, threads, out, signal, blk_x, blk_y); break; case 12: conv2Helper<T, accType, conv_dim, expand, 12>(blocks, threads, out, signal, blk_x, blk_y); break; case 13: conv2Helper<T, accType, conv_dim, expand, 13>(blocks, threads, out, signal, blk_x, blk_y); break; case 14: conv2Helper<T, accType, conv_dim, expand, 14>(blocks, threads, out, signal, blk_x, blk_y); break; case 15: conv2Helper<T, accType, conv_dim, expand, 15>(blocks, threads, out, signal, blk_x, blk_y); break; case 16: conv2Helper<T, accType, conv_dim, expand, 16>(blocks, threads, out, signal, blk_x, blk_y); break; case 17: conv2Helper<T, accType, conv_dim, expand, 17>(blocks, threads, out, signal, blk_x, blk_y); break; case 18: conv2Helper<T, accType, conv_dim, expand, 18>(blocks, threads, out, signal, blk_x, blk_y); break; case 19: conv2Helper<T, accType, conv_dim, expand, 19>(blocks, threads, out, signal, blk_x, blk_y); break; case 20: conv2Helper<T, accType, conv_dim, expand, 20>(blocks, threads, out, signal, blk_x, blk_y); break; case 21: conv2Helper<T, accType, conv_dim, expand, 21>(blocks, threads, out, signal, blk_x, blk_y); break; case 22: conv2Helper<T, accType, conv_dim, expand, 22>(blocks, threads, out, signal, blk_x, blk_y); break; case 23: conv2Helper<T, accType, conv_dim, expand, 23>(blocks, threads, out, signal, blk_x, blk_y); break; case 24: conv2Helper<T, accType, conv_dim, expand, 24>(blocks, threads, out, signal, blk_x, blk_y); break; case 25: conv2Helper<T, accType, conv_dim, expand, 25>(blocks, threads, out, signal, blk_x, blk_y); break; case 26: conv2Helper<T, accType, conv_dim, expand, 26>(blocks, threads, out, signal, blk_x, blk_y); break; case 27: conv2Helper<T, accType, conv_dim, expand, 27>(blocks, threads, out, signal, blk_x, blk_y); break; case 28: conv2Helper<T, accType, conv_dim, expand, 28>(blocks, threads, out, signal, blk_x, blk_y); break; 
case 29: conv2Helper<T, accType, conv_dim, expand, 29>(blocks, threads, out, signal, blk_x, blk_y); break; case 30: conv2Helper<T, accType, conv_dim, expand, 30>(blocks, threads, out, signal, blk_x, blk_y); break; case 31: conv2Helper<T, accType, conv_dim, expand, 31>(blocks, threads, out, signal, blk_x, blk_y); break; default: CUDA_NOT_SUPPORTED(); } POST_LAUNCH_CHECK(); } #define INSTANTIATE(T, accType) \ template void convolve2<T, accType, 0, true >(Param<T> out, CParam<T> signal, CParam<accType> filter); \ template void convolve2<T, accType, 0, false>(Param<T> out, CParam<T> signal, CParam<accType> filter); \ template void convolve2<T, accType, 1, true >(Param<T> out, CParam<T> signal, CParam<accType> filter); \ template void convolve2<T, accType, 1, false>(Param<T> out, CParam<T> signal, CParam<accType> filter); \ INSTANTIATE(cdouble, cdouble) INSTANTIATE(cfloat , cfloat) INSTANTIATE(double , double) INSTANTIATE(float , float) INSTANTIATE(uint , float) INSTANTIATE(int , float) INSTANTIATE(uchar , float) INSTANTIATE(char , float) INSTANTIATE(ushort , float) INSTANTIATE(short , float) INSTANTIATE(uintl , float) INSTANTIATE(intl , float) } }
580de221f6250ada07366251ee0bb79bd25ebe41.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hiprand/hiprand.h> #include <rocblas.h> #include "dropout_layer.h" #include "dark_cuda.h" #include "utils.h" __global__ void yoloswag420blazeit360noscope(float *input, int size, float *rand, float prob, float scale) { int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(id < size) input[id] = (rand[id] < prob) ? 0 : input[id]*scale; } void forward_dropout_layer_gpu(dropout_layer layer, network_state state) { if (!state.train) return; int iteration_num = (*state.net.seen) / (state.net.batch*state.net.subdivisions); //if (iteration_num < state.net.burn_in) return; int size = layer.inputs*layer.batch; cuda_random(layer.rand_gpu, size); /* int i; for(i = 0; i < size; ++i){ layer.rand[i] = rand_uniform(); } cuda_push_array(layer.rand_gpu, layer.rand, size); */ hipLaunchKernelGGL(( yoloswag420blazeit360noscope), dim3(cuda_gridsize(size)), dim3(BLOCK), 0, get_cuda_stream() , state.input, size, layer.rand_gpu, layer.probability, layer.scale); CHECK_CUDA(hipPeekAtLastError()); } void backward_dropout_layer_gpu(dropout_layer layer, network_state state) { if(!state.delta) return; int iteration_num = (*state.net.seen) / (state.net.batch*state.net.subdivisions); //if (iteration_num < state.net.burn_in) return; int size = layer.inputs*layer.batch; hipLaunchKernelGGL(( yoloswag420blazeit360noscope), dim3(cuda_gridsize(size)), dim3(BLOCK), 0, get_cuda_stream() , state.delta, size, layer.rand_gpu, layer.probability, layer.scale); CHECK_CUDA(hipPeekAtLastError()); }
580de221f6250ada07366251ee0bb79bd25ebe41.cu
#include <cuda_runtime.h> #include <curand.h> #include <cublas_v2.h> #include "dropout_layer.h" #include "dark_cuda.h" #include "utils.h" __global__ void yoloswag420blazeit360noscope(float *input, int size, float *rand, float prob, float scale) { int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(id < size) input[id] = (rand[id] < prob) ? 0 : input[id]*scale; } void forward_dropout_layer_gpu(dropout_layer layer, network_state state) { if (!state.train) return; int iteration_num = (*state.net.seen) / (state.net.batch*state.net.subdivisions); //if (iteration_num < state.net.burn_in) return; int size = layer.inputs*layer.batch; cuda_random(layer.rand_gpu, size); /* int i; for(i = 0; i < size; ++i){ layer.rand[i] = rand_uniform(); } cuda_push_array(layer.rand_gpu, layer.rand, size); */ yoloswag420blazeit360noscope<<<cuda_gridsize(size), BLOCK, 0, get_cuda_stream() >>>(state.input, size, layer.rand_gpu, layer.probability, layer.scale); CHECK_CUDA(cudaPeekAtLastError()); } void backward_dropout_layer_gpu(dropout_layer layer, network_state state) { if(!state.delta) return; int iteration_num = (*state.net.seen) / (state.net.batch*state.net.subdivisions); //if (iteration_num < state.net.burn_in) return; int size = layer.inputs*layer.batch; yoloswag420blazeit360noscope<<<cuda_gridsize(size), BLOCK, 0, get_cuda_stream() >>>(state.delta, size, layer.rand_gpu, layer.probability, layer.scale); CHECK_CUDA(cudaPeekAtLastError()); }
4b1bce48e9b682c19d70cde040102ea31ec650df.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include "common.h" #include "naive.h" namespace StreamCompaction { namespace Naive { using StreamCompaction::Common::PerformanceTimer; PerformanceTimer& timer() { static PerformanceTimer timer; return timer; } // TODO: __global__ /** * Performs prefix-sum (aka scan) on idata, storing the result into odata. */ void scan(int n, int *odata, const int *idata) { int* dev_data1, * dev_data2; hipMalloc((void**)&dev_data1, n * sizeof(int)); checkCUDAError("hipMalloc dev_data1 failed!"); hipMalloc((void**)&dev_data2, n * sizeof(int)); checkCUDAError("hipMalloc dev_data2 failed!"); hipMemcpy(dev_data1, idata, sizeof(int) * n, hipMemcpyHostToDevice); hipMemcpy(dev_data2, idata, sizeof(int), hipMemcpyHostToDevice); dim3 fullBlocksPerGrid((n + blockSize - 1) / blockSize); timer().startGpuTimer(); for (int i = 0; i < ilog2ceil(n); i++) { kernParallelScan << <fullBlocksPerGrid, blockSize >> > (n, i, dev_data1, dev_data2); checkCUDAError("kernParallelScan failed!"); hipDeviceSynchronize(); int* temp = dev_data1; dev_data1 = dev_data2; dev_data2 = temp; } kernInclusiveToExclusive << <fullBlocksPerGrid, blockSize >> > (n, dev_data1, dev_data2); checkCUDAError("kernInclusiveToExclusive failed!"); timer().endGpuTimer(); hipMemcpy(odata, dev_data2, sizeof(int) * n, hipMemcpyDeviceToHost); hipFree(dev_data1); hipFree(dev_data2); } __global__ void kernParallelScan(int n, int level, int *src, int *dest) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < n) { if (index >= (1 << level)) { dest[index] = src[index - (1 << level)] + src[index]; } else { dest[index] = src[index]; } } } __global__ void kernInclusiveToExclusive(int n, int *src, int *dest) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index == 0) { dest[index] = 0; } else if (index < n){ dest[index] = src[index - 1]; } } } }
4b1bce48e9b682c19d70cde040102ea31ec650df.cu
#include <cuda.h> #include <cuda_runtime.h> #include "common.h" #include "naive.h" namespace StreamCompaction { namespace Naive { using StreamCompaction::Common::PerformanceTimer; PerformanceTimer& timer() { static PerformanceTimer timer; return timer; } // TODO: __global__ /** * Performs prefix-sum (aka scan) on idata, storing the result into odata. */ void scan(int n, int *odata, const int *idata) { int* dev_data1, * dev_data2; cudaMalloc((void**)&dev_data1, n * sizeof(int)); checkCUDAError("cudaMalloc dev_data1 failed!"); cudaMalloc((void**)&dev_data2, n * sizeof(int)); checkCUDAError("cudaMalloc dev_data2 failed!"); cudaMemcpy(dev_data1, idata, sizeof(int) * n, cudaMemcpyHostToDevice); cudaMemcpy(dev_data2, idata, sizeof(int), cudaMemcpyHostToDevice); dim3 fullBlocksPerGrid((n + blockSize - 1) / blockSize); timer().startGpuTimer(); for (int i = 0; i < ilog2ceil(n); i++) { kernParallelScan << <fullBlocksPerGrid, blockSize >> > (n, i, dev_data1, dev_data2); checkCUDAError("kernParallelScan failed!"); cudaDeviceSynchronize(); int* temp = dev_data1; dev_data1 = dev_data2; dev_data2 = temp; } kernInclusiveToExclusive << <fullBlocksPerGrid, blockSize >> > (n, dev_data1, dev_data2); checkCUDAError("kernInclusiveToExclusive failed!"); timer().endGpuTimer(); cudaMemcpy(odata, dev_data2, sizeof(int) * n, cudaMemcpyDeviceToHost); cudaFree(dev_data1); cudaFree(dev_data2); } __global__ void kernParallelScan(int n, int level, int *src, int *dest) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < n) { if (index >= (1 << level)) { dest[index] = src[index - (1 << level)] + src[index]; } else { dest[index] = src[index]; } } } __global__ void kernInclusiveToExclusive(int n, int *src, int *dest) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index == 0) { dest[index] = 0; } else if (index < n){ dest[index] = src[index - 1]; } } } }
852bfc7808047acb62969d38a33731c5cfbe0cc6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * GridTools * * Copyright (c) 2014-2021, ETH Zurich * All rights reserved. * * Please, refer to the LICENSE file in the root directory. * SPDX-License-Identifier: BSD-3-Clause */ #include <gtest/gtest.h> #include <gridtools/common/cuda_util.hpp> #include <gridtools/common/integral_constant.hpp> #include <gridtools/meta.hpp> #include <gridtools/sid/allocator.hpp> #include <cuda_test_helper.hpp> namespace gridtools { namespace { template <typename PtrHolder> __device__ bool check_allocation(PtrHolder ptr_holder) { auto &ref = *ptr_holder(); ref = 1.; return ref == 1.; } template <typename PtrHolder> __global__ void test_allocated(PtrHolder testee, bool *result) {} TEST(simple_device_memory_allocator, test) { sid::device::allocator<GT_INTEGRAL_CONSTANT_FROM_VALUE(&cuda_util::cuda_malloc<char[]>)> alloc; auto ptr_holder = allocate(alloc, meta::lazy::id<double>{}, 1); auto result = gridtools::on_device::exec( GT_MAKE_INTEGRAL_CONSTANT_FROM_VALUE(&check_allocation<decltype(ptr_holder)>), ptr_holder); ASSERT_TRUE(result); } } // namespace } // namespace gridtools
852bfc7808047acb62969d38a33731c5cfbe0cc6.cu
/* * GridTools * * Copyright (c) 2014-2021, ETH Zurich * All rights reserved. * * Please, refer to the LICENSE file in the root directory. * SPDX-License-Identifier: BSD-3-Clause */ #include <gtest/gtest.h> #include <gridtools/common/cuda_util.hpp> #include <gridtools/common/integral_constant.hpp> #include <gridtools/meta.hpp> #include <gridtools/sid/allocator.hpp> #include <cuda_test_helper.hpp> namespace gridtools { namespace { template <typename PtrHolder> __device__ bool check_allocation(PtrHolder ptr_holder) { auto &ref = *ptr_holder(); ref = 1.; return ref == 1.; } template <typename PtrHolder> __global__ void test_allocated(PtrHolder testee, bool *result) {} TEST(simple_device_memory_allocator, test) { sid::device::allocator<GT_INTEGRAL_CONSTANT_FROM_VALUE(&cuda_util::cuda_malloc<char[]>)> alloc; auto ptr_holder = allocate(alloc, meta::lazy::id<double>{}, 1); auto result = gridtools::on_device::exec( GT_MAKE_INTEGRAL_CONSTANT_FROM_VALUE(&check_allocation<decltype(ptr_holder)>), ptr_holder); ASSERT_TRUE(result); } } // namespace } // namespace gridtools
a980d5faadf76c159b884522505985a69f49d7bb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Cell(i, j) = cell[i + j * n] */ #include<stdio.h> #include<math.h> #include<stdlib.h> #include<string.h> #include<time.h> #include"mt.h" __device__ __constant__ int d_Np; __device__ __constant__ double d_L; #define NUM_BLOCK 32 #define NUM_THREAD 32 #define PI 3.1415926535897932384626433 //host functions---------------------------------------------------------------- void init_configuration(double *h_x, double *h_y, double h_L, int h_Np) { int i; for(i = 0; i < h_Np; i += 1) { h_x[i] = h_L * genrand_real2(); h_y[i] = h_L * genrand_real2(); } } void h_check_active(double *h_x, double *h_y, double h_L, int h_Np, int *h_active) { int i, j; double dx, dy, dr_square; double diameter_square = 1.0; for(i = 0; i < h_Np; i += 1) { h_active[i] = 0; } for(i = 0; i < h_Np; i += 1) { for(j = 0; j < i; j += 1) { dx = h_x[i] - h_x[j]; if(dx > 0.5 * h_L) { dx -= h_L; } else if(dx < -0.5 * h_L) { dx += h_L; } dy = h_y[i] - h_y[j]; if(dy > 0.5 * h_L) { dy -= h_L; } else if(dy < -0.5 * h_L) { dy += h_L; } dr_square = dx * dx + dy * dy; if(dr_square < diameter_square) { h_active[i] = 1; h_active[j] = 1; } } } } void h_check_active_with_list(double *h_x, double *h_y, double h_L, int h_Np, int *h_active, int *h_cell_list, int cell_per_axis, int N_per_cell) { int i, j; int x_c, y_c; int cell_id, N_in_cell; int pair_id; double dx, dy, dr_square; double diameter_square = 1.0; for(i = 0; i < h_Np; i += 1) { x_c = (int)(h_x[i] * (double)cell_per_axis / h_L); y_c = (int)(h_y[i] * (double)cell_per_axis / h_L); cell_id = x_c + y_c * cell_per_axis; N_in_cell = h_cell_list[cell_id * N_per_cell]; for(j = 1; j <= N_in_cell; j += 1) { pair_id = h_cell_list[cell_id * N_per_cell + j]; if(i == pair_id) {continue;} dx = h_x[i] - h_x[pair_id]; if(dx < -0.5 * h_L) { dx += h_L; } else if(dx > 0.5 * h_L) { dx -= h_L; } dy = h_y[i] - h_y[pair_id]; if(dy < -0.5 * h_L) { dy += h_L; } else if(dy > 0.5 * h_L) { dy -= 
h_L; } dr_square = dx * dx + dy * dy; if(diameter_square > dr_square) { h_active[i] = 1; } } } } void h_DBG(int *A, int *B, int dim) { int i; double res = 0.0; for(i = 0; i < dim; i += 1) { res += (A[i] - B[i]) * (A[i] - B[i]); } printf("res %f\n", res); } int h_make_cell_list(double *h_x, double *h_y, double h_L, int h_Np, int *h_cell_list, int cell_per_axis, int N_per_cell) { //I dont know which is better modulo (%)calculation and if(){}elseif(){}else{} int i, j, k; int x_cell, y_cell; int cell_id; int cell_list_size = cell_per_axis * cell_per_axis * N_per_cell; int contained_num; //init cell list for(i = 0; i < cell_list_size; i += 1) { h_cell_list[i] = 0; } //make cell list for(i = 0; i < h_Np; i += 1) { x_cell = (int)(h_x[i] * (double)cell_per_axis / h_L); y_cell = (int)(h_y[i] * (double)cell_per_axis / h_L); for(j = x_cell - 1; j <= x_cell + 1; j += 1) { for(k = y_cell - 1; k <= y_cell + 1; k += 1) { cell_id = ((j + cell_per_axis) % cell_per_axis) + ((k + cell_per_axis) % cell_per_axis) * cell_per_axis; h_cell_list[cell_id * N_per_cell] += 1; contained_num = h_cell_list[cell_id * N_per_cell]; if(contained_num >= N_per_cell) { printf("too many particles in a cell\n"); return 1; } h_cell_list[cell_id * N_per_cell + contained_num] = i; } } } return 0; } //device functions-------------------------------------------------------------- __global__ void d_check_active(double *d_x, double *d_y, int *d_active) { int i_global; int i, j; int Np = d_Np; double l = 0.5 * d_L; double dx, dy ,dr_square; double diameter_square = 1.0; i_global = blockDim.x * blockIdx.x + threadIdx.x; for(i = i_global; i < Np; i += NUM_BLOCK * NUM_THREAD) { d_active[i] = 0; for(j = 0; j < Np; j += 1) { if(j != i) { dx = d_x[i] - d_x[j]; if(dx > l) { dx -= d_L; } else if(dx < -l) { dx += d_L; } dy = d_y[i] - d_y[j]; if(dy > l) { dy -= d_L; } else if(dy < -l) { dy += d_L; } dr_square = dx * dx + dy * dy; if(dr_square < diameter_square) { d_active[i] = 1; break; } } } } } __global__ void 
d_check_active_with_list(double *d_x, double *d_y, int *d_active, int *d_cell_list, int cell_per_axis, int N_per_cell) { //d_L and d_Np are already declared as __global__ const int i, j; int x_c, y_c; int cell_id, N_in_cell; int pair_id; int i_global; double dx, dy, dr_square; double diameter_square = 1.0; i_global = blockDim.x * blockIdx.x + threadIdx.x; if(i_global < d_Np) { d_active[i_global] = 0; x_c = (int)(d_x[i_global] * (double)cell_per_axis / d_L); y_c = (int)(d_y[i_global] * (double)cell_per_axis / d_L); cell_id = x_c + y_c * cell_per_axis; N_in_cell = d_cell_list[cell_id * N_per_cell]; for(j = 1; j <= N_in_cell; j += 1) { pair_id = d_cell_list[cell_id * N_per_cell + j]; if(i_global == pair_id) {continue;} dx = d_x[i_global] - d_x[pair_id]; dy = d_y[i_global] - d_y[pair_id]; if(dx < -0.5 * d_L) { dx += d_L; } else if(dx > 0.5 * d_L) { dx -= d_L; } if(dy < -0.5 * d_L) { dy += d_L; } else if(dy > 0.5 * d_L) { dy -= d_L; } dr_square = dx * dx + dy * dy; if(diameter_square > dr_square) { d_active[i_global] = 1; } } } } __global__ void d_make_cell_list(double *d_x, double *d_y, int *d_cell_list, int *d_belonging_cell, int cell_per_axis, int N_per_cell) { //this func needs equal to or more than Np threads int i, j, k, l; int i_global; int x_cell, y_cell; int cell_id; i_global = blockDim.x * blockIdx.x + threadIdx.x; if(i_global < d_Np) { x_cell = (int)(d_x[i_global] * (double)cell_per_axis / d_L); y_cell = (int)(d_y[i_global] * (double)cell_per_axis / d_L); cell_id = x_cell + y_cell * cell_per_axis; d_belonging_cell[i_global] = cell_id; } __syncthreads(); if(i_global < cell_per_axis * cell_per_axis) { d_cell_list[i_global * N_per_cell] = 0; x_cell = i_global % cell_per_axis; y_cell = i_global / cell_per_axis; for(j = x_cell - 1; j <= x_cell + 1; j += 1) { for(k = y_cell - 1; k <= y_cell + 1; k += 1) { cell_id = ((j + cell_per_axis) % cell_per_axis) + ((k + cell_per_axis) % cell_per_axis) * cell_per_axis; for(l = 0; l < d_Np; l += 1) { if(d_belonging_cell[l] == 
cell_id) { d_cell_list[i_global * N_per_cell] += 1; d_cell_list[i_global * N_per_cell + d_cell_list[i_global * N_per_cell] ] = l; } } } } } } //------------------------------------------------------------------------------ int main(void) { clock_t start, end; int cell_per_axis; int N_per_cell; //variables in host double *h_x; double *h_y; double h_L; int *h_active; int *h_check_result; int h_Np; int *h_cell_list; int *h_active_DBG; //variables in device double *d_x; double *d_y; int *d_active; int *d_cell_list; int *d_belonging_cell; //initialize //init_genrand(19970303); init_genrand((int)time(NULL)); //--set variable h_Np = 18000; h_L = 140.0; cell_per_axis = (int)(h_L / 11.0) + 1;//renew list every 5 steps N_per_cell = (h_Np * 13) / (cell_per_axis * cell_per_axis); printf("cell per axis:%d N_per_cell:%d\n", cell_per_axis, N_per_cell); hipMemcpyToSymbol(d_Np, &h_Np, sizeof(int), 0, hipMemcpyHostToDevice); hipMemcpyToSymbol(d_L, &h_L, sizeof(double), 0, hipMemcpyHostToDevice); //--allocate memory //----memory on host hipHostMalloc((void **)&h_x, h_Np * sizeof(double), hipHostMallocMapped); hipHostMalloc((void **)&h_y, h_Np * sizeof(double), hipHostMallocMapped); hipHostMalloc((void **)&h_active, h_Np * sizeof(int), hipHostMallocMapped); hipHostMalloc((void **)&h_check_result, h_Np * sizeof(int), hipHostMallocMapped); hipHostMalloc((void **)&h_cell_list, cell_per_axis * cell_per_axis * N_per_cell * sizeof(int), hipHostMallocMapped); h_active_DBG = (int *)calloc(h_Np, sizeof(int)); //----memory on device hipMalloc((void **)&d_x, h_Np * sizeof(double)); hipMalloc((void **)&d_y, h_Np * sizeof(double)); hipMalloc((void **)&d_active, h_Np * sizeof(int)); hipMalloc((void **)&d_cell_list, cell_per_axis * cell_per_axis * N_per_cell * sizeof(int)); hipMalloc((void **)&d_belonging_cell, h_Np * sizeof(int)); //--place particles init_configuration(h_x, h_y, h_L, h_Np); hipMemcpy(d_x, h_x, h_Np * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(d_y, h_y, h_Np * sizeof(double), 
hipMemcpyHostToDevice); //--make first acriveness array //----made in host start = clock(); h_check_active(h_x, h_y, h_L, h_Np, h_active); end = clock(); //printf("straighforward:%d [ms]\n\n", (int)((end - start)*1000 /CLOCKS_PER_SEC )); printf("straighforward:%d\n\n", (int)(end - start)); //----made in host with cell list start = clock(); h_make_cell_list(h_x, h_y, h_L, h_Np, h_cell_list, cell_per_axis, N_per_cell); h_check_active_with_list(h_x, h_y, h_L, h_Np, h_active_DBG, h_cell_list, cell_per_axis, N_per_cell); end = clock(); printf("host cell list:%d\n", (int)(end - start)); h_DBG(h_active, h_active_DBG, h_Np); printf("\n"); //----made in device global start = clock(); hipMemcpy(d_x, h_x, h_Np * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(d_y, h_y, h_Np * sizeof(double), hipMemcpyHostToDevice); hipLaunchKernelGGL(( d_check_active), dim3(NUM_BLOCK), dim3(NUM_THREAD), 0, 0, d_x, d_y, d_active); hipDeviceSynchronize(); hipMemcpy(h_check_result, d_active, h_Np * sizeof(int), hipMemcpyDeviceToHost); end = clock(); printf("gpu:%d\n", (int)(end - start)); h_DBG(h_active, h_check_result, h_Np); printf("\n"); //----made in device global with list, list is made in device start = clock(); hipMemcpy(d_x, h_x, h_Np * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(d_y, h_y, h_Np * sizeof(double), hipMemcpyHostToDevice); hipLaunchKernelGGL(( d_make_cell_list), dim3(1), dim3(h_Np), 0, 0, d_x, d_y, d_cell_list, d_belonging_cell, cell_per_axis, N_per_cell); hipDeviceSynchronize(); hipLaunchKernelGGL(( d_check_active_with_list), dim3(1), dim3(h_Np), 0, 0, d_x, d_y, d_active, d_cell_list, cell_per_axis, N_per_cell); hipDeviceSynchronize(); hipMemcpy(h_check_result, d_active, h_Np * sizeof(int), hipMemcpyDeviceToHost); end = clock(); printf("gpu with gpu list:%d\n", (int)(end - start)); h_DBG(h_active, h_check_result, h_Np); printf("\n"); //time loop //--move particles //--check activeness //--(sometimes) make new cell list //finalize //--free memory hipHostFree(h_x); 
hipHostFree(h_y); hipHostFree(h_active); hipHostFree(h_check_result); hipHostFree(h_cell_list); free(h_active_DBG); hipFree(d_x); hipFree(d_y); hipFree(d_active); hipFree(d_cell_list); hipFree(d_belonging_cell); return 0; }
a980d5faadf76c159b884522505985a69f49d7bb.cu
/* * Cell(i, j) = cell[i + j * n] */ #include<stdio.h> #include<math.h> #include<stdlib.h> #include<string.h> #include<time.h> #include"mt.h" __device__ __constant__ int d_Np; __device__ __constant__ double d_L; #define NUM_BLOCK 32 #define NUM_THREAD 32 #define PI 3.1415926535897932384626433 //host functions---------------------------------------------------------------- void init_configuration(double *h_x, double *h_y, double h_L, int h_Np) { int i; for(i = 0; i < h_Np; i += 1) { h_x[i] = h_L * genrand_real2(); h_y[i] = h_L * genrand_real2(); } } void h_check_active(double *h_x, double *h_y, double h_L, int h_Np, int *h_active) { int i, j; double dx, dy, dr_square; double diameter_square = 1.0; for(i = 0; i < h_Np; i += 1) { h_active[i] = 0; } for(i = 0; i < h_Np; i += 1) { for(j = 0; j < i; j += 1) { dx = h_x[i] - h_x[j]; if(dx > 0.5 * h_L) { dx -= h_L; } else if(dx < -0.5 * h_L) { dx += h_L; } dy = h_y[i] - h_y[j]; if(dy > 0.5 * h_L) { dy -= h_L; } else if(dy < -0.5 * h_L) { dy += h_L; } dr_square = dx * dx + dy * dy; if(dr_square < diameter_square) { h_active[i] = 1; h_active[j] = 1; } } } } void h_check_active_with_list(double *h_x, double *h_y, double h_L, int h_Np, int *h_active, int *h_cell_list, int cell_per_axis, int N_per_cell) { int i, j; int x_c, y_c; int cell_id, N_in_cell; int pair_id; double dx, dy, dr_square; double diameter_square = 1.0; for(i = 0; i < h_Np; i += 1) { x_c = (int)(h_x[i] * (double)cell_per_axis / h_L); y_c = (int)(h_y[i] * (double)cell_per_axis / h_L); cell_id = x_c + y_c * cell_per_axis; N_in_cell = h_cell_list[cell_id * N_per_cell]; for(j = 1; j <= N_in_cell; j += 1) { pair_id = h_cell_list[cell_id * N_per_cell + j]; if(i == pair_id) {continue;} dx = h_x[i] - h_x[pair_id]; if(dx < -0.5 * h_L) { dx += h_L; } else if(dx > 0.5 * h_L) { dx -= h_L; } dy = h_y[i] - h_y[pair_id]; if(dy < -0.5 * h_L) { dy += h_L; } else if(dy > 0.5 * h_L) { dy -= h_L; } dr_square = dx * dx + dy * dy; if(diameter_square > dr_square) { h_active[i] = 1; } 
} } } void h_DBG(int *A, int *B, int dim) { int i; double res = 0.0; for(i = 0; i < dim; i += 1) { res += (A[i] - B[i]) * (A[i] - B[i]); } printf("res %f\n", res); } int h_make_cell_list(double *h_x, double *h_y, double h_L, int h_Np, int *h_cell_list, int cell_per_axis, int N_per_cell) { //I dont know which is better modulo (%)calculation and if(){}elseif(){}else{} int i, j, k; int x_cell, y_cell; int cell_id; int cell_list_size = cell_per_axis * cell_per_axis * N_per_cell; int contained_num; //init cell list for(i = 0; i < cell_list_size; i += 1) { h_cell_list[i] = 0; } //make cell list for(i = 0; i < h_Np; i += 1) { x_cell = (int)(h_x[i] * (double)cell_per_axis / h_L); y_cell = (int)(h_y[i] * (double)cell_per_axis / h_L); for(j = x_cell - 1; j <= x_cell + 1; j += 1) { for(k = y_cell - 1; k <= y_cell + 1; k += 1) { cell_id = ((j + cell_per_axis) % cell_per_axis) + ((k + cell_per_axis) % cell_per_axis) * cell_per_axis; h_cell_list[cell_id * N_per_cell] += 1; contained_num = h_cell_list[cell_id * N_per_cell]; if(contained_num >= N_per_cell) { printf("too many particles in a cell\n"); return 1; } h_cell_list[cell_id * N_per_cell + contained_num] = i; } } } return 0; } //device functions-------------------------------------------------------------- __global__ void d_check_active(double *d_x, double *d_y, int *d_active) { int i_global; int i, j; int Np = d_Np; double l = 0.5 * d_L; double dx, dy ,dr_square; double diameter_square = 1.0; i_global = blockDim.x * blockIdx.x + threadIdx.x; for(i = i_global; i < Np; i += NUM_BLOCK * NUM_THREAD) { d_active[i] = 0; for(j = 0; j < Np; j += 1) { if(j != i) { dx = d_x[i] - d_x[j]; if(dx > l) { dx -= d_L; } else if(dx < -l) { dx += d_L; } dy = d_y[i] - d_y[j]; if(dy > l) { dy -= d_L; } else if(dy < -l) { dy += d_L; } dr_square = dx * dx + dy * dy; if(dr_square < diameter_square) { d_active[i] = 1; break; } } } } } __global__ void d_check_active_with_list(double *d_x, double *d_y, int *d_active, int *d_cell_list, int 
cell_per_axis, int N_per_cell) { //d_L and d_Np are already declared as __global__ const int i, j; int x_c, y_c; int cell_id, N_in_cell; int pair_id; int i_global; double dx, dy, dr_square; double diameter_square = 1.0; i_global = blockDim.x * blockIdx.x + threadIdx.x; if(i_global < d_Np) { d_active[i_global] = 0; x_c = (int)(d_x[i_global] * (double)cell_per_axis / d_L); y_c = (int)(d_y[i_global] * (double)cell_per_axis / d_L); cell_id = x_c + y_c * cell_per_axis; N_in_cell = d_cell_list[cell_id * N_per_cell]; for(j = 1; j <= N_in_cell; j += 1) { pair_id = d_cell_list[cell_id * N_per_cell + j]; if(i_global == pair_id) {continue;} dx = d_x[i_global] - d_x[pair_id]; dy = d_y[i_global] - d_y[pair_id]; if(dx < -0.5 * d_L) { dx += d_L; } else if(dx > 0.5 * d_L) { dx -= d_L; } if(dy < -0.5 * d_L) { dy += d_L; } else if(dy > 0.5 * d_L) { dy -= d_L; } dr_square = dx * dx + dy * dy; if(diameter_square > dr_square) { d_active[i_global] = 1; } } } } __global__ void d_make_cell_list(double *d_x, double *d_y, int *d_cell_list, int *d_belonging_cell, int cell_per_axis, int N_per_cell) { //this func needs equal to or more than Np threads int i, j, k, l; int i_global; int x_cell, y_cell; int cell_id; i_global = blockDim.x * blockIdx.x + threadIdx.x; if(i_global < d_Np) { x_cell = (int)(d_x[i_global] * (double)cell_per_axis / d_L); y_cell = (int)(d_y[i_global] * (double)cell_per_axis / d_L); cell_id = x_cell + y_cell * cell_per_axis; d_belonging_cell[i_global] = cell_id; } __syncthreads(); if(i_global < cell_per_axis * cell_per_axis) { d_cell_list[i_global * N_per_cell] = 0; x_cell = i_global % cell_per_axis; y_cell = i_global / cell_per_axis; for(j = x_cell - 1; j <= x_cell + 1; j += 1) { for(k = y_cell - 1; k <= y_cell + 1; k += 1) { cell_id = ((j + cell_per_axis) % cell_per_axis) + ((k + cell_per_axis) % cell_per_axis) * cell_per_axis; for(l = 0; l < d_Np; l += 1) { if(d_belonging_cell[l] == cell_id) { d_cell_list[i_global * N_per_cell] += 1; d_cell_list[i_global * N_per_cell + 
d_cell_list[i_global * N_per_cell] ] = l; } } } } } } //------------------------------------------------------------------------------ int main(void) { clock_t start, end; int cell_per_axis; int N_per_cell; //variables in host double *h_x; double *h_y; double h_L; int *h_active; int *h_check_result; int h_Np; int *h_cell_list; int *h_active_DBG; //variables in device double *d_x; double *d_y; int *d_active; int *d_cell_list; int *d_belonging_cell; //initialize //init_genrand(19970303); init_genrand((int)time(NULL)); //--set variable h_Np = 18000; h_L = 140.0; cell_per_axis = (int)(h_L / 11.0) + 1;//renew list every 5 steps N_per_cell = (h_Np * 13) / (cell_per_axis * cell_per_axis); printf("cell per axis:%d N_per_cell:%d\n", cell_per_axis, N_per_cell); cudaMemcpyToSymbol(d_Np, &h_Np, sizeof(int), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(d_L, &h_L, sizeof(double), 0, cudaMemcpyHostToDevice); //--allocate memory //----memory on host cudaHostAlloc((void **)&h_x, h_Np * sizeof(double), cudaHostAllocMapped); cudaHostAlloc((void **)&h_y, h_Np * sizeof(double), cudaHostAllocMapped); cudaHostAlloc((void **)&h_active, h_Np * sizeof(int), cudaHostAllocMapped); cudaHostAlloc((void **)&h_check_result, h_Np * sizeof(int), cudaHostAllocMapped); cudaHostAlloc((void **)&h_cell_list, cell_per_axis * cell_per_axis * N_per_cell * sizeof(int), cudaHostAllocMapped); h_active_DBG = (int *)calloc(h_Np, sizeof(int)); //----memory on device cudaMalloc((void **)&d_x, h_Np * sizeof(double)); cudaMalloc((void **)&d_y, h_Np * sizeof(double)); cudaMalloc((void **)&d_active, h_Np * sizeof(int)); cudaMalloc((void **)&d_cell_list, cell_per_axis * cell_per_axis * N_per_cell * sizeof(int)); cudaMalloc((void **)&d_belonging_cell, h_Np * sizeof(int)); //--place particles init_configuration(h_x, h_y, h_L, h_Np); cudaMemcpy(d_x, h_x, h_Np * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(d_y, h_y, h_Np * sizeof(double), cudaMemcpyHostToDevice); //--make first acriveness array //----made in host 
start = clock(); h_check_active(h_x, h_y, h_L, h_Np, h_active); end = clock(); //printf("straighforward:%d [ms]\n\n", (int)((end - start)*1000 /CLOCKS_PER_SEC )); printf("straighforward:%d\n\n", (int)(end - start)); //----made in host with cell list start = clock(); h_make_cell_list(h_x, h_y, h_L, h_Np, h_cell_list, cell_per_axis, N_per_cell); h_check_active_with_list(h_x, h_y, h_L, h_Np, h_active_DBG, h_cell_list, cell_per_axis, N_per_cell); end = clock(); printf("host cell list:%d\n", (int)(end - start)); h_DBG(h_active, h_active_DBG, h_Np); printf("\n"); //----made in device global start = clock(); cudaMemcpy(d_x, h_x, h_Np * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(d_y, h_y, h_Np * sizeof(double), cudaMemcpyHostToDevice); d_check_active<<<NUM_BLOCK, NUM_THREAD>>>(d_x, d_y, d_active); cudaDeviceSynchronize(); cudaMemcpy(h_check_result, d_active, h_Np * sizeof(int), cudaMemcpyDeviceToHost); end = clock(); printf("gpu:%d\n", (int)(end - start)); h_DBG(h_active, h_check_result, h_Np); printf("\n"); //----made in device global with list, list is made in device start = clock(); cudaMemcpy(d_x, h_x, h_Np * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(d_y, h_y, h_Np * sizeof(double), cudaMemcpyHostToDevice); d_make_cell_list<<<1, h_Np>>>(d_x, d_y, d_cell_list, d_belonging_cell, cell_per_axis, N_per_cell); cudaDeviceSynchronize(); d_check_active_with_list<<<1, h_Np>>>(d_x, d_y, d_active, d_cell_list, cell_per_axis, N_per_cell); cudaDeviceSynchronize(); cudaMemcpy(h_check_result, d_active, h_Np * sizeof(int), cudaMemcpyDeviceToHost); end = clock(); printf("gpu with gpu list:%d\n", (int)(end - start)); h_DBG(h_active, h_check_result, h_Np); printf("\n"); //time loop //--move particles //--check activeness //--(sometimes) make new cell list //finalize //--free memory cudaFreeHost(h_x); cudaFreeHost(h_y); cudaFreeHost(h_active); cudaFreeHost(h_check_result); cudaFreeHost(h_cell_list); free(h_active_DBG); cudaFree(d_x); cudaFree(d_y); cudaFree(d_active); 
cudaFree(d_cell_list); cudaFree(d_belonging_cell); return 0; }
ea2c7e6a06f37423c2eed90c4c62a2f82748aa71.hip
// !!! This is a file automatically generated by hipify!!! /* % Function: sc_fdma_modulator % Description: Generates sc-fdma signal of the subframe % Inputs: *subframe_h: received DMRS number 1 % M_pusch_rb numer of resource blocks assigned to ue % Outputs: *pusch_bb_h base band signal By: Mohammed Mostafa */ #include "sc_fdma_modulator.cuh" int main(int argc, char **argv) { const int M_pusch_rb = 100; const int M_pusch_sc = N_sc_rb * M_pusch_rb; //input hipfftComplex* subframe_h = (hipfftComplex *)malloc(sizeof(hipfftComplex)*N_symbs_per_subframe*M_pusch_sc); int j = 1; for (int i = 0; i < N_symbs_per_subframe*M_pusch_sc ; i++) { subframe_h[i].x = rand()/(float)RAND_MAX*10; subframe_h[i].y = rand()/(float)RAND_MAX*10; j++; if (j == 1201) j = 1; } //For output hipfftComplex *pusch_bb_h; //Call the Transform Precoder Function sc_fdma_modulator(subframe_h, M_pusch_rb, &pusch_bb_h); //Print results for (int i = 0; i < modulated_subframe_length; i++) { printf("idx = %d \t %f \t %f \n", i + 1, pusch_bb_h[i].x, pusch_bb_h[i].y); } //To compare with MATLAB results FILE *results; if ((results = freopen("TP_Results.m", "w+", stdout)) == NULL) { printf("Cannot open file.\n"); exit(1); } printf("clear; clc;\nsymbols_real = [ "); for (int i = 0; i < (modulated_subframe_length); i++) { printf("%10f", pusch_bb_h[i].x); if (i != ((modulated_subframe_length)-1)) printf(","); } printf(" ];\nsymbols_imag = [ "); for (int i = 0; i < (modulated_subframe_length); i++) { printf("%10f", pusch_bb_h[i].y); if (i != ((modulated_subframe_length)-1)) printf(","); } printf(" ];\n"); printf("symbols_CUDA = symbols_real + 1i * symbols_imag;\n"); fclose(results); FILE *results1; if ((results1 = freopen("TP_input_.m", "w+", stdout)) == NULL) { printf("Cannot open file.\n"); exit(1); } printf("clear; clc;\nsymbols_in_real = [ "); for (int i = 0; i < (16800); i++) { printf("%10f", subframe_h[i].x); if (i != ((16800)-1)) printf(","); } printf(" ];\nsymbols_in_imag = [ "); for (int i = 0; i < (16800); 
i++) { printf("%10f", subframe_h[i].y); if (i != ((16800)-1)) printf(","); } printf(" ];\n"); printf("symbols_input_CUDA = symbols_in_real + 1i * symbols_in_imag;\n"); fclose(results1); return 0; }
ea2c7e6a06f37423c2eed90c4c62a2f82748aa71.cu
/* % Function: sc_fdma_modulator % Description: Generates sc-fdma signal of the subframe % Inputs: *subframe_h: received DMRS number 1 % M_pusch_rb numer of resource blocks assigned to ue % Outputs: *pusch_bb_h base band signal By: Mohammed Mostafa */ #include "sc_fdma_modulator.cuh" int main(int argc, char **argv) { const int M_pusch_rb = 100; const int M_pusch_sc = N_sc_rb * M_pusch_rb; //input cufftComplex* subframe_h = (cufftComplex *)malloc(sizeof(cufftComplex)*N_symbs_per_subframe*M_pusch_sc); int j = 1; for (int i = 0; i < N_symbs_per_subframe*M_pusch_sc ; i++) { subframe_h[i].x = rand()/(float)RAND_MAX*10; subframe_h[i].y = rand()/(float)RAND_MAX*10; j++; if (j == 1201) j = 1; } //For output cufftComplex *pusch_bb_h; //Call the Transform Precoder Function sc_fdma_modulator(subframe_h, M_pusch_rb, &pusch_bb_h); //Print results for (int i = 0; i < modulated_subframe_length; i++) { printf("idx = %d \t %f \t %f \n", i + 1, pusch_bb_h[i].x, pusch_bb_h[i].y); } //To compare with MATLAB results FILE *results; if ((results = freopen("TP_Results.m", "w+", stdout)) == NULL) { printf("Cannot open file.\n"); exit(1); } printf("clear; clc;\nsymbols_real = [ "); for (int i = 0; i < (modulated_subframe_length); i++) { printf("%10f", pusch_bb_h[i].x); if (i != ((modulated_subframe_length)-1)) printf(","); } printf(" ];\nsymbols_imag = [ "); for (int i = 0; i < (modulated_subframe_length); i++) { printf("%10f", pusch_bb_h[i].y); if (i != ((modulated_subframe_length)-1)) printf(","); } printf(" ];\n"); printf("symbols_CUDA = symbols_real + 1i * symbols_imag;\n"); fclose(results); FILE *results1; if ((results1 = freopen("TP_input_.m", "w+", stdout)) == NULL) { printf("Cannot open file.\n"); exit(1); } printf("clear; clc;\nsymbols_in_real = [ "); for (int i = 0; i < (16800); i++) { printf("%10f", subframe_h[i].x); if (i != ((16800)-1)) printf(","); } printf(" ];\nsymbols_in_imag = [ "); for (int i = 0; i < (16800); i++) { printf("%10f", subframe_h[i].y); if (i != ((16800)-1)) 
printf(","); } printf(" ];\n"); printf("symbols_input_CUDA = symbols_in_real + 1i * symbols_in_imag;\n"); fclose(results1); return 0; }
85df3920214ba32622a3101d0ae08c09c76a0538.hip
// !!! This is a file automatically generated by hipify!!! /* C stuff */ #include <stdio.h> #include <stdlib.h> #include <fcntl.h> #include <unistd.h> #include <string.h> #include <errno.h> // Open-CV for the vision stuff #include <opencv2/opencv.hpp> /* Cuda stuff */ #include <hip/hip_runtime_api.h> #include <hip/hip_runtime.h> #include "GPU.cuh" using namespace std; void testError(int ok, char* message){ if(ok != hipSuccess){ cerr << message << endl; } } /*************************************************************************** Prepare the kernel call: - Transfer the features to the GPU - Prepare an array for the results, initialized to zero (in parallel on the GPU) ***************************************************************************/ void preKernel(float *features, float *features_integral, float **_gpuFeatures, float **_gpuFeaturesIntegral, unsigned int **_gpuResult, int16_t w, int16_t h, int16_t w_integral, int16_t h_integral, int16_t noChannels, int numLabels) { hipError_t ok; int size; //printFreeGPUMem("CUDA Malloc features: "); // Allocate GPU memory for the features and transfer // them from host memory to GPU memory size=noChannels*w*h*sizeof(float); ok=hipMalloc ((void**) _gpuFeatures, size); testError(ok, "hipMalloc error 1"); ok=hipMemcpy (*_gpuFeatures, features, size, hipMemcpyHostToDevice); testError(ok, "hipMemcpyHostToDevice error 1"); size=noChannels*w_integral*h_integral*sizeof(float); ok=hipMalloc ((void**) _gpuFeaturesIntegral, size); testError(ok, "hipMalloc error 2"); ok=hipMemcpy (*_gpuFeaturesIntegral, features_integral, size, hipMemcpyHostToDevice); testError(ok, "hipMemcpyHostToDevice error 2"); size=w*h*numLabels*sizeof(unsigned int); ok=hipMalloc ((void**) _gpuResult, size); testError(ok, "hipMalloc error 3"); //.... 
KERNEL LAUNCH ICI } /*************************************************************************** After the kernel call: - Transfer the result back from the GPU to the _CPU - free the GPU memory related to a single image (features), but not the forest! ***************************************************************************/ void postKernel(float *_gpuFeatures, float *_gpuFeaturesIntegral, unsigned int *_gpuResult, unsigned int *result, int16_t w, int16_t h, int numLabels) { hipError_t ok; int size; // Copy the results back to host memory size=w*h*numLabels*sizeof(unsigned int); ok=hipMemcpy (result, _gpuResult, size, hipMemcpyDeviceToHost); testError(ok, "hipMemcpyDeviceToHost error 1"); #ifdef GPU_DEBUG_SINGLE_PIXEL std::cerr << "Debug-error code (int)=" << std::dec << (int) *result << "\n"; std::cerr << "Return values: "; for (int i=0; i<result[0]; ++i) std::cerr << result[i+1] << " "; std::cerr << "\n"; #endif // Free up GPU memory. hipFree(_gpuFeatures); hipFree(_gpuFeaturesIntegral); hipFree(_gpuResult); } __device__ float gpuGetValueIntegral (float *gpuFeaturesIntegral, uint8_t channel, int16_t x1, int16_t y1, int16_t x2, int16_t y2, int16_t w, int16_t h) { float res = ( gpuFeaturesIntegral[y2 + x2*h + channel*w*h] - gpuFeaturesIntegral[y2 + x1*h + channel*w*h] - gpuFeaturesIntegral[y1 + x2*h + channel*w*h] + gpuFeaturesIntegral[y1 + x1*h + channel*w*h]); return res; }
85df3920214ba32622a3101d0ae08c09c76a0538.cu
/* C stuff */ #include <stdio.h> #include <stdlib.h> #include <fcntl.h> #include <unistd.h> #include <string.h> #include <errno.h> // Open-CV for the vision stuff #include <opencv2/opencv.hpp> /* Cuda stuff */ #include <cuda_runtime_api.h> #include <cuda.h> #include "GPU.cuh" using namespace std; void testError(int ok, char* message){ if(ok != cudaSuccess){ cerr << message << endl; } } /*************************************************************************** Prepare the kernel call: - Transfer the features to the GPU - Prepare an array for the results, initialized to zero (in parallel on the GPU) ***************************************************************************/ void preKernel(float *features, float *features_integral, float **_gpuFeatures, float **_gpuFeaturesIntegral, unsigned int **_gpuResult, int16_t w, int16_t h, int16_t w_integral, int16_t h_integral, int16_t noChannels, int numLabels) { cudaError_t ok; int size; //printFreeGPUMem("CUDA Malloc features: "); // Allocate GPU memory for the features and transfer // them from host memory to GPU memory size=noChannels*w*h*sizeof(float); ok=cudaMalloc ((void**) _gpuFeatures, size); testError(ok, "cudaMalloc error 1"); ok=cudaMemcpy (*_gpuFeatures, features, size, cudaMemcpyHostToDevice); testError(ok, "cudaMemcpyHostToDevice error 1"); size=noChannels*w_integral*h_integral*sizeof(float); ok=cudaMalloc ((void**) _gpuFeaturesIntegral, size); testError(ok, "cudaMalloc error 2"); ok=cudaMemcpy (*_gpuFeaturesIntegral, features_integral, size, cudaMemcpyHostToDevice); testError(ok, "cudaMemcpyHostToDevice error 2"); size=w*h*numLabels*sizeof(unsigned int); ok=cudaMalloc ((void**) _gpuResult, size); testError(ok, "cudaMalloc error 3"); //.... KERNEL LAUNCH ICI } /*************************************************************************** After the kernel call: - Transfer the result back from the GPU to the _CPU - free the GPU memory related to a single image (features), but not the forest! 
***************************************************************************/ void postKernel(float *_gpuFeatures, float *_gpuFeaturesIntegral, unsigned int *_gpuResult, unsigned int *result, int16_t w, int16_t h, int numLabels) { cudaError_t ok; int size; // Copy the results back to host memory size=w*h*numLabels*sizeof(unsigned int); ok=cudaMemcpy (result, _gpuResult, size, cudaMemcpyDeviceToHost); testError(ok, "cudaMemcpyDeviceToHost error 1"); #ifdef GPU_DEBUG_SINGLE_PIXEL std::cerr << "Debug-error code (int)=" << std::dec << (int) *result << "\n"; std::cerr << "Return values: "; for (int i=0; i<result[0]; ++i) std::cerr << result[i+1] << " "; std::cerr << "\n"; #endif // Free up GPU memory. cudaFree(_gpuFeatures); cudaFree(_gpuFeaturesIntegral); cudaFree(_gpuResult); } __device__ float gpuGetValueIntegral (float *gpuFeaturesIntegral, uint8_t channel, int16_t x1, int16_t y1, int16_t x2, int16_t y2, int16_t w, int16_t h) { float res = ( gpuFeaturesIntegral[y2 + x2*h + channel*w*h] - gpuFeaturesIntegral[y2 + x1*h + channel*w*h] - gpuFeaturesIntegral[y1 + x2*h + channel*w*h] + gpuFeaturesIntegral[y1 + x1*h + channel*w*h]); return res; }
0739ee0ffce6b26d67ba782d85ca9ea9a8ab2049.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" extern "C" { #ifndef REAL #define REAL float #endif #ifndef CAST #define CAST(fun) fun ## f #endif #ifndef REAL2o3 #define REAL2o3 (REAL)0.6666666666666667 #endif #ifndef REAL3o2 #define REAL3o2 (REAL)1.5 #endif } __global__ void vector_inv_cbrt (const int n, const REAL* x, const int offset_x, const int stride_x, REAL* y, const int offset_y, const int stride_y) { const int gid = blockIdx.x * blockDim.x + threadIdx.x; if (gid < n) { y[offset_y + gid * stride_y] = CAST(rcbrt)(x[offset_x + gid * stride_x]); } }
0739ee0ffce6b26d67ba782d85ca9ea9a8ab2049.cu
#include "includes.h" extern "C" { #ifndef REAL #define REAL float #endif #ifndef CAST #define CAST(fun) fun ## f #endif #ifndef REAL2o3 #define REAL2o3 (REAL)0.6666666666666667 #endif #ifndef REAL3o2 #define REAL3o2 (REAL)1.5 #endif } __global__ void vector_inv_cbrt (const int n, const REAL* x, const int offset_x, const int stride_x, REAL* y, const int offset_y, const int stride_y) { const int gid = blockIdx.x * blockDim.x + threadIdx.x; if (gid < n) { y[offset_y + gid * stride_y] = CAST(rcbrt)(x[offset_x + gid * stride_x]); } }
ad9bc20d98fc5c1b04393f521d744c9f75389686.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void rdiv_double(int n, double *a, double *b, double *sum) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) sum[i] = b[i] / a[i]; }
ad9bc20d98fc5c1b04393f521d744c9f75389686.cu
#include "includes.h" __global__ void rdiv_double(int n, double *a, double *b, double *sum) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) sum[i] = b[i] / a[i]; }
40f6bfd39ebe6e639c3411625829784845ebf6bd.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <stdio.h> __global__ void scaleit_kernel(double *a,int n, int scaleBy) { /* Determine my index */ int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) { a[i] = a[i] * (double)scaleBy; } } /* nvcc uses C++ name mangling by default */ extern "C" { int scaleit_launcher_(double *d_a, int *n, int *scaleBy) { dim3 block, grid; /* Decompose Problem */ block = dim3(1024, 1, 1); grid = dim3(*n/block.x, 1, 1); /* Launch Compute Kernel */ hipLaunchKernelGGL(( scaleit_kernel), dim3(grid),dim3(block), 0, 0, d_a,*n,*scaleBy); return 0; } }
40f6bfd39ebe6e639c3411625829784845ebf6bd.cu
#include <cuda.h> #include <stdio.h> __global__ void scaleit_kernel(double *a,int n, int scaleBy) { /* Determine my index */ int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) { a[i] = a[i] * (double)scaleBy; } } /* nvcc uses C++ name mangling by default */ extern "C" { int scaleit_launcher_(double *d_a, int *n, int *scaleBy) { dim3 block, grid; /* Decompose Problem */ block = dim3(1024, 1, 1); grid = dim3(*n/block.x, 1, 1); /* Launch Compute Kernel */ scaleit_kernel<<<grid,block>>>(d_a,*n,*scaleBy); return 0; } }
fcb618b207b2ef34c01e3c25fb2ab0eb13a49630.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "mvmls_cuda.hpp" #include <ftl/cuda/weighting.hpp> #include <ftl/operators/cuda/mask.hpp> #include <ftl/cuda/warp.hpp> using ftl::cuda::TextureObject; using ftl::rgbd::Camera; using ftl::cuda::Mask; using ftl::cuda::MvMLSParams; #define T_PER_BLOCK 8 #define WARP_SIZE 32 #include "correspondence_common.hpp" // ==== Remove zero-confidence ================================================= __global__ void zero_confidence_kernel( TextureObject<float> conf, TextureObject<float> depth) { const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; if (x < depth.width() && y < depth.height()) { const float c = conf.tex2D((int)x,(int)y); if (c == 0.0f) { depth(x,y) = 1000.0f; } } } void ftl::cuda::zero_confidence(TextureObject<float> &conf, TextureObject<float> &depth, hipStream_t stream) { const dim3 gridSize((depth.width() + T_PER_BLOCK - 1)/T_PER_BLOCK, (depth.height() + T_PER_BLOCK - 1)/T_PER_BLOCK); const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK); hipLaunchKernelGGL(( zero_confidence_kernel), dim3(gridSize), dim3(blockSize), 0, stream, conf, depth); cudaSafeCall( hipGetLastError() ); } // ==== Show correspondence errors ============================================= __global__ void show_cor_error_kernel( TextureObject<uchar4> colour, TextureObject<short2> screen1, TextureObject<short2> screen2, float thresh) { const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; if (x < colour.width() && y < colour.height()) { short2 s1 = screen1.tex2D(x,y); //colour(x,y) = make_uchar4(0,0,0,0); if (s1.x >= 0 && s1.x < screen2.width() && s1.y < screen2.height()) { short2 s2 = screen2.tex2D(s1.x, s1.y); float l = sqrt(square(s2.x-x) + square(s2.y-y)); float nl = min(1.0f, l/thresh); //colour(x,y) = (l < 1.0f) ? make_uchar4(0,255,0,0) : (s2.x < 0) ? 
make_uchar4(255.0f, 0.0f, 0.0f, 0.0f) : make_uchar4(0.0f, (1.0f-nl)*255.0f, nl*255.0f, 0.0f); if (nl < 1.0f && s2.x >= 0) colour(x,y) = make_uchar4(0.0f, (1.0f-nl)*255.0f, nl*255.0f, 0.0f); } } } void ftl::cuda::show_cor_error(TextureObject<uchar4> &colour, TextureObject<short2> &screen1, TextureObject<short2> &screen2, float thresh, hipStream_t stream) { const dim3 gridSize((colour.width() + T_PER_BLOCK - 1)/T_PER_BLOCK, (colour.height() + T_PER_BLOCK - 1)/T_PER_BLOCK); const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK); hipLaunchKernelGGL(( show_cor_error_kernel), dim3(gridSize), dim3(blockSize), 0, stream, colour, screen1, screen2, thresh); cudaSafeCall( hipGetLastError() ); } // ==== Remove correspondence errors =========================================== __global__ void remove_cor_error_kernel( TextureObject<float> adjust, TextureObject<short2> screen1, TextureObject<short2> screen2, float thresh) { const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; if (x < adjust.width() && y < adjust.height()) { short2 s1 = screen1.tex2D(x,y); if (s1.x >= 0 && s1.x < screen2.width() && s1.y < screen2.height()) { short2 s2 = screen2.tex2D(s1.x, s1.y); float l = sqrt(square(s2.x-x) + square(s2.y-y)); if (l >= thresh || s2.x < 0) { adjust(x,y) = PINF; screen1(x,y) = make_short2(-1,-1); } } } } void ftl::cuda::remove_cor_error(TextureObject<float> &adjust, TextureObject<short2> &screen1, TextureObject<short2> &screen2, float thresh, hipStream_t stream) { const dim3 gridSize((adjust.width() + T_PER_BLOCK - 1)/T_PER_BLOCK, (adjust.height() + T_PER_BLOCK - 1)/T_PER_BLOCK); const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK); hipLaunchKernelGGL(( remove_cor_error_kernel), dim3(gridSize), dim3(blockSize), 0, stream, adjust, screen1, screen2, thresh); cudaSafeCall( hipGetLastError() ); } // ==== Show depth adjustments ================================================= __global__ void show_depth_adjust_kernel( 
TextureObject<uchar4> colour, TextureObject<short2> screen, TextureObject<float> adjust, float scale) { const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; if (x < colour.width() && y < colour.height()) { float a = adjust.tex2D(x,y); short2 s = screen.tex2D(x,y); //colour(x,y) = make_uchar4(0,0,0,0); if (s.x >= 0) { float ncG = min(1.0f, fabsf(a)/scale); float ncB = -max(-1.0f, min(0.0f, a/scale)); float ncR = max(0.0f, min(1.0f, a/scale)); colour(x,y) = make_uchar4(ncB*255.0f, (1.0f-ncG)*255.0f, ncR*255.0f, 0.0f); } } } void ftl::cuda::show_depth_adjustment(TextureObject<uchar4> &colour, TextureObject<short2> &screen, TextureObject<float> &adjust, float scale, hipStream_t stream) { const dim3 gridSize((colour.width() + T_PER_BLOCK - 1)/T_PER_BLOCK, (colour.height() + T_PER_BLOCK - 1)/T_PER_BLOCK); const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK); hipLaunchKernelGGL(( show_depth_adjust_kernel), dim3(gridSize), dim3(blockSize), 0, stream, colour, screen, adjust, scale); cudaSafeCall( hipGetLastError() ); } // ==== Vis reprojection ======================================================= __global__ void viz_reprojection_kernel( TextureObject<uchar4> colour_out, TextureObject<uchar4> colour_in, TextureObject<float> depth_out, TextureObject<float> depth_in, float4x4 pose, Camera cam1, Camera cam2) { const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; if (x < colour_in.width() && y < colour_in.height()) { float d = depth_in.tex2D(x,y); const float3 camPosOrigin = pose * cam1.screenToCam(x,y,d); const uint2 p = cam2.camToScreen<uint2>(camPosOrigin); if (p.x < colour_out.width() && p.y < colour_out.height()) { float dout = depth_out(p.x, p.y); uchar4 cin = colour_in(x,y); uchar4 cout = colour_out(p.x, p.y); if (fabs(dout-camPosOrigin.z) < 0.1f) { colour_out(p.x, p.y) = make_uchar4( (int(cin.x)+int(cout.x)) / 2, (int(cin.y)+int(cout.y)) / 2, 
(int(cin.z)+int(cout.z)) / 2, 0); } } } } void ftl::cuda::viz_reprojection( TextureObject<uchar4> &colour_out, TextureObject<uchar4> &colour_in, TextureObject<float> &depth_out, TextureObject<float> &depth_in, const float4x4 &pose, const Camera &cam1, const Camera &cam2, hipStream_t stream) { const dim3 gridSize((colour_in.width() + T_PER_BLOCK - 1)/T_PER_BLOCK, (colour_in.height() + T_PER_BLOCK - 1)/T_PER_BLOCK); const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK); hipLaunchKernelGGL(( viz_reprojection_kernel), dim3(gridSize), dim3(blockSize), 0, stream, colour_out, colour_in, depth_out, depth_in, pose, cam1, cam2); cudaSafeCall( hipGetLastError() ); }
fcb618b207b2ef34c01e3c25fb2ab0eb13a49630.cu
#include "mvmls_cuda.hpp" #include <ftl/cuda/weighting.hpp> #include <ftl/operators/cuda/mask.hpp> #include <ftl/cuda/warp.hpp> using ftl::cuda::TextureObject; using ftl::rgbd::Camera; using ftl::cuda::Mask; using ftl::cuda::MvMLSParams; #define T_PER_BLOCK 8 #define WARP_SIZE 32 #include "correspondence_common.hpp" // ==== Remove zero-confidence ================================================= __global__ void zero_confidence_kernel( TextureObject<float> conf, TextureObject<float> depth) { const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; if (x < depth.width() && y < depth.height()) { const float c = conf.tex2D((int)x,(int)y); if (c == 0.0f) { depth(x,y) = 1000.0f; } } } void ftl::cuda::zero_confidence(TextureObject<float> &conf, TextureObject<float> &depth, cudaStream_t stream) { const dim3 gridSize((depth.width() + T_PER_BLOCK - 1)/T_PER_BLOCK, (depth.height() + T_PER_BLOCK - 1)/T_PER_BLOCK); const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK); zero_confidence_kernel<<<gridSize, blockSize, 0, stream>>>(conf, depth); cudaSafeCall( cudaGetLastError() ); } // ==== Show correspondence errors ============================================= __global__ void show_cor_error_kernel( TextureObject<uchar4> colour, TextureObject<short2> screen1, TextureObject<short2> screen2, float thresh) { const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; if (x < colour.width() && y < colour.height()) { short2 s1 = screen1.tex2D(x,y); //colour(x,y) = make_uchar4(0,0,0,0); if (s1.x >= 0 && s1.x < screen2.width() && s1.y < screen2.height()) { short2 s2 = screen2.tex2D(s1.x, s1.y); float l = sqrt(square(s2.x-x) + square(s2.y-y)); float nl = min(1.0f, l/thresh); //colour(x,y) = (l < 1.0f) ? make_uchar4(0,255,0,0) : (s2.x < 0) ? 
make_uchar4(255.0f, 0.0f, 0.0f, 0.0f) : make_uchar4(0.0f, (1.0f-nl)*255.0f, nl*255.0f, 0.0f); if (nl < 1.0f && s2.x >= 0) colour(x,y) = make_uchar4(0.0f, (1.0f-nl)*255.0f, nl*255.0f, 0.0f); } } } void ftl::cuda::show_cor_error(TextureObject<uchar4> &colour, TextureObject<short2> &screen1, TextureObject<short2> &screen2, float thresh, cudaStream_t stream) { const dim3 gridSize((colour.width() + T_PER_BLOCK - 1)/T_PER_BLOCK, (colour.height() + T_PER_BLOCK - 1)/T_PER_BLOCK); const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK); show_cor_error_kernel<<<gridSize, blockSize, 0, stream>>>(colour, screen1, screen2, thresh); cudaSafeCall( cudaGetLastError() ); } // ==== Remove correspondence errors =========================================== __global__ void remove_cor_error_kernel( TextureObject<float> adjust, TextureObject<short2> screen1, TextureObject<short2> screen2, float thresh) { const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; if (x < adjust.width() && y < adjust.height()) { short2 s1 = screen1.tex2D(x,y); if (s1.x >= 0 && s1.x < screen2.width() && s1.y < screen2.height()) { short2 s2 = screen2.tex2D(s1.x, s1.y); float l = sqrt(square(s2.x-x) + square(s2.y-y)); if (l >= thresh || s2.x < 0) { adjust(x,y) = PINF; screen1(x,y) = make_short2(-1,-1); } } } } void ftl::cuda::remove_cor_error(TextureObject<float> &adjust, TextureObject<short2> &screen1, TextureObject<short2> &screen2, float thresh, cudaStream_t stream) { const dim3 gridSize((adjust.width() + T_PER_BLOCK - 1)/T_PER_BLOCK, (adjust.height() + T_PER_BLOCK - 1)/T_PER_BLOCK); const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK); remove_cor_error_kernel<<<gridSize, blockSize, 0, stream>>>(adjust, screen1, screen2, thresh); cudaSafeCall( cudaGetLastError() ); } // ==== Show depth adjustments ================================================= __global__ void show_depth_adjust_kernel( TextureObject<uchar4> colour, TextureObject<short2> screen, 
TextureObject<float> adjust, float scale) { const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; if (x < colour.width() && y < colour.height()) { float a = adjust.tex2D(x,y); short2 s = screen.tex2D(x,y); //colour(x,y) = make_uchar4(0,0,0,0); if (s.x >= 0) { float ncG = min(1.0f, fabsf(a)/scale); float ncB = -max(-1.0f, min(0.0f, a/scale)); float ncR = max(0.0f, min(1.0f, a/scale)); colour(x,y) = make_uchar4(ncB*255.0f, (1.0f-ncG)*255.0f, ncR*255.0f, 0.0f); } } } void ftl::cuda::show_depth_adjustment(TextureObject<uchar4> &colour, TextureObject<short2> &screen, TextureObject<float> &adjust, float scale, cudaStream_t stream) { const dim3 gridSize((colour.width() + T_PER_BLOCK - 1)/T_PER_BLOCK, (colour.height() + T_PER_BLOCK - 1)/T_PER_BLOCK); const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK); show_depth_adjust_kernel<<<gridSize, blockSize, 0, stream>>>(colour, screen, adjust, scale); cudaSafeCall( cudaGetLastError() ); } // ==== Vis reprojection ======================================================= __global__ void viz_reprojection_kernel( TextureObject<uchar4> colour_out, TextureObject<uchar4> colour_in, TextureObject<float> depth_out, TextureObject<float> depth_in, float4x4 pose, Camera cam1, Camera cam2) { const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; if (x < colour_in.width() && y < colour_in.height()) { float d = depth_in.tex2D(x,y); const float3 camPosOrigin = pose * cam1.screenToCam(x,y,d); const uint2 p = cam2.camToScreen<uint2>(camPosOrigin); if (p.x < colour_out.width() && p.y < colour_out.height()) { float dout = depth_out(p.x, p.y); uchar4 cin = colour_in(x,y); uchar4 cout = colour_out(p.x, p.y); if (fabs(dout-camPosOrigin.z) < 0.1f) { colour_out(p.x, p.y) = make_uchar4( (int(cin.x)+int(cout.x)) / 2, (int(cin.y)+int(cout.y)) / 2, (int(cin.z)+int(cout.z)) / 2, 0); } } } } void ftl::cuda::viz_reprojection( 
TextureObject<uchar4> &colour_out, TextureObject<uchar4> &colour_in, TextureObject<float> &depth_out, TextureObject<float> &depth_in, const float4x4 &pose, const Camera &cam1, const Camera &cam2, cudaStream_t stream) { const dim3 gridSize((colour_in.width() + T_PER_BLOCK - 1)/T_PER_BLOCK, (colour_in.height() + T_PER_BLOCK - 1)/T_PER_BLOCK); const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK); viz_reprojection_kernel<<<gridSize, blockSize, 0, stream>>>(colour_out, colour_in, depth_out, depth_in, pose, cam1, cam2); cudaSafeCall( cudaGetLastError() ); }
57514aa31afc97bf2c4f27d900d6e2cfb95fd46f.hip
// !!! This is a file automatically generated by hipify!!! /* Implements the various scatter operations on cusp vectors */ #define PETSC_SKIP_COMPLEX #include <petscconf.h> PETSC_CUDA_EXTERN_C_BEGIN #include <petsc/private/vecimpl.h> /*I "petscvec.h" I*/ #include <../src/vec/vec/impls/dvecimpl.h> PETSC_CUDA_EXTERN_C_END #include <../src/vec/vec/impls/seq/seqcusp/cuspvecimpl.h> #include <hip/hip_runtime.h> #undef __FUNCT__ #define __FUNCT__ "VecScatterCUSPIndicesCreate_StoS" PetscErrorCode VecScatterCUSPIndicesCreate_StoS(PetscInt n,PetscInt toFirst,PetscInt fromFirst,PetscInt toStep, PetscInt fromStep,PetscInt *tslots, PetscInt *fslots,PetscCUSPIndices *ci) { PetscCUSPIndices cci; VecScatterCUSPIndices_StoS stos_scatter; hipError_t err = hipSuccess; hipStream_t stream; PetscInt *intVecGPU; int device; hipDeviceProp_t props; PetscFunctionBegin; cci = new struct _p_PetscCUSPIndices; stos_scatter = new struct _p_VecScatterCUSPIndices_StoS; /* create the "from" indices */ stos_scatter->fslots = 0; stos_scatter->fromFirst = 0; stos_scatter->fromStep = 0; if (n) { if (fslots) { /* allocate GPU memory for the to-slots */ err = hipMalloc((void **)&intVecGPU,n*sizeof(PetscInt));CHKERRCUSP((int)err); err = hipMemcpy(intVecGPU,fslots,n*sizeof(PetscInt),hipMemcpyHostToDevice);CHKERRCUSP((int)err); /* assign the pointer to the struct */ stos_scatter->fslots = intVecGPU; stos_scatter->fromMode = VEC_SCATTER_CUSP_GENERAL; } else if (fromStep) { stos_scatter->fromFirst = fromFirst; stos_scatter->fromStep = fromStep; stos_scatter->fromMode = VEC_SCATTER_CUSP_STRIDED; } else { SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Must provide fslots or fromStep."); } } /* create the "to" indices */ stos_scatter->tslots = 0; stos_scatter->toFirst = 0; stos_scatter->toStep = 0; if (n) { if (tslots) { /* allocate GPU memory for the to-slots */ err = hipMalloc((void **)&intVecGPU,n*sizeof(PetscInt));CHKERRCUSP((int)err); err = 
hipMemcpy(intVecGPU,tslots,n*sizeof(PetscInt),hipMemcpyHostToDevice);CHKERRCUSP((int)err); /* assign the pointer to the struct */ stos_scatter->tslots = intVecGPU; stos_scatter->toMode = VEC_SCATTER_CUSP_GENERAL; } else if (toStep) { stos_scatter->toFirst = toFirst; stos_scatter->toStep = toStep; stos_scatter->toMode = VEC_SCATTER_CUSP_STRIDED; } else { SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Must provide tslots or toStep."); } } /* allocate the stream variable */ err = hipStreamCreate(&stream);CHKERRCUSP((int)err); stos_scatter->stream = stream; /* the number of indices */ stos_scatter->n = n; /* get the maximum number of coresident thread blocks */ hipGetDevice(&device); hipGetDeviceProperties(&props, device); stos_scatter->MAX_CORESIDENT_THREADS = props.maxThreadsPerMultiProcessor; if (props.major>=3) { stos_scatter->MAX_BLOCKS = 16*props.multiProcessorCount; } else { stos_scatter->MAX_BLOCKS = 8*props.multiProcessorCount; } /* assign the indices */ cci->scatter = (VecScatterCUSPIndices_StoS)stos_scatter; cci->scatterType = VEC_SCATTER_CUSP_STOS; *ci = cci; PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecScatterCUSPIndicesCreate_PtoP" PetscErrorCode VecScatterCUSPIndicesCreate_PtoP(PetscInt ns,PetscInt *sendIndices,PetscInt nr,PetscInt *recvIndices,PetscCUSPIndices *ci) { PetscCUSPIndices cci; VecScatterCUSPIndices_PtoP ptop_scatter; PetscFunctionBegin; cci = new struct _p_PetscCUSPIndices; ptop_scatter = new struct _p_VecScatterCUSPIndices_PtoP; /* this calculation assumes that the input indices are sorted */ ptop_scatter->ns = sendIndices[ns-1]-sendIndices[0]+1; ptop_scatter->sendLowestIndex = sendIndices[0]; ptop_scatter->nr = recvIndices[nr-1]-recvIndices[0]+1; ptop_scatter->recvLowestIndex = recvIndices[0]; /* assign indices */ cci->scatter = (VecScatterCUSPIndices_PtoP)ptop_scatter; cci->scatterType = VEC_SCATTER_CUSP_PTOP; *ci = cci; PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecScatterCUSPIndicesDestroy" 
PetscErrorCode VecScatterCUSPIndicesDestroy(PetscCUSPIndices *ci) { PetscFunctionBegin; if (!(*ci)) PetscFunctionReturn(0); try { if (ci) { if ((*ci)->scatterType == VEC_SCATTER_CUSP_PTOP) { delete (VecScatterCUSPIndices_PtoP)(*ci)->scatter; (*ci)->scatter = 0; } else { hipError_t err = hipSuccess; VecScatterCUSPIndices_StoS stos_scatter = (VecScatterCUSPIndices_StoS)(*ci)->scatter; if (stos_scatter->fslots) { err = hipFree(stos_scatter->fslots);CHKERRCUSP((int)err); stos_scatter->fslots = 0; } /* free the GPU memory for the to-slots */ if (stos_scatter->tslots) { err = hipFree(stos_scatter->tslots);CHKERRCUSP((int)err); stos_scatter->tslots = 0; } /* free the stream variable */ if (stos_scatter->stream) { err = hipStreamDestroy(stos_scatter->stream);CHKERRCUSP((int)err); stos_scatter->stream = 0; } delete stos_scatter; (*ci)->scatter = 0; } delete *ci; *ci = 0; } } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } PetscFunctionReturn(0); } /* Insert operator */ class Insert { public: __device__ PetscScalar operator() (PetscScalar a,PetscScalar b) const { return a; } }; /* Add operator */ class Add { public: __device__ PetscScalar operator() (PetscScalar a,PetscScalar b) const { return a+b; } }; /* Add operator */ class Max { public: __device__ PetscScalar operator() (PetscScalar a,PetscScalar b) const { #if !defined(PETSC_USE_COMPLEX) return PetscMax(a,b); #endif } }; /* Sequential general to sequential general GPU kernel */ template<class OPERATOR> __global__ void VecScatterCUSP_SGtoSG_kernel(PetscInt n,PetscInt *xind,PetscScalar *x,PetscInt *yind,PetscScalar *y,OPERATOR OP) { const int tidx = blockIdx.x*blockDim.x + threadIdx.x; const int grid_size = gridDim.x * blockDim.x; for (int i = tidx; i < n; i += grid_size) { y[yind[i]] = OP(x[xind[i]],y[yind[i]]); } } /* Sequential general to sequential strided GPU kernel */ template<class OPERATOR> __global__ void VecScatterCUSP_SGtoSS_kernel(PetscInt n,PetscInt *xind,PetscScalar 
*x,PetscInt toFirst,PetscInt toStep,PetscScalar *y,OPERATOR OP) { const int tidx = blockIdx.x*blockDim.x + threadIdx.x; const int grid_size = gridDim.x * blockDim.x; for (int i = tidx; i < n; i += grid_size) { y[toFirst+i*toStep] = OP(x[xind[i]],y[toFirst+i*toStep]); } } /* Sequential strided to sequential strided GPU kernel */ template<class OPERATOR> __global__ void VecScatterCUSP_SStoSS_kernel(PetscInt n,PetscInt fromFirst,PetscInt fromStep,PetscScalar *x,PetscInt toFirst,PetscInt toStep,PetscScalar *y,OPERATOR OP) { const int tidx = blockIdx.x*blockDim.x + threadIdx.x; const int grid_size = gridDim.x * blockDim.x; for (int i = tidx; i < n; i += grid_size) { y[toFirst+i*toStep] = OP(x[fromFirst+i*fromStep],y[toFirst+i*toStep]); } } /* Sequential strided to sequential general GPU kernel */ template<class OPERATOR> __global__ void VecScatterCUSP_SStoSG_kernel(PetscInt n,PetscInt fromFirst,PetscInt fromStep,PetscScalar *x,PetscInt *yind,PetscScalar *y,OPERATOR OP) { const int tidx = blockIdx.x*blockDim.x + threadIdx.x; const int grid_size = gridDim.x * blockDim.x; for (int i = tidx; i < n; i += grid_size) { y[yind[i]] = OP(x[fromFirst+i*fromStep],y[yind[i]]); } } template<class OPERATOR> void VecScatterCUSP_StoS_Dispatcher(CUSPARRAY *xarray,CUSPARRAY *yarray,PetscCUSPIndices ci,ScatterMode mode,OPERATOR OP) { PetscInt nBlocks=0,nThreads=128; VecScatterCUSPIndices_StoS stos_scatter = (VecScatterCUSPIndices_StoS)ci->scatter; nBlocks=(int)ceil(((float) stos_scatter->n)/((float) nThreads))+1; if (nBlocks>stos_scatter->MAX_CORESIDENT_THREADS/nThreads) { nBlocks = stos_scatter->MAX_CORESIDENT_THREADS/nThreads; } dim3 block(nThreads,1,1); dim3 grid(nBlocks,1,1); if (mode == SCATTER_FORWARD) { if (stos_scatter->fromMode == VEC_SCATTER_CUSP_GENERAL && stos_scatter->toMode == VEC_SCATTER_CUSP_GENERAL) { hipLaunchKernelGGL(( VecScatterCUSP_SGtoSG_kernel), dim3(grid),dim3(block),0,stos_scatter->stream, 
stos_scatter->n,stos_scatter->fslots,xarray->data().get(),stos_scatter->tslots,yarray->data().get(),OP); } else if (stos_scatter->fromMode == VEC_SCATTER_CUSP_GENERAL && stos_scatter->toMode == VEC_SCATTER_CUSP_STRIDED) { hipLaunchKernelGGL(( VecScatterCUSP_SGtoSS_kernel), dim3(grid),dim3(block),0,stos_scatter->stream, stos_scatter->n,stos_scatter->fslots,xarray->data().get(),stos_scatter->toFirst,stos_scatter->toStep,yarray->data().get(),OP); } else if (stos_scatter->fromMode == VEC_SCATTER_CUSP_STRIDED && stos_scatter->toMode == VEC_SCATTER_CUSP_STRIDED) { hipLaunchKernelGGL(( VecScatterCUSP_SStoSS_kernel), dim3(grid),dim3(block),0,stos_scatter->stream, stos_scatter->n,stos_scatter->fromFirst,stos_scatter->fromStep,xarray->data().get(),stos_scatter->toFirst,stos_scatter->toStep,yarray->data().get(),OP); } else if (stos_scatter->fromMode == VEC_SCATTER_CUSP_STRIDED && stos_scatter->toMode == VEC_SCATTER_CUSP_GENERAL) { hipLaunchKernelGGL(( VecScatterCUSP_SStoSG_kernel), dim3(grid),dim3(block),0,stos_scatter->stream, stos_scatter->n,stos_scatter->fromFirst,stos_scatter->fromStep,xarray->data().get(),stos_scatter->tslots,yarray->data().get(),OP); } } else { if (stos_scatter->toMode == VEC_SCATTER_CUSP_GENERAL && stos_scatter->fromMode == VEC_SCATTER_CUSP_GENERAL) { hipLaunchKernelGGL(( VecScatterCUSP_SGtoSG_kernel), dim3(grid),dim3(block),0,stos_scatter->stream, stos_scatter->n,stos_scatter->tslots,xarray->data().get(),stos_scatter->fslots,yarray->data().get(),OP); } else if (stos_scatter->toMode == VEC_SCATTER_CUSP_GENERAL && stos_scatter->fromMode == VEC_SCATTER_CUSP_STRIDED) { hipLaunchKernelGGL(( VecScatterCUSP_SGtoSS_kernel), dim3(grid),dim3(block),0,stos_scatter->stream, stos_scatter->n,stos_scatter->tslots,xarray->data().get(),stos_scatter->fromFirst,stos_scatter->fromStep,yarray->data().get(),OP); } else if (stos_scatter->toMode == VEC_SCATTER_CUSP_STRIDED && stos_scatter->fromMode == VEC_SCATTER_CUSP_STRIDED) { hipLaunchKernelGGL(( 
VecScatterCUSP_SStoSS_kernel), dim3(grid),dim3(block),0,stos_scatter->stream, stos_scatter->n,stos_scatter->toFirst,stos_scatter->toStep,xarray->data().get(),stos_scatter->fromFirst,stos_scatter->fromStep,yarray->data().get(),OP); } else if (stos_scatter->toMode == VEC_SCATTER_CUSP_STRIDED && stos_scatter->fromMode == VEC_SCATTER_CUSP_GENERAL) { hipLaunchKernelGGL(( VecScatterCUSP_SStoSG_kernel), dim3(grid),dim3(block),0,stos_scatter->stream, stos_scatter->n,stos_scatter->toFirst,stos_scatter->toStep,xarray->data().get(),stos_scatter->fslots,yarray->data().get(),OP); } } } #undef __FUNCT__ #define __FUNCT__ "VecScatterCUSP_StoS" PetscErrorCode VecScatterCUSP_StoS(Vec x,Vec y,PetscCUSPIndices ci,InsertMode addv,ScatterMode mode) { PetscErrorCode ierr; CUSPARRAY *xarray,*yarray; VecScatterCUSPIndices_StoS stos_scatter = (VecScatterCUSPIndices_StoS)ci->scatter; hipError_t err = hipSuccess; PetscFunctionBegin; ierr = VecCUSPAllocateCheck(x);CHKERRQ(ierr); ierr = VecCUSPAllocateCheck(y);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(x,&xarray);CHKERRQ(ierr); ierr = VecCUSPGetArrayReadWrite(y,&yarray);CHKERRQ(ierr); if (stos_scatter->n) { if (addv == INSERT_VALUES) VecScatterCUSP_StoS_Dispatcher(xarray,yarray,ci,mode,Insert()); else if (addv == ADD_VALUES) VecScatterCUSP_StoS_Dispatcher(xarray,yarray,ci,mode,Add()); #if !defined(PETSC_USE_COMPLEX) else if (addv == MAX_VALUES) VecScatterCUSP_StoS_Dispatcher(xarray,yarray,ci,mode,Max()); #endif else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_UNKNOWN_TYPE,"Wrong insert option"); err = hipGetLastError();CHKERRCUSP((int)err); err = hipStreamSynchronize(stos_scatter->stream);CHKERRCUSP((int)err); } ierr = VecCUSPRestoreArrayRead(x,&xarray);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayReadWrite(y,&yarray);CHKERRQ(ierr); PetscFunctionReturn(0); }
57514aa31afc97bf2c4f27d900d6e2cfb95fd46f.cu
/* Implements the various scatter operations on cusp vectors */ #define PETSC_SKIP_COMPLEX #include <petscconf.h> PETSC_CUDA_EXTERN_C_BEGIN #include <petsc/private/vecimpl.h> /*I "petscvec.h" I*/ #include <../src/vec/vec/impls/dvecimpl.h> PETSC_CUDA_EXTERN_C_END #include <../src/vec/vec/impls/seq/seqcusp/cuspvecimpl.h> #include <cuda_runtime.h> #undef __FUNCT__ #define __FUNCT__ "VecScatterCUSPIndicesCreate_StoS" PetscErrorCode VecScatterCUSPIndicesCreate_StoS(PetscInt n,PetscInt toFirst,PetscInt fromFirst,PetscInt toStep, PetscInt fromStep,PetscInt *tslots, PetscInt *fslots,PetscCUSPIndices *ci) { PetscCUSPIndices cci; VecScatterCUSPIndices_StoS stos_scatter; cudaError_t err = cudaSuccess; cudaStream_t stream; PetscInt *intVecGPU; int device; cudaDeviceProp props; PetscFunctionBegin; cci = new struct _p_PetscCUSPIndices; stos_scatter = new struct _p_VecScatterCUSPIndices_StoS; /* create the "from" indices */ stos_scatter->fslots = 0; stos_scatter->fromFirst = 0; stos_scatter->fromStep = 0; if (n) { if (fslots) { /* allocate GPU memory for the to-slots */ err = cudaMalloc((void **)&intVecGPU,n*sizeof(PetscInt));CHKERRCUSP((int)err); err = cudaMemcpy(intVecGPU,fslots,n*sizeof(PetscInt),cudaMemcpyHostToDevice);CHKERRCUSP((int)err); /* assign the pointer to the struct */ stos_scatter->fslots = intVecGPU; stos_scatter->fromMode = VEC_SCATTER_CUSP_GENERAL; } else if (fromStep) { stos_scatter->fromFirst = fromFirst; stos_scatter->fromStep = fromStep; stos_scatter->fromMode = VEC_SCATTER_CUSP_STRIDED; } else { SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Must provide fslots or fromStep."); } } /* create the "to" indices */ stos_scatter->tslots = 0; stos_scatter->toFirst = 0; stos_scatter->toStep = 0; if (n) { if (tslots) { /* allocate GPU memory for the to-slots */ err = cudaMalloc((void **)&intVecGPU,n*sizeof(PetscInt));CHKERRCUSP((int)err); err = cudaMemcpy(intVecGPU,tslots,n*sizeof(PetscInt),cudaMemcpyHostToDevice);CHKERRCUSP((int)err); /* assign the pointer to 
the struct */ stos_scatter->tslots = intVecGPU; stos_scatter->toMode = VEC_SCATTER_CUSP_GENERAL; } else if (toStep) { stos_scatter->toFirst = toFirst; stos_scatter->toStep = toStep; stos_scatter->toMode = VEC_SCATTER_CUSP_STRIDED; } else { SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Must provide tslots or toStep."); } } /* allocate the stream variable */ err = cudaStreamCreate(&stream);CHKERRCUSP((int)err); stos_scatter->stream = stream; /* the number of indices */ stos_scatter->n = n; /* get the maximum number of coresident thread blocks */ cudaGetDevice(&device); cudaGetDeviceProperties(&props, device); stos_scatter->MAX_CORESIDENT_THREADS = props.maxThreadsPerMultiProcessor; if (props.major>=3) { stos_scatter->MAX_BLOCKS = 16*props.multiProcessorCount; } else { stos_scatter->MAX_BLOCKS = 8*props.multiProcessorCount; } /* assign the indices */ cci->scatter = (VecScatterCUSPIndices_StoS)stos_scatter; cci->scatterType = VEC_SCATTER_CUSP_STOS; *ci = cci; PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecScatterCUSPIndicesCreate_PtoP" PetscErrorCode VecScatterCUSPIndicesCreate_PtoP(PetscInt ns,PetscInt *sendIndices,PetscInt nr,PetscInt *recvIndices,PetscCUSPIndices *ci) { PetscCUSPIndices cci; VecScatterCUSPIndices_PtoP ptop_scatter; PetscFunctionBegin; cci = new struct _p_PetscCUSPIndices; ptop_scatter = new struct _p_VecScatterCUSPIndices_PtoP; /* this calculation assumes that the input indices are sorted */ ptop_scatter->ns = sendIndices[ns-1]-sendIndices[0]+1; ptop_scatter->sendLowestIndex = sendIndices[0]; ptop_scatter->nr = recvIndices[nr-1]-recvIndices[0]+1; ptop_scatter->recvLowestIndex = recvIndices[0]; /* assign indices */ cci->scatter = (VecScatterCUSPIndices_PtoP)ptop_scatter; cci->scatterType = VEC_SCATTER_CUSP_PTOP; *ci = cci; PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecScatterCUSPIndicesDestroy" PetscErrorCode VecScatterCUSPIndicesDestroy(PetscCUSPIndices *ci) { PetscFunctionBegin; if (!(*ci)) 
PetscFunctionReturn(0); try { if (ci) { if ((*ci)->scatterType == VEC_SCATTER_CUSP_PTOP) { delete (VecScatterCUSPIndices_PtoP)(*ci)->scatter; (*ci)->scatter = 0; } else { cudaError_t err = cudaSuccess; VecScatterCUSPIndices_StoS stos_scatter = (VecScatterCUSPIndices_StoS)(*ci)->scatter; if (stos_scatter->fslots) { err = cudaFree(stos_scatter->fslots);CHKERRCUSP((int)err); stos_scatter->fslots = 0; } /* free the GPU memory for the to-slots */ if (stos_scatter->tslots) { err = cudaFree(stos_scatter->tslots);CHKERRCUSP((int)err); stos_scatter->tslots = 0; } /* free the stream variable */ if (stos_scatter->stream) { err = cudaStreamDestroy(stos_scatter->stream);CHKERRCUSP((int)err); stos_scatter->stream = 0; } delete stos_scatter; (*ci)->scatter = 0; } delete *ci; *ci = 0; } } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } PetscFunctionReturn(0); } /* Insert operator */ class Insert { public: __device__ PetscScalar operator() (PetscScalar a,PetscScalar b) const { return a; } }; /* Add operator */ class Add { public: __device__ PetscScalar operator() (PetscScalar a,PetscScalar b) const { return a+b; } }; /* Add operator */ class Max { public: __device__ PetscScalar operator() (PetscScalar a,PetscScalar b) const { #if !defined(PETSC_USE_COMPLEX) return PetscMax(a,b); #endif } }; /* Sequential general to sequential general GPU kernel */ template<class OPERATOR> __global__ void VecScatterCUSP_SGtoSG_kernel(PetscInt n,PetscInt *xind,PetscScalar *x,PetscInt *yind,PetscScalar *y,OPERATOR OP) { const int tidx = blockIdx.x*blockDim.x + threadIdx.x; const int grid_size = gridDim.x * blockDim.x; for (int i = tidx; i < n; i += grid_size) { y[yind[i]] = OP(x[xind[i]],y[yind[i]]); } } /* Sequential general to sequential strided GPU kernel */ template<class OPERATOR> __global__ void VecScatterCUSP_SGtoSS_kernel(PetscInt n,PetscInt *xind,PetscScalar *x,PetscInt toFirst,PetscInt toStep,PetscScalar *y,OPERATOR OP) { const int tidx = 
blockIdx.x*blockDim.x + threadIdx.x; const int grid_size = gridDim.x * blockDim.x; for (int i = tidx; i < n; i += grid_size) { y[toFirst+i*toStep] = OP(x[xind[i]],y[toFirst+i*toStep]); } } /* Sequential strided to sequential strided GPU kernel */ template<class OPERATOR> __global__ void VecScatterCUSP_SStoSS_kernel(PetscInt n,PetscInt fromFirst,PetscInt fromStep,PetscScalar *x,PetscInt toFirst,PetscInt toStep,PetscScalar *y,OPERATOR OP) { const int tidx = blockIdx.x*blockDim.x + threadIdx.x; const int grid_size = gridDim.x * blockDim.x; for (int i = tidx; i < n; i += grid_size) { y[toFirst+i*toStep] = OP(x[fromFirst+i*fromStep],y[toFirst+i*toStep]); } } /* Sequential strided to sequential general GPU kernel */ template<class OPERATOR> __global__ void VecScatterCUSP_SStoSG_kernel(PetscInt n,PetscInt fromFirst,PetscInt fromStep,PetscScalar *x,PetscInt *yind,PetscScalar *y,OPERATOR OP) { const int tidx = blockIdx.x*blockDim.x + threadIdx.x; const int grid_size = gridDim.x * blockDim.x; for (int i = tidx; i < n; i += grid_size) { y[yind[i]] = OP(x[fromFirst+i*fromStep],y[yind[i]]); } } template<class OPERATOR> void VecScatterCUSP_StoS_Dispatcher(CUSPARRAY *xarray,CUSPARRAY *yarray,PetscCUSPIndices ci,ScatterMode mode,OPERATOR OP) { PetscInt nBlocks=0,nThreads=128; VecScatterCUSPIndices_StoS stos_scatter = (VecScatterCUSPIndices_StoS)ci->scatter; nBlocks=(int)ceil(((float) stos_scatter->n)/((float) nThreads))+1; if (nBlocks>stos_scatter->MAX_CORESIDENT_THREADS/nThreads) { nBlocks = stos_scatter->MAX_CORESIDENT_THREADS/nThreads; } dim3 block(nThreads,1,1); dim3 grid(nBlocks,1,1); if (mode == SCATTER_FORWARD) { if (stos_scatter->fromMode == VEC_SCATTER_CUSP_GENERAL && stos_scatter->toMode == VEC_SCATTER_CUSP_GENERAL) { VecScatterCUSP_SGtoSG_kernel<<<grid,block,0,stos_scatter->stream>>>(stos_scatter->n,stos_scatter->fslots,xarray->data().get(),stos_scatter->tslots,yarray->data().get(),OP); } else if (stos_scatter->fromMode == VEC_SCATTER_CUSP_GENERAL && 
stos_scatter->toMode == VEC_SCATTER_CUSP_STRIDED) { VecScatterCUSP_SGtoSS_kernel<<<grid,block,0,stos_scatter->stream>>>(stos_scatter->n,stos_scatter->fslots,xarray->data().get(),stos_scatter->toFirst,stos_scatter->toStep,yarray->data().get(),OP); } else if (stos_scatter->fromMode == VEC_SCATTER_CUSP_STRIDED && stos_scatter->toMode == VEC_SCATTER_CUSP_STRIDED) { VecScatterCUSP_SStoSS_kernel<<<grid,block,0,stos_scatter->stream>>>(stos_scatter->n,stos_scatter->fromFirst,stos_scatter->fromStep,xarray->data().get(),stos_scatter->toFirst,stos_scatter->toStep,yarray->data().get(),OP); } else if (stos_scatter->fromMode == VEC_SCATTER_CUSP_STRIDED && stos_scatter->toMode == VEC_SCATTER_CUSP_GENERAL) { VecScatterCUSP_SStoSG_kernel<<<grid,block,0,stos_scatter->stream>>>(stos_scatter->n,stos_scatter->fromFirst,stos_scatter->fromStep,xarray->data().get(),stos_scatter->tslots,yarray->data().get(),OP); } } else { if (stos_scatter->toMode == VEC_SCATTER_CUSP_GENERAL && stos_scatter->fromMode == VEC_SCATTER_CUSP_GENERAL) { VecScatterCUSP_SGtoSG_kernel<<<grid,block,0,stos_scatter->stream>>>(stos_scatter->n,stos_scatter->tslots,xarray->data().get(),stos_scatter->fslots,yarray->data().get(),OP); } else if (stos_scatter->toMode == VEC_SCATTER_CUSP_GENERAL && stos_scatter->fromMode == VEC_SCATTER_CUSP_STRIDED) { VecScatterCUSP_SGtoSS_kernel<<<grid,block,0,stos_scatter->stream>>>(stos_scatter->n,stos_scatter->tslots,xarray->data().get(),stos_scatter->fromFirst,stos_scatter->fromStep,yarray->data().get(),OP); } else if (stos_scatter->toMode == VEC_SCATTER_CUSP_STRIDED && stos_scatter->fromMode == VEC_SCATTER_CUSP_STRIDED) { VecScatterCUSP_SStoSS_kernel<<<grid,block,0,stos_scatter->stream>>>(stos_scatter->n,stos_scatter->toFirst,stos_scatter->toStep,xarray->data().get(),stos_scatter->fromFirst,stos_scatter->fromStep,yarray->data().get(),OP); } else if (stos_scatter->toMode == VEC_SCATTER_CUSP_STRIDED && stos_scatter->fromMode == VEC_SCATTER_CUSP_GENERAL) { 
VecScatterCUSP_SStoSG_kernel<<<grid,block,0,stos_scatter->stream>>>(stos_scatter->n,stos_scatter->toFirst,stos_scatter->toStep,xarray->data().get(),stos_scatter->fslots,yarray->data().get(),OP); } } } #undef __FUNCT__ #define __FUNCT__ "VecScatterCUSP_StoS" PetscErrorCode VecScatterCUSP_StoS(Vec x,Vec y,PetscCUSPIndices ci,InsertMode addv,ScatterMode mode) { PetscErrorCode ierr; CUSPARRAY *xarray,*yarray; VecScatterCUSPIndices_StoS stos_scatter = (VecScatterCUSPIndices_StoS)ci->scatter; cudaError_t err = cudaSuccess; PetscFunctionBegin; ierr = VecCUSPAllocateCheck(x);CHKERRQ(ierr); ierr = VecCUSPAllocateCheck(y);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(x,&xarray);CHKERRQ(ierr); ierr = VecCUSPGetArrayReadWrite(y,&yarray);CHKERRQ(ierr); if (stos_scatter->n) { if (addv == INSERT_VALUES) VecScatterCUSP_StoS_Dispatcher(xarray,yarray,ci,mode,Insert()); else if (addv == ADD_VALUES) VecScatterCUSP_StoS_Dispatcher(xarray,yarray,ci,mode,Add()); #if !defined(PETSC_USE_COMPLEX) else if (addv == MAX_VALUES) VecScatterCUSP_StoS_Dispatcher(xarray,yarray,ci,mode,Max()); #endif else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_UNKNOWN_TYPE,"Wrong insert option"); err = cudaGetLastError();CHKERRCUSP((int)err); err = cudaStreamSynchronize(stos_scatter->stream);CHKERRCUSP((int)err); } ierr = VecCUSPRestoreArrayRead(x,&xarray);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayReadWrite(y,&yarray);CHKERRQ(ierr); PetscFunctionReturn(0); }
e2139b539885c5c886b0594afc1245828c386ab5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" using namespace std; void showMatriz(int *matriz, int anchura, int altura); void generateSeeds(int *matriz, int ancho, int alto, int cantidad, char modo); void gestionSemillas(int *matriz, int ancho, int numeroSemillas, int alto, char modo); int checkFull(int *matriz, int tamano); bool checkMove(int *matriz, int ancho, int alto); void guardar(int vidas, int *tablero, int altura, int anchura, char dificultad); int* cargar(); int* MostrarEspecificaciones(); hipError_t cudaStatus; /* add_up * Funcin del kernel para sumar hacia arriba todos los nmeros que sean iguales. */ __device__ void stack_right(int *matriz, int anchura, int altura, int x, int y) { for (int i = anchura - 1; i > 0; i--) //realizaremos el desplazamiento celda a celda una altura-1 veces para gestionar la posibilidad del ultimo poniendose el primero de la lista { if ((y != anchura-1) && (matriz[x*anchura + y] != 0) && matriz[x*anchura + (y + 1)] == 0) //Si la celda pertenece a la primera fila, es 0 o su superior no es 0, no hace nada { matriz[x*anchura + (y + 1)] = matriz[x*anchura + y]; //Si lo es, desplazamos la celda matriz[x*anchura + y] = 0; } __syncthreads(); //utilizamos una sincronizacion para que estos pasos sean realizados a la vez por los hilos del bloque } } __device__ void add_right(int *matriz, int x, int y, int altura, int anchura) { if (y != anchura-1 && y < anchura) //Los primeros hilos de la derecha no deben realizar ninguna operacion pues sern modificados por los demas { if (matriz[x*anchura + y] != 0) //Si es distinto de 0, gestiona su posible suma o desplazamiento { if (matriz[x*anchura + y] == matriz[x*anchura + (y + 1)]) //Si es igual a su superior, se procede a comprobar el numero de celdas con el mismo numero que hay en esa columna { int iguales = 0; iguales++; for (int i = 1; y + i < anchura; i++) { if (matriz[x*anchura + y] == matriz[x*anchura + (y + i)]) { iguales++; 
} else { break; } } if (iguales % 2 == 0) //Si el numero es par, se suman, si no, ese numero ser mezclado con otro y no estar disponible { matriz[x*anchura + (y + 1)] = matriz[x*anchura + (y + 1)] * 2; matriz[x*anchura + y] = 0; } } else if (matriz[x*anchura + (y + 1)] == 0) // Se comprueba que otros hilos hayan dejado 0 en sus operaciones para desplazarse { matriz[x*anchura + (y + 1)] = matriz[x*anchura + y]; matriz[x*anchura + y] = 0; } } } } __global__ void mov_rightK(int *matriz, int anchura, int altura) { int x = threadIdx.x; int y = threadIdx.y; stack_right(matriz, anchura, altura, x, y); //Realizamos las llamadas de la siguiente manera para gestionar el movimiento: add_right(matriz, x, y, altura, anchura); //2 2 0 4 -> 4 4 0 0 __syncthreads(); stack_right(matriz, anchura, altura, x, y); }
e2139b539885c5c886b0594afc1245828c386ab5.cu
#include "includes.h" using namespace std; void showMatriz(int *matriz, int anchura, int altura); void generateSeeds(int *matriz, int ancho, int alto, int cantidad, char modo); void gestionSemillas(int *matriz, int ancho, int numeroSemillas, int alto, char modo); int checkFull(int *matriz, int tamano); bool checkMove(int *matriz, int ancho, int alto); void guardar(int vidas, int *tablero, int altura, int anchura, char dificultad); int* cargar(); int* MostrarEspecificaciones(); cudaError_t cudaStatus; /* add_up * Función del kernel para sumar hacia arriba todos los números que sean iguales. */ __device__ void stack_right(int *matriz, int anchura, int altura, int x, int y) { for (int i = anchura - 1; i > 0; i--) //realizaremos el desplazamiento celda a celda una altura-1 veces para gestionar la posibilidad del ultimo poniendose el primero de la lista { if ((y != anchura-1) && (matriz[x*anchura + y] != 0) && matriz[x*anchura + (y + 1)] == 0) //Si la celda pertenece a la primera fila, es 0 o su superior no es 0, no hace nada { matriz[x*anchura + (y + 1)] = matriz[x*anchura + y]; //Si lo es, desplazamos la celda matriz[x*anchura + y] = 0; } __syncthreads(); //utilizamos una sincronizacion para que estos pasos sean realizados a la vez por los hilos del bloque } } __device__ void add_right(int *matriz, int x, int y, int altura, int anchura) { if (y != anchura-1 && y < anchura) //Los primeros hilos de la derecha no deben realizar ninguna operacion pues serán modificados por los demas { if (matriz[x*anchura + y] != 0) //Si es distinto de 0, gestiona su posible suma o desplazamiento { if (matriz[x*anchura + y] == matriz[x*anchura + (y + 1)]) //Si es igual a su superior, se procede a comprobar el numero de celdas con el mismo numero que hay en esa columna { int iguales = 0; iguales++; for (int i = 1; y + i < anchura; i++) { if (matriz[x*anchura + y] == matriz[x*anchura + (y + i)]) { iguales++; } else { break; } } if (iguales % 2 == 0) //Si el numero es par, se suman, si no, 
ese numero será mezclado con otro y no estará disponible { matriz[x*anchura + (y + 1)] = matriz[x*anchura + (y + 1)] * 2; matriz[x*anchura + y] = 0; } } else if (matriz[x*anchura + (y + 1)] == 0) // Se comprueba que otros hilos hayan dejado 0 en sus operaciones para desplazarse { matriz[x*anchura + (y + 1)] = matriz[x*anchura + y]; matriz[x*anchura + y] = 0; } } } } __global__ void mov_rightK(int *matriz, int anchura, int altura) { int x = threadIdx.x; int y = threadIdx.y; stack_right(matriz, anchura, altura, x, y); //Realizamos las llamadas de la siguiente manera para gestionar el movimiento: add_right(matriz, x, y, altura, anchura); //2 2 0 4 -> 4 4 0 0 __syncthreads(); stack_right(matriz, anchura, altura, x, y); }
abfe987dee39f180d9f4c411239a22ebb6cf449c.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "vec_floorf.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; size_t n = XSIZE*YSIZE; float *result = NULL; hipMalloc(&result, XSIZE*YSIZE); float *x = NULL; hipMalloc(&x, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( vec_floorf), dim3(gridBlock),dim3(threadBlock), 0, 0, n,result,x); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( vec_floorf), dim3(gridBlock),dim3(threadBlock), 0, 0, n,result,x); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( vec_floorf), dim3(gridBlock),dim3(threadBlock), 0, 0, n,result,x); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' 
<< endl; } }}
abfe987dee39f180d9f4c411239a22ebb6cf449c.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "vec_floorf.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; size_t n = XSIZE*YSIZE; float *result = NULL; cudaMalloc(&result, XSIZE*YSIZE); float *x = NULL; cudaMalloc(&x, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); vec_floorf<<<gridBlock,threadBlock>>>(n,result,x); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { vec_floorf<<<gridBlock,threadBlock>>>(n,result,x); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { vec_floorf<<<gridBlock,threadBlock>>>(n,result,x); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
d4e0b44e1ceebecfa3be530c115b9cf9def74063.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include<stdio.h> #include<stdlib.h> __global__ void addmat(int *a, int *b, int *c) { int n= threadIdx.x,m=blockIdx.x, size=blockDim.x; c[m*size+n]=a[m*size+n]+b[m*size+n]; } __global__ void addrow (int *A, int *B, int *C,int n) { int idx = threadIdx.x; printf("idx = %d\n", idx); for (int i = 0; i < n; ++i) { C[i + n * idx] = A[i + n * idx] + B[i + n * idx]; } } __global__ void addcol(int *A, int *B, int *C,int m) { int idx = threadIdx.x; int x=blockDim.x; printf("idx = %d\n", idx); for (int i = 0; i < m; ++i) { C[ i*x + idx] = A[ i*x + idx] + B[ i*x + idx]; } } int main(void) { int a[8]={1,2,3,4,5,6,1,2},b[8]={1,2,3,4,5,6,1,2},*c,*c1,*c2,m=4,n=2,i,j; int *d_a,*d_b,*d_c,*d_c1,*d_c2; int size=sizeof(int)*m*n; c=(int*)malloc(m*n*sizeof(int)); c1=(int*)malloc(m*n*sizeof(int)); c2=(int*)malloc(m*n*sizeof(int)); hipMalloc((void**)&d_a,size); hipMalloc((void**)&d_b,size); hipMalloc((void**)&d_c,size); hipMalloc((void**)&d_c1,size); hipMalloc((void**)&d_c2,size); hipMemcpy(d_a,a,size,hipMemcpyHostToDevice); hipMemcpy(d_b,b,size,hipMemcpyHostToDevice); hipLaunchKernelGGL(( addrow), dim3(1), dim3(m), 0, 0, d_a, d_b, d_c,n); hipMemcpy(c,d_c,size,hipMemcpyDeviceToHost); printf("Result matrix using computation using each row is:\n"); for(i=0;i<m;i++) { for(j=0;j<n;j++) printf("%d\t",c[i*n+j]); printf("\n"); } hipMemcpy(d_a,a,size,hipMemcpyHostToDevice); hipMemcpy(d_b,b,size,hipMemcpyHostToDevice); hipLaunchKernelGGL(( addcol), dim3(1),dim3(n), 0, 0, d_a,d_b,d_c2,m); hipMemcpy(c2,d_c2,size,hipMemcpyDeviceToHost); printf("Result matrix using computation using each column is:\n"); for(i=0;i<m;i++) { for(j=0;j<n;j++) printf("%d\t",c2[i*n+j]); printf("\n"); } hipMemcpy(d_a,a,size,hipMemcpyHostToDevice); hipMemcpy(d_b,b,size,hipMemcpyHostToDevice); hipLaunchKernelGGL(( addmat), dim3(m),dim3(n), 0, 0, d_a,d_b,d_c1); 
hipMemcpy(c1,d_c1,size,hipMemcpyDeviceToHost); printf("Result matrix using computation using each element is:\n"); for(i=0;i<m;i++) { for(j=0;j<n;j++) printf("%d\t",c1[i*n+j]); printf("\n"); } getchar(); hipFree(d_a); hipFree(d_b); hipFree(d_c); hipFree(d_c1); return 0; }
d4e0b44e1ceebecfa3be530c115b9cf9def74063.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include<stdio.h> #include<stdlib.h> __global__ void addmat(int *a, int *b, int *c) { int n= threadIdx.x,m=blockIdx.x, size=blockDim.x; c[m*size+n]=a[m*size+n]+b[m*size+n]; } __global__ void addrow (int *A, int *B, int *C,int n) { int idx = threadIdx.x; printf("idx = %d\n", idx); for (int i = 0; i < n; ++i) { C[i + n * idx] = A[i + n * idx] + B[i + n * idx]; } } __global__ void addcol(int *A, int *B, int *C,int m) { int idx = threadIdx.x; int x=blockDim.x; printf("idx = %d\n", idx); for (int i = 0; i < m; ++i) { C[ i*x + idx] = A[ i*x + idx] + B[ i*x + idx]; } } int main(void) { int a[8]={1,2,3,4,5,6,1,2},b[8]={1,2,3,4,5,6,1,2},*c,*c1,*c2,m=4,n=2,i,j; int *d_a,*d_b,*d_c,*d_c1,*d_c2; int size=sizeof(int)*m*n; c=(int*)malloc(m*n*sizeof(int)); c1=(int*)malloc(m*n*sizeof(int)); c2=(int*)malloc(m*n*sizeof(int)); cudaMalloc((void**)&d_a,size); cudaMalloc((void**)&d_b,size); cudaMalloc((void**)&d_c,size); cudaMalloc((void**)&d_c1,size); cudaMalloc((void**)&d_c2,size); cudaMemcpy(d_a,a,size,cudaMemcpyHostToDevice); cudaMemcpy(d_b,b,size,cudaMemcpyHostToDevice); addrow<<<1, m>>>(d_a, d_b, d_c,n); cudaMemcpy(c,d_c,size,cudaMemcpyDeviceToHost); printf("Result matrix using computation using each row is:\n"); for(i=0;i<m;i++) { for(j=0;j<n;j++) printf("%d\t",c[i*n+j]); printf("\n"); } cudaMemcpy(d_a,a,size,cudaMemcpyHostToDevice); cudaMemcpy(d_b,b,size,cudaMemcpyHostToDevice); addcol<<<1,n>>>(d_a,d_b,d_c2,m); cudaMemcpy(c2,d_c2,size,cudaMemcpyDeviceToHost); printf("Result matrix using computation using each column is:\n"); for(i=0;i<m;i++) { for(j=0;j<n;j++) printf("%d\t",c2[i*n+j]); printf("\n"); } cudaMemcpy(d_a,a,size,cudaMemcpyHostToDevice); cudaMemcpy(d_b,b,size,cudaMemcpyHostToDevice); addmat<<<m,n>>>(d_a,d_b,d_c1); cudaMemcpy(c1,d_c1,size,cudaMemcpyDeviceToHost); printf("Result matrix using computation using each element is:\n"); for(i=0;i<m;i++) { for(j=0;j<n;j++) printf("%d\t",c1[i*n+j]); printf("\n"); } 
getchar(); cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); cudaFree(d_c1); return 0; }
51afe41bf32d109dcbce5daed3e693d6c5e66565.hip
// !!! This is a file automatically generated by hipify!!! #include "pscan.h" #include <iostream> #include <hip/hip_runtime.h> #include "cuda_utils.h" static const int THREADS_PER_BLOCK = 128; static const int ELEMENTS_PER_BLOCK = THREADS_PER_BLOCK * 2; __host__ int nextPowerOfTwo(int x) { int power = 1; while (power < x) { power *= 2; } return power; } __global__ void prescan_small_kernel(int *input, int *output, int n, int pow2) { extern __shared__ int buffer[]; const int threadID = threadIdx.x; if (threadID < n) { buffer[2 * threadID] = input[2 * threadID]; buffer[2 * threadID + 1] = input[2 * threadID + 1]; } else { buffer[2 * threadID] = 0.0; buffer[2 * threadID + 1] = 0.0; } int offset = 1; for (int d = pow2 >> 1; d > 0; d >>= 1) { __syncthreads(); if (threadID < d) { const int ai = offset * (2 * threadID + 1) - 1; const int bi = offset * (2 * threadID + 2) - 1; buffer[bi] += buffer[ai]; } offset *= 2; } if (threadID == 0) { buffer[pow2 - 1] = 0; } for (int d = 1; d < pow2; d *= 2) { offset >>= 1; __syncthreads(); if (threadID < d) { const int ai = offset * (2 * threadID + 1) - 1; const int bi = offset * (2 * threadID + 2) - 1; const int t = buffer[ai]; buffer[ai] = buffer[bi]; buffer[bi] += t; } } __syncthreads(); if (threadID < n) { output[2 * threadID] = buffer[2 * threadID]; output[2 * threadID + 1] = buffer[2 * threadID + 1]; } } __global__ void prescan_large_kernel(int *input, int *output, int n, int *sums) { const int blockID = blockIdx.x; const int threadID = threadIdx.x; const int blockOffset = blockID * n; extern __shared__ int buffer[]; buffer[2 * threadID] = input[blockOffset + (2 * threadID)]; buffer[2 * threadID + 1] = input[blockOffset + (2 * threadID + 1)]; int offset = 1; for (int d = n >> 1; d > 0; d >>= 1) { __syncthreads(); if (threadID < d) { const int ai = offset * (2 * threadID + 1) - 1; const int bi = offset * (2 * threadID + 2) - 1; buffer[bi] += buffer[ai]; } offset *= 2; } __syncthreads(); if (threadID == 0) { sums[blockID] = 
buffer[n - 1]; buffer[n - 1] = 0; } for (int d = 1; d < n; d *= 2) { offset >>= 1; __syncthreads(); if (threadID < d) { const int ai = offset * (2 * threadID + 1) - 1; const int bi = offset * (2 * threadID + 2) - 1; const int t = buffer[ai]; buffer[ai] = buffer[bi]; buffer[bi] += t; } } __syncthreads(); output[blockOffset + (2 * threadID)] = buffer[2 * threadID]; output[blockOffset + (2 * threadID + 1)] = buffer[2 * threadID + 1]; } __global__ void add(int *output, int length, int *n) { const int blockID = blockIdx.x; const int threadID = threadIdx.x; const int blockOffset = blockID * length; output[blockOffset + threadID] += n[blockID]; } __global__ void add(int *output, int length, int *n1, int *n2) { const int blockID = blockIdx.x; const int threadID = threadIdx.x; const int blockOffset = blockID * length; output[blockOffset + threadID] += n1[blockID] + n2[blockID]; } void prescan_small(int *d_in, int *d_out, int n, int dev_id = 0, hipStream_t stream = 0) { const int pow2 = nextPowerOfTwo(n); hipSetDevice(dev_id); hipLaunchKernelGGL(( prescan_small_kernel), dim3(1), dim3((n + 1) / 2), 2 * pow2 * sizeof(int), stream, d_in, d_out, n, pow2); CUDA_CHECK_ERRORS(); } void prescan_large(int *d_in, int *d_out, int n, int dev_id = 0, hipStream_t stream = 0) { const int blocks = (n + ELEMENTS_PER_BLOCK - 1) / ELEMENTS_PER_BLOCK; const int sharedSize = ELEMENTS_PER_BLOCK * sizeof(int); hipSetDevice(dev_id); int *d_sums, *d_incr; hipMalloc((void **)&d_sums, blocks * sizeof(int)); hipMalloc((void **)&d_incr, blocks * sizeof(int)); hipLaunchKernelGGL(( prescan_large_kernel), dim3(blocks), dim3(THREADS_PER_BLOCK), 2 * sharedSize, stream, d_in, d_out, ELEMENTS_PER_BLOCK, d_sums); const int sumThreadsNeeded = (blocks + 1) / 2; if (sumThreadsNeeded > THREADS_PER_BLOCK) { prescan_large(d_sums, d_incr, blocks, dev_id, stream); } else { prescan_small(d_sums, d_incr, blocks, dev_id, stream); } hipLaunchKernelGGL(( add), dim3(blocks), dim3(ELEMENTS_PER_BLOCK), 0, stream, d_out, 
ELEMENTS_PER_BLOCK, d_incr); hipSetDevice(dev_id); hipFree(d_sums); hipFree(d_incr); CUDA_CHECK_ERRORS(); } void prescan(int *d_in, int *d_out, int size, int dev_id, hipStream_t stream) { const size_t residue = size % ELEMENTS_PER_BLOCK; if (size < ELEMENTS_PER_BLOCK) { prescan_small(d_in, d_out, size, dev_id, stream); } else if (residue == 0) { prescan_large(d_in, d_out, size, dev_id, stream); } else { const size_t tail = size - residue; prescan_large(d_in, d_out, tail, dev_id, stream); prescan_small(&d_in[tail], &d_out[tail], residue, dev_id, stream); hipLaunchKernelGGL(( add), dim3(1), dim3(residue), 0, stream, &d_out[tail], residue, &d_in[tail - 1], &d_out[tail - 1]); } CUDA_CHECK_ERRORS(); }
51afe41bf32d109dcbce5daed3e693d6c5e66565.cu
#include "pscan.h" #include <iostream> #include <cuda_runtime.h> #include "cuda_utils.h" static const int THREADS_PER_BLOCK = 128; static const int ELEMENTS_PER_BLOCK = THREADS_PER_BLOCK * 2; __host__ int nextPowerOfTwo(int x) { int power = 1; while (power < x) { power *= 2; } return power; } __global__ void prescan_small_kernel(int *input, int *output, int n, int pow2) { extern __shared__ int buffer[]; const int threadID = threadIdx.x; if (threadID < n) { buffer[2 * threadID] = input[2 * threadID]; buffer[2 * threadID + 1] = input[2 * threadID + 1]; } else { buffer[2 * threadID] = 0.0; buffer[2 * threadID + 1] = 0.0; } int offset = 1; for (int d = pow2 >> 1; d > 0; d >>= 1) { __syncthreads(); if (threadID < d) { const int ai = offset * (2 * threadID + 1) - 1; const int bi = offset * (2 * threadID + 2) - 1; buffer[bi] += buffer[ai]; } offset *= 2; } if (threadID == 0) { buffer[pow2 - 1] = 0; } for (int d = 1; d < pow2; d *= 2) { offset >>= 1; __syncthreads(); if (threadID < d) { const int ai = offset * (2 * threadID + 1) - 1; const int bi = offset * (2 * threadID + 2) - 1; const int t = buffer[ai]; buffer[ai] = buffer[bi]; buffer[bi] += t; } } __syncthreads(); if (threadID < n) { output[2 * threadID] = buffer[2 * threadID]; output[2 * threadID + 1] = buffer[2 * threadID + 1]; } } __global__ void prescan_large_kernel(int *input, int *output, int n, int *sums) { const int blockID = blockIdx.x; const int threadID = threadIdx.x; const int blockOffset = blockID * n; extern __shared__ int buffer[]; buffer[2 * threadID] = input[blockOffset + (2 * threadID)]; buffer[2 * threadID + 1] = input[blockOffset + (2 * threadID + 1)]; int offset = 1; for (int d = n >> 1; d > 0; d >>= 1) { __syncthreads(); if (threadID < d) { const int ai = offset * (2 * threadID + 1) - 1; const int bi = offset * (2 * threadID + 2) - 1; buffer[bi] += buffer[ai]; } offset *= 2; } __syncthreads(); if (threadID == 0) { sums[blockID] = buffer[n - 1]; buffer[n - 1] = 0; } for (int d = 1; d < n; d *= 2) { 
offset >>= 1; __syncthreads(); if (threadID < d) { const int ai = offset * (2 * threadID + 1) - 1; const int bi = offset * (2 * threadID + 2) - 1; const int t = buffer[ai]; buffer[ai] = buffer[bi]; buffer[bi] += t; } } __syncthreads(); output[blockOffset + (2 * threadID)] = buffer[2 * threadID]; output[blockOffset + (2 * threadID + 1)] = buffer[2 * threadID + 1]; } __global__ void add(int *output, int length, int *n) { const int blockID = blockIdx.x; const int threadID = threadIdx.x; const int blockOffset = blockID * length; output[blockOffset + threadID] += n[blockID]; } __global__ void add(int *output, int length, int *n1, int *n2) { const int blockID = blockIdx.x; const int threadID = threadIdx.x; const int blockOffset = blockID * length; output[blockOffset + threadID] += n1[blockID] + n2[blockID]; } void prescan_small(int *d_in, int *d_out, int n, int dev_id = 0, cudaStream_t stream = 0) { const int pow2 = nextPowerOfTwo(n); cudaSetDevice(dev_id); prescan_small_kernel<<<1, (n + 1) / 2, 2 * pow2 * sizeof(int), stream>>>(d_in, d_out, n, pow2); CUDA_CHECK_ERRORS(); } void prescan_large(int *d_in, int *d_out, int n, int dev_id = 0, cudaStream_t stream = 0) { const int blocks = (n + ELEMENTS_PER_BLOCK - 1) / ELEMENTS_PER_BLOCK; const int sharedSize = ELEMENTS_PER_BLOCK * sizeof(int); cudaSetDevice(dev_id); int *d_sums, *d_incr; cudaMalloc((void **)&d_sums, blocks * sizeof(int)); cudaMalloc((void **)&d_incr, blocks * sizeof(int)); prescan_large_kernel<<<blocks, THREADS_PER_BLOCK, 2 * sharedSize, stream>>>( d_in, d_out, ELEMENTS_PER_BLOCK, d_sums); const int sumThreadsNeeded = (blocks + 1) / 2; if (sumThreadsNeeded > THREADS_PER_BLOCK) { prescan_large(d_sums, d_incr, blocks, dev_id, stream); } else { prescan_small(d_sums, d_incr, blocks, dev_id, stream); } add<<<blocks, ELEMENTS_PER_BLOCK, 0, stream>>>(d_out, ELEMENTS_PER_BLOCK, d_incr); cudaSetDevice(dev_id); cudaFree(d_sums); cudaFree(d_incr); CUDA_CHECK_ERRORS(); } void prescan(int *d_in, int *d_out, int size, int 
dev_id, cudaStream_t stream) { const size_t residue = size % ELEMENTS_PER_BLOCK; if (size < ELEMENTS_PER_BLOCK) { prescan_small(d_in, d_out, size, dev_id, stream); } else if (residue == 0) { prescan_large(d_in, d_out, size, dev_id, stream); } else { const size_t tail = size - residue; prescan_large(d_in, d_out, tail, dev_id, stream); prescan_small(&d_in[tail], &d_out[tail], residue, dev_id, stream); add<<<1, residue, 0, stream>>>(&d_out[tail], residue, &d_in[tail - 1], &d_out[tail - 1]); } CUDA_CHECK_ERRORS(); }
36736b44bf0794f064fee800468f7bb236fc603e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright 2020,2021 Sony Corporation. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <nbla/cuda/common.hpp> #include <nbla/cuda/solver/rmsprop_graves.hpp> #include "./clip_grad.cuh" #include "./mixed_precision_training.cuh" #include "./weight_decay.cuh" namespace nbla { template <typename T> __global__ void kernel_rmsprop_graves_update(const int num, T *data, const T *grad, T *n, T *g, T *d, const float lr, const float decay, const float momentum, const float eps) { NBLA_CUDA_KERNEL_LOOP(idx, num) { n[idx] = decay * n[idx] + (1 - decay) * grad[idx] * grad[idx]; g[idx] = decay * g[idx] + (1 - decay) * grad[idx]; d[idx] = (momentum)*d[idx] - lr * grad[idx] / (sqrt(n[idx] - g[idx] * g[idx] + eps)); data[idx] += d[idx]; } } template <typename T> void RMSpropGravesCuda<T>::update_impl(const string &key, VariablePtr param) { Size_t size = param->size(); auto &state = this->states_.at(key); VariablePtr s1 = state.pstate["n"]; VariablePtr s2 = state.pstate["g"]; VariablePtr s3 = state.pstate["d"]; T *n = s1->cast_data_and_get_pointer<T>(this->ctx_); T *g = s2->cast_data_and_get_pointer<T>(this->ctx_); T *d = s3->cast_data_and_get_pointer<T>(this->ctx_); const T *grad = param->get_grad_pointer<T>(this->ctx_); T *data = param->cast_data_and_get_pointer<T>(this->ctx_); NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_rmsprop_graves_update, size, data, grad, n, g, d, this->lr_, this->decay_, 
this->momentum_, this->eps_); auto &t = state.t; t = ::min(t + 1, std::numeric_limits<uint32_t>::max() - 1); } NBLA_DEF_WEIGHT_DECAY(RMSpropGravesCuda, weight_decay_cuda); NBLA_DEF_CLIP_GRAD_BY_NORM(RMSpropGravesCuda, clip_grad_by_norm_cuda); NBLA_DEF_CHECK_INF_GRAD(RMSpropGravesCuda, check_inf_grad_cuda); NBLA_DEF_CHECK_NAN_GRAD(RMSpropGravesCuda, check_nan_grad_cuda); NBLA_DEF_CHECK_INF_OR_NAN_GRAD(RMSpropGravesCuda, check_inf_or_nan_grad_cuda); NBLA_DEF_SCALE_GRAD(RMSpropGravesCuda, scale_grad_impl_cuda); } // namespace nbla
36736b44bf0794f064fee800468f7bb236fc603e.cu
// Copyright 2020,2021 Sony Corporation. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <nbla/cuda/common.hpp> #include <nbla/cuda/solver/rmsprop_graves.hpp> #include "./clip_grad.cuh" #include "./mixed_precision_training.cuh" #include "./weight_decay.cuh" namespace nbla { template <typename T> __global__ void kernel_rmsprop_graves_update(const int num, T *data, const T *grad, T *n, T *g, T *d, const float lr, const float decay, const float momentum, const float eps) { NBLA_CUDA_KERNEL_LOOP(idx, num) { n[idx] = decay * n[idx] + (1 - decay) * grad[idx] * grad[idx]; g[idx] = decay * g[idx] + (1 - decay) * grad[idx]; d[idx] = (momentum)*d[idx] - lr * grad[idx] / (sqrt(n[idx] - g[idx] * g[idx] + eps)); data[idx] += d[idx]; } } template <typename T> void RMSpropGravesCuda<T>::update_impl(const string &key, VariablePtr param) { Size_t size = param->size(); auto &state = this->states_.at(key); VariablePtr s1 = state.pstate["n"]; VariablePtr s2 = state.pstate["g"]; VariablePtr s3 = state.pstate["d"]; T *n = s1->cast_data_and_get_pointer<T>(this->ctx_); T *g = s2->cast_data_and_get_pointer<T>(this->ctx_); T *d = s3->cast_data_and_get_pointer<T>(this->ctx_); const T *grad = param->get_grad_pointer<T>(this->ctx_); T *data = param->cast_data_and_get_pointer<T>(this->ctx_); NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_rmsprop_graves_update, size, data, grad, n, g, d, this->lr_, this->decay_, this->momentum_, this->eps_); auto &t = state.t; t = std::min(t + 1, 
std::numeric_limits<uint32_t>::max() - 1); } NBLA_DEF_WEIGHT_DECAY(RMSpropGravesCuda, weight_decay_cuda); NBLA_DEF_CLIP_GRAD_BY_NORM(RMSpropGravesCuda, clip_grad_by_norm_cuda); NBLA_DEF_CHECK_INF_GRAD(RMSpropGravesCuda, check_inf_grad_cuda); NBLA_DEF_CHECK_NAN_GRAD(RMSpropGravesCuda, check_nan_grad_cuda); NBLA_DEF_CHECK_INF_OR_NAN_GRAD(RMSpropGravesCuda, check_inf_or_nan_grad_cuda); NBLA_DEF_SCALE_GRAD(RMSpropGravesCuda, scale_grad_impl_cuda); } // namespace nbla
a34692f914560eda5b632246bb8aaa631bad73ff.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> __global__ void cuda_hello(){ printf("Hello from your GPU\n"); } int main(void){ hipLaunchKernelGGL(( cuda_hello), dim3(1),dim3(1), 0, 0, ); return 0; }
a34692f914560eda5b632246bb8aaa631bad73ff.cu
#include <stdio.h> __global__ void cuda_hello(){ printf("Hello from your GPU\n"); } int main(void){ cuda_hello<<<1,1>>>(); return 0; }
489ffc8d9bdccfeae5665b68fc4f33283f7cbe85.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * @copyright (c) 2012- King Abdullah University of Science and * Technology (KAUST). All rights reserved. **/ /** * @file src/blas_l2/chemv_offset.cu * KBLAS is a high performance CUDA library for subset of BLAS * and LAPACK routines optimized for NVIDIA GPUs. * KBLAS is provided by KAUST. * * @version 2.0.0 * @author Ahmad Abdelfattah * @date 2017-11-13 **/ #include "syhemv_offset_core.cuh" #if(SM >= 30) #define chemv_upper_bs (32) #define chemv_upper_ty (2) #define chemv_upper_by (2) #define chemv_lower_bs (32) #define chemv_lower_ty (8) #define chemv_lower_by (2) #else #define chemv_upper_bs (64) #define chemv_upper_ty (8) #define chemv_upper_by (2) #define chemv_lower_bs (32) #define chemv_lower_ty (4) #define chemv_lower_by (2) #endif /*************************************************************************************/ int kblas_chemv_offset_driver( char uplo, int m, cuFloatComplex alpha, cuFloatComplex *dA, int lda, cuFloatComplex *dX, int incx, cuFloatComplex beta, cuFloatComplex *dY, int incy, int offset, hipStream_t stream = 0) { // handle the case when incx and/or incy is -ve if(incx < 0) dX -= (m-1) * incx; if(incy < 0) dY -= (m-1) * incy; if(uplo == 'U' || uplo == 'u') { /** configuration params **/ const int chemv_bs = chemv_upper_bs; const int thread_x = chemv_bs; const int thread_y = chemv_upper_ty; const int elements_per_thread = (chemv_bs/(2*thread_y)) ; /** end configuration params **/ /** offset necessary calculation **/ int offset_ = offset % chemv_bs; int total_blocks_skipped = offset / chemv_bs; int my_skipped_blocks = total_blocks_skipped/ngpus; if(gpu_gid < (total_blocks_skipped%ngpus)) my_skipped_blocks += 1; int ref_gpu = total_blocks_skipped%ngpus; int new_gpu_gid = (gpu_gid - ref_gpu + ngpus) % ngpus; // Advance pointers accordingly dA += my_skipped_blocks * chemv_bs * lda; dA += total_blocks_skipped * chemv_bs; dX += total_blocks_skipped * 
chemv_bs * incx; dY += total_blocks_skipped * chemv_bs * incy; m -= total_blocks_skipped * chemv_bs; /** end offset necessary calculation **/ int mod = m % chemv_bs; int nstripes = m / chemv_bs + (mod != 0); int blocks = nstripes/ngpus; if(new_gpu_gid < (nstripes%ngpus) ) blocks += 1; dim3 dimBlock(thread_x, thread_y); dim3 dimGrid(blocks,1); dim3 dimGrid_(blocks, chemv_upper_by); if(blocks == 0) return 0; if(mod == 0) { hipLaunchKernelGGL(( syhemvu_special_d_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread>), dim3(dimGrid), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, nstripes, offset_); hipLaunchKernelGGL(( syhemvu_special_nd_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, nstripes, offset_); } else { hipLaunchKernelGGL(( syhemvu_generic_d_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread>), dim3(dimGrid), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, nstripes, offset_); const int irregular_part = mod % elements_per_thread; /** * The upper case kernel for irregular dimensions has an extra template parameter. * This parameter must be among the values listed in the switch-case statement below. * The possible values are in the range 0 - (elements_per_thread-1) * Make sure these values are updated whenever you change the configuration parameters. 
**/ switch(irregular_part) { case 0:hipLaunchKernelGGL(( syhemvu_generic_nd_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread, 0>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, nstripes, offset_); break; case 1:hipLaunchKernelGGL(( syhemvu_generic_nd_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread, 1>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, nstripes, offset_); break; case 2:hipLaunchKernelGGL(( syhemvu_generic_nd_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread, 2>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, nstripes, offset_); break; case 3:hipLaunchKernelGGL(( syhemvu_generic_nd_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread, 3>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, nstripes, offset_); break; case 4:hipLaunchKernelGGL(( syhemvu_generic_nd_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread, 4>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, nstripes, offset_); break; case 5:hipLaunchKernelGGL(( syhemvu_generic_nd_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread, 5>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, nstripes, offset_); break; case 6:hipLaunchKernelGGL(( syhemvu_generic_nd_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread, 6>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, nstripes, offset_); break; case 7:hipLaunchKernelGGL(( syhemvu_generic_nd_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread, 7>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, nstripes, offset_); break; 
case 8:hipLaunchKernelGGL(( syhemvu_generic_nd_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread, 8>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, nstripes, offset_); break; // return error otherwise: default: printf("CHEMV-UPPER ERROR: improper template parameter. Please read the inline documentation for this function. \n"); return -1; } } } else if(uplo == 'L' || uplo == 'l') { /** configuration params **/ const int chemv_bs = chemv_lower_bs; const int thread_x = chemv_bs; const int thread_y = chemv_lower_ty; const int elements_per_thread = (chemv_bs/(2*thread_y)) ; /** end configuration params **/ /** offset necessary calculation **/ int offset_ = offset % chemv_bs; int total_blocks_skipped = offset / chemv_bs; int my_skipped_blocks = total_blocks_skipped/ngpus; if(gpu_gid < (total_blocks_skipped%ngpus)) my_skipped_blocks += 1; int ref_gpu = total_blocks_skipped%ngpus; int new_gpu_gid = (gpu_gid - ref_gpu + ngpus) % ngpus; // Advance pointers accordingly dA += my_skipped_blocks * chemv_bs * lda; dA += total_blocks_skipped * chemv_bs; dX += total_blocks_skipped * chemv_bs * incx; dY += total_blocks_skipped * chemv_bs * incy; m -= total_blocks_skipped * chemv_bs; /** end offset necessary calculation **/ int mod = m % chemv_bs; int nstripes = m / chemv_bs + (mod != 0); int blocks = nstripes/ngpus; if(new_gpu_gid < (nstripes%ngpus) ) blocks += 1; dim3 dimBlock(thread_x, thread_y); dim3 dimGrid(blocks,1); dim3 dimGrid_(blocks, chemv_lower_by); if(blocks == 0) return 0; if(mod == 0) { hipLaunchKernelGGL(( syhemvl_special_d_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread>), dim3(dimGrid), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, nstripes, offset_); hipLaunchKernelGGL(( syhemvl_special_nd_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, 
incy, nstripes, offset_); } else { hipLaunchKernelGGL(( syhemvl_generic_d_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread>), dim3(dimGrid), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, nstripes, offset_); hipLaunchKernelGGL(( syhemvl_generic_nd_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, nstripes, offset_); } } else{printf("Upper/Lower mode %c is not supported \n", uplo); return -1;} return 0; } /*************************************************************************************/ extern "C" int kblas_chemv_offset( char uplo, int m, cuFloatComplex alpha, cuFloatComplex *dA, int lda, cuFloatComplex *dX, int incx, cuFloatComplex beta, cuFloatComplex *dY, int incy, int offset) { return kblas_chemv_offset_driver(uplo, m, alpha, dA, lda, dX, incx, beta, dY, incy, offset); } /*************************************************************************************/ extern "C" int kblas_chemv_offset_async( char uplo, int m, cuFloatComplex alpha, cuFloatComplex *dA, int lda, cuFloatComplex *dX, int incx, cuFloatComplex beta, cuFloatComplex *dY, int incy, int offset, hipStream_t stream) { return kblas_chemv_offset_driver(uplo, m, alpha, dA, lda, dX, incx, beta, dY, incy, offset, stream); } /*************************************************************************************/
489ffc8d9bdccfeae5665b68fc4f33283f7cbe85.cu
/** * @copyright (c) 2012- King Abdullah University of Science and * Technology (KAUST). All rights reserved. **/ /** * @file src/blas_l2/chemv_offset.cu * KBLAS is a high performance CUDA library for subset of BLAS * and LAPACK routines optimized for NVIDIA GPUs. * KBLAS is provided by KAUST. * * @version 2.0.0 * @author Ahmad Abdelfattah * @date 2017-11-13 **/ #include "syhemv_offset_core.cuh" #if(SM >= 30) #define chemv_upper_bs (32) #define chemv_upper_ty (2) #define chemv_upper_by (2) #define chemv_lower_bs (32) #define chemv_lower_ty (8) #define chemv_lower_by (2) #else #define chemv_upper_bs (64) #define chemv_upper_ty (8) #define chemv_upper_by (2) #define chemv_lower_bs (32) #define chemv_lower_ty (4) #define chemv_lower_by (2) #endif /*************************************************************************************/ int kblas_chemv_offset_driver( char uplo, int m, cuFloatComplex alpha, cuFloatComplex *dA, int lda, cuFloatComplex *dX, int incx, cuFloatComplex beta, cuFloatComplex *dY, int incy, int offset, cudaStream_t stream = 0) { // handle the case when incx and/or incy is -ve if(incx < 0) dX -= (m-1) * incx; if(incy < 0) dY -= (m-1) * incy; if(uplo == 'U' || uplo == 'u') { /** configuration params **/ const int chemv_bs = chemv_upper_bs; const int thread_x = chemv_bs; const int thread_y = chemv_upper_ty; const int elements_per_thread = (chemv_bs/(2*thread_y)) ; /** end configuration params **/ /** offset necessary calculation **/ int offset_ = offset % chemv_bs; int total_blocks_skipped = offset / chemv_bs; int my_skipped_blocks = total_blocks_skipped/ngpus; if(gpu_gid < (total_blocks_skipped%ngpus)) my_skipped_blocks += 1; int ref_gpu = total_blocks_skipped%ngpus; int new_gpu_gid = (gpu_gid - ref_gpu + ngpus) % ngpus; // Advance pointers accordingly dA += my_skipped_blocks * chemv_bs * lda; dA += total_blocks_skipped * chemv_bs; dX += total_blocks_skipped * chemv_bs * incx; dY += total_blocks_skipped * chemv_bs * incy; m -= total_blocks_skipped * 
chemv_bs; /** end offset necessary calculation **/ int mod = m % chemv_bs; int nstripes = m / chemv_bs + (mod != 0); int blocks = nstripes/ngpus; if(new_gpu_gid < (nstripes%ngpus) ) blocks += 1; dim3 dimBlock(thread_x, thread_y); dim3 dimGrid(blocks,1); dim3 dimGrid_(blocks, chemv_upper_by); if(blocks == 0) return 0; if(mod == 0) { syhemvu_special_d_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread><<<dimGrid, dimBlock, 0, stream>>> ( m, alpha, dA, lda, dX, incx, beta, dY, incy, nstripes, offset_); syhemvu_special_nd_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread><<<dimGrid_, dimBlock, 0, stream>>> ( m, alpha, dA, lda, dX, incx, beta, dY, incy, nstripes, offset_); } else { syhemvu_generic_d_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread><<<dimGrid, dimBlock, 0, stream>>> ( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, nstripes, offset_); const int irregular_part = mod % elements_per_thread; /** * The upper case kernel for irregular dimensions has an extra template parameter. * This parameter must be among the values listed in the switch-case statement below. * The possible values are in the range 0 - (elements_per_thread-1) * Make sure these values are updated whenever you change the configuration parameters. 
**/ switch(irregular_part) { case 0: syhemvu_generic_nd_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread, 0><<<dimGrid_, dimBlock, 0, stream>>>( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, nstripes, offset_); break; case 1: syhemvu_generic_nd_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread, 1><<<dimGrid_, dimBlock, 0, stream>>>( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, nstripes, offset_); break; case 2: syhemvu_generic_nd_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread, 2><<<dimGrid_, dimBlock, 0, stream>>>( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, nstripes, offset_); break; case 3: syhemvu_generic_nd_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread, 3><<<dimGrid_, dimBlock, 0, stream>>>( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, nstripes, offset_); break; case 4: syhemvu_generic_nd_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread, 4><<<dimGrid_, dimBlock, 0, stream>>>( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, nstripes, offset_); break; case 5: syhemvu_generic_nd_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread, 5><<<dimGrid_, dimBlock, 0, stream>>>( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, nstripes, offset_); break; case 6: syhemvu_generic_nd_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread, 6><<<dimGrid_, dimBlock, 0, stream>>>( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, nstripes, offset_); break; case 7: syhemvu_generic_nd_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread, 7><<<dimGrid_, dimBlock, 0, stream>>>( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, nstripes, offset_); break; case 8: syhemvu_generic_nd_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread, 8><<<dimGrid_, dimBlock, 0, stream>>>( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, nstripes, offset_); break; // return error 
otherwise: default: printf("CHEMV-UPPER ERROR: improper template parameter. Please read the inline documentation for this function. \n"); return -1; } } } else if(uplo == 'L' || uplo == 'l') { /** configuration params **/ const int chemv_bs = chemv_lower_bs; const int thread_x = chemv_bs; const int thread_y = chemv_lower_ty; const int elements_per_thread = (chemv_bs/(2*thread_y)) ; /** end configuration params **/ /** offset necessary calculation **/ int offset_ = offset % chemv_bs; int total_blocks_skipped = offset / chemv_bs; int my_skipped_blocks = total_blocks_skipped/ngpus; if(gpu_gid < (total_blocks_skipped%ngpus)) my_skipped_blocks += 1; int ref_gpu = total_blocks_skipped%ngpus; int new_gpu_gid = (gpu_gid - ref_gpu + ngpus) % ngpus; // Advance pointers accordingly dA += my_skipped_blocks * chemv_bs * lda; dA += total_blocks_skipped * chemv_bs; dX += total_blocks_skipped * chemv_bs * incx; dY += total_blocks_skipped * chemv_bs * incy; m -= total_blocks_skipped * chemv_bs; /** end offset necessary calculation **/ int mod = m % chemv_bs; int nstripes = m / chemv_bs + (mod != 0); int blocks = nstripes/ngpus; if(new_gpu_gid < (nstripes%ngpus) ) blocks += 1; dim3 dimBlock(thread_x, thread_y); dim3 dimGrid(blocks,1); dim3 dimGrid_(blocks, chemv_lower_by); if(blocks == 0) return 0; if(mod == 0) { syhemvl_special_d_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread><<<dimGrid, dimBlock, 0, stream>>> ( m, alpha, dA, lda, dX, incx, beta, dY, incy, nstripes, offset_); syhemvl_special_nd_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread><<<dimGrid_, dimBlock, 0, stream>>> ( m, alpha, dA, lda, dX, incx, beta, dY, incy, nstripes, offset_); } else { syhemvl_generic_d_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread><<<dimGrid, dimBlock, 0, stream>>> ( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, nstripes, offset_); syhemvl_generic_nd_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, 
elements_per_thread><<<dimGrid_, dimBlock, 0, stream>>> ( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, nstripes, offset_); } } else{printf("Upper/Lower mode %c is not supported \n", uplo); return -1;} return 0; } /*************************************************************************************/ extern "C" int kblas_chemv_offset( char uplo, int m, cuFloatComplex alpha, cuFloatComplex *dA, int lda, cuFloatComplex *dX, int incx, cuFloatComplex beta, cuFloatComplex *dY, int incy, int offset) { return kblas_chemv_offset_driver(uplo, m, alpha, dA, lda, dX, incx, beta, dY, incy, offset); } /*************************************************************************************/ extern "C" int kblas_chemv_offset_async( char uplo, int m, cuFloatComplex alpha, cuFloatComplex *dA, int lda, cuFloatComplex *dX, int incx, cuFloatComplex beta, cuFloatComplex *dY, int incy, int offset, cudaStream_t stream) { return kblas_chemv_offset_driver(uplo, m, alpha, dA, lda, dX, incx, beta, dY, incy, offset, stream); } /*************************************************************************************/
0cba7bf13098261dec6f1da4ba16d919b7f4df3e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //pass //--gridDim=[196,1,1] --blockDim=[512,1,1] __global__ void incKernel(int *data, int N) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < N) data[i]++; }
0cba7bf13098261dec6f1da4ba16d919b7f4df3e.cu
//pass //--gridDim=[196,1,1] --blockDim=[512,1,1] __global__ void incKernel(int *data, int N) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < N) data[i]++; }
08a1a87b0e8f356560fb6ee3a5d645faaeaeacc4.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "subgradinputAtomic.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *gradInput = NULL; hipMalloc(&gradInput, XSIZE*YSIZE); float *gradOutput = NULL; hipMalloc(&gradOutput, XSIZE*YSIZE); float *weight = NULL; hipMalloc(&weight, XSIZE*YSIZE); int input_n = 1; int input_h = 1; int input_w = 1; int kH = 1; int kW = 1; int dH = 1; int dW = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( subgradinputAtomic), dim3(gridBlock),dim3(threadBlock), 0, 0, gradInput,gradOutput,weight,input_n,input_h,input_w,kH,kW,dH,dW); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( subgradinputAtomic), dim3(gridBlock),dim3(threadBlock), 0, 0, gradInput,gradOutput,weight,input_n,input_h,input_w,kH,kW,dH,dW); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( 
subgradinputAtomic), dim3(gridBlock),dim3(threadBlock), 0, 0, gradInput,gradOutput,weight,input_n,input_h,input_w,kH,kW,dH,dW); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
08a1a87b0e8f356560fb6ee3a5d645faaeaeacc4.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "subgradinputAtomic.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *gradInput = NULL; cudaMalloc(&gradInput, XSIZE*YSIZE); float *gradOutput = NULL; cudaMalloc(&gradOutput, XSIZE*YSIZE); float *weight = NULL; cudaMalloc(&weight, XSIZE*YSIZE); int input_n = 1; int input_h = 1; int input_w = 1; int kH = 1; int kW = 1; int dH = 1; int dW = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); subgradinputAtomic<<<gridBlock,threadBlock>>>(gradInput,gradOutput,weight,input_n,input_h,input_w,kH,kW,dH,dW); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { subgradinputAtomic<<<gridBlock,threadBlock>>>(gradInput,gradOutput,weight,input_n,input_h,input_w,kH,kW,dH,dW); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { subgradinputAtomic<<<gridBlock,threadBlock>>>(gradInput,gradOutput,weight,input_n,input_h,input_w,kH,kW,dH,dW); } auto end = steady_clock::now(); auto usecs = 
duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
0ed0f3b9f7df947a406c1c88cdfb1b239478c296.hip
// !!! This is a file automatically generated by hipify!!! #include<cuda.h> #include<iostream> #include <chrono> #include <random> #define CUB_STDERR // print CUDA runtime errors to console #include <stdio.h> #include <hipcub/hipcub.hpp> #include <hipcub/hipcub.hpp> #include "cub/util_debug.cuh" using namespace std; using namespace cub; CachingDeviceAllocator g_allocator(true); // Caching allocator for device memory int main(int argc, char** argv) { if (argc != 2) { return 0; } int n = atoi(argv[1]); const size_t num_items = n; float *h_in = new float[num_items]; std::random_device source; std::mt19937_64 generator(source()); std::uniform_real_distribution<float> dist(-1.0f, 1.0f); for (int i = 0; i < n; i++) { h_in[i] = dist(generator); } hipEvent_t startEvent, stopEvent; hipEventCreate(&startEvent); hipEventCreate(&stopEvent); float* d_in = NULL; g_allocator.DeviceAllocate((void**)&d_in, sizeof(float) * num_items); hipMemcpy(d_in, h_in, sizeof(float) * num_items, hipMemcpyHostToDevice); float* d_sum = NULL; g_allocator.DeviceAllocate((void**)&d_sum, sizeof(float) * 1); // Request and allocate temporary storage void* d_temp_storage = NULL; size_t temp_storage_bytes = 0; DeviceReduce::Sum(d_temp_storage, temp_storage_bytes, d_in, d_sum, num_items); g_allocator.DeviceAllocate(&d_temp_storage, temp_storage_bytes); hipEventRecord(startEvent, 0); DeviceReduce::Sum(d_temp_storage, temp_storage_bytes, d_in, d_sum, num_items); hipEventRecord(stopEvent, 0); float gpu_sum; hipMemcpy(&gpu_sum, d_sum, sizeof(float) * 1, hipMemcpyDeviceToHost); hipEventSynchronize(stopEvent); float elapsedTime; hipEventElapsedTime(&elapsedTime, startEvent, stopEvent); cout << gpu_sum << endl; cout << elapsedTime << endl; if (d_in) g_allocator.DeviceFree(d_in); if (d_sum) g_allocator.DeviceFree(d_sum); if (d_temp_storage) g_allocator.DeviceFree(d_temp_storage); return 0; }
0ed0f3b9f7df947a406c1c88cdfb1b239478c296.cu
#include<cuda.h> #include<iostream> #include <chrono> #include <random> #define CUB_STDERR // print CUDA runtime errors to console #include <stdio.h> #include <cub/util_allocator.cuh> #include <cub/device/device_reduce.cuh> #include "cub/util_debug.cuh" using namespace std; using namespace cub; CachingDeviceAllocator g_allocator(true); // Caching allocator for device memory int main(int argc, char** argv) { if (argc != 2) { return 0; } int n = atoi(argv[1]); const size_t num_items = n; float *h_in = new float[num_items]; std::random_device source; std::mt19937_64 generator(source()); std::uniform_real_distribution<float> dist(-1.0f, 1.0f); for (int i = 0; i < n; i++) { h_in[i] = dist(generator); } cudaEvent_t startEvent, stopEvent; cudaEventCreate(&startEvent); cudaEventCreate(&stopEvent); float* d_in = NULL; g_allocator.DeviceAllocate((void**)&d_in, sizeof(float) * num_items); cudaMemcpy(d_in, h_in, sizeof(float) * num_items, cudaMemcpyHostToDevice); float* d_sum = NULL; g_allocator.DeviceAllocate((void**)&d_sum, sizeof(float) * 1); // Request and allocate temporary storage void* d_temp_storage = NULL; size_t temp_storage_bytes = 0; DeviceReduce::Sum(d_temp_storage, temp_storage_bytes, d_in, d_sum, num_items); g_allocator.DeviceAllocate(&d_temp_storage, temp_storage_bytes); cudaEventRecord(startEvent, 0); DeviceReduce::Sum(d_temp_storage, temp_storage_bytes, d_in, d_sum, num_items); cudaEventRecord(stopEvent, 0); float gpu_sum; cudaMemcpy(&gpu_sum, d_sum, sizeof(float) * 1, cudaMemcpyDeviceToHost); cudaEventSynchronize(stopEvent); float elapsedTime; cudaEventElapsedTime(&elapsedTime, startEvent, stopEvent); cout << gpu_sum << endl; cout << elapsedTime << endl; if (d_in) g_allocator.DeviceFree(d_in); if (d_sum) g_allocator.DeviceFree(d_sum); if (d_temp_storage) g_allocator.DeviceFree(d_temp_storage); return 0; }
8b5f2b976541707fcb523d2b2d0d2d88b33f0bfd.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <time.h> #include <sys/time.h> #include <gtest/gtest.h> #include <utilities/legacy/error_utils.hpp> #include <cuspatial/soa_readers.hpp> #include <cuspatial/point_in_polygon.hpp> #include "pip_util.h" #include <tests/utilities/legacy/cudf_test_utils.cuh> #include <tests/utilities/legacy/cudf_test_fixtures.h> template <typename T> struct PIPTest : public GdfTest { T * x=nullptr, *y=nullptr; int point_len=-1; gdf_column f_pos,r_pos,poly_x,poly_y,pnt_x,pnt_y; size_t free_mem = 0, total_mem = 0; int set_initialize(const char *poly_filename, const char *point_filename) { hipMemGetInfo(&free_mem, &total_mem); std::cout<<"GPU total_mem="<<total_mem<<std::endl; std::cout<<"beginning GPU free_mem="<<free_mem<<std::endl; struct timeval t0,t1,t2; gettimeofday(&t0, nullptr); cuspatial::read_polygon_soa(poly_filename,&f_pos,&r_pos,&poly_x,&poly_y); gettimeofday(&t1, nullptr); float ply_load_time=cuspatial::calc_time("polygon data loading time ......",t0,t1); auto xy_pair=cuspatial::read_lonlat_points_soa(point_filename); pnt_x=xy_pair.first; pnt_y=xy_pair.second; gettimeofday(&t2, nullptr); float pnt_load_time=cuspatial::calc_time("point data loading time ......",t1,t2); point_len=pnt_x.size; return (0); } std::vector<uint32_t> exec_gpu_pip() { gdf_column result_bitmap = cuspatial::point_in_polygon_bitmap(this->pnt_x, this->pnt_y, 
this->f_pos, this->r_pos, this->poly_x, this->poly_y); std::vector<uint32_t> h_result(this->point_len); EXPECT_EQ(hipMemcpy(h_result.data(), result_bitmap.data, this->point_len * sizeof(uint32_t), hipMemcpyDeviceToHost), hipSuccess); gdf_column_free(&result_bitmap); return h_result; } }; //typedef testing::Types<int16_t, int32_t, int64_t, float, double> NumericTypes; typedef testing::Types<double> NumericTypes; TYPED_TEST_CASE(PIPTest, NumericTypes); #if 0 // disable until data files are checked in TYPED_TEST(PIPTest, piptest) { std::string pnt_filename =std::string("../../data/locust.location"); std::string ply_filename=std::string("../../data/itsroi.ply"); ASSERT_GE(this->set_initialize(ply_filename.c_str(),pnt_filename.c_str()),0); struct timeval t0,t1,t2; gettimeofday(&t0, nullptr); std::vector<uint32_t> gpu_pip_res = this->exec_gpu_pip(gpu_pip_res); gettimeofday(&t1, nullptr); float gpu_pip_time1=cuspatial::calc_time("GPU PIP time 1(including point data transfer and kernel time)......",t0,t1); //Testing asynchronous issues by 2nd call std::vector<uint> gpu_pip_res2 = this->exec_gpu_pip(); gettimeofday(&t2, nullptr); float gpu_pip_time2=cuspatial::calc_time("GPU PIP time 2(including point data transfer and kernel time)......",t1,t2); int err_cnt=0,non_zero=0; for(int i=0;i<this->point_len;i++) { if(gpu_pip_res[i]!=gpu_pip_res2[i]) { /*printf("ERR: %d %d %d, G=%08x C=%08x\n",i,__builtin_popcount(gpu_pip_res[i]), __builtin_popcount(gpu_pip_res2[i]), (unsigned int)(gpu_pip_res[i]),(unsigned int)(gpu_pip_res2[i]));*/ err_cnt++; } if(gpu_pip_res[i]!=0&&gpu_pip_res2[i]!=0) non_zero++; } if(err_cnt==0) std::cout<<"two rounds GPU results are identical...................OK"<<std::endl; else { std::cout<<"two rounds GPU results differ by: "<<err_cnt<<std::endl; std::cout<<"non zero results="<<non_zero<<std::endl; } this->set_finalize(); hipMemGetInfo(&this->free_mem, &this->total_mem); std::cout<<"ending GPU free mem "<<this->free_mem<<std::endl; } #endif
8b5f2b976541707fcb523d2b2d0d2d88b33f0bfd.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <time.h> #include <sys/time.h> #include <gtest/gtest.h> #include <utilities/legacy/error_utils.hpp> #include <cuspatial/soa_readers.hpp> #include <cuspatial/point_in_polygon.hpp> #include "pip_util.h" #include <tests/utilities/legacy/cudf_test_utils.cuh> #include <tests/utilities/legacy/cudf_test_fixtures.h> template <typename T> struct PIPTest : public GdfTest { T * x=nullptr, *y=nullptr; int point_len=-1; gdf_column f_pos,r_pos,poly_x,poly_y,pnt_x,pnt_y; size_t free_mem = 0, total_mem = 0; int set_initialize(const char *poly_filename, const char *point_filename) { cudaMemGetInfo(&free_mem, &total_mem); std::cout<<"GPU total_mem="<<total_mem<<std::endl; std::cout<<"beginning GPU free_mem="<<free_mem<<std::endl; struct timeval t0,t1,t2; gettimeofday(&t0, nullptr); cuspatial::read_polygon_soa(poly_filename,&f_pos,&r_pos,&poly_x,&poly_y); gettimeofday(&t1, nullptr); float ply_load_time=cuspatial::calc_time("polygon data loading time ......",t0,t1); auto xy_pair=cuspatial::read_lonlat_points_soa(point_filename); pnt_x=xy_pair.first; pnt_y=xy_pair.second; gettimeofday(&t2, nullptr); float pnt_load_time=cuspatial::calc_time("point data loading time ......",t1,t2); point_len=pnt_x.size; return (0); } std::vector<uint32_t> exec_gpu_pip() { gdf_column result_bitmap = cuspatial::point_in_polygon_bitmap(this->pnt_x, this->pnt_y, this->f_pos, this->r_pos, this->poly_x, this->poly_y); 
std::vector<uint32_t> h_result(this->point_len); EXPECT_EQ(cudaMemcpy(h_result.data(), result_bitmap.data, this->point_len * sizeof(uint32_t), cudaMemcpyDeviceToHost), cudaSuccess); gdf_column_free(&result_bitmap); return h_result; } }; //typedef testing::Types<int16_t, int32_t, int64_t, float, double> NumericTypes; typedef testing::Types<double> NumericTypes; TYPED_TEST_CASE(PIPTest, NumericTypes); #if 0 // disable until data files are checked in TYPED_TEST(PIPTest, piptest) { std::string pnt_filename =std::string("../../data/locust.location"); std::string ply_filename=std::string("../../data/itsroi.ply"); ASSERT_GE(this->set_initialize(ply_filename.c_str(),pnt_filename.c_str()),0); struct timeval t0,t1,t2; gettimeofday(&t0, nullptr); std::vector<uint32_t> gpu_pip_res = this->exec_gpu_pip(gpu_pip_res); gettimeofday(&t1, nullptr); float gpu_pip_time1=cuspatial::calc_time("GPU PIP time 1(including point data transfer and kernel time)......",t0,t1); //Testing asynchronous issues by 2nd call std::vector<uint> gpu_pip_res2 = this->exec_gpu_pip(); gettimeofday(&t2, nullptr); float gpu_pip_time2=cuspatial::calc_time("GPU PIP time 2(including point data transfer and kernel time)......",t1,t2); int err_cnt=0,non_zero=0; for(int i=0;i<this->point_len;i++) { if(gpu_pip_res[i]!=gpu_pip_res2[i]) { /*printf("ERR: %d %d %d, G=%08x C=%08x\n",i,__builtin_popcount(gpu_pip_res[i]), __builtin_popcount(gpu_pip_res2[i]), (unsigned int)(gpu_pip_res[i]),(unsigned int)(gpu_pip_res2[i]));*/ err_cnt++; } if(gpu_pip_res[i]!=0&&gpu_pip_res2[i]!=0) non_zero++; } if(err_cnt==0) std::cout<<"two rounds GPU results are identical...................OK"<<std::endl; else { std::cout<<"two rounds GPU results differ by: "<<err_cnt<<std::endl; std::cout<<"non zero results="<<non_zero<<std::endl; } this->set_finalize(); cudaMemGetInfo(&this->free_mem, &this->total_mem); std::cout<<"ending GPU free mem "<<this->free_mem<<std::endl; } #endif
27aad8d37b268b38994982abf7157f5c735de5d5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> //#include "nnd_cuda.h" #include <torch/torch.h> #include <vector> __global__ void NmDistanceKernel(int b,int n,const float * xyz,int m,const float * xyz2,float * result,int * result_i){ const int batch=512; __shared__ float buf[batch*3]; for (int i=blockIdx.x;i<b;i+=gridDim.x){ for (int k2=0;k2<m;k2+=batch){ int end_k=min(m,k2+batch)-k2; for (int j=threadIdx.x;j<end_k*3;j+=blockDim.x){ buf[j]=xyz2[(i*m+k2)*3+j]; } __syncthreads(); for (int j=threadIdx.x+blockIdx.y*blockDim.x;j<n;j+=blockDim.x*gridDim.y){ float x1=xyz[(i*n+j)*3+0]; float y1=xyz[(i*n+j)*3+1]; float z1=xyz[(i*n+j)*3+2]; int best_i=0; float best=0; int end_ka=end_k-(end_k&3); if (end_ka==batch){ for (int k=0;k<batch;k+=4){ { float x2=buf[k*3+0]-x1; float y2=buf[k*3+1]-y1; float z2=buf[k*3+2]-z1; float d=x2*x2+y2*y2+z2*z2; if (k==0 || d<best){ best=d; best_i=k+k2; } } { float x2=buf[k*3+3]-x1; float y2=buf[k*3+4]-y1; float z2=buf[k*3+5]-z1; float d=x2*x2+y2*y2+z2*z2; if (d<best){ best=d; best_i=k+k2+1; } } { float x2=buf[k*3+6]-x1; float y2=buf[k*3+7]-y1; float z2=buf[k*3+8]-z1; float d=x2*x2+y2*y2+z2*z2; if (d<best){ best=d; best_i=k+k2+2; } } { float x2=buf[k*3+9]-x1; float y2=buf[k*3+10]-y1; float z2=buf[k*3+11]-z1; float d=x2*x2+y2*y2+z2*z2; if (d<best){ best=d; best_i=k+k2+3; } } } }else{ for (int k=0;k<end_ka;k+=4){ { float x2=buf[k*3+0]-x1; float y2=buf[k*3+1]-y1; float z2=buf[k*3+2]-z1; float d=x2*x2+y2*y2+z2*z2; if (k==0 || d<best){ best=d; best_i=k+k2; } } { float x2=buf[k*3+3]-x1; float y2=buf[k*3+4]-y1; float z2=buf[k*3+5]-z1; float d=x2*x2+y2*y2+z2*z2; if (d<best){ best=d; best_i=k+k2+1; } } { float x2=buf[k*3+6]-x1; float y2=buf[k*3+7]-y1; float z2=buf[k*3+8]-z1; float d=x2*x2+y2*y2+z2*z2; if (d<best){ best=d; best_i=k+k2+2; } } { float x2=buf[k*3+9]-x1; float y2=buf[k*3+10]-y1; float z2=buf[k*3+11]-z1; float d=x2*x2+y2*y2+z2*z2; if (d<best){ best=d; best_i=k+k2+3; } } } } for (int 
k=end_ka;k<end_k;k++){ float x2=buf[k*3+0]-x1; float y2=buf[k*3+1]-y1; float z2=buf[k*3+2]-z1; float d=x2*x2+y2*y2+z2*z2; if (k==0 || d<best){ best=d; best_i=k+k2; } } if (k2==0 || result[(i*n+j)]>best){ result[(i*n+j)]=best; result_i[(i*n+j)]=best_i; } } __syncthreads(); } } } int NmDistanceKernelLauncher( at::Tensor xyz1, at::Tensor xyz2, at::Tensor dist1, at::Tensor dist2, at::Tensor idx1, at::Tensor idx2) { int b = xyz1.size(0); int n = xyz1.size(1); int m = xyz2.size(1); float *xyz1_data = xyz1.data<float>(); float *xyz2_data = xyz2.data<float>(); float *dist1_data = dist1.data<float>(); float *dist2_data = dist2.data<float>(); int *idx1_data = idx1.data<int>(); int *idx2_data = idx2.data<int>(); hipLaunchKernelGGL(( NmDistanceKernel), dim3(dim3(32,16,1)),dim3(512), 0, 0, b,n,xyz1_data,m,xyz2_data,dist1_data,idx1_data); hipLaunchKernelGGL(( NmDistanceKernel), dim3(dim3(32,16,1)),dim3(512), 0, 0, b,m,xyz2_data,n,xyz1_data,dist2_data,idx2_data); hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf("error in nnd updateOutput: %s\n", hipGetErrorString(err)); //THError("aborting"); return 0; } return 1; } __global__ void NmDistanceGradKernel(int b,int n,const float * xyz1,int m,const float * xyz2,const float * grad_dist1,const int * idx1,float * grad_xyz1,float * grad_xyz2){ for (int i=blockIdx.x;i<b;i+=gridDim.x){ for (int j=threadIdx.x+blockIdx.y*blockDim.x;j<n;j+=blockDim.x*gridDim.y){ float x1=xyz1[(i*n+j)*3+0]; float y1=xyz1[(i*n+j)*3+1]; float z1=xyz1[(i*n+j)*3+2]; int j2=idx1[i*n+j]; float x2=xyz2[(i*m+j2)*3+0]; float y2=xyz2[(i*m+j2)*3+1]; float z2=xyz2[(i*m+j2)*3+2]; float g=grad_dist1[i*n+j]*2; atomicAdd(&(grad_xyz1[(i*n+j)*3+0]),g*(x1-x2)); atomicAdd(&(grad_xyz1[(i*n+j)*3+1]),g*(y1-y2)); atomicAdd(&(grad_xyz1[(i*n+j)*3+2]),g*(z1-z2)); atomicAdd(&(grad_xyz2[(i*m+j2)*3+0]),-(g*(x1-x2))); atomicAdd(&(grad_xyz2[(i*m+j2)*3+1]),-(g*(y1-y2))); atomicAdd(&(grad_xyz2[(i*m+j2)*3+2]),-(g*(z1-z2))); } } } int NmDistanceGradKernelLauncher( at::Tensor 
xyz1, at::Tensor xyz2, at::Tensor gradxyz1, at::Tensor gradxyz2, at::Tensor graddist1, at::Tensor graddist2, at::Tensor idx1, at::Tensor idx2) { int b = xyz1.size(0); int n = xyz1.size(1); int m = xyz2.size(1); //gradxyz1.zero_(); // gradxyz2.zero_(); //printf("b:%d, n:%d, m:%d\n", b, n, m); float *xyz1_data = xyz1.data<float>(); float *xyz2_data = xyz2.data<float>(); float *gradxyz1_data = gradxyz1.data<float>(); float *gradxyz2_data = gradxyz2.data<float>(); float *graddist1_data = graddist1.data<float>(); float *graddist2_data = graddist2.data<float>(); int *idx1_data = idx1.data<int>(); int *idx2_data = idx2.data<int>(); hipLaunchKernelGGL(( NmDistanceGradKernel), dim3(dim3(1,16,1)),dim3(256), 0, 0, b,n,xyz1_data,m,xyz2_data,graddist1_data,idx1_data,gradxyz1_data,gradxyz2_data); hipLaunchKernelGGL(( NmDistanceGradKernel), dim3(dim3(1,16,1)),dim3(256), 0, 0, b,m,xyz2_data,n,xyz1_data,graddist2_data,idx2_data,gradxyz2_data,gradxyz1_data); hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf("error in nnd get grad: %s\n", hipGetErrorString(err)); //THError("aborting"); return 0; } return 1; }
27aad8d37b268b38994982abf7157f5c735de5d5.cu
#include <stdio.h> //#include "nnd_cuda.h" #include <torch/torch.h> #include <vector> __global__ void NmDistanceKernel(int b,int n,const float * xyz,int m,const float * xyz2,float * result,int * result_i){ const int batch=512; __shared__ float buf[batch*3]; for (int i=blockIdx.x;i<b;i+=gridDim.x){ for (int k2=0;k2<m;k2+=batch){ int end_k=min(m,k2+batch)-k2; for (int j=threadIdx.x;j<end_k*3;j+=blockDim.x){ buf[j]=xyz2[(i*m+k2)*3+j]; } __syncthreads(); for (int j=threadIdx.x+blockIdx.y*blockDim.x;j<n;j+=blockDim.x*gridDim.y){ float x1=xyz[(i*n+j)*3+0]; float y1=xyz[(i*n+j)*3+1]; float z1=xyz[(i*n+j)*3+2]; int best_i=0; float best=0; int end_ka=end_k-(end_k&3); if (end_ka==batch){ for (int k=0;k<batch;k+=4){ { float x2=buf[k*3+0]-x1; float y2=buf[k*3+1]-y1; float z2=buf[k*3+2]-z1; float d=x2*x2+y2*y2+z2*z2; if (k==0 || d<best){ best=d; best_i=k+k2; } } { float x2=buf[k*3+3]-x1; float y2=buf[k*3+4]-y1; float z2=buf[k*3+5]-z1; float d=x2*x2+y2*y2+z2*z2; if (d<best){ best=d; best_i=k+k2+1; } } { float x2=buf[k*3+6]-x1; float y2=buf[k*3+7]-y1; float z2=buf[k*3+8]-z1; float d=x2*x2+y2*y2+z2*z2; if (d<best){ best=d; best_i=k+k2+2; } } { float x2=buf[k*3+9]-x1; float y2=buf[k*3+10]-y1; float z2=buf[k*3+11]-z1; float d=x2*x2+y2*y2+z2*z2; if (d<best){ best=d; best_i=k+k2+3; } } } }else{ for (int k=0;k<end_ka;k+=4){ { float x2=buf[k*3+0]-x1; float y2=buf[k*3+1]-y1; float z2=buf[k*3+2]-z1; float d=x2*x2+y2*y2+z2*z2; if (k==0 || d<best){ best=d; best_i=k+k2; } } { float x2=buf[k*3+3]-x1; float y2=buf[k*3+4]-y1; float z2=buf[k*3+5]-z1; float d=x2*x2+y2*y2+z2*z2; if (d<best){ best=d; best_i=k+k2+1; } } { float x2=buf[k*3+6]-x1; float y2=buf[k*3+7]-y1; float z2=buf[k*3+8]-z1; float d=x2*x2+y2*y2+z2*z2; if (d<best){ best=d; best_i=k+k2+2; } } { float x2=buf[k*3+9]-x1; float y2=buf[k*3+10]-y1; float z2=buf[k*3+11]-z1; float d=x2*x2+y2*y2+z2*z2; if (d<best){ best=d; best_i=k+k2+3; } } } } for (int k=end_ka;k<end_k;k++){ float x2=buf[k*3+0]-x1; float y2=buf[k*3+1]-y1; float 
z2=buf[k*3+2]-z1; float d=x2*x2+y2*y2+z2*z2; if (k==0 || d<best){ best=d; best_i=k+k2; } } if (k2==0 || result[(i*n+j)]>best){ result[(i*n+j)]=best; result_i[(i*n+j)]=best_i; } } __syncthreads(); } } } int NmDistanceKernelLauncher( at::Tensor xyz1, at::Tensor xyz2, at::Tensor dist1, at::Tensor dist2, at::Tensor idx1, at::Tensor idx2) { int b = xyz1.size(0); int n = xyz1.size(1); int m = xyz2.size(1); float *xyz1_data = xyz1.data<float>(); float *xyz2_data = xyz2.data<float>(); float *dist1_data = dist1.data<float>(); float *dist2_data = dist2.data<float>(); int *idx1_data = idx1.data<int>(); int *idx2_data = idx2.data<int>(); NmDistanceKernel<<<dim3(32,16,1),512>>>(b,n,xyz1_data,m,xyz2_data,dist1_data,idx1_data); NmDistanceKernel<<<dim3(32,16,1),512>>>(b,m,xyz2_data,n,xyz1_data,dist2_data,idx2_data); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("error in nnd updateOutput: %s\n", cudaGetErrorString(err)); //THError("aborting"); return 0; } return 1; } __global__ void NmDistanceGradKernel(int b,int n,const float * xyz1,int m,const float * xyz2,const float * grad_dist1,const int * idx1,float * grad_xyz1,float * grad_xyz2){ for (int i=blockIdx.x;i<b;i+=gridDim.x){ for (int j=threadIdx.x+blockIdx.y*blockDim.x;j<n;j+=blockDim.x*gridDim.y){ float x1=xyz1[(i*n+j)*3+0]; float y1=xyz1[(i*n+j)*3+1]; float z1=xyz1[(i*n+j)*3+2]; int j2=idx1[i*n+j]; float x2=xyz2[(i*m+j2)*3+0]; float y2=xyz2[(i*m+j2)*3+1]; float z2=xyz2[(i*m+j2)*3+2]; float g=grad_dist1[i*n+j]*2; atomicAdd(&(grad_xyz1[(i*n+j)*3+0]),g*(x1-x2)); atomicAdd(&(grad_xyz1[(i*n+j)*3+1]),g*(y1-y2)); atomicAdd(&(grad_xyz1[(i*n+j)*3+2]),g*(z1-z2)); atomicAdd(&(grad_xyz2[(i*m+j2)*3+0]),-(g*(x1-x2))); atomicAdd(&(grad_xyz2[(i*m+j2)*3+1]),-(g*(y1-y2))); atomicAdd(&(grad_xyz2[(i*m+j2)*3+2]),-(g*(z1-z2))); } } } int NmDistanceGradKernelLauncher( at::Tensor xyz1, at::Tensor xyz2, at::Tensor gradxyz1, at::Tensor gradxyz2, at::Tensor graddist1, at::Tensor graddist2, at::Tensor idx1, at::Tensor idx2) { int 
b = xyz1.size(0); int n = xyz1.size(1); int m = xyz2.size(1); //gradxyz1.zero_(); // gradxyz2.zero_(); //printf("b:%d, n:%d, m:%d\n", b, n, m); float *xyz1_data = xyz1.data<float>(); float *xyz2_data = xyz2.data<float>(); float *gradxyz1_data = gradxyz1.data<float>(); float *gradxyz2_data = gradxyz2.data<float>(); float *graddist1_data = graddist1.data<float>(); float *graddist2_data = graddist2.data<float>(); int *idx1_data = idx1.data<int>(); int *idx2_data = idx2.data<int>(); NmDistanceGradKernel<<<dim3(1,16,1),256>>>(b,n,xyz1_data,m,xyz2_data,graddist1_data,idx1_data,gradxyz1_data,gradxyz2_data); NmDistanceGradKernel<<<dim3(1,16,1),256>>>(b,m,xyz2_data,n,xyz1_data,graddist2_data,idx2_data,gradxyz2_data,gradxyz1_data); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("error in nnd get grad: %s\n", cudaGetErrorString(err)); //THError("aborting"); return 0; } return 1; }
651eda7870eed05d4f984136605065e4b4c50d73.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include "hit.h" #include "vector3.h" #include "partitioning/aabb.h" #include "partitioning/octree.h" __device__ static int ray_intersect(struct ray ray, vector3 *input_vertex, vector3 *input_normal, vector3 *out, vector3 *normal) { const float EPSILON = 0.0000001; vector3 vertex0 = input_vertex[0]; vector3 vertex1 = input_vertex[1]; vector3 vertex2 = input_vertex[2]; vector3 normal0 = vector3_normalize(input_normal[0]); vector3 normal1 = vector3_normalize(input_normal[1]); vector3 normal2 = vector3_normalize(input_normal[2]); vector3 edge1, edge2, h, s, q; float a, f, u, v; edge1 = vector3_sub(vertex1, vertex0); edge2 = vector3_sub(vertex2, vertex0); h = vector3_cross(ray.direction, edge2); a = vector3_dot(edge1, h); if (a > -EPSILON && a < EPSILON) return 0; f = 1 / a; s = vector3_sub(ray.origin, vertex0); u = f * vector3_dot(s, h); if (u < 0.0 || u > 1.0) return 0; q = vector3_cross(s, edge1); v = f * vector3_dot(ray.direction, q); if (v < 0.0 || u + v > 1.0) return 0; float t = f * vector3_dot(edge2, q); if (t > EPSILON) { vector3 t2 = vector3_scale(vector3_normalize(ray.direction), t * vector3_length(ray.direction)); *out = vector3_add(ray.origin, t2); *normal = vector3_add(vector3_add(vector3_scale(normal0, 1 - u - v), vector3_scale(normal1, u)), vector3_scale(normal2, v)); return 1; } return 0; } __device__ static struct ray triangle_collide(struct object object, struct ray ray) { float distance = 0; struct ray ret = init_ray(); for (size_t i = 0; i < object.triangle_count; i++) { vector3 out; vector3 normal; int has_intersected = ray_intersect( ray, get_vertex(object.triangles, i), get_normal(object.triangles, i), &out, &normal ); if (has_intersected) { float new_dist = vector3_length(vector3_sub(out, ray.origin)); if (new_dist > 0.01 && (new_dist < distance || distance == 0)) { distance = new_dist; struct ray new_ret; new_ret.origin = out; new_ret.direction = normal; 
ret = new_ret; } } } return ret; } /* Partitioning dependent code */ # if defined(PARTITIONING_NONE) || defined(PARTITIONING_AABB) __device__ struct ray collide(const struct scene* scene, struct ray ray, struct object* obj) { float distance = 0; struct ray ret = init_ray(); for (size_t i = 0; i < scene->object_count; i++) { #if defined(PARTITIONING_AABB) // Try the aabb first, to prevent checking for collision with all triangles // if there is no intersections. if (!hit_aabb(&scene->aabbs[i], &ray)) continue; #endif struct ray new_ray = triangle_collide(scene->objects[i], ray); if (!vector3_is_zero(new_ray.direction)) { float new_dist = vector3_length(vector3_sub(new_ray.origin, ray.origin)); if (new_dist > 0.01 && (new_dist < distance || distance == 0)) { distance = new_dist; ret = new_ray; *obj = scene->objects[i]; } } } return ret; } # else /* PARTITIONING_OCTREE */ __device__ struct ray collide(const struct scene* scene, struct ray ray, struct object* obj) { float distance = 0; struct ray ret = init_ray(); constexpr int children_per_node = 8; constexpr int max_depth = 8; struct octree *octree_stack[children_per_node * max_depth]; octree_stack[0] = scene->octree; size_t octree_stack_size = 1; while (octree_stack_size > 0) { struct octree current = *octree_stack[--octree_stack_size]; if (hit_aabb(&current.box, &ray)) {// It it's this octree, perform an intersection test on all it's objects, and add the children // Perform the intersection check on it's objects for (size_t i = current.start_index; i < current.end_index; ++i) { if (!hit_aabb(&scene->aabbs[i], &ray)) continue; struct ray new_ray = triangle_collide(scene->objects[i], ray); if (!vector3_is_zero(new_ray.direction)) { float new_dist = vector3_length(vector3_sub(new_ray.origin, ray.origin)); if (new_dist > 0.01 && (new_dist < distance || distance == 0)) { distance = new_dist; ret = new_ray; *obj = scene->objects[i]; } } } // Add all of it's children for (size_t child = 0; child < children_per_node; 
++child) { if (current.children[child] != nullptr) { octree_stack[octree_stack_size++] = current.children[child]; } } } } return ret; } /* __device__ struct ray collide(const struct scene* scene, struct ray ray, struct object* obj) { float distance = 0; struct ray ret = init_ray(); // Children per octree // Depth of the octree while (octree_stack_size > 0) { struct octree current = *octree_stack[--octree_stack_size]; if (hit_aabb(&current.box, &ray)) {// It it's this octree, perform an intersection test on all it's objects, and add the children // Perform the intersection check on it's objects for (size_t i = current.start_index; i < current.end_index; ++i) { struct ray new_ray = triangle_collide(scene->objects[i], ray); if (!vector3_is_zero(new_ray.direction)) { float new_dist = vector3_length(vector3_sub(new_ray.origin, ray.origin)); if (new_dist > 0.01 && (new_dist < distance || distance == 0)) { distance = new_dist; ret = new_ray; *obj = scene->objects[i]; } } } // Add all of it's children for (size_t child = 0; child < 8; ++child) { if (current.children[child] != nullptr) { octree_stack[++octree_stack_size] = current.children[child]; } } } } return ret; } */ # endif /* End of Partitioning dependent code */ __device__ float3 operator+(const float3 &a, const float3 &b) { return make_float3(a.x+b.x, a.y+b.y, a.z+b.z); } __device__ float3 operator-(const float3 &a, const float3 &b) { return make_float3(a.x-b.x, a.y-b.y, a.z-b.z); } __device__ float operator~(const float3 &a) { return sqrt(a.x * a.x + a.y * a.y + a.z * a.z); } __device__ float collide_dist(const struct scene* scene, struct ray ray) { struct object object; struct ray new_ray = collide(scene, ray, &object); if (vector3_is_zero(new_ray.direction)) return 0; else { vector3 res = vector3_sub(new_ray.origin, ray.origin); return vector3_length(res); } }
651eda7870eed05d4f984136605065e4b4c50d73.cu
#include <cuda_runtime.h> #include "hit.h" #include "vector3.h" #include "partitioning/aabb.h" #include "partitioning/octree.h" __device__ static int ray_intersect(struct ray ray, vector3 *input_vertex, vector3 *input_normal, vector3 *out, vector3 *normal) { const float EPSILON = 0.0000001; vector3 vertex0 = input_vertex[0]; vector3 vertex1 = input_vertex[1]; vector3 vertex2 = input_vertex[2]; vector3 normal0 = vector3_normalize(input_normal[0]); vector3 normal1 = vector3_normalize(input_normal[1]); vector3 normal2 = vector3_normalize(input_normal[2]); vector3 edge1, edge2, h, s, q; float a, f, u, v; edge1 = vector3_sub(vertex1, vertex0); edge2 = vector3_sub(vertex2, vertex0); h = vector3_cross(ray.direction, edge2); a = vector3_dot(edge1, h); if (a > -EPSILON && a < EPSILON) return 0; f = 1 / a; s = vector3_sub(ray.origin, vertex0); u = f * vector3_dot(s, h); if (u < 0.0 || u > 1.0) return 0; q = vector3_cross(s, edge1); v = f * vector3_dot(ray.direction, q); if (v < 0.0 || u + v > 1.0) return 0; float t = f * vector3_dot(edge2, q); if (t > EPSILON) { vector3 t2 = vector3_scale(vector3_normalize(ray.direction), t * vector3_length(ray.direction)); *out = vector3_add(ray.origin, t2); *normal = vector3_add(vector3_add(vector3_scale(normal0, 1 - u - v), vector3_scale(normal1, u)), vector3_scale(normal2, v)); return 1; } return 0; } __device__ static struct ray triangle_collide(struct object object, struct ray ray) { float distance = 0; struct ray ret = init_ray(); for (size_t i = 0; i < object.triangle_count; i++) { vector3 out; vector3 normal; int has_intersected = ray_intersect( ray, get_vertex(object.triangles, i), get_normal(object.triangles, i), &out, &normal ); if (has_intersected) { float new_dist = vector3_length(vector3_sub(out, ray.origin)); if (new_dist > 0.01 && (new_dist < distance || distance == 0)) { distance = new_dist; struct ray new_ret; new_ret.origin = out; new_ret.direction = normal; ret = new_ret; } } } return ret; } /* Partitioning dependent 
code */ # if defined(PARTITIONING_NONE) || defined(PARTITIONING_AABB) __device__ struct ray collide(const struct scene* scene, struct ray ray, struct object* obj) { float distance = 0; struct ray ret = init_ray(); for (size_t i = 0; i < scene->object_count; i++) { #if defined(PARTITIONING_AABB) // Try the aabb first, to prevent checking for collision with all triangles // if there is no intersections. if (!hit_aabb(&scene->aabbs[i], &ray)) continue; #endif struct ray new_ray = triangle_collide(scene->objects[i], ray); if (!vector3_is_zero(new_ray.direction)) { float new_dist = vector3_length(vector3_sub(new_ray.origin, ray.origin)); if (new_dist > 0.01 && (new_dist < distance || distance == 0)) { distance = new_dist; ret = new_ray; *obj = scene->objects[i]; } } } return ret; } # else /* PARTITIONING_OCTREE */ __device__ struct ray collide(const struct scene* scene, struct ray ray, struct object* obj) { float distance = 0; struct ray ret = init_ray(); constexpr int children_per_node = 8; constexpr int max_depth = 8; struct octree *octree_stack[children_per_node * max_depth]; octree_stack[0] = scene->octree; size_t octree_stack_size = 1; while (octree_stack_size > 0) { struct octree current = *octree_stack[--octree_stack_size]; if (hit_aabb(&current.box, &ray)) {// It it's this octree, perform an intersection test on all it's objects, and add the children // Perform the intersection check on it's objects for (size_t i = current.start_index; i < current.end_index; ++i) { if (!hit_aabb(&scene->aabbs[i], &ray)) continue; struct ray new_ray = triangle_collide(scene->objects[i], ray); if (!vector3_is_zero(new_ray.direction)) { float new_dist = vector3_length(vector3_sub(new_ray.origin, ray.origin)); if (new_dist > 0.01 && (new_dist < distance || distance == 0)) { distance = new_dist; ret = new_ray; *obj = scene->objects[i]; } } } // Add all of it's children for (size_t child = 0; child < children_per_node; ++child) { if (current.children[child] != nullptr) { 
octree_stack[octree_stack_size++] = current.children[child]; } } } } return ret; } /* __device__ struct ray collide(const struct scene* scene, struct ray ray, struct object* obj) { float distance = 0; struct ray ret = init_ray(); // Children per octree // Depth of the octree while (octree_stack_size > 0) { struct octree current = *octree_stack[--octree_stack_size]; if (hit_aabb(&current.box, &ray)) {// It it's this octree, perform an intersection test on all it's objects, and add the children // Perform the intersection check on it's objects for (size_t i = current.start_index; i < current.end_index; ++i) { struct ray new_ray = triangle_collide(scene->objects[i], ray); if (!vector3_is_zero(new_ray.direction)) { float new_dist = vector3_length(vector3_sub(new_ray.origin, ray.origin)); if (new_dist > 0.01 && (new_dist < distance || distance == 0)) { distance = new_dist; ret = new_ray; *obj = scene->objects[i]; } } } // Add all of it's children for (size_t child = 0; child < 8; ++child) { if (current.children[child] != nullptr) { octree_stack[++octree_stack_size] = current.children[child]; } } } } return ret; } */ # endif /* End of Partitioning dependent code */ __device__ float3 operator+(const float3 &a, const float3 &b) { return make_float3(a.x+b.x, a.y+b.y, a.z+b.z); } __device__ float3 operator-(const float3 &a, const float3 &b) { return make_float3(a.x-b.x, a.y-b.y, a.z-b.z); } __device__ float operator~(const float3 &a) { return sqrt(a.x * a.x + a.y * a.y + a.z * a.z); } __device__ float collide_dist(const struct scene* scene, struct ray ray) { struct object object; struct ray new_ray = collide(scene, ray, &object); if (vector3_is_zero(new_ray.direction)) return 0; else { vector3 res = vector3_sub(new_ray.origin, ray.origin); return vector3_length(res); } }
f368f9d206051ae7023f0ffca3eccfb8fa0172a9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "fp_gpu.cuh" __global__ void _set_input_train(int idx) { int ix=threadIdx.x+blockDim.x*blockIdx.x; int iy=threadIdx.y+blockDim.y*blockIdx.y; if(ix<ROW&&iy<COL) { _input[ix][iy]=_train_image[idx][ix][iy]; } } __global__ void _set_input_test(int idx) { int ix=threadIdx.x+blockDim.x*blockIdx.x; int iy=threadIdx.y+blockDim.y*blockIdx.y; if(ix<ROW&&iy<COL) { _input[ix][iy]=_test_image[idx][ix][iy]; } } void set_input_gpu_train(int idx) { dim3 block(32,32); dim3 grid((ROW-1)/block.x+1,(COL-1)/block.y+1); hipLaunchKernelGGL(( _set_input_train), dim3(block),dim3(grid), 0, 0, idx); hipDeviceSynchronize(); } void set_input_gpu_test(int idx) { dim3 block(32,32); dim3 grid((ROW-1)/block.x+1,(COL-1)/block.y+1); hipLaunchKernelGGL(( _set_input_test), dim3(block),dim3(grid), 0, 0, idx); hipDeviceSynchronize(); } __global__ void _input_conv() { int ix=threadIdx.x+blockDim.x*blockIdx.x; int iy=threadIdx.y+blockDim.y*blockIdx.y; int iz=threadIdx.z+blockDim.z*blockIdx.z; if(ix<CONV_W_NUM&&iy<CONV_SIZE&&iz<CONV_SIZE) { _conv_z[ix][iy][iz]=0; // #pragma unroll for(int l=0;l<CONV_W_SIZE;l++) for(int m=0;m<CONV_W_SIZE;m++) _conv_z[ix][iy][iz]+=_input[iy+l][iz+m]*_conv_w[ix][l][m]; _conv_z[ix][iy][iz]+=_conv_b[ix]; _conv_a[ix][iy][iz]=_sigmoid(_conv_z[ix][iy][iz]); } } void input_conv_gpu() { dim3 block(8,8,8); dim3 grid((CONV_W_NUM-1)/block.x+1,(CONV_SIZE-1)/block.y+1,(CONV_SIZE-1)/block.z+1); hipLaunchKernelGGL(( _input_conv), dim3(block),dim3(grid), 0, 0, ); hipDeviceSynchronize(); } __global__ void _conv_pool() { int i=threadIdx.x+blockDim.x*blockIdx.x; int j=threadIdx.y+blockDim.y*blockIdx.y; int k=threadIdx.z+blockDim.z*blockIdx.z; if(i<CONV_W_NUM&&j<POOL_SIZE&&k<POOL_SIZE) { float _max=_conv_a[i][j*2][k*2]; _pool_pos[i][j][k]=0; if(_conv_a[i][j*2][k*2+1]>_max) { _max=_conv_a[i][j*2][k*2+1]; _pool_pos[i][j][k]=1; } if(_conv_a[i][j*2+1][k*2]>_max) { _max=_conv_a[i][j*2+1][k*2]; 
_pool_pos[i][j][k]=2; } if(_conv_a[i][j*2+1][k*2+1]>_max) { _max=_conv_a[i][j*2+1][k*2+1]; _pool_pos[i][j][k]=3; } _pool[i][j][k]=_max; } } void conv_pool_gpu() { dim3 block(8,8,8); dim3 grid((CONV_W_NUM-1)/block.x+1,(POOL_SIZE-1)/block.y+1,(POOL_SIZE-1)/block.z+1); hipLaunchKernelGGL(( _conv_pool), dim3(block),dim3(grid), 0, 0, ); hipDeviceSynchronize(); } __global__ void _pool_fc1() { int i=threadIdx.x+blockDim.x*blockIdx.x; if(i<FC1_SIZE) { _fc1_z[i]=0; for(int j=0;j<CONV_W_NUM;j++) for(int k=0;k<POOL_SIZE;k++) for(int l=0;l<POOL_SIZE;l++) _fc1_z[i]+=_pool[j][k][l]*_fc1_w[i][j][k][l]; _fc1_z[i]+=_fc1_b[i]; _fc1_a[i]=_sigmoid(_fc1_z[i]); } } void pool_fc1_gpu() { dim3 block(32); dim3 grid((FC1_SIZE-1)/block.x+1); hipLaunchKernelGGL(( _pool_fc1), dim3(block),dim3(grid), 0, 0, ); hipDeviceSynchronize(); } __global__ void _fc1_fc2() { int i=threadIdx.x+blockDim.x*blockIdx.x; if(i<FC2_SIZE) { _fc2_z[i]=0; for(int j=0;j<FC1_SIZE;j++) _fc2_z[i]+=_fc1_a[j]*_fc2_w[i][j]; _fc2_z[i]+=_fc2_b[i]; _fc2_a[i]=_sigmoid(_fc2_z[i]); } } void fc1_fc2_gpu() { dim3 block(32); dim3 grid((FC2_SIZE-1)/block.x+1); hipLaunchKernelGGL(( _fc1_fc2), dim3(block),dim3(grid), 0, 0, ); hipDeviceSynchronize(); } __global__ void _set_answer_train(int idx) { int i=threadIdx.x+blockDim.x*blockIdx.x; if(i<FC2_SIZE) { _output[i]=_fc2_a[i]; _answer[i]=(_train_label[idx]==i)?1:0; } } __global__ void _set_answer_test(int idx) { int i=threadIdx.x+blockDim.x*blockIdx.x; if(i<FC2_SIZE) { _output[i]=_fc2_a[i]; _answer[i]=(_test_label[idx]==i)?1:0; } } void set_answer_gpu_train(int idx) { dim3 block(32); dim3 grid((FC2_SIZE-1)/block.x+1); hipLaunchKernelGGL(( _set_answer_train), dim3(block),dim3(grid), 0, 0, idx); hipDeviceSynchronize(); } void set_answer_gpu_test(int idx) { dim3 block(32); dim3 grid((FC2_SIZE-1)/block.x+1); hipLaunchKernelGGL(( _set_answer_test), dim3(block),dim3(grid), 0, 0, idx); hipDeviceSynchronize(); } __global__ void _check_answer_get_error() { float _max=_output[0]; int max_pos=0; 
for(int i=0;i<FC2_SIZE;i++) { if(_max<_output[i]) { _max=_output[i]; max_pos=i; } } if(_answer[max_pos]) _correct_cnt++; for(int i=0;i<FC2_SIZE;i++) { _C[i]=_output[i]-_answer[i]; _avg_error+=_C[i]*_C[i]*0.5; } } void check_answer_get_error_gpu() { hipLaunchKernelGGL(( _check_answer_get_error), dim3(1),dim3(1), 0, 0, ); hipDeviceSynchronize(); }
f368f9d206051ae7023f0ffca3eccfb8fa0172a9.cu
#include "fp_gpu.cuh" __global__ void _set_input_train(int idx) { int ix=threadIdx.x+blockDim.x*blockIdx.x; int iy=threadIdx.y+blockDim.y*blockIdx.y; if(ix<ROW&&iy<COL) { _input[ix][iy]=_train_image[idx][ix][iy]; } } __global__ void _set_input_test(int idx) { int ix=threadIdx.x+blockDim.x*blockIdx.x; int iy=threadIdx.y+blockDim.y*blockIdx.y; if(ix<ROW&&iy<COL) { _input[ix][iy]=_test_image[idx][ix][iy]; } } void set_input_gpu_train(int idx) { dim3 block(32,32); dim3 grid((ROW-1)/block.x+1,(COL-1)/block.y+1); _set_input_train<<<block,grid>>>(idx); cudaDeviceSynchronize(); } void set_input_gpu_test(int idx) { dim3 block(32,32); dim3 grid((ROW-1)/block.x+1,(COL-1)/block.y+1); _set_input_test<<<block,grid>>>(idx); cudaDeviceSynchronize(); } __global__ void _input_conv() { int ix=threadIdx.x+blockDim.x*blockIdx.x; int iy=threadIdx.y+blockDim.y*blockIdx.y; int iz=threadIdx.z+blockDim.z*blockIdx.z; if(ix<CONV_W_NUM&&iy<CONV_SIZE&&iz<CONV_SIZE) { _conv_z[ix][iy][iz]=0; // #pragma unroll for(int l=0;l<CONV_W_SIZE;l++) for(int m=0;m<CONV_W_SIZE;m++) _conv_z[ix][iy][iz]+=_input[iy+l][iz+m]*_conv_w[ix][l][m]; _conv_z[ix][iy][iz]+=_conv_b[ix]; _conv_a[ix][iy][iz]=_sigmoid(_conv_z[ix][iy][iz]); } } void input_conv_gpu() { dim3 block(8,8,8); dim3 grid((CONV_W_NUM-1)/block.x+1,(CONV_SIZE-1)/block.y+1,(CONV_SIZE-1)/block.z+1); _input_conv<<<block,grid>>>(); cudaDeviceSynchronize(); } __global__ void _conv_pool() { int i=threadIdx.x+blockDim.x*blockIdx.x; int j=threadIdx.y+blockDim.y*blockIdx.y; int k=threadIdx.z+blockDim.z*blockIdx.z; if(i<CONV_W_NUM&&j<POOL_SIZE&&k<POOL_SIZE) { float _max=_conv_a[i][j*2][k*2]; _pool_pos[i][j][k]=0; if(_conv_a[i][j*2][k*2+1]>_max) { _max=_conv_a[i][j*2][k*2+1]; _pool_pos[i][j][k]=1; } if(_conv_a[i][j*2+1][k*2]>_max) { _max=_conv_a[i][j*2+1][k*2]; _pool_pos[i][j][k]=2; } if(_conv_a[i][j*2+1][k*2+1]>_max) { _max=_conv_a[i][j*2+1][k*2+1]; _pool_pos[i][j][k]=3; } _pool[i][j][k]=_max; } } void conv_pool_gpu() { dim3 block(8,8,8); dim3 
grid((CONV_W_NUM-1)/block.x+1,(POOL_SIZE-1)/block.y+1,(POOL_SIZE-1)/block.z+1); _conv_pool<<<block,grid>>>(); cudaDeviceSynchronize(); } __global__ void _pool_fc1() { int i=threadIdx.x+blockDim.x*blockIdx.x; if(i<FC1_SIZE) { _fc1_z[i]=0; for(int j=0;j<CONV_W_NUM;j++) for(int k=0;k<POOL_SIZE;k++) for(int l=0;l<POOL_SIZE;l++) _fc1_z[i]+=_pool[j][k][l]*_fc1_w[i][j][k][l]; _fc1_z[i]+=_fc1_b[i]; _fc1_a[i]=_sigmoid(_fc1_z[i]); } } void pool_fc1_gpu() { dim3 block(32); dim3 grid((FC1_SIZE-1)/block.x+1); _pool_fc1<<<block,grid>>>(); cudaDeviceSynchronize(); } __global__ void _fc1_fc2() { int i=threadIdx.x+blockDim.x*blockIdx.x; if(i<FC2_SIZE) { _fc2_z[i]=0; for(int j=0;j<FC1_SIZE;j++) _fc2_z[i]+=_fc1_a[j]*_fc2_w[i][j]; _fc2_z[i]+=_fc2_b[i]; _fc2_a[i]=_sigmoid(_fc2_z[i]); } } void fc1_fc2_gpu() { dim3 block(32); dim3 grid((FC2_SIZE-1)/block.x+1); _fc1_fc2<<<block,grid>>>(); cudaDeviceSynchronize(); } __global__ void _set_answer_train(int idx) { int i=threadIdx.x+blockDim.x*blockIdx.x; if(i<FC2_SIZE) { _output[i]=_fc2_a[i]; _answer[i]=(_train_label[idx]==i)?1:0; } } __global__ void _set_answer_test(int idx) { int i=threadIdx.x+blockDim.x*blockIdx.x; if(i<FC2_SIZE) { _output[i]=_fc2_a[i]; _answer[i]=(_test_label[idx]==i)?1:0; } } void set_answer_gpu_train(int idx) { dim3 block(32); dim3 grid((FC2_SIZE-1)/block.x+1); _set_answer_train<<<block,grid>>>(idx); cudaDeviceSynchronize(); } void set_answer_gpu_test(int idx) { dim3 block(32); dim3 grid((FC2_SIZE-1)/block.x+1); _set_answer_test<<<block,grid>>>(idx); cudaDeviceSynchronize(); } __global__ void _check_answer_get_error() { float _max=_output[0]; int max_pos=0; for(int i=0;i<FC2_SIZE;i++) { if(_max<_output[i]) { _max=_output[i]; max_pos=i; } } if(_answer[max_pos]) _correct_cnt++; for(int i=0;i<FC2_SIZE;i++) { _C[i]=_output[i]-_answer[i]; _avg_error+=_C[i]*_C[i]*0.5; } } void check_answer_get_error_gpu() { _check_answer_get_error<<<1,1>>>(); cudaDeviceSynchronize(); }
fb535a785b955cbd7d90f0b80d45c97077d58e44.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "Overlay_Cuda.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int x_position = 1; int y_position = 1; unsigned char *main = NULL; hipMalloc(&main, XSIZE*YSIZE); int main_linesize = XSIZE*YSIZE; unsigned char *overlay = NULL; hipMalloc(&overlay, XSIZE*YSIZE); int overlay_linesize = XSIZE*YSIZE; int overlay_w = 1; int overlay_h = 1; unsigned char *overlay_alpha = NULL; hipMalloc(&overlay_alpha, XSIZE*YSIZE); int alpha_linesize = XSIZE*YSIZE; int alpha_adj_x = 2; int alpha_adj_y = 2; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( Overlay_Cuda), dim3(gridBlock),dim3(threadBlock), 0, 0, x_position,y_position,main,main_linesize,overlay,overlay_linesize,overlay_w,overlay_h,overlay_alpha,alpha_linesize,alpha_adj_x,alpha_adj_y); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( Overlay_Cuda), 
dim3(gridBlock),dim3(threadBlock), 0, 0, x_position,y_position,main,main_linesize,overlay,overlay_linesize,overlay_w,overlay_h,overlay_alpha,alpha_linesize,alpha_adj_x,alpha_adj_y); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( Overlay_Cuda), dim3(gridBlock),dim3(threadBlock), 0, 0, x_position,y_position,main,main_linesize,overlay,overlay_linesize,overlay_w,overlay_h,overlay_alpha,alpha_linesize,alpha_adj_x,alpha_adj_y); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
fb535a785b955cbd7d90f0b80d45c97077d58e44.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "Overlay_Cuda.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int x_position = 1; int y_position = 1; unsigned char *main = NULL; cudaMalloc(&main, XSIZE*YSIZE); int main_linesize = XSIZE*YSIZE; unsigned char *overlay = NULL; cudaMalloc(&overlay, XSIZE*YSIZE); int overlay_linesize = XSIZE*YSIZE; int overlay_w = 1; int overlay_h = 1; unsigned char *overlay_alpha = NULL; cudaMalloc(&overlay_alpha, XSIZE*YSIZE); int alpha_linesize = XSIZE*YSIZE; int alpha_adj_x = 2; int alpha_adj_y = 2; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); Overlay_Cuda<<<gridBlock,threadBlock>>>(x_position,y_position,main,main_linesize,overlay,overlay_linesize,overlay_w,overlay_h,overlay_alpha,alpha_linesize,alpha_adj_x,alpha_adj_y); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { 
Overlay_Cuda<<<gridBlock,threadBlock>>>(x_position,y_position,main,main_linesize,overlay,overlay_linesize,overlay_w,overlay_h,overlay_alpha,alpha_linesize,alpha_adj_x,alpha_adj_y); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { Overlay_Cuda<<<gridBlock,threadBlock>>>(x_position,y_position,main,main_linesize,overlay,overlay_linesize,overlay_w,overlay_h,overlay_alpha,alpha_linesize,alpha_adj_x,alpha_adj_y); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
0321491e4696caea6924625e68745a29fd865b35.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "matrix-mult.cuh" #include <gp/cuda-util-kernel.h> // Constants holding array sizes and pointers and coefficients. // // Values are set by cuda calls, they don't automatically take values // of variables in the C program with the same name. // __constant__ int array_size, array_size_lg; __constant__ int row_stride, row_stride_lg, dim_block_lg; __constant__ float *a, *b, *c; __constant__ int32_t *t_compute, *t_all; extern __shared__ float s[]; // Shared memory for buffering a elements. __constant__ int cs_itid_stride; texture<float> a_tex, b_tex; __global__ void mm(); __global__ void mm_iter(); __global__ void mm_blk_cache_ab(); __global__ void mm_blk_cache_ab_opt(); __global__ void mm_blk_cache_ab_tc(); __global__ void mm_blk_cache_a_local(); template <int dim_block> __global__ void mm_blk_cache_a_local_t(); static __host__ void collect_symbols() { CU_SYM(array_size); CU_SYM(array_size_lg); CU_SYM(row_stride); CU_SYM(row_stride_lg); CU_SYM(dim_block_lg); CU_SYM(a); CU_SYM(b); CU_SYM(c); CU_SYM(t_compute); CU_SYM(t_all); CU_SYM(cs_itid_stride); } __host__ void kernels_get_attr(GPU_Info& info) { collect_symbols(); info.GET_INFO(mm); info.GET_INFO(mm_iter); info.GET_INFO(mm_blk_cache_ab); info.GET_INFO(mm_blk_cache_ab_opt); info.GET_INFO(mm_blk_cache_a_local_t<3>); info.GET_INFO(mm_blk_cache_a_local_t<4>); info.GET_INFO(mm_blk_cache_ab_tc); } // This routine executes on the CPU. 
// __host__ void mmult_launch(dim3 dg, dim3 db, int version, void *a_dev, void *b_dev, int array_size) { static bool tex_bound = false; const bool need_tex = version == 6; if ( need_tex && !tex_bound ) { const size_t size = array_size * sizeof(float); size_t offset; const hipChannelFormatDesc fd = hipCreateChannelDesc(32,0,0,0,hipChannelFormatKindFloat); hipBindTexture(&offset, a_tex, a_dev, fd, size); hipBindTexture(&offset, b_tex, b_dev, fd, size); tex_bound = true; } else if ( !need_tex && tex_bound ) { hipUnbindTexture(a_tex); hipUnbindTexture(b_tex); tex_bound = false; } // Launch the kernel, using the provided configuration (block size, etc). // switch ( version ){ case 1:hipLaunchKernelGGL(( mm_iter), dim3(dg),dim3(db), 0, 0, ); break; case 2: { int shared_size = 2 * 4 * db.x; hipLaunchKernelGGL(( mm_blk_cache_ab), dim3(dg),dim3(db),shared_size, 0, ); } break; case 3: { int shared_size = 2 * 4 * db.x; hipLaunchKernelGGL(( mm_blk_cache_ab_opt), dim3(dg),dim3(db),shared_size, 0, ); } break; case 4: { int shared_size = 4 * db.x; if ( db.x == 64 )hipLaunchKernelGGL(( mm_blk_cache_a_local_t<3>), dim3(dg),dim3(db),shared_size, 0, ); } break; case 5: { int shared_size = 4 * db.x; if ( db.x == 256 )hipLaunchKernelGGL(( mm_blk_cache_a_local_t<4>), dim3(dg),dim3(db),shared_size, 0, ); } break; case 6: { hipLaunchKernelGGL(( mm_blk_cache_ab_tc), dim3(dg),dim3(db), 0, 0, ); } break; } } __global__ void mm() { // Compute a unique index (number) for this thread. // This will be used as an array index. 
// int idx = threadIdx.x + blockIdx.x * blockDim.x; if ( idx > array_size ) return; int row_mask = row_stride - 1; int col = idx & row_mask; int row = idx >> row_stride_lg; int idx_base = row << row_stride_lg; float c_value = 0; for ( int k=0; k<row_stride; k++ ) { int a_idx = idx_base | k; int b_idx = ( k << row_stride_lg ) | col; c_value += a[a_idx] * b[b_idx]; } c[idx] = c_value; } __global__ void mm_iter() { bool lane_0 = ( threadIdx.x & 0x1f ) == 0; int time_start = clock(); // Compute a unique index (number) for this thread. // This will be used as an array index. // int tid = threadIdx.x + blockIdx.x * blockDim.x; int thread_count = blockDim.x * gridDim.x; int row_mask = row_stride - 1; /// /// Iteration Strategy // // Value of c_idx in first iteration of loop below assuming: // Array size is 1024 x 1024 // Block size is 256 threads. // // 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 <- Bit position. // | blockIdx | threadIdx | <- Relationship to threads. // | row | col | <- Array element. // // Value of c_idx in the second iteration of the loop below: // // 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 <- Bit position. // | gridDim + blockIdx | threadIdx | <- Relationship to threads. // | row | col | <- Array element. // // Value of c_idx in the third iteration of the loop below: // // 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 <- Bit position. // | 2 * gridDim + blockIdx | threadIdx | <- Relationship to threads. // | row | col | <- Array element. 
for ( int c_idx = tid; c_idx < array_size; c_idx += thread_count ) { int col = c_idx & row_mask; int row = c_idx >> row_stride_lg; int a_idx_base = row << row_stride_lg; float c_value = 0; for ( int k=0; k<row_stride; k++ ) { int a_idx = a_idx_base | k; int b_idx = ( k << row_stride_lg ) | col; c_value += a[a_idx] * b[b_idx]; } c[c_idx] = c_value; } if ( !lane_0 ) return; t_all[tid>>5] = clock() - time_start; t_compute[tid>>5] = 0; } __global__ void mm_blk_cache_ab() { int group_count_lg = row_stride_lg - dim_block_lg; int group_count = 1 << group_count_lg; int dim_block = 1 << dim_block_lg; int local_row_mask = dim_block - 1; int block_lg = dim_block_lg << 1; int local_col = threadIdx.x & local_row_mask; int local_row = threadIdx.x >> dim_block_lg; int b_sidx_base = blockDim.x; int a_sidx = threadIdx.x; int b_sidx = threadIdx.x + b_sidx_base; int row_idx_pos = row_stride_lg; int row_sidx_pos = dim_block_lg; int c_col = ( blockIdx.x << dim_block_lg ) + local_col; int c_row = local_row << row_stride_lg; int itid_stride = gridDim.x << dim_block_lg; for ( ;; c_col += itid_stride ) { int col_overflow = c_col >> row_stride_lg; c_row += col_overflow << ( dim_block_lg + row_stride_lg ); c_col &= row_stride - 1; int c_idx = c_row + c_col; if ( c_row >= array_size ) break; float c_value = 0; int a_idx = c_row + local_col; int b_idx = ( local_row << row_idx_pos ) | c_col; for ( int k_group = 0; k_group < group_count; k_group++ ) { s[a_sidx] = a[a_idx]; s[b_sidx] = b[b_idx]; a_idx += 1 << dim_block_lg; b_idx += 1 << ( block_lg + group_count_lg ); __syncthreads(); for ( int kk = 0; kk < dim_block; kk++ ) { int a_sidx_k = ( local_row << row_sidx_pos ) | kk; int b_sidx_k = b_sidx_base + ( ( kk << row_sidx_pos ) | local_col ); c_value += s[a_sidx_k] * s[b_sidx_k]; } __syncthreads(); } c[c_idx] = c_value; } } __global__ void mm_blk_cache_ab_tc() { int group_count_lg = row_stride_lg - dim_block_lg; int group_count = 1 << group_count_lg; int dim_block = 1 << dim_block_lg; int 
local_row_mask = dim_block - 1; int local_col = threadIdx.x & local_row_mask; int local_row = threadIdx.x >> dim_block_lg; int c_col = ( blockIdx.x << dim_block_lg ) + local_col; int c_row = local_row << row_stride_lg; int itid_stride = gridDim.x << dim_block_lg; for ( ;; c_col += itid_stride ) { int col_overflow = c_col >> row_stride_lg; c_row += col_overflow << ( dim_block_lg + row_stride_lg ); c_col &= row_stride - 1; int c_idx = c_row + c_col; if ( c_row >= array_size ) break; float c_value = 0; int a_idx = c_row; int b_idx = c_col; for ( int k_group = 0; k_group < group_count; k_group++ ) { __syncthreads(); #define LOOP(DIM_BLOCK) \ for ( int kk = 0; kk < DIM_BLOCK; kk++ ) \ { \ c_value += tex1Dfetch(a_tex,a_idx) * tex1Dfetch(b_tex,b_idx); \ a_idx++; b_idx += row_stride; \ } if ( dim_block == 16 ) { LOOP(16); } else if ( dim_block == 8 ) { LOOP(8); } else { LOOP(dim_block); } #undef LOOP } c[c_idx] = c_value; } } __global__ void mm_blk_cache_ab_opt() { bool lane_0 = ( threadIdx.x & 0x1f ) == 0; int time_start = clock(); int time_compute = 0; int tid = threadIdx.x + blockIdx.x * blockDim.x; int thread_count = blockDim.x * gridDim.x; int group_count_lg = row_stride_lg - dim_block_lg; int group_count = 1 << group_count_lg; int dim_block = 1 << dim_block_lg; int local_row_mask = dim_block - 1; int block_lg = dim_block_lg << 1; int col_group_itid_pos = block_lg; int row_group_itid_pos = block_lg + group_count_lg; int col_group_mask = group_count - 1; int row_group_idx_pos = row_group_itid_pos; int col_group_idx_pos = dim_block_lg; int row_idx_pos = row_stride_lg; int row_sidx_pos = dim_block_lg; int local_col = threadIdx.x & local_row_mask; int local_row = threadIdx.x >> row_sidx_pos; int b_sidx_base = blockDim.x; int a_sidx = threadIdx.x; int b_sidx = threadIdx.x + b_sidx_base; for ( int itid = tid; itid < array_size; itid += thread_count ) { int row_group = itid >> row_group_itid_pos; int col_group = itid >> col_group_itid_pos & col_group_mask; if ( row_group > 
group_count ) break; int c_idx = ( row_group << row_group_idx_pos ) | ( local_row << row_idx_pos ) | ( col_group << col_group_idx_pos ) | local_col; float c_value = 0; int a_idx = ( row_group << row_group_idx_pos ) | ( local_row << row_idx_pos ) | local_col; int a_idx_stride = 1 << col_group_idx_pos; int b_idx = ( local_row << row_idx_pos ) | ( col_group << col_group_idx_pos ) | local_col; int b_idx_stride = 1 << row_group_idx_pos; for ( int k_group = 0; k_group < group_count; k_group++ ) { s[a_sidx] = a[a_idx]; s[b_sidx] = b[b_idx]; a_idx += a_idx_stride; b_idx += b_idx_stride; __syncthreads(); int compute_start = clock(); int a_sidx_k = ( local_row << row_sidx_pos ); int b_sidx_k = b_sidx_base | local_col; #define LOOP \ for ( int kk = 0; kk < dim_block; kk++ ) \ { \ c_value += s[a_sidx_k] * s[b_sidx_k]; \ a_sidx_k++; \ b_sidx_k += row_sidx_stride; \ } if ( row_sidx_pos == 3 ) { int row_sidx_stride = 8; #pragma unroll 8 LOOP; } else if ( row_sidx_pos == 4 ) { int row_sidx_stride = 16; #pragma unroll 16 LOOP; } else { int row_sidx_stride = 1 << row_sidx_pos; LOOP; } __syncthreads(); time_compute += clock() - compute_start; } c[c_idx] = c_value; } if ( !lane_0 ) return; t_all[tid>>5] = clock() - time_start; t_compute[tid>>5] = time_compute; } template <int DIM_BLOCK_LG> __global__ void mm_blk_cache_a_local_t() { int dim_block = 1 << DIM_BLOCK_LG; int block_lg = DIM_BLOCK_LG << 1; int local_a_col = threadIdx.x & ( dim_block - 1 ); int local_a_row = threadIdx.x >> DIM_BLOCK_LG; int c_idx_col = threadIdx.x; int b_sidx_copy = threadIdx.x; int itid_stride = gridDim.x << ( DIM_BLOCK_LG + row_stride_lg ); int c_idx_row = blockIdx.x << ( DIM_BLOCK_LG + row_stride_lg ); /// C Block Loop: Each iteration computes blk values of C. 
// for ( ;; c_idx_row += itid_stride ) { int row_overflow = c_idx_row >> array_size_lg << block_lg; c_idx_col += row_overflow; c_idx_row &= array_size - 1; if ( c_idx_col >= row_stride ) break; int c_idx = c_idx_row + c_idx_col; int a_idx = ( local_a_row << row_stride_lg ) + c_idx_row + local_a_col; int b_idx = c_idx_col; float cloc[1 << DIM_BLOCK_LG]; for ( int kk=0; kk<dim_block; kk++ ) cloc[kk] = 0; /// A Block Loop: Each iteration uses a blk x blk submatrix of A. // The iterations move across columns. // while ( b_idx < array_size ) { __syncthreads(); s[b_sidx_copy] = a[a_idx]; a_idx += dim_block; __syncthreads(); /// B Value Loop: Each iteration uses one value of B. # pragma unroll for ( int kk = 0; kk < dim_block; kk++, b_idx += row_stride ) { float b_val = b[b_idx]; for ( int ii = 0; ii < dim_block; ii++ ) cloc[ii] += s[ kk + dim_block * ii ] * b_val; } } for ( int ii = 0 ; ii < dim_block; ii++ ) c[ c_idx + ii * row_stride ] = cloc[ ii ]; } }
0321491e4696caea6924625e68745a29fd865b35.cu
#include "matrix-mult.cuh" #include <gp/cuda-util-kernel.h> // Constants holding array sizes and pointers and coefficients. // // Values are set by cuda calls, they don't automatically take values // of variables in the C program with the same name. // __constant__ int array_size, array_size_lg; __constant__ int row_stride, row_stride_lg, dim_block_lg; __constant__ float *a, *b, *c; __constant__ int32_t *t_compute, *t_all; extern __shared__ float s[]; // Shared memory for buffering a elements. __constant__ int cs_itid_stride; texture<float> a_tex, b_tex; __global__ void mm(); __global__ void mm_iter(); __global__ void mm_blk_cache_ab(); __global__ void mm_blk_cache_ab_opt(); __global__ void mm_blk_cache_ab_tc(); __global__ void mm_blk_cache_a_local(); template <int dim_block> __global__ void mm_blk_cache_a_local_t(); static __host__ void collect_symbols() { CU_SYM(array_size); CU_SYM(array_size_lg); CU_SYM(row_stride); CU_SYM(row_stride_lg); CU_SYM(dim_block_lg); CU_SYM(a); CU_SYM(b); CU_SYM(c); CU_SYM(t_compute); CU_SYM(t_all); CU_SYM(cs_itid_stride); } __host__ void kernels_get_attr(GPU_Info& info) { collect_symbols(); info.GET_INFO(mm); info.GET_INFO(mm_iter); info.GET_INFO(mm_blk_cache_ab); info.GET_INFO(mm_blk_cache_ab_opt); info.GET_INFO(mm_blk_cache_a_local_t<3>); info.GET_INFO(mm_blk_cache_a_local_t<4>); info.GET_INFO(mm_blk_cache_ab_tc); } // This routine executes on the CPU. 
// __host__ void mmult_launch(dim3 dg, dim3 db, int version, void *a_dev, void *b_dev, int array_size) { static bool tex_bound = false; const bool need_tex = version == 6; if ( need_tex && !tex_bound ) { const size_t size = array_size * sizeof(float); size_t offset; const cudaChannelFormatDesc fd = cudaCreateChannelDesc(32,0,0,0,cudaChannelFormatKindFloat); cudaBindTexture(&offset, a_tex, a_dev, fd, size); cudaBindTexture(&offset, b_tex, b_dev, fd, size); tex_bound = true; } else if ( !need_tex && tex_bound ) { cudaUnbindTexture(a_tex); cudaUnbindTexture(b_tex); tex_bound = false; } // Launch the kernel, using the provided configuration (block size, etc). // switch ( version ){ case 1: mm_iter<<<dg,db>>>(); break; case 2: { int shared_size = 2 * 4 * db.x; mm_blk_cache_ab<<<dg,db,shared_size>>>(); } break; case 3: { int shared_size = 2 * 4 * db.x; mm_blk_cache_ab_opt<<<dg,db,shared_size>>>(); } break; case 4: { int shared_size = 4 * db.x; if ( db.x == 64 ) mm_blk_cache_a_local_t<3><<<dg,db,shared_size>>>(); } break; case 5: { int shared_size = 4 * db.x; if ( db.x == 256 ) mm_blk_cache_a_local_t<4><<<dg,db,shared_size>>>(); } break; case 6: { mm_blk_cache_ab_tc<<<dg,db>>>(); } break; } } __global__ void mm() { // Compute a unique index (number) for this thread. // This will be used as an array index. // int idx = threadIdx.x + blockIdx.x * blockDim.x; if ( idx > array_size ) return; int row_mask = row_stride - 1; int col = idx & row_mask; int row = idx >> row_stride_lg; int idx_base = row << row_stride_lg; float c_value = 0; for ( int k=0; k<row_stride; k++ ) { int a_idx = idx_base | k; int b_idx = ( k << row_stride_lg ) | col; c_value += a[a_idx] * b[b_idx]; } c[idx] = c_value; } __global__ void mm_iter() { bool lane_0 = ( threadIdx.x & 0x1f ) == 0; int time_start = clock(); // Compute a unique index (number) for this thread. // This will be used as an array index. 
// int tid = threadIdx.x + blockIdx.x * blockDim.x; int thread_count = blockDim.x * gridDim.x; int row_mask = row_stride - 1; /// /// Iteration Strategy // // Value of c_idx in first iteration of loop below assuming: // Array size is 1024 x 1024 // Block size is 256 threads. // // 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 <- Bit position. // | blockIdx | threadIdx | <- Relationship to threads. // | row | col | <- Array element. // // Value of c_idx in the second iteration of the loop below: // // 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 <- Bit position. // | gridDim + blockIdx | threadIdx | <- Relationship to threads. // | row | col | <- Array element. // // Value of c_idx in the third iteration of the loop below: // // 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 <- Bit position. // | 2 * gridDim + blockIdx | threadIdx | <- Relationship to threads. // | row | col | <- Array element. for ( int c_idx = tid; c_idx < array_size; c_idx += thread_count ) { int col = c_idx & row_mask; int row = c_idx >> row_stride_lg; int a_idx_base = row << row_stride_lg; float c_value = 0; for ( int k=0; k<row_stride; k++ ) { int a_idx = a_idx_base | k; int b_idx = ( k << row_stride_lg ) | col; c_value += a[a_idx] * b[b_idx]; } c[c_idx] = c_value; } if ( !lane_0 ) return; t_all[tid>>5] = clock() - time_start; t_compute[tid>>5] = 0; } __global__ void mm_blk_cache_ab() { int group_count_lg = row_stride_lg - dim_block_lg; int group_count = 1 << group_count_lg; int dim_block = 1 << dim_block_lg; int local_row_mask = dim_block - 1; int block_lg = dim_block_lg << 1; int local_col = threadIdx.x & local_row_mask; int local_row = threadIdx.x >> dim_block_lg; int b_sidx_base = blockDim.x; int a_sidx = threadIdx.x; int b_sidx = threadIdx.x + b_sidx_base; int row_idx_pos = row_stride_lg; int row_sidx_pos = dim_block_lg; int c_col = ( blockIdx.x << dim_block_lg ) + local_col; int c_row = local_row << row_stride_lg; int itid_stride = gridDim.x << dim_block_lg; for ( ;; c_col += itid_stride ) { int 
col_overflow = c_col >> row_stride_lg; c_row += col_overflow << ( dim_block_lg + row_stride_lg ); c_col &= row_stride - 1; int c_idx = c_row + c_col; if ( c_row >= array_size ) break; float c_value = 0; int a_idx = c_row + local_col; int b_idx = ( local_row << row_idx_pos ) | c_col; for ( int k_group = 0; k_group < group_count; k_group++ ) { s[a_sidx] = a[a_idx]; s[b_sidx] = b[b_idx]; a_idx += 1 << dim_block_lg; b_idx += 1 << ( block_lg + group_count_lg ); __syncthreads(); for ( int kk = 0; kk < dim_block; kk++ ) { int a_sidx_k = ( local_row << row_sidx_pos ) | kk; int b_sidx_k = b_sidx_base + ( ( kk << row_sidx_pos ) | local_col ); c_value += s[a_sidx_k] * s[b_sidx_k]; } __syncthreads(); } c[c_idx] = c_value; } } __global__ void mm_blk_cache_ab_tc() { int group_count_lg = row_stride_lg - dim_block_lg; int group_count = 1 << group_count_lg; int dim_block = 1 << dim_block_lg; int local_row_mask = dim_block - 1; int local_col = threadIdx.x & local_row_mask; int local_row = threadIdx.x >> dim_block_lg; int c_col = ( blockIdx.x << dim_block_lg ) + local_col; int c_row = local_row << row_stride_lg; int itid_stride = gridDim.x << dim_block_lg; for ( ;; c_col += itid_stride ) { int col_overflow = c_col >> row_stride_lg; c_row += col_overflow << ( dim_block_lg + row_stride_lg ); c_col &= row_stride - 1; int c_idx = c_row + c_col; if ( c_row >= array_size ) break; float c_value = 0; int a_idx = c_row; int b_idx = c_col; for ( int k_group = 0; k_group < group_count; k_group++ ) { __syncthreads(); #define LOOP(DIM_BLOCK) \ for ( int kk = 0; kk < DIM_BLOCK; kk++ ) \ { \ c_value += tex1Dfetch(a_tex,a_idx) * tex1Dfetch(b_tex,b_idx); \ a_idx++; b_idx += row_stride; \ } if ( dim_block == 16 ) { LOOP(16); } else if ( dim_block == 8 ) { LOOP(8); } else { LOOP(dim_block); } #undef LOOP } c[c_idx] = c_value; } } __global__ void mm_blk_cache_ab_opt() { bool lane_0 = ( threadIdx.x & 0x1f ) == 0; int time_start = clock(); int time_compute = 0; int tid = threadIdx.x + blockIdx.x * 
blockDim.x; int thread_count = blockDim.x * gridDim.x; int group_count_lg = row_stride_lg - dim_block_lg; int group_count = 1 << group_count_lg; int dim_block = 1 << dim_block_lg; int local_row_mask = dim_block - 1; int block_lg = dim_block_lg << 1; int col_group_itid_pos = block_lg; int row_group_itid_pos = block_lg + group_count_lg; int col_group_mask = group_count - 1; int row_group_idx_pos = row_group_itid_pos; int col_group_idx_pos = dim_block_lg; int row_idx_pos = row_stride_lg; int row_sidx_pos = dim_block_lg; int local_col = threadIdx.x & local_row_mask; int local_row = threadIdx.x >> row_sidx_pos; int b_sidx_base = blockDim.x; int a_sidx = threadIdx.x; int b_sidx = threadIdx.x + b_sidx_base; for ( int itid = tid; itid < array_size; itid += thread_count ) { int row_group = itid >> row_group_itid_pos; int col_group = itid >> col_group_itid_pos & col_group_mask; if ( row_group > group_count ) break; int c_idx = ( row_group << row_group_idx_pos ) | ( local_row << row_idx_pos ) | ( col_group << col_group_idx_pos ) | local_col; float c_value = 0; int a_idx = ( row_group << row_group_idx_pos ) | ( local_row << row_idx_pos ) | local_col; int a_idx_stride = 1 << col_group_idx_pos; int b_idx = ( local_row << row_idx_pos ) | ( col_group << col_group_idx_pos ) | local_col; int b_idx_stride = 1 << row_group_idx_pos; for ( int k_group = 0; k_group < group_count; k_group++ ) { s[a_sidx] = a[a_idx]; s[b_sidx] = b[b_idx]; a_idx += a_idx_stride; b_idx += b_idx_stride; __syncthreads(); int compute_start = clock(); int a_sidx_k = ( local_row << row_sidx_pos ); int b_sidx_k = b_sidx_base | local_col; #define LOOP \ for ( int kk = 0; kk < dim_block; kk++ ) \ { \ c_value += s[a_sidx_k] * s[b_sidx_k]; \ a_sidx_k++; \ b_sidx_k += row_sidx_stride; \ } if ( row_sidx_pos == 3 ) { int row_sidx_stride = 8; #pragma unroll 8 LOOP; } else if ( row_sidx_pos == 4 ) { int row_sidx_stride = 16; #pragma unroll 16 LOOP; } else { int row_sidx_stride = 1 << row_sidx_pos; LOOP; } __syncthreads(); 
time_compute += clock() - compute_start; } c[c_idx] = c_value; } if ( !lane_0 ) return; t_all[tid>>5] = clock() - time_start; t_compute[tid>>5] = time_compute; } template <int DIM_BLOCK_LG> __global__ void mm_blk_cache_a_local_t() { int dim_block = 1 << DIM_BLOCK_LG; int block_lg = DIM_BLOCK_LG << 1; int local_a_col = threadIdx.x & ( dim_block - 1 ); int local_a_row = threadIdx.x >> DIM_BLOCK_LG; int c_idx_col = threadIdx.x; int b_sidx_copy = threadIdx.x; int itid_stride = gridDim.x << ( DIM_BLOCK_LG + row_stride_lg ); int c_idx_row = blockIdx.x << ( DIM_BLOCK_LG + row_stride_lg ); /// C Block Loop: Each iteration computes blk values of C. // for ( ;; c_idx_row += itid_stride ) { int row_overflow = c_idx_row >> array_size_lg << block_lg; c_idx_col += row_overflow; c_idx_row &= array_size - 1; if ( c_idx_col >= row_stride ) break; int c_idx = c_idx_row + c_idx_col; int a_idx = ( local_a_row << row_stride_lg ) + c_idx_row + local_a_col; int b_idx = c_idx_col; float cloc[1 << DIM_BLOCK_LG]; for ( int kk=0; kk<dim_block; kk++ ) cloc[kk] = 0; /// A Block Loop: Each iteration uses a blk x blk submatrix of A. // The iterations move across columns. // while ( b_idx < array_size ) { __syncthreads(); s[b_sidx_copy] = a[a_idx]; a_idx += dim_block; __syncthreads(); /// B Value Loop: Each iteration uses one value of B. # pragma unroll for ( int kk = 0; kk < dim_block; kk++, b_idx += row_stride ) { float b_val = b[b_idx]; for ( int ii = 0; ii < dim_block; ii++ ) cloc[ii] += s[ kk + dim_block * ii ] * b_val; } } for ( int ii = 0 ; ii < dim_block; ii++ ) c[ c_idx + ii * row_stride ] = cloc[ ii ]; } }
c4753b97553fbb402551c1e2b3273bfdd979d06c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <iostream> #include <algorithm> #include <cfloat> #include <chrono> #include <fstream> #include <random> #include <sstream> #include <vector> #include <chrono> #include <time.h> using namespace std; double passed_time; __global__ void distance(float *dst, float *x, float *y, float *mu_x, float *mu_y){ int i = blockIdx.x; int j = threadIdx.x; dst[i * blockDim.x + j] = (x[i] - mu_x[j]) * (x[i] - mu_x[j]); dst[i * blockDim.x + j] += (y[i] - mu_y[j]) * (y[i] - mu_y[j]); } __global__ void clustering(int *group, float *dst, int k){ int i = blockIdx.x; int j; float min_dst; min_dst = dst[i * k + 0]; group[i] = 1; for(j = 1; j < k; ++j){ if(dst[i * k + j] < min_dst){ min_dst = dst[i * k + j]; group[i] = j + 1; } } } __global__ void clear(float *sum_x, float *sum_y, int *nx, int *ny){ int j = threadIdx.x; sum_x[j] = 0; sum_y[j] = 0; nx[j] = 0; ny[j] = 0; } __global__ void move_centroid(float *sum_x, float *sum_y, int *nx, int *ny, float *x, float *y, int *group, int num_points){ int i; int j = threadIdx.x; for(i = 0; i < num_points; ++i){ if(group[i] == (j + 1)){ sum_x[j] += x[i]; sum_y[j] += y[i]; nx[j]++; ny[j]++; } } } __global__ void move_mu(float *mu_x, float *mu_y, float *sum_x, float *sum_y, int *nx, int *ny){ int j = threadIdx.x; mu_x[j] = sum_x[j]/nx[j]; mu_y[j] = sum_y[j]/ny[j]; } void kmeans-kernel(int num_reps, int num_points, int k, float *x_d, float *y_d, float *mu_x_d, float *mu_y_d, int *group_d, int *nx_d, int *ny_d, float *sum_x_d, float *sum_y_d, float *dst_d){ int i; for(i = 0; i < num_reps; ++i){ hipLaunchKernelGGL(( distance), dim3(num_points),dim3(k), 0, 0, dst_d, x_d, y_d, mu_x_d, mu_y_d); hipLaunchKernelGGL(( clustering), dim3(num_points),dim3(1), 0, 0, group_d, dst_d, k); hipLaunchKernelGGL(( clear), dim3(1),dim3(k), 0, 0, sum_x_d, sum_y_d, nx_d, ny_d); hipLaunchKernelGGL(( move_centroid), dim3(1),dim3(k), 0, 0, 
sum_x_d, sum_y_d, nx_d, ny_d, x_d, y_d, group_d, num_points); hipLaunchKernelGGL(( move_mu), dim3(1),dim3(k), 0, 0, mu_x_d, mu_y_d, sum_x_d, sum_y_d, nx_d, ny_d); } } void read_samples(float **x, float **y, float **mu_x, float **mu_y, int *num_points, int *k,char* arg){ FILE *fp; char buf[64]; *num_points = 0; fp = fopen(arg, "r"); while(fgets(buf, 64, fp) != NULL){ *num_points += 1; *x = (float*) realloc(*x, (*num_points)*sizeof(float)); *y = (float*) realloc(*y, (*num_points)*sizeof(float)); istringstream line_stream(buf); float x1,y1; line_stream >> x1 >> y1; (*x)[*num_points - 1] = x1; (*y)[*num_points - 1] = y1; } fclose(fp); *k = 0; fp = fopen("input/initCoord.txt", "r"); while(fgets(buf, 64, fp) != NULL){ *k += 1; *mu_x = (float*) realloc(*mu_x, (*k)*sizeof(float)); *mu_y = (float*) realloc(*mu_y, (*k)*sizeof(float)); istringstream line_stream(buf); float x1,y1; line_stream >> x1 >> y1; (*mu_x)[*k - 1] = x1; (*mu_y)[*k - 1] = x1; } fclose(fp); } void verify(int *group, float *mu_x, float *mu_y, int num_points, int k,char* arg){ FILE *fp; int i; string str(arg),str1,str2; str = "output/cuda/" + str; str1 = str + "_group_members.txt"; fp = fopen(str1.c_str(), "w"); for(i = 0; i < num_points; ++i){ fprintf(fp, "%d\n", group[i]); } fclose(fp); str2 = str + "_centroids.txt"; fp = fopen(str2.c_str(), "w"); for(i = 0; i < k; ++i){ fprintf(fp, "%0.6f %0.6f\n", mu_x[i], mu_y[i]); } fclose(fp); fp = fopen("CUDAtimes.txt", "a"); fprintf(fp, "%0.6f\n", passed_time); fclose(fp); } int main(int argc,char* argv[]){ // Initialize host variables ---------------------------------------------- int num_points; /* number of points */ int k; /* number of clusters */ int *group; float *x = NULL, *y = NULL, *mu_x = NULL, *mu_y = NULL; // Initialize device variables -------------------------------------------- int *group_d, *nx_d, *ny_d; float *x_d, *y_d, *mu_x_d, *mu_y_d, *sum_x_d, *sum_y_d, *dst_d; /* read data from files on cpu */ read_samples(&x, &y, &mu_x, &mu_y, &num_points, 
&k,argv[2]); // Allocate host memory ----=====------------------------------------------ group = (int*) malloc(num_points*sizeof(int)); // Allocate device variables ---------------------------------------------- cuda_ret = hipMalloc((void**) &group_d,num_points*sizeof(int)); if(cuda_ret != hipSuccess) FATAL("Unable to allocate device memory"); cuda_ret = hipMalloc((void**) &nx_d, k*sizeof(int)); if(cuda_ret != hipSuccess) FATAL("Unable to allocate device memory"); cuda_ret = hipMalloc((void**) &ny_d, k*sizeof(int)); if(cuda_ret != hipSuccess) FATAL("Unable to allocate device memory"); cuda_ret = hipMalloc((void**) &x_d, num_points*sizeof(float)); if(cuda_ret != hipSuccess) FATAL("Unable to allocate device memory"); cuda_ret = hipMalloc((void**) &y_d, num_points*sizeof(float)); if(cuda_ret != hipSuccess) FATAL("Unable to allocate device memory"); cuda_ret = hipMalloc((void**) &mu_x_d, k*sizeof(float)); if(cuda_ret != hipSuccess) FATAL("Unable to allocate device memory"); cuda_ret = hipMalloc((void**) &mu_y_d, k*sizeof(float)); if(cuda_ret != hipSuccess) FATAL("Unable to allocate device memory"); cuda_ret = hipMalloc((void**) &sum_x_d, k*sizeof(float)); if(cuda_ret != hipSuccess) FATAL("Unable to allocate device memory"); cuda_ret = hipMalloc((void**) &sum_y_d, k*sizeof(float)); if(cuda_ret != hipSuccess) FATAL("Unable to allocate device memory"); cuda_ret = hipMalloc((void**) &dst_d, num_points*k*sizeof(float)); if(cuda_ret != hipSuccess) FATAL("Unable to allocate device memory"); // Copy host variables to device ------------------------------------------ cuda_ret = hipMemcpy(x_d, x, num_points*sizeof(float), hipMemcpyHostToDevice); if(cuda_ret != hipSuccess) FATAL("Unable to copy memory to the device"); cuda_ret = hipMemcpy(y_d, y, num_points*sizeof(float), hipMemcpyHostToDevice); if(cuda_ret != hipSuccess) FATAL("Unable to copy memory to the device"); cuda_ret = hipMemcpy(mu_x_d, mu_x, k*sizeof(float), hipMemcpyHostToDevice); if(cuda_ret != hipSuccess) 
FATAL("Unable to copy memory to the device"); cuda_ret = hipMemcpy(mu_y_d, mu_y, k*sizeof(float), hipMemcpyHostToDevice); if(cuda_ret != hipSuccess) FATAL("Unable to copy memory to the device"); // Launch kernel ---------------------------------------------------------- printf("Launching kernel..."); fflush(stdout); const auto start = chrono::high_resolution_clock::now(); kmeans-kernel(100, num_points, k, x_d, y_d, mu_x_d, mu_y_d, group_d, nx_d, ny_d, sum_x_d, sum_y_d, dst_d); const auto end = chrono::high_resolution_clock::now(); const auto duration = chrono::duration_cast<chrono::duration<float>>(end - start); cerr << "CUDA Took: " << duration.count() << "s" << " for "<<argv[3]<<" points." << endl; cuda_ret = hipDeviceSynchronize(); if(cuda_ret != hipSuccess) FATAL("Unable to launch/execute kernel"); passed_time = duration.count(); // Copy device variables from host ---------------------------------------- cuda_ret = hipMemcpy(group, group_d, num_points*sizeof(int), hipMemcpyDeviceToHost); if(cuda_ret != hipSuccess) FATAL("Unable to copy memory to host"); cuda_ret = hipMemcpy(mu_x, mu_x_d, k*sizeof(float), hipMemcpyDeviceToHost); if(cuda_ret != hipSuccess) FATAL("Unable to copy memory to host"); cuda_ret = hipMemcpy(mu_y, mu_y_d, k*sizeof(float), hipMemcpyDeviceToHost); if(cuda_ret != hipSuccess) FATAL("Unable to copy memory to host"); // Verify correctness ----------------------------------------------------- verify(group, mu_x, mu_y, num_points, k,argv[3]); // Free memory ------------------------------------------------------------ free(x); free(y); free(mu_x); free(mu_y); free(group); hipFree(x_d); hipFree(y_d); hipFree(mu_x_d); hipFree(mu_y_d); hipFree(group_d); hipFree(nx_d); hipFree(ny_d); hipFree(sum_x_d); hipFree(sum_y_d); hipFree(dst_d); return 0; }
c4753b97553fbb402551c1e2b3273bfdd979d06c.cu
#include <stdio.h> #include <stdlib.h> #include <iostream> #include <algorithm> #include <cfloat> #include <chrono> #include <fstream> #include <random> #include <sstream> #include <vector> #include <chrono> #include <time.h> using namespace std; double passed_time; __global__ void distance(float *dst, float *x, float *y, float *mu_x, float *mu_y){ int i = blockIdx.x; int j = threadIdx.x; dst[i * blockDim.x + j] = (x[i] - mu_x[j]) * (x[i] - mu_x[j]); dst[i * blockDim.x + j] += (y[i] - mu_y[j]) * (y[i] - mu_y[j]); } __global__ void clustering(int *group, float *dst, int k){ int i = blockIdx.x; int j; float min_dst; min_dst = dst[i * k + 0]; group[i] = 1; for(j = 1; j < k; ++j){ if(dst[i * k + j] < min_dst){ min_dst = dst[i * k + j]; group[i] = j + 1; } } } __global__ void clear(float *sum_x, float *sum_y, int *nx, int *ny){ int j = threadIdx.x; sum_x[j] = 0; sum_y[j] = 0; nx[j] = 0; ny[j] = 0; } __global__ void move_centroid(float *sum_x, float *sum_y, int *nx, int *ny, float *x, float *y, int *group, int num_points){ int i; int j = threadIdx.x; for(i = 0; i < num_points; ++i){ if(group[i] == (j + 1)){ sum_x[j] += x[i]; sum_y[j] += y[i]; nx[j]++; ny[j]++; } } } __global__ void move_mu(float *mu_x, float *mu_y, float *sum_x, float *sum_y, int *nx, int *ny){ int j = threadIdx.x; mu_x[j] = sum_x[j]/nx[j]; mu_y[j] = sum_y[j]/ny[j]; } void kmeans-kernel(int num_reps, int num_points, int k, float *x_d, float *y_d, float *mu_x_d, float *mu_y_d, int *group_d, int *nx_d, int *ny_d, float *sum_x_d, float *sum_y_d, float *dst_d){ int i; for(i = 0; i < num_reps; ++i){ distance<<<num_points,k>>>(dst_d, x_d, y_d, mu_x_d, mu_y_d); clustering<<<num_points,1>>>(group_d, dst_d, k); clear<<<1,k>>>(sum_x_d, sum_y_d, nx_d, ny_d); move_centroid<<<1,k>>>(sum_x_d, sum_y_d, nx_d, ny_d, x_d, y_d, group_d, num_points); move_mu<<<1,k>>>(mu_x_d, mu_y_d, sum_x_d, sum_y_d, nx_d, ny_d); } } void read_samples(float **x, float **y, float **mu_x, float **mu_y, int *num_points, int *k,char* arg){ FILE 
*fp; char buf[64]; *num_points = 0; fp = fopen(arg, "r"); while(fgets(buf, 64, fp) != NULL){ *num_points += 1; *x = (float*) realloc(*x, (*num_points)*sizeof(float)); *y = (float*) realloc(*y, (*num_points)*sizeof(float)); istringstream line_stream(buf); float x1,y1; line_stream >> x1 >> y1; (*x)[*num_points - 1] = x1; (*y)[*num_points - 1] = y1; } fclose(fp); *k = 0; fp = fopen("input/initCoord.txt", "r"); while(fgets(buf, 64, fp) != NULL){ *k += 1; *mu_x = (float*) realloc(*mu_x, (*k)*sizeof(float)); *mu_y = (float*) realloc(*mu_y, (*k)*sizeof(float)); istringstream line_stream(buf); float x1,y1; line_stream >> x1 >> y1; (*mu_x)[*k - 1] = x1; (*mu_y)[*k - 1] = x1; } fclose(fp); } void verify(int *group, float *mu_x, float *mu_y, int num_points, int k,char* arg){ FILE *fp; int i; string str(arg),str1,str2; str = "output/cuda/" + str; str1 = str + "_group_members.txt"; fp = fopen(str1.c_str(), "w"); for(i = 0; i < num_points; ++i){ fprintf(fp, "%d\n", group[i]); } fclose(fp); str2 = str + "_centroids.txt"; fp = fopen(str2.c_str(), "w"); for(i = 0; i < k; ++i){ fprintf(fp, "%0.6f %0.6f\n", mu_x[i], mu_y[i]); } fclose(fp); fp = fopen("CUDAtimes.txt", "a"); fprintf(fp, "%0.6f\n", passed_time); fclose(fp); } int main(int argc,char* argv[]){ // Initialize host variables ---------------------------------------------- int num_points; /* number of points */ int k; /* number of clusters */ int *group; float *x = NULL, *y = NULL, *mu_x = NULL, *mu_y = NULL; // Initialize device variables -------------------------------------------- int *group_d, *nx_d, *ny_d; float *x_d, *y_d, *mu_x_d, *mu_y_d, *sum_x_d, *sum_y_d, *dst_d; /* read data from files on cpu */ read_samples(&x, &y, &mu_x, &mu_y, &num_points, &k,argv[2]); // Allocate host memory ----=====------------------------------------------ group = (int*) malloc(num_points*sizeof(int)); // Allocate device variables ---------------------------------------------- cuda_ret = cudaMalloc((void**) &group_d,num_points*sizeof(int)); 
if(cuda_ret != cudaSuccess) FATAL("Unable to allocate device memory"); cuda_ret = cudaMalloc((void**) &nx_d, k*sizeof(int)); if(cuda_ret != cudaSuccess) FATAL("Unable to allocate device memory"); cuda_ret = cudaMalloc((void**) &ny_d, k*sizeof(int)); if(cuda_ret != cudaSuccess) FATAL("Unable to allocate device memory"); cuda_ret = cudaMalloc((void**) &x_d, num_points*sizeof(float)); if(cuda_ret != cudaSuccess) FATAL("Unable to allocate device memory"); cuda_ret = cudaMalloc((void**) &y_d, num_points*sizeof(float)); if(cuda_ret != cudaSuccess) FATAL("Unable to allocate device memory"); cuda_ret = cudaMalloc((void**) &mu_x_d, k*sizeof(float)); if(cuda_ret != cudaSuccess) FATAL("Unable to allocate device memory"); cuda_ret = cudaMalloc((void**) &mu_y_d, k*sizeof(float)); if(cuda_ret != cudaSuccess) FATAL("Unable to allocate device memory"); cuda_ret = cudaMalloc((void**) &sum_x_d, k*sizeof(float)); if(cuda_ret != cudaSuccess) FATAL("Unable to allocate device memory"); cuda_ret = cudaMalloc((void**) &sum_y_d, k*sizeof(float)); if(cuda_ret != cudaSuccess) FATAL("Unable to allocate device memory"); cuda_ret = cudaMalloc((void**) &dst_d, num_points*k*sizeof(float)); if(cuda_ret != cudaSuccess) FATAL("Unable to allocate device memory"); // Copy host variables to device ------------------------------------------ cuda_ret = cudaMemcpy(x_d, x, num_points*sizeof(float), cudaMemcpyHostToDevice); if(cuda_ret != cudaSuccess) FATAL("Unable to copy memory to the device"); cuda_ret = cudaMemcpy(y_d, y, num_points*sizeof(float), cudaMemcpyHostToDevice); if(cuda_ret != cudaSuccess) FATAL("Unable to copy memory to the device"); cuda_ret = cudaMemcpy(mu_x_d, mu_x, k*sizeof(float), cudaMemcpyHostToDevice); if(cuda_ret != cudaSuccess) FATAL("Unable to copy memory to the device"); cuda_ret = cudaMemcpy(mu_y_d, mu_y, k*sizeof(float), cudaMemcpyHostToDevice); if(cuda_ret != cudaSuccess) FATAL("Unable to copy memory to the device"); // Launch kernel 
---------------------------------------------------------- printf("Launching kernel..."); fflush(stdout); const auto start = chrono::high_resolution_clock::now(); kmeans-kernel(100, num_points, k, x_d, y_d, mu_x_d, mu_y_d, group_d, nx_d, ny_d, sum_x_d, sum_y_d, dst_d); const auto end = chrono::high_resolution_clock::now(); const auto duration = chrono::duration_cast<chrono::duration<float>>(end - start); cerr << "CUDA Took: " << duration.count() << "s" << " for "<<argv[3]<<" points." << endl; cuda_ret = cudaDeviceSynchronize(); if(cuda_ret != cudaSuccess) FATAL("Unable to launch/execute kernel"); passed_time = duration.count(); // Copy device variables from host ---------------------------------------- cuda_ret = cudaMemcpy(group, group_d, num_points*sizeof(int), cudaMemcpyDeviceToHost); if(cuda_ret != cudaSuccess) FATAL("Unable to copy memory to host"); cuda_ret = cudaMemcpy(mu_x, mu_x_d, k*sizeof(float), cudaMemcpyDeviceToHost); if(cuda_ret != cudaSuccess) FATAL("Unable to copy memory to host"); cuda_ret = cudaMemcpy(mu_y, mu_y_d, k*sizeof(float), cudaMemcpyDeviceToHost); if(cuda_ret != cudaSuccess) FATAL("Unable to copy memory to host"); // Verify correctness ----------------------------------------------------- verify(group, mu_x, mu_y, num_points, k,argv[3]); // Free memory ------------------------------------------------------------ free(x); free(y); free(mu_x); free(mu_y); free(group); cudaFree(x_d); cudaFree(y_d); cudaFree(mu_x_d); cudaFree(mu_y_d); cudaFree(group_d); cudaFree(nx_d); cudaFree(ny_d); cudaFree(sum_x_d); cudaFree(sum_y_d); cudaFree(dst_d); return 0; }
d6bc48cdbea8e85fa51fc5def8998fc70d95903b.hip
// !!! This is a file automatically generated by hipify!!! /* ------------------------------------------------------- UNTILED CODE GENERATED BY FORMA COMPILER ---------------------------------------------------------*/ #include "hip/hip_runtime.h" #ifdef _TIMER_ #include "hip/hip_runtime_api.h" #endif #include "stdio.h" #include <rocm_smi/rocm_smi.h> #include <assert.h> #define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) ) #define max(a,b) FORMA_MAX(a,b) #define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) ) #define min(a,b) FORMA_MIN(a,b) #define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 ) #ifndef FORMA_MAX_BLOCKDIM_0 #define FORMA_MAX_BLOCKDIM_0 1024 #endif #ifndef FORMA_MAX_BLOCKDIM_1 #define FORMA_MAX_BLOCKDIM_1 1024 #endif #ifndef FORMA_MAX_BLOCKDIM_2 #define FORMA_MAX_BLOCKDIM_2 1024 #endif template<typename T> __global__ void __kernel_init__(T* input, T value) { int loc = (int)(blockIdx.x)*(int)(blockDim.x)+(int)(threadIdx.x); input[loc] = value; } template<typename T> void initialize_array(T* d_input, int size, T value) { dim3 init_grid(FORMA_CEIL(size,FORMA_MAX_BLOCKDIM_0)); dim3 init_block(FORMA_MAX_BLOCKDIM_0); hipLaunchKernelGGL(( __kernel_init__), dim3(init_grid),dim3(init_block), 0, 0, d_input,value); } void Check_CUDA_Error(const char* message); /*Texture references */ /* Device code Begin */ __global__ void __kernel___forma_kernel__0__(float * __restrict__ input, int L, int M, int N, float * __restrict__ __var_4__){ int FORMA_BLOCKDIM_Z = (int)(blockDim.z); int FORMA_BLOCKDIM_Y = (int)(blockDim.y); int FORMA_BLOCKDIM_X = (int)(blockDim.x); int __iter_0__; __iter_0__ = (int)(blockIdx.x)*(int)(FORMA_BLOCKDIM_X) + (int)(threadIdx.x) + 1; if(__iter_0__ <= (N-2)){ int __iter_1__; __iter_1__ = (int)(blockIdx.y)*(int)(FORMA_BLOCKDIM_Y) + (int)(threadIdx.y) + 1; if(__iter_1__ <= (M-2)){ int __iter_2__; __iter_2__ = (int)(blockIdx.z)*(int)(FORMA_BLOCKDIM_Z) + (int)(threadIdx.z) + 1; if(__iter_2__ <= (L-2)){ float __temp_0__; __temp_0__ = (2.000000f 
* input[__iter_0__+(N-0)*(__iter_1__+(M-0)*(__iter_2__))]); float __temp_1__; __temp_1__ = (input[__iter_0__+(N-0)*(__iter_1__+(M-0)*(__iter_2__+(1)))] - __temp_0__); float __temp_2__; __temp_2__ = (__temp_1__ + input[__iter_0__+(N-0)*(__iter_1__+(M-0)*(__iter_2__+(-1)))]); float __temp_3__; __temp_3__ = (0.125000f * __temp_2__); float __temp_4__; __temp_4__ = (2.000000f * input[__iter_0__+(N-0)*(__iter_1__+(M-0)*(__iter_2__))]); float __temp_5__; __temp_5__ = (input[__iter_0__+(N-0)*(__iter_1__+(1)+(M-0)*(__iter_2__))] - __temp_4__); float __temp_6__; __temp_6__ = (__temp_5__ + input[__iter_0__+(N-0)*(__iter_1__+(-1)+(M-0)*(__iter_2__))]); float __temp_7__; __temp_7__ = (0.125000f * __temp_6__); float __temp_8__; __temp_8__ = (__temp_3__ + __temp_7__); float __temp_9__; __temp_9__ = (2.000000f * input[__iter_0__+(N-0)*(__iter_1__+(M-0)*(__iter_2__))]); float __temp_10__; __temp_10__ = (input[__iter_0__+(1)+(N-0)*(__iter_1__+(M-0)*(__iter_2__))] - __temp_9__); float __temp_11__; __temp_11__ = (__temp_10__ + input[__iter_0__+(-1)+(N-0)*(__iter_1__+(M-0)*(__iter_2__))]); float __temp_12__; __temp_12__ = (0.125000f * __temp_11__); float __temp_13__; __temp_13__ = (__temp_8__ + __temp_12__); float __temp_14__; __temp_14__ = (__temp_13__ + input[__iter_0__+(N-0)*(__iter_1__+(M-0)*(__iter_2__))]); __var_4__[__iter_0__+(N-0)*(__iter_1__+(M-0)*(__iter_2__))] = __temp_14__; } } } } __global__ void __kernel___forma_kernel__1__(float * __restrict__ __var_4__, int L, int M, int N, float * __restrict__ __var_3__){ int FORMA_BLOCKDIM_Z = (int)(blockDim.z); int FORMA_BLOCKDIM_Y = (int)(blockDim.y); int FORMA_BLOCKDIM_X = (int)(blockDim.x); int __iter_3__; __iter_3__ = (int)(blockIdx.x)*(int)(FORMA_BLOCKDIM_X) + (int)(threadIdx.x) + 1; if(__iter_3__ <= (N-2)){ int __iter_4__; __iter_4__ = (int)(blockIdx.y)*(int)(FORMA_BLOCKDIM_Y) + (int)(threadIdx.y) + 1; if(__iter_4__ <= (M-2)){ int __iter_5__; __iter_5__ = (int)(blockIdx.z)*(int)(FORMA_BLOCKDIM_Z) + (int)(threadIdx.z) + 1; 
if(__iter_5__ <= (L-2)){ float __temp_15__; __temp_15__ = (2.000000f * __var_4__[__iter_3__+(N-0)*(__iter_4__+(M-0)*(__iter_5__))]); float __temp_16__; __temp_16__ = (__var_4__[__iter_3__+(N-0)*(__iter_4__+(M-0)*(__iter_5__+(1)))] - __temp_15__); float __temp_17__; __temp_17__ = (__temp_16__ + __var_4__[__iter_3__+(N-0)*(__iter_4__+(M-0)*(__iter_5__+(-1)))]); float __temp_18__; __temp_18__ = (0.125000f * __temp_17__); float __temp_19__; __temp_19__ = (2.000000f * __var_4__[__iter_3__+(N-0)*(__iter_4__+(M-0)*(__iter_5__))]); float __temp_20__; __temp_20__ = (__var_4__[__iter_3__+(N-0)*(__iter_4__+(1)+(M-0)*(__iter_5__))] - __temp_19__); float __temp_21__; __temp_21__ = (__temp_20__ + __var_4__[__iter_3__+(N-0)*(__iter_4__+(-1)+(M-0)*(__iter_5__))]); float __temp_22__; __temp_22__ = (0.125000f * __temp_21__); float __temp_23__; __temp_23__ = (__temp_18__ + __temp_22__); float __temp_24__; __temp_24__ = (2.000000f * __var_4__[__iter_3__+(N-0)*(__iter_4__+(M-0)*(__iter_5__))]); float __temp_25__; __temp_25__ = (__var_4__[__iter_3__+(1)+(N-0)*(__iter_4__+(M-0)*(__iter_5__))] - __temp_24__); float __temp_26__; __temp_26__ = (__temp_25__ + __var_4__[__iter_3__+(-1)+(N-0)*(__iter_4__+(M-0)*(__iter_5__))]); float __temp_27__; __temp_27__ = (0.125000f * __temp_26__); float __temp_28__; __temp_28__ = (__temp_23__ + __temp_27__); float __temp_29__; __temp_29__ = (__temp_28__ + __var_4__[__iter_3__+(N-0)*(__iter_4__+(M-0)*(__iter_5__))]); __var_3__[__iter_3__+(N-0)*(__iter_4__+(M-0)*(__iter_5__))] = __temp_29__; } } } } __global__ void __kernel___forma_kernel__2__(float * __restrict__ __var_3__, int L, int M, int N, float * __restrict__ __var_2__){ int FORMA_BLOCKDIM_Z = (int)(blockDim.z); int FORMA_BLOCKDIM_Y = (int)(blockDim.y); int FORMA_BLOCKDIM_X = (int)(blockDim.x); int __iter_6__; __iter_6__ = (int)(blockIdx.x)*(int)(FORMA_BLOCKDIM_X) + (int)(threadIdx.x) + 1; if(__iter_6__ <= (N-2)){ int __iter_7__; __iter_7__ = (int)(blockIdx.y)*(int)(FORMA_BLOCKDIM_Y) + 
(int)(threadIdx.y) + 1; if(__iter_7__ <= (M-2)){ int __iter_8__; __iter_8__ = (int)(blockIdx.z)*(int)(FORMA_BLOCKDIM_Z) + (int)(threadIdx.z) + 1; if(__iter_8__ <= (L-2)){ float __temp_30__; __temp_30__ = (2.000000f * __var_3__[__iter_6__+(N-0)*(__iter_7__+(M-0)*(__iter_8__))]); float __temp_31__; __temp_31__ = (__var_3__[__iter_6__+(N-0)*(__iter_7__+(M-0)*(__iter_8__+(1)))] - __temp_30__); float __temp_32__; __temp_32__ = (__temp_31__ + __var_3__[__iter_6__+(N-0)*(__iter_7__+(M-0)*(__iter_8__+(-1)))]); float __temp_33__; __temp_33__ = (0.125000f * __temp_32__); float __temp_34__; __temp_34__ = (2.000000f * __var_3__[__iter_6__+(N-0)*(__iter_7__+(M-0)*(__iter_8__))]); float __temp_35__; __temp_35__ = (__var_3__[__iter_6__+(N-0)*(__iter_7__+(1)+(M-0)*(__iter_8__))] - __temp_34__); float __temp_36__; __temp_36__ = (__temp_35__ + __var_3__[__iter_6__+(N-0)*(__iter_7__+(-1)+(M-0)*(__iter_8__))]); float __temp_37__; __temp_37__ = (0.125000f * __temp_36__); float __temp_38__; __temp_38__ = (__temp_33__ + __temp_37__); float __temp_39__; __temp_39__ = (2.000000f * __var_3__[__iter_6__+(N-0)*(__iter_7__+(M-0)*(__iter_8__))]); float __temp_40__; __temp_40__ = (__var_3__[__iter_6__+(1)+(N-0)*(__iter_7__+(M-0)*(__iter_8__))] - __temp_39__); float __temp_41__; __temp_41__ = (__temp_40__ + __var_3__[__iter_6__+(-1)+(N-0)*(__iter_7__+(M-0)*(__iter_8__))]); float __temp_42__; __temp_42__ = (0.125000f * __temp_41__); float __temp_43__; __temp_43__ = (__temp_38__ + __temp_42__); float __temp_44__; __temp_44__ = (__temp_43__ + __var_3__[__iter_6__+(N-0)*(__iter_7__+(M-0)*(__iter_8__))]); __var_2__[__iter_6__+(N-0)*(__iter_7__+(M-0)*(__iter_8__))] = __temp_44__; } } } } __global__ void __kernel___forma_kernel__3__(float * __restrict__ __var_2__, int L, int M, int N, float * __restrict__ __var_1__){ int FORMA_BLOCKDIM_Z = (int)(blockDim.z); int FORMA_BLOCKDIM_Y = (int)(blockDim.y); int FORMA_BLOCKDIM_X = (int)(blockDim.x); int __iter_9__; __iter_9__ = 
(int)(blockIdx.x)*(int)(FORMA_BLOCKDIM_X) + (int)(threadIdx.x) + 1; if(__iter_9__ <= (N-2)){ int __iter_10__; __iter_10__ = (int)(blockIdx.y)*(int)(FORMA_BLOCKDIM_Y) + (int)(threadIdx.y) + 1; if(__iter_10__ <= (M-2)){ int __iter_11__; __iter_11__ = (int)(blockIdx.z)*(int)(FORMA_BLOCKDIM_Z) + (int)(threadIdx.z) + 1; if(__iter_11__ <= (L-2)){ float __temp_45__; __temp_45__ = (2.000000f * __var_2__[__iter_9__+(N-0)*(__iter_10__+(M-0)*(__iter_11__))]); float __temp_46__; __temp_46__ = (__var_2__[__iter_9__+(N-0)*(__iter_10__+(M-0)*(__iter_11__+(1)))] - __temp_45__); float __temp_47__; __temp_47__ = (__temp_46__ + __var_2__[__iter_9__+(N-0)*(__iter_10__+(M-0)*(__iter_11__+(-1)))]); float __temp_48__; __temp_48__ = (0.125000f * __temp_47__); float __temp_49__; __temp_49__ = (2.000000f * __var_2__[__iter_9__+(N-0)*(__iter_10__+(M-0)*(__iter_11__))]); float __temp_50__; __temp_50__ = (__var_2__[__iter_9__+(N-0)*(__iter_10__+(1)+(M-0)*(__iter_11__))] - __temp_49__); float __temp_51__; __temp_51__ = (__temp_50__ + __var_2__[__iter_9__+(N-0)*(__iter_10__+(-1)+(M-0)*(__iter_11__))]); float __temp_52__; __temp_52__ = (0.125000f * __temp_51__); float __temp_53__; __temp_53__ = (__temp_48__ + __temp_52__); float __temp_54__; __temp_54__ = (2.000000f * __var_2__[__iter_9__+(N-0)*(__iter_10__+(M-0)*(__iter_11__))]); float __temp_55__; __temp_55__ = (__var_2__[__iter_9__+(1)+(N-0)*(__iter_10__+(M-0)*(__iter_11__))] - __temp_54__); float __temp_56__; __temp_56__ = (__temp_55__ + __var_2__[__iter_9__+(-1)+(N-0)*(__iter_10__+(M-0)*(__iter_11__))]); float __temp_57__; __temp_57__ = (0.125000f * __temp_56__); float __temp_58__; __temp_58__ = (__temp_53__ + __temp_57__); float __temp_59__; __temp_59__ = (__temp_58__ + __var_2__[__iter_9__+(N-0)*(__iter_10__+(M-0)*(__iter_11__))]); __var_1__[__iter_9__+(N-0)*(__iter_10__+(M-0)*(__iter_11__))] = __temp_59__; } } } } /*Device code End */ /* Host Code Begin */ extern "C" void host_code(float * h_input, float * __var_0__, int L, int M, int N) 
{ /* Host allocation Begin */ float * input; hipMalloc(&input,sizeof(float)*((L-0)*(M-0)*(N-0))); Check_CUDA_Error("Allocation Error!! : input\n"); hipPointerAttribute_t ptrAttrib_h_input; hipMemcpyKind memcpy_kind_h_input = hipMemcpyHostToDevice; if (hipPointerGetAttributes(&ptrAttrib_h_input, h_input) == hipSuccess) if (ptrAttrib_h_input.memoryType == hipMemoryTypeDevice) memcpy_kind_h_input = hipMemcpyDeviceToDevice; hipGetLastError(); if( memcpy_kind_h_input != hipMemcpyDeviceToDevice ){ hipMemcpy(input,h_input,sizeof(float)*((L-0)*(M-0)*(N-0)), memcpy_kind_h_input); } float * __var_1__; hipMalloc(&__var_1__,sizeof(float)*((L-0)*(M-0)*(N-0))); Check_CUDA_Error("Allocation Error!! : __var_1__\n"); float * __var_2__; hipMalloc(&__var_2__,sizeof(float)*((L-0)*(M-0)*(N-0))); Check_CUDA_Error("Allocation Error!! : __var_2__\n"); float * __var_3__; hipMalloc(&__var_3__,sizeof(float)*((L-0)*(M-0)*(N-0))); Check_CUDA_Error("Allocation Error!! : __var_3__\n"); float * __var_4__; hipMalloc(&__var_4__,sizeof(float)*((L-0)*(M-0)*(N-0))); Check_CUDA_Error("Allocation Error!! 
: __var_4__\n"); /*Host Allocation End */ /* Kernel Launch Begin */ #ifdef _TIMER_ hipEvent_t _forma_timer_start_,_forma_timer_stop_; hipEventCreate(&_forma_timer_start_); hipEventCreate(&_forma_timer_stop_); hipEventRecord(_forma_timer_start_,0); #endif int __size_0___kernel___forma_kernel__0__ = ((N-2) - 1 ) + 1; int __size_1___kernel___forma_kernel__0__ = ((M-2) - 1 ) + 1; int __size_2___kernel___forma_kernel__0__ = ((L-2) - 1 ) + 1; int __block_0___kernel___forma_kernel__0__ = 16; int __block_1___kernel___forma_kernel__0__ = 4; int __block_2___kernel___forma_kernel__0__ = 4; dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__,__block_2___kernel___forma_kernel__0__); int __SMemSize___kernel___forma_kernel__0__ = 0; int __grid_0___kernel___forma_kernel__0__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,__block_0___kernel___forma_kernel__0__); int __grid_1___kernel___forma_kernel__0__ = FORMA_CEIL(__size_1___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__); int __grid_2___kernel___forma_kernel__0__ = FORMA_CEIL(__size_2___kernel___forma_kernel__0__,__block_2___kernel___forma_kernel__0__); dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__,__grid_2___kernel___forma_kernel__0__); unsigned int power1, power2; rsmi_status_t result; uint32_t device; nvmlEnableState_t mode; result=nvmlInit(); result = nvmlDeviceGetHandleByIndex(0, &device); assert(RSMI_STATUS_SUCCESS == result); result=nvmlDeviceGetPowerManagementMode(device, &mode); printf("enabled = %d\n", mode); result=nvmlDeviceGetPowerUsage(device,&power1); assert(RSMI_STATUS_SUCCESS == result); hipDeviceSynchronize(); for (int x=0; x<500; x++) { hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(__blockConfig___kernel___forma_kernel__0__), __SMemSize___kernel___forma_kernel__0__, 0, input, L, 
M, N, __var_4__); hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(__blockConfig___kernel___forma_kernel__0__), __SMemSize___kernel___forma_kernel__0__, 0, __var_4__, L, M, N, __var_3__); hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(__blockConfig___kernel___forma_kernel__0__), __SMemSize___kernel___forma_kernel__0__, 0, __var_3__, L, M, N, __var_2__); hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(__blockConfig___kernel___forma_kernel__0__), __SMemSize___kernel___forma_kernel__0__, 0, __var_2__, L, M, N, __var_1__); Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n"); } hipDeviceSynchronize(); result=nvmlDeviceGetPowerUsage(device,&power2); assert(RSMI_STATUS_SUCCESS == result); power2 -= power1; printf("%u\n", power2); nvmlShutdown(); hipPointerAttribute_t ptrAttrib___var_0__; hipMemcpyKind memcpy_kind___var_0__ = hipMemcpyDeviceToHost; if (hipPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == hipSuccess) if (ptrAttrib___var_0__.memoryType == hipMemoryTypeDevice) memcpy_kind___var_0__ = hipMemcpyDeviceToDevice; hipGetLastError(); hipMemcpy(__var_0__,__var_1__, sizeof(float)*((L-0)*(M-0)*(N-0)), memcpy_kind___var_0__); #ifdef _TIMER_ hipEventRecord(_forma_timer_stop_,0); hipEventSynchronize(_forma_timer_stop_); float elapsedTime; hipEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_); printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime); hipEventDestroy(_forma_timer_start_); hipEventDestroy(_forma_timer_stop_); #endif /*Kernel Launch End */ /* Host Free Begin */ hipFree(input); hipFree(__var_1__); hipFree(__var_2__); hipFree(__var_3__); hipFree(__var_4__); } /*Host Free End*/
d6bc48cdbea8e85fa51fc5def8998fc70d95903b.cu
/* ------------------------------------------------------- UNTILED CODE GENERATED BY FORMA COMPILER ---------------------------------------------------------*/ #include "cuda.h" #ifdef _TIMER_ #include "cuda_profiler_api.h" #endif #include "stdio.h" #include <nvml.h> #include <assert.h> #define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) ) #define max(a,b) FORMA_MAX(a,b) #define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) ) #define min(a,b) FORMA_MIN(a,b) #define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 ) #ifndef FORMA_MAX_BLOCKDIM_0 #define FORMA_MAX_BLOCKDIM_0 1024 #endif #ifndef FORMA_MAX_BLOCKDIM_1 #define FORMA_MAX_BLOCKDIM_1 1024 #endif #ifndef FORMA_MAX_BLOCKDIM_2 #define FORMA_MAX_BLOCKDIM_2 1024 #endif template<typename T> __global__ void __kernel_init__(T* input, T value) { int loc = (int)(blockIdx.x)*(int)(blockDim.x)+(int)(threadIdx.x); input[loc] = value; } template<typename T> void initialize_array(T* d_input, int size, T value) { dim3 init_grid(FORMA_CEIL(size,FORMA_MAX_BLOCKDIM_0)); dim3 init_block(FORMA_MAX_BLOCKDIM_0); __kernel_init__<<<init_grid,init_block>>>(d_input,value); } void Check_CUDA_Error(const char* message); /*Texture references */ /* Device code Begin */ __global__ void __kernel___forma_kernel__0__(float * __restrict__ input, int L, int M, int N, float * __restrict__ __var_4__){ int FORMA_BLOCKDIM_Z = (int)(blockDim.z); int FORMA_BLOCKDIM_Y = (int)(blockDim.y); int FORMA_BLOCKDIM_X = (int)(blockDim.x); int __iter_0__; __iter_0__ = (int)(blockIdx.x)*(int)(FORMA_BLOCKDIM_X) + (int)(threadIdx.x) + 1; if(__iter_0__ <= (N-2)){ int __iter_1__; __iter_1__ = (int)(blockIdx.y)*(int)(FORMA_BLOCKDIM_Y) + (int)(threadIdx.y) + 1; if(__iter_1__ <= (M-2)){ int __iter_2__; __iter_2__ = (int)(blockIdx.z)*(int)(FORMA_BLOCKDIM_Z) + (int)(threadIdx.z) + 1; if(__iter_2__ <= (L-2)){ float __temp_0__; __temp_0__ = (2.000000f * input[__iter_0__+(N-0)*(__iter_1__+(M-0)*(__iter_2__))]); float __temp_1__; __temp_1__ = 
(input[__iter_0__+(N-0)*(__iter_1__+(M-0)*(__iter_2__+(1)))] - __temp_0__); float __temp_2__; __temp_2__ = (__temp_1__ + input[__iter_0__+(N-0)*(__iter_1__+(M-0)*(__iter_2__+(-1)))]); float __temp_3__; __temp_3__ = (0.125000f * __temp_2__); float __temp_4__; __temp_4__ = (2.000000f * input[__iter_0__+(N-0)*(__iter_1__+(M-0)*(__iter_2__))]); float __temp_5__; __temp_5__ = (input[__iter_0__+(N-0)*(__iter_1__+(1)+(M-0)*(__iter_2__))] - __temp_4__); float __temp_6__; __temp_6__ = (__temp_5__ + input[__iter_0__+(N-0)*(__iter_1__+(-1)+(M-0)*(__iter_2__))]); float __temp_7__; __temp_7__ = (0.125000f * __temp_6__); float __temp_8__; __temp_8__ = (__temp_3__ + __temp_7__); float __temp_9__; __temp_9__ = (2.000000f * input[__iter_0__+(N-0)*(__iter_1__+(M-0)*(__iter_2__))]); float __temp_10__; __temp_10__ = (input[__iter_0__+(1)+(N-0)*(__iter_1__+(M-0)*(__iter_2__))] - __temp_9__); float __temp_11__; __temp_11__ = (__temp_10__ + input[__iter_0__+(-1)+(N-0)*(__iter_1__+(M-0)*(__iter_2__))]); float __temp_12__; __temp_12__ = (0.125000f * __temp_11__); float __temp_13__; __temp_13__ = (__temp_8__ + __temp_12__); float __temp_14__; __temp_14__ = (__temp_13__ + input[__iter_0__+(N-0)*(__iter_1__+(M-0)*(__iter_2__))]); __var_4__[__iter_0__+(N-0)*(__iter_1__+(M-0)*(__iter_2__))] = __temp_14__; } } } } __global__ void __kernel___forma_kernel__1__(float * __restrict__ __var_4__, int L, int M, int N, float * __restrict__ __var_3__){ int FORMA_BLOCKDIM_Z = (int)(blockDim.z); int FORMA_BLOCKDIM_Y = (int)(blockDim.y); int FORMA_BLOCKDIM_X = (int)(blockDim.x); int __iter_3__; __iter_3__ = (int)(blockIdx.x)*(int)(FORMA_BLOCKDIM_X) + (int)(threadIdx.x) + 1; if(__iter_3__ <= (N-2)){ int __iter_4__; __iter_4__ = (int)(blockIdx.y)*(int)(FORMA_BLOCKDIM_Y) + (int)(threadIdx.y) + 1; if(__iter_4__ <= (M-2)){ int __iter_5__; __iter_5__ = (int)(blockIdx.z)*(int)(FORMA_BLOCKDIM_Z) + (int)(threadIdx.z) + 1; if(__iter_5__ <= (L-2)){ float __temp_15__; __temp_15__ = (2.000000f * 
__var_4__[__iter_3__+(N-0)*(__iter_4__+(M-0)*(__iter_5__))]); float __temp_16__; __temp_16__ = (__var_4__[__iter_3__+(N-0)*(__iter_4__+(M-0)*(__iter_5__+(1)))] - __temp_15__); float __temp_17__; __temp_17__ = (__temp_16__ + __var_4__[__iter_3__+(N-0)*(__iter_4__+(M-0)*(__iter_5__+(-1)))]); float __temp_18__; __temp_18__ = (0.125000f * __temp_17__); float __temp_19__; __temp_19__ = (2.000000f * __var_4__[__iter_3__+(N-0)*(__iter_4__+(M-0)*(__iter_5__))]); float __temp_20__; __temp_20__ = (__var_4__[__iter_3__+(N-0)*(__iter_4__+(1)+(M-0)*(__iter_5__))] - __temp_19__); float __temp_21__; __temp_21__ = (__temp_20__ + __var_4__[__iter_3__+(N-0)*(__iter_4__+(-1)+(M-0)*(__iter_5__))]); float __temp_22__; __temp_22__ = (0.125000f * __temp_21__); float __temp_23__; __temp_23__ = (__temp_18__ + __temp_22__); float __temp_24__; __temp_24__ = (2.000000f * __var_4__[__iter_3__+(N-0)*(__iter_4__+(M-0)*(__iter_5__))]); float __temp_25__; __temp_25__ = (__var_4__[__iter_3__+(1)+(N-0)*(__iter_4__+(M-0)*(__iter_5__))] - __temp_24__); float __temp_26__; __temp_26__ = (__temp_25__ + __var_4__[__iter_3__+(-1)+(N-0)*(__iter_4__+(M-0)*(__iter_5__))]); float __temp_27__; __temp_27__ = (0.125000f * __temp_26__); float __temp_28__; __temp_28__ = (__temp_23__ + __temp_27__); float __temp_29__; __temp_29__ = (__temp_28__ + __var_4__[__iter_3__+(N-0)*(__iter_4__+(M-0)*(__iter_5__))]); __var_3__[__iter_3__+(N-0)*(__iter_4__+(M-0)*(__iter_5__))] = __temp_29__; } } } } __global__ void __kernel___forma_kernel__2__(float * __restrict__ __var_3__, int L, int M, int N, float * __restrict__ __var_2__){ int FORMA_BLOCKDIM_Z = (int)(blockDim.z); int FORMA_BLOCKDIM_Y = (int)(blockDim.y); int FORMA_BLOCKDIM_X = (int)(blockDim.x); int __iter_6__; __iter_6__ = (int)(blockIdx.x)*(int)(FORMA_BLOCKDIM_X) + (int)(threadIdx.x) + 1; if(__iter_6__ <= (N-2)){ int __iter_7__; __iter_7__ = (int)(blockIdx.y)*(int)(FORMA_BLOCKDIM_Y) + (int)(threadIdx.y) + 1; if(__iter_7__ <= (M-2)){ int __iter_8__; __iter_8__ = 
(int)(blockIdx.z)*(int)(FORMA_BLOCKDIM_Z) + (int)(threadIdx.z) + 1; if(__iter_8__ <= (L-2)){ float __temp_30__; __temp_30__ = (2.000000f * __var_3__[__iter_6__+(N-0)*(__iter_7__+(M-0)*(__iter_8__))]); float __temp_31__; __temp_31__ = (__var_3__[__iter_6__+(N-0)*(__iter_7__+(M-0)*(__iter_8__+(1)))] - __temp_30__); float __temp_32__; __temp_32__ = (__temp_31__ + __var_3__[__iter_6__+(N-0)*(__iter_7__+(M-0)*(__iter_8__+(-1)))]); float __temp_33__; __temp_33__ = (0.125000f * __temp_32__); float __temp_34__; __temp_34__ = (2.000000f * __var_3__[__iter_6__+(N-0)*(__iter_7__+(M-0)*(__iter_8__))]); float __temp_35__; __temp_35__ = (__var_3__[__iter_6__+(N-0)*(__iter_7__+(1)+(M-0)*(__iter_8__))] - __temp_34__); float __temp_36__; __temp_36__ = (__temp_35__ + __var_3__[__iter_6__+(N-0)*(__iter_7__+(-1)+(M-0)*(__iter_8__))]); float __temp_37__; __temp_37__ = (0.125000f * __temp_36__); float __temp_38__; __temp_38__ = (__temp_33__ + __temp_37__); float __temp_39__; __temp_39__ = (2.000000f * __var_3__[__iter_6__+(N-0)*(__iter_7__+(M-0)*(__iter_8__))]); float __temp_40__; __temp_40__ = (__var_3__[__iter_6__+(1)+(N-0)*(__iter_7__+(M-0)*(__iter_8__))] - __temp_39__); float __temp_41__; __temp_41__ = (__temp_40__ + __var_3__[__iter_6__+(-1)+(N-0)*(__iter_7__+(M-0)*(__iter_8__))]); float __temp_42__; __temp_42__ = (0.125000f * __temp_41__); float __temp_43__; __temp_43__ = (__temp_38__ + __temp_42__); float __temp_44__; __temp_44__ = (__temp_43__ + __var_3__[__iter_6__+(N-0)*(__iter_7__+(M-0)*(__iter_8__))]); __var_2__[__iter_6__+(N-0)*(__iter_7__+(M-0)*(__iter_8__))] = __temp_44__; } } } } __global__ void __kernel___forma_kernel__3__(float * __restrict__ __var_2__, int L, int M, int N, float * __restrict__ __var_1__){ int FORMA_BLOCKDIM_Z = (int)(blockDim.z); int FORMA_BLOCKDIM_Y = (int)(blockDim.y); int FORMA_BLOCKDIM_X = (int)(blockDim.x); int __iter_9__; __iter_9__ = (int)(blockIdx.x)*(int)(FORMA_BLOCKDIM_X) + (int)(threadIdx.x) + 1; if(__iter_9__ <= (N-2)){ int __iter_10__; 
__iter_10__ = (int)(blockIdx.y)*(int)(FORMA_BLOCKDIM_Y) + (int)(threadIdx.y) + 1; if(__iter_10__ <= (M-2)){ int __iter_11__; __iter_11__ = (int)(blockIdx.z)*(int)(FORMA_BLOCKDIM_Z) + (int)(threadIdx.z) + 1; if(__iter_11__ <= (L-2)){ float __temp_45__; __temp_45__ = (2.000000f * __var_2__[__iter_9__+(N-0)*(__iter_10__+(M-0)*(__iter_11__))]); float __temp_46__; __temp_46__ = (__var_2__[__iter_9__+(N-0)*(__iter_10__+(M-0)*(__iter_11__+(1)))] - __temp_45__); float __temp_47__; __temp_47__ = (__temp_46__ + __var_2__[__iter_9__+(N-0)*(__iter_10__+(M-0)*(__iter_11__+(-1)))]); float __temp_48__; __temp_48__ = (0.125000f * __temp_47__); float __temp_49__; __temp_49__ = (2.000000f * __var_2__[__iter_9__+(N-0)*(__iter_10__+(M-0)*(__iter_11__))]); float __temp_50__; __temp_50__ = (__var_2__[__iter_9__+(N-0)*(__iter_10__+(1)+(M-0)*(__iter_11__))] - __temp_49__); float __temp_51__; __temp_51__ = (__temp_50__ + __var_2__[__iter_9__+(N-0)*(__iter_10__+(-1)+(M-0)*(__iter_11__))]); float __temp_52__; __temp_52__ = (0.125000f * __temp_51__); float __temp_53__; __temp_53__ = (__temp_48__ + __temp_52__); float __temp_54__; __temp_54__ = (2.000000f * __var_2__[__iter_9__+(N-0)*(__iter_10__+(M-0)*(__iter_11__))]); float __temp_55__; __temp_55__ = (__var_2__[__iter_9__+(1)+(N-0)*(__iter_10__+(M-0)*(__iter_11__))] - __temp_54__); float __temp_56__; __temp_56__ = (__temp_55__ + __var_2__[__iter_9__+(-1)+(N-0)*(__iter_10__+(M-0)*(__iter_11__))]); float __temp_57__; __temp_57__ = (0.125000f * __temp_56__); float __temp_58__; __temp_58__ = (__temp_53__ + __temp_57__); float __temp_59__; __temp_59__ = (__temp_58__ + __var_2__[__iter_9__+(N-0)*(__iter_10__+(M-0)*(__iter_11__))]); __var_1__[__iter_9__+(N-0)*(__iter_10__+(M-0)*(__iter_11__))] = __temp_59__; } } } } /*Device code End */ /* Host Code Begin */ extern "C" void host_code(float * h_input, float * __var_0__, int L, int M, int N) { /* Host allocation Begin */ float * input; cudaMalloc(&input,sizeof(float)*((L-0)*(M-0)*(N-0))); 
Check_CUDA_Error("Allocation Error!! : input\n"); cudaPointerAttributes ptrAttrib_h_input; cudaMemcpyKind memcpy_kind_h_input = cudaMemcpyHostToDevice; if (cudaPointerGetAttributes(&ptrAttrib_h_input, h_input) == cudaSuccess) if (ptrAttrib_h_input.memoryType == cudaMemoryTypeDevice) memcpy_kind_h_input = cudaMemcpyDeviceToDevice; cudaGetLastError(); if( memcpy_kind_h_input != cudaMemcpyDeviceToDevice ){ cudaMemcpy(input,h_input,sizeof(float)*((L-0)*(M-0)*(N-0)), memcpy_kind_h_input); } float * __var_1__; cudaMalloc(&__var_1__,sizeof(float)*((L-0)*(M-0)*(N-0))); Check_CUDA_Error("Allocation Error!! : __var_1__\n"); float * __var_2__; cudaMalloc(&__var_2__,sizeof(float)*((L-0)*(M-0)*(N-0))); Check_CUDA_Error("Allocation Error!! : __var_2__\n"); float * __var_3__; cudaMalloc(&__var_3__,sizeof(float)*((L-0)*(M-0)*(N-0))); Check_CUDA_Error("Allocation Error!! : __var_3__\n"); float * __var_4__; cudaMalloc(&__var_4__,sizeof(float)*((L-0)*(M-0)*(N-0))); Check_CUDA_Error("Allocation Error!! : __var_4__\n"); /*Host Allocation End */ /* Kernel Launch Begin */ #ifdef _TIMER_ cudaEvent_t _forma_timer_start_,_forma_timer_stop_; cudaEventCreate(&_forma_timer_start_); cudaEventCreate(&_forma_timer_stop_); cudaEventRecord(_forma_timer_start_,0); #endif int __size_0___kernel___forma_kernel__0__ = ((N-2) - 1 ) + 1; int __size_1___kernel___forma_kernel__0__ = ((M-2) - 1 ) + 1; int __size_2___kernel___forma_kernel__0__ = ((L-2) - 1 ) + 1; int __block_0___kernel___forma_kernel__0__ = 16; int __block_1___kernel___forma_kernel__0__ = 4; int __block_2___kernel___forma_kernel__0__ = 4; dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__,__block_2___kernel___forma_kernel__0__); int __SMemSize___kernel___forma_kernel__0__ = 0; int __grid_0___kernel___forma_kernel__0__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,__block_0___kernel___forma_kernel__0__); int __grid_1___kernel___forma_kernel__0__ = 
FORMA_CEIL(__size_1___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__); int __grid_2___kernel___forma_kernel__0__ = FORMA_CEIL(__size_2___kernel___forma_kernel__0__,__block_2___kernel___forma_kernel__0__); dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__,__grid_2___kernel___forma_kernel__0__); unsigned int power1, power2; nvmlReturn_t result; nvmlDevice_t device; nvmlEnableState_t mode; result=nvmlInit(); result = nvmlDeviceGetHandleByIndex(0, &device); assert(NVML_SUCCESS == result); result=nvmlDeviceGetPowerManagementMode(device, &mode); printf("enabled = %d\n", mode); result=nvmlDeviceGetPowerUsage(device,&power1); assert(NVML_SUCCESS == result); cudaDeviceSynchronize(); for (int x=0; x<500; x++) { __kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, __blockConfig___kernel___forma_kernel__0__, __SMemSize___kernel___forma_kernel__0__>>> (input, L, M, N, __var_4__); __kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, __blockConfig___kernel___forma_kernel__0__, __SMemSize___kernel___forma_kernel__0__>>> (__var_4__, L, M, N, __var_3__); __kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, __blockConfig___kernel___forma_kernel__0__, __SMemSize___kernel___forma_kernel__0__>>> (__var_3__, L, M, N, __var_2__); __kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, __blockConfig___kernel___forma_kernel__0__, __SMemSize___kernel___forma_kernel__0__>>> (__var_2__, L, M, N, __var_1__); Check_CUDA_Error("Kernel Launch Error!! 
: __kernel___forma_kernel__0__\n"); } cudaDeviceSynchronize(); result=nvmlDeviceGetPowerUsage(device,&power2); assert(NVML_SUCCESS == result); power2 -= power1; printf("%u\n", power2); nvmlShutdown(); cudaPointerAttributes ptrAttrib___var_0__; cudaMemcpyKind memcpy_kind___var_0__ = cudaMemcpyDeviceToHost; if (cudaPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == cudaSuccess) if (ptrAttrib___var_0__.memoryType == cudaMemoryTypeDevice) memcpy_kind___var_0__ = cudaMemcpyDeviceToDevice; cudaGetLastError(); cudaMemcpy(__var_0__,__var_1__, sizeof(float)*((L-0)*(M-0)*(N-0)), memcpy_kind___var_0__); #ifdef _TIMER_ cudaEventRecord(_forma_timer_stop_,0); cudaEventSynchronize(_forma_timer_stop_); float elapsedTime; cudaEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_); printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime); cudaEventDestroy(_forma_timer_start_); cudaEventDestroy(_forma_timer_stop_); #endif /*Kernel Launch End */ /* Host Free Begin */ cudaFree(input); cudaFree(__var_1__); cudaFree(__var_2__); cudaFree(__var_3__); cudaFree(__var_4__); } /*Host Free End*/
c0c655e3c2a9f27089393214bab31f9d76e1cfd8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <time.h> #include <unistd.h> #include <stdio.h> #include <stdlib.h> #include "gputimer.h" // CARD TARGETED : K40c // 1.5 MB const size_t CACHESIZE = 1.5 * (1<<20); // 32 B const size_t CLSIZE = 32; const size_t intsize = sizeof(int); void check_error(hipError_t cudaerr) { if (cudaerr != hipSuccess) { printf("FAILED WITH ERROR: \"%s\".\n", hipGetErrorString(cudaerr)); exit(-1); } } __global__ void fill_cache_stride(int* vals, int size, int stride) { uint tid = threadIdx.x + blockIdx.x * blockDim.x; uint nthreads = blockDim.x * gridDim.x; int sum; int thread_i; for (int i = tid; i < size/intsize; i += nthreads) { thread_i = i*stride; int n1 = vals[thread_i]; //int n2 = vals[thread_i+1]; sum += n1; } vals[0] = sum; //printf("first kernel\n"); } __global__ void fill_cache_stride_1thread(int* vals, int size, int stride) { //uint tid = threadIdx.x + blockIdx.x * blockDim.x; //uint nthreads = blockDim.x * gridDim.x; int sum; //for (int j = 0; j < 2; j++) { while (true) { for (int i = 0; i < size/intsize; i += stride) { int n1 = vals[i]; //int n2 = vals[thread_i+1]; sum += n1; }} vals[0] = sum; //printf("first kernel\n"); } __global__ void toggle_address(int* val) { int n1 = *val; *(val++) = n1; //printf("second kernel\n"); } int main(int argc, char** argv) { if (argc != 6) { printf("USAGE: ./loadXMB <# blocks: int> <# threads: int> <size_mult: double (multipler of cache size)> <stride: int> <toggle_val: int>\n"); } int blocks = atoi(argv[1]); int threads = atoi(argv[2]); double size_mult = atof(argv[3]); int stride = atoi(argv[4]); int toggle_val = atoi(argv[5]); srand(time(NULL)); int size = (int)(size_mult * CACHESIZE); int* valsHost = (int*) malloc(size); memset(valsHost, 0, size); for (int i = 0; i < size/intsize; i++) { valsHost[i] = (int)rand(); } int* valsDevice; hipMalloc((void**)&valsDevice, size); hipMemcpy(valsDevice, valsHost, size, hipMemcpyHostToDevice); int* val 
= &valsDevice[toggle_val]; GpuTimer timer1; GpuTimer timer2; timer1.Start(); //fill_cache_stride<<<blocks, threads>>>(valsDevice, size, stride); hipLaunchKernelGGL(( fill_cache_stride_1thread), dim3(1), dim3(1), 0, 0, valsDevice, size, stride); //check_error(hipDeviceSynchronize()); timer1.Stop(); timer2.Start(); //toggle_address<<<1,1>>>(val); //check_error(hipDeviceSynchronize()); timer2.Stop(); printf("blocks: %d | threads: %d | size_mult: %f | stride: %d | toggle_val: %d\n", blocks, threads, size_mult, stride, toggle_val); printf("timer1: %g | timer2: %g | val: %d\n", timer1.Elapsed(), timer2.Elapsed(), valsHost[0]); }
c0c655e3c2a9f27089393214bab31f9d76e1cfd8.cu
#include <time.h> #include <unistd.h> #include <stdio.h> #include <stdlib.h> #include "gputimer.h" // CARD TARGETED : K40c // 1.5 MB const size_t CACHESIZE = 1.5 * (1<<20); // 32 B const size_t CLSIZE = 32; const size_t intsize = sizeof(int); void check_error(cudaError_t cudaerr) { if (cudaerr != cudaSuccess) { printf("FAILED WITH ERROR: \"%s\".\n", cudaGetErrorString(cudaerr)); exit(-1); } } __global__ void fill_cache_stride(int* vals, int size, int stride) { uint tid = threadIdx.x + blockIdx.x * blockDim.x; uint nthreads = blockDim.x * gridDim.x; int sum; int thread_i; for (int i = tid; i < size/intsize; i += nthreads) { thread_i = i*stride; int n1 = vals[thread_i]; //int n2 = vals[thread_i+1]; sum += n1; } vals[0] = sum; //printf("first kernel\n"); } __global__ void fill_cache_stride_1thread(int* vals, int size, int stride) { //uint tid = threadIdx.x + blockIdx.x * blockDim.x; //uint nthreads = blockDim.x * gridDim.x; int sum; //for (int j = 0; j < 2; j++) { while (true) { for (int i = 0; i < size/intsize; i += stride) { int n1 = vals[i]; //int n2 = vals[thread_i+1]; sum += n1; }} vals[0] = sum; //printf("first kernel\n"); } __global__ void toggle_address(int* val) { int n1 = *val; *(val++) = n1; //printf("second kernel\n"); } int main(int argc, char** argv) { if (argc != 6) { printf("USAGE: ./loadXMB <# blocks: int> <# threads: int> <size_mult: double (multipler of cache size)> <stride: int> <toggle_val: int>\n"); } int blocks = atoi(argv[1]); int threads = atoi(argv[2]); double size_mult = atof(argv[3]); int stride = atoi(argv[4]); int toggle_val = atoi(argv[5]); srand(time(NULL)); int size = (int)(size_mult * CACHESIZE); int* valsHost = (int*) malloc(size); memset(valsHost, 0, size); for (int i = 0; i < size/intsize; i++) { valsHost[i] = (int)rand(); } int* valsDevice; cudaMalloc((void**)&valsDevice, size); cudaMemcpy(valsDevice, valsHost, size, cudaMemcpyHostToDevice); int* val = &valsDevice[toggle_val]; GpuTimer timer1; GpuTimer timer2; timer1.Start(); 
//fill_cache_stride<<<blocks, threads>>>(valsDevice, size, stride); fill_cache_stride_1thread<<<1, 1>>>(valsDevice, size, stride); //check_error(cudaDeviceSynchronize()); timer1.Stop(); timer2.Start(); //toggle_address<<<1,1>>>(val); //check_error(cudaDeviceSynchronize()); timer2.Stop(); printf("blocks: %d | threads: %d | size_mult: %f | stride: %d | toggle_val: %d\n", blocks, threads, size_mult, stride, toggle_val); printf("timer1: %g | timer2: %g | val: %d\n", timer1.Elapsed(), timer2.Elapsed(), valsHost[0]); }
bc2fc7748fb0640c0120d05ed6449352d38af3b2.hip
// !!! This is a file automatically generated by hipify!!! #if !MEGDNN_TEGRA_X1 // generated by gen_cuda_conv_bias_kern_impls.py // ignore warning of cutlass #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-parameter" #pragma GCC diagnostic ignored "-Wstrict-aliasing" #include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl" using LayoutSrc = cutlass::layout::TensorNCxHWx<4>; using LayoutFilter = cutlass::layout::TensorCxRSKx<4>; using ThreadBlockShape = cutlass::gemm::GemmShape<16, 128, 16>; using WarpShape = cutlass::gemm::GemmShape<16, 128, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>; using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationHSwishClamp< int8_t, 4, int32_t, int32_t, float>; using Convolution = cutlass::convolution::device::Convolution< int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t, LayoutSrc, int32_t, LayoutSrc, int32_t, cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61, ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp, cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle< cutlass::convolution::ConvType::kConvolution>, 1, 4, 8, true>; template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( const int8_t* d_src, const int8_t* d_filter, const int32_t* d_bias, const int8_t* d_z, int8_t* d_dst, int* workspace, typename Convolution::ConvolutionParameter const& conv_param, typename Convolution::EpilogueOutputOp::Params const& epilogue, hipStream_t stream); #pragma GCC diagnostic pop #endif
bc2fc7748fb0640c0120d05ed6449352d38af3b2.cu
#if !MEGDNN_TEGRA_X1 // generated by gen_cuda_conv_bias_kern_impls.py // ignore warning of cutlass #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-parameter" #pragma GCC diagnostic ignored "-Wstrict-aliasing" #include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl" using LayoutSrc = cutlass::layout::TensorNCxHWx<4>; using LayoutFilter = cutlass::layout::TensorCxRSKx<4>; using ThreadBlockShape = cutlass::gemm::GemmShape<16, 128, 16>; using WarpShape = cutlass::gemm::GemmShape<16, 128, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>; using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationHSwishClamp< int8_t, 4, int32_t, int32_t, float>; using Convolution = cutlass::convolution::device::Convolution< int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t, LayoutSrc, int32_t, LayoutSrc, int32_t, cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61, ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp, cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle< cutlass::convolution::ConvType::kConvolution>, 1, 4, 8, true>; template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( const int8_t* d_src, const int8_t* d_filter, const int32_t* d_bias, const int8_t* d_z, int8_t* d_dst, int* workspace, typename Convolution::ConvolutionParameter const& conv_param, typename Convolution::EpilogueOutputOp::Params const& epilogue, cudaStream_t stream); #pragma GCC diagnostic pop #endif
e82a34861f440bba7fe15dc9c20afd17daca0ccd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<iostream> #include<cstdlib> using namespace std; __global__ void vectorAdd(int *a, int *b, int *result, int n) { int tid = blockIdx.x*blockDim.x + threadIdx.x; if(tid <= n) { result[tid] = a[tid] + b[tid]; } } void print_array(int *a, int N) { for(int i=0; i<N; i++) { cout<<" "<<a[i]; } cout<<endl; } void init_array(int *a, int N) { for(int i=0; i<N; i++) { a[i] = rand()%10 + 1; } } int main() { int *a, *b, *c; int *a_dev, *b_dev, *c_dev; int n = 8; //24 a = (int*)malloc(n * sizeof(n)); b = (int*)malloc(n * sizeof(n)); c = (int*)malloc(n * sizeof(n)); int size = n * sizeof(int); hipMalloc(&a_dev, size); hipMalloc(&b_dev, size); hipMalloc(&c_dev, size); init_array(a, n); init_array(b, n); print_array(a, n); print_array(b, n); //hipEvent_t start, end; //hipEventCreate(&start); //hipEventCreate(&end); hipMemcpy(a_dev, a, size, hipMemcpyHostToDevice); hipMemcpy(b_dev, b, size, hipMemcpyHostToDevice); //int threads = 1024; //int blocks = (n+threads-1)/threads; //hipEventRecord(start); //vectorAdd<<<blocks,threads>>>(a_dev, b_dev, c_dev, n); hipLaunchKernelGGL(( vectorAdd), dim3(1),dim3(1024), 0, 0, a_dev, b_dev, c_dev, n); //hipEventRecord(end); //hipDeviceSynchronize(); //float time = 0.0; //hipEventElapsedTime(&time, start, end); hipMemcpy(c, c_dev, size, hipMemcpyDeviceToHost); cout<<"Results : "<<endl; print_array(c, n); //cout<<"Time elapsed : "<<time<<endl; hipFree(a_dev); hipFree(b_dev); hipFree(c_dev); return 0; }
e82a34861f440bba7fe15dc9c20afd17daca0ccd.cu
#include<iostream> #include<cstdlib> using namespace std; __global__ void vectorAdd(int *a, int *b, int *result, int n) { int tid = blockIdx.x*blockDim.x + threadIdx.x; if(tid <= n) { result[tid] = a[tid] + b[tid]; } } void print_array(int *a, int N) { for(int i=0; i<N; i++) { cout<<" "<<a[i]; } cout<<endl; } void init_array(int *a, int N) { for(int i=0; i<N; i++) { a[i] = rand()%10 + 1; } } int main() { int *a, *b, *c; int *a_dev, *b_dev, *c_dev; int n = 8; //24 a = (int*)malloc(n * sizeof(n)); b = (int*)malloc(n * sizeof(n)); c = (int*)malloc(n * sizeof(n)); int size = n * sizeof(int); cudaMalloc(&a_dev, size); cudaMalloc(&b_dev, size); cudaMalloc(&c_dev, size); init_array(a, n); init_array(b, n); print_array(a, n); print_array(b, n); //cudaEvent_t start, end; //cudaEventCreate(&start); //cudaEventCreate(&end); cudaMemcpy(a_dev, a, size, cudaMemcpyHostToDevice); cudaMemcpy(b_dev, b, size, cudaMemcpyHostToDevice); //int threads = 1024; //int blocks = (n+threads-1)/threads; //cudaEventRecord(start); //vectorAdd<<<blocks,threads>>>(a_dev, b_dev, c_dev, n); vectorAdd<<<1,1024>>>(a_dev, b_dev, c_dev, n); //cudaEventRecord(end); //cudaDeviceSynchronize(); //float time = 0.0; //cudaEventElapsedTime(&time, start, end); cudaMemcpy(c, c_dev, size, cudaMemcpyDeviceToHost); cout<<"Results : "<<endl; print_array(c, n); //cout<<"Time elapsed : "<<time<<endl; cudaFree(a_dev); cudaFree(b_dev); cudaFree(c_dev); return 0; }
102432278c85c20f43501f57e13ec13ca44d875a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // @file im2row_gpu.cu // @brief Stack image patches as matrix rows (GPU) // @author Andrea Vedaldi /* Copyright (C) 2014-15 Andrea Vedaldi. All rights reserved. This file is part of the VLFeat library and is made available under the terms of the BSD license (see the COPYING file). */ #include "im2row.hpp" #include "../datacu.hpp" #include <iostream> using namespace vl ; /* ---------------------------------------------------------------- */ /* im2row */ /* ---------------------------------------------------------------- */ template <typename T> __global__ void im2row_forward_kernel(T* stacked, T const* data, const int numPatchesX, const int numPatchesY, const int numPatchSlices, const int width, const int height, const int windowWidth, const int windowHeight, const int strideX, const int strideY, const int padLeft, const int padTop, const int dilateX, const int dilateY) { /* each kernel copies the pixels in an image patch for one channel */ int index = threadIdx.x + blockIdx.x * blockDim.x ; if (index < numPatchSlices) { /* get the patch slice (x,y,z) to copy */ int x = index ; int y = x / numPatchesX ; int z = y / numPatchesY ; x %= numPatchesX ; y %= numPatchesY ; /* pick the top-left corer of the patch slice in the input image */ int x_data = x * strideX - padLeft ; int y_data = y * strideY - padTop ; data += (z * height + y_data) * width + x_data ; /* pick the column of the stacked image which contains this patch, and move down along the column at the beginning of the patch slice */ int patchSliceOffset = (windowWidth*windowHeight) * z ; stacked += (numPatchesY * patchSliceOffset + y) * numPatchesX + x ; /* copy the patch slice */ int windowExtentX = (windowWidth - 1) * dilateX + 1; int windowExtentY = (windowHeight - 1) * dilateY + 1; for (int v = 0 ; v < windowExtentY ; v += dilateY) { for (int u = 0 ; u < windowExtentX ; u += dilateX) { if (y_data + v >= 0 && y_data + v < 
height && x_data + u >= 0 && x_data + u < width) { *stacked = data[v * width + u] ; } else { *stacked = 0 ; } stacked += (numPatchesX*numPatchesY) ; } } } } /* ---------------------------------------------------------------- */ /* im2row backward kernel */ /* ---------------------------------------------------------------- */ // The next two functions assume b > 0. __forceinline__ __device__ int floordiv(int a, int b) { int q = a/b ; if (a >= 0 || a == q*b) return q ; return q - 1 ; } __forceinline__ __device__ int ceildiv(int a, int b) { int q = a/b ; if (a <= 0 || a == q*b) return q ; return q + 1 ; } int floordiv_cpu(int a, int b) { int q = a/b ; if (a >= 0 || a == q*b) return q ; return q - 1 ; } int ceildiv_cpu(int a, int b) { int q = a/b ; if (a <= 0 || a == q*b) return q ; return q + 1 ; } #if 0 template <typename T> void im2row_backward_kernel_fake( int index, T* data, T const* stacked, const int numPatchesX, const int numPatchesY, const int dataVolume, const int width, const int height, const int depth, const int windowWidth, const int windowHeight, const int strideX, const int strideY, const int padLeft, const int padTop, const int dilateX, const int dilateY, const int gcdx, const int gcdy, const int xbar, const int ybar, const int ubar, const int vbar) { // int index = 143 ; if (index < dataVolume) { T accumulator = 0 ; /* The goal of this kernel is to accumulate data[index]=data[x_data,y_data] all elements of the patch matrix that received copies of data[index] in the forward pass. To do this, we need to find which patches (x,y) that contain copies of this pixel and the relative offsets (u,v) within each such patch. First, we find which patches (x,y) contain copies of pixel (x_data,y_data) in the input tensor. The input tensor coordiante (x_data,y_data) of pixel (u,v) in patch (x,y) are related by equations: x_data = x * strideX + u * dilateX - padLeft, y_data = y * strideY + v * dilateY - padTop. 
Hence: x * strideX = x_data - u * dilateX + padLeft, same for y. Now we find all values of (x,y) that can be generated by this equation. These gives us the patches (x,y) that must be summed. We have: strideX * x + dilateX * u = x_data + padLeft. where x and u are integers. This is a linear Diophantine equation. Rewrite it as: ax + bu = c, where a = strideX, b = dilateY, c = x_data + padLeft. This equation has a solution only if the greatest common divisor g = gcd(a,b) of a and b divides c as well. In this case, let (x0,u0) be a solution (i.e. a x0 + b u0 = c); all other solutions are in the form x_k = x0 + Dx * k, Dx = b/g, u_k = u0 - Du * k, Du = a/g. Next, we look for the values of k such that x_k and u_k are within bounds: 1) 0 <= x_k <= Iw - 1 2) 0 <= u_k <= Ww - 1 Thus 0) recall: gcd(a,b) must divide c 1) ceil(- x0/Dx) <= k <= floor((Iw - 1 - x0)/Dx) 2) ceil((u0 - Ww + 1)/Du) <= k <= floor(u0/Du) Thus we need to look for the k in the interval k_min = ceil(max(-x0/Dx, (u0 - Ww + 1)/Du)), k_max = floor(min((Iw - 1 - x0)/Dx,u0/Du). Toghether with (*) and the corresponding equations for y, this produces a list of patches (x_k,y_p) that contains pixel (x_data,y_data) (the list can be empty). Furthermore, x_data is mapped to a specific pixel in patch x_k whose coordiante is u_k, also given above. */ int x_data = index ; int y_data = x_data / width ; int z = y_data / height ; x_data %= width ; y_data %= height ; int cx = x_data + padLeft ; int cy = y_data + padTop ; int qx = cx / gcdx ; int qy = cy / gcdy ; printf("x_data:%4d y_data:%4d | " "cx:%3d qx:%3d gcdx:%3d dx:%3d |" "cy:%3d qy:%3d gcdy:%3d dy:%3d\n", x_data, y_data, cx,qx,gcdx,cx - gcdx * qx, cy,qy,gcdy,cy - gcdy * qy) ; if (cx != gcdx * qx || cy != gcdy * qy) { return ; } int x0 = xbar * qx ; int u0 = ubar * qx ; int y0 = ybar * qy ; int v0 = vbar * qy ; // ax + bu = c, where // a = strideX, // b = dilateY, // c = x_data + padLeft. 
printf("checkx:%d\n", strideX*x0+dilateY*u0-x_data-padLeft) ; printf("checky:%d\n", strideY*y0+dilateY*v0-y_data-padTop) ; int Dx = dilateX / gcdx ; int Du = strideX / gcdx ; int Dy = dilateY / gcdy ; int Dv = strideY / gcdy ; int kmin1 = ceildiv_cpu(-x0,Dx) ; int kmax1 = floordiv_cpu(numPatchesX - 1 - x0,Dx) ; int kmin2 = ceildiv_cpu(u0 - windowWidth + 1,Du) ; int kmax2 = floordiv_cpu(u0,Du) ; int kmin = max(kmin1,kmin2) ; int kmax = min(kmax1,kmax2) ; int qmin1 = ceildiv_cpu(-y0,Dy) ; int qmax1 = floordiv_cpu(numPatchesY - 1 - y0,Dy) ; int qmin2 = ceildiv_cpu(v0 - windowHeight + 1,Dv) ; int qmax2 = floordiv_cpu(v0,Dv) ; int qmin = max(qmin1,qmin2) ; int qmax = min(qmax1,qmax2) ; printf("Dy:%3d Dv:%3d\n", Dy, Dv) ; printf("q: %3d to %3d (qmin1:%3d qmin2:%3d qmax1:%3d qmax2:%3d)\n", qmin,qmax,qmin1,qmin2,qmax1,qmax2) ; /* Now we have kmin <= k <= kmax, qmin <= q <= qmax and x_k = x0 + Dx * k, u_k = u0 - Du * k, y_q = y0 + Dy * q, v_q = v0 - Dv * q. Thus for each (k,q) in the allowable range, we visit patch (x_k,y_q) and pixel (u_k,v_q) within it. (x_k,y_q) tells us which row of the patch matix to look for, and (u_k,v_q) tells us which column. Linearizing all this: pm_row(k,q) = y_q * numPatchesX + x_k, pm_col(k,q) = ((z * windowHeight) + v_q) * windowWidth + u_k. 
This is further linearized into an index: pm_index(k,q) = (numPatchesX*numPatchesY) * pm_col(k,q) + pm_row(k,q) Substituting everything pm_row(k,q) = (y0 + Dy * q) * numPatchesX + x0 + Dx * k = (numPatchesX * Dy) * q + Dx * k + (y0 * numPatchesX + x0) = rqc * q + rkc * k + roc pm_col(k,q) = ((z * windowHeight) + v0 - Dv * q) * windowWidth + u0 - Du * k = - (windowWidth * Dv) * q - (Du) * k + (windowHeight * windowWidth * z + v0 * windowWidth + u0) = cqc * q + ckc * k + coc ; pm_index(k,q) = (numPatchesX*numPatchesY) * (cqc * q + ckc * k + coc) + rqc * q + rkc * k + roc = (numPatchesX*numPatchesY * cqc + rqc) * q + (numPatchesX*numPatchesY * ckc + rkc) * k + (numPatchesX*numPatchesY * coc + roc) = iqc * q + ikc * k + ioc */ int rqc = numPatchesX * Dy ; int rkc = Dx ; int roc = numPatchesX * y0 + x0 ; int cqc = - windowWidth * Dv ; int ckc = - Du ; int coc = windowWidth * (windowHeight * z + v0) + u0 ; int np = numPatchesX * numPatchesY ; int iqc = np * cqc + rqc ; int ikc = np * ckc + rkc ; int ioc = np * coc + roc ; stacked += ioc ; for (int q = qmin ; q <= qmax ; ++ q) { for (int k = kmin ; k <= kmax ; ++ k) { int index_ = iqc * q + ikc * k + ioc ; printf("index:%4d x:%3d y:%3d k:%3d q:%3d\n", index, x0+Dx*k, y0+Dy*q, k, q) ; accumulator += 1;//stacked[iqc * q + ikc * k] ; } } // data[index] = accumulator; } } #endif template <typename T> __global__ void im2row_backward_kernel(T* data, T const* stacked, const int numPatchesX, const int numPatchesY, const int dataVolume, const int width, const int height, const int depth, const int windowWidth, const int windowHeight, const int strideX, const int strideY, const int padLeft, const int padTop, const int dilateX, const int dilateY, const int gcdx, const int gcdy, const int xbar, const int ybar, const int ubar, const int vbar) { int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < dataVolume) { T accumulator = 0 ; /* The goal of this kernel is to accumulate data[index]=data[x_data,y_data] all elements of the 
patch matrix that received copies of data[index] in the forward pass. To do this, we need to find which patches (x,y) that contain copies of this pixel and the relative offsets (u,v) within each such patch. First, we find which patches (x,y) contain copies of pixel (x_data,y_data) in the input tensor. The input tensor coordiante (x_data,y_data) of pixel (u,v) in patch (x,y) are related by equations: x_data = x * strideX + u * dilateX - padLeft, y_data = y * strideY + v * dilateY - padTop. Now we find all values of (x,y) that can be generated by this equation. These gives us the patches (x,y) that must be summed. We have: strideX * x + dilateX * u = x_data + padLeft. where x and u are integers. This is a linear Diophantine equation. Rewrite it as: ax + bu = c, where a = strideX, b = dilateY, c = x_data + padLeft. This equation has a solution only if the greatest common divisor g = gcd(a,b) of a and b divides c as well. In this case, let (x0,u0) be a solution (i.e. a x0 + b u0 = c); all other solutions are in the form x_k = x0 + Dx * k, Dx = b/g, u_k = u0 - Du * k, Du = a/g. Next, we look for the values of k such that x_k and u_k are within bounds: 1) 0 <= x_k <= Pw - 1 2) 0 <= u_k <= Ww - 1 Thus 0) recall: gcd(a,b) must divide c 1) ceil(- x0/Dx) <= k <= floor((Iw - 1 - x0)/Dx) 2) ceil((u0 - Ww + 1)/Du) <= k <= floor(u0/Du) Thus we need to look for the k in the interval k_min = ceil(max(-x0/Dx, (u0 - Ww + 1)/Du)), k_max = floor(min((Pw - 1 - x0)/Dx,u0/Du). Toghether with (*) and the corresponding equations for y, this produces a list of patches (x_k,y_p) that contains pixel (x_data,y_data) (the list can be empty). Furthermore, x_data is mapped to a specific pixel in patch x_k whose coordiante is u_k, also given above. 
*/ int x_data = index ; int y_data = x_data / width ; int z = y_data / height ; x_data %= width ; y_data %= height ; int cx = x_data + padLeft ; int cy = y_data + padTop ; int qx = cx / gcdx ; int qy = cy / gcdy ; if (cx != gcdx * qx || cy != gcdy * qy) { data[index] = 0 ; return ; } int x0 = xbar * qx ; int u0 = ubar * qx ; int y0 = ybar * qy ; int v0 = vbar * qy ; int Dx = dilateX / gcdx ; int Du = strideX / gcdx ; int Dy = dilateY / gcdy ; int Dv = strideY / gcdy ; int kmin1 = ceildiv(-x0,Dx) ; int kmax1 = floordiv(numPatchesX - 1 - x0,Dx) ; int kmin2 = ceildiv(u0 - windowWidth + 1,Du) ; int kmax2 = floordiv(u0,Du) ; int kmin = max(kmin1,kmin2) ; int kmax = min(kmax1,kmax2) ; int qmin1 = ceildiv(-y0,Dy) ; int qmax1 = floordiv(numPatchesY - 1 - y0,Dy) ; int qmin2 = ceildiv(v0 - windowHeight + 1,Dv) ; int qmax2 = floordiv(v0,Dv) ; int qmin = max(qmin1,qmin2) ; int qmax = min(qmax1,qmax2) ; /* Now we have kmin <= k <= kmax, qmin <= q <= qmax and x_k = x0 + Dx * k, u_k = u0 - Du * k, y_q = y0 + Dy * q, v_q = v0 - Dv * q. Thus for each (k,q) in the allowable range, we visit patch (x_k,y_q) and pixel (u_k,v_q) within it. (x_k,y_q) tells us which row of the patch matix to look for, and (u_k,v_q) tells us which column. Linearizing all this: pm_row(k,q) = y_q * numPatchesX + x_k, pm_col(k,q) = ((z * windowHeight) + v_q) * windowWidth + u_k. 
This is further linearized into an index: pm_index(k,q) = (numPatchesX*numPatchesY) * pm_col(k,q) + pm_row(k,q) Substituting everything pm_row(k,q) = (y0 + Dy * q) * numPatchesX + x0 + Dx * k = (numPatchesX * Dy) * q + Dx * k + (y0 * numPatchesX + x0) = rqc * q + rkc * k + roc pm_col(k,q) = ((z * windowHeight) + v0 - Dv * q) * windowWidth + u0 - Du * k = - (windowWidth * Dv) * q - (Du) * k + (windowHeight * windowWidth * z + v0 * windowWidth + u0) = cqc * q + ckc * k + coc ; pm_index(k,q) = (numPatchesX*numPatchesY) * (cqc * q + ckc * k + coc) + rqc * q + rkc * k + roc = (numPatchesX*numPatchesY * cqc + rqc) * q + (numPatchesX*numPatchesY * ckc + rkc) * k + (numPatchesX*numPatchesY * coc + roc) = iqc * q + ikc * k + ioc */ int rqc = numPatchesX * Dy ; int rkc = Dx ; int roc = numPatchesX * y0 + x0 ; int cqc = - windowWidth * Dv ; int ckc = - Du ; int coc = windowWidth * (windowHeight * z + v0) + u0 ; int np = numPatchesX * numPatchesY ; int iqc = np * cqc + rqc ; int ikc = np * ckc + rkc ; int ioc = np * coc + roc ; stacked += ioc ; for (int q = qmin ; q <= qmax ; ++ q) { for (int k = kmin ; k <= kmax ; ++ k) { accumulator += stacked[iqc * q + ikc * k] ; } } data[index] = accumulator; } } namespace vl { namespace impl { template<typename type> struct im2row<vl::VLDT_GPU, type> { /* ------------------------------------------------------------ */ /* forward */ /* ------------------------------------------------------------ */ static vl::ErrorCode forward(Context & context, type* stacked, type const* data, size_t width, size_t height, size_t depth, size_t windowWidth, size_t windowHeight, size_t strideX, size_t strideY, size_t padLeft, size_t padRight, size_t padTop, size_t padBottom, int dilateX, int dilateY) { /* Each kernel instance copies a feature dimension of a patch */ int windowExtentX = (windowWidth - 1)*dilateX + 1 ; int windowExtentY = (windowHeight - 1)*dilateY + 1 ; int numPatchesX = (width + (padLeft + padRight) - windowExtentX)/strideX + 1 ; int 
numPatchesY = (height + (padTop + padBottom) - windowExtentY)/strideY + 1 ; int numPatchSlices = numPatchesX * numPatchesY * depth ; hipLaunchKernelGGL(( im2row_forward_kernel<type>) , dim3(divideAndRoundUp(numPatchSlices, VL_CUDA_NUM_THREADS)), dim3(VL_CUDA_NUM_THREADS) , 0, 0, stacked, data, numPatchesX, numPatchesY, numPatchSlices, width, height, windowWidth, windowHeight, strideX, strideY, padLeft, padTop, dilateX, dilateY) ; return context.setError(context.getCudaHelper().catchCudaError(__func__)) ; } /* ------------------------------------------------------------ */ /* backward */ /* ------------------------------------------------------------ */ static vl::ErrorCode backward(Context & context, type* data, type const* stacked, size_t width, size_t height, size_t depth, size_t windowWidth, size_t windowHeight, size_t strideX, size_t strideY, size_t padLeft, size_t padRight, size_t padTop, size_t padBottom, int dilateX, int dilateY) { /* Each kernel integrates all contributions to a particular element of data. 
*/ int windowExtentX = (windowWidth - 1)*dilateX + 1 ; int windowExtentY = (windowHeight - 1)*dilateY + 1 ; int numPatchesX = (width + (padLeft + padRight) - windowExtentX)/strideX + 1 ; int numPatchesY = (height + (padTop + padBottom) - windowExtentY)/strideY + 1 ; int dataVolume = width * height * depth ; int xbar ; int ubar ; int gcdx = vl::gcd(strideX, dilateX, xbar, ubar) ; int ybar ; int vbar ; int gcdy = vl::gcd(strideY, dilateY, ybar, vbar) ; #if 0 for (int i = 0 ; i < dataVolume ; ++i) { im2row_backward_kernel_fake<type> (i, data, stacked, numPatchesX, numPatchesY, dataVolume, width, height, depth, windowWidth, windowHeight, strideX, strideY, padLeft, padTop, dilateX, dilateY, gcdx, gcdy, xbar, ybar, ubar, vbar) ; } #endif hipLaunchKernelGGL(( im2row_backward_kernel<type>) , dim3(divideAndRoundUp(dataVolume, VL_CUDA_NUM_THREADS)), dim3(VL_CUDA_NUM_THREADS) , 0, 0, data, stacked, numPatchesX, numPatchesY, dataVolume, width, height, depth, windowWidth, windowHeight, strideX, strideY, padLeft, padTop, dilateX, dilateY, gcdx, gcdy, xbar, ybar, ubar, vbar) ; return context.setError(context.getCudaHelper().catchCudaError(__func__)) ; } } ; } } // Instantiations template struct vl::impl::im2row<vl::VLDT_GPU, float> ; #ifdef ENABLE_DOUBLE template struct vl::impl::im2row<vl::VLDT_GPU, double> ; #endif
102432278c85c20f43501f57e13ec13ca44d875a.cu
// @file im2row_gpu.cu // @brief Stack image patches as matrix rows (GPU) // @author Andrea Vedaldi /* Copyright (C) 2014-15 Andrea Vedaldi. All rights reserved. This file is part of the VLFeat library and is made available under the terms of the BSD license (see the COPYING file). */ #include "im2row.hpp" #include "../datacu.hpp" #include <iostream> using namespace vl ; /* ---------------------------------------------------------------- */ /* im2row */ /* ---------------------------------------------------------------- */ template <typename T> __global__ void im2row_forward_kernel(T* stacked, T const* data, const int numPatchesX, const int numPatchesY, const int numPatchSlices, const int width, const int height, const int windowWidth, const int windowHeight, const int strideX, const int strideY, const int padLeft, const int padTop, const int dilateX, const int dilateY) { /* each kernel copies the pixels in an image patch for one channel */ int index = threadIdx.x + blockIdx.x * blockDim.x ; if (index < numPatchSlices) { /* get the patch slice (x,y,z) to copy */ int x = index ; int y = x / numPatchesX ; int z = y / numPatchesY ; x %= numPatchesX ; y %= numPatchesY ; /* pick the top-left corer of the patch slice in the input image */ int x_data = x * strideX - padLeft ; int y_data = y * strideY - padTop ; data += (z * height + y_data) * width + x_data ; /* pick the column of the stacked image which contains this patch, and move down along the column at the beginning of the patch slice */ int patchSliceOffset = (windowWidth*windowHeight) * z ; stacked += (numPatchesY * patchSliceOffset + y) * numPatchesX + x ; /* copy the patch slice */ int windowExtentX = (windowWidth - 1) * dilateX + 1; int windowExtentY = (windowHeight - 1) * dilateY + 1; for (int v = 0 ; v < windowExtentY ; v += dilateY) { for (int u = 0 ; u < windowExtentX ; u += dilateX) { if (y_data + v >= 0 && y_data + v < height && x_data + u >= 0 && x_data + u < width) { *stacked = data[v * width + u] ; } 
else { *stacked = 0 ; } stacked += (numPatchesX*numPatchesY) ; } } } } /* ---------------------------------------------------------------- */ /* im2row backward kernel */ /* ---------------------------------------------------------------- */ // The next two functions assume b > 0. __forceinline__ __device__ int floordiv(int a, int b) { int q = a/b ; if (a >= 0 || a == q*b) return q ; return q - 1 ; } __forceinline__ __device__ int ceildiv(int a, int b) { int q = a/b ; if (a <= 0 || a == q*b) return q ; return q + 1 ; } int floordiv_cpu(int a, int b) { int q = a/b ; if (a >= 0 || a == q*b) return q ; return q - 1 ; } int ceildiv_cpu(int a, int b) { int q = a/b ; if (a <= 0 || a == q*b) return q ; return q + 1 ; } #if 0 template <typename T> void im2row_backward_kernel_fake( int index, T* data, T const* stacked, const int numPatchesX, const int numPatchesY, const int dataVolume, const int width, const int height, const int depth, const int windowWidth, const int windowHeight, const int strideX, const int strideY, const int padLeft, const int padTop, const int dilateX, const int dilateY, const int gcdx, const int gcdy, const int xbar, const int ybar, const int ubar, const int vbar) { // int index = 143 ; if (index < dataVolume) { T accumulator = 0 ; /* The goal of this kernel is to accumulate data[index]=data[x_data,y_data] all elements of the patch matrix that received copies of data[index] in the forward pass. To do this, we need to find which patches (x,y) that contain copies of this pixel and the relative offsets (u,v) within each such patch. First, we find which patches (x,y) contain copies of pixel (x_data,y_data) in the input tensor. The input tensor coordiante (x_data,y_data) of pixel (u,v) in patch (x,y) are related by equations: x_data = x * strideX + u * dilateX - padLeft, y_data = y * strideY + v * dilateY - padTop. Hence: x * strideX = x_data - u * dilateX + padLeft, same for y. Now we find all values of (x,y) that can be generated by this equation. 
These gives us the patches (x,y) that must be summed. We have: strideX * x + dilateX * u = x_data + padLeft. where x and u are integers. This is a linear Diophantine equation. Rewrite it as: ax + bu = c, where a = strideX, b = dilateY, c = x_data + padLeft. This equation has a solution only if the greatest common divisor g = gcd(a,b) of a and b divides c as well. In this case, let (x0,u0) be a solution (i.e. a x0 + b u0 = c); all other solutions are in the form x_k = x0 + Dx * k, Dx = b/g, u_k = u0 - Du * k, Du = a/g. Next, we look for the values of k such that x_k and u_k are within bounds: 1) 0 <= x_k <= Iw - 1 2) 0 <= u_k <= Ww - 1 Thus 0) recall: gcd(a,b) must divide c 1) ceil(- x0/Dx) <= k <= floor((Iw - 1 - x0)/Dx) 2) ceil((u0 - Ww + 1)/Du) <= k <= floor(u0/Du) Thus we need to look for the k in the interval k_min = ceil(max(-x0/Dx, (u0 - Ww + 1)/Du)), k_max = floor(min((Iw - 1 - x0)/Dx,u0/Du). Toghether with (*) and the corresponding equations for y, this produces a list of patches (x_k,y_p) that contains pixel (x_data,y_data) (the list can be empty). Furthermore, x_data is mapped to a specific pixel in patch x_k whose coordiante is u_k, also given above. */ int x_data = index ; int y_data = x_data / width ; int z = y_data / height ; x_data %= width ; y_data %= height ; int cx = x_data + padLeft ; int cy = y_data + padTop ; int qx = cx / gcdx ; int qy = cy / gcdy ; printf("x_data:%4d y_data:%4d | " "cx:%3d qx:%3d gcdx:%3d dx:%3d |" "cy:%3d qy:%3d gcdy:%3d dy:%3d\n", x_data, y_data, cx,qx,gcdx,cx - gcdx * qx, cy,qy,gcdy,cy - gcdy * qy) ; if (cx != gcdx * qx || cy != gcdy * qy) { return ; } int x0 = xbar * qx ; int u0 = ubar * qx ; int y0 = ybar * qy ; int v0 = vbar * qy ; // ax + bu = c, where // a = strideX, // b = dilateY, // c = x_data + padLeft. 
printf("checkx:%d\n", strideX*x0+dilateY*u0-x_data-padLeft) ; printf("checky:%d\n", strideY*y0+dilateY*v0-y_data-padTop) ; int Dx = dilateX / gcdx ; int Du = strideX / gcdx ; int Dy = dilateY / gcdy ; int Dv = strideY / gcdy ; int kmin1 = ceildiv_cpu(-x0,Dx) ; int kmax1 = floordiv_cpu(numPatchesX - 1 - x0,Dx) ; int kmin2 = ceildiv_cpu(u0 - windowWidth + 1,Du) ; int kmax2 = floordiv_cpu(u0,Du) ; int kmin = max(kmin1,kmin2) ; int kmax = min(kmax1,kmax2) ; int qmin1 = ceildiv_cpu(-y0,Dy) ; int qmax1 = floordiv_cpu(numPatchesY - 1 - y0,Dy) ; int qmin2 = ceildiv_cpu(v0 - windowHeight + 1,Dv) ; int qmax2 = floordiv_cpu(v0,Dv) ; int qmin = max(qmin1,qmin2) ; int qmax = min(qmax1,qmax2) ; printf("Dy:%3d Dv:%3d\n", Dy, Dv) ; printf("q: %3d to %3d (qmin1:%3d qmin2:%3d qmax1:%3d qmax2:%3d)\n", qmin,qmax,qmin1,qmin2,qmax1,qmax2) ; /* Now we have kmin <= k <= kmax, qmin <= q <= qmax and x_k = x0 + Dx * k, u_k = u0 - Du * k, y_q = y0 + Dy * q, v_q = v0 - Dv * q. Thus for each (k,q) in the allowable range, we visit patch (x_k,y_q) and pixel (u_k,v_q) within it. (x_k,y_q) tells us which row of the patch matix to look for, and (u_k,v_q) tells us which column. Linearizing all this: pm_row(k,q) = y_q * numPatchesX + x_k, pm_col(k,q) = ((z * windowHeight) + v_q) * windowWidth + u_k. 
This is further linearized into an index: pm_index(k,q) = (numPatchesX*numPatchesY) * pm_col(k,q) + pm_row(k,q) Substituting everything pm_row(k,q) = (y0 + Dy * q) * numPatchesX + x0 + Dx * k = (numPatchesX * Dy) * q + Dx * k + (y0 * numPatchesX + x0) = rqc * q + rkc * k + roc pm_col(k,q) = ((z * windowHeight) + v0 - Dv * q) * windowWidth + u0 - Du * k = - (windowWidth * Dv) * q - (Du) * k + (windowHeight * windowWidth * z + v0 * windowWidth + u0) = cqc * q + ckc * k + coc ; pm_index(k,q) = (numPatchesX*numPatchesY) * (cqc * q + ckc * k + coc) + rqc * q + rkc * k + roc = (numPatchesX*numPatchesY * cqc + rqc) * q + (numPatchesX*numPatchesY * ckc + rkc) * k + (numPatchesX*numPatchesY * coc + roc) = iqc * q + ikc * k + ioc */ int rqc = numPatchesX * Dy ; int rkc = Dx ; int roc = numPatchesX * y0 + x0 ; int cqc = - windowWidth * Dv ; int ckc = - Du ; int coc = windowWidth * (windowHeight * z + v0) + u0 ; int np = numPatchesX * numPatchesY ; int iqc = np * cqc + rqc ; int ikc = np * ckc + rkc ; int ioc = np * coc + roc ; stacked += ioc ; for (int q = qmin ; q <= qmax ; ++ q) { for (int k = kmin ; k <= kmax ; ++ k) { int index_ = iqc * q + ikc * k + ioc ; printf("index:%4d x:%3d y:%3d k:%3d q:%3d\n", index, x0+Dx*k, y0+Dy*q, k, q) ; accumulator += 1;//stacked[iqc * q + ikc * k] ; } } // data[index] = accumulator; } } #endif template <typename T> __global__ void im2row_backward_kernel(T* data, T const* stacked, const int numPatchesX, const int numPatchesY, const int dataVolume, const int width, const int height, const int depth, const int windowWidth, const int windowHeight, const int strideX, const int strideY, const int padLeft, const int padTop, const int dilateX, const int dilateY, const int gcdx, const int gcdy, const int xbar, const int ybar, const int ubar, const int vbar) { int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < dataVolume) { T accumulator = 0 ; /* The goal of this kernel is to accumulate data[index]=data[x_data,y_data] all elements of the 
patch matrix that received copies of data[index] in the forward pass. To do this, we need to find which patches (x,y) that contain copies of this pixel and the relative offsets (u,v) within each such patch. First, we find which patches (x,y) contain copies of pixel (x_data,y_data) in the input tensor. The input tensor coordiante (x_data,y_data) of pixel (u,v) in patch (x,y) are related by equations: x_data = x * strideX + u * dilateX - padLeft, y_data = y * strideY + v * dilateY - padTop. Now we find all values of (x,y) that can be generated by this equation. These gives us the patches (x,y) that must be summed. We have: strideX * x + dilateX * u = x_data + padLeft. where x and u are integers. This is a linear Diophantine equation. Rewrite it as: ax + bu = c, where a = strideX, b = dilateY, c = x_data + padLeft. This equation has a solution only if the greatest common divisor g = gcd(a,b) of a and b divides c as well. In this case, let (x0,u0) be a solution (i.e. a x0 + b u0 = c); all other solutions are in the form x_k = x0 + Dx * k, Dx = b/g, u_k = u0 - Du * k, Du = a/g. Next, we look for the values of k such that x_k and u_k are within bounds: 1) 0 <= x_k <= Pw - 1 2) 0 <= u_k <= Ww - 1 Thus 0) recall: gcd(a,b) must divide c 1) ceil(- x0/Dx) <= k <= floor((Iw - 1 - x0)/Dx) 2) ceil((u0 - Ww + 1)/Du) <= k <= floor(u0/Du) Thus we need to look for the k in the interval k_min = ceil(max(-x0/Dx, (u0 - Ww + 1)/Du)), k_max = floor(min((Pw - 1 - x0)/Dx,u0/Du). Toghether with (*) and the corresponding equations for y, this produces a list of patches (x_k,y_p) that contains pixel (x_data,y_data) (the list can be empty). Furthermore, x_data is mapped to a specific pixel in patch x_k whose coordiante is u_k, also given above. 
*/ int x_data = index ; int y_data = x_data / width ; int z = y_data / height ; x_data %= width ; y_data %= height ; int cx = x_data + padLeft ; int cy = y_data + padTop ; int qx = cx / gcdx ; int qy = cy / gcdy ; if (cx != gcdx * qx || cy != gcdy * qy) { data[index] = 0 ; return ; } int x0 = xbar * qx ; int u0 = ubar * qx ; int y0 = ybar * qy ; int v0 = vbar * qy ; int Dx = dilateX / gcdx ; int Du = strideX / gcdx ; int Dy = dilateY / gcdy ; int Dv = strideY / gcdy ; int kmin1 = ceildiv(-x0,Dx) ; int kmax1 = floordiv(numPatchesX - 1 - x0,Dx) ; int kmin2 = ceildiv(u0 - windowWidth + 1,Du) ; int kmax2 = floordiv(u0,Du) ; int kmin = max(kmin1,kmin2) ; int kmax = min(kmax1,kmax2) ; int qmin1 = ceildiv(-y0,Dy) ; int qmax1 = floordiv(numPatchesY - 1 - y0,Dy) ; int qmin2 = ceildiv(v0 - windowHeight + 1,Dv) ; int qmax2 = floordiv(v0,Dv) ; int qmin = max(qmin1,qmin2) ; int qmax = min(qmax1,qmax2) ; /* Now we have kmin <= k <= kmax, qmin <= q <= qmax and x_k = x0 + Dx * k, u_k = u0 - Du * k, y_q = y0 + Dy * q, v_q = v0 - Dv * q. Thus for each (k,q) in the allowable range, we visit patch (x_k,y_q) and pixel (u_k,v_q) within it. (x_k,y_q) tells us which row of the patch matix to look for, and (u_k,v_q) tells us which column. Linearizing all this: pm_row(k,q) = y_q * numPatchesX + x_k, pm_col(k,q) = ((z * windowHeight) + v_q) * windowWidth + u_k. 
This is further linearized into an index: pm_index(k,q) = (numPatchesX*numPatchesY) * pm_col(k,q) + pm_row(k,q) Substituting everything pm_row(k,q) = (y0 + Dy * q) * numPatchesX + x0 + Dx * k = (numPatchesX * Dy) * q + Dx * k + (y0 * numPatchesX + x0) = rqc * q + rkc * k + roc pm_col(k,q) = ((z * windowHeight) + v0 - Dv * q) * windowWidth + u0 - Du * k = - (windowWidth * Dv) * q - (Du) * k + (windowHeight * windowWidth * z + v0 * windowWidth + u0) = cqc * q + ckc * k + coc ; pm_index(k,q) = (numPatchesX*numPatchesY) * (cqc * q + ckc * k + coc) + rqc * q + rkc * k + roc = (numPatchesX*numPatchesY * cqc + rqc) * q + (numPatchesX*numPatchesY * ckc + rkc) * k + (numPatchesX*numPatchesY * coc + roc) = iqc * q + ikc * k + ioc */ int rqc = numPatchesX * Dy ; int rkc = Dx ; int roc = numPatchesX * y0 + x0 ; int cqc = - windowWidth * Dv ; int ckc = - Du ; int coc = windowWidth * (windowHeight * z + v0) + u0 ; int np = numPatchesX * numPatchesY ; int iqc = np * cqc + rqc ; int ikc = np * ckc + rkc ; int ioc = np * coc + roc ; stacked += ioc ; for (int q = qmin ; q <= qmax ; ++ q) { for (int k = kmin ; k <= kmax ; ++ k) { accumulator += stacked[iqc * q + ikc * k] ; } } data[index] = accumulator; } } namespace vl { namespace impl { template<typename type> struct im2row<vl::VLDT_GPU, type> { /* ------------------------------------------------------------ */ /* forward */ /* ------------------------------------------------------------ */ static vl::ErrorCode forward(Context & context, type* stacked, type const* data, size_t width, size_t height, size_t depth, size_t windowWidth, size_t windowHeight, size_t strideX, size_t strideY, size_t padLeft, size_t padRight, size_t padTop, size_t padBottom, int dilateX, int dilateY) { /* Each kernel instance copies a feature dimension of a patch */ int windowExtentX = (windowWidth - 1)*dilateX + 1 ; int windowExtentY = (windowHeight - 1)*dilateY + 1 ; int numPatchesX = (width + (padLeft + padRight) - windowExtentX)/strideX + 1 ; int 
numPatchesY = (height + (padTop + padBottom) - windowExtentY)/strideY + 1 ; int numPatchSlices = numPatchesX * numPatchesY * depth ; im2row_forward_kernel<type> <<< divideAndRoundUp(numPatchSlices, VL_CUDA_NUM_THREADS), VL_CUDA_NUM_THREADS >>> (stacked, data, numPatchesX, numPatchesY, numPatchSlices, width, height, windowWidth, windowHeight, strideX, strideY, padLeft, padTop, dilateX, dilateY) ; return context.setError(context.getCudaHelper().catchCudaError(__func__)) ; } /* ------------------------------------------------------------ */ /* backward */ /* ------------------------------------------------------------ */ static vl::ErrorCode backward(Context & context, type* data, type const* stacked, size_t width, size_t height, size_t depth, size_t windowWidth, size_t windowHeight, size_t strideX, size_t strideY, size_t padLeft, size_t padRight, size_t padTop, size_t padBottom, int dilateX, int dilateY) { /* Each kernel integrates all contributions to a particular element of data. */ int windowExtentX = (windowWidth - 1)*dilateX + 1 ; int windowExtentY = (windowHeight - 1)*dilateY + 1 ; int numPatchesX = (width + (padLeft + padRight) - windowExtentX)/strideX + 1 ; int numPatchesY = (height + (padTop + padBottom) - windowExtentY)/strideY + 1 ; int dataVolume = width * height * depth ; int xbar ; int ubar ; int gcdx = vl::gcd(strideX, dilateX, xbar, ubar) ; int ybar ; int vbar ; int gcdy = vl::gcd(strideY, dilateY, ybar, vbar) ; #if 0 for (int i = 0 ; i < dataVolume ; ++i) { im2row_backward_kernel_fake<type> (i, data, stacked, numPatchesX, numPatchesY, dataVolume, width, height, depth, windowWidth, windowHeight, strideX, strideY, padLeft, padTop, dilateX, dilateY, gcdx, gcdy, xbar, ybar, ubar, vbar) ; } #endif im2row_backward_kernel<type> <<< divideAndRoundUp(dataVolume, VL_CUDA_NUM_THREADS), VL_CUDA_NUM_THREADS >>> (data, stacked, numPatchesX, numPatchesY, dataVolume, width, height, depth, windowWidth, windowHeight, strideX, strideY, padLeft, padTop, dilateX, 
dilateY, gcdx, gcdy, xbar, ybar, ubar, vbar) ; return context.setError(context.getCudaHelper().catchCudaError(__func__)) ; } } ; } } // Instantiations template struct vl::impl::im2row<vl::VLDT_GPU, float> ; #ifdef ENABLE_DOUBLE template struct vl::impl::im2row<vl::VLDT_GPU, double> ; #endif
b8eccc6ebd7079641047fdc4393995b195cbc006.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/sigmoid_cross_entropy_with_logits_impl.cuh" #include "include/hip/hip_fp16.h" template <typename T, typename S> __global__ void SigmoidCrossEntropyWithLogitsKernel(const size_t size, const T *logits, const S *labels, T *outputs) { for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < size; i += gridDim.x * blockDim.x) { const T reverse_factor = static_cast<T>(logits[i] >= 0); outputs[i] = log1p(exp(logits[i] - static_cast<T>(2) * reverse_factor * logits[i])) - logits[i] * (labels[i] - reverse_factor); } } template <> __global__ void SigmoidCrossEntropyWithLogitsKernel(const size_t size, const half *logits, const half *labels, half *outputs) { for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < size; i += gridDim.x * blockDim.x) { const half reverse_factor = static_cast<half>(logits[i] >= static_cast<half>(0.)); const float exp_logit = exp(__half2float(logits[i] - static_cast<half>(2) * reverse_factor * logits[i])); outputs[i] = __float2half(log1p(exp_logit)) - logits[i] * (labels[i] - reverse_factor); } } template <typename T, typename S> void SigmoidCrossEntropyWithLogits(const size_t size, const T *logits, const S *labels, T *outputs, hipStream_t cuda_stream) { hipLaunchKernelGGL(( SigmoidCrossEntropyWithLogitsKernel), 
dim3(GET_BLOCKS(size)), dim3(GET_THREADS), 0, cuda_stream, size, logits, labels, outputs); } template CUDA_LIB_EXPORT void SigmoidCrossEntropyWithLogits<half, half>(const size_t size, const half *logits, const half *labels, half *outputs, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void SigmoidCrossEntropyWithLogits<float, float>(const size_t size, const float *logits, const float *labels, float *outputs, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void SigmoidCrossEntropyWithLogits<double, double>(const size_t size, const double *logits, const double *labels, double *outputs, hipStream_t cuda_stream);
b8eccc6ebd7079641047fdc4393995b195cbc006.cu
/** * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/sigmoid_cross_entropy_with_logits_impl.cuh" #include "include/cuda_fp16.h" template <typename T, typename S> __global__ void SigmoidCrossEntropyWithLogitsKernel(const size_t size, const T *logits, const S *labels, T *outputs) { for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < size; i += gridDim.x * blockDim.x) { const T reverse_factor = static_cast<T>(logits[i] >= 0); outputs[i] = log1p(exp(logits[i] - static_cast<T>(2) * reverse_factor * logits[i])) - logits[i] * (labels[i] - reverse_factor); } } template <> __global__ void SigmoidCrossEntropyWithLogitsKernel(const size_t size, const half *logits, const half *labels, half *outputs) { for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < size; i += gridDim.x * blockDim.x) { const half reverse_factor = static_cast<half>(logits[i] >= static_cast<half>(0.)); const float exp_logit = exp(__half2float(logits[i] - static_cast<half>(2) * reverse_factor * logits[i])); outputs[i] = __float2half(log1p(exp_logit)) - logits[i] * (labels[i] - reverse_factor); } } template <typename T, typename S> void SigmoidCrossEntropyWithLogits(const size_t size, const T *logits, const S *labels, T *outputs, cudaStream_t cuda_stream) { SigmoidCrossEntropyWithLogitsKernel<<<GET_BLOCKS(size), GET_THREADS, 0, cuda_stream>>>(size, logits, labels, outputs); } template CUDA_LIB_EXPORT void 
SigmoidCrossEntropyWithLogits<half, half>(const size_t size, const half *logits, const half *labels, half *outputs, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void SigmoidCrossEntropyWithLogits<float, float>(const size_t size, const float *logits, const float *labels, float *outputs, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void SigmoidCrossEntropyWithLogits<double, double>(const size_t size, const double *logits, const double *labels, double *outputs, cudaStream_t cuda_stream);
5a270f5f89da21879c07e6c27c49d157c972b589.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "multiplyBy2.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int size = XSIZE*YSIZE; long *in = NULL; hipMalloc(&in, XSIZE*YSIZE); long *out = NULL; hipMalloc(&out, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( multiplyBy2), dim3(gridBlock),dim3(threadBlock), 0, 0, size,in,out); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( multiplyBy2), dim3(gridBlock),dim3(threadBlock), 0, 0, size,in,out); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( multiplyBy2), dim3(gridBlock),dim3(threadBlock), 0, 0, size,in,out); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << 
','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
5a270f5f89da21879c07e6c27c49d157c972b589.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "multiplyBy2.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int size = XSIZE*YSIZE; long *in = NULL; cudaMalloc(&in, XSIZE*YSIZE); long *out = NULL; cudaMalloc(&out, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); multiplyBy2<<<gridBlock,threadBlock>>>(size,in,out); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { multiplyBy2<<<gridBlock,threadBlock>>>(size,in,out); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { multiplyBy2<<<gridBlock,threadBlock>>>(size,in,out); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}